Dataset schema (one row per source file; for string and list columns the min/max values are lengths, for numeric columns they are value ranges; ⌀ marks columns that may be null):

| column | dtype | min | max | nulls |
|---|---|---|---|---|
| hexsha | string | 40 | 40 | |
| size | int64 | 3 | 1.03M | |
| ext | string (10 classes) | | | |
| lang | string (1 class) | | | |
| max_stars_repo_path | string | 3 | 972 | |
| max_stars_repo_name | string | 6 | 130 | |
| max_stars_repo_head_hexsha | string | 40 | 78 | |
| max_stars_repo_licenses | list | 1 | 10 | |
| max_stars_count | int64 | 1 | 191k | ⌀ |
| max_stars_repo_stars_event_min_datetime | string | 24 | 24 | ⌀ |
| max_stars_repo_stars_event_max_datetime | string | 24 | 24 | ⌀ |
| max_issues_repo_path | string | 3 | 972 | |
| max_issues_repo_name | string | 6 | 130 | |
| max_issues_repo_head_hexsha | string | 40 | 78 | |
| max_issues_repo_licenses | list | 1 | 10 | |
| max_issues_count | int64 | 1 | 116k | ⌀ |
| max_issues_repo_issues_event_min_datetime | string | 24 | 24 | ⌀ |
| max_issues_repo_issues_event_max_datetime | string | 24 | 24 | ⌀ |
| max_forks_repo_path | string | 3 | 972 | |
| max_forks_repo_name | string | 6 | 130 | |
| max_forks_repo_head_hexsha | string | 40 | 78 | |
| max_forks_repo_licenses | list | 1 | 10 | |
| max_forks_count | int64 | 1 | 105k | ⌀ |
| max_forks_repo_forks_event_min_datetime | string | 24 | 24 | ⌀ |
| max_forks_repo_forks_event_max_datetime | string | 24 | 24 | ⌀ |
| content | string | 3 | 1.03M | |
| avg_line_length | float64 | 1.13 | 941k | |
| max_line_length | int64 | 2 | 941k | |
| alphanum_fraction | float64 | 0 | 1 | |
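To make the schema concrete, the sketch below renders the first record of this dump as a plain Python dict. The values are copied from that record; the max_issues_* and max_forks_* groups repeat the same path, repo and license in this record and are omitted for brevity, and the content string is truncated.

```python
# First record of this dump as a dict (illustration only; the max_issues_* /
# max_forks_* fields mirror the max_stars_* repo fields and are omitted here).
record = {
    "hexsha": "ce7d407480d8a7f47b9d9e91e7767883c6f1e739",
    "size": 994,
    "ext": "py",
    "lang": "Python",
    "max_stars_repo_path": "tests/test_imphooks.py",
    "max_stars_repo_name": "Granitosaurus/xonsh",
    "max_stars_repo_head_hexsha": "3796c4ba3b2e857a02760b260b5a911b148a511d",
    "max_stars_repo_licenses": ["BSD-2-Clause-FreeBSD"],
    "max_stars_count": None,  # ⌀ columns can be null
    "content": "# -*- coding: utf-8 -*-\n...",  # full file text, truncated here
    "avg_line_length": 24.243902,
    "max_line_length": 66,
    "alphanum_fraction": 0.709256,
}
```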
| field | value |
|---|---|
| hexsha | ce7d407480d8a7f47b9d9e91e7767883c6f1e739 |
| size | 994 |
| ext | py |
| lang | Python |
| max_stars / max_issues / max_forks repo_path | tests/test_imphooks.py |
| max_stars / max_issues / max_forks repo_name | Granitosaurus/xonsh |
| max_stars / max_issues / max_forks repo_head_hexsha | 3796c4ba3b2e857a02760b260b5a911b148a511d |
| max_stars / max_issues / max_forks repo_licenses | ["BSD-2-Clause-FreeBSD"] |
| max_stars_count | null |
| max_stars event min / max datetime | null / null |
| max_issues_count | null |
| max_issues event min / max datetime | null / null |
| max_forks_count | null |
| max_forks event min / max datetime | null / null |

content (tests/test_imphooks.py):
# -*- coding: utf-8 -*-
"""Testing xonsh import hooks"""
import pytest
from xonsh import imphooks # noqa
from xonsh import built_ins
from xonsh.environ import Env
from xonsh.execer import Execer
from xonsh.built_ins import load_builtins, unload_builtins
import builtins
@pytest.yield_fixture(autouse=True)
def imp_env(xonsh_execer):
"""Call `load_builtins` with `xonsh_execer`"""
load_builtins(execer=xonsh_execer)
builtins.__xonsh_env__ = Env({'PATH': [], 'PATHEXT': []})
yield
unload_builtins()
def test_import():
import sample
assert ('hello mom jawaka\n' == sample.x)
def test_absolute_import():
from xpack import sample
assert ('hello mom jawaka\n' == sample.x)
def test_relative_import():
from xpack import relimp
assert ('hello mom jawaka\n' == relimp.sample.x)
assert ('hello mom jawaka\ndark chest of wonders' == relimp.y)
def test_sub_import():
from xpack.sub import sample
assert ('hello mom jawaka\n' == sample.x)
| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 24.243902 | 66 | 0.709256 |
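The tests above rely on xonsh's import hooks: importing `sample` or `xpack.relimp` works because xonsh registers a finder on `sys.meta_path` that knows how to load `.xsh` files. The snippet below is not xonsh's implementation, only a minimal self-contained sketch of the same meta-path mechanism; `EchoFinder`, the `echo_` prefix and the synthesized attribute are made up for illustration.

```python
import importlib.abc
import importlib.machinery
import sys


class EchoFinder(importlib.abc.MetaPathFinder, importlib.abc.Loader):
    """Toy finder/loader: any module whose name starts with 'echo_' is synthesized."""

    def find_spec(self, fullname, path=None, target=None):
        if fullname.startswith("echo_"):
            return importlib.machinery.ModuleSpec(fullname, self)
        return None  # defer to the regular finders for everything else

    def create_module(self, spec):
        return None  # use the default module object

    def exec_module(self, module):
        # A real hook would compile source found on disk; here we just set an attribute.
        module.x = "hello mom jawaka\n"


sys.meta_path.insert(0, EchoFinder())

import echo_sample  # resolved by EchoFinder rather than the filesystem

assert echo_sample.x == "hello mom jawaka\n"
```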
| field | value |
|---|---|
| hexsha | f4a3806a891c33b26809c9c1ba6dd699b56e328e |
| size | 21,001 |
| ext | py |
| lang | Python |
| max_stars / max_issues / max_forks repo_path | tests/components/insteon/test_config_flow.py |
| max_stars / max_issues / max_forks repo_name | erogleva/core |
| max_stars / max_issues / max_forks repo_head_hexsha | 994ae09f69afe772150a698953c0d7386a745de2 |
| max_stars / max_issues / max_forks repo_licenses | ["Apache-2.0"] |
| max_stars_count | 6 |
| max_stars event min / max datetime | 2016-11-25T06:36:27.000Z / 2021-11-16T11:20:23.000Z |
| max_issues_count | 52 |
| max_issues event min / max datetime | 2020-07-14T14:12:26.000Z / 2022-03-31T06:24:02.000Z |
| max_forks_count | 2 |
| max_forks event min / max datetime | 2019-08-04T13:39:43.000Z / 2020-02-07T23:01:23.000Z |

content (tests/components/insteon/test_config_flow.py):
"""Test the config flow for the Insteon integration."""
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.components.insteon.config_flow import (
HUB1,
HUB2,
MODEM_TYPE,
PLM,
STEP_ADD_OVERRIDE,
STEP_ADD_X10,
STEP_CHANGE_HUB_CONFIG,
STEP_HUB_V2,
STEP_REMOVE_OVERRIDE,
STEP_REMOVE_X10,
)
from homeassistant.components.insteon.const import (
CONF_CAT,
CONF_DIM_STEPS,
CONF_HOUSECODE,
CONF_HUB_VERSION,
CONF_OVERRIDE,
CONF_SUBCAT,
CONF_UNITCODE,
CONF_X10,
DOMAIN,
)
from homeassistant.const import (
CONF_ADDRESS,
CONF_DEVICE,
CONF_HOST,
CONF_PASSWORD,
CONF_PLATFORM,
CONF_PORT,
CONF_USERNAME,
)
from homeassistant.helpers.typing import HomeAssistantType
from .const import (
MOCK_HOSTNAME,
MOCK_IMPORT_CONFIG_PLM,
MOCK_IMPORT_MINIMUM_HUB_V1,
MOCK_IMPORT_MINIMUM_HUB_V2,
MOCK_PASSWORD,
MOCK_USER_INPUT_HUB_V1,
MOCK_USER_INPUT_HUB_V2,
MOCK_USER_INPUT_PLM,
MOCK_USERNAME,
PATCH_ASYNC_SETUP,
PATCH_ASYNC_SETUP_ENTRY,
PATCH_CONNECTION,
)
from tests.async_mock import patch
from tests.common import MockConfigEntry
async def mock_successful_connection(*args, **kwargs):
"""Return a successful connection."""
return True
async def mock_failed_connection(*args, **kwargs):
"""Return a failed connection."""
raise ConnectionError("Connection failed")
async def _init_form(hass, modem_type):
"""Run the user form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {}
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{MODEM_TYPE: modem_type},
)
return result2
async def _device_form(hass, flow_id, connection, user_input):
"""Test the PLM, Hub v1 or Hub v2 form."""
with patch(PATCH_CONNECTION, new=connection,), patch(
PATCH_ASYNC_SETUP, return_value=True
) as mock_setup, patch(
PATCH_ASYNC_SETUP_ENTRY,
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(flow_id, user_input)
return result, mock_setup, mock_setup_entry
async def test_form_select_modem(hass: HomeAssistantType):
"""Test we get a modem form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await _init_form(hass, HUB2)
assert result["step_id"] == STEP_HUB_V2
assert result["type"] == "form"
async def test_fail_on_existing(hass: HomeAssistantType):
"""Test we fail if the integration is already configured."""
config_entry = MockConfigEntry(
domain=DOMAIN,
entry_id="abcde12345",
data={**MOCK_USER_INPUT_HUB_V2, CONF_HUB_VERSION: 2},
options={},
)
config_entry.add_to_hass(hass)
assert config_entry.state == config_entries.ENTRY_STATE_NOT_LOADED
result = await hass.config_entries.flow.async_init(
DOMAIN,
data={**MOCK_USER_INPUT_HUB_V2, CONF_HUB_VERSION: 2},
context={"source": config_entries.SOURCE_USER},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "single_instance_allowed"
async def test_form_select_plm(hass: HomeAssistantType):
"""Test we set up the PLM correctly."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await _init_form(hass, PLM)
result2, mock_setup, mock_setup_entry = await _device_form(
hass, result["flow_id"], mock_successful_connection, MOCK_USER_INPUT_PLM
)
assert result2["type"] == "create_entry"
assert result2["data"] == MOCK_USER_INPUT_PLM
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_select_hub_v1(hass: HomeAssistantType):
"""Test we set up the Hub v1 correctly."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await _init_form(hass, HUB1)
result2, mock_setup, mock_setup_entry = await _device_form(
hass, result["flow_id"], mock_successful_connection, MOCK_USER_INPUT_HUB_V1
)
assert result2["type"] == "create_entry"
assert result2["data"] == {
**MOCK_USER_INPUT_HUB_V1,
CONF_HUB_VERSION: 1,
}
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_select_hub_v2(hass: HomeAssistantType):
"""Test we set up the Hub v2 correctly."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await _init_form(hass, HUB2)
result2, mock_setup, mock_setup_entry = await _device_form(
hass, result["flow_id"], mock_successful_connection, MOCK_USER_INPUT_HUB_V2
)
assert result2["type"] == "create_entry"
assert result2["data"] == {
**MOCK_USER_INPUT_HUB_V2,
CONF_HUB_VERSION: 2,
}
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_failed_connection_plm(hass: HomeAssistantType):
"""Test a failed connection with the PLM."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await _init_form(hass, PLM)
result2, _, _ = await _device_form(
hass, result["flow_id"], mock_failed_connection, MOCK_USER_INPUT_PLM
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_failed_connection_hub(hass: HomeAssistantType):
"""Test a failed connection with a Hub."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await _init_form(hass, HUB2)
result2, _, _ = await _device_form(
hass, result["flow_id"], mock_failed_connection, MOCK_USER_INPUT_HUB_V2
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def _import_config(hass, config):
"""Run the import step."""
with patch(PATCH_CONNECTION, new=mock_successful_connection,), patch(
PATCH_ASYNC_SETUP, return_value=True
), patch(PATCH_ASYNC_SETUP_ENTRY, return_value=True):
return await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=config
)
async def test_import_plm(hass: HomeAssistantType):
"""Test importing a minimum PLM config from yaml."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await _import_config(hass, MOCK_IMPORT_CONFIG_PLM)
assert result["type"] == "create_entry"
assert hass.config_entries.async_entries(DOMAIN)
for entry in hass.config_entries.async_entries(DOMAIN):
assert entry.data == MOCK_IMPORT_CONFIG_PLM
async def _options_init_form(hass, entry_id, step):
"""Run the init options form."""
with patch(PATCH_ASYNC_SETUP_ENTRY, return_value=True):
result = await hass.config_entries.options.async_init(entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result2 = await hass.config_entries.options.async_configure(
result["flow_id"],
{step: True},
)
return result2
async def test_import_min_hub_v2(hass: HomeAssistantType):
"""Test importing a minimum Hub v2 config from yaml."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await _import_config(
hass, {**MOCK_IMPORT_MINIMUM_HUB_V2, CONF_PORT: 25105, CONF_HUB_VERSION: 2}
)
assert result["type"] == "create_entry"
assert hass.config_entries.async_entries(DOMAIN)
for entry in hass.config_entries.async_entries(DOMAIN):
assert entry.data[CONF_HOST] == MOCK_HOSTNAME
assert entry.data[CONF_PORT] == 25105
assert entry.data[CONF_USERNAME] == MOCK_USERNAME
assert entry.data[CONF_PASSWORD] == MOCK_PASSWORD
assert entry.data[CONF_HUB_VERSION] == 2
async def test_import_min_hub_v1(hass: HomeAssistantType):
"""Test importing a minimum Hub v1 config from yaml."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await _import_config(
hass, {**MOCK_IMPORT_MINIMUM_HUB_V1, CONF_PORT: 9761, CONF_HUB_VERSION: 1}
)
assert result["type"] == "create_entry"
assert hass.config_entries.async_entries(DOMAIN)
for entry in hass.config_entries.async_entries(DOMAIN):
assert entry.data[CONF_HOST] == MOCK_HOSTNAME
assert entry.data[CONF_PORT] == 9761
assert entry.data[CONF_HUB_VERSION] == 1
async def test_import_existing(hass: HomeAssistantType):
"""Test we fail on an existing config imported."""
config_entry = MockConfigEntry(
domain=DOMAIN,
entry_id="abcde12345",
data={**MOCK_USER_INPUT_HUB_V2, CONF_HUB_VERSION: 2},
options={},
)
config_entry.add_to_hass(hass)
assert config_entry.state == config_entries.ENTRY_STATE_NOT_LOADED
result = await _import_config(
hass, {**MOCK_IMPORT_MINIMUM_HUB_V2, CONF_PORT: 25105, CONF_HUB_VERSION: 2}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "single_instance_allowed"
async def test_import_failed_connection(hass: HomeAssistantType):
"""Test a failed connection on import."""
await setup.async_setup_component(hass, "persistent_notification", {})
with patch(PATCH_CONNECTION, new=mock_failed_connection,), patch(
PATCH_ASYNC_SETUP, return_value=True
), patch(PATCH_ASYNC_SETUP_ENTRY, return_value=True):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={**MOCK_IMPORT_MINIMUM_HUB_V2, CONF_PORT: 25105, CONF_HUB_VERSION: 2},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "cannot_connect"
async def _options_form(hass, flow_id, user_input):
"""Test an options form."""
with patch(PATCH_ASYNC_SETUP_ENTRY, return_value=True) as mock_setup_entry:
result = await hass.config_entries.options.async_configure(flow_id, user_input)
return result, mock_setup_entry
async def test_options_change_hub_config(hass: HomeAssistantType):
"""Test changing Hub v2 config."""
config_entry = MockConfigEntry(
domain=DOMAIN,
entry_id="abcde12345",
data={**MOCK_USER_INPUT_HUB_V2, CONF_HUB_VERSION: 2},
options={},
)
config_entry.add_to_hass(hass)
result = await _options_init_form(
hass, config_entry.entry_id, STEP_CHANGE_HUB_CONFIG
)
user_input = {
CONF_HOST: "2.3.4.5",
CONF_PORT: 9999,
CONF_USERNAME: "new username",
CONF_PASSWORD: "new password",
}
result, _ = await _options_form(hass, result["flow_id"], user_input)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert config_entry.options == {}
assert config_entry.data == {**user_input, CONF_HUB_VERSION: 2}
async def test_options_add_device_override(hass: HomeAssistantType):
"""Test adding a device override."""
config_entry = MockConfigEntry(
domain=DOMAIN,
entry_id="abcde12345",
data={**MOCK_USER_INPUT_HUB_V2, CONF_HUB_VERSION: 2},
options={},
)
config_entry.add_to_hass(hass)
result = await _options_init_form(hass, config_entry.entry_id, STEP_ADD_OVERRIDE)
user_input = {
CONF_ADDRESS: "1a2b3c",
CONF_CAT: "0x04",
CONF_SUBCAT: "0xaa",
}
result, _ = await _options_form(hass, result["flow_id"], user_input)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert len(config_entry.options[CONF_OVERRIDE]) == 1
assert config_entry.options[CONF_OVERRIDE][0][CONF_ADDRESS] == "1A.2B.3C"
assert config_entry.options[CONF_OVERRIDE][0][CONF_CAT] == 4
assert config_entry.options[CONF_OVERRIDE][0][CONF_SUBCAT] == 170
result2 = await _options_init_form(hass, config_entry.entry_id, STEP_ADD_OVERRIDE)
user_input = {
CONF_ADDRESS: "4d5e6f",
CONF_CAT: "05",
CONF_SUBCAT: "bb",
}
await _options_form(hass, result2["flow_id"], user_input)
assert len(config_entry.options[CONF_OVERRIDE]) == 2
assert config_entry.options[CONF_OVERRIDE][1][CONF_ADDRESS] == "4D.5E.6F"
assert config_entry.options[CONF_OVERRIDE][1][CONF_CAT] == 5
assert config_entry.options[CONF_OVERRIDE][1][CONF_SUBCAT] == 187
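    # Note: the cat/subcat strings entered above may or may not carry a "0x"
    # prefix; the expected values are consistent with hexadecimal parsing:
    # int("0x04", 16) == 4, int("0xaa", 16) == 170,
    # int("05", 16) == 5, int("bb", 16) == 187.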
async def test_options_remove_device_override(hass: HomeAssistantType):
"""Test removing a device override."""
config_entry = MockConfigEntry(
domain=DOMAIN,
entry_id="abcde12345",
data={**MOCK_USER_INPUT_HUB_V2, CONF_HUB_VERSION: 2},
options={
CONF_OVERRIDE: [
{CONF_ADDRESS: "1A.2B.3C", CONF_CAT: 6, CONF_SUBCAT: 100},
{CONF_ADDRESS: "4D.5E.6F", CONF_CAT: 7, CONF_SUBCAT: 200},
]
},
)
config_entry.add_to_hass(hass)
result = await _options_init_form(hass, config_entry.entry_id, STEP_REMOVE_OVERRIDE)
user_input = {CONF_ADDRESS: "1A.2B.3C"}
result, _ = await _options_form(hass, result["flow_id"], user_input)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert len(config_entry.options[CONF_OVERRIDE]) == 1
async def test_options_remove_device_override_with_x10(hass: HomeAssistantType):
"""Test removing a device override when an X10 device is configured."""
config_entry = MockConfigEntry(
domain=DOMAIN,
entry_id="abcde12345",
data={**MOCK_USER_INPUT_HUB_V2, CONF_HUB_VERSION: 2},
options={
CONF_OVERRIDE: [
{CONF_ADDRESS: "1A.2B.3C", CONF_CAT: 6, CONF_SUBCAT: 100},
{CONF_ADDRESS: "4D.5E.6F", CONF_CAT: 7, CONF_SUBCAT: 200},
],
CONF_X10: [
{
CONF_HOUSECODE: "d",
CONF_UNITCODE: 5,
CONF_PLATFORM: "light",
CONF_DIM_STEPS: 22,
}
],
},
)
config_entry.add_to_hass(hass)
result = await _options_init_form(hass, config_entry.entry_id, STEP_REMOVE_OVERRIDE)
user_input = {CONF_ADDRESS: "1A.2B.3C"}
result, _ = await _options_form(hass, result["flow_id"], user_input)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert len(config_entry.options[CONF_OVERRIDE]) == 1
assert len(config_entry.options[CONF_X10]) == 1
async def test_options_add_x10_device(hass: HomeAssistantType):
"""Test adding an X10 device."""
config_entry = MockConfigEntry(
domain=DOMAIN,
entry_id="abcde12345",
data={**MOCK_USER_INPUT_HUB_V2, CONF_HUB_VERSION: 2},
options={},
)
config_entry.add_to_hass(hass)
result = await _options_init_form(hass, config_entry.entry_id, STEP_ADD_X10)
user_input = {
CONF_HOUSECODE: "c",
CONF_UNITCODE: 12,
CONF_PLATFORM: "light",
CONF_DIM_STEPS: 18,
}
result2, _ = await _options_form(hass, result["flow_id"], user_input)
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert len(config_entry.options[CONF_X10]) == 1
assert config_entry.options[CONF_X10][0][CONF_HOUSECODE] == "c"
assert config_entry.options[CONF_X10][0][CONF_UNITCODE] == 12
assert config_entry.options[CONF_X10][0][CONF_PLATFORM] == "light"
assert config_entry.options[CONF_X10][0][CONF_DIM_STEPS] == 18
result = await _options_init_form(hass, config_entry.entry_id, STEP_ADD_X10)
user_input = {
CONF_HOUSECODE: "d",
CONF_UNITCODE: 10,
CONF_PLATFORM: "binary_sensor",
CONF_DIM_STEPS: 15,
}
result3, _ = await _options_form(hass, result["flow_id"], user_input)
assert result3["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert len(config_entry.options[CONF_X10]) == 2
assert config_entry.options[CONF_X10][1][CONF_HOUSECODE] == "d"
assert config_entry.options[CONF_X10][1][CONF_UNITCODE] == 10
assert config_entry.options[CONF_X10][1][CONF_PLATFORM] == "binary_sensor"
assert config_entry.options[CONF_X10][1][CONF_DIM_STEPS] == 15
async def test_options_remove_x10_device(hass: HomeAssistantType):
"""Test removing an X10 device."""
config_entry = MockConfigEntry(
domain=DOMAIN,
entry_id="abcde12345",
data={**MOCK_USER_INPUT_HUB_V2, CONF_HUB_VERSION: 2},
options={
CONF_X10: [
{
CONF_HOUSECODE: "C",
CONF_UNITCODE: 4,
CONF_PLATFORM: "light",
CONF_DIM_STEPS: 18,
},
{
CONF_HOUSECODE: "D",
CONF_UNITCODE: 10,
CONF_PLATFORM: "binary_sensor",
CONF_DIM_STEPS: 15,
},
]
},
)
config_entry.add_to_hass(hass)
result = await _options_init_form(hass, config_entry.entry_id, STEP_REMOVE_X10)
for device in config_entry.options[CONF_X10]:
housecode = device[CONF_HOUSECODE].upper()
unitcode = device[CONF_UNITCODE]
print(f"Housecode: {housecode}, Unitcode: {unitcode}")
user_input = {CONF_DEVICE: "Housecode: C, Unitcode: 4"}
result, _ = await _options_form(hass, result["flow_id"], user_input)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert len(config_entry.options[CONF_X10]) == 1
async def test_options_remove_x10_device_with_override(hass: HomeAssistantType):
"""Test removing an X10 device when a device override is configured."""
config_entry = MockConfigEntry(
domain=DOMAIN,
entry_id="abcde12345",
data={**MOCK_USER_INPUT_HUB_V2, CONF_HUB_VERSION: 2},
options={
CONF_X10: [
{
CONF_HOUSECODE: "C",
CONF_UNITCODE: 4,
CONF_PLATFORM: "light",
CONF_DIM_STEPS: 18,
},
{
CONF_HOUSECODE: "D",
CONF_UNITCODE: 10,
CONF_PLATFORM: "binary_sensor",
CONF_DIM_STEPS: 15,
},
],
CONF_OVERRIDE: [{CONF_ADDRESS: "1A.2B.3C", CONF_CAT: 1, CONF_SUBCAT: 18}],
},
)
config_entry.add_to_hass(hass)
result = await _options_init_form(hass, config_entry.entry_id, STEP_REMOVE_X10)
for device in config_entry.options[CONF_X10]:
housecode = device[CONF_HOUSECODE].upper()
unitcode = device[CONF_UNITCODE]
print(f"Housecode: {housecode}, Unitcode: {unitcode}")
user_input = {CONF_DEVICE: "Housecode: C, Unitcode: 4"}
result, _ = await _options_form(hass, result["flow_id"], user_input)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert len(config_entry.options[CONF_X10]) == 1
assert len(config_entry.options[CONF_OVERRIDE]) == 1
async def test_options_dup_selection(hass: HomeAssistantType):
"""Test if a duplicate selection was made in options."""
config_entry = MockConfigEntry(
domain=DOMAIN,
entry_id="abcde12345",
data={**MOCK_USER_INPUT_HUB_V2, CONF_HUB_VERSION: 2},
options={},
)
config_entry.add_to_hass(hass)
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result2 = await hass.config_entries.options.async_configure(
result["flow_id"],
{STEP_ADD_OVERRIDE: True, STEP_ADD_X10: True},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "select_single"}
async def test_options_override_bad_data(hass: HomeAssistantType):
"""Test for bad data in a device override."""
config_entry = MockConfigEntry(
domain=DOMAIN,
entry_id="abcde12345",
data={**MOCK_USER_INPUT_HUB_V2, CONF_HUB_VERSION: 2},
options={},
)
config_entry.add_to_hass(hass)
result = await _options_init_form(hass, config_entry.entry_id, STEP_ADD_OVERRIDE)
user_input = {
CONF_ADDRESS: "zzzzzz",
CONF_CAT: "bad",
CONF_SUBCAT: "data",
}
result, _ = await _options_form(hass, result["flow_id"], user_input)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "input_error"}
| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 34.655116 | 88 | 0.677968 |
| field | value |
|---|---|
| hexsha | ef66ae8f5b16f6c48c967efd62cb118cc537be4d |
| size | 23,917 |
| ext | py |
| lang | Python |
| max_stars / max_issues / max_forks repo_path | onmt/inputters/inputter.py |
| max_stars / max_issues / max_forks repo_name | axenov/BERT-Summarization-for-OpenNMT |
| max_stars / max_issues / max_forks repo_head_hexsha | b453b33efb4961ec11b9e424f65049e7b7a5146f |
| max_stars / max_issues / max_forks repo_licenses | ["MIT"] |
| max_stars_count | 5 |
| max_stars event min / max datetime | 2021-03-08T12:53:48.000Z / 2021-09-14T16:18:23.000Z |
| max_issues_count | null |
| max_issues event min / max datetime | null / null |
| max_forks_count | 2 |
| max_forks event min / max datetime | 2021-03-06T15:28:44.000Z / 2021-07-30T19:36:08.000Z |

content (onmt/inputters/inputter.py):
# -*- coding: utf-8 -*-
import glob
import os
import codecs
import math
from collections import Counter, defaultdict
from itertools import chain, cycle
import torch
import torchtext.data
from torchtext.data import Field
from torchtext.vocab import Vocab
from onmt.inputters.text_dataset import text_fields, TextMultiField
from onmt.inputters.image_dataset import image_fields
from onmt.inputters.audio_dataset import audio_fields
from onmt.utils.logging import logger
# backwards compatibility
from onmt.inputters.text_dataset import _feature_tokenize # noqa: F401
from onmt.inputters.image_dataset import ( # noqa: F401
batch_img as make_img)
import gc
# monkey-patch to make torchtext Vocab's pickleable
def _getstate(self):
return dict(self.__dict__, stoi=dict(self.stoi))
def _setstate(self, state):
self.__dict__.update(state)
self.stoi = defaultdict(lambda: 0, self.stoi)
Vocab.__getstate__ = _getstate
Vocab.__setstate__ = _setstate
def make_src(data, vocab):
src_size = max([t.size(0) for t in data])
src_vocab_size = max([t.max() for t in data]) + 1
alignment = torch.zeros(src_size, len(data), src_vocab_size)
for i, sent in enumerate(data):
for j, t in enumerate(sent):
alignment[j, i, t] = 1
return alignment
def make_tgt(data, vocab):
tgt_size = max([t.size(0) for t in data])
alignment = torch.zeros(tgt_size, len(data)).long()
for i, sent in enumerate(data):
alignment[:sent.size(0), i] = sent
return alignment
def get_fields(
src_data_type,
n_src_feats,
n_tgt_feats,
#pad='<blank>',
pad ='[PAD]',
#bos='<s>',
#eos='</s>',
bos='[CLS]',
eos='[SEP]',
dynamic_dict=False,
src_truncate=None,
tgt_truncate=None
):
"""
Args:
src_data_type: type of the source input. Options are [text|img|audio].
n_src_feats (int): the number of source features (not counting tokens)
to create a :class:`torchtext.data.Field` for. (If
``src_data_type=="text"``, these fields are stored together
as a ``TextMultiField``).
n_tgt_feats (int): See above.
pad (str): Special pad symbol. Used on src and tgt side.
bos (str): Special beginning of sequence symbol. Only relevant
for tgt.
eos (str): Special end of sequence symbol. Only relevant
for tgt.
dynamic_dict (bool): Whether or not to include source map and
alignment fields.
src_truncate: Cut off src sequences beyond this (passed to
``src_data_type``'s data reader - see there for more details).
tgt_truncate: Cut off tgt sequences beyond this (passed to
:class:`TextDataReader` - see there for more details).
Returns:
A dict mapping names to fields. These names need to match
the dataset example attributes.
"""
assert src_data_type in ['text', 'img', 'audio'], \
"Data type not implemented"
assert not dynamic_dict or src_data_type == 'text', \
'it is not possible to use dynamic_dict with non-text input'
fields = {}
fields_getters = {"text": text_fields,
"img": image_fields,
"audio": audio_fields}
src_field_kwargs = {"n_feats": n_src_feats,
"include_lengths": True,
"pad": pad, "bos": None, "eos": None,
"truncate": src_truncate,
"base_name": "src"}
fields["src"] = fields_getters[src_data_type](**src_field_kwargs)
tgt_field_kwargs = {"n_feats": n_tgt_feats,
"include_lengths": False,
"pad": pad, "bos": bos, "eos": eos,
"truncate": tgt_truncate,
"base_name": "tgt"}
fields["tgt"] = fields_getters["text"](**tgt_field_kwargs)
indices = Field(use_vocab=False, dtype=torch.long, sequential=False)
fields["indices"] = indices
if dynamic_dict:
src_map = Field(
use_vocab=False, dtype=torch.float,
postprocessing=make_src, sequential=False)
fields["src_map"] = src_map
align = Field(
use_vocab=False, dtype=torch.long,
postprocessing=make_tgt, sequential=False)
fields["alignment"] = align
return fields
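# Example (sketch only): a plain text-to-text setup with no extra feature
# columns and no copy attention would call
#     fields = get_fields("text", n_src_feats=0, n_tgt_feats=0)
# and receive a dict with "src", "tgt" and "indices" entries; passing
# dynamic_dict=True adds "src_map" and "alignment" as well.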
def load_old_vocab(vocab, data_type="text", dynamic_dict=False):
"""Update a legacy vocab/field format.
Args:
vocab: a list of (field name, torchtext.vocab.Vocab) pairs. This is the
format formerly saved in *.vocab.pt files. Or, text data
not using a :class:`TextMultiField`.
data_type (str): text, img, or audio
dynamic_dict (bool): Used for copy attention.
Returns:
a dictionary whose keys are the field names and whose values Fields.
"""
if _old_style_vocab(vocab):
# List[Tuple[str, Vocab]] -> List[Tuple[str, Field]]
# -> dict[str, Field]
vocab = dict(vocab)
n_src_features = sum('src_feat_' in k for k in vocab)
n_tgt_features = sum('tgt_feat_' in k for k in vocab)
fields = get_fields(
data_type, n_src_features, n_tgt_features,
dynamic_dict=dynamic_dict)
for n, f in fields.items():
try:
f_iter = iter(f)
except TypeError:
f_iter = [(n, f)]
for sub_n, sub_f in f_iter:
if sub_n in vocab:
sub_f.vocab = vocab[sub_n]
return fields
if _old_style_field_list(vocab): # upgrade to multifield
# Dict[str, List[Tuple[str, Field]]]
# doesn't change structure - don't return early.
fields = vocab
for base_name, vals in fields.items():
if ((base_name == 'src' and data_type == 'text') or
base_name == 'tgt'):
assert not isinstance(vals[0][1], TextMultiField)
fields[base_name] = [(base_name, TextMultiField(
vals[0][0], vals[0][1], vals[1:]))]
if _old_style_nesting(vocab):
# Dict[str, List[Tuple[str, Field]]] -> List[Tuple[str, Field]]
# -> dict[str, Field]
fields = dict(list(chain.from_iterable(vocab.values())))
return fields
def _old_style_vocab(vocab):
"""Detect old-style vocabs (``List[Tuple[str, torchtext.data.Vocab]]``).
Args:
vocab: some object loaded from a *.vocab.pt file
Returns:
Whether ``vocab`` is a list of pairs where the second object
is a :class:`torchtext.vocab.Vocab` object.
This exists because previously only the vocab objects from the fields
were saved directly, not the fields themselves, and the fields needed to
be reconstructed at training and translation time.
"""
return isinstance(vocab, list) and \
any(isinstance(v[1], Vocab) for v in vocab)
def _old_style_nesting(vocab):
"""Detect old-style nesting (``dict[str, List[Tuple[str, Field]]]``)."""
return isinstance(vocab, dict) and \
any(isinstance(v, list) for v in vocab.values())
def _old_style_field_list(vocab):
"""Detect old-style text fields.
Not old style vocab, old nesting, and text-type fields not using
``TextMultiField``.
Args:
vocab: some object loaded from a *.vocab.pt file
Returns:
Whether ``vocab`` is not an :func:`_old_style_vocab` and not
a :class:`TextMultiField` (using an old-style text representation).
"""
# if tgt isn't using TextMultiField, then no text field is.
return (not _old_style_vocab(vocab)) and _old_style_nesting(vocab) and \
(not isinstance(vocab['tgt'][0][1], TextMultiField))
def old_style_vocab(vocab):
"""The vocab/fields need updated."""
return _old_style_vocab(vocab) or _old_style_field_list(vocab) or \
_old_style_nesting(vocab)
def filter_example(ex, use_src_len=True, use_tgt_len=True,
min_src_len=1, max_src_len=float('inf'),
min_tgt_len=1, max_tgt_len=float('inf')):
"""Return whether an example is an acceptable length.
If used with a dataset as ``filter_pred``, use :func:`partial()`
for all keyword arguments.
Args:
ex (torchtext.data.Example): An object with a ``src`` and ``tgt``
property.
use_src_len (bool): Filter based on the length of ``ex.src``.
use_tgt_len (bool): Similar to above.
min_src_len (int): A non-negative minimally acceptable length
(examples of exactly this length will be included).
min_tgt_len (int): Similar to above.
max_src_len (int or float): A non-negative (possibly infinite)
maximally acceptable length (examples of exactly this length
will be included).
max_tgt_len (int or float): Similar to above.
"""
src_len = len(ex.src[0])
tgt_len = len(ex.tgt[0])
return (not use_src_len or min_src_len <= src_len <= max_src_len) and \
(not use_tgt_len or min_tgt_len <= tgt_len <= max_tgt_len)
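# As the docstring says, bind the keyword arguments with functools.partial when
# using this as a dataset ``filter_pred``, e.g. (sketch only):
#     from functools import partial
#     filter_pred = partial(filter_example, max_src_len=50, max_tgt_len=50)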
def _pad_vocab_to_multiple(vocab, multiple):
vocab_size = len(vocab)
if vocab_size % multiple == 0:
return
target_size = int(math.ceil(vocab_size / multiple)) * multiple
padding_tokens = [
"averyunlikelytoken%d" % i for i in range(target_size - vocab_size)]
vocab.extend(Vocab(Counter(), specials=padding_tokens))
return vocab
def _build_field_vocab(field, counter, size_multiple=1, **kwargs):
# this is basically copy-pasted from torchtext.
all_specials = [
field.unk_token, field.pad_token, field.init_token, field.eos_token
]
specials = [tok for tok in all_specials if tok is not None]
field.vocab = field.vocab_cls(counter, specials=specials, **kwargs)
if size_multiple > 1:
_pad_vocab_to_multiple(field.vocab, size_multiple)
def _load_vocab(vocab_path, name, counters, min_freq):
# counters changes in place
vocab = _read_vocab_file(vocab_path, name)
vocab_size = len(vocab)
logger.info('Loaded %s vocab has %d tokens.' % (name, vocab_size))
for i, token in enumerate(vocab):
# keep the order of tokens specified in the vocab file by
# adding them to the counter with decreasing counting values
counters[name][token] = vocab_size - i + min_freq
return vocab, vocab_size
def _build_fv_from_multifield(multifield, counters, build_fv_args,
size_multiple=1):
for name, field in multifield:
_build_field_vocab(
field,
counters[name],
size_multiple=size_multiple,
**build_fv_args[name])
logger.info(" * %s vocab size: %d." % (name, len(field.vocab)))
def build_vocab(train_dataset_files, fields, data_type, share_vocab,
src_vocab_path, src_vocab_size, src_words_min_frequency,
tgt_vocab_path, tgt_vocab_size, tgt_words_min_frequency,
vocab_size_multiple=1):
"""Build the fields for all data sides.
Args:
train_dataset_files: a list of train dataset pt file.
fields (dict[str, Field]): fields to build vocab for.
data_type (str): A supported data type string.
share_vocab (bool): share source and target vocabulary?
src_vocab_path (str): Path to src vocabulary file.
src_vocab_size (int): size of the source vocabulary.
src_words_min_frequency (int): the minimum frequency needed to
include a source word in the vocabulary.
tgt_vocab_path (str): Path to tgt vocabulary file.
tgt_vocab_size (int): size of the target vocabulary.
tgt_words_min_frequency (int): the minimum frequency needed to
include a target word in the vocabulary.
vocab_size_multiple (int): ensure that the vocabulary size is a
multiple of this value.
Returns:
Dict of Fields
"""
counters = defaultdict(Counter)
if src_vocab_path:
try:
logger.info("Using existing vocabulary...")
vocab = torch.load(src_vocab_path)
# return vocab to dump with standard name
return vocab
except torch.serialization.pickle.UnpicklingError:
logger.info("Building vocab from text file...")
# empty train_dataset_files so that vocab is only loaded from
# given paths in src_vocab_path, tgt_vocab_path
train_dataset_files = []
# Load vocabulary
if src_vocab_path:
src_vocab, src_vocab_size = _load_vocab(
src_vocab_path, "src", counters,
src_words_min_frequency)
else:
src_vocab = None
if tgt_vocab_path:
tgt_vocab, tgt_vocab_size = _load_vocab(
tgt_vocab_path, "tgt", counters,
tgt_words_min_frequency)
else:
tgt_vocab = None
for i, path in enumerate(train_dataset_files):
dataset = torch.load(path)
logger.info(" * reloading %s." % path)
for ex in dataset.examples:
for name, field in fields.items():
try:
f_iter = iter(field)
except TypeError:
f_iter = [(name, field)]
all_data = [getattr(ex, name, None)]
else:
all_data = getattr(ex, name)
for (sub_n, sub_f), fd in zip(
f_iter, all_data):
has_vocab = (sub_n == 'src' and src_vocab) or \
(sub_n == 'tgt' and tgt_vocab)
if sub_f.sequential and not has_vocab:
val = fd
counters[sub_n].update(val)
        # Free this shard's examples from memory, but keep the last shard loaded
if i < len(train_dataset_files) - 1:
dataset.examples = None
gc.collect()
del dataset.examples
gc.collect()
del dataset
gc.collect()
build_fv_args = defaultdict(dict)
build_fv_args["src"] = dict(
max_size=src_vocab_size, min_freq=src_words_min_frequency)
build_fv_args["tgt"] = dict(
max_size=tgt_vocab_size, min_freq=tgt_words_min_frequency)
tgt_multifield = fields["tgt"]
_build_fv_from_multifield(
tgt_multifield,
counters,
build_fv_args,
size_multiple=vocab_size_multiple if not share_vocab else 1)
if data_type == 'text':
src_multifield = fields["src"]
_build_fv_from_multifield(
src_multifield,
counters,
build_fv_args,
size_multiple=vocab_size_multiple if not share_vocab else 1)
if share_vocab:
# `tgt_vocab_size` is ignored when sharing vocabularies
logger.info(" * merging src and tgt vocab...")
src_field = src_multifield.base_field
tgt_field = tgt_multifield.base_field
_merge_field_vocabs(
src_field, tgt_field, vocab_size=src_vocab_size,
min_freq=src_words_min_frequency,
vocab_size_multiple=vocab_size_multiple)
logger.info(" * merged vocab size: %d." % len(src_field.vocab))
return fields # is the return necessary?
def _merge_field_vocabs(src_field, tgt_field, vocab_size, min_freq,
vocab_size_multiple):
# in the long run, shouldn't it be possible to do this by calling
# build_vocab with both the src and tgt data?
specials = [tgt_field.unk_token, tgt_field.pad_token,
tgt_field.init_token, tgt_field.eos_token]
merged = sum(
[src_field.vocab.freqs, tgt_field.vocab.freqs], Counter()
)
merged_vocab = Vocab(
merged, specials=specials,
max_size=vocab_size, min_freq=min_freq
)
if vocab_size_multiple > 1:
_pad_vocab_to_multiple(merged_vocab, vocab_size_multiple)
src_field.vocab = merged_vocab
tgt_field.vocab = merged_vocab
assert len(src_field.vocab) == len(tgt_field.vocab)
def _read_vocab_file(vocab_path, tag):
"""Loads a vocabulary from the given path.
Args:
vocab_path (str): Path to utf-8 text file containing vocabulary.
Each token should be on a line by itself. Tokens must not
contain whitespace (else only before the whitespace
is considered).
tag (str): Used for logging which vocab is being read.
"""
logger.info("Loading {} vocabulary from {}".format(tag, vocab_path))
if not os.path.exists(vocab_path):
raise RuntimeError(
"{} vocabulary not found at {}".format(tag, vocab_path))
else:
with codecs.open(vocab_path, 'r', 'utf-8') as f:
return [line.strip().split()[0] for line in f if line.strip()]
def batch_iter(data, batch_size, batch_size_fn=None, batch_size_multiple=1):
"""Yield elements from data in chunks of batch_size, where each chunk size
is a multiple of batch_size_multiple.
This is an extended version of torchtext.data.batch.
"""
if batch_size_fn is None:
def batch_size_fn(new, count, sofar):
return count
minibatch, size_so_far = [], 0
for ex in data:
minibatch.append(ex)
size_so_far = batch_size_fn(ex, len(minibatch), size_so_far)
if size_so_far >= batch_size:
overflowed = 0
if size_so_far > batch_size:
overflowed += 1
if batch_size_multiple > 1:
overflowed += (
(len(minibatch) - overflowed) % batch_size_multiple)
if overflowed == 0:
yield minibatch
minibatch, size_so_far = [], 0
else:
yield minibatch[:-overflowed]
minibatch = minibatch[-overflowed:]
size_so_far = 0
for i, ex in enumerate(minibatch):
size_so_far = batch_size_fn(ex, i + 1, size_so_far)
if minibatch:
yield minibatch
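# Worked example with the default batch_size_fn (cost = number of examples):
# for batch_size=6, batch_size_multiple=4 and a stream e1..e8, the first full
# batch of 6 is trimmed to [e1, e2, e3, e4] so its size is a multiple of 4,
# e5/e6 are carried over, and the stream ends with [e5, e6, e7, e8]. The very
# last batch of a stream may still not be a multiple of batch_size_multiple.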
class OrderedIterator(torchtext.data.Iterator):
def __init__(self,
dataset,
batch_size,
batch_size_multiple=1,
**kwargs):
super(OrderedIterator, self).__init__(dataset, batch_size, **kwargs)
self.batch_size_multiple = batch_size_multiple
def create_batches(self):
if self.train:
def _pool(data, random_shuffler):
for p in torchtext.data.batch(data, self.batch_size * 100):
p_batch = batch_iter(
sorted(p, key=self.sort_key),
self.batch_size,
batch_size_fn=self.batch_size_fn,
batch_size_multiple=self.batch_size_multiple)
for b in random_shuffler(list(p_batch)):
yield b
self.batches = _pool(self.data(), self.random_shuffler)
else:
self.batches = []
for b in batch_iter(
self.data(),
self.batch_size,
batch_size_fn=self.batch_size_fn,
batch_size_multiple=self.batch_size_multiple):
self.batches.append(sorted(b, key=self.sort_key))
class DatasetLazyIter(object):
"""Yield data from sharded dataset files.
Args:
dataset_paths: a list containing the locations of dataset files.
fields (dict[str, Field]): fields dict for the
datasets.
batch_size (int): batch size.
batch_size_fn: custom batch process function.
device: See :class:`OrderedIterator` ``device``.
is_train (bool): train or valid?
"""
def __init__(self, dataset_paths, fields, batch_size, batch_size_fn,
batch_size_multiple, device, is_train, repeat=True,
num_batches_multiple=1):
self._paths = dataset_paths
self.fields = fields
self.batch_size = batch_size
self.batch_size_fn = batch_size_fn
self.batch_size_multiple = batch_size_multiple
self.device = device
self.is_train = is_train
self.repeat = repeat
self.num_batches_multiple = num_batches_multiple
def _iter_dataset(self, path):
cur_dataset = torch.load(path)
logger.info('Loading dataset from %s, number of examples: %d' %
(path, len(cur_dataset)))
cur_dataset.fields = self.fields
cur_iter = OrderedIterator(
dataset=cur_dataset,
batch_size=self.batch_size,
batch_size_multiple=self.batch_size_multiple,
batch_size_fn=self.batch_size_fn,
device=self.device,
train=self.is_train,
sort=False,
sort_within_batch=True,
repeat=False
)
for batch in cur_iter:
yield batch
cur_dataset.examples = None
gc.collect()
del cur_dataset
gc.collect()
def __iter__(self):
num_batches = 0
paths = self._paths
if self.is_train and self.repeat:
# Cycle through the shards indefinitely.
paths = cycle(paths)
for path in paths:
for batch in self._iter_dataset(path):
yield batch
num_batches += 1
if self.is_train and not self.repeat and \
num_batches % self.num_batches_multiple != 0:
# When the dataset is not repeated, we might need to ensure that
# the number of returned batches is the multiple of a given value.
# This is important for multi GPU training to ensure that all
# workers have the same number of batches to process.
for path in paths:
for batch in self._iter_dataset(path):
yield batch
num_batches += 1
if num_batches % self.num_batches_multiple == 0:
return
def max_tok_len(new, count, sofar):
"""
In token batching scheme, the number of sequences is limited
such that the total number of src/tgt tokens (including padding)
in a batch <= batch_size
"""
# Maintains the longest src and tgt length in the current batch
global max_src_in_batch, max_tgt_in_batch # this is a hack
# Reset current longest length at a new batch (count=1)
if count == 1:
max_src_in_batch = 0
max_tgt_in_batch = 0
# Src: [<bos> w1 ... wN <eos>]
max_src_in_batch = max(max_src_in_batch, len(new.src[0]) + 2)
# Tgt: [w1 ... wM <eos>]
max_tgt_in_batch = max(max_tgt_in_batch, len(new.tgt[0]) + 1)
src_elements = count * max_src_in_batch
tgt_elements = count * max_tgt_in_batch
return max(src_elements, tgt_elements)
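# Example: with count=3 sequences, the longest src so far 18 tokens and the
# longest tgt 11 tokens, the batch cost is
# max(3 * (18 + 2), 3 * (11 + 1)) = max(60, 36) = 60 padded tokens.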
def build_dataset_iter(corpus_type, fields, opt, is_train=True):
"""
This returns user-defined train/validate data iterator for the trainer
to iterate over. We implement simple ordered iterator strategy here,
but more sophisticated strategy like curriculum learning is ok too.
"""
dataset_paths = list(sorted(
glob.glob(opt.data + '.' + corpus_type + '*.pt')))
if not dataset_paths:
return None
batch_size = opt.batch_size if is_train else opt.valid_batch_size
batch_fn = max_tok_len if is_train and opt.batch_type == "tokens" else None
batch_size_multiple = 8 if opt.model_dtype == "fp16" else 1
device = "cuda" if opt.gpu_ranks else "cpu"
return DatasetLazyIter(
dataset_paths,
fields,
batch_size,
batch_fn,
batch_size_multiple,
device,
is_train,
repeat=not opt.single_pass,
num_batches_multiple=max(opt.accum_count) * opt.world_size)
| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 36.458841 | 79 | 0.61772 |
| field | value |
|---|---|
| hexsha | f866b865e5666a56b6b97532f601f6abf5871732 |
| size | 12,759 |
| ext | py |
| lang | Python |
| max_stars / max_issues / max_forks repo_path | egg/zoo/objects_game/train.py |
| max_stars / max_issues / max_forks repo_name | renata-nerenata/EGG |
| max_stars / max_issues / max_forks repo_head_hexsha | b8532efc3569defabeba6851986cecb0c6640984 |
| max_stars / max_issues / max_forks repo_licenses | ["MIT"] |
| max_stars_count | 1 |
| max_stars event min / max datetime | 2021-05-26T14:23:25.000Z / 2021-05-26T14:23:25.000Z |
| max_issues_count | 1 |
| max_issues event min / max datetime | 2019-10-31T16:21:01.000Z / 2019-10-31T16:21:01.000Z |
| max_forks_count | null |
| max_forks event min / max datetime | null / null |

content (egg/zoo/objects_game/train.py):
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
import argparse
import operator
import pathlib
import numpy as np
import torch.nn.functional as F
import torch.utils.data
import egg.core as core
from egg.core.util import move_to
from egg.zoo.objects_game.archs import Receiver, Sender
from egg.zoo.objects_game.features import VectorsLoader
from egg.zoo.objects_game.util import (compute_baseline_accuracy,
compute_mi_input_msgs,
dump_sender_receiver, entropy,
mutual_info)
def get_params(params):
parser = argparse.ArgumentParser()
input_data = parser.add_mutually_exclusive_group()
input_data.add_argument('--perceptual_dimensions', type=str, default='[4, 4, 4, 4, 4]',
help='Number of features for every perceptual dimension')
input_data.add_argument('--load_data_path', type=str, default=None,
help='Path to .npz data file to load')
parser.add_argument('--n_distractors', type=int, default=3,
help='Number of distractor objects for the receiver (default: 3)')
    parser.add_argument('--train_samples', type=float, default=1e5,
                        help='Number of tuples in training data (default: 1e5)')
    parser.add_argument('--validation_samples', type=float, default=1e3,
                        help='Number of tuples in validation data (default: 1e3)')
parser.add_argument('--test_samples', type=float, default=1e3,
help='Number of tuples in test data (default: 1e3)')
parser.add_argument('--data_seed', type=int, default=111,
help="Seed for random creation of train, validation and test tuples (default: 111)")
parser.add_argument('--shuffle_train_data', action='store_true', default=False,
help="Shuffle train data before every epoch (default: False)")
parser.add_argument('--sender_hidden', type=int, default=50,
help='Size of the hidden layer of Sender (default: 50)')
parser.add_argument('--receiver_hidden', type=int, default=50,
help='Size of the hidden layer of Receiver (default: 50)')
parser.add_argument('--sender_embedding', type=int, default=10,
help='Dimensionality of the embedding hidden layer for Sender (default: 10)')
parser.add_argument('--receiver_embedding', type=int, default=10,
help='Dimensionality of the embedding hidden layer for Receiver (default: 10)')
parser.add_argument('--sender_cell', type=str, default='rnn',
help='Type of the cell used for Sender {rnn, gru, lstm} (default: rnn)')
parser.add_argument('--receiver_cell', type=str, default='rnn',
help='Type of the cell used for Receiver {rnn, gru, lstm} (default: rnn)')
parser.add_argument('--sender_lr', type=float, default=1e-1,
help="Learning rate for Sender's parameters (default: 1e-1)")
parser.add_argument('--receiver_lr', type=float, default=1e-1,
help="Learning rate for Receiver's parameters (default: 1e-1)")
parser.add_argument('--temperature', type=float, default=1.0,
help="GS temperature for the sender (default: 1.0)")
    parser.add_argument('--mode', type=str, default='gs',
                        help="Selects whether Reinforce or GumbelSoftmax relaxation is used for training {gs only at the moment}"
                             "(default: gs)")
parser.add_argument('--output_json', action='store_true', default=False,
help="If set, egg will output validation stats in json format (default: False)")
parser.add_argument('--evaluate', action='store_true', default=False,
help="Evaluate trained model on test data")
parser.add_argument('--dump_data_folder', type=str, default=None,
help="Folder where file with dumped data will be created")
parser.add_argument('--dump_msg_folder', type=str, default=None,
help="Folder where file with dumped messages will be created")
parser.add_argument('--debug', action='store_true', default=False,
help="Run egg/objects_game with pdb enabled")
args = core.init(parser, params)
check_args(args)
print(args)
return args
def check_args(args):
args.train_samples, args.validation_samples, args.test_samples = int(args.train_samples), int(args.validation_samples), int(args.test_samples)
try:
args.perceptual_dimensions = eval(args.perceptual_dimensions)
except SyntaxError:
print(
"The format of the # of perceptual dimensions param is not correct. Please change it to string representing a list of int. Correct format: '[int, ..., int]' ")
exit(1)
if args.debug:
import pdb
pdb.set_trace()
    args.n_features = len(args.perceptual_dimensions)
# can't set data loading and data dumping at the same time
assert not (args.load_data_path and args.dump_data_folder), "Cannot set folder to dump data while setting path to vectors to be loaded. Are you trying to dump the same vectors that you are loading?"
args.dump_msg_folder = pathlib.Path(args.dump_msg_folder) if args.dump_msg_folder is not None else None
if (not args.evaluate) and args.dump_msg_folder:
print("| WARNING --dump_msg_folder was set without --evaluate. Evaluation will not be performed nor any results will be dumped. Please set --evaluate")
def loss(_sender_input, _message, _receiver_input, receiver_output, _labels):
acc = (receiver_output.argmax(dim=1) == _labels).detach().float()
loss = F.cross_entropy(receiver_output, _labels, reduction="none")
return loss, {'acc': acc}
def main(params):
opts = get_params(params)
device = torch.device('cuda' if opts.cuda else 'cpu')
data_loader = VectorsLoader(perceptual_dimensions=opts.perceptual_dimensions,
n_distractors=opts.n_distractors,
batch_size=opts.batch_size,
train_samples=opts.train_samples,
validation_samples=opts.validation_samples,
test_samples=opts.test_samples,
shuffle_train_data=opts.shuffle_train_data,
dump_data_folder=opts.dump_data_folder,
load_data_path=opts.load_data_path,
seed=opts.data_seed)
train_data, validation_data, test_data = data_loader.get_iterators()
data_loader.upd_cl_options(opts)
if opts.max_len > 1:
baseline_msg = 'Cannot yet compute "smart" baseline value for messages of length greater than 1'
else:
baseline_msg = f'\n| Baselines measures with {opts.n_distractors} distractors and messages of max_len = {opts.max_len}:\n' \
f'| Dummy random baseline: accuracy = {1 / (opts.n_distractors + 1)}\n'
if -1 not in opts.perceptual_dimensions:
baseline_msg += f'| "Smart" baseline with perceptual_dimensions {opts.perceptual_dimensions} = {compute_baseline_accuracy(opts.n_distractors, opts.max_len, *opts.perceptual_dimensions)}\n'
else:
            baseline_msg += f'| Data was loaded from an external file, thus no perceptual_dimension vector was provided, "smart baseline" cannot be computed\n'
print(baseline_msg)
sender = Sender(n_features=data_loader.n_features, n_hidden=opts.sender_hidden)
receiver = Receiver(n_features=data_loader.n_features, linear_units=opts.receiver_hidden)
if opts.mode.lower() == 'gs':
sender = core.RnnSenderGS(sender,
opts.vocab_size,
opts.sender_embedding,
opts.sender_hidden,
cell=opts.sender_cell,
max_len=opts.max_len,
temperature=opts.temperature)
receiver = core.RnnReceiverGS(receiver,
opts.vocab_size,
opts.receiver_embedding,
opts.receiver_hidden,
cell=opts.receiver_cell
)
game = core.SenderReceiverRnnGS(sender, receiver, loss)
else:
raise NotImplementedError(f'Unknown training mode, {opts.mode}')
optimizer = torch.optim.Adam([
{'params': game.sender.parameters(), 'lr': opts.sender_lr},
{'params': game.receiver.parameters(), 'lr': opts.receiver_lr}
])
callbacks = [core.ConsoleLogger(as_json=True)]
if opts.mode.lower() == 'gs':
callbacks.append(core.TemperatureUpdater(agent=sender, decay=0.9, minimum=0.1))
trainer = core.Trainer(game=game, optimizer=optimizer,
train_data=train_data, validation_data=validation_data, callbacks=callbacks)
trainer.train(n_epochs=opts.n_epochs)
if opts.evaluate:
is_gs = opts.mode == 'gs'
sender_inputs, messages, receiver_inputs, receiver_outputs, labels = dump_sender_receiver(game, test_data, is_gs, variable_length=True, device=device)
receiver_outputs = move_to(receiver_outputs, device)
labels = move_to(labels, device)
receiver_outputs = torch.stack(receiver_outputs)
labels = torch.stack(labels)
tensor_accuracy = receiver_outputs.argmax(dim=1) == labels
accuracy = torch.mean(tensor_accuracy.float()).item()
unique_dict = {}
for elem in sender_inputs:
target = ""
for dim in elem:
target += f'{str(int(dim.item()))}-'
target = target[:-1]
if target not in unique_dict:
unique_dict[target] = True
print(f'| Accuracy on test set: {accuracy}')
compute_mi_input_msgs(sender_inputs, messages)
print(f'entropy sender inputs {entropy(sender_inputs)}')
print(f'mi sender inputs msgs {mutual_info(sender_inputs, messages)}')
if opts.dump_msg_folder:
opts.dump_msg_folder.mkdir(exist_ok=True)
msg_dict = {}
output_msg = f'messages_{opts.perceptual_dimensions}_vocab_{opts.vocab_size}' \
f'_maxlen_{opts.max_len}_bsize_{opts.batch_size}' \
f'_n_distractors_{opts.n_distractors}_train_size_{opts.train_samples}' \
f'_valid_size_{opts.validation_samples}_test_size_{opts.test_samples}' \
f'_slr_{opts.sender_lr}_rlr_{opts.receiver_lr}_shidden_{opts.sender_hidden}' \
f'_rhidden_{opts.receiver_hidden}_semb_{opts.sender_embedding}' \
f'_remb_{opts.receiver_embedding}_mode_{opts.mode}' \
f'_scell_{opts.sender_cell}_rcell_{opts.receiver_cell}.msg'
output_file = opts.dump_msg_folder / output_msg
with open(output_file, 'w') as f:
f.write(f'{opts}\n')
for sender_input, message, receiver_input, receiver_output, label \
in zip(sender_inputs, messages, receiver_inputs, receiver_outputs, labels):
sender_input = ','.join(map(str, sender_input.tolist()))
message = ','.join(map(str, message.tolist()))
distractors_list = receiver_input.tolist()
receiver_input = '; '.join([','.join(map(str, elem)) for elem in distractors_list])
if is_gs: receiver_output = receiver_output.argmax()
f.write(f'{sender_input} -> {receiver_input} -> {message} -> {receiver_output} (label={label.item()})\n')
if message in msg_dict:
msg_dict[message] += 1
else:
msg_dict[message] = 1
sorted_msgs = sorted(msg_dict.items(), key=operator.itemgetter(1), reverse=True)
f.write(f'\nUnique target vectors seen by sender: {len(unique_dict.keys())}\n')
f.write(f'Unique messages produced by sender: {len(msg_dict.keys())}\n')
f.write(f"Messagses: 'msg' : msg_count: {str(sorted_msgs)}\n")
f.write(f'\nAccuracy: {accuracy}')
if __name__ == "__main__":
import sys
main(sys.argv[1:])
| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 49.073077 | 203 | 0.62309 |
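egg/zoo/objects_game/train.py above is driven from the command line through get_params(). The sketch below is not an official invocation; it assumes the EGG package is installed and only uses flags defined in the file above, while options consumed from egg.core (e.g. opts.vocab_size, opts.max_len, opts.batch_size, opts.n_epochs) are left to whatever core.init provides.

```python
# Minimal programmatic invocation sketch; the flag names come from get_params()
# above, the values are arbitrary examples.
from egg.zoo.objects_game.train import main

main([
    "--perceptual_dimensions", "[4, 4, 4, 4, 4]",
    "--n_distractors", "3",
    "--train_samples", "1e4",
    "--validation_samples", "1e3",
    "--test_samples", "1e3",
    "--mode", "gs",
    "--evaluate",
])
```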
| field | value |
|---|---|
| hexsha | f2fd2ad184d8dccaf787c971d75b58eb5a7a0ee0 |
| size | 1,302 |
| ext | py |
| lang | Python |
| max_stars / max_issues / max_forks repo_path | setup.py |
| max_stars / max_issues / max_forks repo_name | TinkerEdgeT/mendel-mdt |
| max_stars / max_issues / max_forks repo_head_hexsha | f3cc1097ab90ba5713eb3f1769f8c34265ade2fa |
| max_stars / max_issues / max_forks repo_licenses | ["Apache-2.0"] |
| max_stars_count | null |
| max_stars event min / max datetime | null / null |
| max_issues_count | null |
| max_issues event min / max datetime | null / null |
| max_forks_count | null |
| max_forks event min / max datetime | null / null |

content (setup.py):
#!/usr/bin/python3
from setuptools import setup, find_packages
from os import path
from io import open
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='mendel-development-tool',
version='1.2',
description='A command-line tool to manage Mendel Linux embedded systems',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://coral.googlesource.com/mdt.git',
author='Mendel Linux Software Team',
author_email='coral-support@google.com',
license='Apache 2',
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development',
'Topic :: Utilities',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX',
'Programming Language :: Python :: 3',
],
keywords='embedded development',
packages=find_packages(),
python_requires='>=3.5.0',
install_requires=[
'zeroconf>=0.19.1',
'paramiko>=2.0.0'
],
data_files=[('share/man/man1', ['man/mdt.1'])],
entry_points={
'console_scripts': [
'mdt=mdt.main:main',
],
},
)
| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 28.933333 | 78 | 0.630568 |
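The console_scripts entry point in the setup.py above wires the installed `mdt` command to `mdt.main:main`. Roughly, and assuming the `mdt` package from that repository is importable, the generated launcher behaves like this hand-written equivalent:

```python
# Hand-written equivalent of the 'mdt=mdt.main:main' console_scripts launcher
# (sketch only; setuptools generates the real wrapper at install time).
import sys

from mdt.main import main

if __name__ == "__main__":
    sys.exit(main())
```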
| field | value |
|---|---|
| hexsha | 6c6176059aae48f74200c2bd7a74e48f8714c186 |
| size | 13,127 |
| ext | py |
| lang | Python |
| max_stars / max_issues / max_forks repo_path | scripts/train_fcn.py |
| max_stars / max_issues / max_forks repo_name | AdityaAS/cycada |
| max_stars / max_issues / max_forks repo_head_hexsha | 04e062201ec23829e405639c400ce433d8a6da6a |
| max_stars / max_issues / max_forks repo_licenses | ["BSD-2-Clause"] |
| max_stars_count | 1 |
| max_stars event min / max datetime | 2020-02-22T02:23:56.000Z / 2020-02-22T02:23:56.000Z |
| max_issues_count | null |
| max_issues event min / max datetime | null / null |
| max_forks_count | null |
| max_forks event min / max datetime | null / null |

content (scripts/train_fcn.py):
import sys
sys.path.append('./')
import logging
import os
from os.path import join, exists
from collections import deque
import click
import math
import json
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.utils as vutils
from tensorboardX import SummaryWriter
from PIL import Image
from copy import copy
from torch.autograd import Variable
print(os.getcwd())
import shutil
from cycada.data.data_loader import get_fcn_dataset as get_fcn_dataset
from cycada.models import get_model
from cycada.models.models import models
from cycada.transforms import augment_collate
from cycada.util import config_logging
from cycada.util import to_tensor_raw
from cycada.util import roundrobin_infinite
from cycada.util import preprocess_viz
from cycada.tools.util import make_variable
from cycada.loss_fns import supervised_loss
from cycada.metrics import fast_hist
from cycada.metrics import result_stats
from cycada.metrics import sklearnScores
from tqdm import tqdm
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--load', dest='load', type=str, help="path to load model", default=False)
parser.add_argument('--c', dest='config', type=str, help="Config file", default=False)
args = parser.parse_args()
def main(config_path):
config = None
config_file = config_path.split('/')[-1]
version = config_file.split('.')[0][1:]
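    # Assumes config files are named like "v<N>.json" (e.g. "v3.json" -> version "3");
    # the version string is reused below to build the runs/<model>/<dataset>/v<N>/ paths.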
with open(config_path, 'r') as f:
config = json.load(f)
config["version"] = version
config_logging()
# Initialize SummaryWriter - For tensorboard visualizations
logdir = 'runs/{:s}/{:s}/{:s}/{:s}'.format(config["model"], config["dataset"], 'v{}'.format(config["version"]), 'tflogs')
logdir = logdir + "/"
checkpointdir = join('runs', config["model"], config["dataset"], 'v{}'.format(config["version"]), 'checkpoints')
print("Logging directory: {}".format(logdir))
print("Checkpoint directory: {}".format(checkpointdir))
versionpath = join('runs', config["model"], config["dataset"], 'v{}'.format(config["version"]))
if not exists(versionpath):
os.makedirs(versionpath)
os.makedirs(checkpointdir)
os.makedirs(logdir)
elif exists(versionpath) and config["force"]:
shutil.rmtree(versionpath)
os.makedirs(versionpath)
os.makedirs(checkpointdir)
os.makedirs(logdir)
else:
print("Version {} already exists! Please run with different version number".format(config["version"]))
logging.info("Version {} already exists! Please run with different version number".format(config["version"]))
sys.exit(-1)
writer = SummaryWriter(logdir)
# Get appropriate model based on config parameters
net = get_model(config["model"], num_cls=config["num_cls"])
if args.load:
net.load_state_dict(torch.load(args.load))
print("============ Loading Model ===============")
model_parameters = filter(lambda p: p.requires_grad, net.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
dataset = config["dataset"]
num_workers = config["num_workers"]
pin_memory = config["pin_memory"]
dataset = dataset[0]
datasets_train = get_fcn_dataset(config["dataset"], config["data_type"], join(config["datadir"], config["dataset"]), split='train')
datasets_val = get_fcn_dataset(config["dataset"], config["data_type"], join(config["datadir"], config["dataset"]), split='val')
datasets_test = get_fcn_dataset(config["dataset"], config["data_type"], join(config["datadir"], config["dataset"]), split='test')
if config["weights"] is not None:
weights = np.loadtxt(config["weights"])
opt = torch.optim.SGD(net.parameters(), lr=config["lr"], momentum=config["momentum"],
weight_decay=0.0005)
if config["augmentation"]:
collate_fn = lambda batch: augment_collate(batch, crop=config["crop_size"], flip=True)
else:
collate_fn = torch.utils.data.dataloader.default_collate
train_loader = torch.utils.data.DataLoader(datasets_train, batch_size=config["batch_size"],
shuffle=True, num_workers=num_workers,
collate_fn=collate_fn,
pin_memory=pin_memory)
    # val_loader is used by the validation loop below, so it must not stay commented out.
    val_loader = torch.utils.data.DataLoader(datasets_val, batch_size=config["batch_size"],
                                             shuffle=True, num_workers=num_workers,
                                             collate_fn=collate_fn,
                                             pin_memory=pin_memory)
test_loader = torch.utils.data.DataLoader(datasets_test, batch_size=config["batch_size"],
shuffle=False, num_workers=num_workers,
collate_fn=collate_fn,
pin_memory=pin_memory)
data_metric = {'train': None, 'val' : None, 'test' : None}
Q_size = len(train_loader)/config["batch_size"]
metrics = {'losses': list(), 'ious': list(), 'recalls': list()}
data_metric['train'] = copy(metrics)
data_metric['val'] = copy(metrics)
data_metric['test'] = copy(metrics)
num_cls = config["num_cls"]
hist = np.zeros((num_cls, num_cls))
iteration = 0
for epoch in range(config["num_epochs"]+1):
if config["phase"] == 'train':
net.train()
iterator = tqdm(iter(train_loader))
# Epoch train
print("Train Epoch!")
for im, label in iterator:
if torch.isnan(im).any() or torch.isnan(label).any():
import pdb; pdb.set_trace();
iteration += 1
# Clear out gradients
opt.zero_grad()
# load data/label
im = make_variable(im, requires_grad=False)
label = make_variable(label, requires_grad=False)
#print(im.size())
# forward pass and compute loss
preds = net(im)
#score = preds.data
#_, pred = torch.max(score, 1)
#hist += fast_hist(label.cpu().numpy().flatten(), pred.cpu().numpy().flatten(),num_cls)
#acc_overall, acc_percls, iu, fwIU = result_stats(hist)
loss = supervised_loss(preds, label)
# iou = jaccard_score(preds, label)
precision, rc, fscore, support, iou = sklearnScores(preds, label.type(torch.IntTensor))
#print(acc_overall, np.nanmean(acc_percls), np.nanmean(iu), fwIU)
# backward pass
loss.backward()
                # TODO: Right now this is a running average; ideally we want a true average. Make that change.
                # A total average would be memory intensive, so leave it as a running average for the moment.
data_metric['train']['losses'].append(loss.item())
data_metric['train']['ious'].append(iou)
data_metric['train']['recalls'].append(rc)
# step gradients
opt.step()
# Train visualizations - each iteration
if iteration % config["train_tf_interval"] == 0:
vizz = preprocess_viz(im, preds, label)
writer.add_scalar('train/loss', loss, iteration)
writer.add_scalar('train/IOU', iou, iteration)
writer.add_scalar('train/recall', rc, iteration)
imutil = vutils.make_grid(torch.from_numpy(vizz), nrow=3, normalize=True, scale_each=True)
writer.add_image('{}_image_data'.format('train'), imutil, iteration)
iterator.set_description("TRAIN V: {} | Epoch: {}".format(config["version"], epoch))
iterator.refresh()
if iteration % 20000 == 0:
torch.save(net.state_dict(), join(checkpointdir, 'iter_{}_{}.pth'.format(iteration, epoch)))
# clean before test/val
opt.zero_grad()
# Train visualizations - per epoch
vizz = preprocess_viz(im, preds, label)
writer.add_scalar('trainepoch/loss', np.mean(data_metric['train']['losses']), global_step=epoch)
writer.add_scalar('trainepoch/IOU', np.mean(data_metric['train']['ious']), global_step=epoch)
writer.add_scalar('trainepoch/recall', np.mean(data_metric['train']['recalls']), global_step=epoch)
imutil = vutils.make_grid(torch.from_numpy(vizz), nrow=3, normalize=True, scale_each=True)
writer.add_image('{}_image_data'.format('trainepoch'), imutil, global_step=epoch)
print("Loss :{}".format(np.mean(data_metric['train']['losses'])))
print("IOU :{}".format(np.mean(data_metric['train']['ious'])))
print("recall :{}".format(np.mean(data_metric['train']['recalls'])))
if epoch % config["checkpoint_interval"] == 0:
torch.save(net.state_dict(), join(checkpointdir, 'iter{}.pth'.format(epoch)))
# Train epoch done. Free up lists
for key in data_metric['train'].keys():
data_metric['train'][key] = list()
if epoch % config["val_epoch_interval"] == 0:
net.eval()
print("Val_epoch!")
iterator = tqdm(iter(val_loader))
for im, label in iterator:
# load data/label
im = make_variable(im, requires_grad=False)
label = make_variable(label, requires_grad=False)
# forward pass and compute loss
preds = net(im)
loss = supervised_loss(preds, label)
precision, rc, fscore, support, iou = sklearnScores(preds, label.type(torch.IntTensor))
data_metric['val']['losses'].append(loss.item())
data_metric['val']['ious'].append(iou)
data_metric['val']['recalls'].append(rc)
iterator.set_description("VAL V: {} | Epoch: {}".format(config["version"], epoch))
iterator.refresh()
# Val visualizations
vizz = preprocess_viz(im, preds, label)
writer.add_scalar('valepoch/loss', np.mean(data_metric['val']['losses']), global_step=epoch)
writer.add_scalar('valepoch/IOU', np.mean(data_metric['val']['ious']), global_step=epoch)
writer.add_scalar('valepoch/Recall', np.mean(data_metric['val']['recalls']), global_step=epoch)
imutil = vutils.make_grid(torch.from_numpy(vizz), nrow=3, normalize=True, scale_each=True)
writer.add_image('{}_image_data'.format('val'), imutil, global_step=epoch)
# Val epoch done. Free up lists
for key in data_metric['val'].keys():
data_metric['val'][key] = list()
# Epoch Test
if epoch % config["test_epoch_interval"] == 0:
net.eval()
print("Test_epoch!")
iterator = tqdm(iter(test_loader))
for im, label in iterator:
# load data/label
im = make_variable(im, requires_grad=False)
label = make_variable(label, requires_grad=False)
# forward pass and compute loss
preds = net(im)
loss = supervised_loss(preds, label)
precision, rc, fscore, support, iou = sklearnScores(preds, label.type(torch.IntTensor))
data_metric['test']['losses'].append(loss.item())
data_metric['test']['ious'].append(iou)
data_metric['test']['recalls'].append(rc)
iterator.set_description("TEST V: {} | Epoch: {}".format(config["version"], epoch))
iterator.refresh()
# Test visualizations
writer.add_scalar('testepoch/loss', np.mean(data_metric['test']['losses']), global_step=epoch)
writer.add_scalar('testepoch/IOU', np.mean(data_metric['test']['ious']), global_step=epoch)
writer.add_scalar('testepoch/Recall', np.mean(data_metric['test']['recalls']), global_step=epoch)
# Test epoch done. Free up lists
for key in data_metric['test'].keys():
data_metric['test'][key] = list()
if config["step"] is not None and epoch % config["step"] == 0:
logging.info('Decreasing learning rate by 0.1 factor')
            step_lr(opt, 0.1)  # the optimizer defined above is named `opt`
logging.info('Optimization complete.')
if __name__ == '__main__':
p = args.config#sys.argv[1]
config_path = join('./configs/fcn/', p)
if exists(config_path):
main(config_path)
else :
print(p)
print("Incorrect Path")
| 43.756667 | 135 | 0.588101 |
0d1bb9801bebf3c964f56b4337529760ca807ee1 | 2,834 | py | Python | onnxruntime/test/testdata/transform/noop-add.py | lchang20/onnxruntime | 97b8f6f394ae02c73ed775f456fd85639c91ced1 | ["MIT"] | 6,036 | 2019-05-07T06:03:57.000Z | 2022-03-31T17:59:54.000Z | onnxruntime/test/testdata/transform/noop-add.py | lchang20/onnxruntime | 97b8f6f394ae02c73ed775f456fd85639c91ced1 | ["MIT"] | 5,730 | 2019-05-06T23:04:55.000Z | 2022-03-31T23:55:56.000Z | onnxruntime/test/testdata/transform/noop-add.py | lchang20/onnxruntime | 97b8f6f394ae02c73ed775f456fd85639c91ced1 | ["MIT"] | 1,566 | 2019-05-07T01:30:07.000Z | 2022-03-31T17:06:50.000Z |
import onnx
from onnx import helper
from onnx import TensorProto, OperatorSetIdProto
opsets = []
onnxdomain = OperatorSetIdProto()
onnxdomain.version = 12
onnxdomain.domain = "" # The empty string ("") or absence of this field implies the operator set that is defined as part of the ONNX specification.
opsets.append(onnxdomain)
msdomain = OperatorSetIdProto()
msdomain.version = 1
msdomain.domain = 'com.microsoft'
opsets.append(msdomain)
kwargs={}
kwargs['opset_imports'] = opsets
def GenerateModel(model_name):
nodes = [ # subgraph
# float
helper.make_node("Identity", ["X1"], ["id_1"], "id_1"),
helper.make_node("Add", ["float_1", "id_1"], ["add_1"], "add_1"),
helper.make_node("Identity", ["add_1"], ["Y1"], "id_2"),
# float_16
helper.make_node("Identity", ["X2"], ["id_3"], "id_3"),
helper.make_node("Add", ["float16_1", "id_3"], ["add_2"], "add_2"),
helper.make_node("Identity", ["add_2"], ["Y2"], "id_4"),
# int64 - flip the input 0 and 1
helper.make_node("Identity", ["X3"], ["id_5"], "id_5"),
helper.make_node("Add", ["id_5", "int64_1"], ["add_3"], "add_3"),
helper.make_node("Identity", ["add_3"], ["Y3"], "id_6"),
# int64
helper.make_node("Identity", ["X4"], ["id_7"], "id_7"),
helper.make_node("Add", ["id_7", "int64_2"], ["add_4"], "add_4"),
helper.make_node("Identity", ["add_4"], ["Y4"], "id_8"),
]
inputs = [ # inputs
helper.make_tensor_value_info('X1', TensorProto.FLOAT, ['M', 'K']),
helper.make_tensor_value_info('X2', TensorProto.FLOAT16, ['M', 'K']),
helper.make_tensor_value_info('X3', TensorProto.INT64, ['M', 'K']),
helper.make_tensor_value_info('X4', TensorProto.INT64, ['M', 'K']),
]
initializers = [
helper.make_tensor('float_1', TensorProto.FLOAT, [1], [0.0]),
helper.make_tensor('float16_1', TensorProto.FLOAT16, [1], [0]),
# int64 - set tensor size to 0
helper.make_tensor('int64_1', TensorProto.INT64, (), [0]),
# higher rank
helper.make_tensor('int64_2', TensorProto.INT64, [1,1,1], [0]),
]
graph = helper.make_graph(
nodes,
"NoopAdd", #name
inputs,
[ # outputs
helper.make_tensor_value_info('Y1', TensorProto.FLOAT, ['M', 'K']),
helper.make_tensor_value_info('Y2', TensorProto.FLOAT16, ['M', 'K']),
helper.make_tensor_value_info('Y3', TensorProto.INT64, ['M', 'K']),
helper.make_tensor_value_info('Y4', TensorProto.INT64, ['M', 'K', 1]),
],
initializers)
model = helper.make_model(graph, **kwargs)
onnx.save(model, model_name)
if __name__ == "__main__":
GenerateModel('noop-add.onnx')
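
The generated file can be sanity-checked by reloading it and running the ONNX checker; this is only an illustrative sketch, not part of the test-data generator above.

import onnx

model = onnx.load('noop-add.onnx')                # file written by GenerateModel above
onnx.checker.check_model(model)                   # raises if the graph or opset imports are malformed
print(onnx.helper.printable_graph(model.graph))   # human-readable dump of the NoopAdd graph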
| 39.915493 | 147 | 0.586097 |
2c73f13b4a1a221a17489fe7dd8b692be8fb1e8e | 4,839 | py | Python | experiments/full_simulation/full_forearm_simulation.py | CAMI-DKFZ/simpa_paper_experiments | f5a37d57692b29b78b85d60a38e4dc0aaa5aadfc | ["MIT"] | null | null | null | experiments/full_simulation/full_forearm_simulation.py | CAMI-DKFZ/simpa_paper_experiments | f5a37d57692b29b78b85d60a38e4dc0aaa5aadfc | ["MIT"] | null | null | null | experiments/full_simulation/full_forearm_simulation.py | CAMI-DKFZ/simpa_paper_experiments | f5a37d57692b29b78b85d60a38e4dc0aaa5aadfc | ["MIT"] | null | null | null |
# SPDX-FileCopyrightText: 2021 Computer Assisted Medical Interventions Group, DKFZ
# SPDX-FileCopyrightText: 2021 Janek Groehl
# SPDX-License-Identifier: MIT
import simpa as sp
from simpa import Tags
import numpy as np
import matplotlib.pyplot as plt
from utils.save_directory import get_save_path
from utils.create_example_tissue import create_example_tissue
from utils.basic_settings import create_basic_optical_settings, create_basic_acoustic_settings, \
create_basic_reconstruction_settings
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
VOLUME_WIDTH_HEIGHT_DIM_IN_MM = 50
VOLUME_PLANAR_DIM_IN_MM = 50
SPACING = 0.3
RANDOM_SEED = 24618925
path_manager = sp.PathManager()
SAVE_PATH = get_save_path("full_simulation", "forearm")
# Seed the numpy random configuration prior to creating the global_settings file in
# order to ensure that the same volume
# is generated with the same random seed every time.
np.random.seed(RANDOM_SEED)
VOLUME_NAME = "ForearmScan" + str(RANDOM_SEED)
file_path = SAVE_PATH + "/" + VOLUME_NAME + ".hdf5"
settings = {
    # These parameters set the general properties of the simulated volume
Tags.RANDOM_SEED: RANDOM_SEED,
Tags.VOLUME_NAME: VOLUME_NAME,
Tags.SIMULATION_PATH: SAVE_PATH,
Tags.SPACING_MM: SPACING,
Tags.WAVELENGTHS: [800],
Tags.DIM_VOLUME_Z_MM: VOLUME_WIDTH_HEIGHT_DIM_IN_MM,
Tags.DIM_VOLUME_X_MM: VOLUME_WIDTH_HEIGHT_DIM_IN_MM,
Tags.DIM_VOLUME_Y_MM: VOLUME_PLANAR_DIM_IN_MM,
Tags.VOLUME_CREATOR: Tags.VOLUME_CREATOR_VERSATILE,
Tags.GPU: True
}
settings = sp.Settings(settings)
settings.set_volume_creation_settings({
Tags.STRUCTURES: create_example_tissue(),
Tags.SIMULATE_DEFORMED_LAYERS: True
})
settings.set_optical_settings(create_basic_optical_settings(path_manager))
settings.set_acoustic_settings(create_basic_acoustic_settings(path_manager))
settings.set_reconstruction_settings(create_basic_reconstruction_settings(path_manager, SPACING))
settings["noise_time_series"] = {
Tags.NOISE_STD: 100,
Tags.NOISE_MODE: Tags.NOISE_MODE_ADDITIVE,
Tags.DATA_FIELD: Tags.DATA_FIELD_TIME_SERIES_DATA
}
device = sp.PhotoacousticDevice(device_position_mm=np.asarray([VOLUME_WIDTH_HEIGHT_DIM_IN_MM/2,
VOLUME_PLANAR_DIM_IN_MM/2, 0]))
device.set_detection_geometry(sp.LinearArrayDetectionGeometry(device_position_mm=np.asarray([
VOLUME_WIDTH_HEIGHT_DIM_IN_MM/2, VOLUME_PLANAR_DIM_IN_MM/2, 0]),
number_detector_elements=256,
pitch_mm=0.15))
device.add_illumination_geometry(sp.GaussianBeamIlluminationGeometry(beam_radius_mm=25))
SIMULATION_PIPELINE = [
sp.ModelBasedVolumeCreationAdapter(settings),
sp.MCXAdapter(settings),
sp.KWaveAdapter(settings),
sp.GaussianNoise(settings, "noise_time_series"),
sp.DelayAndSumAdapter(settings),
sp.FieldOfViewCropping(settings)
]
sp.simulate(SIMULATION_PIPELINE, settings, device)
wavelength = settings[Tags.WAVELENGTHS][0]
segmentation_mask = sp.load_data_field(file_path=file_path,
wavelength=wavelength,
data_field=Tags.DATA_FIELD_SEGMENTATION)
reco = np.rot90(sp.load_data_field(file_path, wavelength=wavelength, data_field=Tags.DATA_FIELD_RECONSTRUCTED_DATA), -1)
time_series = np.rot90(sp.load_data_field(file_path, wavelength=wavelength, data_field=Tags.DATA_FIELD_TIME_SERIES_DATA), -1)
initial_pressure = np.rot90(sp.load_data_field(file_path, wavelength=wavelength, data_field=Tags.DATA_FIELD_INITIAL_PRESSURE), -1)
plt.figure(figsize=(7, 3))
plt.subplot(1, 3, 1)
plt.axis('off')
plt.imshow(initial_pressure)
plt.subplot(1, 3, 2)
plt.axis('off')
plt.imshow(time_series, aspect=0.18)
plt.subplot(1, 3, 3)
plt.axis('off')
plt.imshow(reco)
plt.tight_layout()
plt.savefig(os.path.join(SAVE_PATH, "result.svg"), dpi=300)
plt.close()
# fig = plt.figure(figsize=(7, 7))
# ax = fig.add_subplot(111, projection='3d')
# ax.voxels(segmentation_mask==SegmentationClasses.EPIDERMIS, shade=True, facecolors="brown", alpha=0.45)
# ax.voxels(segmentation_mask==SegmentationClasses.DERMIS, shade=True, facecolors="pink", alpha=0.45)
# ax.voxels(segmentation_mask==SegmentationClasses.FAT, shade=True, facecolors="yellow", alpha=0.45)
# ax.voxels(segmentation_mask==SegmentationClasses.BLOOD, shade=True, facecolors="red", alpha=0.55)
# ax.voxels(segmentation_mask==SegmentationClasses.BONE, shade=True, facecolors="antiquewhite", alpha=0.55)
# ax.set_aspect('auto')
# ax.set_zlim(50, 0)
# ax.set_zlabel("Depth [mm]")
# ax.set_xlabel("x width [mm]")
# ax.set_ylabel("y width [mm]")
# ax.view_init(elev=10., azim=-45)
# plt.savefig(os.path.join(SAVE_PATH, "forearm.svg"), dpi=300)
# plt.show()
| 39.341463 | 130 | 0.750775 |
584818d7cfdc28a3e1184ea9ba1cae97bfccbe35 | 1,404 | py | Python | mne_bids/commands/run.py | zuxfoucault/mne-bids | 5c54442bced9b1bdcd4a77c9f5a347dc4b8c552d | ["BSD-3-Clause"] | null | null | null | mne_bids/commands/run.py | zuxfoucault/mne-bids | 5c54442bced9b1bdcd4a77c9f5a347dc4b8c552d | ["BSD-3-Clause"] | null | null | null | mne_bids/commands/run.py | zuxfoucault/mne-bids | 5c54442bced9b1bdcd4a77c9f5a347dc4b8c552d | ["BSD-3-Clause"] | null | null | null |
"""Command Line Interface for MNE-BIDS."""
# Authors: Teon Brooks <teon.brooks@gmail.com>
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
#
# License: BSD (3-clause)
import sys
import glob
import subprocess
import os.path as op
import mne_bids
mne_bin_dir = op.abspath(op.dirname(mne_bids.__file__))
valid_commands = sorted(glob.glob(op.join(mne_bin_dir,
'commands', 'mne_bids_*.py')))
valid_commands = [c.split(op.sep)[-1][9:-3] for c in valid_commands]
def print_help():
"""Print the help."""
print("Usage : mne_bids command options\n")
print("Accepted commands :\n")
for c in valid_commands:
print("\t- %s" % c)
print("\nExample : mne_bids raw_to_bids --subject_id sub01 --task rest",
"--raw_file data.edf --output_path new_path")
sys.exit(0)
def main():
if len(sys.argv) == 1:
print_help()
elif ("help" in sys.argv[1] or "-h" in sys.argv[1]):
print_help()
elif sys.argv[1] == "--version":
print("MNE-BIDS %s" % mne_bids.__version__)
elif sys.argv[1] not in valid_commands:
print('Invalid command: "%s"\n' % sys.argv[1])
print_help()
sys.exit(0)
else:
cmd = sys.argv[1]
cmd_path = op.join(mne_bin_dir, 'commands', 'mne_bids_%s.py' % cmd)
sys.exit(subprocess.call([sys.executable, cmd_path] + sys.argv[2:]))
| 30.521739 | 76 | 0.61396 |
e1be2452850c8da7b6ce5748fbf55dfbc9fce500 | 5,255 | py | Python | conf.py | jmbowman/managing_python_package_dependencies | b1cb94a490f653a14803a869f1f81128bba12356 | ["CC-BY-4.0"] | null | null | null | conf.py | jmbowman/managing_python_package_dependencies | b1cb94a490f653a14803a869f1f81128bba12356 | ["CC-BY-4.0"] | null | null | null | conf.py | jmbowman/managing_python_package_dependencies | b1cb94a490f653a14803a869f1f81128bba12356 | ["CC-BY-4.0"] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# flaky_bok-choy_talk documentation build configuration file, created by
# sphinx-quickstart on Thu Apr 20 14:05:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinxjp.themes.revealjs'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Managing Python Package Dependencies'
copyright = '2018, Jeremy Bowman'
author = 'Jeremy Bowman'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'revealjs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'fragments': True,
'lang': 'en',
'theme': 'night',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
html_title = 'Managing Python Package Dependencies'
# If false, no index is generated.
html_use_index = False
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'managing_python_package_dependencies_doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'managing_python_package_dependencies.tex', 'Managing Python Package Dependencies',
'Jeremy Bowman', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'managing_python_package_dependencies', 'Managing Python Package Dependencies',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'managing_python_package_dependencies', 'Managing Python Package Dependencies',
author, 'managing_python_package_dependencies', 'June 2018 Boston Django Meetup presentation',
'Miscellaneous'),
]
| 30.911765 | 100 | 0.693054 |
4e90af5d3be05b07fad62a428d2a9162dd378a9e | 4,992 | py | Python | NVIDIA/benchmarks/deepcam/implementations/pytorch/src/deepCam/utils/parser.py | mlcommons/hpc_results_v1.0 | a3f7469937aa44a48e186160a2e97464970cf72f | ["Apache-2.0"] | 3 | 2021-11-18T20:01:35.000Z | 2021-12-17T17:47:23.000Z | NVIDIA/benchmarks/deepcam/implementations/pytorch/src/deepCam/utils/parser.py | mlcommons/hpc_results_v1.0 | a3f7469937aa44a48e186160a2e97464970cf72f | ["Apache-2.0"] | 1 | 2022-03-16T07:29:30.000Z | 2022-03-31T10:19:07.000Z | LBNL/benchmarks/deepcam/implementations/deepcam-pytorch/deepCam/utils/parser.py | mlcommons/hpc_results_v1.0 | a3f7469937aa44a48e186160a2e97464970cf72f | ["Apache-2.0"] | 1 | 2021-11-18T01:53:25.000Z | 2021-11-18T01:53:25.000Z |
import argparse as ap
#dict helper for argparse
class StoreDictKeyPair(ap.Action):
def __call__(self, parser, namespace, values, option_string=None):
my_dict = {}
for kv in values.split(","):
k,v = kv.split("=")
my_dict[k] = v
setattr(namespace, self.dest, my_dict)
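
# Illustrative example (not in the original file): an argument registered with
#   AP.add_argument("--lr_schedule", action=StoreDictKeyPair)
# turns a command-line value such as
#   --lr_schedule "30=0.1,60=0.01"
# into args.lr_schedule == {"30": "0.1", "60": "0.01"}; keys and values stay
# strings, so the caller is expected to cast them as needed.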
def get_parser():
AP = ap.ArgumentParser()
AP.add_argument("--wireup_method", type=str, default="nccl-openmpi", choices=["dummy", "nccl-file", "nccl-openmpi", \
"nccl-slurm", "nccl-slurm-pmi", "mpi"], help="Specify what is used for wiring up the ranks")
AP.add_argument("--wandb_certdir", type=str, default="/opt/certs", help="Directory in which to find the certificate for wandb logging.")
AP.add_argument("--run_tag", type=str, help="Unique run tag, to allow for better identification")
AP.add_argument("--experiment_id", type=int, default=1, help="Experiment Number")
AP.add_argument("--output_dir", type=str, help="Directory used for storing output. Needs to read/writeable from rank 0")
AP.add_argument("--checkpoint", type=str, default=None, help="Checkpoint file to restart training from.")
AP.add_argument("--data_dir_prefix", type=str, default='/', help="prefix to data dir")
AP.add_argument("--max_inter_threads", type=int, default=1, help="Maximum number of concurrent readers")
AP.add_argument("--max_epochs", type=int, default=30, help="Maximum number of epochs to train")
AP.add_argument("--save_frequency", type=int, default=100, help="Frequency with which the model is saved in number of steps")
AP.add_argument("--logging_frequency", type=int, default=100, help="Frequency with which the training progress is logged. If not positive, logging will be disabled")
AP.add_argument("--local_batch_size", type=int, default=1, help="Number of samples per local minibatch")
AP.add_argument("--local_batch_size_validation", type=int, default=1, help="Number of samples per local minibatch for validation")
AP.add_argument("--channels", type=int, nargs='+', default=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15], help="Channels used in input")
AP.add_argument("--optimizer", type=str, default="Adam", choices=["Adam", "AdamW", "LAMB", "DistributedLAMB"], help="Optimizer to use (LAMB requires APEX support).")
AP.add_argument("--start_lr", type=float, default=1e-3, help="Start LR")
AP.add_argument("--weight_decay", type=float, default=1e-6, help="Weight decay")
AP.add_argument("--lr_warmup_steps", type=int, default=0, help="Number of steps for linear LR warmup")
AP.add_argument("--lr_warmup_factor", type=float, default=1., help="Multiplier for linear LR warmup")
AP.add_argument("--lr_schedule", action=StoreDictKeyPair)
AP.add_argument("--target_iou", type=float, default=0.82, help="Target IoU score.")
AP.add_argument("--model_prefix", type=str, default="model", help="Prefix for the stored model")
AP.add_argument("--gradient_accumulation_frequency", type=int, default=1, help="Number of gradient accumulation steps before update")
AP.add_argument("--batchnorm_group_size", type=int, default=1, help="Process group size for sync batchnorm")
AP.add_argument("--shuffle_mode", type=str, default="global", choices=["global", "node", "gpu"], help="Specifies how to shuffle the data")
AP.add_argument("--data_format", type=str, default="dali-numpy", choices=["hdf5", "dali-numpy", "dali-recordio", "dali-es", "dali-es-disk", "dali-dummy"], help="Specify data format")
AP.add_argument("--data_cache_directory", type=str, default="/tmp", help="Directory to which the data is cached. Only relevant for dali-es-disk dataloader, ignored otherwise")
AP.add_argument("--data_oversampling_factor", type=int, default=1, help="Determines how many different shard per nodes will be staged")
AP.add_argument("--precision_mode", type=str, default="amp", choices=["fp32", "amp", "fp16"], help="Specify precision format")
AP.add_argument("--enable_gds", action='store_true')
AP.add_argument("--enable_jit", action='store_true')
AP.add_argument("--enable_nhwc", action='store_true')
AP.add_argument("--enable_graph", action='store_true', help="Flag for enabling CUDA graph capture.")
AP.add_argument("--disable_tuning", action='store_true', help="Flag for disabling cuDNN benchmark mode to autotune kernels. Should not be necessary")
AP.add_argument("--force_groupbn", action='store_true')
AP.add_argument("--disable_validation", action='store_true')
AP.add_argument("--disable_comm_overlap", action='store_true')
AP.add_argument("--data_augmentations", type=str, nargs='+', default=[], help="Data augmentations used. Supported are [roll, flip]")
AP.add_argument("--enable_wandb", action='store_true')
AP.add_argument("--resume_logging", action='store_true')
AP.add_argument("--seed", default=333, type=int)
return AP
| 84.610169 | 186 | 0.701723 |
a1002f1f8aac3bf35eab69f2051a21bc53745ec8 | 1,524 | py | Python | setup.py | MLGB3/mlgb.indexer | e3e75620f3a4be01cabeff87555716e9c00599eb | ["Apache-2.0"] | null | null | null | setup.py | MLGB3/mlgb.indexer | e3e75620f3a4be01cabeff87555716e9c00599eb | ["Apache-2.0"] | null | null | null | setup.py | MLGB3/mlgb.indexer | e3e75620f3a4be01cabeff87555716e9c00599eb | ["Apache-2.0"] | null | null | null |
import os
from setuptools import setup
from setuptools import find_packages
version = open(
os.path.join("mlgb", "indexer", "version.txt")
).read().strip()
description = "Indexing scripts for mlgb."
longdesc = open("README.md").read()
longdesc += open(os.path.join("docs", "HISTORY.rst")).read()
setup(
name='mlgb.indexer',
version=version,
description=description,
long_description=longdesc,
author='Michael Davis',
author_email='michael.davis@bodleian.ox.ac.uk',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['mlgb'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'MySQL-python',
],
extras_require={
'test': ['pytest', ],
},
entry_points={
'console_scripts': [
'set_index_book_ids=mlgb.indexer.setIndexBookIDs:setBookIDs',
'strip_xml_comments=mlgb.indexer.stripXMLcomments:stripComments',
'strip_unwanted_tags=mlgb.indexer.stripUnwantedFormatting:stripUnwantedTags',
'write_xml=mlgb.indexer.authortitle_to_xml:writeXML',
'write_html=mlgb.indexer.writeHTML:writeAllHTMLFiles',
'catalogues_html=mlgb.indexer.cataloguesHTML:writeAllHTMLFiles',
'write_static_html=mlgb.indexer.write_static_mlgb:writeStaticHTML',
],
},
classifiers=[
'Programming Language :: Python',
'License :: Other/Proprietary License',
'Development Status :: 3 - Alpha',
],
)
| 30.48 | 89 | 0.665354 |
403d5581afafab673473df67de55d680e1dd80f4 | 6,859 | py | Python | scripts/purge_participant_data.py | jakehemmerle/beiwe-backend | fd7765a348a0d497341cf084bb4da0c0805748bd | ["BSD-3-Clause"] | null | null | null | scripts/purge_participant_data.py | jakehemmerle/beiwe-backend | fd7765a348a0d497341cf084bb4da0c0805748bd | ["BSD-3-Clause"] | 67 | 2020-05-22T20:34:52.000Z | 2021-07-28T14:26:57.000Z | scripts/purge_participant_data.py | tx-covid19/beiwe-backend-serverless | b154110c8543e4317d3295c0e8225e9a97b1aad4 | ["BSD-3-Clause"] | 2 | 2020-04-03T23:24:56.000Z | 2020-04-13T14:29:02.000Z |
# python 2-3 compatibility section
try:
input = raw_input
except NameError:
pass
import imp as _imp
import json
from datetime import datetime
from sys import argv
from os.path import abspath as _abspath
from pprint import pprint
# modify python path so that this script can be targeted directly but still import everything.
_current_folder_init = _abspath(__file__).rsplit('/', 1)[0]+ "/__init__.py"
_imp.load_source("__init__", _current_folder_init)
# noinspection PyUnresolvedReferences
from config import load_django
from config.settings import S3_BUCKET
from config.constants import CHUNKS_FOLDER, API_TIME_FORMAT
from database.user_models import Participant
from database.data_access_models import ChunkRegistry
from libs.file_processing import unix_time_to_string
from libs.s3 import s3_list_files, s3_list_versions, conn as s3_conn
UNIX_EPOCH_START = datetime(1970,1,1)
DOCUMENTATION = """
This script takes a single command line argument, a file path pointing at a file containing json.
The JSON must look like this:
{
"username_1": "2019-08-24",
"username_2": "2019-08-25"
}
That is a dictionary of usernames mapped to a date in unambiguous "YEAR-MONTH-DAY" format.
You can also supply an argument "-y" to skip the confirmation if you intend to run this in the background.
""".strip()
print() # just a blank line
def humanize_date(date_string):
""" returns dates as in this form: 'August 24 2019' """
return convert_date(date_string).strftime("%B %d %Y")
def convert_date(date_string):
""" transforms the canonical date strings into dates """
try:
return datetime.strptime(date_string, "%Y-%m-%d")
except ValueError as e:
print("woops, the date '%s' is not in YEAR-MONTH-DAY format." % date_string)
exit(1)
def setup():
# determine whether "-y" was provided on the command line
try:
argv.pop(argv.index('-y'))
skip_confirmation = True
except ValueError:
skip_confirmation = False
# basic sanity check
if len(argv) != 2:
print(DOCUMENTATION)
print("you provided %s argument(s)" % len(argv))
print()
exit(1)
else:
try:
with open(argv[1], 'r') as file:
file_json = json.load(file)
except ValueError as e:
print("woops, looks like there is a syntax issue in your JSON, the following error was encountered:")
print(e)
print()
exit(1)
# sort deletees by date.
sorted_data = sorted(file_json.items(), key=lambda x: x[1])
    # test that all the participants exist, exit if they don't
all_patients_exist = True
for patient_id, _ in sorted_data:
if not Participant.objects.filter(patient_id=patient_id).exists():
all_patients_exist = False
print("Participant '%s' does not exist." % patient_id)
if not all_patients_exist:
exit(1)
# print out info for confirmation
for participant_name, date in sorted_data:
print(participant_name, "--", humanize_date(date))
# force user to confirm
if not skip_confirmation:
print()
msg = input(
"I hereby confirm that I want to irreparably delete all data for the above users starting on the day listed. (y/n)\n"
)
if not msg.lower() == "y":
print("Exiting...")
print()
exit(0)
return sorted_data
# delete chunk registries
def delete_chunk_registries(sorted_data):
print()
for patient_id, date in sorted_data:
print("removing ChunkRegistry data for %s..." % patient_id)
date = convert_date(date)
participant = Participant.objects.filter(patient_id=patient_id)
ChunkRegistry.objects.filter(participant=participant, time_bin__gte=date).delete()
def assemble_deletable_files(sorted_data):
deletable_file_paths = []
for patient_id, expunge_start_date in sorted_data:
participant = Participant.objects.get(patient_id=patient_id)
# technically it is a datetime object
expunge_start_date = convert_date(expunge_start_date)
expunge_start_unix_timestamp = int((expunge_start_date - UNIX_EPOCH_START).total_seconds()) * 1000
prefix = str(participant.study.object_id) + "/" + patient_id + "/"
s3_files = s3_list_files(prefix, as_generator=True)
chunks_prefix = CHUNKS_FOLDER + "/" + prefix
s3_chunks_files = s3_list_files(chunks_prefix, as_generator=True)
raw_files = assemble_raw_files(s3_files, expunge_start_unix_timestamp)
chunked_files = assemble_chunked_files(s3_chunks_files, expunge_start_date)
print(
patient_id,
"timestamp: %s, (unixtime: %s): %s files" %
(expunge_start_date, expunge_start_unix_timestamp/1000, len(raw_files) + len(chunked_files))
)
deletable_file_paths.extend(raw_files)
deletable_file_paths.extend(chunked_files)
return deletable_file_paths
def assemble_raw_files(s3_file_paths, expunge_timestamp):
ret = []
for file_path in s3_file_paths:
        # there may be some corrupt file paths that have _ instead of /
extracted_timestamp_str = file_path.replace("_", "/").rsplit("/", 1)[1][:-4]
extracted_timestamp_int = int(extracted_timestamp_str)
if len(extracted_timestamp_str) == 10:
extracted_timestamp_int = extracted_timestamp_int * 1000
if expunge_timestamp <= extracted_timestamp_int:
ret.append(file_path)
return ret
def assemble_chunked_files(s3_chunks_files, expunge_start_date):
ret = []
for file_path in s3_chunks_files:
        # there may be some corrupt file paths that have _ instead of /
extracted_timestamp_str = file_path.replace("_", "/").rsplit("/", 1)[1][:-4]
extracted_dt = datetime.strptime(extracted_timestamp_str, API_TIME_FORMAT)
if expunge_start_date < extracted_dt:
ret.append(file_path)
return ret
def delete_versions(files_to_delete):
print("Deleting many files, this could take a while...")
for s3_file_path in files_to_delete:
file_args = s3_list_versions(s3_file_path)
print(
"Deleting %s version(s) of %s with the following VersionIds: %s" %
(len(file_args), s3_file_path, ", ".join([f['VersionId'] for f in file_args]) )
)
delete_args = {
"Bucket": S3_BUCKET,
"Delete": {
'Objects': file_args,
'Quiet': False,
},
}
s3_conn.delete_objects(**delete_args)
setup_data = setup()
delete_chunk_registries(setup_data)
print("\nAssembling the files to delete...")
deletable_files = assemble_deletable_files(setup_data)
delete_versions(deletable_files)
| 32.818182 | 129 | 0.675609 |
458fe67bbd87b47d7f491e3094ffd0b215e8a58d | 10,500 | py | Python | sympy/core/tests/test_functions.py | fperez/sympy | 7d8d096215c8f65ba1d4a9c09af78ec0c3844518 | ["BSD-3-Clause"] | null | null | null | sympy/core/tests/test_functions.py | fperez/sympy | 7d8d096215c8f65ba1d4a9c09af78ec0c3844518 | ["BSD-3-Clause"] | null | null | null | sympy/core/tests/test_functions.py | fperez/sympy | 7d8d096215c8f65ba1d4a9c09af78ec0c3844518 | ["BSD-3-Clause"] | 1 | 2021-11-10T06:39:41.000Z | 2021-11-10T06:39:41.000Z |
from sympy import Lambda, Symbol, Function, WildFunction, Derivative, sqrt, \
log, exp, Rational, Real, sign, Basic, sin, cos, diff, I, re, im, \
oo, zoo, nan, E, expand, pi, raises, O, Sum
from sympy.utilities.pytest import XFAIL
from sympy.abc import x, y
from sympy.core.function import PoleError
def test_log():
assert log(2) > 0
assert log(1).is_zero
assert log(0.5).is_negative == True
def test_exp_log():
x = Symbol("x", real=True)
assert log(exp(x)) == x
assert exp(log(x)) == x
def test_log_expansion():
x = Symbol("x", positive=True)
y = Symbol("y", positive=True)
# ok in interactive, fails in py.test
#assert log(x*y) != log(x)+log(y)
#assert log(x**2) != 2*log(x)
assert log(x*y).expand() == log(x)+log(y)
assert log(x**2).expand() == 2*log(x)
assert (log(x**-5)**-1).expand() == -1/log(x)/5
def test_log_hashing_bug():
x = Symbol("y")
assert x != log(log(x))
assert hash(x) != hash(log(log(x)))
assert log(x) != log(log(log(x)))
e = 1/log(log(x)+log(log(x)))
assert e.base.func is log
e = 1/log(log(x)+log(log(log(x))))
assert e.base.func is log
x = Symbol("x")
e = log(log(x))
assert e.func is log
assert not x.func is log
assert hash(log(log(x))) != hash(x)
assert e != x
def test_sign():
assert sign(log(2)) == 1
def test_exp_bug():
x = Symbol("x")
assert exp(1*log(x)) == x
def test_exp_expand():
x = Symbol("x")
y = Symbol("y")
e = exp(log(Rational(2))*(1+x)-log(Rational(2))*x)
assert e.expand() == 2
assert exp(x+y) != exp(x)*exp(y)
assert exp(x+y).expand() == exp(x)*exp(y)
def test_f_expand_complex():
f = Function('f')
x = Symbol('x', real=True)
z = Symbol('z')
assert f(x).expand(complex=True) == I*im(f(x)) + re(f(x))
assert exp(x).expand(complex=True) == exp(x)
assert exp(I*x).expand(complex=True) == cos(x) + I*sin(x)
assert exp(z).expand(complex=True) == cos(im(z))*exp(re(z)) + \
I*sin(im(z))*exp(re(z))
def test_bug1():
x = Symbol("x")
w = Symbol("w")
e = sqrt(-log(w))
assert e.subs(log(w),-x) == sqrt(x)
e = sqrt(-5*log(w))
assert e.subs(log(w),-x) == sqrt(5*x)
def test_general_function():
nu = Function('nu', nargs=1)
x = Symbol("x")
y = Symbol("y")
e = nu(x)
edx = e.diff(x)
edy = e.diff(y)
edxdx = e.diff(x).diff(x)
edxdy = e.diff(x).diff(y)
assert e == nu(x)
assert edx != nu(x)
assert edx == diff(nu(x), x)
assert edy == 0
assert edxdx == diff(diff(nu(x), x), x)
assert edxdy == 0
def test_function_nargs():
f = Function('f')
x = Symbol('x')
assert f.nargs == None
assert f(x).nargs == 1
assert f(x, x, x, x).nargs == 4
def test_derivative_subs_bug():
x = Symbol("x y")
l = Function('l', nargs=1)
n = Function('n', nargs=1)
e = diff(n(x), x)
assert e.subs(n(x), l(x)) != e
assert e.subs(n(x), l(x)) == diff(l(x), x)
assert e.subs(n(x), -l(x)) == diff(-l(x), x)
assert e.subs(x, y) == diff(n(y), y)
def test_derivative_subs_self_bug():
f = Function('f')
d = diff(f(x), x)
assert d.subs(d, y) == y
def test_derivative_linearity():
x = Symbol("x")
y = Symbol("y")
n = Function('n', nargs=1)
assert diff(-n(x), x) == -diff(n(x), x)
assert diff(8*n(x), x) == 8*diff(n(x), x)
assert diff(8*n(x), x) != 7*diff(n(x), x)
assert diff(8*n(x)*x, x) == 8*n(x) + 8*x*diff(n(x), x)
assert diff(8*n(x)*y*x, x) == 8*y*n(x) + 8*y*x*diff(n(x), x)
def test_derivative_evaluate():
x = Symbol('x')
assert Derivative(sin(x), x) != diff(sin(x), x)
assert Derivative(sin(x), x).doit() == diff(sin(x), x)
f = Function('f')
assert Derivative(Derivative(f(x), x), x) == diff(f(x), x, x)
assert Derivative(sin(x), x, 0) == sin(x)
def test_diff_symbols():
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
f = Function('f')
g = Function('g')
assert diff(f(x, y, z), x, y, z) == Derivative(f(x, y, z), x, y, z)
assert diff(f(x, y, z), x, x, x) == Derivative(f(x, y, z), x, x, x)
assert diff(f(x, y, z), x, 3) == Derivative(f(x, y, z), x, 3)
assert diff([f(x, y, z), g(x, y, z)], [x, y, z, (x, x), (y, 2), (z, 3),
(x, y, z, 2), (x, x, x)]) == \
[[Derivative(f(x, y, z), x), Derivative(f(x, y, z), y),
Derivative(f(x, y, z), z), Derivative(f(x, y, z), x, x),
Derivative(f(x, y, z), y, y), Derivative(f(x, y, z), z, z, z),
Derivative(f(x, y, z), x, y, z, z), Derivative(f(x, y, z), x, x, x)],
[Derivative(g(x, y, z), x), Derivative(g(x, y, z), y),
Derivative(g(x, y, z), z), Derivative(g(x, y, z), x, x),
Derivative(g(x, y, z), y, y), Derivative(g(x, y, z), z, z, z),
Derivative(g(x, y, z), x, y, z, z), Derivative(g(x, y, z), x, x, x)]]
@XFAIL
def test_combine():
# XXX combine no longer exists
x = Symbol("x")
y = Symbol("y")
assert exp(x)*exp(-x) != 1
assert (exp(x)*exp(-x)).combine() == 1
assert exp(x)**2 != exp(2*x)
assert (exp(x)**2).combine() == exp(2*x)
assert exp(x)*exp(-x/2)*exp(-x/2) != 1
assert (exp(x)*exp(-x/2)*exp(-x/2)).combine() == 1
assert (2*log(x)).combine() == log(x**2)
assert exp(2*log(x)) != x**2
assert exp(2*log(x)).combine() == x**2
assert exp(x)*exp(-x)-1 !=0
assert (exp(x)*exp(-x)-1).combine() == 0
assert (2*exp(x)*exp(-x)).combine() == 2
assert (x/exp(x)*exp(-x)).combine() == x*exp(-2*x)
def test_Lambda():
e = Lambda(x, x**2)
f = Function('f')
assert e(4) == 16
assert e(x) == x**2
assert e(y) == y**2
assert Lambda(x, x**2) == Lambda(x, x**2)
assert Lambda(x, x**2) == Lambda(y, y**2)
assert Lambda(x, x**2) != Lambda(y, y**2+1)
assert Lambda(x,y,x**y) == Lambda(y,x,y**x)
assert Lambda(x,y,x**y) != Lambda(x,y,y**x)
assert Lambda(x,y,x**y)(x,y) == x**y
assert Lambda(x,y,x**y)(x) == Lambda(y,x**y)
assert Lambda(x,y,x**y)(x)(y) == x**y
assert Lambda(x,y,x**y)(x)(3) == x**3
assert Lambda(x,y,x**y)(3)(y) == 3**y
assert Lambda(x,y,x**y)(3)(3) == 3**3
assert Lambda(x,y,x**y)(3,3) == 3**3
assert Lambda(x,y,x**y)(x,3) == x**3
assert Lambda(x,y,x**y)(3,y) == 3**y
assert Lambda(x,f(x))(x) == f(x)
assert Lambda(x,f(x))() == Lambda(x,f(x))
assert Lambda(x,x**2)(e(x)) == x**4
assert e(e(x)) == x**4
assert Lambda(x,y,f(x)+f(y))(x) == Lambda(y,f(x)+f(y))
#doesn't work yet:
#class F(Function):
# pass
#assert Lambda(x, F(x)) == F
assert Lambda(x, y, x+y).nargs == 2
z = Symbol('z')
t = Symbol('t')
p = x, y, z, t
assert Lambda(p, t*(x+y+z))(*p) == t * (x + y + z)
def test_expand_function():
assert expand(x+y) == x + y
assert expand(x+y, complex=True) == I*im(x) + I*im(y) + re(x) + re(y)
def test_function_comparable():
x = Symbol('x')
assert sin(x).is_comparable == False
assert cos(x).is_comparable == False
assert sin(Real('0.1')).is_comparable == True
assert cos(Real('0.1')).is_comparable == True
assert sin(E).is_comparable == True
assert cos(E).is_comparable == True
assert sin(Rational(1,3)).is_comparable == True
assert cos(Rational(1,3)).is_comparable == True
@XFAIL
def test_function_comparable_fail():
x = Symbol('x')
assert sin(oo).is_comparable == False
assert sin(-oo).is_comparable == False
assert sin(zoo).is_comparable == False
assert sin(nan).is_comparable == False
def test_deriv1():
f=Function('f')
g=Function('g')
x = Symbol('x')
assert f(g(x)).diff(x) == Derivative(f(g(x)), g(x)) * Derivative(g(x), x)
def test_deriv2():
f=Function('f')
g=Function('g')
x = Symbol('x')
assert f(x).diff(x) == Derivative(f(x), x)
assert f(2*x).diff(x) == 2*Derivative(f(2*x), 2*x)
assert (f(x)**3).diff(x) == 3*f(x)**2*f(x).diff(x)
assert (f(2*x)**3).diff(x) == 6*f(2*x)**2*Derivative(f(2*x), 2*x)
assert f(2+x).diff(x) == Derivative(f(2+x), 2+x)
assert f(2+3*x).diff(x) == 3*Derivative(f(2+3*x), 2+3*x)
assert f(sin(x)).diff(x) == Derivative(f(sin(x)), sin(x)) * cos(x)
assert f(3*sin(x)).diff(x) == 3*Derivative(f(3*sin(x)), 3*sin(x)) * cos(x)
def test_deriv3():
f=Function('f')
g=Function('g')
x = Symbol('x')
assert (x**3).diff(x) == 3*x**2
assert (x**3).diff(x, evaluate=False) != 3*x**2
assert (x**3).diff(x, evaluate=False) == Derivative(x**3, x)
assert diff(x**3, x) == 3*x**2
assert diff(x**3, x, evaluate=False) != 3*x**2
assert diff(x**3, x, evaluate=False) == Derivative(x**3, x)
def test_suppressed_evaluation():
a = sin(0, evaluate=False)
assert a != 0
assert a.func is sin
assert a.args == (0,)
def test_function_evalf():
def eq(a,b,eps):
return abs(a-b) < eps
assert eq(sin(1).evalf(15), Real("0.841470984807897"), 1e-13)
assert eq(sin(2).evalf(25), Real("0.9092974268256816953960199",25), 1e-23)
assert eq(sin(1+I).evalf(15), Real("1.29845758141598") + Real("0.634963914784736")*I, 1e-13)
assert eq(exp(1+I).evalf(15), Real("1.46869393991588") + Real("2.28735528717884239")*I, 1e-13)
assert eq(exp(-0.5+1.5*I).evalf(15), Real("0.0429042815937374") + Real("0.605011292285002")*I, 1e-13)
assert eq(log(pi+sqrt(2)*I).evalf(15), Real("1.23699044022052") + Real("0.422985442737893")*I, 1e-13)
assert eq(cos(100).evalf(15), Real("0.86231887228768"), 1e-13)
def test_extensibility_eval():
class MyFunc(Function):
@classmethod
def eval(cls, *args):
return (0,0,0)
assert MyFunc(0) == (0,0,0)
def test_function_non_commutative():
x = Symbol('x', commutative=False)
f = Function('f')
assert f(x).is_commutative == False
assert sin(x).is_commutative == False
assert exp(x).is_commutative == False
assert log(x).is_commutative == False
def test_function__eval_nseries():
x = Symbol('x')
assert sin(x)._eval_nseries(x,0,2) == x + O(x**2)
assert sin(x+1)._eval_nseries(x,0,2) == x*cos(1) + sin(1) + O(x**2)
assert sin(pi*(1-x))._eval_nseries(x,0,2) == pi*x + O(x**2)
raises(PoleError, 'sin(1/x)._eval_nseries(x,0,2)')
def test_doit():
n = Symbol('n', integer = True)
f = Sum(2 * n * x, (n, 1, 3))
d = Derivative(f, x)
assert d.doit() == 12
assert d.doit(deep = False) == d
| 30.523256 | 105 | 0.544 |
af14d4c5be099018263b5d35fc0360bbace84777 | 4,137 | py | Python | hotbox_designer/templates.py | dgirondi/hotbox_designer | 2fb3f4ee01662d7607888d3f43721c5c0123068f | ["BSD-3-Clause-Clear"] | null | null | null | hotbox_designer/templates.py | dgirondi/hotbox_designer | 2fb3f4ee01662d7607888d3f43721c5c0123068f | ["BSD-3-Clause-Clear"] | null | null | null | hotbox_designer/templates.py | dgirondi/hotbox_designer | 2fb3f4ee01662d7607888d3f43721c5c0123068f | ["BSD-3-Clause-Clear"] | null | null | null |
# coding=utf-8
SQUARE_BUTTON = {
'shape': 'square', # or round
'shape.left': 0.0,
'shape.top': 0.0,
'shape.width': 120.0,
'shape.height': 25.0,
'border': True,
'borderwidth.normal': 1.0,
'borderwidth.hovered': 1.25,
'borderwidth.clicked': 2,
'bordercolor.normal': '#000000',
'bordercolor.hovered': '#393939',
'bordercolor.clicked': '#FFFFFF',
'bordercolor.transparency': 0,
'bgcolor.normal': '#888888',
'bgcolor.hovered': '#AAAAAA',
'bgcolor.clicked': '#DDDDDD',
'bgcolor.transparency': 0,
'text.content': 'Button',
'text.size': 12,
'text.bold': False,
'text.italic': False,
'text.color': '#FFFFFF',
'text.valign': 'center', # or 'top' or bottom
'text.halign': 'center', # or 'left' or 'right'
'action.left': True,
'action.left.close': False,
'action.left.language': 'python', # or mel
'action.left.command': '',
'action.right': False,
'action.right.close': False,
'action.right.language': 'python', # or mel
'action.right.command': '',
'image.path': '',
'image.fit': True,
'image.height': 32,
'image.width': 32}
TEXT = {
'shape': 'square', # or round
'shape.left': 0.0,
'shape.top': 0.0,
'shape.width': 200.0,
'shape.height': 50.0,
'border': False,
'borderwidth.normal': 0,
'borderwidth.hovered': 0,
'borderwidth.clicked': 0,
'bordercolor.normal': '#000000',
'bordercolor.hovered': '#393939',
'bordercolor.clicked': '#FFFFFF',
'bordercolor.transparency': 0,
'bgcolor.normal': '#888888',
'bgcolor.hovered': '#AAAAAA',
'bgcolor.clicked': '#DDDDDD',
'bgcolor.transparency': 255,
'text.content': 'Text',
'text.size': 16,
'text.bold': True,
'text.italic': False,
'text.color': '#FFFFFF',
'text.valign': 'top', # or 'top' or bottom
'text.halign': 'left', # or 'left' or 'right'
'action.left': False,
'action.left.close': False,
'action.left.language': 'python', # or mel
'action.left.command': '',
'action.right': False,
'action.right.close': False,
'action.right.language': 'python', # or mel
'action.right.command': '',
'image.path': '',
'image.fit': False,
'image.height': 32,
'image.width': 32}
BACKGROUND = {
'shape': 'square', # or round
'shape.left': 0.0,
'shape.top': 0.0,
'shape.width': 400.0,
'shape.height': 400.0,
'border': False,
'borderwidth.normal': 0,
'borderwidth.hovered': 0,
'borderwidth.clicked': 0,
'bordercolor.normal': '#888888',
'bordercolor.hovered': '#888888',
'bordercolor.clicked': '#888888',
'bordercolor.transparency': 0,
'bgcolor.normal': '#888888',
'bgcolor.hovered': '#888888',
'bgcolor.clicked': '#888888',
'bgcolor.transparency': 0,
'text.content': '',
'text.size': 12,
'text.bold': False,
'text.italic': False,
'text.color': '#FFFFFF',
'text.valign': 'center', # or 'top' or bottom
'text.halign': 'center', # or 'left' or 'right'
'action.left': False,
'action.left.close': False,
'action.left.language': 'python', # or mel
'action.left.command': '',
'action.right': False,
'action.right.close': False,
'action.right.language': 'python', # or mel
'action.right.command': '',
'image.path': '',
'image.fit': False,
'image.height': 32,
'image.width': 32}
HOTBOX = {
'name': '',
'triggering': 'click only', # or 'click or close',
'aiming': False,
'centerx': 450,
'centery': 300,
'width': 900,
'height': 600,
'submenu': False,
'leaveclose': False
}
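
These templates are plain dictionaries; a minimal usage sketch (assumed, not taken from this module) is to deep-copy one and override individual options rather than mutate the shared constant.

import copy

button_options = copy.deepcopy(SQUARE_BUTTON)
button_options.update({
    'text.content': 'Smooth',                    # hypothetical label
    'action.left.command': 'print("clicked")',   # hypothetical left-click command
})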
| 32.574803 | 59 | 0.505439 |
9233da3c979679e4cf1d5214def6de21f2024e1b | 3,046 | py | Python | chaingreen/rpc/rpc_client.py | todortron/chaingreen-blockchain | 89fe435e5dc87de4a7bb4d64c1ad335d81f24b95 | ["Apache-2.0"] | 1 | 2021-11-12T20:30:23.000Z | 2021-11-12T20:30:23.000Z | chaingreen/rpc/rpc_client.py | morrillup/chaingreen-blockchain | 0b2d008dd10228670decf360d21448a65fce48a4 | ["Apache-2.0"] | 19 | 2021-09-07T08:07:05.000Z | 2022-03-29T08:10:34.000Z | chaingreen/rpc/rpc_client.py | morrillup/chaingreen-blockchain | 0b2d008dd10228670decf360d21448a65fce48a4 | ["Apache-2.0"] | null | null | null |
import asyncio
from ssl import SSLContext
from typing import Dict, List, Optional, Any
import aiohttp
from chaingreen.server.server import NodeType, ssl_context_for_client
from chaingreen.server.ssl_context import private_ssl_ca_paths
from chaingreen.types.blockchain_format.sized_bytes import bytes32
from chaingreen.util.byte_types import hexstr_to_bytes
from chaingreen.util.ints import uint16
class RpcClient:
"""
Client to Chaingreen RPC, connects to a local service. Uses HTTP/JSON, and converts back from
JSON into native python objects before returning. All api calls use POST requests.
Note that this is not the same as the peer protocol, or wallet protocol (which run Chaingreen's
protocol on top of TCP), it's a separate protocol on top of HTTP that provides easy access
to the full node.
"""
url: str
session: aiohttp.ClientSession
closing_task: Optional[asyncio.Task]
ssl_context: Optional[SSLContext]
hostname: str
port: uint16
@classmethod
async def create(cls, self_hostname: str, port: uint16, root_path, net_config):
self = cls()
self.hostname = self_hostname
self.port = port
self.url = f"https://{self_hostname}:{str(port)}/"
self.session = aiohttp.ClientSession()
ca_crt_path, ca_key_path = private_ssl_ca_paths(root_path, net_config)
crt_path = root_path / net_config["daemon_ssl"]["private_crt"]
key_path = root_path / net_config["daemon_ssl"]["private_key"]
self.ssl_context = ssl_context_for_client(ca_crt_path, ca_key_path, crt_path, key_path)
self.closing_task = None
return self
async def fetch(self, path, request_json) -> Any:
async with self.session.post(self.url + path, json=request_json, ssl_context=self.ssl_context) as response:
response.raise_for_status()
res_json = await response.json()
if not res_json["success"]:
raise ValueError(res_json)
return res_json
async def get_connections(self, node_type: Optional[NodeType] = None) -> List[Dict]:
request = {}
if node_type is not None:
request["node_type"] = node_type.value
response = await self.fetch("get_connections", request)
for connection in response["connections"]:
connection["node_id"] = hexstr_to_bytes(connection["node_id"])
return response["connections"]
async def open_connection(self, host: str, port: int) -> Dict:
return await self.fetch("open_connection", {"host": host, "port": int(port)})
async def close_connection(self, node_id: bytes32) -> Dict:
return await self.fetch("close_connection", {"node_id": node_id.hex()})
async def stop_node(self) -> Dict:
return await self.fetch("stop_node", {})
def close(self):
self.closing_task = asyncio.create_task(self.session.close())
async def await_closed(self):
if self.closing_task is not None:
await self.closing_task
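
A minimal usage sketch for RpcClient might look like the following; it is illustrative only, and the hostname, port, root_path and net_config are assumptions that would normally come from the daemon configuration.

import asyncio

async def show_connections(self_hostname, port, root_path, net_config):
    client = await RpcClient.create(self_hostname, port, root_path, net_config)
    try:
        # get_connections() is defined above and returns a list of peer dicts
        for connection in await client.get_connections():
            print(connection["node_id"].hex())
    finally:
        client.close()
        await client.await_closed()

# asyncio.run(show_connections("localhost", 8555, root_path, net_config))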
| 40.078947 | 115 | 0.692055 |
56a70b06df0463b3cd3ceaf20f4f4fcb317af191 | 747 | py | Python | bosch_thermostat_client/operation_mode/nefit_dhw.py | bosch-thermostat/bosch-thermostat-client-python | 07d8cf35f38eea0a5d6063bb49d1a5252428dd9a | ["Apache-2.0"] | 9 | 2021-01-16T16:46:04.000Z | 2022-03-09T11:43:21.000Z | bosch_thermostat_client/operation_mode/nefit_dhw.py | bosch-thermostat/bosch-thermostat-client-python | 07d8cf35f38eea0a5d6063bb49d1a5252428dd9a | ["Apache-2.0"] | 18 | 2020-09-08T07:24:03.000Z | 2021-12-08T21:32:50.000Z | bosch_thermostat_client/operation_mode/nefit_dhw.py | bosch-thermostat/bosch-thermostat-client-python | 07d8cf35f38eea0a5d6063bb49d1a5252428dd9a | ["Apache-2.0"] | 10 | 2020-09-19T21:04:09.000Z | 2022-03-09T11:43:45.000Z |
"""
Operation mode helper for DHW.
"""
from bosch_thermostat_client.const import AUTO, MANUAL, USED, VALUE, ON, OFF
from .base import OperationModeHelper
class NefitDhwOperationModeHelper(OperationModeHelper):
@property
def available_modes(self):
"""Get Bosch operations modes."""
return ["clock", "manual"]
@property
def mode_type(self):
"""Check if operation mode type is manual or auto."""
if self._operation_mode.get(USED, True) != "false":
return super().mode_type
return MANUAL
@property
def current_mode(self):
"""Retrieve current mode of Circuit."""
if self._operation_mode.get(VALUE, OFF) == ON:
return MANUAL
return AUTO
| 27.666667 | 76 | 0.646586 |
0ae7edb4118c23b02ceefcd4dd9630f3cfbf5964 | 421 | py | Python | sleap/nn/data/__init__.py | preeti98/sleap | 203c3a03c0c54f8dab242611d9a8d24595e98081 | ["BSD-3-Clause-Clear"] | 156 | 2020-05-01T18:43:43.000Z | 2022-03-25T10:31:18.000Z | sleap/nn/data/__init__.py | preeti98/sleap | 203c3a03c0c54f8dab242611d9a8d24595e98081 | ["BSD-3-Clause-Clear"] | 299 | 2020-04-20T16:37:52.000Z | 2022-03-31T23:54:48.000Z | sleap/nn/data/__init__.py | preeti98/sleap | 203c3a03c0c54f8dab242611d9a8d24595e98081 | ["BSD-3-Clause-Clear"] | 41 | 2020-05-14T15:25:21.000Z | 2022-03-25T12:44:54.000Z |
from sleap.nn.data import augmentation
from sleap.nn.data import confidence_maps
from sleap.nn.data import instance_centroids
from sleap.nn.data import instance_cropping
from sleap.nn.data import normalization
from sleap.nn.data import pipelines
from sleap.nn.data import providers
from sleap.nn.data import resizing
from sleap.nn.data import utils
from sleap.nn.data import inference
| 35.083333 | 44 | 0.84323 |
ab0c3bfe03b8c41ed68cef3aef6a33c459564933 | 588 | py | Python | edparser/datasets/cws/sighan2005/pku.py | attardi/iwpt-shared-task-2020 | 3a70c42d53716678776afcccf02d896655777353 | ["Apache-2.0"] | 3 | 2020-06-16T12:58:57.000Z | 2021-06-07T21:07:37.000Z | edparser/datasets/cws/sighan2005/pku.py | attardi/iwpt-shared-task-2020 | 3a70c42d53716678776afcccf02d896655777353 | ["Apache-2.0"] | 6 | 2020-06-22T07:46:49.000Z | 2022-02-10T02:22:14.000Z | edparser/datasets/cws/sighan2005/pku.py | attardi/iwpt-shared-task-2020 | 3a70c42d53716678776afcccf02d896655777353 | ["Apache-2.0"] | 2 | 2020-06-27T07:32:43.000Z | 2020-11-10T07:21:03.000Z |
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-12-21 15:42
from edparser.datasets.cws.sighan2005 import SIGHAN2005, make
SIGHAN2005_PKU_DICT = SIGHAN2005 + "#" + "gold/pku_training_words.utf8"
SIGHAN2005_PKU_TRAIN_FULL = SIGHAN2005 + "#" + "training/pku_training.utf8"
SIGHAN2005_PKU_TRAIN = SIGHAN2005 + "#" + "training/pku_training_90.txt"
SIGHAN2005_PKU_VALID = SIGHAN2005 + "#" + "training/pku_training_10.txt"
SIGHAN2005_PKU_TEST_INPUT = SIGHAN2005 + "#" + "testing/pku_test.utf8"
SIGHAN2005_PKU_TEST = SIGHAN2005 + "#" + "gold/pku_test_gold.utf8"
make(SIGHAN2005_PKU_TRAIN)
| 42
| 75
| 0.756803
|
0f4be4a272b38001c39a4c9c16a2e2f7c36506bc
| 6,405
|
py
|
Python
|
lte/gateway/python/magma/pipelined/tests/test_check_quota.py
|
fbcode/magma_old
|
054ef8e079478bda36d2b13b8a88386c6dc94ef2
|
[
"BSD-3-Clause"
] | null | null | null |
lte/gateway/python/magma/pipelined/tests/test_check_quota.py
|
fbcode/magma_old
|
054ef8e079478bda36d2b13b8a88386c6dc94ef2
|
[
"BSD-3-Clause"
] | 6
|
2021-03-31T19:59:59.000Z
|
2022-01-22T12:56:47.000Z
|
lte/gateway/python/magma/pipelined/tests/test_check_quota.py
|
fbcode/magma_old
|
054ef8e079478bda36d2b13b8a88386c6dc94ef2
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Copyright (c) 2019-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
import unittest
import warnings
from concurrent.futures import Future
from lte.protos.mconfig.mconfigs_pb2 import PipelineD
from lte.protos.pipelined_pb2 import SubscriberQuotaUpdate
from lte.protos.subscriberdb_pb2 import SubscriberID
from magma.pipelined.tests.app.start_pipelined import (
TestSetup,
PipelinedController,
)
from magma.pipelined.bridge_util import BridgeTools
from magma.pipelined.tests.pipelined_test_util import (
SnapshotVerifier,
start_ryu_app_thread,
stop_ryu_app_thread,
create_service_manager,
wait_after_send
)
@unittest.skip("Skip test, currenlty flaky")
class UEMacAddressTest(unittest.TestCase):
BRIDGE = 'testing_br'
IFACE = 'testing_br'
BRIDGE_IP = '192.168.130.1'
@classmethod
@unittest.mock.patch('netifaces.ifaddresses',
return_value=[[{'addr': '00:11:22:33:44:55'}]])
@unittest.mock.patch('netifaces.AF_LINK', 0)
def setUpClass(cls, *_):
"""
Starts the thread which launches ryu apps
Create a testing bridge, add a port, setup the port interfaces. Then
launch the ryu apps for testing pipelined. Gets the references
to apps launched by using futures.
"""
super(UEMacAddressTest, cls).setUpClass()
warnings.simplefilter('ignore')
cls.service_manager = create_service_manager([],
['ue_mac', 'arpd', 'check_quota'])
check_quota_controller_reference = Future()
testing_controller_reference = Future()
arp_controller_reference = Future()
test_setup = TestSetup(
apps=[PipelinedController.UEMac,
PipelinedController.Arp,
PipelinedController.CheckQuotaController,
PipelinedController.Testing,
PipelinedController.StartupFlows],
references={
PipelinedController.CheckQuotaController:
check_quota_controller_reference,
PipelinedController.Testing:
testing_controller_reference,
PipelinedController.UEMac:
Future(),
PipelinedController.Arp:
arp_controller_reference,
PipelinedController.StartupFlows:
Future(),
},
config={
'setup_type': 'CWF',
'allow_unknown_arps': False,
'bridge_name': cls.BRIDGE,
'bridge_ip_address': cls.BRIDGE_IP,
'internal_ip_subnet': '192.168.0.0/16',
'ovs_gtp_port_number': 32768,
'has_quota_port': 50001,
'no_quota_port': 50002,
'quota_check_ip': '1.2.3.4',
'local_ue_eth_addr': False,
'clean_restart': True,
},
mconfig=PipelineD(
ue_ip_block='192.168.128.0/24',
),
loop=None,
service_manager=cls.service_manager,
integ_test=False,
)
BridgeTools.create_bridge(cls.BRIDGE, cls.IFACE)
cls.thread = start_ryu_app_thread(test_setup)
cls.check_quota_controller = check_quota_controller_reference.result()
        cls.arp_controller = arp_controller_reference.result()
cls.testing_controller = testing_controller_reference.result()
@classmethod
def tearDownClass(cls):
stop_ryu_app_thread(cls.thread)
BridgeTools.destroy_bridge(cls.BRIDGE)
def test_add_valid_quota_subscriber(self):
"""
        Add quota-state flows for three subscribers
"""
imsi_1 = 'IMSI010000000088888'
imsi_2 = 'IMSI010000111111118'
imsi_3 = 'IMSI010002222222222'
mac_1 = '5e:cc:cc:b1:49:4b'
mac_2 = '5e:a:cc:af:aa:fe'
mac_3 = '5e:bb:cc:aa:aa:fe'
# Add subscriber with UE MAC address
self.check_quota_controller.update_subscriber_quota_state(
[
SubscriberQuotaUpdate(
sid=SubscriberID(id=imsi_1), mac_addr=mac_1,
update_type=SubscriberQuotaUpdate.VALID_QUOTA),
SubscriberQuotaUpdate(
sid=SubscriberID(id=imsi_2), mac_addr=mac_2,
update_type=SubscriberQuotaUpdate.TERMINATE),
SubscriberQuotaUpdate(
sid=SubscriberID(id=imsi_3), mac_addr=mac_3,
update_type=SubscriberQuotaUpdate.TERMINATE),
]
)
snapshot_verifier = SnapshotVerifier(self, self.BRIDGE,
self.service_manager,
include_stats=False)
with snapshot_verifier:
wait_after_send(self.testing_controller)
def test_add_three_subscribers(self):
"""
        Add flows for three subscribers
"""
imsi_1 = 'IMSI010000000088888'
imsi_2 = 'IMSI010000111111118'
imsi_3 = 'IMSI010002222222222'
mac_1 = '5e:cc:cc:b1:49:4b'
mac_2 = '5e:a:cc:af:aa:fe'
mac_3 = '5e:bb:cc:aa:aa:fe'
# Add subscriber with UE MAC address
self.check_quota_controller.update_subscriber_quota_state(
[
SubscriberQuotaUpdate(
sid=SubscriberID(id=imsi_1), mac_addr=mac_1,
update_type=SubscriberQuotaUpdate.NO_QUOTA),
SubscriberQuotaUpdate(
sid=SubscriberID(id=imsi_2), mac_addr=mac_2,
update_type=SubscriberQuotaUpdate.NO_QUOTA),
SubscriberQuotaUpdate(
sid=SubscriberID(id=imsi_3), mac_addr=mac_3,
update_type=SubscriberQuotaUpdate.VALID_QUOTA),
]
)
snapshot_verifier = SnapshotVerifier(self, self.BRIDGE,
self.service_manager,
include_stats=False)
with snapshot_verifier:
wait_after_send(self.testing_controller)
if __name__ == "__main__":
unittest.main()
| 36.6
| 78
| 0.602654
|
83d4adfe24794f4693429dde191826f6ef5eb0a0
| 716
|
py
|
Python
|
Algorithm/Easy/1000+/1200RelativeRanks.py
|
MartinYan623/Lint-Code
|
57d2fa441d6496234615736e3f55d0b71aaa51dc
|
[
"MIT"
] | null | null | null |
Algorithm/Easy/1000+/1200RelativeRanks.py
|
MartinYan623/Lint-Code
|
57d2fa441d6496234615736e3f55d0b71aaa51dc
|
[
"MIT"
] | 1
|
2020-08-08T10:14:53.000Z
|
2020-08-08T10:18:37.000Z
|
Algorithm/Easy/1000+/1200RelativeRanks.py
|
MartinYan623/Lint-Code
|
57d2fa441d6496234615736e3f55d0b71aaa51dc
|
[
"MIT"
] | null | null | null |
class Solution:
"""
@param nums: List[int]
@return: return List[str]
"""
def findRelativeRanks(self, nums):
# write your code here
if len(nums)==1:
return ['Gold Medal']
if len(nums)==2:
if nums[0]>nums[1]:
return ['Gold Medal','Silver Medal']
else:
return ['Silver Medal','Gold Medal']
list1=sorted(nums)
nums[nums.index(list1[-1])]='Gold Medal'
nums[nums.index(list1[-2])]='Silver Medal'
nums[nums.index(list1[-3])]='Bronze Medal'
j=4
for i in range(len(nums)-4,-1,-1):
nums[nums.index(list1[i])]=str(j)
j+=1
return nums
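# Minimal usage sketch (sample scores assumed purely for illustration):
if __name__ == '__main__':
    # prints ['Gold Medal', 'Silver Medal', 'Bronze Medal', '4', '5']
    print(Solution().findRelativeRanks([5, 4, 3, 2, 1]))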
| 31.130435
| 52
| 0.498603
|
01a463bb859fd788260eb517eb14fa7471a2f5a8
| 294
|
py
|
Python
|
venv/lib/python3.9/site-packages/asyncpraw/const.py
|
proxamon/proxabot
|
2f4858ca5a77667ddd167bf2256eecf882369a27
|
[
"MIT"
] | null | null | null |
venv/lib/python3.9/site-packages/asyncpraw/const.py
|
proxamon/proxabot
|
2f4858ca5a77667ddd167bf2256eecf882369a27
|
[
"MIT"
] | 12
|
2021-04-11T19:46:06.000Z
|
2021-06-18T16:08:37.000Z
|
venv/lib/python3.9/site-packages/asyncpraw/const.py
|
EDiasAlberto/proxabot
|
2f4858ca5a77667ddd167bf2256eecf882369a27
|
[
"MIT"
] | null | null | null |
"""Async PRAW constants."""
from .endpoints import API_PATH # noqa: F401
__version__ = "7.2.0"
USER_AGENT_FORMAT = f"{{}} Async PRAW/{__version__}"
MAX_IMAGE_SIZE = 512000
MIN_JPEG_SIZE = 128
MIN_PNG_SIZE = 67
JPEG_HEADER = b"\xff\xd8\xff"
PNG_HEADER = b"\x89\x50\x4e\x47\x0d\x0a\x1a\x0a"
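# Illustrative use of the constants above (the client name "my-bot" is assumed):
#   USER_AGENT_FORMAT.format("my-bot")  ->  "my-bot Async PRAW/7.2.0"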
| 21
| 52
| 0.717687
|
01a04eb0de5eb21618287e76573da12dccd1460a
| 2,027
|
py
|
Python
|
matplotlib_exercise/select-rectangle.py
|
terasakisatoshi/pythonCodes
|
baee095ecee96f6b5ec6431267cdc6c40512a542
|
[
"MIT"
] | null | null | null |
matplotlib_exercise/select-rectangle.py
|
terasakisatoshi/pythonCodes
|
baee095ecee96f6b5ec6431267cdc6c40512a542
|
[
"MIT"
] | null | null | null |
matplotlib_exercise/select-rectangle.py
|
terasakisatoshi/pythonCodes
|
baee095ecee96f6b5ec6431267cdc6c40512a542
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import matplotlib.widgets as widgets
import numpy as np
import matplotlib.pyplot as plt
class MyRectangleSelector(widgets.RectangleSelector):
def release(self, event):
super(MyRectangleSelector, self).release(event)
self.to_draw.set_visible(True)
self.canvas.draw()
def line_select_callback(eclick, erelease):
'eclick and erelease are the press and release events'
x1, y1 = eclick.xdata, eclick.ydata
x2, y2 = erelease.xdata, erelease.ydata
print("(%3.2f, %3.2f) --> (%3.2f, %3.2f)" % (x1, y1, x2, y2))
print(" The button you used were: %s %s" %
(eclick.button, erelease.button))
def toggle_selector(event):
print(' Key pressed.')
if event.key in ['Q', 'q'] and toggle_selector.RS.active:
print(' RectangleSelector deactivated.')
toggle_selector.RS.set_active(False)
if event.key in ['A', 'a'] and not toggle_selector.RS.active:
print(' RectangleSelector activated.')
toggle_selector.RS.set_active(True)
fig, current_ax = plt.subplots()  # make a new plotting range
N = 100000  # If N is large one can see
x = np.linspace(0.0, 10.0, N)  # improvement by using blitting!
plt.plot(x, +np.sin(.2 * np.pi * x), lw=3.5, c='b', alpha=.7) # plot something
plt.plot(x, +np.cos(.2 * np.pi * x), lw=3.5, c='r', alpha=.5)
plt.plot(x, -np.sin(.2 * np.pi * x), lw=3.5, c='g', alpha=.3)
print("\n click --> release")
# drawtype is 'box' or 'line' or 'none'
toggle_selector.RS = MyRectangleSelector(current_ax, line_select_callback,
drawtype='box', useblit=True,
# don't use middle button
button=[1, 3],
minspanx=5, minspany=5,
spancoords='pixels')
plt.connect('key_press_event', toggle_selector)
plt.show()
| 38.245283
| 79
| 0.579181
|
74773933ec98f561a6440108ecfc8852ac5d7ff6
| 3,598
|
py
|
Python
|
libreoffice/sw_layout2svg.py
|
vmiklos/vmexam
|
ff4ef386d3cfd84b8ed06387cbd87b119dd50448
|
[
"MIT"
] | 1
|
2015-02-09T10:21:51.000Z
|
2015-02-09T10:21:51.000Z
|
libreoffice/sw_layout2svg.py
|
vmiklos/vmexam
|
ff4ef386d3cfd84b8ed06387cbd87b119dd50448
|
[
"MIT"
] | null | null | null |
libreoffice/sw_layout2svg.py
|
vmiklos/vmexam
|
ff4ef386d3cfd84b8ed06387cbd87b119dd50448
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Quick&dirty script to turn an 'SW_DEBUG=1 ./soffice' layout dump (produced by pressing F12) into
# an SVG file, which visualizes the relations between the layout frames.
from xml.dom import minidom
import sys
FONT_SIZE = 12
RECTANGLE_STYLE = "stroke: black; fill: none;"
def get_by_name(node, child_name):
return [i for i in node.childNodes if i.localName == child_name][0]
def twip_to_pt(fro):
return fro / 20
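# For reference: 20 twips equal 1 pt, so twip_to_pt(1440) returns 72.0 (one inch).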
def print_text(x, y, text):
print(' <text x="{}pt" y="{}pt" font-size="{}pt" dominant-baseline="hanging">{}</text>'.format(x, y, FONT_SIZE, text))
def print_rect(identifier, left, top, width, height):
print(' <rect id="{}" x="{}pt" y="{}pt" width="{}pt" height="{}pt" style="{}"/>'.format(identifier, left, top, width, height, RECTANGLE_STYLE))
def is_frame_name(frame):
return frame in ("page", "header", "footer", "body", "txt", "fly", "notxt")
def handle_frame(frame):
identifier = ""
symbol = ""
for k, v in list(frame.attributes.items()):
if k == "id":
identifier = v
elif k == "symbol":
symbol = v
infos = get_by_name(frame, "infos")
infos_bounds = get_by_name(infos, "bounds")
left = 0
top = 0
width = 0
height = 0
for k, v in list(infos_bounds.attributes.items()):
if k == "left":
left = twip_to_pt(float(v))
elif k == "top":
top = twip_to_pt(float(v))
elif k == "width":
width = twip_to_pt(float(v))
elif k == "height":
height = twip_to_pt(float(v))
print_rect(identifier, left, top, width, height)
print_text(left, top, "{} {}".format(symbol, identifier))
for child in [i for i in frame.childNodes if is_frame_name(i.localName)]:
handle_frame(child)
anchoreds = [i for i in frame.childNodes if i.localName == "anchored"]
if anchoreds:
anchored = anchoreds[0]
for child in [i for i in anchored.childNodes if is_frame_name(i.localName)]:
handle_frame(child)
def main():
print('<?xml version="1.0"?>')
layout = minidom.parse(sys.argv[1])
root = get_by_name(layout, "root")
identifier = ""
symbol = ""
for k, v in list(root.attributes.items()):
if k == "id":
identifier = v
elif k == "symbol":
symbol = v
root_infos = get_by_name(root, "infos")
root_infos_bounds = get_by_name(root_infos, "bounds")
left = 0
top = 0
width = 0
height = 0
for k, v in list(root_infos_bounds.attributes.items()):
if k == "left":
left = twip_to_pt(float(v))
elif k == "top":
top = twip_to_pt(float(v))
elif k == "width":
width = twip_to_pt(float(v))
elif k == "height":
height = twip_to_pt(float(v))
# Root frame is the bounding box of all pages, canvas size is the same + the margins.
print('<svg width="{}pt" height="{}pt" xmlns="http://www.w3.org/2000/svg">'.format(width + 2 * left, height + 2 * top))
print_rect(identifier, left, top, width, height)
print_text(left, top, "{} {}".format(symbol, identifier))
for page in [i for i in root.childNodes if is_frame_name(i.localName)]:
handle_frame(page)
print('</svg>')
if __name__ == '__main__':
main()
# vim:set shiftwidth=4 softtabstop=4 expandtab:
| 30.752137
| 148
| 0.603669
|
91eabeb0e1ada57c20ecfe9382fc516366a302b2
| 15,179
|
py
|
Python
|
lib/python2.7/site-packages/sklearn/externals/joblib/format_stack.py
|
wfehrnstrom/harmonize
|
e5661d24b2021739e8ac4bf1d3a530eda4e155b3
|
[
"MIT"
] | 6,989
|
2017-07-18T06:23:18.000Z
|
2022-03-31T15:58:36.000Z
|
lib/python2.7/site-packages/sklearn/externals/joblib/format_stack.py
|
wfehrnstrom/harmonize
|
e5661d24b2021739e8ac4bf1d3a530eda4e155b3
|
[
"MIT"
] | 1,978
|
2017-07-18T09:17:58.000Z
|
2022-03-31T14:28:43.000Z
|
lib/python2.7/site-packages/sklearn/externals/joblib/format_stack.py
|
wfehrnstrom/harmonize
|
e5661d24b2021739e8ac4bf1d3a530eda4e155b3
|
[
"MIT"
] | 1,228
|
2017-07-18T09:03:13.000Z
|
2022-03-29T05:57:40.000Z
|
"""
Represent an exception with a lot of information.
Provides 2 useful functions:
format_exc: format an exception into a complete traceback, with full
debugging instructions.
format_outer_frames: format the current position in the stack call.
Adapted from IPython's VerboseTB.
"""
# Authors: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Nathaniel Gray <n8gray@caltech.edu>
# Fernando Perez <fperez@colorado.edu>
# Copyright: 2010, Gael Varoquaux
# 2001-2004, Fernando Perez
# 2001 Nathaniel Gray
# License: BSD 3 clause
import inspect
import keyword
import linecache
import os
import pydoc
import sys
import time
import tokenize
import traceback
try: # Python 2
generate_tokens = tokenize.generate_tokens
except AttributeError: # Python 3
generate_tokens = tokenize.tokenize
INDENT = ' ' * 8
###############################################################################
# some internal-use functions
def safe_repr(value):
"""Hopefully pretty robust repr equivalent."""
# this is pretty horrible but should always return *something*
try:
return pydoc.text.repr(value)
except KeyboardInterrupt:
raise
except:
try:
return repr(value)
except KeyboardInterrupt:
raise
except:
try:
# all still in an except block so we catch
# getattr raising
name = getattr(value, '__name__', None)
if name:
# ick, recursion
return safe_repr(name)
klass = getattr(value, '__class__', None)
if klass:
return '%s instance' % safe_repr(klass)
except KeyboardInterrupt:
raise
except:
return 'UNRECOVERABLE REPR FAILURE'
def eq_repr(value, repr=safe_repr):
return '=%s' % repr(value)
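# Illustrative output (assumed sample value): eq_repr(3) returns '=3', the form
# used below to render "name=value" argument pairs in the formatted call line.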
###############################################################################
def uniq_stable(elems):
"""uniq_stable(elems) -> list
Return from an iterable, a list of all the unique elements in the input,
but maintaining the order in which they first appear.
A naive solution to this problem which just makes a dictionary with the
elements as keys fails to respect the stability condition, since
dictionaries are unsorted by nature.
Note: All elements in the input must be hashable.
"""
unique = []
unique_set = set()
for nn in elems:
if nn not in unique_set:
unique.append(nn)
unique_set.add(nn)
return unique
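# Illustrative behaviour (assumed sample input):
#   uniq_stable([3, 1, 3, 2, 1])  ->  [3, 1, 2]   (first-seen order is preserved)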
###############################################################################
def fix_frame_records_filenames(records):
"""Try to fix the filenames in each record from inspect.getinnerframes().
Particularly, modules loaded from within zip files have useless filenames
attached to their code object, and inspect.getinnerframes() just uses it.
"""
fixed_records = []
for frame, filename, line_no, func_name, lines, index in records:
# Look inside the frame's globals dictionary for __file__, which should
# be better.
better_fn = frame.f_globals.get('__file__', None)
if isinstance(better_fn, str):
# Check the type just in case someone did something weird with
# __file__. It might also be None if the error occurred during
# import.
filename = better_fn
fixed_records.append((frame, filename, line_no, func_name, lines,
index))
return fixed_records
def _fixed_getframes(etb, context=1, tb_offset=0):
LNUM_POS, LINES_POS, INDEX_POS = 2, 4, 5
records = fix_frame_records_filenames(inspect.getinnerframes(etb, context))
# If the error is at the console, don't build any context, since it would
# otherwise produce 5 blank lines printed out (there is no file at the
# console)
rec_check = records[tb_offset:]
try:
rname = rec_check[0][1]
if rname == '<ipython console>' or rname.endswith('<string>'):
return rec_check
except IndexError:
pass
aux = traceback.extract_tb(etb)
assert len(records) == len(aux)
for i, (file, lnum, _, _) in enumerate(aux):
maybeStart = lnum - 1 - context // 2
start = max(maybeStart, 0)
end = start + context
lines = linecache.getlines(file)[start:end]
# pad with empty lines if necessary
if maybeStart < 0:
lines = (['\n'] * -maybeStart) + lines
if len(lines) < context:
lines += ['\n'] * (context - len(lines))
buf = list(records[i])
buf[LNUM_POS] = lnum
buf[INDEX_POS] = lnum - 1 - start
buf[LINES_POS] = lines
records[i] = tuple(buf)
return records[tb_offset:]
def _format_traceback_lines(lnum, index, lines, lvals=None):
numbers_width = 7
res = []
i = lnum - index
for line in lines:
if i == lnum:
# This is the line with the error
pad = numbers_width - len(str(i))
if pad >= 3:
marker = '-' * (pad - 3) + '-> '
elif pad == 2:
marker = '> '
elif pad == 1:
marker = '>'
else:
marker = ''
num = marker + str(i)
else:
num = '%*s' % (numbers_width, i)
line = '%s %s' % (num, line)
res.append(line)
if lvals and i == lnum:
res.append(lvals + '\n')
i = i + 1
return res
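# Illustrative formatting (line number assumed): with numbers_width == 7, a plain
# context line 42 is rendered as '     42 <source>' while the error line itself
# becomes '---> 42 <source>', the arrow marker taking the place of the padding.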
def format_records(records): # , print_globals=False):
# Loop over all records printing context and info
frames = []
abspath = os.path.abspath
for frame, file, lnum, func, lines, index in records:
try:
file = file and abspath(file) or '?'
except OSError:
# if file is '<console>' or something not in the filesystem,
# the abspath call will throw an OSError. Just ignore it and
# keep the original file string.
pass
if file.endswith('.pyc'):
file = file[:-4] + '.py'
link = file
args, varargs, varkw, locals = inspect.getargvalues(frame)
if func == '?':
call = ''
else:
# Decide whether to include variable details or not
try:
call = 'in %s%s' % (func, inspect.formatargvalues(args,
varargs, varkw, locals,
formatvalue=eq_repr))
except KeyError:
# Very odd crash from inspect.formatargvalues(). The
# scenario under which it appeared was a call to
# view(array,scale) in NumTut.view.view(), where scale had
# been defined as a scalar (it should be a tuple). Somehow
# inspect messes up resolving the argument list of view()
# and barfs out. At some point I should dig into this one
# and file a bug report about it.
print("\nJoblib's exception reporting continues...\n")
call = 'in %s(***failed resolving arguments***)' % func
# Initialize a list of names on the current line, which the
# tokenizer below will populate.
names = []
def tokeneater(token_type, token, start, end, line):
"""Stateful tokeneater which builds dotted names.
The list of names it appends to (from the enclosing scope) can
contain repeated composite names. This is unavoidable, since
there is no way to disambiguate partial dotted structures until
the full list is known. The caller is responsible for pruning
the final list of duplicates before using it."""
# build composite names
if token == '.':
try:
names[-1] += '.'
# store state so the next token is added for x.y.z names
tokeneater.name_cont = True
return
except IndexError:
pass
if token_type == tokenize.NAME and token not in keyword.kwlist:
if tokeneater.name_cont:
# Dotted names
names[-1] += token
tokeneater.name_cont = False
else:
# Regular new names. We append everything, the caller
# will be responsible for pruning the list later. It's
# very tricky to try to prune as we go, b/c composite
# names can fool us. The pruning at the end is easy
# to do (or the caller can print a list with repeated
# names if so desired.
names.append(token)
elif token_type == tokenize.NEWLINE:
raise IndexError
# we need to store a bit of state in the tokenizer to build
# dotted names
tokeneater.name_cont = False
def linereader(file=file, lnum=[lnum], getline=linecache.getline):
line = getline(file, lnum[0])
lnum[0] += 1
return line
# Build the list of names on this line of code where the exception
# occurred.
try:
# This builds the names list in-place by capturing it from the
# enclosing scope.
for token in generate_tokens(linereader):
tokeneater(*token)
except (IndexError, UnicodeDecodeError, SyntaxError):
# signals exit of tokenizer
# SyntaxError can happen when trying to tokenize
# a compiled (e.g. .so or .pyd) extension
pass
except tokenize.TokenError as msg:
_m = ("An unexpected error occurred while tokenizing input file %s\n"
"The following traceback may be corrupted or invalid\n"
"The error message is: %s\n" % (file, msg))
print(_m)
# prune names list of duplicates, but keep the right order
unique_names = uniq_stable(names)
# Start loop over vars
lvals = []
for name_full in unique_names:
name_base = name_full.split('.', 1)[0]
if name_base in frame.f_code.co_varnames:
if name_base in locals.keys():
try:
value = safe_repr(eval(name_full, locals))
except:
value = "undefined"
else:
value = "undefined"
name = name_full
lvals.append('%s = %s' % (name, value))
#elif print_globals:
# if frame.f_globals.has_key(name_base):
# try:
# value = safe_repr(eval(name_full,frame.f_globals))
# except:
# value = "undefined"
# else:
# value = "undefined"
# name = 'global %s' % name_full
# lvals.append('%s = %s' % (name,value))
if lvals:
lvals = '%s%s' % (INDENT, ('\n%s' % INDENT).join(lvals))
else:
lvals = ''
level = '%s\n%s %s\n' % (75 * '.', link, call)
if index is None:
frames.append(level)
else:
frames.append('%s%s' % (level, ''.join(
_format_traceback_lines(lnum, index, lines, lvals))))
return frames
###############################################################################
def format_exc(etype, evalue, etb, context=5, tb_offset=0):
""" Return a nice text document describing the traceback.
Parameters
-----------
etype, evalue, etb: as returned by sys.exc_info
context: number of lines of the source file to plot
tb_offset: the number of stack frame not to use (0 = use all)
"""
# some locals
try:
etype = etype.__name__
except AttributeError:
pass
# Header with the exception type, python version, and date
pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
date = time.ctime(time.time())
pid = 'PID: %i' % os.getpid()
head = '%s%s%s\n%s%s%s' % (
etype, ' ' * (75 - len(str(etype)) - len(date)),
date, pid, ' ' * (75 - len(str(pid)) - len(pyver)),
pyver)
# Drop topmost frames if requested
try:
records = _fixed_getframes(etb, context, tb_offset)
except:
raise
print('\nUnfortunately, your original traceback can not be '
'constructed.\n')
return ''
# Get (safely) a string form of the exception info
try:
etype_str, evalue_str = map(str, (etype, evalue))
except:
# User exception is improperly defined.
etype, evalue = str, sys.exc_info()[:2]
etype_str, evalue_str = map(str, (etype, evalue))
# ... and format it
exception = ['%s: %s' % (etype_str, evalue_str)]
frames = format_records(records)
return '%s\n%s\n%s' % (head, '\n'.join(frames), ''.join(exception[0]))
###############################################################################
def format_outer_frames(context=5, stack_start=None, stack_end=None,
ignore_ipython=True):
LNUM_POS, LINES_POS, INDEX_POS = 2, 4, 5
records = inspect.getouterframes(inspect.currentframe())
output = list()
for i, (frame, filename, line_no, func_name, lines, index) \
in enumerate(records):
# Look inside the frame's globals dictionary for __file__, which should
# be better.
better_fn = frame.f_globals.get('__file__', None)
if isinstance(better_fn, str):
# Check the type just in case someone did something weird with
# __file__. It might also be None if the error occurred during
# import.
filename = better_fn
if filename.endswith('.pyc'):
filename = filename[:-4] + '.py'
if ignore_ipython:
# Hack to avoid printing the internals of IPython
if (os.path.basename(filename) == 'iplib.py'
and func_name in ('safe_execfile', 'runcode')):
break
maybeStart = line_no - 1 - context // 2
start = max(maybeStart, 0)
end = start + context
lines = linecache.getlines(filename)[start:end]
# pad with empty lines if necessary
if maybeStart < 0:
lines = (['\n'] * -maybeStart) + lines
if len(lines) < context:
lines += ['\n'] * (context - len(lines))
buf = list(records[i])
buf[LNUM_POS] = line_no
buf[INDEX_POS] = line_no - 1 - start
buf[LINES_POS] = lines
output.append(tuple(buf))
return '\n'.join(format_records(output[stack_end:stack_start:-1]))
| 36.313397
| 81
| 0.542855
|
2488c3efff097d4a8d9680db7b127792179b2a00
| 494
|
py
|
Python
|
src/main/resources/rqmjazz/control/retrieve_test_results.py
|
xebialabs-community/xlr-rqmjazz-plugin
|
d0053be8558ca36937f64487fd2b8d6ca1e45e68
|
[
"MIT"
] | 1
|
2019-07-24T03:25:28.000Z
|
2019-07-24T03:25:28.000Z
|
src/main/resources/rqmjazz/control/retrieve_test_results.py
|
xebialabs-community/xlr-rqmjazz-plugin
|
d0053be8558ca36937f64487fd2b8d6ca1e45e68
|
[
"MIT"
] | null | null | null |
src/main/resources/rqmjazz/control/retrieve_test_results.py
|
xebialabs-community/xlr-rqmjazz-plugin
|
d0053be8558ca36937f64487fd2b8d6ca1e45e68
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2019 XebiaLabs
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
from rqmjazz.core.RQMClient import RQMClient
def process(task_vars):
server = task_vars['server']
client = RQMClient.create_client(server, username = task_vars["username"], password = task_vars["password"])
return client.get_test_results(task_vars['result_url'])
if __name__ == '__main__' or __name__ == '__builtin__':
job_result = process(locals())
| 29.058824
| 112
| 0.736842
|
a6042c2ed4a6ebe41f11ab3554a8e66a1b541af4
| 196
|
py
|
Python
|
Flask_IHome/map_demo_py3.py
|
haitaoss/flask_project
|
84475d035d818382b824d535b55c29dbf61a6162
|
[
"Apache-2.0"
] | null | null | null |
Flask_IHome/map_demo_py3.py
|
haitaoss/flask_project
|
84475d035d818382b824d535b55c29dbf61a6162
|
[
"Apache-2.0"
] | null | null | null |
Flask_IHome/map_demo_py3.py
|
haitaoss/flask_project
|
84475d035d818382b824d535b55c29dbf61a6162
|
[
"Apache-2.0"
] | null | null | null |
li1 = [1, 2, 3, 4]
li2 = [2, 3]
def add(num1, num2):
return num1 + num2
# map(): takes a function plus the iterables that supply its arguments
ret = map(add, li1, li2)
print(ret)  # Python 2 returns a list here; Python 3 returns a map object
print(list(ret))
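# Expected output for the inputs above (map stops at the shorter iterable):
#   <map object at 0x...>
#   [3, 5]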
| 13.066667
| 47
| 0.632653
|
a7f08c710cadce146ffb15d2f45019b005649163
| 6,940
|
py
|
Python
|
sdk/lusid/models/index_model_options_all_of.py
|
slemasne/lusid-sdk-python-preview
|
94a97951ec2052bc1672b7be21e52ad2fcf6eea0
|
[
"MIT"
] | null | null | null |
sdk/lusid/models/index_model_options_all_of.py
|
slemasne/lusid-sdk-python-preview
|
94a97951ec2052bc1672b7be21e52ad2fcf6eea0
|
[
"MIT"
] | null | null | null |
sdk/lusid/models/index_model_options_all_of.py
|
slemasne/lusid-sdk-python-preview
|
94a97951ec2052bc1672b7be21e52ad2fcf6eea0
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.3725
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from lusid.configuration import Configuration
class IndexModelOptionsAllOf(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
'portfolio_scaling': 'str',
'model_options_type': 'str'
}
attribute_map = {
'portfolio_scaling': 'portfolioScaling',
'model_options_type': 'modelOptionsType'
}
required_map = {
'portfolio_scaling': 'required',
'model_options_type': 'required'
}
def __init__(self, portfolio_scaling=None, model_options_type=None, local_vars_configuration=None): # noqa: E501
"""IndexModelOptionsAllOf - a model defined in OpenAPI"
:param portfolio_scaling: The available values are: Sum, AbsoluteSum, Unity (required)
:type portfolio_scaling: str
:param model_options_type: The available values are: Invalid, OpaqueModelOptions, EmptyModelOptions, IndexModelOptions (required)
:type model_options_type: str
""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._portfolio_scaling = None
self._model_options_type = None
self.discriminator = None
self.portfolio_scaling = portfolio_scaling
self.model_options_type = model_options_type
@property
def portfolio_scaling(self):
"""Gets the portfolio_scaling of this IndexModelOptionsAllOf. # noqa: E501
The available values are: Sum, AbsoluteSum, Unity # noqa: E501
:return: The portfolio_scaling of this IndexModelOptionsAllOf. # noqa: E501
:rtype: str
"""
return self._portfolio_scaling
@portfolio_scaling.setter
def portfolio_scaling(self, portfolio_scaling):
"""Sets the portfolio_scaling of this IndexModelOptionsAllOf.
The available values are: Sum, AbsoluteSum, Unity # noqa: E501
:param portfolio_scaling: The portfolio_scaling of this IndexModelOptionsAllOf. # noqa: E501
:type portfolio_scaling: str
"""
if self.local_vars_configuration.client_side_validation and portfolio_scaling is None: # noqa: E501
raise ValueError("Invalid value for `portfolio_scaling`, must not be `None`") # noqa: E501
allowed_values = ["Sum", "AbsoluteSum", "Unity"] # noqa: E501
if self.local_vars_configuration.client_side_validation and portfolio_scaling not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `portfolio_scaling` ({0}), must be one of {1}" # noqa: E501
.format(portfolio_scaling, allowed_values)
)
self._portfolio_scaling = portfolio_scaling
@property
def model_options_type(self):
"""Gets the model_options_type of this IndexModelOptionsAllOf. # noqa: E501
The available values are: Invalid, OpaqueModelOptions, EmptyModelOptions, IndexModelOptions # noqa: E501
:return: The model_options_type of this IndexModelOptionsAllOf. # noqa: E501
:rtype: str
"""
return self._model_options_type
@model_options_type.setter
def model_options_type(self, model_options_type):
"""Sets the model_options_type of this IndexModelOptionsAllOf.
The available values are: Invalid, OpaqueModelOptions, EmptyModelOptions, IndexModelOptions # noqa: E501
:param model_options_type: The model_options_type of this IndexModelOptionsAllOf. # noqa: E501
:type model_options_type: str
"""
if self.local_vars_configuration.client_side_validation and model_options_type is None: # noqa: E501
raise ValueError("Invalid value for `model_options_type`, must not be `None`") # noqa: E501
allowed_values = ["Invalid", "OpaqueModelOptions", "EmptyModelOptions", "IndexModelOptions"] # noqa: E501
if self.local_vars_configuration.client_side_validation and model_options_type not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `model_options_type` ({0}), must be one of {1}" # noqa: E501
.format(model_options_type, allowed_values)
)
self._model_options_type = model_options_type
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, IndexModelOptionsAllOf):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, IndexModelOptionsAllOf):
return True
return self.to_dict() != other.to_dict()
| 36.335079
| 138
| 0.636599
|
d137400fc2dc582765d3dea5abfc2723d6105531
| 880
|
py
|
Python
|
vivisect/impapi/windows/ws2plus_64.py
|
4k4xs4pH1r3/vivisect
|
dea425eed796176309a3be46936eb682598271aa
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
vivisect/impapi/windows/ws2plus_64.py
|
4k4xs4pH1r3/vivisect
|
dea425eed796176309a3be46936eb682598271aa
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
vivisect/impapi/windows/ws2plus_64.py
|
4k4xs4pH1r3/vivisect
|
dea425eed796176309a3be46936eb682598271aa
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# APIs for Windows 64-bit libraries ws2_32, mswsock, wsock32, and wininet.
# Built as a delta from the 32-bit version.
# Format: retval, rettype, callconv, exactname, arglist(type, name)
# arglist type is one of ['int', 'void *']
# arglist name is one of [None, 'funcptr', 'obj', 'ptr']
# List the normalized name of any 32-bit functions to omit.
api_32_omits = []
# Define any functions specific to 64-bit.
api_64_adds = {
}
# Build from the 32-bit API, skipping omits, changing the calling convention,
# and adding any specific 64-bit functions.
api_defs_64 = {}
import vivisect.impapi.windows.ws2plus_32 as m32
for name in m32.api_defs.iterkeys():
if name in api_32_omits:
continue
(rtype,rname,cconv,cname,cargs) = m32.api_defs[name]
api_defs_64[name] = (rtype, rname, 'msx64call', cname, cargs)
api_defs_64.update(api_64_adds)
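# Illustrative effect (entry values assumed): a 32-bit definition such as
#   ('int', None, 'stdcall', 'WSAStartup', [('int', None), ('void *', 'ptr')])
# is copied into api_defs_64 with only the calling convention swapped to
# 'msx64call'; every other field is carried over unchanged.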
| 33.846154
| 77
| 0.703409
|
7fcff5d1c6a3ec83abb35209620bc08f133c0429
| 2,213
|
py
|
Python
|
blazar_dashboard/content/hosts/forms.py
|
stackhpc/blazar-dashboard
|
c33709700f0c2ccfe89341f22cb608a785216699
|
[
"Apache-2.0"
] | 7
|
2017-10-31T10:09:19.000Z
|
2019-01-28T21:52:45.000Z
|
blazar_dashboard/content/hosts/forms.py
|
stackhpc/blazar-dashboard
|
c33709700f0c2ccfe89341f22cb608a785216699
|
[
"Apache-2.0"
] | 1
|
2017-10-24T20:48:23.000Z
|
2017-10-24T21:09:32.000Z
|
blazar_dashboard/content/hosts/forms.py
|
stackhpc/blazar-dashboard
|
c33709700f0c2ccfe89341f22cb608a785216699
|
[
"Apache-2.0"
] | 5
|
2017-08-30T16:11:21.000Z
|
2021-05-31T14:32:38.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from blazar_dashboard import api
LOG = logging.getLogger(__name__)
class UpdateForm(forms.SelfHandlingForm):
class Meta(object):
name = _('Update Host Parameters')
host_id = forms.CharField(
label=_('Host ID'), widget=forms.widgets.HiddenInput, required=True)
values = forms.CharField(
label=_("Values to Update"),
required=True,
help_text=_('Enter values to update in JSON'),
widget=forms.Textarea(
attrs={'rows': 5}),
max_length=511)
def handle(self, request, data):
try:
api.client.host_update(self.request, host_id=data.get('host_id'),
values=data.get('values'))
messages.success(request, _("Host was successfully updated."))
return True
except Exception as e:
LOG.error('Error updating host: %s', e)
exceptions.handle(request,
message="An error occurred while updating this"
" host. Please try again.")
def clean(self):
cleaned_data = super(UpdateForm, self).clean()
values = cleaned_data.get('values')
try:
values = json.loads(values)
cleaned_data['values'] = values
except json.JSONDecodeError:
raise forms.ValidationError(
_('Values must be written in JSON')
)
return cleaned_data
| 33.530303
| 78
| 0.630366
|
69ad62843f399d2a031c7f2d474e1e99fafca09a
| 1,282
|
py
|
Python
|
yowsup/layers/protocol_media/protocolentities/test_message_media_downloadable_video.py
|
zulu494/Anoa-Bot-
|
6666b0257d0c5bd2ce57c473078c4059ecd0fecd
|
[
"MIT"
] | 1
|
2021-09-11T13:38:47.000Z
|
2021-09-11T13:38:47.000Z
|
yowsup/layers/protocol_media/protocolentities/test_message_media_downloadable_video.py
|
enigma-chan/Anti-Hoax-Bot
|
6666b0257d0c5bd2ce57c473078c4059ecd0fecd
|
[
"MIT"
] | null | null | null |
yowsup/layers/protocol_media/protocolentities/test_message_media_downloadable_video.py
|
enigma-chan/Anti-Hoax-Bot
|
6666b0257d0c5bd2ce57c473078c4059ecd0fecd
|
[
"MIT"
] | null | null | null |
from yowsup.layers.protocol_media.protocolentities.message_media_downloadable_video \
import VideoDownloadableMediaMessageProtocolEntity
from yowsup.layers.protocol_messages.proto.e2e_pb2 import Message
from .test_message_media import MediaMessageProtocolEntityTest
class VideoDownloadableMediaMessageProtocolEntityTest(MediaMessageProtocolEntityTest):
def setUp(self):
super(VideoDownloadableMediaMessageProtocolEntityTest, self).setUp()
self.ProtocolEntity = VideoDownloadableMediaMessageProtocolEntity
proto_node = self.node.getChild("proto")
m = Message()
media_message = Message.VideoMessage()
media_message.url = "url"
media_message.mimetype = "video/mp4"
media_message.caption = "caption"
media_message.file_sha256 = b"shaval"
media_message.file_length = 4
media_message.width = 1
media_message.height = 2
media_message.seconds = 3
media_message.media_key = b"MEDIA_KEY"
media_message.jpeg_thumbnail = b"THUMBNAIL"
media_message.gif_attribution = 0
media_message.gif_playback = False
media_message.streaming_sidecar = b''
m.video_message.MergeFrom(media_message)
proto_node.setData(m.SerializeToString())
| 44.206897
| 86
| 0.74181
|
e029f77b89eb3ff6ee7f7a6ad38c7d54ad772544
| 407
|
py
|
Python
|
Pensive/asgi.py
|
ajra7/Pensive
|
cffd14267aec21e70c99e16b55c961107605f5db
|
[
"Apache-2.0"
] | null | null | null |
Pensive/asgi.py
|
ajra7/Pensive
|
cffd14267aec21e70c99e16b55c961107605f5db
|
[
"Apache-2.0"
] | null | null | null |
Pensive/asgi.py
|
ajra7/Pensive
|
cffd14267aec21e70c99e16b55c961107605f5db
|
[
"Apache-2.0"
] | null | null | null |
"""
ASGI config for Pensive project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Pensive.settings')
application = get_asgi_application()
| 23.941176
| 79
| 0.7543
|
d661750cffb15a4b4a9159c3979cd55d1614d9f9
| 881
|
py
|
Python
|
var/spack/repos/builtin/packages/py-alabaster/package.py
|
msimberg/spack
|
27a339eeb28007bf0844e4c331bdd7d9da13da2e
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/py-alabaster/package.py
|
msimberg/spack
|
27a339eeb28007bf0844e4c331bdd7d9da13da2e
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/py-alabaster/package.py
|
msimberg/spack
|
27a339eeb28007bf0844e4c331bdd7d9da13da2e
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyAlabaster(PythonPackage):
"""Alabaster is a visually (c)lean, responsive, configurable theme
for the Sphinx documentation system."""
homepage = "https://alabaster.readthedocs.io/"
url = "https://pypi.io/packages/source/a/alabaster/alabaster-0.7.10.tar.gz"
import_modules = ['alabaster']
version('0.7.12', sha256='a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02')
version('0.7.10', sha256='37cdcb9e9954ed60912ebc1ca12a9d12178c26637abdf124e3cde2341c257fe0')
version('0.7.9', sha256='47afd43b08a4ecaa45e3496e139a193ce364571e7e10c6a87ca1a4c57eb7ea08')
depends_on('py-setuptools', type='build')
| 38.304348
| 96
| 0.758229
|
80932cb7f60cbb85cdf021c3253544964c757685
| 62,576
|
py
|
Python
|
calibre-plugin/dialogs.py
|
stranger-danger-zamu/FanFicFare
|
9ea9cf4c681ee76f792e53f0ed3829dadd0bb827
|
[
"Apache-2.0"
] | 2
|
2015-04-01T19:00:25.000Z
|
2015-04-01T20:09:51.000Z
|
calibre-plugin/dialogs.py
|
stranger-danger-zamu/FanFicFare
|
9ea9cf4c681ee76f792e53f0ed3829dadd0bb827
|
[
"Apache-2.0"
] | null | null | null |
calibre-plugin/dialogs.py
|
stranger-danger-zamu/FanFicFare
|
9ea9cf4c681ee76f792e53f0ed3829dadd0bb827
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, unicode_literals, division,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2020, Jim Miller'
__docformat__ = 'restructuredtext en'
import re
from functools import partial
import logging
logger = logging.getLogger(__name__)
from datetime import datetime
from PyQt5 import QtWidgets as QtGui
from PyQt5 import QtCore
from PyQt5.Qt import (QApplication, QDialog, QWidget, QTableWidget, QVBoxLayout, QHBoxLayout,
QGridLayout, QPushButton, QFont, QLabel, QCheckBox, QIcon,
QLineEdit, QComboBox, QProgressDialog, QTimer, QDialogButtonBox,
QScrollArea, QPixmap, Qt, QAbstractItemView, QTextEdit,
pyqtSignal, QGroupBox, QFrame)
try:
# qt6 Calibre v6+
QTextEditNoWrap = QTextEdit.LineWrapMode.NoWrap
except:
# qt5 Calibre v2-5
QTextEditNoWrap = QTextEdit.NoWrap
from calibre.gui2 import gprefs
show_download_options = 'fff:add new/update dialogs:show_download_options'
from calibre.gui2.dialogs.confirm_delete import confirm
from calibre.gui2.complete2 import EditWithComplete
from fanficfare.six import text_type as unicode, ensure_text
# pulls in translation files for _() strings
try:
load_translations()
except NameError:
pass # load_translations() added in calibre 1.9
from calibre_plugins.fanficfare_plugin.common_utils import (
ReadOnlyTableWidgetItem, ReadOnlyTextIconWidgetItem,
SizePersistedDialog, EditableTableWidgetItem,
ImageTitleLayout, get_icon)
from fanficfare.geturls import get_urls_from_mime
from fanficfare.adapters import getNormalStoryURL
from fanficfare.configurable import (
get_valid_sections, get_valid_entries,
get_valid_keywords, get_valid_entry_keywords)
from .inihighlighter import IniHighlighter
## moved to prefs.py so they can be included in jobs.py.
from calibre_plugins.fanficfare_plugin.prefs import (
SKIP,
ADDNEW,
UPDATE,
UPDATEALWAYS,
OVERWRITE,
OVERWRITEALWAYS,
CALIBREONLY,
CALIBREONLYSAVECOL,
collision_order,
save_collisions,
anthology_collision_order,
)
gpstyle='QGroupBox {border:0; padding-top:10px; padding-bottom:0px; margin-bottom:0px;}' # background-color:red;
class RejectUrlEntry:
matchpat=re.compile(r"^(?P<url>[^,]+?)(,(?P<fullnote>(((?P<title>.+?) by (?P<auth>.+?)( - (?P<note>.+))?)|.*)))?$")
def __init__(self,url_or_line,note=None,title=None,auth=None,
addreasontext=None,fromline=False,book_id=None,
normalize=True):
self.url=url_or_line
self.note=note
self.title=title
self.auth=auth
self.valid=False
self.book_id=book_id
if fromline:
mc = re.match(self.matchpat,url_or_line)
if mc:
#print("mc:%s"%mc.groupdict())
(url,title,auth,note) = mc.group('url','title','auth','note')
if not mc.group('title'):
title=''
auth=''
note=mc.group('fullnote')
self.url=url
self.note=note
self.title=title
self.auth=auth
if not self.note:
if addreasontext:
self.note = addreasontext
else:
self.note = ''
else:
if addreasontext:
self.note = self.note + ' - ' + addreasontext
if normalize and self.url:
self.url = getNormalStoryURL(self.url)
self.valid = self.url != None
def to_line(self):
# always 'url,'
return "%s,%s"%(self.url,self.fullnote())
@classmethod
def from_data(cls,data):
rue = cls('')
rue.url=data['url']
rue.title=data['title']
rue.auth=data['auth']
rue.note=data['note']
rue.valid=True
# rue.book_id=book_id
return rue
def to_data(self):
return { 'url': self.url,
'title': self.title,
'auth': self.auth,
'note': self.note,
}
def fullnote(self):
retval = ""
if self.title and self.auth:
# don't translate--ends up being saved and confuses regex above.
retval = retval + "%s by %s"%(self.title,self.auth)
if self.note:
retval = retval + " - "
if self.note:
retval = retval + self.note
return retval
class NotGoingToDownload(Exception):
def __init__(self,error,icon='dialog_error.png',showerror=True):
self.error=error
self.icon=icon
self.showerror=showerror
def __str__(self):
return self.error
class DroppableQTextEdit(QTextEdit):
def __init__(self,parent):
QTextEdit.__init__(self,parent)
self.setTabChangesFocus(True)
def dropEvent(self,event):
# logger.debug("dropEvent")
urllist = get_urls_from_mime(event.mimeData())
if urllist:
self.append("\n".join(urllist))
return None
return QTextEdit.dropEvent(self,event)
def insertFromMimeData(self, mime_data):
# logger.debug("insertFromMimeData")
# logger.debug(mime_data)
urllist = None
if mime_data.hasFormat('text/html'):
urllist = get_urls_from_mime(mime_data)
# logger.debug(urllist)
if urllist:
[ self.append(url) for url in urllist ]
else:
return QTextEdit.insertFromMimeData(self, mime_data)
class AddNewDialog(SizePersistedDialog):
go_signal = pyqtSignal(object, object, object, object)
def __init__(self, gui, prefs, icon):
SizePersistedDialog.__init__(self, gui, 'fff:add new dialog')
self.prefs = prefs
self.setMinimumWidth(300)
self.l = QVBoxLayout()
self.setLayout(self.l)
self.setWindowTitle('FanFicFare')
self.setWindowIcon(icon)
self.merge = self.newmerge = False
self.extraoptions = {}
# elements to hide when doing merge.
self.mergehide = []
self.mergeshow = []
# elements to show again when doing *update* merge
self.mergeupdateshow = []
self.toplabel=QLabel("Toplabel")
self.l.addWidget(self.toplabel)
## XXX add labels for series name and desc? Desc in tooltip?
row = 0
grid = QGridLayout()
label = QLabel('<b>'+_('Series')+':</b>')
grid.addWidget(label,row,0)
self.mergedname=QLabel("mergedname")
tt = _('This name will be used with the %s setting to set the title of the new book.')%'<i>anthology_title_pattern</i>'
label.setToolTip(tt)
self.mergeshow.append(label)
self.mergedname.setToolTip(tt)
grid.addWidget(self.mergedname,row,1,1,-1)
self.l.addLayout(grid)
self.mergeshow.append(self.mergedname)
row+=1
label = QLabel('<b>'+_('Comments')+':</b>')
grid.addWidget(label,row,0)
self.mergeddesc=QLabel("mergeddesc")
tt = _('These comments about the series will be included in the Comments of the new book.')+'<i></i>' # for html for auto-wrap
label.setToolTip(tt)
self.mergeshow.append(label)
self.mergeddesc.setToolTip(tt)
self.mergeddesc.setWordWrap(True)
grid.addWidget(self.mergeddesc,row,1,1,-1)
self.l.addLayout(grid)
self.mergeshow.append(self.mergeddesc)
grid.setColumnStretch(1,1)
self.url = DroppableQTextEdit(self)
self.url.setToolTip("UrlTooltip")
self.url.setLineWrapMode(QTextEditNoWrap)
self.l.addWidget(self.url)
self.groupbox = QGroupBox(_("Show Download Options"))
self.groupbox.setCheckable(True)
self.groupbox.setFlat(True)
#print("style:%s"%self.groupbox.styleSheet())
self.groupbox.setStyleSheet(gpstyle)
self.gbf = QFrame()
self.gbl = QVBoxLayout()
self.gbl.addWidget(self.gbf)
self.groupbox.setLayout(self.gbl)
self.gbl = QVBoxLayout()
self.gbf.setLayout(self.gbl)
self.l.addWidget(self.groupbox)
self.groupbox.setChecked(gprefs.get(show_download_options,False))
self.gbf.setVisible(gprefs.get(show_download_options,False))
self.groupbox.toggled.connect(self.click_show_download_options)
horz = QHBoxLayout()
label = QLabel(_('Output &Format:'))
self.mergehide.append(label)
self.fileform = QComboBox(self)
self.fileform.addItem('epub')
self.fileform.addItem('mobi')
self.fileform.addItem('html')
self.fileform.addItem('txt')
self.fileform.setToolTip(_('Choose output format to create. May set default from plugin configuration.'))
self.fileform.activated.connect(self.set_collisions)
horz.addWidget(label)
label.setBuddy(self.fileform)
horz.addWidget(self.fileform)
self.gbl.addLayout(horz)
self.mergehide.append(self.fileform)
horz = QHBoxLayout()
self.collisionlabel = QLabel("CollisionLabel")
horz.addWidget(self.collisionlabel)
self.collision = QComboBox(self)
self.collision.setToolTip("CollisionToolTip")
# add collision options
self.set_collisions()
i = self.collision.findText(save_collisions[self.prefs['collision']])
if i > -1:
self.collision.setCurrentIndex(i)
self.collisionlabel.setBuddy(self.collision)
horz.addWidget(self.collision)
self.gbl.addLayout(horz)
self.mergehide.append(self.collisionlabel)
self.mergehide.append(self.collision)
self.mergeupdateshow.append(self.collisionlabel)
self.mergeupdateshow.append(self.collision)
horz = QHBoxLayout()
self.updatemeta = QCheckBox(_('Update Calibre &Metadata?'),self)
self.updatemeta.setToolTip(_("Update metadata for existing stories in Calibre from web site?\n(Columns set to 'New Only' in the column tabs will only be set for new books.)"))
self.updatemeta.setChecked(self.prefs['updatemeta'])
horz.addWidget(self.updatemeta)
self.mergehide.append(self.updatemeta)
self.mergeupdateshow.append(self.updatemeta)
self.updateepubcover = QCheckBox(_('Update EPUB Cover?'),self)
self.updateepubcover.setToolTip(_('Update book cover image from site or defaults (if found) <i>inside</i> the EPUB when EPUB is updated.'))
self.updateepubcover.setChecked(self.prefs['updateepubcover'])
horz.addWidget(self.updateepubcover)
self.mergehide.append(self.updateepubcover)
self.gbl.addLayout(horz)
## bgmeta not used with Add New because of stories that change
## story URL and for title/author collision matching.
# horz = QHBoxLayout()
# self.bgmeta = QCheckBox(_('Background Metadata?'),self)
# self.bgmeta.setToolTip(_("Collect Metadata from sites in a Background process.<br />This returns control to you quicker while updating, but you won't be asked for username/passwords or if you are an adult--stories that need those will just fail."))
# self.bgmeta.setChecked(self.prefs['bgmeta'])
# horz.addWidget(self.bgmeta)
# self.mergehide.append(self.bgmeta)
# self.mergeupdateshow.append(self.bgmeta)
# self.gbl.addLayout(horz)
self.button_box = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
self.button_box.accepted.connect(self.ok_clicked)
self.button_box.rejected.connect(self.reject)
self.l.addWidget(self.button_box)
def click_show_download_options(self,x):
self.gbf.setVisible(x)
gprefs[show_download_options] = x
    # invoke the callback connected to go_signal with the collected options
def ok_clicked(self):
self.dialog_closing(None) # save persistent size.
self.hide()
self.go_signal.emit( self.get_fff_options(),
self.get_urlstext(),
self.merge,
self.extrapayload )
def show_dialog(self,
url_list_text,
callback,
show=True,
merge=False,
newmerge=True,
extraoptions={},
extrapayload=None):
# rather than mutex in fff_plugin, just bail here if it's
# already in use.
if self.isVisible():
if url_list_text: # add to open box.
self.url.setText( '\n'.join([self.get_urlstext(), url_list_text]) )
return
try:
self.go_signal.disconnect()
except:
pass # if not already connected.
self.go_signal.connect(callback)
self.merge = merge
self.newmerge = newmerge
self.extraoptions = extraoptions
self.extrapayload = extrapayload
self.groupbox.setVisible(not(self.merge and self.newmerge))
if self.merge:
count=""
if url_list_text:
count = " " + _("(%s URLs found)")%len(url_list_text.split()) # count lines
self.toplabel.setText('<b>'+_('Story URLs for anthology, one per line:') + count + '</b>')
self.url.setToolTip(_('URLs for stories to include in the anthology, one per line.\nWill take URLs from clipboard, but only valid URLs.'))
self.collisionlabel.setText(_('If Story Already Exists in Anthology?'))
self.collision.setToolTip(_("What to do if there's already an existing story with the same URL in the anthology."))
for widget in self.mergehide:
widget.setVisible(False)
for widget in self.mergeshow:
widget.setVisible(True)
if not self.newmerge:
for widget in self.mergeupdateshow:
widget.setVisible(True)
n = extraoptions.get('frompage',{}).get('name',None)
if n:
self.mergedname.setText(n)
else:
self.mergedname.setVisible(False)
d = extraoptions.get('frompage',{}).get('desc',None)
if d:
self.mergeddesc.setText(unicode(d))
else:
self.mergeddesc.setVisible(False)
else:
for widget in self.mergehide:
widget.setVisible(True)
for widget in self.mergeshow:
widget.setVisible(False)
self.toplabel.setText(_('Story URLs, one per line:'))
self.url.setToolTip(_('URLs for stories, one per line.\nWill take URLs from clipboard, but only valid URLs.\nAdd [1,5] after the URL to limit the download to chapters 1-5.'))
self.collisionlabel.setText(_('If Story Already Exists?'))
self.collision.setToolTip(_("What to do if there's already an existing story with the same URL or title and author."))
self.groupbox.setChecked(gprefs.get(show_download_options,False))
self.gbf.setVisible(gprefs.get(show_download_options,False))
self.groupbox.toggled.connect(self.click_show_download_options)
        # Need to re-enable after hiding/showing
self.setAcceptDrops(True)
self.url.setFocus()
if self.prefs['adddialogstaysontop']:
QDialog.setWindowFlags ( self, Qt.Dialog | Qt.WindowStaysOnTopHint )
else:
QDialog.setWindowFlags ( self, Qt.Dialog )
if not self.merge:
self.fileform.setCurrentIndex(self.fileform.findText(self.prefs['fileform']))
else:
# always epub on self.merge (anthology)
self.fileform.setCurrentIndex(self.fileform.findText('epub'))
# add collision options
self.set_collisions()
if 'collision' in extraoptions:
use_collision = extraoptions['collision']
# self.collision.setDisabled(True)
# self.collision.setToolTip(_("Update Mode set by menu/shortcut choice."))
else:
use_collision = save_collisions[self.prefs['collision']]
# self.collision.setDisabled(False)
i = self.collision.findText(use_collision)
if i > -1:
self.collision.setCurrentIndex(i)
self.updatemeta.setChecked(self.prefs['updatemeta'])
# self.bgmeta.setChecked(self.prefs['bgmeta'])
if not self.merge:
self.updateepubcover.setChecked(self.prefs['updateepubcover'])
self.url.setText(url_list_text)
if url_list_text:
self.button_box.button(QDialogButtonBox.Ok).setFocus()
# restore saved size.
self.resize_dialog()
if show: # so anthology update can be modal still.
self.show()
#self.resize(self.sizeHint())
def set_collisions(self):
prev=self.collision.currentText()
self.collision.clear()
if self.merge:
order = list(anthology_collision_order)
else:
order = list(collision_order)
## Remove options that aren't valid.
if self.fileform.currentText() != 'epub':
order.remove(UPDATE)
order.remove(UPDATEALWAYS)
if self.prefs['savemetacol'] == '':
order.remove(CALIBREONLYSAVECOL)
for o in order:
self.collision.addItem(o)
i = self.collision.findText(prev)
if i > -1:
self.collision.setCurrentIndex(i)
def get_fff_options(self):
retval = {
'fileform': unicode(self.fileform.currentText()),
'collision': unicode(self.collision.currentText()),
'updatemeta': self.updatemeta.isChecked(),
'bgmeta': False, # self.bgmeta.isChecked(),
'updateepubcover': self.updateepubcover.isChecked(),
'smarten_punctuation':self.prefs['smarten_punctuation'],
'do_wordcount':self.prefs['do_wordcount'],
}
if self.merge:
            retval['fileform'] = 'epub'  # anthologies are always epub
retval['updateepubcover']=True
if self.newmerge:
retval['updatemeta']=True
retval['collision']=ADDNEW
logger.debug("self.extraoptions['anthology_url']:%s"%self.extraoptions.get('anthology_url','NOT FOUND'))
retval.update(self.extraoptions)
return retval
def get_urlstext(self):
return unicode(self.url.toPlainText())
class FakeLineEdit():
def __init__(self):
pass
def text(self):
pass
class CollectURLDialog(SizePersistedDialog):
'''
Collect single url for get urls.
'''
def __init__(self, gui, title, url_text, anthology=False, indiv=True):
SizePersistedDialog.__init__(self, gui, 'fff:get story urls')
self.status=False
self.anthology=False
self.setMinimumWidth(300)
self.l = QVBoxLayout()
self.setLayout(self.l)
self.setWindowTitle(title)
self.l.addWidget(QLabel(title))
horz = QHBoxLayout()
self.l.addLayout(horz)
horz.addWidget(QLabel("URL:"))
self.url = QLineEdit(self)
self.url.setText(url_text)
horz.addWidget(self.url)
horz = QHBoxLayout()
self.l.addLayout(horz)
if indiv:
self.indiv_button = QPushButton(_('For Individual Books'), self)
self.indiv_button.setToolTip(_('Get URLs and go to dialog for individual story downloads.'))
self.indiv_button.clicked.connect(self.indiv)
horz.addWidget(self.indiv_button)
if anthology:
self.merge_button = QPushButton(_('For Anthology Epub'), self)
self.merge_button.setToolTip(_('Get URLs and go to dialog for Anthology download.\nRequires %s plugin.')%'EpubMerge 1.3.1+')
self.merge_button.clicked.connect(self.merge)
horz.addWidget(self.merge_button)
self.cancel_button = QPushButton(_('Cancel'), self)
self.cancel_button.clicked.connect(self.cancel)
horz.addWidget(self.cancel_button)
# restore saved size.
self.resize_dialog()
def indiv(self):
self.status=True
self.accept()
def merge(self):
self.status=True
self.anthology=True
self.accept()
def cancel(self):
self.status=False
self.reject()
class UserPassDialog(QDialog):
'''
Need to collect User/Pass for some sites.
'''
def __init__(self, gui, site, exception=None):
QDialog.__init__(self, gui)
self.status=False
self.l = QGridLayout()
self.setLayout(self.l)
if exception and exception.passwdonly:
self.setWindowTitle(_('Password'))
self.l.addWidget(QLabel(_("Author requires a password for this story(%s).")%exception.url),0,0,1,2)
# user isn't used, but it's easier to still have it for
# post processing.
self.user = FakeLineEdit()
else:
self.setWindowTitle(_('User/Password'))
self.l.addWidget(QLabel(_("%s requires you to login to download this story.")%site),0,0,1,2)
self.l.addWidget(QLabel(_("User:")),1,0)
self.user = QLineEdit(self)
self.l.addWidget(self.user,1,1)
self.l.addWidget(QLabel(_("Password:")),2,0)
self.passwd = QLineEdit(self)
self.passwd.setEchoMode(QLineEdit.Password)
self.l.addWidget(self.passwd,2,1)
self.ok_button = QPushButton(_('OK'), self)
self.ok_button.clicked.connect(self.ok)
self.l.addWidget(self.ok_button,3,0)
self.cancel_button = QPushButton(_('Cancel'), self)
self.cancel_button.clicked.connect(self.cancel)
self.l.addWidget(self.cancel_button,3,1)
self.resize(self.sizeHint())
def ok(self):
self.status=True
self.hide()
def cancel(self):
self.status=False
self.hide()
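# Illustrative usage sketch, not part of the original plugin: the dialog is
# run modally elsewhere in FanFicFare, after which callers read the status
# flag and the user/passwd line edits.  `gui` below is a hypothetical parent
# widget.
#   d = UserPassDialog(gui, 'example.com')
#   d.exec_()
#   if d.status:
#       creds = (d.user.text(), d.passwd.text())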
def LoopProgressDialog(gui,
book_list,
foreach_function,
finish_function,
init_label=_("Fetching metadata for stories..."),
win_title=_("Downloading metadata for stories"),
status_prefix=_("Fetched metadata for")):
ld = _LoopProgressDialog(gui,
book_list,
foreach_function,
init_label,
win_title,
status_prefix)
# Mac OS X gets upset if the finish_function is called from inside
# the real _LoopProgressDialog class.
# reflect old behavior.
if not ld.wasCanceled():
finish_function(book_list)
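# Illustrative sketch of driving the helper above (the names gui, books,
# fetch_one and done are hypothetical placeholders, not part of the plugin):
# each dict in book_list is handed to foreach_function in turn, and
# finish_function receives the whole list once the loop finishes uncancelled.
#   def fetch_one(book):
#       book['status'] = _('Fetched')
#   def done(books):
#       pass
#   LoopProgressDialog(gui, books, fetch_one, done)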
class _LoopProgressDialog(QProgressDialog):
'''
ProgressDialog displayed while fetching metadata for each story.
'''
def __init__(self, gui,
book_list,
foreach_function,
init_label=_("Fetching metadata for stories..."),
win_title=_("Downloading metadata for stories"),
status_prefix=_("Fetched metadata for")):
QProgressDialog.__init__(self,
init_label,
_('Cancel'), 0, len(book_list), gui)
self.setWindowTitle(win_title)
self.setMinimumWidth(500)
self.book_list = book_list
self.foreach_function = foreach_function
self.status_prefix = status_prefix
self.i = 0
self.start_time = datetime.now()
self.first = True
# can't import at file load.
from calibre_plugins.fanficfare_plugin.prefs import prefs
self.show_est_time = prefs['show_est_time']
self.setLabelText('%s %d / %d' % (self.status_prefix, self.i, len(self.book_list)))
self.setValue(self.i)
## self.do_loop does QTimer.singleShot on self.do_loop also.
## A weird way to do a loop, but that was the example I had.
QTimer.singleShot(0, self.do_loop)
self.exec_()
def updateStatus(self):
remaining_time_string = ''
if self.show_est_time and self.i > -1:
time_spent = (datetime.now() - self.start_time).total_seconds()
estimated_remaining = (time_spent/(self.i+1)) * len(self.book_list) - time_spent
remaining_time_string = _(' - %s estimated until done') % ( time_duration_format(estimated_remaining))
self.setLabelText('%s %d / %d%s' % (self.status_prefix, self.i+1, len(self.book_list), remaining_time_string))
self.setValue(self.i+1)
#print(self.labelText())
def do_loop(self):
if self.first:
## Windows 10 doesn't want to show the prog dialog content
## until after the timer's been called again. Something to
## do with cooperative multi threading maybe?
## So this just trips the timer loop an extra time at the start.
self.first = False
QTimer.singleShot(0, self.do_loop)
return
book = self.book_list[self.i]
try:
## collision spec passed into getadapter by partial from fff_plugin
## no retval only if it exists, but collision is SKIP
self.foreach_function(book)
except NotGoingToDownload as d:
book['status']=_('Skipped')
book['good']=False
book['showerror']=d.showerror
book['comment']=unicode(d)
book['icon'] = d.icon
except Exception as e:
book['good']=False
book['status']=_("Error")
book['comment']=unicode(e)
logger.error("Exception: %s:%s"%(book,book['comment']),exc_info=True)
self.updateStatus()
self.i += 1
if self.i >= len(self.book_list) or self.wasCanceled():
return self.do_when_finished()
else:
QTimer.singleShot(0, self.do_loop)
def do_when_finished(self):
self.hide()
def time_duration_format(seconds):
"""
Convert seconds into a string describing the duration in larger time units (seconds, minutes, hours, days)
Only returns the two largest time divisions (eg, will drop seconds if there's hours remaining)
:param seconds: number of seconds
:return: string description of the duration
"""
periods = [
(_('%d day'),_('%d days'), 60*60*24),
(_('%d hour'),_('%d hours'), 60*60),
(_('%d minute'),_('%d minutes'), 60),
(_('%d second'),_('%d seconds'), 1)
]
strings = []
for period_label, period_plural_label, period_seconds in periods:
if seconds > period_seconds:
period_value, seconds = divmod(seconds,period_seconds)
if period_value == 1:
strings.append( period_label % period_value)
else:
strings.append(period_plural_label % period_value)
if len(strings) == 2:
break
if len(strings) == 0:
return _('less than 1 second')
else:
return ', '.join(strings)
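# Illustrative examples, not part of the original plugin (they assume the
# calibre _() translation hook used throughout this module simply returns
# its argument unchanged):
#   time_duration_format(0.5)    -> 'less than 1 second'
#   time_duration_format(75)     -> '1 minute, 15 seconds'
#   time_duration_format(90061)  -> '1 day, 1 hour'   (two largest units only)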
class AboutDialog(QDialog):
def __init__(self, parent, icon, text):
QDialog.__init__(self, parent)
#self.resize(400, 250)
self.l = QGridLayout()
self.setLayout(self.l)
self.logo = QLabel()
self.logo.setMaximumWidth(110)
self.logo.setPixmap(QPixmap(icon.pixmap(100,100)))
self.label = QLabel(text)
self.label.setOpenExternalLinks(True)
self.label.setWordWrap(True)
self.setWindowTitle(_('About FanFicFare'))
self.setWindowIcon(icon)
self.l.addWidget(self.logo, 0, 0)
self.l.addWidget(self.label, 0, 1)
self.bb = QDialogButtonBox(self)
b = self.bb.addButton(_('OK'), self.bb.AcceptRole)
b.setDefault(True)
self.l.addWidget(self.bb, 2, 0, 1, -1)
self.bb.accepted.connect(self.accept)
class IconWidgetItem(ReadOnlyTextIconWidgetItem):
def __init__(self, text, icon, sort_key):
ReadOnlyTextIconWidgetItem.__init__(self, text, icon)
self.sort_key = sort_key
#Qt uses a simple < check for sorting items, override this to use the sortKey
def __lt__(self, other):
return self.sort_key < other.sort_key
class AuthorTableWidgetItem(ReadOnlyTableWidgetItem):
def __init__(self, text, sort_key):
ReadOnlyTableWidgetItem.__init__(self, text)
self.sort_key = sort_key
#Qt uses a simple < check for sorting items, override this to use the sortKey
def __lt__(self, other):
return self.sort_key.lower() < other.sort_key.lower()
class UpdateExistingDialog(SizePersistedDialog):
def __init__(self, gui, header, prefs, icon, books,
extraoptions={},
save_size_name='fff:update list dialog'):
SizePersistedDialog.__init__(self, gui, save_size_name)
self.prefs = prefs
self.setWindowTitle(header)
self.setWindowIcon(icon)
layout = QVBoxLayout(self)
self.setLayout(layout)
title_layout = ImageTitleLayout(self, 'images/icon.png',
header)
layout.addLayout(title_layout)
books_layout = QHBoxLayout()
layout.addLayout(books_layout)
self.books_table = StoryListTableWidget(self)
books_layout.addWidget(self.books_table)
button_layout = QVBoxLayout()
books_layout.addLayout(button_layout)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
button_layout.addItem(spacerItem)
self.remove_button = QtGui.QToolButton(self)
self.remove_button.setToolTip(_('Remove selected books from the list'))
self.remove_button.setIcon(get_icon('list_remove.png'))
self.remove_button.clicked.connect(self.remove_from_list)
button_layout.addWidget(self.remove_button)
spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
button_layout.addItem(spacerItem1)
options_layout = QHBoxLayout()
groupbox = QGroupBox(_("Show Download Options"))
groupbox.setCheckable(True)
groupbox.setChecked(gprefs.get(show_download_options,False))
groupbox.setFlat(True)
groupbox.setStyleSheet(gpstyle)
self.gbf = QFrame()
gbl = QVBoxLayout()
gbl.addWidget(self.gbf)
groupbox.setLayout(gbl)
gbl = QVBoxLayout()
self.gbf.setLayout(gbl)
options_layout.addWidget(groupbox)
self.gbf.setVisible(gprefs.get(show_download_options,False))
groupbox.toggled.connect(self.click_show_download_options)
horz = QHBoxLayout()
gbl.addLayout(horz)
label = QLabel(_('Output &Format:'))
horz.addWidget(label)
self.fileform = QComboBox(self)
self.fileform.addItem('epub')
self.fileform.addItem('mobi')
self.fileform.addItem('html')
self.fileform.addItem('txt')
self.fileform.setCurrentIndex(self.fileform.findText(self.prefs['fileform']))
self.fileform.setToolTip(_('Choose output format to create. May set default from plugin configuration.'))
self.fileform.activated.connect(self.set_collisions)
label.setBuddy(self.fileform)
horz.addWidget(self.fileform)
label = QLabel(_('Update Mode:'))
horz.addWidget(label)
self.collision = QComboBox(self)
self.collision.setToolTip(_("What sort of update to perform. May set default from plugin configuration."))
# add collision options
self.set_collisions()
if 'collision' in extraoptions:
use_collision = extraoptions['collision']
# self.collision.setDisabled(True)
# self.collision.setToolTip(_("Update Mode set by menu/shortcut choice."))
else:
use_collision = save_collisions[self.prefs['collision']]
# self.collision.setDisabled(False)
i = self.collision.findText(use_collision)
if i > -1:
self.collision.setCurrentIndex(i)
label.setBuddy(self.collision)
horz.addWidget(self.collision)
horz = QHBoxLayout()
gbl.addLayout(horz)
self.updatemeta = QCheckBox(_('Update Calibre &Metadata?'),self)
self.updatemeta.setToolTip(_("Update metadata for existing stories in Calibre from web site?\n(Columns set to 'New Only' in the column tabs will only be set for new books.)"))
self.updatemeta.setChecked(self.prefs['updatemeta'])
horz.addWidget(self.updatemeta)
self.updateepubcover = QCheckBox(_('Update EPUB Cover?'),self)
self.updateepubcover.setToolTip(_('Update book cover image from site or defaults (if found) <i>inside</i> the EPUB when EPUB is updated.'))
self.updateepubcover.setChecked(self.prefs['updateepubcover'])
horz.addWidget(self.updateepubcover)
self.bgmeta = QCheckBox(_('Background Metadata?'),self)
self.bgmeta.setToolTip(_("Collect Metadata from sites in a Background process.<br />This returns control to you quicker while updating, but you won't be asked for username/passwords or if you are an adult--stories that need those will just fail."))
self.bgmeta.setChecked(self.prefs['bgmeta'])
horz.addWidget(self.bgmeta)
button_box = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
button_box.accepted.connect(self.accept)
button_box.rejected.connect(self.reject)
options_layout.addWidget(button_box)
layout.addLayout(options_layout)
# Cause our dialog size to be restored from prefs or created on first usage
self.resize_dialog()
self.books_table.populate_table(books)
def click_show_download_options(self,x):
self.gbf.setVisible(x)
gprefs[show_download_options] = x
def set_collisions(self):
prev=self.collision.currentText()
self.collision.clear()
order = list(collision_order)
order.remove(ADDNEW)
order.remove(SKIP)
if self.fileform.currentText() != 'epub':
order.remove(UPDATE)
order.remove(UPDATEALWAYS)
if self.prefs['savemetacol'] == '':
order.remove(CALIBREONLYSAVECOL)
for o in order:
self.collision.addItem(o)
i = self.collision.findText(prev)
if i > -1:
self.collision.setCurrentIndex(i)
def remove_from_list(self):
self.books_table.remove_selected_rows()
def get_books(self):
return self.books_table.get_books()
def get_fff_options(self):
return {
'fileform': unicode(self.fileform.currentText()),
'collision': unicode(self.collision.currentText()),
'updatemeta': self.updatemeta.isChecked(),
'bgmeta': self.bgmeta.isChecked(),
'updateepubcover': self.updateepubcover.isChecked(),
'smarten_punctuation':self.prefs['smarten_punctuation'],
'do_wordcount':self.prefs['do_wordcount'],
}
class StoryListTableWidget(QTableWidget):
def __init__(self, parent):
QTableWidget.__init__(self, parent)
self.setSelectionBehavior(QAbstractItemView.SelectRows)
def populate_table(self, books):
self.clear()
self.setAlternatingRowColors(True)
self.setRowCount(len(books))
header_labels = ['',_('Title'), _('Author'), 'URL', _('Comment')]
self.setColumnCount(len(header_labels))
self.setHorizontalHeaderLabels(header_labels)
self.horizontalHeader().setStretchLastSection(True)
#self.verticalHeader().setDefaultSectionSize(24)
self.verticalHeader().hide()
self.books={}
for row, book in enumerate(books):
self.populate_table_row(row, book)
self.books[row] = book
# turning True breaks up/down. Do we need either sorting or up/down?
self.setSortingEnabled(True)
self.resizeColumnsToContents()
self.setMinimumColumnWidth(1, 100)
self.setMinimumColumnWidth(2, 100)
self.setMinimumColumnWidth(3, 100)
self.setMinimumSize(300, 0)
# if len(books) > 0:
# self.selectRow(0)
self.sortItems(1)
self.sortItems(0)
def setMinimumColumnWidth(self, col, minimum):
if self.columnWidth(col) < minimum:
self.setColumnWidth(col, minimum)
def populate_table_row(self, row, book):
if book['good']:
icon = get_icon('ok.png')
val = 0
else:
icon = get_icon('minus.png')
val = 1
if 'icon' in book:
icon = get_icon(book['icon'])
status_cell = IconWidgetItem(None,icon,val)
status_cell.setData(Qt.UserRole, val)
self.setItem(row, 0, status_cell)
title_cell = ReadOnlyTableWidgetItem(book['title'])
title_cell.setData(Qt.UserRole, row)
self.setItem(row, 1, title_cell)
self.setItem(row, 2, AuthorTableWidgetItem(", ".join(book['author']), ", ".join(book['author_sort'])))
url_cell = ReadOnlyTableWidgetItem(book['url'])
self.setItem(row, 3, url_cell)
comment_cell = ReadOnlyTableWidgetItem(book['comment'])
self.setItem(row, 4, comment_cell)
def get_books(self):
books = []
#print("=========================\nbooks:%s"%self.books)
for row in range(self.rowCount()):
rnum = self.item(row, 1).data(Qt.UserRole)
book = self.books[rnum]
books.append(book)
return books
def remove_selected_rows(self):
self.setFocus()
rows = self.selectionModel().selectedRows()
if len(rows) == 0:
return
message = '<p>'+_('Are you sure you want to remove this book from the list?')
if len(rows) > 1:
message = '<p>'+_('Are you sure you want to remove the selected %d books from the list?')%len(rows)
if not confirm(message,'fff_delete_item', self):
return
first_sel_row = self.currentRow()
for selrow in reversed(rows):
self.removeRow(selrow.row())
if first_sel_row < self.rowCount():
self.select_and_scroll_to_row(first_sel_row)
elif self.rowCount() > 0:
self.select_and_scroll_to_row(first_sel_row - 1)
def select_and_scroll_to_row(self, row):
self.selectRow(row)
self.scrollToItem(self.currentItem())
class RejectListTableWidget(QTableWidget):
def __init__(self, parent,rejectreasons=[]):
QTableWidget.__init__(self, parent)
self.setSelectionBehavior(QAbstractItemView.SelectRows)
self.rejectreasons = rejectreasons
def populate_table(self, reject_list):
self.clear()
self.setAlternatingRowColors(True)
self.setRowCount(len(reject_list))
header_labels = ['URL', _('Title'), _('Author'), _('Note')]
self.setColumnCount(len(header_labels))
self.setHorizontalHeaderLabels(header_labels)
self.horizontalHeader().setStretchLastSection(True)
#self.verticalHeader().setDefaultSectionSize(24)
#self.verticalHeader().hide()
# it's generally recommended to enable sort after pop, not
# before. But then it needs to be sorted on a column and I'd
# rather keep the order given.
self.setSortingEnabled(True)
# row is just row number.
for row, rejectrow in enumerate(reject_list):
#print("populating table:%s"%rejectrow.to_line())
self.populate_table_row(row,rejectrow)
self.resizeColumnsToContents()
self.setMinimumColumnWidth(0, 100)
self.setMinimumColumnWidth(3, 100)
self.setMinimumSize(300, 0)
def setMinimumColumnWidth(self, col, minimum):
if self.columnWidth(col) < minimum:
self.setColumnWidth(col, minimum)
def populate_table_row(self, row, rej):
url_cell = ReadOnlyTableWidgetItem(rej.url)
url_cell.setData(Qt.UserRole, rej.book_id)
self.setItem(row, 0, url_cell)
self.setItem(row, 1, EditableTableWidgetItem(rej.title))
self.setItem(row, 2, EditableTableWidgetItem(rej.auth))
note_cell = EditWithComplete(self,sort_func=lambda x:1)
items = [rej.note]+self.rejectreasons
note_cell.update_items_cache(items)
note_cell.show_initial_value(rej.note)
note_cell.set_separator(None)
note_cell.setToolTip(_('Select or Edit Reject Note.'))
self.setCellWidget(row, 3, note_cell)
note_cell.setCursorPosition(0)
def remove_selected_rows(self):
self.setFocus()
rows = self.selectionModel().selectedRows()
if len(rows) == 0:
return
message = '<p>'+_('Are you sure you want to remove this URL from the list?')
if len(rows) > 1:
message = '<p>'+_('Are you sure you want to remove the %d selected URLs from the list?')%len(rows)
if not confirm(message,'fff_rejectlist_delete_item_again', self):
return
first_sel_row = self.currentRow()
for selrow in reversed(rows):
self.removeRow(selrow.row())
if first_sel_row < self.rowCount():
self.select_and_scroll_to_row(first_sel_row)
elif self.rowCount() > 0:
self.select_and_scroll_to_row(first_sel_row - 1)
def select_and_scroll_to_row(self, row):
self.selectRow(row)
self.scrollToItem(self.currentItem())
class RejectListDialog(SizePersistedDialog):
def __init__(self, gui, reject_list,
rejectreasons=[],
header=_("List of Books to Reject"),
icon='rotate-right.png',
show_delete=True,
show_all_reasons=True,
save_size_name='fff:reject list dialog'):
SizePersistedDialog.__init__(self, gui, save_size_name)
self.setWindowTitle(header)
self.setWindowIcon(get_icon(icon))
layout = QVBoxLayout(self)
self.setLayout(layout)
title_layout = ImageTitleLayout(self, icon, header,
'<i></i>'+_('FFF will remember these URLs and display the note and offer to reject them if you try to download them again later.'))
layout.addLayout(title_layout)
rejects_layout = QHBoxLayout()
layout.addLayout(rejects_layout)
self.rejects_table = RejectListTableWidget(self,rejectreasons=rejectreasons)
rejects_layout.addWidget(self.rejects_table)
button_layout = QVBoxLayout()
rejects_layout.addLayout(button_layout)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
button_layout.addItem(spacerItem)
self.remove_button = QtGui.QToolButton(self)
self.remove_button.setToolTip(_('Remove selected URLs from the list'))
self.remove_button.setIcon(get_icon('list_remove.png'))
self.remove_button.clicked.connect(self.remove_from_list)
button_layout.addWidget(self.remove_button)
spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
button_layout.addItem(spacerItem1)
if show_all_reasons:
self.reason_edit = EditWithComplete(self,sort_func=lambda x:1)
items = ['']+rejectreasons
self.reason_edit.update_items_cache(items)
self.reason_edit.show_initial_value('')
self.reason_edit.set_separator(None)
self.reason_edit.setToolTip(_("This will be added to whatever note you've set for each URL above."))
horz = QHBoxLayout()
label = QLabel(_("Add this reason to all URLs added:"))
label.setToolTip(_("This will be added to whatever note you've set for each URL above."))
horz.addWidget(label)
horz.addWidget(self.reason_edit)
self.reason_edit.setCursorPosition(0)
horz.insertStretch(-1)
layout.addLayout(horz)
options_layout = QHBoxLayout()
if show_delete:
# can't import at file load.
from calibre_plugins.fanficfare_plugin.prefs import prefs
self.deletebooks = QCheckBox(_('Delete Books (including books without FanFiction URLs)?'),self)
self.deletebooks.setToolTip(_("Delete the selected books after adding them to the Rejected URLs list."))
self.deletebooks.setChecked(prefs['reject_delete_default'])
options_layout.addWidget(self.deletebooks)
button_box = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
button_box.accepted.connect(self.accept)
button_box.rejected.connect(self.reject)
options_layout.addWidget(button_box)
layout.addLayout(options_layout)
# Cause our dialog size to be restored from prefs or created on first usage
self.resize_dialog()
self.rejects_table.populate_table(reject_list)
def remove_from_list(self):
self.rejects_table.remove_selected_rows()
def get_reject_list(self):
rejectrows = []
for row in range(self.rejects_table.rowCount()):
url = unicode(self.rejects_table.item(row, 0).text()).strip()
book_id =self.rejects_table.item(row, 0).data(Qt.UserRole)
title = unicode(self.rejects_table.item(row, 1).text()).strip()
auth = unicode(self.rejects_table.item(row, 2).text()).strip()
note = unicode(self.rejects_table.cellWidget(row, 3).currentText()).strip()
rejectrows.append(RejectUrlEntry(url,note,title,auth,self.get_reason_text(),book_id=book_id,normalize=False))
return rejectrows
def get_reject_list_ids(self):
rejectrows = []
for row in range(self.rejects_table.rowCount()):
book_id = self.rejects_table.item(row, 0).data(Qt.UserRole)
if book_id:
rejectrows.append(book_id)
return rejectrows
def get_reason_text(self):
try:
return unicode(self.reason_edit.currentText()).strip()
except:
# doesn't have self.reason_edit when editing existing list.
return None
def get_deletebooks(self):
return self.deletebooks.isChecked()
class EditTextDialog(SizePersistedDialog):
def __init__(self, parent, text,
icon=None, title=None, label=None, tooltip=None,
read_only=False,
rejectreasons=[],reasonslabel=None,
save_size_name='fff:edit text dialog',
):
SizePersistedDialog.__init__(self, parent, save_size_name)
self.l = QVBoxLayout()
self.setLayout(self.l)
self.label = QLabel(label)
if title:
self.setWindowTitle(title)
if icon:
self.setWindowIcon(icon)
self.l.addWidget(self.label)
self.textedit = QTextEdit(self)
self.textedit.setLineWrapMode(QTextEditNoWrap)
self.textedit.setReadOnly(read_only)
self.textedit.setText(text)
self.l.addWidget(self.textedit)
if tooltip:
self.label.setToolTip(tooltip)
self.textedit.setToolTip(tooltip)
if rejectreasons or reasonslabel:
self.reason_edit = EditWithComplete(self,sort_func=lambda x:1)
items = ['']+rejectreasons
self.reason_edit.update_items_cache(items)
self.reason_edit.show_initial_value('')
self.reason_edit.set_separator(None)
self.reason_edit.setToolTip(reasonslabel)
if reasonslabel:
horz = QHBoxLayout()
label = QLabel(reasonslabel)
label.setToolTip(reasonslabel)
horz.addWidget(label)
horz.addWidget(self.reason_edit)
self.l.addLayout(horz)
else:
self.l.addWidget(self.reason_edit)
self.reason_edit.setCursorPosition(0)
button_box = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
button_box.accepted.connect(self.accept)
button_box.rejected.connect(self.reject)
self.l.addWidget(button_box)
# Cause our dialog size to be restored from prefs or created on first usage
self.resize_dialog()
def get_plain_text(self):
return unicode(self.textedit.toPlainText())
def get_reason_text(self):
return unicode(self.reason_edit.currentText()).strip()
class IniTextDialog(SizePersistedDialog):
def __init__(self, parent, text,
icon=None, title=None, label=None,
use_find=False,
read_only=False,
save_size_name='fff:ini text dialog',
):
SizePersistedDialog.__init__(self, parent, save_size_name)
self.keys=dict()
self.l = QVBoxLayout()
self.setLayout(self.l)
self.label = QLabel(label)
if title:
self.setWindowTitle(title)
if icon:
self.setWindowIcon(icon)
self.l.addWidget(self.label)
self.textedit = QTextEdit(self)
highlighter = IniHighlighter(self.textedit,
sections=get_valid_sections(),
keywords=get_valid_keywords(),
entries=get_valid_entries(),
entry_keywords=get_valid_entry_keywords(),
)
self.textedit.setLineWrapMode(QTextEditNoWrap)
try:
self.textedit.setFont(QFont("Courier",
parent.font().pointSize()+1))
except Exception as e:
logger.error("Couldn't get font: %s"%e)
self.textedit.setReadOnly(read_only)
self.textedit.setText(ensure_text(text))
self.l.addWidget(self.textedit)
self.lastStart = 0
if use_find:
findtooltip=_('Search for string in edit box.')
horz = QHBoxLayout()
label = QLabel(_('Find:'))
label.setToolTip(findtooltip)
# Button to search the document for something
self.findButton = QtGui.QPushButton(_('Find'),self)
self.findButton.clicked.connect(self.find)
self.findButton.setToolTip(findtooltip)
# The field into which to type the query
self.findField = QLineEdit(self)
self.findField.setToolTip(findtooltip)
self.findField.returnPressed.connect(self.findButton.setFocus)
# Case Sensitivity option
self.caseSens = QtGui.QCheckBox(_('Case sensitive'),self)
self.caseSens.setToolTip(_("Search for case sensitive string; don't treat Harry, HARRY and harry all the same."))
horz.addWidget(label)
horz.addWidget(self.findField)
horz.addWidget(self.findButton)
horz.addWidget(self.caseSens)
self.l.addLayout(horz)
self.addCtrlKeyPress(QtCore.Qt.Key_F,self.findFocus)
self.addCtrlKeyPress(QtCore.Qt.Key_G,self.find)
button_box = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
button_box.accepted.connect(self.accept)
button_box.rejected.connect(self.reject)
self.l.addWidget(button_box)
# Cause our dialog size to be restored from prefs or created on first usage
self.resize_dialog()
def accept(self):
from .fff_util import test_config
# print("in accept")
errors = test_config(self.get_plain_text())
retry = False
if errors:
d = ViewLog(self,
_('Go back to fix errors?'),
errors)
retry = d.exec_() == d.Accepted
# print("retry:%s"%retry)
if retry:
lineno=d.get_lineno()
if lineno:
# print("go to lineno (%s) here"%lineno)
self.select_line(lineno)
else:
# print("call parent accept")
return SizePersistedDialog.accept(self)
def addCtrlKeyPress(self,key,func):
# print("addKeyPress: key(0x%x)"%key)
# print("control: 0x%x"%QtCore.Qt.ControlModifier)
self.keys[key]=func
def keyPressEvent(self, event):
# print("event: key(0x%x) modifiers(0x%x)"%(event.key(),event.modifiers()))
if (event.modifiers() & QtCore.Qt.ControlModifier) and event.key() in self.keys:
func = self.keys[event.key()]
return func()
else:
return SizePersistedDialog.keyPressEvent(self, event)
def get_plain_text(self):
return unicode(self.textedit.toPlainText())
def findFocus(self):
# print("findFocus called")
self.findField.setFocus()
self.findField.selectAll()
def find(self):
#print("find self.lastStart:%s"%self.lastStart)
# Grab the parent's text
text = self.textedit.toPlainText()
# And the text to find
query = self.findField.text()
if not self.caseSens.isChecked():
text = text.lower()
query = query.lower()
# Use normal string search to find the query from the
# last starting position
self.lastStart = text.find(query,self.lastStart + 1)
# If the find() method didn't return -1 (not found)
if self.lastStart >= 0:
end = self.lastStart + len(query)
self.moveCursor(self.lastStart,end)
else:
# Make the next search start from the beginning again
self.lastStart = 0
self.textedit.moveCursor(self.textedit.textCursor().Start)
def moveCursor(self,start,end):
# We retrieve the QTextCursor object from the parent's QTextEdit
cursor = self.textedit.textCursor()
# Then we set the position to the beginning of the last match
cursor.setPosition(start)
# Next we move the Cursor by over the match and pass the KeepAnchor parameter
# which will make the cursor select the match's text
cursor.movePosition(cursor.Right,cursor.KeepAnchor,end - start)
# And finally we set this new cursor as the parent's
self.textedit.setTextCursor(cursor)
def select_line(self,lineno):
# We retrieve the QTextCursor object from the parent's QTextEdit
cursor = self.textedit.textCursor()
# Then we set the position to the beginning of the buffer
cursor.setPosition(0)
# Next we move the Cursor down lineno times
cursor.movePosition(cursor.Down,cursor.MoveAnchor,lineno-1)
# Next we move the Cursor to the end of the line
cursor.movePosition(cursor.EndOfLine,cursor.KeepAnchor,1)
# And finally we set this new cursor as the parent's
self.textedit.setTextCursor(cursor)
class ViewLog(SizePersistedDialog):
def label_clicked(self, event, lineno=None):
self.lineno = lineno
# print("lineno set to: %s"%lineno)
self.accept()
def get_lineno(self):
return self.lineno
def __init__(self, parent, title, errors,
save_size_name='fff:view log dialog',):
SizePersistedDialog.__init__(self, parent,save_size_name)
self.l = l = QVBoxLayout()
self.setLayout(l)
label = QLabel(_('Click an error below to return to Editing directly on that line:'))
label.setWordWrap(True)
self.l.addWidget(label)
self.lineno = None
scrollable = QScrollArea()
scrollcontent = QWidget()
scrollable.setWidget(scrollcontent)
scrollable.setWidgetResizable(True)
self.l.addWidget(scrollable)
self.sl = QVBoxLayout()
scrollcontent.setLayout(self.sl)
## error = (lineno, msg)
for (lineno, error_msg) in errors:
# print('adding label for error:%s: %s'%(lineno, error_msg))
if len(error_msg) > 200:
error_msg=error_msg[:200]+" ..."
label = QLabel('%s: %s'%(lineno, error_msg))
label.setWordWrap(True)
if( hasattr(QApplication.instance(),'is_dark_theme')
and QApplication.instance().is_dark_theme ):
label.setStyleSheet("QLabel { margin-left: 2em; color : aqua; } QLabel:hover { color: red; }")
else:
label.setStyleSheet("QLabel { margin-left: 2em; color : blue; } QLabel:hover { color: red; }")
label.setToolTip(_('Click to go to line %s')%lineno)
label.mouseReleaseEvent = partial(self.label_clicked, lineno=lineno)
self.sl.addWidget(label)
# html='<p>'+'</p><p>'.join([ '(lineno: %s) %s'%e for e in errors ])+'</p>'
# self.tb = QTextBrowser(self)
# self.tb.setFont(QFont("Courier",
# parent.font().pointSize()+1))
# self.tb.setHtml(html)
# l.addWidget(self.tb)
self.sl.insertStretch(-1)
horz = QHBoxLayout()
editagain = QPushButton(_('Return to Editing'), self)
editagain.clicked.connect(self.accept)
horz.addWidget(editagain)
saveanyway = QPushButton(_('Save Anyway'), self)
saveanyway.clicked.connect(self.reject)
horz.addWidget(saveanyway)
l.addLayout(horz)
self.setModal(False)
self.setWindowTitle(title)
self.setWindowIcon(QIcon(I('debug.png')))
#self.show()
# Cause our dialog size to be restored from prefs or created on first usage
self.resize_dialog()
def copy_to_clipboard(self):
txt = self.tb.toPlainText()
QApplication.clipboard().setText(txt)
class EmailPassDialog(QDialog):
'''
Need to collect Pass for imap.
'''
def __init__(self, gui, user):
QDialog.__init__(self, gui)
self.status=False
self.l = QGridLayout()
self.setLayout(self.l)
self.setWindowTitle(_('Password'))
self.l.addWidget(QLabel(_("Enter Email Password for %s:")%user),0,0,1,2)
# self.l.addWidget(QLabel(_("Password:")),1,0)
self.passwd = QLineEdit(self)
self.passwd.setEchoMode(QLineEdit.Password)
self.l.addWidget(self.passwd,1,0,1,2)
self.ok_button = QPushButton(_('OK'), self)
self.ok_button.clicked.connect(self.ok)
self.l.addWidget(self.ok_button,2,0)
self.cancel_button = QPushButton(_('Cancel'), self)
self.cancel_button.clicked.connect(self.cancel)
self.l.addWidget(self.cancel_button,2,1)
# set stretch factors the same.
self.l.setColumnStretch(0,1)
self.l.setColumnStretch(1,1)
self.resize(self.sizeHint())
def ok(self):
self.status=True
self.hide()
def cancel(self):
self.status=False
self.hide()
def get_pass(self):
return u"%s"%self.passwd.text()
def get_remember(self):
return self.remember_pass.isChecked()
def question_dialog_all(parent, title, msg, det_msg='', show_copy_button=False,
default_yes=True,
# Skippable dialogs
# Set skip_dialog_name to a unique name for this dialog
# Set skip_dialog_msg to a message displayed to the user
skip_dialog_name=None, skip_dialog_msg=_('Show this confirmation again'),
skip_dialog_skipped_value=True, skip_dialog_skip_precheck=True,
# Override icon (QIcon to be used as the icon for this dialog or string for I())
override_icon=None,
# Change the text/icons of the yes and no buttons.
# The icons must be QIcon objects or strings for I()
yes_text=None, no_text=None, yes_icon=None, no_icon=None,
# for yes/no to all memory:
question_name=None,
question_cache=None,
):
# print(question_cache)
if isinstance(question_cache,dict) and question_name and question_name in question_cache:
return question_cache[question_name]
from calibre.gui2.dialogs.message_box import MessageBox
if not isinstance(skip_dialog_name, unicode):
skip_dialog_name = None
try:
auto_skip = set(gprefs.get('questions_to_auto_skip', ()))
except Exception:
auto_skip = set()
if (skip_dialog_name is not None and skip_dialog_name in auto_skip):
return bool(skip_dialog_skipped_value)
## There's almost certainly a more elegant way to do this, but
## this works and I understand it. all_flag is a container so the
## click connect can change the contents.
all_flag = []
def set_all_flag(a,s):
a.append(s)
d = MessageBox(MessageBox.QUESTION, title, msg, det_msg, parent=parent,
show_copy_button=show_copy_button, default_yes=default_yes,
q_icon=override_icon, yes_text=yes_text, no_text=no_text,
yes_icon=yes_icon, no_icon=no_icon)
d.bb.setStandardButtons(d.bb.Yes|d.bb.No|d.bb.YesToAll|d.bb.NoToAll)
d.bb.button(d.bb.YesToAll).setIcon(d.bb.button(d.bb.Yes).icon())
d.bb.button(d.bb.NoToAll ).setIcon(d.bb.button(d.bb.No ).icon())
d.bb.button(d.bb.YesToAll).clicked.connect(partial(set_all_flag,all_flag,'yes_all'))
d.bb.button(d.bb.NoToAll ).clicked.connect(partial(set_all_flag,all_flag,'no_all'))
# d.bb.button(d.bb.NoToAll ).clicked.connect(lambda x:no_all = True)
if skip_dialog_name is not None and skip_dialog_msg:
tc = d.toggle_checkbox
tc.setVisible(True)
tc.setText(skip_dialog_msg)
tc.setChecked(bool(skip_dialog_skip_precheck))
d.resize_needed.emit()
ret = d.exec_() == d.Accepted
# print("yes/no_all:")
# print(all_flag)
if all_flag and isinstance(question_cache,dict) and question_name:
question_cache[question_name] = (all_flag[0] == 'yes_all')
# print(question_cache)
if skip_dialog_name is not None and not d.toggle_checkbox.isChecked():
auto_skip.add(skip_dialog_name)
gprefs.set('questions_to_auto_skip', list(auto_skip))
return ret
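# Illustrative usage sketch (hypothetical names, not part of the plugin):
# passing the same question_cache dict and question_name across calls lets a
# Yes-to-All / No-to-All answer short-circuit later dialogs entirely.
#   cache = {}
#   for book in books:
#       if question_dialog_all(gui, _('Overwrite?'),
#                              _('Overwrite %s?') % book['title'],
#                              question_name='overwrite',
#                              question_cache=cache):
#           overwrite(book)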
| 37.093065
| 258
| 0.624121
|
f55f67489ae3b6803ef2b8b5142a84b4fb386795
| 4,314
|
py
|
Python
|
tests/test_units.py
|
iamliamc/Brick
|
45909dbfa39535f265072bc6bc99eb1ce504add5
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_units.py
|
iamliamc/Brick
|
45909dbfa39535f265072bc6bc99eb1ce504add5
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_units.py
|
iamliamc/Brick
|
45909dbfa39535f265072bc6bc99eb1ce504add5
|
[
"BSD-3-Clause"
] | null | null | null |
from rdflib import Namespace
import re
import brickschema
from collections import defaultdict
import warnings
import sys
sys.path.append("..")
from bricksrc.namespaces import A, BRICK, TAG, QUDT # noqa: E402
BLDG = Namespace("https://brickschema.org/schema/ExampleBuilding#")
def test_quantity_has_one_quantitykind():
"""
In the current implementation, using owl:sameAs to align Quantity
with QUDT QuantityKinds, we need to make sure that a Brick Quantity
does not end up with more than 1 QuantityKind
"""
g = brickschema.graph.Graph()
g.load_file("Brick.ttl")
g.bind("qudt", QUDT)
g.expand(profile="owlrl")
quantity_qk = g.query(
"SELECT ?quantity ?kind WHERE {\
?quantity a brick:Quantity .\
?quantity owl:sameAs ?kind }"
)
assert len(quantity_qk) > 0
seen = defaultdict(list)
for quant, quantkind in quantity_qk:
if quant == quantkind:
continue
if "Brick" in quant and "qudt" in quantkind:
seen[quant].append(quantkind)
for quant, kindlist in seen.items():
assert (
len(kindlist) == 1
), f"Quantity {quant} has more than one associated QuantityKind! {kindlist}"
def test_instances_measure_correct_units():
"""
Tests that the units associated with instances are properly linked
through the QuantityKinds
Recall that the Brick unit model is such:
Brick class ---measures---> Brick quantity -- sameAs --> QuantityKind
     |                                                        |
     |                      +---------applicableUnit---------+
     |                      |
     v                      v
  Instance --- hasUnit ---> QUDT unit
We create an instance of each Brick class which 'measures' a quantity
and associate that instance with one of the applicable units as defined
by QUDT. We then verify that all of those units are associated with the
correct quantity
"""
g = brickschema.graph.Graph()
g.load_file("Brick.ttl")
g.bind("qudt", QUDT)
g.expand(profile="owlrl")
# test the definitions by making sure that some quantities have applicable
# units
classes_with_quantities = g.query(
"SELECT ?class ?quantity ?unit WHERE { \
?class a brick:Class .\
?class brick:measures ?quantity .\
?quantity qudt:applicableUnit ?unit }"
)
triples = []
for brickclass, quantity, unit in classes_with_quantities:
class_name = re.split("/|#", brickclass)[-1]
unit_name = re.split("/|#", unit)[-1]
instance_name = f"Instance_of_{class_name}_{unit_name}"
triples.append((BLDG[instance_name], A, brickclass))
triples.append((BLDG[instance_name], BRICK.hasUnit, unit))
g.add(*triples)
g.expand(profile="owlrl")
instances = g.query(
"SELECT ?inst ?quantity ?unit WHERE {\
?inst rdf:type brick:Sensor .\
?inst rdf:type/brick:measures ?quantity .\
?quantity a brick:Quantity .\
?inst brick:hasUnit ?unit .}"
)
assert len(instances) == len(classes_with_quantities)
def test_quantity_units():
g = brickschema.graph.Graph()
g.load_file("Brick.ttl")
g.bind("qudt", QUDT)
g.expand(profile="owlrl")
# test the definitions by making sure that some quantities have applicable
# units
quantities_with_units = g.query(
"SELECT ?q WHERE { \
?q rdf:type brick:Quantity .\
?q qudt:applicableUnit ?unit}"
)
assert len(quantities_with_units) > 0
def test_all_quantities_have_units():
g = brickschema.graph.Graph()
g.load_file("Brick.ttl")
g.bind("qudt", QUDT)
g.expand(profile="owlrl")
# test the definitions by making sure that some quantities have applicable
# units
quantities_without_units = list(
g.query(
"SELECT ?q WHERE { \
?q rdf:type brick:Quantity .\
FILTER NOT EXISTS {?q qudt:applicableUnit ?unit} }"
)
)
if len(quantities_without_units) > 0:
warnings.warn(
f"The following quantities do not have associated units: {quantities_without_units}"
)
| 32.931298
| 96
| 0.608252
|
ace8ec943ff987d21fe4479f9bcf27bf5aab9d24
| 10,397
|
py
|
Python
|
tests/cli/test_add.py
|
julie777/pdm
|
a6029ca02105d79da4841c701edf73f7315f74eb
|
[
"MIT"
] | 1
|
2022-03-02T19:43:46.000Z
|
2022-03-02T19:43:46.000Z
|
tests/cli/test_add.py
|
julie777/pdm
|
a6029ca02105d79da4841c701edf73f7315f74eb
|
[
"MIT"
] | 1
|
2022-03-20T07:36:27.000Z
|
2022-03-20T07:36:27.000Z
|
tests/cli/test_add.py
|
julie777/pdm
|
a6029ca02105d79da4841c701edf73f7315f74eb
|
[
"MIT"
] | null | null | null |
import shutil
from pathlib import Path
import pytest
from pdm.cli import actions
from pdm.models.pip_shims import Link
from pdm.models.specifiers import PySpecSet
from tests import FIXTURES
@pytest.mark.usefixtures("repository")
def test_add_package(project, working_set, is_dev):
actions.do_add(project, is_dev, packages=["requests"])
group = (
project.tool_settings["dev-dependencies"]["dev"]
if is_dev
else project.meta["dependencies"]
)
assert group[0] == "requests~=2.19"
locked_candidates = project.locked_repository.all_candidates
assert locked_candidates["idna"].version == "2.7"
for package in ("requests", "idna", "chardet", "urllib3", "certifi"):
assert package in working_set
def test_add_command(project, invoke, mocker):
do_add = mocker.patch.object(actions, "do_add")
invoke(["add", "requests"], obj=project)
do_add.assert_called_once()
@pytest.mark.usefixtures("repository")
def test_add_package_to_custom_group(project, working_set):
actions.do_add(project, group="test", packages=["requests"])
assert "requests" in project.meta.optional_dependencies["test"][0]
locked_candidates = project.locked_repository.all_candidates
assert locked_candidates["idna"].version == "2.7"
for package in ("requests", "idna", "chardet", "urllib3", "certifi"):
assert package in working_set
@pytest.mark.usefixtures("repository")
def test_add_package_to_custom_dev_group(project, working_set):
actions.do_add(project, dev=True, group="test", packages=["requests"])
dependencies = project.tool_settings["dev-dependencies"]["test"]
assert "requests" in dependencies[0]
locked_candidates = project.locked_repository.all_candidates
assert locked_candidates["idna"].version == "2.7"
for package in ("requests", "idna", "chardet", "urllib3", "certifi"):
assert package in working_set
@pytest.mark.usefixtures("repository", "vcs")
def test_add_editable_package(project, working_set, is_dev):
# Ensure that correct python version is used.
project.environment.python_requires = PySpecSet(">=3.6")
actions.do_add(project, is_dev, packages=["demo"])
actions.do_add(
project,
is_dev,
editables=["git+https://github.com/test-root/demo.git#egg=demo"],
)
group = (
project.tool_settings["dev-dependencies"]["dev"]
if is_dev
else project.meta["dependencies"]
)
assert "demo" in group[0]
assert "-e git+https://github.com/test-root/demo.git#egg=demo" in group[1]
locked_candidates = project.locked_repository.all_candidates
assert (
locked_candidates["demo"].prepare(project.environment).revision
== "1234567890abcdef"
)
assert locked_candidates["idna"].version == "2.7"
assert "idna" in working_set
actions.do_sync(project, no_editable=True)
assert not working_set["demo"].link_file
@pytest.mark.usefixtures("repository", "vcs")
def test_editable_package_override_non_editable(project, working_set):
project.environment.python_requires = PySpecSet(">=3.6")
actions.do_add(
project, packages=["git+https://github.com/test-root/demo.git#egg=demo"]
)
actions.do_add(
project,
editables=["git+https://github.com/test-root/demo.git#egg=demo"],
)
assert working_set["demo"].link_file
@pytest.mark.usefixtures("repository", "working_set")
def test_add_remote_package_url(project, is_dev):
project.environment.python_requires = PySpecSet(">=3.6")
actions.do_add(
project,
is_dev,
packages=["http://fixtures.test/artifacts/demo-0.0.1-py2.py3-none-any.whl"],
)
group = (
project.tool_settings["dev-dependencies"]["dev"]
if is_dev
else project.meta["dependencies"]
)
assert (
group[0]
== "demo @ http://fixtures.test/artifacts/demo-0.0.1-py2.py3-none-any.whl"
)
@pytest.mark.usefixtures("repository")
def test_add_no_install(project, working_set):
actions.do_add(project, sync=False, packages=["requests"])
for package in ("requests", "idna", "chardet", "urllib3", "certifi"):
assert package not in working_set
@pytest.mark.usefixtures("repository")
def test_add_package_save_exact(project):
actions.do_add(project, sync=False, save="exact", packages=["requests"])
assert project.meta.dependencies[0] == "requests==2.19.1"
@pytest.mark.usefixtures("repository")
def test_add_package_save_wildcard(project):
actions.do_add(project, sync=False, save="wildcard", packages=["requests"])
assert project.meta.dependencies[0] == "requests"
@pytest.mark.usefixtures("repository")
def test_add_package_save_minimum(project):
actions.do_add(project, sync=False, save="minimum", packages=["requests"])
assert project.meta.dependencies[0] == "requests>=2.19.1"
def test_add_package_update_reuse(project, repository):
actions.do_add(project, sync=False, save="wildcard", packages=["requests", "pytz"])
locked_candidates = project.locked_repository.all_candidates
assert locked_candidates["requests"].version == "2.19.1"
assert locked_candidates["chardet"].version == "3.0.4"
assert locked_candidates["pytz"].version == "2019.3"
repository.add_candidate("pytz", "2019.6")
repository.add_candidate("chardet", "3.0.5")
repository.add_candidate("requests", "2.20.0")
repository.add_dependencies(
"requests",
"2.20.0",
[
"certifi>=2017.4.17",
"chardet<3.1.0,>=3.0.2",
"idna<2.8,>=2.5",
"urllib3<1.24,>=1.21.1",
],
)
actions.do_add(
project, sync=False, save="wildcard", packages=["requests"], strategy="reuse"
)
locked_candidates = project.locked_repository.all_candidates
assert locked_candidates["requests"].version == "2.20.0"
assert locked_candidates["chardet"].version == "3.0.4"
assert locked_candidates["pytz"].version == "2019.3"
def test_add_package_update_eager(project, repository):
actions.do_add(project, sync=False, save="wildcard", packages=["requests", "pytz"])
locked_candidates = project.locked_repository.all_candidates
assert locked_candidates["requests"].version == "2.19.1"
assert locked_candidates["chardet"].version == "3.0.4"
assert locked_candidates["pytz"].version == "2019.3"
repository.add_candidate("pytz", "2019.6")
repository.add_candidate("chardet", "3.0.5")
repository.add_candidate("requests", "2.20.0")
repository.add_dependencies(
"requests",
"2.20.0",
[
"certifi>=2017.4.17",
"chardet<3.1.0,>=3.0.2",
"idna<2.8,>=2.5",
"urllib3<1.24,>=1.21.1",
],
)
actions.do_add(
project, sync=False, save="wildcard", packages=["requests"], strategy="eager"
)
locked_candidates = project.locked_repository.all_candidates
assert locked_candidates["requests"].version == "2.20.0"
assert locked_candidates["chardet"].version == "3.0.5"
assert locked_candidates["pytz"].version == "2019.3"
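# Note added for clarity (not part of pdm's test suite): the two tests above
# contrast the update strategies -- with strategy="reuse" only the packages
# named in the add call are re-resolved (requests moves to 2.20.0 while
# chardet stays at 3.0.4), whereas strategy="eager" also upgrades their
# dependency chain (chardet moves to 3.0.5); the unrelated pytz pin is left
# untouched either way.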
@pytest.mark.usefixtures("repository")
def test_add_package_with_mismatch_marker(project, working_set, mocker):
mocker.patch(
"pdm.models.environment.get_pep508_environment",
return_value={"platform_system": "Darwin"},
)
actions.do_add(project, packages=["requests", "pytz; platform_system!='Darwin'"])
assert "pytz" not in working_set
@pytest.mark.usefixtures("repository")
def test_add_dependency_from_multiple_parents(project, working_set, mocker):
mocker.patch(
"pdm.models.environment.get_pep508_environment",
return_value={"platform_system": "Darwin"},
)
actions.do_add(project, packages=["requests", "chardet; platform_system!='Darwin'"])
assert "chardet" in working_set
@pytest.mark.usefixtures("repository")
def test_add_packages_without_self(project, working_set):
project.environment.python_requires = PySpecSet(">=3.6")
actions.do_add(project, packages=["requests"], no_self=True)
assert project.meta.name not in working_set
@pytest.mark.usefixtures("repository", "working_set")
def test_add_package_unconstrained_rewrite_specifier(project):
project.environment.python_requires = PySpecSet(">=3.6")
actions.do_add(project, packages=["django"], no_self=True)
locked_candidates = project.locked_repository.all_candidates
assert locked_candidates["django"].version == "2.2.9"
assert project.meta.dependencies[0] == "django~=2.2"
actions.do_add(
project, packages=["django-toolbar"], no_self=True, unconstrained=True
)
locked_candidates = project.locked_repository.all_candidates
assert locked_candidates["django"].version == "1.11.8"
assert project.meta.dependencies[0] == "django~=1.11"
@pytest.mark.usefixtures("repository", "working_set", "vcs")
def test_add_cached_vcs_requirement(project, mocker):
project.environment.python_requires = PySpecSet(">=3.6")
url = "git+https://github.com/test-root/demo.git@1234567890abcdef#egg=demo"
built_path = FIXTURES / "artifacts/demo-0.0.1-py2.py3-none-any.whl"
wheel_cache = project.make_wheel_cache()
cache_path = Path(wheel_cache.get_path_for_link(Link(url)))
if not cache_path.exists():
cache_path.mkdir(parents=True)
shutil.copy2(built_path, cache_path)
downloader = mocker.patch("pdm.models.pip_shims.unpack_url")
builder = mocker.patch("pdm.builders.WheelBuilder.build")
actions.do_add(project, packages=[url], no_self=True)
lockfile_entry = next(p for p in project.lockfile["package"] if p["name"] == "demo")
assert lockfile_entry["revision"] == "1234567890abcdef"
downloader.assert_not_called()
builder.assert_not_called()
@pytest.mark.usefixtures("repository")
def test_add_with_dry_run(project, capsys):
actions.do_add(project, dry_run=True, packages=["requests"])
out, _ = capsys.readouterr()
assert not project.get_dependencies()
assert "requests 2.19.1" in out
assert "urllib3 1.22" in out
@pytest.mark.usefixtures("repository")
def test_add_with_prerelease(project, working_set):
actions.do_add(project, packages=["urllib3"], prerelease=True)
assert working_set["urllib3"].version == "1.23b0"
assert project.meta.dependencies[0] == "urllib3<2,>=1.23b0"
| 37.265233
| 88
| 0.697413
|
e96a7df038b2730ea468a3e0815b91778c6b7e66
| 287
|
py
|
Python
|
speed-tests/python-libs/backend/json_file.py
|
tivvit/shus-benchmarks
|
4a26ceeb3c5fe58478b3c4a0e8da2f1bdd4b2282
|
[
"MIT"
] | null | null | null |
speed-tests/python-libs/backend/json_file.py
|
tivvit/shus-benchmarks
|
4a26ceeb3c5fe58478b3c4a0e8da2f1bdd4b2282
|
[
"MIT"
] | 1
|
2022-03-13T12:13:37.000Z
|
2022-03-13T12:13:37.000Z
|
speed-tests/python-libs/backend/json_file.py
|
tivvit/shush-benchmarks
|
97869e19a57ea556dcf1e0011b8d1104b4a012a8
|
[
"MIT"
] | null | null | null |
import json
class JsonFile(object):
def __init__(self, fn):
with open(fn, "r") as f:
    self.data = json.load(f)
def __contains__(self, item):
return item in self.data
def get_all(self):
return self.data
def get(self, key):
return self.data.get(key)
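# Illustrative usage sketch for the benchmark backend above (the file name
# and contents are hypothetical):
#   backend = JsonFile("data.json")     # data.json contains {"a": 1}
#   "a" in backend                      # -> True
#   backend.get("a")                    # -> 1
#   backend.get_all()                   # -> {"a": 1}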
| 17.9375
| 44
| 0.602787
|
0fb2df8c0fac99769096596a0bbc30ac0b41100d
| 37,177
|
py
|
Python
|
syft/frameworks/torch/hook/hook.py
|
brandonhee/PySyft
|
31217f28aa3d996b2bb84477fb15a990f0cb9a80
|
[
"Apache-2.0"
] | 1
|
2020-07-14T18:19:25.000Z
|
2020-07-14T18:19:25.000Z
|
syft/frameworks/torch/hook/hook.py
|
brandonhee/PySyft
|
31217f28aa3d996b2bb84477fb15a990f0cb9a80
|
[
"Apache-2.0"
] | 1
|
2020-01-25T19:33:08.000Z
|
2020-01-25T19:33:08.000Z
|
syft/frameworks/torch/hook/hook.py
|
brandonhee/PySyft
|
31217f28aa3d996b2bb84477fb15a990f0cb9a80
|
[
"Apache-2.0"
] | 1
|
2021-12-22T05:16:43.000Z
|
2021-12-22T05:16:43.000Z
|
import copy
from functools import wraps
import logging
from math import inf
import torch
from torch import nn
import types
import weakref
import syft
from syft.generic.frameworks.hook import hook_args
from syft.generic.frameworks.hook.hook import FrameworkHook
from syft.generic.tensor import AbstractTensor
from syft.generic.frameworks.remote import Remote
from syft.frameworks.torch.tensors.interpreters.autograd import AutogradTensor
from syft.frameworks.torch.tensors.interpreters.native import TorchTensor
from syft.frameworks.torch.tensors.interpreters.promise import PromiseTensor
from syft.frameworks.torch.tensors.interpreters.hook import HookedTensor
from syft.frameworks.torch.tensors.interpreters.paillier import PaillierTensor
from syft.frameworks.torch.tensors.decorators.logging import LoggingTensor
from syft.frameworks.torch.tensors.interpreters.precision import FixedPrecisionTensor
from syft.frameworks.torch.tensors.interpreters.additive_shared import AdditiveSharingTensor
from syft.frameworks.torch.tensors.interpreters.large_precision import LargePrecisionTensor
from syft.frameworks.torch.tensors.interpreters.private import PrivateTensor
from syft.frameworks.torch.torch_attributes import TorchAttributes
from syft.generic.pointers.multi_pointer import MultiPointerTensor
from syft.generic.pointers.pointer_tensor import PointerTensor
from syft.generic.tensor import initialize_tensor
from syft.generic.tensor import _apply_args
from syft.workers.base import BaseWorker
from syft.workers.virtual import VirtualWorker
from syft.messaging.plan import Plan
from syft.messaging.promise import Promise
from syft.exceptions import route_method_exception
class TorchHook(FrameworkHook):
"""A Hook which Overrides Methods on PyTorch Tensors.
The purpose of this class is to:
* extend torch methods to allow for the moving of tensors from one
worker to another.
* override torch methods to execute commands on one worker that are
called on tensors controlled by the local worker.
This class is typically the first thing you will initialize when using
PySyft with PyTorch because it is responsible for augmenting PyTorch with
PySyft's added functionality (such as remote execution).
Args:
local_worker: An optional BaseWorker instance that lets you provide a
local worker as a parameter which TorchHook will assume to be the
worker owned by the local machine. If you leave it empty,
TorchClient will automatically initialize a
:class:`.workers.VirtualWorker` under the assumption you're looking
to do local experimentation or development.
is_client: An optional boolean parameter (default True), indicating
whether TorchHook is being initialized as an end-user client. This
can impact whether or not variables are deleted when they fall out
of scope. If you set this incorrectly on an end-user client, Tensors
and Variables will never be deleted. If you set this incorrectly on
a remote machine (not a client), tensors will not get saved. It's
really only important if you're not initializing the local worker
yourself.
verbose: An optional boolean parameter (default True) to indicate
whether or not to print the operations as they occur.
queue_size: An integer optional parameter (default 0) to specify the
max length of the list that stores the messages to be sent.
Example:
>>> import torch as th
>>> import syft as sy
>>> hook = sy.TorchHook(th)
Hooking into Torch...
Overloading Complete.
# constructing a normal torch tensor in pysyft
>>> x = th.Tensor([-2,-1,0,1,2,3])
>>> x
-2
-1
0
1
2
3
[syft.core.frameworks.torch.tensor.FloatTensor of size 6]
"""
def __init__(
self, torch, local_worker: BaseWorker = None, is_client: bool = True, verbose: bool = True
):
"""Initializes the hook.
Initialize the hook and define all the attributes pertaining to the
torch hook in a special TorchAttributes class, which will be added to the
syft.torch attributes. Hence, these parameters are now conveyed by the
syft module.
"""
# Save the provided torch module as an attribute of the hook
self.torch = torch
self.framework = self.torch
# Save the local worker as an attribute
self.local_worker = local_worker
if hasattr(torch, "torch_hooked"):
logging.warning("Torch was already hooked... skipping hooking process")
self.local_worker = syft.local_worker
return
else:
torch.torch_hooked = True
# Add all the torch attributes in the syft.torch attr
syft.torch = TorchAttributes(torch, self)
syft.framework = syft.torch
# Hook some torch methods so that tensors can be created directly at workers
self._hook_worker_methods()
if self.local_worker is None:
# Every TorchHook instance should have a local worker which is
# responsible for interfacing with other workers. The worker
# interface is what allows the Torch specific code in TorchHook to
# be agnostic to the means by which workers communicate (such as
# peer-to-peer, sockets, through local ports, or all within the
# same process)
self.local_worker = VirtualWorker(hook=self, is_client_worker=is_client, id="me")
else:
self.local_worker.hook = self
self.to_auto_overload = {}
self.args_hook_for_overloaded_attr = {}
self._hook_native_tensor(torch.Tensor, TorchTensor)
# Add all hooked tensor methods to pointer but change behaviour to have the cmd sent
self._hook_pointer_tensor_methods(self.torch.Tensor)
# Add all hooked tensor methods to AdditiveSharingTensor tensor but change behaviour
# to all shares (when it makes sense, otherwise the method is overwritten in the
# AdditiveSharingTensor class)
self._hook_additive_shared_tensor_methods()
# Add all hooked tensor methods to multi_pointer to change behavior to have the cmd
# sent to all child pointers.
self._hook_multi_pointer_tensor_methods(self.torch.Tensor)
# Add all hooked tensor methods to Logging tensor but change behaviour to just forward
# the cmd to the next child (behaviour can be changed in the SyftTensor class file)
self._hook_syft_tensor_methods(LoggingTensor)
# Add all hooked tensor methods to Paillier tensor but change behaviour to just forward
# the cmd to the next child (behaviour can be changed in the SyftTensor class file)
self._hook_syft_tensor_methods(PaillierTensor)
# Add all hooked tensor methods to FixedPrecisionTensor tensor but change behaviour
# to just forward the cmd to the next child (behaviour can be changed in the
# SyftTensor class file)
self._hook_syft_tensor_methods(FixedPrecisionTensor)
# Add all hooked tensor methods to AutogradTensor tensor but change behaviour
# to just forward the cmd to the next child (behaviour can be changed in the
# SyftTensor class file)
self._hook_syft_tensor_methods(AutogradTensor)
# Add all hooked tensor methods to PrivateTensor tensor but change behaviour
# to just forward the cmd to the next child (behaviour can be changed in the
# SyftTensor class file)
self._hook_private_tensor_methods(PrivateTensor)
# Add all hooked tensor methods to AdditiveSharingTensor tensor but change behaviour
# to just forward the cmd to the next child (behaviour can be changed in the
# SyftTensor class file)
self._hook_syft_tensor_methods(AdditiveSharingTensor)
# Add all hooked tensor methods to LargePrecisionTensor tensor
self._hook_syft_tensor_methods(LargePrecisionTensor)
# Add all hooked tensor methods to NumpyTensor tensor
self._hook_syft_tensor_methods(HookedTensor)
# Add all built-in 'str' methods to String
self._hook_string_methods(owner=self.local_worker)
# Add all string methods to StringPointer
# This method call should strictly come after the
# call to self._hook_string_methods()
self._hook_string_pointer_methods()
# Add all hooked tensor methods to PromiseTensor
self._hook_promise_tensor()
# Hook the tensor constructor function
self._hook_tensor()
# Hook the Parameter methods to store tensor chains in parameters
self._hook_parameters()
# Hook torch functions from modules like torch.add OR torch.nn.functional (containing relu, etc.)
self._hook_torch_module()
# Hook torch.nn (containing Linear and Convolution layers)
self._hook_module()
# Hook torch.optim (containing optim.SGD, Adam, etc)
self._hook_optim()
# Add the local_worker to syft so that it can be found if the hook is
# called several times
syft.local_worker = self.local_worker
syft.hook = self
def create_shape(cls, shape_dims):
return torch.Size(shape_dims)
def create_wrapper(cls, wrapper_type):
# Note this overrides FrameworkHook.create_wrapper, so it must conform to
# that classmethod's signature
assert (
wrapper_type is None or wrapper_type == torch.Tensor
), "TorchHook only uses torch.Tensor wrappers"
return torch.Tensor()
def create_zeros(cls, *shape, dtype=None, **kwargs):
return torch.zeros(*shape, dtype=dtype, **kwargs)
def _hook_native_tensor(self, tensor_type: type, syft_type: type):
"""Adds PySyft Tensor Functionality to the given native tensor type.
Overloads the given native Torch tensor to add PySyft Tensor
Functionality. Overloading involves modifying the tensor type with
PySyft's added functionality. You may read about what kind of
modifications are made in the methods that this method calls.
Args:
tensor_type: The type of tensor being hooked (in this refactor
this is only ever torch.Tensor, but in previous versions of
                PySyft this iterated over all tensor types).
syft_type: The abstract type whose methods should all be added to
the tensor_type class. In practice this is always TorchTensor.
Read more about it there.
"""
# Reinitialize init method of Torch tensor with Syft init
self._add_registration_to___init__(tensor_type, is_tensor=True)
# Overload Torch tensor properties with Syft properties
self._hook_properties(tensor_type)
# Returns a list of methods to be overloaded, stored in the dict to_auto_overload
# with tensor_type as a key
self.to_auto_overload[tensor_type] = self._which_methods_should_we_auto_overload(
tensor_type
)
# [We don't rename native methods as torch tensors are not hooked] Rename native functions
# #self._rename_native_functions(tensor_type)
# Overload auto overloaded with Torch methods
self._transfer_methods_to_native_tensor(tensor_type, syft_type)
self._hook_native_methods(tensor_type)
    def _hook_properties(self, tensor_type):
super()._hook_properties(tensor_type)
tensor_type.native_shape = tensor_type.shape
def _hook_syft_tensor_methods(self, syft_type: type):
tensor_type = self.torch.Tensor
super()._hook_syft_tensor_methods(tensor_type, syft_type)
def _hook_private_tensor_methods(self, syft_type: type):
tensor_type = self.torch.Tensor
super()._hook_private_tensor_methods(tensor_type, syft_type)
def _hook_worker_methods(self):
class Torch(object):
name = "torch"
def __init__(self, worker, *args, **kwargs):
self.worker = weakref.ref(worker)
Remote.register_framework(Torch)
for attr in syft.torch.worker_methods:
new_method = self._get_hooked_base_worker_method(attr)
setattr(Torch, attr, new_method)
def _get_hooked_base_worker_method(hook_self, attr):
@wraps(attr)
def overloaded_attr(self_torch, *args, **kwargs):
ptr = hook_self.local_worker.send_command(
recipient=self_torch.worker(),
message=("{}.{}".format("torch", attr), None, args, kwargs),
)
return ptr.wrap()
return overloaded_attr
def _hook_additive_shared_tensor_methods(self):
"""
Add hooked version of all methods of the torch Tensor to the
Additive Shared tensor: instead of performing the native tensor
method, it will be forwarded to each share when it is relevant
"""
tensor_type = self.torch.Tensor
# Use a pre-defined list to select the methods to overload
for attr in self.to_auto_overload[tensor_type]:
if attr not in dir(AdditiveSharingTensor):
new_method = self._get_hooked_additive_shared_method(attr)
setattr(AdditiveSharingTensor, attr, new_method)
def _hook_parameters(self):
"""
This method overrides the torch Parameter class such that
it works correctly with our overridden tensor types. The
native torch Parameter class kept deleting all of our
attributes on our custom tensors, so we wrote our own.
"""
# Hook __new__ to handle when non-pure torch tensors are given as data attribute
def hooked__new__(cls, data=None, requires_grad=True):
if data is None:
data = torch.Tensor()
# If data is not a pure torch tensor you need to store the chain in a
# specific place otherwise it will get deleted
if not isinstance(data, torch.Tensor) or hasattr(data, "child"):
p = torch.Tensor._make_subclass(cls, torch.Tensor(), requires_grad)
if isinstance(data, torch.Tensor): # so it's a wrapper: remove it
p.child = data.child
else:
p.child = data
else:
p = torch.Tensor._make_subclass(cls, data, requires_grad)
return p
torch.nn.Parameter.__new__ = hooked__new__
# Hook __repr__ to handle chain repr when needed
torch.nn.Parameter.native_param___repr__ = torch.nn.Parameter.__repr__
def hooked__repr__(self):
if hasattr(self, "child"):
return "Parameter containing:\n" + self.child.__repr__()
else:
return self.native_param___repr__()
# torch.nn.Parameter.__repr__ = hooked__repr__
def get_data(self):
if hasattr(self, "child"):
to_return = self.child.attr("data")
else:
to_return = self.native_data
# good to ensure that the ID stays consistent
# not 100% this is required but it's at least
# good practice
try:
to_return.id = self.data_id
except AttributeError:
self.data_id = to_return.id
return to_return
def set_data(self, new_data):
# If data is not a pure torch tensor you need to store the chain in a
# specific place otherwise it will get deleted
if not isinstance(new_data, torch.Tensor) or hasattr(new_data, "child"):
self.child = new_data # .wrap()
else:
if hasattr(self, "child"):
del self.child
with torch.no_grad():
self.native_data = new_data
return self
torch.nn.Parameter.data = property(fget=get_data, fset=set_data)
# Hook .grad to handle chain assignment when needed
torch.nn.Parameter.native_param_grad = torch.nn.Parameter.grad
@property
def grad(self):
if hasattr(self, "child"):
to_return = self.child.attr("grad")
if to_return is not None and isinstance(to_return.child, PointerTensor):
if to_return.child.is_none():
to_return = None
else:
to_return = self.native_param_grad
# good to ensure that the ID stays consistent
# not 100% this is required but it's at least
# good practice
try:
to_return.id = self.grad_id
except AttributeError:
if to_return is not None and hasattr(to_return, "id"):
self.grad_id = to_return.id
return to_return
@grad.setter
def grad(self, new_grad):
# If grad is not a pure torch tensor you need to store the chain in a
# specific place otherwise it will get deleted
if new_grad is not None and (
not isinstance(new_grad, torch.Tensor) or hasattr(new_grad, "child")
):
self.child.grad = new_grad # .wrap()
else:
if self.native_param_grad is not None:
with torch.no_grad():
self.native_param_grad = new_grad
elif new_grad is not None:
self.native_param_grad = new_grad
return self
torch.nn.Parameter.grad = grad
def _hook_torch_module(self):
"""Overloads functions in the main torch modules.
The way this is accomplished is by first moving all existing module
functions in the torch module to native_<function_name_here>.
Example:
the real :func:`torch.cat` will become :func:`torch.native_cat`
and :func:`torch.cat` will have our hooking code.
"""
if torch.__version__ < "1.0.2":
# Hard fix for PyTorch versions < 1.0.2
# usage of torch.jit requires a torch version < torch 1.1, so we still need to support this torch version
syft.torch.apply_fix16922(self.torch)
torch_modules = syft.torch.torch_modules
for module_name, torch_module in torch_modules.items():
for func in dir(torch_module):
# Some functions we want to ignore (not override). Such functions have been hard
# coded into the torch_attribute exclude (see TorchAttribute class)
if func in syft.torch.exclude:
continue
# ignore dunder functions
if "__" in func:
continue
# ignore capitalized func values which are Classes not functions
if func[0].isupper():
continue
                # ignore hidden functions
if func[0] == "_":
continue
# If we haven't already overloaded this function
if "native_" in func or f"native_{func}" in dir(torch_module):
continue
self._perform_function_overloading(module_name, torch_module, func)
@classmethod
def _get_hooked_func(cls, public_module_name, func_api_name, attr):
"""Torch-specific implementation. See the subclass for more."""
if attr.__module__ is None:
attr.__module__ = "torch"
return super()._get_hooked_func(attr.__module__, func_api_name, attr)
def _get_hooked_additive_shared_method(hook_self, attr):
"""
Hook a method to send it multiple remote workers
Args:
attr (str): the method to hook
Return:
the hooked method
"""
def dispatch(args, k):
return map(lambda x: x[k] if isinstance(x, dict) else x, args)
@wraps(attr)
def overloaded_attr(self, *args, **kwargs):
"""
Operate the hooking
"""
# Replace all syft tensor with their child attribute
new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
attr, self, args, kwargs
)
results = {}
for k, v in new_self.items():
results[k] = v.__getattribute__(attr)(*dispatch(new_args, k), **new_kwargs)
# Put back AdditiveSharingTensor on the tensors found in the response
response = hook_args.hook_response(
attr,
results,
wrap_type=AdditiveSharingTensor,
wrap_args=self.get_class_attributes(),
)
return response
return overloaded_attr
def _hook_promise_tensor(hook_self):
methods_to_hook = hook_self.to_auto_overload[torch.Tensor]
def generate_method(method_name):
def method(self, *args, **kwargs):
arg_shapes = list([self.shape])
arg_ids = list([self.id])
# Convert scalar arguments to tensors to be able to use them with plans
args = list(args)
for ia in range(len(args)):
if not isinstance(args[ia], (torch.Tensor, AbstractTensor)):
args[ia] = torch.tensor(args[ia])
for arg in args:
arg_shapes.append(arg.shape)
@syft.func2plan(arg_shapes)
def operation_method(self, *args, **kwargs):
return getattr(self, method_name)(*args, **kwargs)
self.plans.add(operation_method.id)
for arg in args:
if isinstance(arg, PromiseTensor):
arg.plans.add(operation_method.id)
operation_method.procedure.update_args(
[self, *args], operation_method.procedure.result_ids
)
promise_out = PromiseTensor(
owner=self.owner,
shape=operation_method.output_shape,
tensor_type=self.obj_type,
plans=set(),
)
operation_method.procedure.promise_out_id = promise_out.id
if operation_method.owner != self.owner:
operation_method.send(self.owner)
else: # otherwise object not registered on local worker
operation_method.owner.register_obj(operation_method)
return promise_out
return method
for method_name in methods_to_hook:
setattr(PromiseTensor, method_name, generate_method(method_name))
def FloatTensor(shape, *args, **kwargs):
return PromiseTensor(shape, tensor_type="torch.FloatTensor", *args, **kwargs).wrap()
setattr(Promise, "FloatTensor", FloatTensor)
def DoubleTensor(shape, *args, **kwargs):
return PromiseTensor(shape, tensor_type="torch.DoubleTensor", *args, **kwargs).wrap()
setattr(Promise, "DoubleTensor", DoubleTensor)
def HalfTensor(shape, *args, **kwargs):
return PromiseTensor(shape, tensor_type="torch.HalfTensor", *args, **kwargs).wrap()
setattr(Promise, "HalfTensor", HalfTensor)
def ByteTensor(shape, *args, **kwargs):
return PromiseTensor(shape, tensor_type="torch.ByteTensor", *args, **kwargs).wrap()
setattr(Promise, "ByteTensor", ByteTensor)
def CharTensor(shape, *args, **kwargs):
return PromiseTensor(shape, tensor_type="torch.CharTensor", *args, **kwargs).wrap()
setattr(Promise, "CharTensor", CharTensor)
def ShortTensor(shape, *args, **kwargs):
return PromiseTensor(shape, tensor_type="torch.ShortTensor", *args, **kwargs).wrap()
setattr(Promise, "ShortTensor", ShortTensor)
def IntTensor(shape, *args, **kwargs):
return PromiseTensor(shape, tensor_type="torch.IntTensor", *args, **kwargs).wrap()
setattr(Promise, "IntTensor", IntTensor)
def LongTensor(shape, *args, **kwargs):
return PromiseTensor(shape, tensor_type="torch.LongTensor", *args, **kwargs).wrap()
setattr(Promise, "LongTensor", LongTensor)
        def BoolTensor(shape, *args, **kwargs):
return PromiseTensor(shape, tensor_type="torch.BoolTensor", *args, **kwargs).wrap()
setattr(Promise, "BoolTensor", BoolTensor)
def _hook_tensor(hook_self):
"""Hooks the function torch.tensor()
        We need to do this separately from hooking the class because internally
torch does not pick up the change to add the args
Args:
hook_self: the hook itself
"""
if "native_tensor" not in dir(hook_self.torch):
hook_self.torch.native_tensor = hook_self.torch.tensor
def new_tensor(*args, owner=None, id=None, register=True, **kwargs):
current_tensor = hook_self.torch.native_tensor(*args, **kwargs)
_apply_args(hook_self, current_tensor, owner, id)
if register:
current_tensor.owner.register_obj(current_tensor)
return current_tensor
hook_self.torch.tensor = new_tensor
@classmethod
def _transfer_methods_to_native_tensor(cls, tensor_type: type, syft_type: type):
"""Adds methods from the TorchTensor class to the native torch tensor.
The class TorchTensor is a proxy to avoid extending directly the torch
tensor class.
Args:
tensor_type: The tensor type to which we are adding methods
from TorchTensor class.
"""
exclude = [
"__class__",
"__delattr__",
"__dir__",
"__doc__",
"__dict__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__weakref__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__setattr__",
"__sizeof__",
"__subclasshook__",
"_get_type",
# "__eq__", # FIXME it now overwritten in native.py to use torch.eq, because of pb between == & __eq__ See #2030
"__gt__",
"__ge__",
"__lt__",
"__le__",
]
cls._transfer_methods_to_framework_class(tensor_type, syft_type, exclude)
def _hook_module(self):
"""Overloading torch.nn.Module with PySyft functionality, the primary module
responsible for core ML functionality such as Neural network layers and
loss functions.
It is important to note that all the operations are actually in-place.
"""
def module_is_missing_grad(model):
"""Checks if all the parameters in the model have been assigned a gradient"""
for p in model.parameters():
if p.grad is None:
return True
return False
def create_grad_objects(model):
"""Assigns gradient to model parameters if not assigned"""
for p in model.parameters():
o = p.sum()
o.backward()
if p.grad is not None:
p.grad -= p.grad
def module_send_(nn_self, *dest, force_send=False, **kwargs):
"""Overloads torch.nn instances so that they could be sent to other workers"""
if module_is_missing_grad(nn_self):
create_grad_objects(nn_self)
for p in nn_self.parameters():
p.send_(*dest, **kwargs)
if isinstance(nn_self.forward, Plan):
nn_self.forward.send(*dest, force=force_send)
return nn_self
self.torch.nn.Module.send = module_send_
self.torch.nn.Module.send_ = module_send_
def module_move_(nn_self, destination):
params = list(nn_self.parameters())
for p in params:
p.move(destination)
self.torch.nn.Module.move = module_move_
# def module_end_get_(nn_self):
# """Overloads send to remote for torch.nn.Module."""
# if module_is_missing_grad(nn_self):
# create_grad_objects(nn_self)
#
# for p in nn_self.parameters():
# p.end_get()
#
# return nn_self
#
# self.torch.nn.Module.end_get = module_end_get_
#
# def module_move_(nn_self, dest):
# return nn_self.send(dest).end_get()
#
# self.torch.nn.Module.move = module_move_
def module_get_(nn_self):
"""overloads torch.nn instances with get method so that parameters could be sent back to owner"""
for p in nn_self.parameters():
p.get_()
if isinstance(nn_self.forward, Plan):
nn_self.forward.get()
return nn_self
self.torch.nn.Module.get_ = module_get_
self.torch.nn.Module.get = module_get_
def module_share_(nn_self, *args, **kwargs):
"""Overloads fix_precision for torch.nn.Module."""
# TODO: add .data and .grad to syft tensors
if module_is_missing_grad(nn_self):
create_grad_objects(nn_self)
for p in nn_self.parameters():
p.share_(*args, **kwargs)
return nn_self
self.torch.nn.Module.share_ = module_share_
self.torch.nn.Module.share = module_share_
def module_fix_precision_(nn_self, *args, **kwargs):
"""Overloads fix_precision for torch.nn.Module."""
if module_is_missing_grad(nn_self):
create_grad_objects(nn_self)
for p in nn_self.parameters():
p.fix_precision_(*args, **kwargs)
return nn_self
self.torch.nn.Module.fix_precision_ = module_fix_precision_
self.torch.nn.Module.fix_precision = module_fix_precision_
self.torch.nn.Module.fix_prec = module_fix_precision_
def module_float_precision_(nn_self):
"""Overloads float_precision for torch.nn.Module, convert fix_precision
parameters to normal float parameters"""
# TODO: add .data and .grad to syft tensors
# if module_is_missing_grad(nn_self):
# create_grad_objects(nn_self)
for p in nn_self.parameters():
p.float_precision_()
return nn_self
self.torch.nn.Module.float_precision_ = module_float_precision_
self.torch.nn.Module.float_precision = module_float_precision_
self.torch.nn.Module.float_prec = module_float_precision_
def module_copy(nn_self):
"""Returns a copy of a torch.nn.Module"""
return copy.deepcopy(nn_self)
self.torch.nn.Module.copy = module_copy
@property
def owner(nn_self):
for p in nn_self.parameters():
return p.owner
self.torch.nn.Module.owner = owner
@property
def location(nn_self):
try:
for p in nn_self.parameters():
return p.location
except AttributeError:
raise AttributeError(
"Module has no attribute location, did you already send it to some location?"
)
self.torch.nn.Module.location = location
# Make sure PySyft uses the PyTorch version
self.torch.nn.modules.rnn._rnn_impls["LSTM"] = self.torch.lstm
# Add support for GRUs
self.torch.nn.modules.rnn._rnn_impls["GRU"] = self.torch.gru
# Override _VF.LSTM_Cell and _VF.GRU_Cell with torch.LSTM_Cell and torch.GRU_Cell
# With the pytorch-based version
self.torch.nn.modules.rnn._VF = self.torch
def _hook_optim(self):
"""Overloading torch.optim.Optimizer with PySyft functionality. Optimizer
hyper-parameters should indeed be converted to fixed precision to interact
with fixed precision or additive shared tensors.
It is important to note that all the operations are actually in-place.
"""
def optim_fix_precision_(optim_self, *args, **kwargs):
"""Overloads fix_precision for torch.optim.Optimizer"""
for param_group in optim_self.param_groups:
for key, param in param_group.items():
if isinstance(param, (float, int, bool)) and param != 0 and key != "params":
param_group[key] = torch.tensor(param).fix_precision(*args, **kwargs).child
return optim_self
self.torch.optim.Optimizer.fix_precision = optim_fix_precision_
def optim_float_precision_(optim_self):
"""Overloads float_precision for torch.optim.Optimizer, convert fix_precision
hyper-parameters to normal float values"""
for param_group in optim_self.param_groups:
for key, param in param_group.items():
if isinstance(param, FixedPrecisionTensor) and key != "params":
param_group[key] = param.float_precision().item()
return optim_self
self.torch.optim.Optimizer.float_precision = optim_float_precision_
# Modification of torch/nn/utils/clip_grad.py. The plain PyTorch method was not compatible with
# PySyft remote tensors, so this method adds support for gradient clipping of remote tensors,
# and keeps functionalities from PyTorch to clip local PyTorch tensors.
def clip_grad_norm_remote_(parameters, max_norm, norm_type=2):
"""Clips gradient norm of an iterable of parameters stored over a remote model
The norm is computed over all gradients together, as if they were
concatenated into a single vector. Gradients are modified in-place.
Arguments:
- parameters (Iterable[Tensor] or Tensor): an iterable of PySyft remote
            Tensors or PyTorch tensors that will have gradients normalized, or a single PySyft / PyTorch tensor.
- max_norm (float or int): max norm of the gradients
- worker: The worker where the parameters are hosted and where the gradient clipping
will be performed
- norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the parameters (viewed as a single vector).
"""
def param_is_pointer_tensor(param):
"""
A list of parameters is remote if all params contained in the list are
remote (i.e., the child of each param is a pointer tensor).
This method checks if a single param is indeed remote, so whether
the child of a parameter is a pointer tensor
"""
return hasattr(param, "child") and isinstance(param.child, PointerTensor)
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = list(filter(lambda p: p.grad is not None, parameters))
max_norm = float(max_norm)
norm_type = float(norm_type)
if norm_type == inf:
total_norm = max(p.grad.data.abs().max() for p in parameters)
else:
# all parameters are remote
if all([param_is_pointer_tensor(param) for param in parameters]):
total_norm = torch.zeros(1)
# Let's send the total norm over to the remote where the remote tensor is
total_norm = total_norm.send(parameters[0].location)
else:
total_norm = 0
for p in parameters:
param_norm = p.grad.data.norm(norm_type)
# Remote PySyft tensor
if param_is_pointer_tensor(p):
total_norm += param_norm ** norm_type
# Local PySyft tensor
else:
total_norm += param_norm.item() ** norm_type
total_norm = total_norm ** (1.0 / norm_type)
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef < 1:
for p in parameters:
p.grad.data.mul_(clip_coef)
return total_norm
self.torch.nn.utils.clip_grad_norm_ = clip_grad_norm_remote_
| 39.761497
| 124
| 0.6231
|
50e7f02f7329e714ff496e56fbe9b35c2a32f699
| 3,217
|
py
|
Python
|
tests/models/test_autoencoders.py
|
dandelin/pytorch-lightning-bolts
|
8652a8a9f6c3e0d4b034e12d1f7b3a339f92cd0f
|
[
"Apache-2.0"
] | null | null | null |
tests/models/test_autoencoders.py
|
dandelin/pytorch-lightning-bolts
|
8652a8a9f6c3e0d4b034e12d1f7b3a339f92cd0f
|
[
"Apache-2.0"
] | null | null | null |
tests/models/test_autoencoders.py
|
dandelin/pytorch-lightning-bolts
|
8652a8a9f6c3e0d4b034e12d1f7b3a339f92cd0f
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import pytorch_lightning as pl
import torch
from pytorch_lightning import seed_everything
from pl_bolts.datamodules import CIFAR10DataModule
from pl_bolts.models.autoencoders import AE, VAE
from pl_bolts.models.autoencoders import resnet18_encoder, resnet18_decoder
from pl_bolts.models.autoencoders import resnet50_encoder
@pytest.mark.parametrize("dm_cls", [pytest.param(CIFAR10DataModule, id="cifar10")])
def test_vae(tmpdir, dm_cls):
seed_everything()
dm = dm_cls(batch_size=4)
model = VAE(input_height=dm.size()[-1])
trainer = pl.Trainer(
fast_dev_run=True,
default_root_dir=tmpdir,
max_epochs=1,
gpus=None
)
result = trainer.fit(model, dm)
assert result == 1
@pytest.mark.parametrize("dm_cls", [pytest.param(CIFAR10DataModule, id="cifar10")])
def test_ae(tmpdir, dm_cls):
seed_everything()
dm = dm_cls(batch_size=4)
model = AE(input_height=dm.size()[-1])
trainer = pl.Trainer(
fast_dev_run=True,
default_root_dir=tmpdir,
max_epochs=1,
gpus=None
)
result = trainer.fit(model, dm)
assert result == 1
def test_encoder(tmpdir):
img = torch.rand(16, 3, 224, 224)
encoder1 = resnet18_encoder(first_conv=False, maxpool1=True)
encoder2 = resnet50_encoder(first_conv=False, maxpool1=True)
out1 = encoder1(img)
out2 = encoder2(img)
assert out1.shape == (16, 512)
assert out2.shape == (16, 2048)
def test_decoder(tmpdir):
latent_dim = 128
input_height = 288 # random but has to be a multiple of 32 for first_conv=True, maxpool1=True
decoder1 = resnet18_decoder(latent_dim=latent_dim, input_height=input_height, first_conv=True, maxpool1=True)
decoder2 = resnet18_decoder(latent_dim=latent_dim, input_height=input_height, first_conv=True, maxpool1=False)
decoder3 = resnet18_decoder(latent_dim=latent_dim, input_height=input_height, first_conv=False, maxpool1=True)
decoder4 = resnet18_decoder(latent_dim=latent_dim, input_height=input_height, first_conv=False, maxpool1=False)
z = torch.rand(2, latent_dim)
out1 = decoder1(z)
out2 = decoder2(z)
out3 = decoder3(z)
out4 = decoder4(z)
assert out1.shape == (2, 3, 288, 288)
assert out2.shape == (2, 3, 288, 288)
assert out3.shape == (2, 3, 288, 288)
assert out4.shape == (2, 3, 288, 288)
def test_from_pretrained(tmpdir):
vae = VAE(input_height=32)
ae = AE(input_height=32)
assert len(VAE.pretrained_weights_available()) > 0
assert len(AE.pretrained_weights_available()) > 0
exception_raised = False
try:
vae = vae.from_pretrained('cifar10-resnet18')
vae = vae.from_pretrained('stl10-resnet18') # try loading weights not compatible with exact architecture
ae = ae.from_pretrained('cifar10-resnet18')
except Exception as e:
exception_raised = True
assert exception_raised is False, "error in loading weights"
keyerror = False
try:
vae = vae.from_pretrained('abc')
ae = ae.from_pretrained('xyz')
except KeyError:
keyerror = True
assert keyerror is True, "KeyError not raised when provided with illegal checkpoint name"
| 29.513761
| 115
| 0.700964
|
411fa371cd9b60edc221ad75ad98c1ff5fb952f0
| 8,041
|
py
|
Python
|
benchmarking/mgd3d.py
|
0xDBFB7/Nyion
|
9bd1c3a8a70e9d1c75e04ab63325a42cc983dedb
|
[
"MIT"
] | null | null | null |
benchmarking/mgd3d.py
|
0xDBFB7/Nyion
|
9bd1c3a8a70e9d1c75e04ab63325a42cc983dedb
|
[
"MIT"
] | null | null | null |
benchmarking/mgd3d.py
|
0xDBFB7/Nyion
|
9bd1c3a8a70e9d1c75e04ab63325a42cc983dedb
|
[
"MIT"
] | null | null | null |
"""
2017 (c) A. R. Malipeddi
3D geometric multigrid code for Poisson's equation in a cube.
- Finite difference method
- 7pt operator
- trilinear interpolation
- Two-color Gauss Seidel smoothing
"""
import matplotlib.pyplot as plt
import numpy as np
def GSrelax(B,nx,ny,nz,u,f,iters=1,flag=1):
'''
Red-Black Gauss Seidel smoothing
flag : 1 = pre-sweep
2 = post-sweep
'''
dx=1.0/nx
dy=1.0/ny
dz=1.0/nz
Ax=1.0/dx**2
Ay=1.0/dy**2
Az=1.0/dz**2
Ap=1.0/(2.0*(1.0/dx**2+1.0/dy**2+1.0/dz**2))
#BCs. Needs to be generalized!
u[ 0,:,:] = 0
u[-1,:,:] = 0
u[: ,0,:] = 0
u[:,-1,:] = 0
u[:,:, 0] = 0
u[:,:,-1] = 0
T = u.copy()
for it in range(iters):
for i in range(1,nx+1):
for j in range(1,ny+1):
for k in range(1,nz+1):
if(not B[i,j,k]):
T[i,j,k]= Ap*( Ax*(u[i+1,j,k]+u[i-1,j,k])
+ Ay*(u[i,j+1,k]+u[i,j-1,k])
+ Az*(u[i,j,k+1]+u[i,j,k-1])
- f[i,j,k])
np.copyto(u,T)
#BCs. Needs to be generalized!
u[ 0,:,:] = 0
u[-1,:,:] = 0
u[: ,0,:] = 0
u[:,-1,:] = 0
u[:,:, 0] = 0
u[:,:,-1] = 0
#if residual not needed
if(flag==2):
return u,None
res=np.zeros([nx+2,ny+2,nz+2])
for i in range(1,nx+1):
for j in range(1,ny+1):
for k in range(1,nz+1):
if(not B[i,j,k]):
res[i,j,k]=f[i,j,k] - (Ax*(u[i+1,j,k]+u[i-1,j,k])
+ Ay*(u[i,j+1,k]+u[i,j-1,k])
+ Az*(u[i,j,k+1]+u[i,j,k-1])
- 2.0*(Ax+Ay+Az)*u[i,j,k])
return u,res
def restrict(B,nx,ny,nz,v):
'''
restrict 'v' to the coarser grid
'''
v_c=np.zeros([nx+2,ny+2,nz+2])
for i in range(1,nx+1):
for j in range(1,ny+1):
for k in range(1,nz+1):
# if(not B[i*2,j*2,k*2]):
v_c[i,j,k]=0
count = 0
if(not B[i*2-1,j*2,k*2]):
v_c[i,j,k] += v[2*i-1,2*j,2*k]
count+=1
if(not B[i*2,j*2-1,k*2]):
v_c[i,j,k] += v[2*i,2*j-1,2*k]
count+=1
                if(not B[i*2,j*2,k*2-1]):
                    v_c[i,j,k] += v[2*i,2*j,2*k-1]
                    count+=1
if(not B[i*2,j*2,k*2]):
v_c[i,j,k] += v[2*i,2*j,2*k]
count+=1
v_c[i,j,k] /= count
# 0.125*(v[2*i-1,2*j-1,2*k-1]+v[2*i,2*j-1,2*k-1]+v[2*i-1,2*j,2*k-1]+v[2*i,2*j,2*k-1]
# +v[2*i-1,2*j-1,2*k ]+v[2*i,2*j-1,2*k ]+v[2*i-1,2*j,2*k ]+v[2*i,2*j,2*k ])
return v_c
def restrict_B(nx,ny,nz,B):
v_c=np.zeros([nx+2,ny+2,nz+2])
for i in range(1,nx+1):
for j in range(1,ny+1):
for k in range(1,nz+1):
v_c[i,j,k] = B[i*2,j*2,k*2]
return v_c
# def prolong(B,nx,ny,nz,v):
# '''
# interpolate correction to the fine grid
# '''
# v_f=np.zeros([2*nx+2,2*ny+2,2*nz+2])
#
# a=27.0/64
# b= 9.0/64
# c= 3.0/64
# d= 1.0/64
#
# for i in range(1,nx+1):
# for j in range(1,ny+1):
# for k in range(1,nz+1):
# v_f[2*i-1,2*j-1,2*k-1] = a*v[i,j,k] + b*(v[i-1,j,k] + v[i,j-1,k] + v[i,j,k-1]) + c*(v[i-1,j-1,k] + v[i-1,j,k-1] + v[i,j-1,k-1]) + d*v[i-1,j-1,k-1]
# v_f[2*i ,2*j-1,2*k-1] = a*v[i,j,k] + b*(v[i+1,j,k] + v[i,j-1,k] + v[i,j,k-1]) + c*(v[i+1,j-1,k] + v[i+1,j,k-1] + v[i,j-1,k-1]) + d*v[i+1,j-1,k-1]
# v_f[2*i-1,2*j ,2*k-1] = a*v[i,j,k] + b*(v[i-1,j,k] + v[i,j+1,k] + v[i,j,k-1]) + c*(v[i-1,j+1,k] + v[i-1,j,k-1] + v[i,j+1,k-1]) + d*v[i-1,j+1,k-1]
# v_f[2*i ,2*j ,2*k-1] = a*v[i,j,k] + b*(v[i+1,j,k] + v[i,j+1,k] + v[i,j,k-1]) + c*(v[i+1,j+1,k] + v[i+1,j,k-1] + v[i,j+1,k-1]) + d*v[i+1,j+1,k-1]
# v_f[2*i-1,2*j-1,2*k ] = a*v[i,j,k] + b*(v[i-1,j,k] + v[i,j-1,k] + v[i,j,k+1]) + c*(v[i-1,j-1,k] + v[i-1,j,k+1] + v[i,j-1,k+1]) + d*v[i-1,j-1,k+1]
# v_f[2*i ,2*j-1,2*k ] = a*v[i,j,k] + b*(v[i+1,j,k] + v[i,j-1,k] + v[i,j,k+1]) + c*(v[i+1,j-1,k] + v[i+1,j,k+1] + v[i,j-1,k+1]) + d*v[i+1,j-1,k+1]
# v_f[2*i-1,2*j ,2*k ] = a*v[i,j,k] + b*(v[i-1,j,k] + v[i,j+1,k] + v[i,j,k+1]) + c*(v[i-1,j+1,k] + v[i-1,j,k+1] + v[i,j+1,k+1]) + d*v[i-1,j+1,k+1]
# v_f[2*i ,2*j ,2*k ] = a*v[i,j,k] + b*(v[i+1,j,k] + v[i,j+1,k] + v[i,j,k+1]) + c*(v[i+1,j+1,k] + v[i+1,j,k+1] + v[i,j+1,k+1]) + d*v[i+1,j+1,k+1]
#
# return v_f
def prolong(B,nx,ny,nz,v):
v_f=np.zeros([2*nx+2,2*ny+2,2*nz+2])
for x in range(1, nx+1):
for y in range(1, ny+1):
for z in range(1, nz+1):
V000 = v[x,y,z]
V001 = v[x,y,z+1]
V010 = v[x,y+1,z]
V100 = v[x+1,y,z]
V101 = v[x+1,y,z+1]
V110 = v[x+1,y+1,z]
V111 = v[x+1,y+1,z+1]
for i in range(0,2):
for j in range(0,2):
for k in range(0,2):
f_x = float(i)/2
f_y = float(j)/2
f_z = float(k)/2
v_f[2*x+i,2*y+j,2*z+k] = 0
v_f[2*x+i,2*y+j,2*z+k] += V000*(1.0-f_x)*(1.0-f_y)*(1.0-f_z)
v_f[2*x+i,2*y+j,2*z+k] += V001*(1.0-f_x)*(1.0-f_y)*(f_z)
v_f[2*x+i,2*y+j,2*z+k] += V010*(1.0-f_x)*(f_y)*(1.0-f_z)
v_f[2*x+i,2*y+j,2*z+k] += V100*(f_x)*(1.0-f_y)*(1.0-f_z)
v_f[2*x+i,2*y+j,2*z+k] += V101*(f_x)*(1.0-f_y)*(f_z)
v_f[2*x+i,2*y+j,2*z+k] += V110*(f_x)*(f_y)*(1.0-f_z)
v_f[2*x+i,2*y+j,2*z+k] += V111*(f_x)*(f_y)*(f_z)
return v_f
def V_cycle(B,nx,ny,nz,num_levels,u,f,level=1):
'''
V cycle
'''
if(level==num_levels):#bottom solve
u,res=GSrelax(B,nx,ny,nz,u,f,iters=100)
return u,res
#Step 1: Relax Au=f on this grid
u,res=GSrelax(B,nx,ny,nz,u,f,2)
# plt.figure()
# plt.subplot(2, 3, 2)
# plt.gca().set_title('Potentials')
# plt.plot(u[:,int(nx/2),int(nx/2)])
#
# plt.subplot(2, 3, 3)
# plt.gca().set_title('Residuals')
# plt.plot(res[:,int(nx/2),int(nx/2)])
# plt.subplot(2, 3, 3)
# plt.plot(B[:,int(nx/2),int(nx/2)])
#Step 2: Restrict residual to coarse grid
res_c=restrict(B,nx//2,ny//2,nz//2,res)
b_c=restrict_B(nx//2,ny//2,nz//2,B)
#
# plt.subplot(2, 3, 4)
# plt.gca().set_title('Restricted Residuals')
# plt.plot(res_c[:,int(nx/4),int(nx/4)])
# plt.subplot(2, 3, 4)
# plt.plot(b_c[:,int(nx/4),int(nx/4)])
print(np.amax(u))
    #Step 3: Solve A e_c=res_c on the coarse grid (recursively)
e_c=np.zeros_like(res_c)
e_c,res_c=V_cycle(b_c,nx//2,ny//2,nz//2,num_levels,e_c,res_c,level+1)
#
# plt.subplot(2, 3, 5)
# plt.gca().set_title('Restricted Correction')
# plt.plot(e_c[:,int(nx/4),int(nx/4)])
#Step 4: Interpolate(prolong) e_c to fine grid and add to u
R = prolong(B,nx//2,ny//2,nz//2,e_c)*(1.0-B)
u+= R
#
# plt.subplot(2, 3, 6)
# plt.gca().set_title('Prolongated Correction')
# plt.plot(R[:,int(nx/2),int(nx/2)])
# plt.subplot(2, 3, 6)
# plt.gca().set_title('Prolongated Correction')
# plt.plot(B[:,int(nx/2),int(nx/2)])
#
# plt.draw()
# plt.pause(1)
#Step 5: Relax Au=f on this grid
if(level==1):
u,res=GSrelax(B,nx,ny,nz,u,f,2,flag=1)
input(">")
else:
u,res=GSrelax(B,nx,ny,nz,u,f,2,flag=2)
return u,res
def FMG(B,nx,ny,nz,num_levels,f,nv=1,level=1):
if(level==num_levels):#bottom solve
u=np.zeros([nx+2,ny+2,nz+2])
u,res=GSrelax(B,nx,ny,nz,u,f,iters=100)
return u,res
#Step 1: Restrict the rhs to a coarse grid
f_c=restrict(B,nx//2,ny//2,nz//2,f)
b_c=restrict_B(nx//2,ny//2,nz//2,B)
#Step 2: Solve the coarse grid problem using FMG
u_c,_=FMG(b_c,nx//2,ny//2,nz//2,num_levels,f_c,nv,level+1)
#Step 3: Interpolate u_c to the fine grid
u=prolong(B,nx//2,ny//2,nz//2,u_c)
#step 4: Execute 'nv' V-cycles
for _ in range(nv):
u,res=V_cycle(B,nx,ny,nz,num_levels-level,u,f)
return u,res
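# ---------------------------------------------------------------------------
# Illustrative usage sketch (an addition; the grid size, source placement and
# level count are arbitrary demonstration choices): solve for a single point
# source on a small 8^3 grid with no interior boundary cells, using two
# multigrid levels. Deeper hierarchies hit the interactive input(">") debug
# pause inside V_cycle, so only two levels are used here.
if __name__ == "__main__":
    n = 8
    num_levels = 2                          # coarsest grid is then 4^3
    B = np.zeros([n + 2, n + 2, n + 2])     # boundary mask: all cells free
    f = np.zeros([n + 2, n + 2, n + 2])     # right-hand side
    f[n // 2, n // 2, n // 2] = 1.0         # point source at the centre
    u, res = FMG(B, n, n, n, num_levels, f, nv=1)
    print("max |u| =", np.abs(u).max(), " max |res| =", np.abs(res).max())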
| 31.65748
| 160
| 0.456162
|
d8e8c5c667b515fa69e7c17067714412651ba8cc
| 453
|
py
|
Python
|
src/sorunlib/commands.py
|
simonsobs/sorunlib
|
08f638c2dc77d69029acdb6deb1ed2e0fbb8bfc8
|
[
"BSD-2-Clause"
] | null | null | null |
src/sorunlib/commands.py
|
simonsobs/sorunlib
|
08f638c2dc77d69029acdb6deb1ed2e0fbb8bfc8
|
[
"BSD-2-Clause"
] | 6
|
2022-03-10T17:07:20.000Z
|
2022-03-14T15:18:44.000Z
|
src/sorunlib/commands.py
|
simonsobs/sorunlib
|
08f638c2dc77d69029acdb6deb1ed2e0fbb8bfc8
|
[
"BSD-2-Clause"
] | null | null | null |
import time
import datetime as dt
def wait(target_time):
"""Wait until a specified time.
Args:
target_time (str): Time in ISO format to wait until,
i.e. "2015-10-21T07:28:00"
"""
t0 = dt.datetime.now()
t1 = dt.datetime.fromisoformat(target_time)
assert t1 > t0, f"time {t1} is in the past"
diff = t1 - t0
print(f"Waiting for {diff.total_seconds()} seconds")
time.sleep(diff.total_seconds())
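if __name__ == "__main__":
    # Illustrative example (the ten-second offset is an arbitrary choice):
    # build an ISO-format target time shortly in the future and wait for it.
    target = (dt.datetime.now() + dt.timedelta(seconds=10)).isoformat()
    wait(target)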
| 21.571429
| 60
| 0.624724
|
da0adbbacd391d78f15a1f417375b94a89a01ce0
| 20,426
|
py
|
Python
|
tmtoolkit/topicmod/evaluate.py
|
samir-joshi/tmtoolkit
|
41f5cbddad539107016680d132ca1ee9ebe49b5c
|
[
"Apache-2.0"
] | 167
|
2017-11-11T11:49:32.000Z
|
2022-03-15T11:45:11.000Z
|
tmtoolkit/topicmod/evaluate.py
|
samir-joshi/tmtoolkit
|
41f5cbddad539107016680d132ca1ee9ebe49b5c
|
[
"Apache-2.0"
] | 23
|
2017-11-27T22:44:09.000Z
|
2022-02-23T03:31:30.000Z
|
tmtoolkit/topicmod/evaluate.py
|
shalevy1/tmtoolkit
|
42fdd388e606d6d1c45d80d2364dfa42b8408111
|
[
"Apache-2.0"
] | 30
|
2018-03-27T13:00:41.000Z
|
2022-03-28T12:15:33.000Z
|
"""
Metrics for topic model evaluation.
In order to run model evaluations in parallel use one of the modules :mod:`~tmtoolkit.topicmod.tm_gensim`,
:mod:`~tmtoolkit.topicmod.tm_lda` or :mod:`~tmtoolkit.topicmod.tm_sklearn`.
"""
import numpy as np
from scipy.spatial.distance import pdist
from scipy.sparse import issparse
from scipy.special import gammaln
from ._eval_tools import FakedGensimDict
from tmtoolkit.bow.dtm import dtm_and_vocab_to_gensim_corpus_and_dict
from .model_stats import top_words_for_topics
from tmtoolkit.bow.bow_stats import doc_frequencies, codoc_frequencies
from ..utils import argsort
#%% Evaluation metrics
def metric_held_out_documents_wallach09(dtm_test, theta_test, phi_train, alpha, n_samples=10000):
"""
Estimation of the probability of held-out documents according to [Wallach2009]_ using a
document-topic estimation `theta_test` that was estimated via held-out documents `dtm_test` on a trained model with
a topic-word distribution `phi_train` and a document-topic prior `alpha`. Draw `n_samples` according to `theta_test`
for each document in `dtm_test` (memory consumption and run time can be very high for larger `n_samples` and
    a large number of big documents in `dtm_test`).
A document-topic estimation `theta_test` can be obtained from a trained model from the "lda" package or scikit-learn
package with the `transform()` method.
Adopted MATLAB code `originally from Ian Murray, 2009 <https://people.cs.umass.edu/~wallach/code/etm/>`_ and
downloaded from `umass.edu <https://people.cs.umass.edu/~wallach/code/etm/lda_eval_matlab_code_20120930.tar.gz>`_.
.. note:: Requires `gmpy2 <https://github.com/aleaxit/gmpy>`_ package for multiple-precision arithmetic to avoid
numerical underflow.
.. [Wallach2009] Wallach, H.M., Murray, I., Salakhutdinov, R. and Mimno, D., 2009. Evaluation methods for
topic models.
:param dtm_test: held-out documents of shape NxM with N documents and vocabulary size M
:param theta_test: document-topic estimation of `dtm_test`; shape NxK with K topics
:param phi_train: topic-word distribution of a trained topic model that should be evaluated; shape KxM
:param alpha: document-topic prior of the trained topic model that should be evaluated; either a scalar or an array
of length K
:return: estimated probability of held-out documents
"""
import gmpy2
n_test_docs, n_vocab = dtm_test.shape
if n_test_docs != theta_test.shape[0]:
raise ValueError('shapes of `dtm_test` and `theta_test` do not match (unequal number of documents)')
_, n_topics = theta_test.shape
if n_topics != phi_train.shape[0]:
raise ValueError('shapes of `theta_test` and `phi_train` do not match (unequal number of topics)')
if n_vocab != phi_train.shape[1]:
raise ValueError('shapes of `dtm_test` and `phi_train` do not match (unequal size of vocabulary)')
if isinstance(alpha, np.ndarray):
alpha_sum = np.sum(alpha)
else:
alpha_sum = alpha * n_topics
alpha = np.repeat(alpha, n_topics)
if alpha.shape != (n_topics, ):
raise ValueError('`alpha` has invalid shape (should be vector of length n_topics)')
# samples: random topic assignments for each document
# shape: n_test_docs x n_samples
# values in [0, n_topics) ~ theta_test
samples = np.array([np.random.choice(n_topics, n_samples, p=theta_test[d, :])
for d in range(n_test_docs)])
assert samples.shape == (n_test_docs, n_samples)
assert 0 <= samples.min() < n_topics
assert 0 <= samples.max() < n_topics
# n_k: number of documents per topic and sample
# shape: n_topics x n_samples
# values in [0, n_test_docs]
n_k = np.array([np.sum(samples == t, axis=0) for t in range(n_topics)])
assert n_k.shape == (n_topics, n_samples)
assert 0 <= n_k.min() <= n_test_docs
assert 0 <= n_k.max() <= n_test_docs
# calculate log p(z) for each sample
# shape: 1 x n_samples
log_p_z = np.sum(gammaln(n_k + alpha[:, np.newaxis]), axis=0) + gammaln(alpha_sum) \
- np.sum(gammaln(alpha)) - gammaln(n_test_docs + alpha_sum)
assert log_p_z.shape == (n_samples,)
# calculate log p(w|z) for each sample
# shape: 1 x n_samples
log_p_w_given_z = np.zeros(n_samples)
dtm_is_sparse = issparse(dtm_test)
for d in range(n_test_docs):
if dtm_is_sparse:
word_counts_d = dtm_test[d].toarray().flatten()
else:
word_counts_d = dtm_test[d]
words = np.repeat(np.arange(n_vocab), word_counts_d)
assert words.shape == (word_counts_d.sum(),)
phi_topics_d = phi_train[samples[d]] # phi for topics in samples for document d
log_p_w_given_z += np.sum(np.log(phi_topics_d[:, words]), axis=1)
log_joint = log_p_z + log_p_w_given_z
# calculate log theta_test
# shape: 1 x n_samples
log_theta_test = np.zeros(n_samples)
for d in range(n_test_docs):
log_theta_test += np.log(theta_test[d, samples[d]])
# compare
log_weights = log_joint - log_theta_test
# calculate final log evidence
# requires using gmpy2 to avoid numerical underflow
exp_sum = gmpy2.mpfr(0)
for exp in (gmpy2.exp(x) for x in log_weights):
exp_sum += exp
return float(gmpy2.log(exp_sum)) - np.log(n_samples)
metric_held_out_documents_wallach09.direction = 'maximize'
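# Illustrative sketch of how `theta_test` and `phi_train` can be obtained from
# a scikit-learn LDA model before calling the metric above; `dtm_train` and
# `dtm_test` are assumed to be document-term matrices prepared by the caller:
#
#     >>> from sklearn.decomposition import LatentDirichletAllocation
#     >>> lda = LatentDirichletAllocation(n_components=10, doc_topic_prior=0.1)
#     >>> lda.fit(dtm_train)                                  # doctest: +SKIP
#     >>> phi_train = lda.components_ / lda.components_.sum(axis=1)[:, np.newaxis]
#     >>> theta_test = lda.transform(dtm_test)
#     >>> metric_held_out_documents_wallach09(dtm_test, theta_test, phi_train,
#     ...                                     alpha=0.1, n_samples=1000)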
def metric_cao_juan_2009(topic_word_distrib):
"""
Calculate metric as in [Cao2008]_ using topic-word distribution `topic_word_distrib`.
.. [Cao2008] Cao Juan, Xia Tian, Li Jintao, Zhang Yongdong, and Tang Sheng. 2009. A density-based method for
adaptive LDA model selection. Neurocomputing — 16th European Symposium on Artificial Neural Networks
2008 72, 7–9: 1775–1781. <http://doi.org/10.1016/j.neucom.2008.06.011>.
:param topic_word_distrib: topic-word distribution; shape KxM, where K is number of topics, M is vocabulary size
:return: calculated metric
"""
# pdist will calculate the pair-wise cosine distance between all topics in the topic-word distribution
# then calculate the mean of cosine similarity (1 - cosine_distance)
cos_sim = 1 - pdist(topic_word_distrib, metric='cosine')
return np.mean(cos_sim)
metric_cao_juan_2009.direction = 'minimize'
def metric_arun_2010(topic_word_distrib, doc_topic_distrib, doc_lengths):
"""
Calculate metric as in [Arun2010]_ using topic-word distribution `topic_word_distrib`, document-topic
distribution `doc_topic_distrib` and document lengths `doc_lengths`.
    .. note:: It will fail when the number of words in the vocabulary is less than the number of topics (which is very unusual).
.. [Arun2010] Rajkumar Arun, V. Suresh, C. E. Veni Madhavan, and M. N. Narasimha Murthy. 2010. On finding the natural
number of topics with latent dirichlet allocation: Some observations. In Advances in knowledge discovery and
data mining, Mohammed J. Zaki, Jeffrey Xu Yu, Balaraman Ravindran and Vikram Pudi (eds.). Springer Berlin
Heidelberg, 391–402. http://doi.org/10.1007/978-3-642-13657-3_43.
:param topic_word_distrib: topic-word distribution; shape KxM, where K is number of topics, M is vocabulary size
:param doc_topic_distrib: document-topic distribution; shape NxK, where N is the number of documents
:param doc_lengths: array of length `N` with number of tokens per document
:return: calculated metric
"""
# CM1 = SVD(M1)
cm1 = np.linalg.svd(topic_word_distrib, compute_uv=False)
#cm1 /= np.sum(cm1) # normalize by L1 norm # the paper says nothing about normalizing so let's leave it as it is...
# CM2 = L*M2 / norm2(L)
if doc_lengths.shape[0] != 1:
doc_lengths = doc_lengths.T
cm2 = np.array(doc_lengths * np.matrix(doc_topic_distrib))[0]
cm2 /= np.linalg.norm(doc_lengths, 2)
# wrong:
#cm2 /= np.linalg.norm(cm2, 2) # normalize by L2 norm
# also wrong:
#cm2 /= np.sum(cm2) # normalize by L1 norm
# symmetric Kullback-Leibler divergence KL(cm1||cm2) + KL(cm2||cm1)
# KL is called entropy in scipy
# we can't use this because entropy() will normalize the vectors so that they sum up to 1 but this should not
# be done according to the paper
#return entropy(cm1, cm2) + entropy(cm2, cm1)
# use it as in the paper (note: cm1 and cm2 are not prob. distributions that sum up to 1)
return np.sum(cm1*np.log(cm1/cm2)) + np.sum(cm2*np.log(cm2/cm1))
metric_arun_2010.direction = 'minimize'
def metric_griffiths_2004(logliks):
"""
Calculate metric as in [GriffithsSteyvers2004]_.
Calculates the harmonic mean of the log-likelihood values `logliks`. Burn-in values
should already be removed from `logliks`.
.. [GriffithsSteyvers2004] Thomas L. Griffiths and Mark Steyvers. 2004. Finding scientific topics. Proceedings of
the National Academy of Sciences 101, suppl 1: 5228–5235.
http://doi.org/10.1073/pnas.0307752101
.. note:: Requires `gmpy2 <https://github.com/aleaxit/gmpy>`_ package for multiple-precision arithmetic to avoid
numerical underflow.
:param logliks: array with log-likelihood values
:return: calculated metric
"""
import gmpy2
# using median trick as in Martin Ponweiser's Diploma Thesis 2012, p.36
ll_med = np.median(logliks)
ps = [gmpy2.exp(ll_med - x) for x in logliks]
ps_mean = gmpy2.mpfr(0)
for p in ps:
ps_mean += p / len(ps)
return float(ll_med - gmpy2.log(ps_mean)) # after taking the log() we can use a Python float() again
metric_griffiths_2004.direction = 'maximize'
def metric_coherence_mimno_2011(topic_word_distrib, dtm, top_n=20, eps=1e-12, normalize=True, return_mean=False):
"""
Calculate coherence metric according to [Mimno2011]_ (a.k.a. "U_Mass" coherence metric). There are two
modifications to the originally suggested measure:
- uses a different epsilon by default (set `eps=1` for original)
- uses a normalizing constant by default (set `normalize=False` for original)
Provide a topic word distribution as `topic_word_distrib` and a document-term-matrix `dtm` (can be sparse).
`top_n` controls how many most probable words per topic are selected.
By default, it will return a NumPy array of coherence values per topic (same ordering as in `topic_word_distrib`).
Set `return_mean` to True to return the mean of all topics instead.
.. [Mimno2011] D. Mimno, H. Wallach, E. Talley, M. Leenders, A. McCullum 2011: Optimizing semantic coherence in
topic models
:param topic_word_distrib: topic-word distribution; shape KxM, where K is number of topics, M is vocabulary size
:param dtm: document-term matrix of shape NxM with N documents and vocabulary size M
:param top_n: number of most probable words selected per topic
:param eps: smoothing constant epsilon
:param normalize: if True, normalize coherence values
:param return_mean: if True, return mean of all coherence values, otherwise array of coherence per topic
:return: if `return_mean` is True, mean of all coherence values, otherwise array of length K with coherence per
topic
"""
n_topics, n_vocab = topic_word_distrib.shape
if n_vocab != dtm.shape[1]:
raise ValueError('shapes of provided `topic_word_distrib` and `dtm` do not match (vocab sizes differ)')
if top_n > n_vocab:
raise ValueError('`top_n=%d` is larger than the vocabulary size of %d words'
% (top_n, topic_word_distrib.shape[1]))
top_words = top_words_for_topics(topic_word_distrib, top_n) # V
if issparse(dtm) and dtm.format != 'csc':
dtm = dtm.tocsc()
coh = []
for t in range(n_topics):
c_t = 0
v = top_words[t]
top_dtm = dtm[:, v]
df = doc_frequencies(top_dtm) # D(v)
codf = codoc_frequencies(top_dtm) # D(v, v')
for m in range(1, top_n):
for l in range(m):
c_t += np.log((codf[m, l] + eps) / df[l])
coh.append(c_t)
coh = np.array(coh)
if normalize:
coh *= 2 / (top_n * (top_n-1))
if return_mean:
return coh.mean()
else:
return coh
metric_coherence_mimno_2011.direction = 'maximize'
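# Illustrative call, with `phi` a KxM topic-word distribution and `dtm` the
# corresponding document-term matrix (both assumed to be prepared by the
# caller):
#
#     >>> metric_coherence_mimno_2011(phi, dtm, top_n=20, return_mean=True)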
def metric_coherence_gensim(measure, topic_word_distrib=None, gensim_model=None, vocab=None, dtm=None,
gensim_corpus=None, texts=None, top_n=20,
return_coh_model=False, return_mean=False, **kwargs):
"""
Calculate model coherence using Gensim's
`CoherenceModel <https://radimrehurek.com/gensim/models/coherencemodel.html>`_. See also this `tutorial
<https://rare-technologies.com/what-is-topic-coherence/>`_.
Define which measure to use with parameter `measure`:
- ``'u_mass'``
- ``'c_v'``
- ``'c_uci'``
- ``'c_npmi'``
Provide a topic word distribution `topic_word_distrib` OR a Gensim model `gensim_model`
and the corpus' vocabulary as `vocab` OR pass a gensim corpus as `gensim_corpus`. `top_n` controls how many most
probable words per topic are selected.
If measure is ``'u_mass'``, a document-term-matrix `dtm` or `gensim_corpus` must be provided and `texts` can be
None. If any other measure than ``'u_mass'`` is used, tokenized input as `texts` must be provided as 2D list::
[['some', 'text', ...], # doc. 1
['some', 'more', ...], # doc. 2
['another', 'document', ...]] # doc. 3
If `return_coh_model` is True, the whole :class:`gensim.models.CoherenceModel` instance will be returned, otherwise:
- if `return_mean` is True, the mean coherence value will be returned
- if `return_mean` is False, a list of coherence values (for each topic) will be returned
Provided `kwargs` will be passed to :class:`gensim.models.CoherenceModel` or
:meth:`gensim.models.CoherenceModel.get_coherence_per_topic`.
.. note:: This function also supports models from `lda` and `sklearn` (by passing `topic_word_distrib`, `dtm` and
`vocab`)!
:param measure: the coherence calculation type; one of the values listed above
:param topic_word_distrib: topic-word distribution; shape KxM, where K is number of topics, M is vocabulary size if
`gensim_model` is not given
:param gensim_model: a topic model from Gensim if `topic_word_distrib` is not given
:param vocab: vocabulary list/array if `gensim_corpus` is not given
:param dtm: document-term matrix of shape NxM with N documents and vocabulary size M if `gensim_corpus` is not
given
:param gensim_corpus: a Gensim corpus if `vocab` is not given
:param texts: list of tokenized documents; necessary if using a `measure` other than ``'u_mass'``
:param top_n: number of most probable words selected per topic
:param return_coh_model: if True, return :class:`gensim.models.CoherenceModel` as result
:param return_mean: if `return_coh_model` is False and `return_mean` is True, return mean coherence
:param kwargs: parameters passed to :class:`gensim.models.CoherenceModel` or
:meth:`gensim.models.CoherenceModel.get_coherence_per_topic`
:return: if `return_coh_model` is True, return :class:`gensim.models.CoherenceModel` as result; otherwise if
`return_mean` is True, mean of all coherence values, otherwise array of length K with coherence per
topic
"""
try:
import gensim
except ImportError:
raise ValueError('package `gensim` must be installed for `coherence_gensim` metric')
if measure == 'u_mass' and dtm is None and gensim_corpus is None:
raise ValueError('document-term-matrix `dtm` or Gensim corpus `gensim_corpus` must be provided for measure '
'`u_mass`')
elif measure != 'u_mass' and texts is None:
raise ValueError('`texts` must be provided for any other measure than `u_mass`')
if gensim_model is None:
if topic_word_distrib is None:
raise ValueError('`topic_word_distrib` must be given if `gensim_model` was not passed')
n_topics, n_vocab = topic_word_distrib.shape
else:
n_topics, n_vocab = None, None
if vocab is not None:
if len(vocab) != n_vocab:
raise ValueError('shape of provided `topic_word_distrib` and length of `vocab` do not match '
'(vocab sizes differ)')
if top_n > n_vocab:
raise ValueError('`top_n=%d` is larger than the vocabulary size of %d words'
% (top_n, topic_word_distrib.shape[1]))
elif gensim_corpus is None:
raise ValueError('a gensim corpus `gensim_corpus` must be passed if no `vocab` is given')
if measure == 'u_mass' and gensim_corpus is None and n_vocab != dtm.shape[1]:
raise ValueError('shapes of provided `topic_word_distrib` and `dtm` do not match (vocab sizes differ)')
if vocab is not None:
top_words = top_words_for_topics(topic_word_distrib, top_n, vocab=vocab) # V
else:
top_words = None
coh_model_kwargs = {'coherence': measure}
if measure == 'u_mass':
if gensim_corpus is None:
gensim_corpus, gensim_dict = dtm_and_vocab_to_gensim_corpus_and_dict(dtm, vocab)
coh_model_kwargs.update(dict(corpus=gensim_corpus, dictionary=gensim_dict, topics=top_words))
else:
coh_model_kwargs.update(dict(model=gensim_model, corpus=gensim_corpus, topn=top_n))
else:
if gensim_corpus is None:
coh_model_kwargs.update(dict(texts=texts, topics=top_words, dictionary=FakedGensimDict.from_vocab(vocab)))
else:
coh_model_kwargs.update(dict(texts=texts, model=gensim_model, corpus=gensim_corpus, topn=top_n))
get_coh_kwargs = {}
for opt in ('segmented_topics', 'with_std', 'with_support'):
if opt in kwargs:
get_coh_kwargs[opt] = kwargs.pop(opt)
coh_model_kwargs.update(kwargs)
coh_model = gensim.models.CoherenceModel(**coh_model_kwargs)
if return_coh_model:
return coh_model
else:
if return_mean:
return coh_model.get_coherence()
else:
return coh_model.get_coherence_per_topic(**get_coh_kwargs)
metric_coherence_gensim.direction = 'maximize'
#%% Helper functions for topic model evaluation
def results_by_parameter(res, param, sort_by=None, sort_desc=False):
"""
Takes a list of evaluation results `res` returned by a topic model evaluation function – a list in the form:
.. code-block:: text
[(parameter_set_1, {'<metric_name>': result_1, ...}),
...,
(parameter_set_n, {'<metric_name>': result_n, ...})])
Then returns a list with tuple pairs using only the parameter `param` from the parameter sets in the evaluation
results such that the returned list is:
.. code-block:: text
[(param_1, {'<metric_name>': result_1, ...}),
...,
(param_n, {'<metric_name>': result_n, ...})]
Optionally order either by parameter value (`sort_by` is None - the default) or by result metric
(``sort_by='<metric name>'``).
:param res: list of evaluation results
:param param: string of parameter name
:param sort_by: order by parameter value if this is None, or by a certain result metric given as string
:param sort_desc: sort in descending order
:return: list with tuple pairs using only the parameter `param` from the parameter sets
"""
if len(res) == 0:
return []
tuples = [(p[param], r) for p, r in res]
# single validation results
if len(tuples[0]) != 2:
raise ValueError('invalid evaluation results passed')
params, metric_results = list(zip(*tuples))
if sort_by:
sorted_ind = argsort([r[sort_by] for r in metric_results])
else:
sorted_ind = argsort(params)
if sort_desc:
sorted_ind = reversed(sorted_ind)
measurements = tuples
return [measurements[i] for i in sorted_ind]
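# Illustrative example for results_by_parameter; the parameter sets and metric
# values below are made up:
#
#     >>> res = [({'n_topics': 10, 'alpha': 0.1}, {'cao_juan_2009': 0.21}),
#     ...        ({'n_topics': 20, 'alpha': 0.1}, {'cao_juan_2009': 0.17})]
#     >>> results_by_parameter(res, 'n_topics', sort_by='cao_juan_2009')
#     [(20, {'cao_juan_2009': 0.17}), (10, {'cao_juan_2009': 0.21})]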
| 43.459574
| 126
| 0.679526
|
0c7918907bbf8b6631211ec4d640bfc6bb165e5f
| 1,874
|
py
|
Python
|
capstone/capdb/migrations/0051_auto_20181001_1905.py
|
rachelaus/capstone
|
2affa02706f9b1a99d032c66f258a7421c40a35e
|
[
"MIT"
] | 134
|
2017-07-12T17:03:06.000Z
|
2022-03-27T06:38:29.000Z
|
capstone/capdb/migrations/0051_auto_20181001_1905.py
|
rachelaus/capstone
|
2affa02706f9b1a99d032c66f258a7421c40a35e
|
[
"MIT"
] | 1,362
|
2017-06-22T17:42:49.000Z
|
2022-03-31T15:28:00.000Z
|
capstone/capdb/migrations/0051_auto_20181001_1905.py
|
rachelaus/capstone
|
2affa02706f9b1a99d032c66f258a7421c40a35e
|
[
"MIT"
] | 38
|
2017-06-22T14:46:23.000Z
|
2022-03-16T05:32:54.000Z
|
# Generated by Django 2.0.8 on 2018-10-01 19:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('capdb', '0050_fts'),
]
operations = [
migrations.CreateModel(
name='Ngram',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('count', models.IntegerField()),
('year', models.IntegerField()),
('jurisdiction', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ngrams', to='capdb.Jurisdiction', to_field='slug')),
],
),
migrations.CreateModel(
name='NgramWord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('word', models.CharField(max_length=10000, unique=True)),
],
),
migrations.AddField(
model_name='ngram',
name='w1',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='w1', to='capdb.NgramWord'),
),
migrations.AddField(
model_name='ngram',
name='w2',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='w2', to='capdb.NgramWord'),
),
migrations.AddField(
model_name='ngram',
name='w3',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='w3', to='capdb.NgramWord'),
),
migrations.AddIndex(
model_name='ngram',
index=models.Index(fields=['w1', 'w2', 'w3'], name='capdb_ngram_w1_id_22799f_idx'),
),
]
| 37.48
| 162
| 0.58111
|
e06b3c7184bf1af176c6cbb42dbc511b9ac75f23
| 25,178
|
py
|
Python
|
rvlyzer/analysis/graphs.py
|
nessdoor/RVlyzer
|
239beb63a4db1653261bc1cc59227ee2ddb77d1a
|
[
"MIT"
] | null | null | null |
rvlyzer/analysis/graphs.py
|
nessdoor/RVlyzer
|
239beb63a4db1653261bc1cc59227ee2ddb77d1a
|
[
"MIT"
] | null | null | null |
rvlyzer/analysis/graphs.py
|
nessdoor/RVlyzer
|
239beb63a4db1653261bc1cc59227ee2ddb77d1a
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from enum import Enum
from itertools import chain, tee, repeat
from operator import attrgetter
from typing import FrozenSet, List, Tuple, Optional, Mapping, Hashable, Iterable, MutableMapping, \
NamedTuple, Dict, Union
from networkx import DiGraph, simple_cycles, restricted_view, all_simple_paths, relabel_nodes, dfs_preorder_nodes, \
Graph
from networkx.classes.graphviews import subgraph_view
from networkx.utils import generate_unique_node
from rep.base import Instruction, to_line_iterator
from rep.fragments import FragmentView, CodeFragment
class InvalidCodeError(Exception):
"""An error raised when a code fragment doesn't follow some expected layout or assumption."""
pass
class Transition(Enum):
"""
A type of control flow progression.
Each member carries some information characterizing the type of advancement:
- resolve_symbol: whether the progression implies a symbol resolution;
- branching: whether progressing in this direction is conditional.
"""
SEQ = (False, False)
"""Sequential advancement: PC advances linearly, towards the instruction that follows."""
U_JUMP = (True, False)
"""Unconditional jump: a simple local unconditional jump."""
C_JUMP = (True, True)
""" Conditional jump: a simple local conditional jump. An alternate sequential execution path exists."""
CALL = (True, False)
"""Procedure call: non-local jump to an internal or external procedure."""
RETURN = (False, False)
"""Return: return jump from a call."""
def __new__(cls, *args, **kwargs):
# Calculate a unique ID to avoid aliasing
id_val = len(cls.__members__) + 1
instance = object.__new__(cls)
instance._value_ = id_val
return instance
def __init__(self, resolve_symbol: bool, branching: bool):
self.resolve_symbol = resolve_symbol
self.branching = branching
def __repr__(self):
return '<%s.%s: (%s,%s)>' % (self.__class__.__name__, self.name, self.resolve_symbol, self.branching)
jump_ops: Mapping[str, Transition] = {
"call": Transition.CALL,
"jr": Transition.RETURN,
"j": Transition.U_JUMP,
"jal": Transition.CALL,
"jalr": Transition.CALL,
"beq": Transition.C_JUMP,
"beqz": Transition.C_JUMP,
"bne": Transition.C_JUMP,
"bnez": Transition.C_JUMP,
"blt": Transition.C_JUMP,
"bltz": Transition.C_JUMP,
"bltu": Transition.C_JUMP,
"ble": Transition.C_JUMP,
"blez": Transition.C_JUMP,
"bleu": Transition.C_JUMP,
"bgt": Transition.C_JUMP,
"bgtz": Transition.C_JUMP,
"bgtu": Transition.C_JUMP,
"bge": Transition.C_JUMP,
"bgez": Transition.C_JUMP,
"bgeu": Transition.C_JUMP
}
"""Mapping between control flow manipulation instructions and the kind of transition that they introduce."""
class BasicBlock:
"""
A program's basic block.
Each member of this class is a code container, decorated with additional metadata that would have to be extracted
every time from bare assembly.
Members of this class are identified by some hashable object. Uniqueness is not enforced.
:ivar identifier: the hashable identifier for the basic block
    :ivar labels: labels marking the entry point of the basic block, if any
:ivar code: the code fragment containing the actual code
:ivar outgoing_flow: the shape of the outgoing flow and its destination, in the format returned by the
execution_flow_at function
"""
identifier: Hashable
labels: List[str]
code: CodeFragment
outgoing_flow: Tuple[Transition, Optional[str]]
def __init__(self, fragment: CodeFragment, block_id: Hashable):
starting_line = fragment[fragment.begin]
ending_line = fragment[fragment.end - 1]
if not isinstance(ending_line, Instruction):
raise InvalidCodeError("A basic block must always end with an instruction.")
self.identifier = block_id
self.labels = list(starting_line.labels)
self.code = fragment
self.outgoing_flow = execution_flow_at(ending_line)
def __repr__(self):
return "BasicBlock(" + repr(self.code) + ", " + repr(self.identifier) + ")"
def __str__(self):
return "---\nBB ID: " + str(self.identifier) + "\nLabels: " + str(self.labels) + "\n\n" + str(self.code) + \
"\nOutgoing exec arc: " + str(self.outgoing_flow) + "\n---\n"
class ProcedureCall(NamedTuple):
"""
A procedure call.
Calls are represented in terms of caller, callee and point where execution is expected to return
(`confluence point`).
"""
caller: Hashable
callee: Hashable
confluence_point: Hashable
class LocalGraph:
"""
A CFG representing some part of a program.
A local graph is characterized by one or more entry-points, a digraph, some terminal nodes and a collection of
"arcs" directed to some external procedures. All entering execution flows proceed from the entry-points and reach
the terminal nodes, unless an external call diverges.
    A local graph may not be connected, with disconnected components being confluence points for flows returning from
external calls. The information necessary to obtain a connected graph can be extracted by resolving the external
calls.
Internal calls are represented by edges connecting caller and confluence point, labeled with the `CALL` transition
    kind and with a `callee` attribute indicating the called procedure's symbolic name.
No check is performed on the consistency of the information used to instantiate these objects.
"""
entry_labels: List[str]
entry_point_ids: List[Hashable]
graph: DiGraph
external_calls: List[ProcedureCall]
terminal_nodes_ids: List[Hashable]
def __init__(self,
entry_points: Iterable[Hashable],
graph: DiGraph,
calls: Iterable[ProcedureCall],
terminals: Iterable[Hashable]):
"""
Construct a new local graph.
:param entry_points: a collection of node IDs indicating the entry-points
:param graph: the local graph, as a NetworkX DiGraph
:param calls: a collection of purportedly external calls
:param terminals: a collection of node IDs indicating which are the terminal nodes
"""
# Set up the entry-point information
self.entry_point_ids = list(entry_points)
# Characterize the function in terms of a graph and the nested calls it performs
self.graph = graph
self.external_calls = list(calls)
# Keep track of the terminal nodes
self.terminal_nodes_ids = list(terminals)
@property
def entry_labels(self) -> List[str]:
labeling_lists = map(lambda n: self.graph.nodes[n]['labels'], self.entry_point_ids)
return list(chain.from_iterable(labeling_lists))
def get_symbol_table(self) -> Mapping[str, Hashable]:
"""
Return a mapping between entry labels and entry-points' node IDs.
:return: a mapping from public labels to their entry-point IDs
"""
return {lab: nid for nid in self.entry_point_ids for lab in self.graph.nodes[nid]['labels']}
def merge(self, other: LocalGraph) -> LocalGraph:
"""
Merge this local graph with `other` into a new local graph.
The resulting local graph has the union of entry-points, graphs and terminals. A cross-lookup is performed
between the two objects in an attempt to resolve external calls that may become internal. Newly found internal
calls are converted into graph edges of kind `Transition.CALL` with attribute `callee` pointing to one of the
local entry-points. The remaining external calls are merged and included in the new object.
:param other: the other graph that takes part in the operation
:return: a local graph obtained by merging self with other
:raise InvalidCodeError: when there is a naming clash between entry labels of the two graphs
"""
self_symbols = self.get_symbol_table()
other_symbols = other.get_symbol_table()
        # Look for entry label collisions. If the original code is rational, this shouldn't happen.
if not frozenset(self_symbols).isdisjoint(other_symbols):
raise InvalidCodeError("Labeling clash")
# Remap the other graph, entry-point IDs, callers and terminals
other = remap_local_graph(other, solve_graph_collision(self.graph, other.graph))
# Start merging stuff
merged_eps = chain(self.entry_point_ids, other.entry_point_ids)
merged_graph = self.graph.copy()
merged_graph.update(other.graph)
merged_terminals = chain(self.terminal_nodes_ids, other.terminal_nodes_ids)
# TODO re-implement with partitions
sec1, sec2 = tee(self.external_calls)
oec1, oec2 = tee(other.external_calls)
# Merge external calls
merged_calls = chain(filter(lambda c: c.callee not in other_symbols, sec1),
filter(lambda c: c.callee not in self_symbols, oec1))
# Resolve internal calls
for ic in filter(lambda c: c.callee in other_symbols, sec2):
merged_graph.add_edge(ic.caller, ic.confluence_point, kind=Transition.CALL, callee=ic.callee)
for ic in filter(lambda c: c.callee in self_symbols, oec2):
merged_graph.add_edge(ic.caller, ic.confluence_point, kind=Transition.CALL, callee=ic.callee)
return LocalGraph(merged_eps, merged_graph, merged_calls, merged_terminals)
def solve_graph_collision(ref: Graph, other: Graph) -> Dict[Hashable, Hashable]:
"""
Given two NetworkX graphs, find eventual name collisions between nodes and propose a solution.
The proposed solution comes in the form of a dictionary, containing remapping rules that could be applied to the
second graph in order to solve any clash.
:param ref: a reference graph
:param other: the other graph, on which renaming has to be performed
:return: a partial mapping that solves eventual clashes once applied on the second graph
"""
id_clashes = ref.nbunch_iter(other.nodes)
return {idc: generate_unique_node() for idc in id_clashes}
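# Illustrative sketch (editor's addition, not part of the original module): resolving a node-ID
# clash between two graphs before merging them. Nodes "a" and "b" collide, so the proposed mapping
# assigns them fresh unique IDs, while the non-clashing node "c" is left alone.
def _collision_resolution_example() -> Dict[Hashable, Hashable]:
    ref = DiGraph()
    ref.add_nodes_from(["a", "b"])
    other = DiGraph()
    other.add_nodes_from(["a", "b", "c"])
    mapping = solve_graph_collision(ref, other)
    # mapping only contains entries for "a" and "b"; relabel_nodes(other, mapping)
    # would then let the two graphs merge without node collisions.
    return mapping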
def remap_local_graph(cfg: LocalGraph, mapping: Dict[Hashable, Hashable]) -> LocalGraph:
"""
Given a local graph, use the provided mapping to remap node identifiers.
An invocation of this function results in the creation of a new local graph in which node identifiers have been
remapped according to the contents in the supplied dictionary. The original graph is left untouched.
The new mapping may be partial. In that case, only the nodes for which a corresponding key exists are remapped.
:param cfg: the local graph to be remapped
:param mapping: a dictionary containing the new mappings
:return: a new local graph where the selected nodes have been remapped
"""
new_entry = map(lambda ep: mapping.get(ep, ep), cfg.entry_point_ids)
new_graph = relabel_nodes(cfg.graph, mapping)
new_calls = map(lambda c:
ProcedureCall(mapping.get(c.caller, c.caller),
c.callee,
mapping.get(c.confluence_point, c.confluence_point)), cfg.external_calls)
new_terminals = map(lambda term: mapping.get(term, term), cfg.terminal_nodes_ids)
return LocalGraph(new_entry, new_graph, new_calls, new_terminals)
def execution_flow_at(inst: Instruction) -> Tuple[Transition, Optional[str]]:
"""
Determine the state of the execution flow at the given instruction.
This function returns a tuple containing a `Transition` type specifier and, in case of a jump, the symbol
representing its destination. The transition type indicates in what manner the execution flow shall progress past
the given instruction.
:param inst: the instruction at which the control flow status must be checked
    :return: a tuple of the outgoing transition kind and, for jumps, its symbolic destination
"""
if inst.opcode in jump_ops:
trans_type = jump_ops[inst.opcode]
if trans_type.resolve_symbol:
return trans_type, inst.immediate.symbol
else:
return trans_type, None
else:
# Any instruction that is not a jump instruction must maintain the sequential control flow
return Transition.SEQ, None
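# Illustrative sketch (editor's addition, not part of the original module): how the jump_ops
# table drives the classification above, shown without building full Instruction objects.
# The label "loop_head" is a made-up symbol used purely for illustration.
def _transition_classification_example() -> List[Tuple[Transition, Optional[str]]]:
    # "beq" is a conditional branch: the flow either falls through or jumps to the symbol.
    conditional = (jump_ops["beq"], "loop_head")
    # An opcode absent from jump_ops (e.g. "addi") keeps the sequential control flow.
    sequential = (Transition.SEQ, None)
    return [conditional, sequential]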
def basic_blocks(code: CodeFragment) -> List[BasicBlock]:
"""
Extract the basic blocks from a code fragment.
The resulting basic blocks contain views on the source fragment, and come in the same order in which they appear in
the original fragment. Non-code statements are discarded if they reside between BB boundaries and are not
interleaved with code statements.
For a correct behaviour, launch this function on a well-delimited code fragment (started by at least one label,
terminated by a jump).
Be aware that fancy ways of jumping around based on runtime-loaded addresses are not currently supported by this
package.
:param code: the code fragment whose basic blocks will be extracted
:return: the list of basic blocks contained in the original fragment
:raise InvalidCodeError: when the provided code fragment has no label or no outgoing jump
"""
# Identify the block boundaries, that is: those lines marked by a label or containing a control transfer instruction
block_boundaries = filter(lambda asl: isinstance(asl.statement, Instruction)
and (asl.statement.opcode in jump_ops or len(asl.statement.labels) > 0),
# Use a line-oriented iterator, so that we can extract the line numbers
to_line_iterator(iter(code), code.begin))
# Given the boundaries, calculate the appropriate cutoff points.
# A dictionary is used as a way of implementing an "ordered set" for easy duplicate removal.
# TODO find a more elegant way to remove duplicates online
cutoff_points = dict()
for boundary in block_boundaries:
if len(boundary.statement.labels) > 0 and boundary.statement.opcode in jump_ops:
# For a labeled line that also contains a jump, record two cut-points so that a single-line block can be
# created.
cutoff_points[boundary.number] = None
cutoff_points[boundary.number + 1] = None
elif len(boundary.statement.labels) > 0:
# Labeled lines mark cut-points themselves
cutoff_points[boundary.number] = None
else:
# A cut has to be made below any line containing a jump
cutoff_points[boundary.number + 1] = None
if len(cutoff_points) < 2:
raise InvalidCodeError("Code fragment does not start with a label or end with a jump/return.")
# Convert the "ordered set" back into a list
cutoff_points = list(iter(cutoff_points))
# Start slicing code into basic blocks
bb = []
head = cutoff_points[0]
for tail in cutoff_points[1:]:
if any(isinstance(line, Instruction) for line in code[head:tail]):
            # Since these blocks are likely to end up inside a graph, use NetworkX's helper for unique IDs
bb.append(BasicBlock(FragmentView(code, head, tail, head), generate_unique_node()))
head = tail
return bb
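# Illustrative sketch (editor's addition, not part of the original module): the "ordered set"
# trick used above for cutoff points, demonstrated on plain line numbers. Dictionaries keep
# insertion order, so duplicate boundaries collapse while the original ordering is preserved.
def _cutoff_points_example() -> List[int]:
    boundaries = [4, 4, 7, 10, 10, 12]  # hypothetical boundary line numbers
    ordered_unique = list(dict.fromkeys(boundaries))
    # ordered_unique == [4, 7, 10, 12]; consecutive pairs delimit candidate basic blocks.
    return ordered_unique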
def local_cfg(bbs: List[BasicBlock]) -> LocalGraph:
"""
Construct a local graph from a list of basic blocks.
Nodes and edges of the resulting graph will be decorated, respectively, with assembly labels and transition types,
    registered under the attribute names `labels` and `kind`.
This function works based on a few assumptions:
- the basic blocks are provided in the same order they appear inside the original code fragment;
- the first block is the entry-point;
- all jumps are local;
- all blocks with a final `RETURN` transition actually return control to whoever caused the PC to reach the EP.
When these conditions are satisfied, a well-formed local graph is returned.
:param bbs: the list of basic blocks of which the local graph is formed
:return: a LocalGraph object representing the local graph
"""
local_graph = DiGraph()
local_symbol_table: MutableMapping[str, Hashable] = {}
pending_jumps: List[Tuple[Hashable, str, Transition]] = []
terminal_nodes = []
calls = []
parent_seq_block = None
pending_call = None
for bb in bbs:
local_graph.add_node(bb.identifier, labels=list(bb.labels), block=bb.code)
if parent_seq_block is not None:
# Attach the current node to the sequence-wise previous one
local_graph.add_edge(parent_seq_block, bb.identifier, kind=Transition.SEQ)
parent_seq_block = None
elif pending_call is not None:
# Set the current node as the return point of an external procedure call
calls.append(ProcedureCall(pending_call[0], pending_call[1], bb.identifier))
pending_call = None
# Embed the basic block's labels into the node
local_symbol_table.update((lab, bb.identifier) for lab in bb.labels)
outgoing_transition = bb.outgoing_flow[0]
if outgoing_transition is Transition.RETURN:
# The outgoing transition is a return-jump: add the node to the list of terminals.
terminal_nodes.append(bb.identifier)
elif outgoing_transition is Transition.CALL:
# The outgoing transition is a procedure call: keep track of it so that the subsequent block will be set as
# its confluence point.
pending_call = bb.identifier, bb.outgoing_flow[1]
else:
if outgoing_transition is Transition.SEQ or outgoing_transition.branching:
# In case of a sequential or branching transition, the subsequent basic block is to be attached to the
# current one.
parent_seq_block = bb.identifier
if outgoing_transition.resolve_symbol:
# In case of a jump, store its origin and symbolic destination for the coming one-pass resolution.
pending_jumps.append((bb.identifier, bb.outgoing_flow[1], bb.outgoing_flow[0]))
for jumper, dst, kind in pending_jumps:
# Resolve the internal symbolic jumps and add the missing edges
local_graph.add_edge(jumper, local_symbol_table[dst], kind=kind)
# Transform recursive calls into internal call arcs
# TODO re-implement with partitions or sets
ci, ce = tee(calls)
for cll in filter(lambda c: c.callee in local_symbol_table, ci):
local_graph.add_edge(cll.caller, cll.confluence_point, kind=Transition.CALL, callee=cll.callee)
return LocalGraph([bbs[0].identifier],
local_graph,
filter(lambda c: c.callee not in local_symbol_table, ce),
terminal_nodes)
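# Illustrative sketch (editor's addition, not part of the original module): the shape of the
# DiGraph that local_cfg builds for a tiny two-block procedure. Node IDs here are made up; the
# real function uses generate_unique_node() identifiers and also stores the code fragment of
# each block under the 'block' node attribute.
def _local_cfg_shape_example() -> DiGraph:
    g = DiGraph()
    g.add_node("bb0", labels=["my_proc"])
    g.add_node("bb1", labels=[])
    # bb0 ends with a conditional branch back to itself, plus a sequential fall-through to bb1.
    g.add_edge("bb0", "bb0", kind=Transition.C_JUMP)
    g.add_edge("bb0", "bb1", kind=Transition.SEQ)
    # bb1 ends with a return, so it would appear among the LocalGraph's terminal node IDs.
    return g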
def internalize_calls(cfg: LocalGraph) -> LocalGraph:
"""
Transform external callees into symbolic internal nodes.
A symbolic node will bear an identifier and a single label, both equal to the callee's symbolic name. Of course,
these new nodes will be isolated from the rest of the graph. Therefore, this method is of practical use only when
the user is planning an attempt at name resolution by modifying the internal graph.
:param cfg: a local graph
:return: a new local graph, with all external calls converted into symbolic nodes
"""
# Gather all the callees' names
external_nodes_ids = set(map(attrgetter('callee'), cfg.external_calls))
# Create a new local graph containing only the symbolic nodes
foreign_graph = DiGraph()
foreign_graph.add_nodes_from((i, {'labels': [i], 'external': True}) for i in external_nodes_ids)
external = LocalGraph(external_nodes_ids, foreign_graph, [], external_nodes_ids)
# Merge the new graph with the original and return the result
return cfg.merge(external)
def exec_graph(cfg: LocalGraph,
entry_point: Union[str, Hashable],
ignore_calls: FrozenSet[str] = frozenset()) -> DiGraph:
"""
Given a local CFG and an entry-point, return the graph of the node visits performed by the execution flow.
    The procedure consists of a recursive, depth-first visit of sub-graphs, starting from the initial node and repeating
itself for every `CALL` arc encountered. Given their nasty nature, recursive calls are not expanded; instead, they
are represented by special nodes with IDs of the form `call{<call destination>, <unique ID>}`.
The user can specify additional calls that mustn't be expanded.
Different calls to the same procedure result in differently-labeled sub-graphs being attached, so the resulting
graph is more a substantiation of the execution paths than a sub-graph of the original CFG. As a consequence, don't
    expect a one-to-one correspondence between the CFG's nodes and the ones in the execution graph.
Terminal nodes reachability is guaranteed only if the graph is well formed and any external call reached by the
execution flow has been internalized, if not explicitly set as ignored.
:param cfg: a CFG description of some code
:param entry_point: an entry-point specification for the CFG, either as a node ID or as a symbolic label
:param ignore_calls: a set of calls that won't be expanded into sub-graphs
:return: a directed graph representing the execution starting from the specified entry-point
"""
# Get the entry-point ID
source = entry_point if entry_point in cfg.entry_point_ids else cfg.get_symbol_table()[entry_point]
source_labels = cfg.graph.nodes[source]['labels']
# If one of the entry-point's labels is in the ignore set, return a node summarizing the call
if not ignore_calls.isdisjoint(source_labels):
res = DiGraph()
# The node will have a synthetic ID 'call{<call destination>, <unique ID>}', and will carry the original labels.
res.add_node('call{' + str(source) + ', ' + generate_unique_node() + '}', labels=source_labels, external=True)
return res
# Traverse the subtree rooted at the entry-point and collect the visited nodes
visited_nodes = frozenset(dfs_preorder_nodes(cfg.graph, source))
# Produce a view of the visited component
visited_component: Graph = subgraph_view(cfg.graph, lambda n: n in visited_nodes)
# Initialize the returned graph with the contents of the visited component
res = DiGraph()
res.update(visited_component)
# Iterate over the CALL edges inside the visited component
for edge in filter(lambda e: visited_component.edges[e]['kind'] == Transition.CALL, visited_component.edges):
# Recursively compute the component of the called procedures
nested_component = exec_graph(cfg,
visited_component.edges[edge]['callee'],
ignore_calls.union(source_labels))
# Add the nested component to the result, avoiding ID clashes
relabel_nodes(nested_component, solve_graph_collision(res, nested_component), False)
res.update(nested_component)
# Take the root of the sub-component and its terminal nodes
head = next(filter(lambda n: nested_component.in_degree(n) == 0, nested_component.nodes))
tail = filter(lambda n: nested_component.out_degree(n) == 0, nested_component.nodes)
# Substitute the original edge with call and return edges toward/from the sub-component
res.remove_edge(*edge)
res.add_edge(edge[0], head, kind=Transition.CALL)
res.add_edges_from(zip(tail, repeat(edge[1])), kind=Transition.RETURN)
return res
def merge_points(cfg: DiGraph) -> FrozenSet[int]:
"""
    Find all the merge points in the CFG.
A merge point is a node on which multiple directed edges converge.
:arg cfg: the CFG representing a program
:return: a frozen set containing all the merge points
"""
# Node 0 represents the calling environment, so it must be excluded from the analysis
return frozenset((n for n in cfg.nodes.keys() if n != 0 and cfg.in_degree(n) > 1))
def loop_back_nodes(cfg: DiGraph) -> FrozenSet[int]:
"""
Find all the nodes of a CFG that are exclusively part of a loop.
A node is exclusively part of a loop if it belongs only to those paths that traverse the back-loop of a cycle.
:arg cfg: the CFG representation of a program
:return: a frozen set of all the loop-exclusive nodes
"""
# Node 0 closes an improper loop over the CFG, so it must be ignored
cycle_nodes = frozenset(chain.from_iterable(simple_cycles(restricted_view(cfg, [0], []))))
return frozenset(cycle_nodes.difference(chain.from_iterable(
# For every path, its last component is node 0; therefore, we have to cut it.
map(lambda l: l[:-1], all_simple_paths(cfg, 1, 0)))))
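# Illustrative sketch (editor's addition, not part of the original module): merge_points and
# loop_back_nodes applied to a tiny CFG. Node 0 stands for the calling environment and node 1
# is the entry point, following the conventions stated in the docstrings above.
def _loop_analysis_example() -> Tuple[FrozenSet[int], FrozenSet[int]]:
    cfg = DiGraph()
    cfg.add_edges_from([(1, 2), (2, 3), (3, 4), (4, 2), (2, 5), (5, 0)])
    # Node 2 is reached both from the entry (1 -> 2) and from the back edge (4 -> 2).
    merges = merge_points(cfg)        # frozenset({2})
    # Nodes 3 and 4 only occur inside the loop body and never on a simple path from 1 to 0.
    loop_only = loop_back_nodes(cfg)  # frozenset({3, 4})
    return merges, loop_only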
| 43.864111
| 120
| 0.698308
|
d9aa957182d6521f7cd86108ab6408f7edfe4d78
| 2,663
|
py
|
Python
|
pandas/tests/test_msgpack/test_limits.py
|
springcoil/pandas
|
945075ad78cef652039feb50d60092b0580604e6
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 652
|
2015-07-26T00:00:17.000Z
|
2022-02-24T18:30:04.000Z
|
pandas/tests/test_msgpack/test_limits.py
|
springcoil/pandas
|
945075ad78cef652039feb50d60092b0580604e6
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 8
|
2015-09-07T03:38:19.000Z
|
2021-05-23T03:18:51.000Z
|
pandas/tests/test_msgpack/test_limits.py
|
springcoil/pandas
|
945075ad78cef652039feb50d60092b0580604e6
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 40
|
2015-07-24T19:45:08.000Z
|
2021-11-01T14:54:56.000Z
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
import pandas.util.testing as tm
from pandas.msgpack import packb, unpackb, Packer, Unpacker, ExtType
class TestLimits(tm.TestCase):
def test_integer(self):
x = -(2 ** 63)
assert unpackb(packb(x)) == x
self.assertRaises((OverflowError, ValueError), packb, x-1)
x = 2 ** 64 - 1
assert unpackb(packb(x)) == x
self.assertRaises((OverflowError, ValueError), packb, x+1)
def test_array_header(self):
packer = Packer()
packer.pack_array_header(2**32-1)
self.assertRaises((OverflowError, ValueError),
packer.pack_array_header, 2**32)
def test_map_header(self):
packer = Packer()
packer.pack_map_header(2**32-1)
self.assertRaises((OverflowError, ValueError),
                          packer.pack_map_header, 2**32)
def test_max_str_len(self):
d = 'x' * 3
packed = packb(d)
unpacker = Unpacker(max_str_len=3, encoding='utf-8')
unpacker.feed(packed)
assert unpacker.unpack() == d
unpacker = Unpacker(max_str_len=2, encoding='utf-8')
unpacker.feed(packed)
self.assertRaises(ValueError, unpacker.unpack)
def test_max_bin_len(self):
d = b'x' * 3
packed = packb(d, use_bin_type=True)
unpacker = Unpacker(max_bin_len=3)
unpacker.feed(packed)
assert unpacker.unpack() == d
unpacker = Unpacker(max_bin_len=2)
unpacker.feed(packed)
self.assertRaises(ValueError, unpacker.unpack)
def test_max_array_len(self):
d = [1, 2, 3]
packed = packb(d)
unpacker = Unpacker(max_array_len=3)
unpacker.feed(packed)
assert unpacker.unpack() == d
unpacker = Unpacker(max_array_len=2)
unpacker.feed(packed)
self.assertRaises(ValueError, unpacker.unpack)
def test_max_map_len(self):
d = {1: 2, 3: 4, 5: 6}
packed = packb(d)
unpacker = Unpacker(max_map_len=3)
unpacker.feed(packed)
assert unpacker.unpack() == d
unpacker = Unpacker(max_map_len=2)
unpacker.feed(packed)
self.assertRaises(ValueError, unpacker.unpack)
def test_max_ext_len(self):
d = ExtType(42, b"abc")
packed = packb(d)
unpacker = Unpacker(max_ext_len=3)
unpacker.feed(packed)
assert unpacker.unpack() == d
unpacker = Unpacker(max_ext_len=2)
unpacker.feed(packed)
self.assertRaises(ValueError, unpacker.unpack)
| 28.031579
| 82
| 0.614345
|
36fa1044f90401ce931c47d01539df024db862bf
| 644
|
py
|
Python
|
edukasi/migrations/0028_auto_20210114_1654.py
|
irzaip/trampill_py
|
74310e44864ab1bb8b20f529c21079cbd8076875
|
[
"MIT"
] | 2
|
2020-11-21T04:25:11.000Z
|
2021-07-07T08:26:10.000Z
|
edukasi/migrations/0028_auto_20210114_1654.py
|
irzaip/trampill_py
|
74310e44864ab1bb8b20f529c21079cbd8076875
|
[
"MIT"
] | 7
|
2021-07-04T14:55:48.000Z
|
2021-09-08T12:51:22.000Z
|
edukasi/migrations/0028_auto_20210114_1654.py
|
irzaip/trampill_py
|
74310e44864ab1bb8b20f529c21079cbd8076875
|
[
"MIT"
] | 1
|
2021-09-10T06:05:49.000Z
|
2021-09-10T06:05:49.000Z
|
# Generated by Django 3.1.2 on 2021-01-14 09:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('edukasi', '0027_review'),
]
operations = [
migrations.AddField(
model_name='review',
name='date_created',
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.AlterField(
model_name='review',
name='review',
field=models.IntegerField(choices=[(1, '*'), (2, '**'), (3, '***'), (4, '****'), (5, '*****')], default='5'),
),
]
| 26.833333
| 122
| 0.509317
|
37db91139e5ae03f7d95a6648f26f63231204928
| 6,491
|
py
|
Python
|
external/BB2SegNet/refinement_net/core/Engine.py
|
zhuhu00/MOTSFusion_modify
|
190224a7c3fbded69fedf9645a0ebbf08227fb6d
|
[
"MIT"
] | 154
|
2019-07-25T02:27:39.000Z
|
2022-02-18T19:40:43.000Z
|
external/BB2SegNet/refinement_net/core/Engine.py
|
zhuhu00/MOTSFusion_modify
|
190224a7c3fbded69fedf9645a0ebbf08227fb6d
|
[
"MIT"
] | 4
|
2019-11-12T00:38:54.000Z
|
2021-08-14T08:40:12.000Z
|
external/BB2SegNet/refinement_net/core/Engine.py
|
zhuhu00/MOTSFusion_modify
|
190224a7c3fbded69fedf9645a0ebbf08227fb6d
|
[
"MIT"
] | 25
|
2019-09-17T08:49:57.000Z
|
2022-03-21T20:11:57.000Z
|
import tensorflow as tf
from external.BB2SegNet.refinement_net.core import Measures
from external.BB2SegNet.refinement_net.core.Log import log
from external.BB2SegNet.refinement_net.core.Measures import measures_string_to_print, accumulate_measures, compute_measures_average
from external.BB2SegNet.refinement_net.core.Saver import Saver
from external.BB2SegNet.refinement_net.core.Timer import Timer
from external.BB2SegNet.refinement_net.core.Trainer import Trainer
from external.BB2SegNet.refinement_net.datasets.Loader import load_dataset
from external.BB2SegNet.refinement_net.forwarding.FewShotSegmentationForwarder import FewShotSegmentationForwarder
from external.BB2SegNet.refinement_net.network.Network import Network
class Engine:
def __init__(self, config, session=None):
self.config = config
self.save = config.bool("save", True)
self.task = config.string("task", "train")
self.dataset = config.string("dataset").lower()
self.num_epochs = config.int("num_epochs", 1000)
self.session = self._create_session(session)
self.global_step = tf.Variable(0, name='global_step', trainable=False)
# need_train = True # TODO should be self.task != "eval", but right now testnet needs to reuse variables from train
need_train = config.bool("need_train",True)
if need_train:
self.train_data = load_dataset(config, "train", self.session, self.dataset)
freeze_batchnorm = config.bool("freeze_batchnorm", False)
print("creating trainnet...")
self.train_network = Network(self.config, self.train_data, is_trainnet=True, freeze_batchnorm=freeze_batchnorm,
name="trainnet")
else:
self.train_data = None
self.train_network = None
need_val = self.task != "train_no_val"
if need_val:
self.valid_data = load_dataset(config, "val", self.session, self.dataset)
print("creating testnet...")
reuse_variables = None if need_train else False
# with tf.variable_scope('refinement_net'):
self.test_network = Network(config, self.valid_data, is_trainnet=False, freeze_batchnorm=True, name="testnet",
reuse_variables=reuse_variables)
else:
self.valid_data = None
self.test_network = None
self.trainer = Trainer(config, self.train_network, self.test_network, self.global_step, self.session)
self.saver = Saver(config, self.session)
tf.global_variables_initializer().run(session=self.session)
tf.local_variables_initializer().run(session=self.session)
self.start_epoch = self.saver.try_load_weights()
# self.session.graph.finalize()
@staticmethod
def _create_session(sess):
if sess is None:
sess_config = tf.ConfigProto(allow_soft_placement=True)
sess_config.gpu_options.allow_growth = True
# sess = tf.Session(graph=tf.Graph(),config=sess_config)
sess = tf.Session(config=sess_config)
# sess = tf.InteractiveSession(config=sess_config)
return sess
def run(self):
if self.task in ("train", "train_no_val"):
self.train()
elif self.task == "eval":
self.eval()
elif self.task == "test_dataset_speed":
self.test_dataset_speed()
elif self.task == "few_shot_segmentation":
FewShotSegmentationForwarder(self).forward()
# elif self.task == "davis_iterative_few_shot_segmentation":
# DAVISIterativeFewShotSegmentationForwarder(self).forward()
else:
assert False, ("unknown task", self.task)
def test_dataset_speed(self):
n_total = self.train_data.n_examples_per_epoch()
batch_size = self.config.int("batch_size")
input_tensors_dict = self.train_network.input_tensors_dict
n_curr = 0
with Timer(message="elapsed"):
while n_curr < n_total:
self.session.run(input_tensors_dict)
n_curr += batch_size
print("{:>5}".format(n_curr), "/", n_total)
def train(self):
print("starting training", file=log.v1)
for epoch in range(self.start_epoch, self.num_epochs):
timer = Timer()
train_measures = self.run_epoch(self.trainer.train_step, self.train_data, epoch, is_train_run=True)
if self.valid_data is not None:
valid_measures = self.run_epoch(self.trainer.validation_step, self.valid_data, epoch, is_train_run=False)
else:
valid_measures = {}
if self.save:
self.saver.save_model(epoch + 1)
if hasattr(self.train_data, "save_masks"):
self.train_data.save_masks(epoch + 1)
elapsed = timer.elapsed()
train_measures_str = measures_string_to_print(train_measures)
val_measures_str = measures_string_to_print(valid_measures)
print("epoch", epoch + 1, "finished. elapsed:", "%.5f" % elapsed, "train:", train_measures_str,
"valid:", val_measures_str, file=log.v1)
def eval(self):
timer = Timer()
measures = self.run_epoch(self.trainer.validation_step, self.valid_data, epoch=0, is_train_run=False)
elapsed = timer.elapsed()
print("eval finished. elapsed:", elapsed, measures, file=log.v1)
@staticmethod
def run_epoch(step_fn, data, epoch, is_train_run):
n_examples_processed = 0
n_examples_per_epoch = data.n_examples_per_epoch()
extraction_keys = data.get_extraction_keys()
measures_accumulated = {}
if not is_train_run and hasattr(data, "prepare_saving_epoch_measures"):
data.prepare_saving_epoch_measures(epoch + 1)
while n_examples_processed < n_examples_per_epoch:
timer = Timer()
res = step_fn(epoch, extraction_keys=extraction_keys)
measures = res[Measures.MEASURES]
n_examples_processed += measures[Measures.N_EXAMPLES]
measures_str = measures_string_to_print(compute_measures_average(measures, for_final_result=False))
accumulate_measures(measures_accumulated, measures)
if not is_train_run and hasattr(data, "save_epoch_measures"):
data.save_epoch_measures(measures)
if hasattr(data, "use_segmentation_mask"):
data.use_segmentation_mask(res)
elapsed = timer.elapsed()
print("{:>5}".format(n_examples_processed), '/', n_examples_per_epoch, measures_str, "elapsed", elapsed, file=log.v5)
measures_averaged = compute_measures_average(measures_accumulated, for_final_result=True)
if not is_train_run and hasattr(data, "finalize_saving_epoch_measures"):
new_measures = data.finalize_saving_epoch_measures()
measures_averaged.update(new_measures)
return measures_averaged
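# Illustrative usage sketch (editor's addition, not part of the original module). The config
# loading step is an assumption; only Engine.run()/eval() above are real entry points.
#
# config = ...                # a Config object built from the refinement_net config format
# engine = Engine(config)     # builds the train/test networks, the Trainer and the Saver
# engine.run()                # dispatches to train(), eval(), etc. based on the "task" entry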
| 45.391608
| 131
| 0.725158
|
2bf1e601e8bd724c073a00fd575e78ced4e13706
| 53,264
|
py
|
Python
|
src/bondora_api/models/loan_part_details.py
|
parruc/bondora_api
|
f36ea8d7149d75a2e5f14a695e5a4e57f0a3518d
|
[
"Apache-2.0"
] | 8
|
2019-03-09T20:38:27.000Z
|
2021-02-10T20:44:22.000Z
|
src/bondora_api/models/loan_part_details.py
|
parruc/bondora_api
|
f36ea8d7149d75a2e5f14a695e5a4e57f0a3518d
|
[
"Apache-2.0"
] | 1
|
2018-03-06T09:44:21.000Z
|
2018-03-06T09:44:21.000Z
|
src/bondora_api/models/loan_part_details.py
|
parruc/bondora_api
|
f36ea8d7149d75a2e5f14a695e5a4e57f0a3518d
|
[
"Apache-2.0"
] | 3
|
2019-06-03T13:44:05.000Z
|
2020-11-16T13:17:38.000Z
|
# coding: utf-8
"""
Bondora API V1
Bondora API version 1
OpenAPI spec version: v1
Contact: investor@bondora.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class LoanPartDetails(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, loan_part_id=None, amount=None, auction_id=None, auction_name=None, auction_number=None, auction_bid_number=None, country=None, credit_score=None, credit_score_es_micro_l=None, credit_score_es_equifax_risk=None, credit_score_fi_asiakas_tieto_risk_grade=None, credit_score_ee_mini=None, rating=None, initial_interest=None, interest=None, use_of_loan=None, income_verification_status=None, loan_id=None, loan_status_code=None, user_name=None, gender=None, date_of_birth=None, signed_date=None, re_scheduled_on=None, debt_occured_on=None, debt_occured_on_for_secondary=None, loan_duration=None, next_payment_nr=None, next_payment_date=None, next_payment_sum=None, nr_of_scheduled_payments=None, last_payment_date=None, principal_repaid=None, interest_repaid=None, late_amount_paid=None, principal_late_amount=None, interest_late_amount=None, penalty_late_amount=None, principal_write_off_amount=None, interest_write_off_amount=None, penalty_write_off_amount=None, debt_servicing_cost_main_amount=None, debt_servicing_cost_interest_amount=None, debt_servicing_cost_penalty_amount=None, repaid_principal_current_owner=None, repaid_interests_current_owner=None, late_charges_paid_current_owner=None, repaid_total_current_owner=None, total_repaid=None, debt_managment_events=None, loan_transfers=None, scheduled_payments=None):
"""
LoanPartDetails - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'loan_part_id': 'str',
'amount': 'float',
'auction_id': 'str',
'auction_name': 'str',
'auction_number': 'int',
'auction_bid_number': 'int',
'country': 'str',
'credit_score': 'float',
'credit_score_es_micro_l': 'str',
'credit_score_es_equifax_risk': 'str',
'credit_score_fi_asiakas_tieto_risk_grade': 'str',
'credit_score_ee_mini': 'str',
'rating': 'str',
'initial_interest': 'float',
'interest': 'float',
'use_of_loan': 'int',
'income_verification_status': 'int',
'loan_id': 'str',
'loan_status_code': 'int',
'user_name': 'str',
'gender': 'int',
'date_of_birth': 'datetime',
'signed_date': 'datetime',
're_scheduled_on': 'datetime',
'debt_occured_on': 'datetime',
'debt_occured_on_for_secondary': 'datetime',
'loan_duration': 'int',
'next_payment_nr': 'int',
'next_payment_date': 'datetime',
'next_payment_sum': 'float',
'nr_of_scheduled_payments': 'int',
'last_payment_date': 'datetime',
'principal_repaid': 'float',
'interest_repaid': 'float',
'late_amount_paid': 'float',
'principal_remaining': 'float',
'principal_late_amount': 'float',
'interest_late_amount': 'float',
'penalty_late_amount': 'float',
'late_amount_total': 'float',
'principal_write_off_amount': 'float',
'interest_write_off_amount': 'float',
'penalty_write_off_amount': 'float',
'write_off_total': 'float',
'debt_servicing_cost_main_amount': 'float',
'debt_servicing_cost_interest_amount': 'float',
'debt_servicing_cost_penalty_amount': 'float',
'debt_servicing_cost_total': 'float',
'repaid_principal_current_owner': 'float',
'repaid_interests_current_owner': 'float',
'late_charges_paid_current_owner': 'float',
'repaid_total_current_owner': 'float',
'total_repaid': 'float',
'debt_managment_events': 'list[DebtManagementEvent]',
'loan_transfers': 'list[LoanTransfer]',
'scheduled_payments': 'list[ScheduledPayment]'
}
self.attribute_map = {
'loan_part_id': 'LoanPartId',
'amount': 'Amount',
'auction_id': 'AuctionId',
'auction_name': 'AuctionName',
'auction_number': 'AuctionNumber',
'auction_bid_number': 'AuctionBidNumber',
'country': 'Country',
'credit_score': 'CreditScore',
'credit_score_es_micro_l': 'CreditScoreEsMicroL',
'credit_score_es_equifax_risk': 'CreditScoreEsEquifaxRisk',
'credit_score_fi_asiakas_tieto_risk_grade': 'CreditScoreFiAsiakasTietoRiskGrade',
'credit_score_ee_mini': 'CreditScoreEeMini',
'rating': 'Rating',
'initial_interest': 'InitialInterest',
'interest': 'Interest',
'use_of_loan': 'UseOfLoan',
'income_verification_status': 'IncomeVerificationStatus',
'loan_id': 'LoanId',
'loan_status_code': 'LoanStatusCode',
'user_name': 'UserName',
'gender': 'Gender',
'date_of_birth': 'DateOfBirth',
'signed_date': 'SignedDate',
're_scheduled_on': 'ReScheduledOn',
'debt_occured_on': 'DebtOccuredOn',
'debt_occured_on_for_secondary': 'DebtOccuredOnForSecondary',
'loan_duration': 'LoanDuration',
'next_payment_nr': 'NextPaymentNr',
'next_payment_date': 'NextPaymentDate',
'next_payment_sum': 'NextPaymentSum',
'nr_of_scheduled_payments': 'NrOfScheduledPayments',
'last_payment_date': 'LastPaymentDate',
'principal_repaid': 'PrincipalRepaid',
'interest_repaid': 'InterestRepaid',
'late_amount_paid': 'LateAmountPaid',
'principal_remaining': 'PrincipalRemaining',
'principal_late_amount': 'PrincipalLateAmount',
'interest_late_amount': 'InterestLateAmount',
'penalty_late_amount': 'PenaltyLateAmount',
'late_amount_total': 'LateAmountTotal',
'principal_write_off_amount': 'PrincipalWriteOffAmount',
'interest_write_off_amount': 'InterestWriteOffAmount',
'penalty_write_off_amount': 'PenaltyWriteOffAmount',
'write_off_total': 'WriteOffTotal',
'debt_servicing_cost_main_amount': 'DebtServicingCostMainAmount',
'debt_servicing_cost_interest_amount': 'DebtServicingCostInterestAmount',
'debt_servicing_cost_penalty_amount': 'DebtServicingCostPenaltyAmount',
'debt_servicing_cost_total': 'DebtServicingCostTotal',
'repaid_principal_current_owner': 'RepaidPrincipalCurrentOwner',
'repaid_interests_current_owner': 'RepaidInterestsCurrentOwner',
'late_charges_paid_current_owner': 'LateChargesPaidCurrentOwner',
'repaid_total_current_owner': 'RepaidTotalCurrentOwner',
'total_repaid': 'TotalRepaid',
'debt_managment_events': 'DebtManagmentEvents',
'loan_transfers': 'LoanTransfers',
'scheduled_payments': 'ScheduledPayments'
}
self._principal_remaining = None
self._late_amount_total = None
self._write_off_total = None
self._debt_servicing_cost_total = None
self._loan_part_id = loan_part_id
self._amount = amount
self._auction_id = auction_id
self._auction_name = auction_name
self._auction_number = auction_number
self._auction_bid_number = auction_bid_number
self._country = country
self._credit_score = credit_score
self._credit_score_es_micro_l = credit_score_es_micro_l
self._credit_score_es_equifax_risk = credit_score_es_equifax_risk
self._credit_score_fi_asiakas_tieto_risk_grade = credit_score_fi_asiakas_tieto_risk_grade
self._credit_score_ee_mini = credit_score_ee_mini
self._rating = rating
self._initial_interest = initial_interest
self._interest = interest
self._use_of_loan = use_of_loan
self._income_verification_status = income_verification_status
self._loan_id = loan_id
self._loan_status_code = loan_status_code
self._user_name = user_name
self._gender = gender
self._date_of_birth = date_of_birth
self._signed_date = signed_date
self._re_scheduled_on = re_scheduled_on
self._debt_occured_on = debt_occured_on
self._debt_occured_on_for_secondary = debt_occured_on_for_secondary
self._loan_duration = loan_duration
self._next_payment_nr = next_payment_nr
self._next_payment_date = next_payment_date
self._next_payment_sum = next_payment_sum
self._nr_of_scheduled_payments = nr_of_scheduled_payments
self._last_payment_date = last_payment_date
self._principal_repaid = principal_repaid
self._interest_repaid = interest_repaid
self._late_amount_paid = late_amount_paid
self._principal_late_amount = principal_late_amount
self._interest_late_amount = interest_late_amount
self._penalty_late_amount = penalty_late_amount
self._principal_write_off_amount = principal_write_off_amount
self._interest_write_off_amount = interest_write_off_amount
self._penalty_write_off_amount = penalty_write_off_amount
self._debt_servicing_cost_main_amount = debt_servicing_cost_main_amount
self._debt_servicing_cost_interest_amount = debt_servicing_cost_interest_amount
self._debt_servicing_cost_penalty_amount = debt_servicing_cost_penalty_amount
self._repaid_principal_current_owner = repaid_principal_current_owner
self._repaid_interests_current_owner = repaid_interests_current_owner
self._late_charges_paid_current_owner = late_charges_paid_current_owner
self._repaid_total_current_owner = repaid_total_current_owner
self._total_repaid = total_repaid
self._debt_managment_events = debt_managment_events
self._loan_transfers = loan_transfers
self._scheduled_payments = scheduled_payments
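    # Illustrative usage sketch (editor's addition, not part of the generated model): the model
    # is normally constructed by keyword from deserialized API data; all values below are made up.
    #
    # part = LoanPartDetails(loan_part_id="a1b2c3", amount=10.0, interest=12.5, country="EE")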
@property
def loan_part_id(self):
"""
Gets the loan_part_id of this LoanPartDetails.
LoanPart unique identifier
:return: The loan_part_id of this LoanPartDetails.
:rtype: str
"""
return self._loan_part_id
@loan_part_id.setter
def loan_part_id(self, loan_part_id):
"""
Sets the loan_part_id of this LoanPartDetails.
LoanPart unique identifier
:param loan_part_id: The loan_part_id of this LoanPartDetails.
:type: str
"""
self._loan_part_id = loan_part_id
@property
def amount(self):
"""
Gets the amount of this LoanPartDetails.
Investment amount
:return: The amount of this LoanPartDetails.
:rtype: float
"""
return self._amount
@amount.setter
def amount(self, amount):
"""
Sets the amount of this LoanPartDetails.
Investment amount
:param amount: The amount of this LoanPartDetails.
:type: float
"""
self._amount = amount
@property
def auction_id(self):
"""
Gets the auction_id of this LoanPartDetails.
Auction unique identifier
:return: The auction_id of this LoanPartDetails.
:rtype: str
"""
return self._auction_id
@auction_id.setter
def auction_id(self, auction_id):
"""
Sets the auction_id of this LoanPartDetails.
Auction unique identifier
:param auction_id: The auction_id of this LoanPartDetails.
:type: str
"""
self._auction_id = auction_id
@property
def auction_name(self):
"""
Gets the auction_name of this LoanPartDetails.
Auction name
:return: The auction_name of this LoanPartDetails.
:rtype: str
"""
return self._auction_name
@auction_name.setter
def auction_name(self, auction_name):
"""
Sets the auction_name of this LoanPartDetails.
Auction name
:param auction_name: The auction_name of this LoanPartDetails.
:type: str
"""
self._auction_name = auction_name
@property
def auction_number(self):
"""
Gets the auction_number of this LoanPartDetails.
Auction number
:return: The auction_number of this LoanPartDetails.
:rtype: int
"""
return self._auction_number
@auction_number.setter
def auction_number(self, auction_number):
"""
Sets the auction_number of this LoanPartDetails.
Auction number
:param auction_number: The auction_number of this LoanPartDetails.
:type: int
"""
self._auction_number = auction_number
@property
def auction_bid_number(self):
"""
Gets the auction_bid_number of this LoanPartDetails.
Auction bid number
:return: The auction_bid_number of this LoanPartDetails.
:rtype: int
"""
return self._auction_bid_number
@auction_bid_number.setter
def auction_bid_number(self, auction_bid_number):
"""
Sets the auction_bid_number of this LoanPartDetails.
Auction bid number
:param auction_bid_number: The auction_bid_number of this LoanPartDetails.
:type: int
"""
self._auction_bid_number = auction_bid_number
@property
def country(self):
"""
Gets the country of this LoanPartDetails.
Residency of the borrower
:return: The country of this LoanPartDetails.
:rtype: str
"""
return self._country
@country.setter
def country(self, country):
"""
Sets the country of this LoanPartDetails.
Residency of the borrower
:param country: The country of this LoanPartDetails.
:type: str
"""
self._country = country
@property
def credit_score(self):
"""
Gets the credit_score of this LoanPartDetails.
<para> 1000 No previous payments problems</para> <para> 900 Payments problems finished 24-36 months ago</para> <para> 800 Payments problems finished 12-24 months ago</para> <para> 700 Payments problems finished 6-12 months ago</para> <para> 600 Payment problems finished <6 months ago</para> <para> 500 Active payment problems</para>
:return: The credit_score of this LoanPartDetails.
:rtype: float
"""
return self._credit_score
@credit_score.setter
def credit_score(self, credit_score):
"""
Sets the credit_score of this LoanPartDetails.
<para> 1000 No previous payments problems</para> <para> 900 Payments problems finished 24-36 months ago</para> <para> 800 Payments problems finished 12-24 months ago</para> <para> 700 Payments problems finished 6-12 months ago</para> <para> 600 Payment problems finished <6 months ago</para> <para> 500 Active payment problems</para>
:param credit_score: The credit_score of this LoanPartDetails.
:type: float
"""
self._credit_score = credit_score
@property
def credit_score_es_micro_l(self):
"""
Gets the credit_score_es_micro_l of this LoanPartDetails.
A score that is specifically designed for risk classifying subprime borrowers (defined by Equifax as borrowers that do not have access to bank loans). A measure of the probability of default one month ahead. <para>The score is given on a 10-grade scale, from the best score to the worst:</para><para>M1, M2, M3, M4, M5, M6, M7, M8, M9, M10</para>
:return: The credit_score_es_micro_l of this LoanPartDetails.
:rtype: str
"""
return self._credit_score_es_micro_l
@credit_score_es_micro_l.setter
def credit_score_es_micro_l(self, credit_score_es_micro_l):
"""
Sets the credit_score_es_micro_l of this LoanPartDetails.
A score that is specifically designed for risk classifying subprime borrowers (defined by Equifax as borrowers that do not have access to bank loans). A measure of the probability of default one month ahead. <para>The score is given on a 10-grade scale, from the best score to the worst:</para><para>M1, M2, M3, M4, M5, M6, M7, M8, M9, M10</para>
:param credit_score_es_micro_l: The credit_score_es_micro_l of this LoanPartDetails.
:type: str
"""
self._credit_score_es_micro_l = credit_score_es_micro_l
@property
def credit_score_es_equifax_risk(self):
"""
Gets the credit_score_es_equifax_risk of this LoanPartDetails.
Generic score for the loan applicants that do not have active past due operations in ASNEF. A measure of the probability of default one year ahead. The score is given on a 6-grade scale. <para>AAA Very low</para><para>AA Low</para><para>A Average</para><para>B Average High</para><para>C High</para><para>D Very High</para>
:return: The credit_score_es_equifax_risk of this LoanPartDetails.
:rtype: str
"""
return self._credit_score_es_equifax_risk
@credit_score_es_equifax_risk.setter
def credit_score_es_equifax_risk(self, credit_score_es_equifax_risk):
"""
Sets the credit_score_es_equifax_risk of this LoanPartDetails.
Generic score for the loan applicants that do not have active past due operations in ASNEF. A measure of the probability of default one year ahead. The score is given on a 6-grade scale. <para>AAA Very low</para><para>AA Low</para><para>A Average</para><para>B Average High</para><para>C High</para><para>D Very High</para>
:param credit_score_es_equifax_risk: The credit_score_es_equifax_risk of this LoanPartDetails.
:type: str
"""
self._credit_score_es_equifax_risk = credit_score_es_equifax_risk
@property
def credit_score_fi_asiakas_tieto_risk_grade(self):
"""
Gets the credit_score_fi_asiakas_tieto_risk_grade of this LoanPartDetails.
Credit Scoring model for Finnish Asiakastieto <para>RL1 Very low risk 01-20</para><para>RL2 Low risk 21-40</para><para>RL3 Average risk 41-60</para><para>RL4 Big risk 61-80</para><para>RL5 Huge risk 81-100</para>
:return: The credit_score_fi_asiakas_tieto_risk_grade of this LoanPartDetails.
:rtype: str
"""
return self._credit_score_fi_asiakas_tieto_risk_grade
@credit_score_fi_asiakas_tieto_risk_grade.setter
def credit_score_fi_asiakas_tieto_risk_grade(self, credit_score_fi_asiakas_tieto_risk_grade):
"""
Sets the credit_score_fi_asiakas_tieto_risk_grade of this LoanPartDetails.
Credit Scoring model for Finnish Asiakastieto <para>RL1 Very low risk 01-20</para><para>RL2 Low risk 21-40</para><para>RL3 Average risk 41-60</para><para>RL4 Big risk 61-80</para><para>RL5 Huge risk 81-100</para>
:param credit_score_fi_asiakas_tieto_risk_grade: The credit_score_fi_asiakas_tieto_risk_grade of this LoanPartDetails.
:type: str
"""
self._credit_score_fi_asiakas_tieto_risk_grade = credit_score_fi_asiakas_tieto_risk_grade
@property
def credit_score_ee_mini(self):
"""
Gets the credit_score_ee_mini of this LoanPartDetails.
Credit scoring for Estonian loans <para>1000 No previous payments problems</para><para>900 Payments problems finished 24-36 months ago</para><para>800 Payments problems finished 12-24 months ago</para><para>700 Payments problems finished 6-12 months ago</para><para>600 Payment problems finished <6 months ago</para><para>500 Active payment problems</para>
:return: The credit_score_ee_mini of this LoanPartDetails.
:rtype: str
"""
return self._credit_score_ee_mini
@credit_score_ee_mini.setter
def credit_score_ee_mini(self, credit_score_ee_mini):
"""
Sets the credit_score_ee_mini of this LoanPartDetails.
Credit scoring for Estonian loans <para>1000 No previous payments problems</para><para>900 Payments problems finished 24-36 months ago</para><para>800 Payments problems finished 12-24 months ago</para><para>700 Payments problems finished 6-12 months ago</para><para>600 Payment problems finished <6 months ago</para><para>500 Active payment problems</para>
:param credit_score_ee_mini: The credit_score_ee_mini of this LoanPartDetails.
:type: str
"""
self._credit_score_ee_mini = credit_score_ee_mini
@property
def rating(self):
"""
Gets the rating of this LoanPartDetails.
Bondora Rating issued by the Rating model
:return: The rating of this LoanPartDetails.
:rtype: str
"""
return self._rating
@rating.setter
def rating(self, rating):
"""
Sets the rating of this LoanPartDetails.
Bondora Rating issued by the Rating model
:param rating: The rating of this LoanPartDetails.
:type: str
"""
self._rating = rating
@property
def initial_interest(self):
"""
Gets the initial_interest of this LoanPartDetails.
Initial interest rate
:return: The initial_interest of this LoanPartDetails.
:rtype: float
"""
return self._initial_interest
@initial_interest.setter
def initial_interest(self, initial_interest):
"""
Sets the initial_interest of this LoanPartDetails.
Initial interest rate
:param initial_interest: The initial_interest of this LoanPartDetails.
:type: float
"""
self._initial_interest = initial_interest
@property
def interest(self):
"""
Gets the interest of this LoanPartDetails.
Current interest rate
:return: The interest of this LoanPartDetails.
:rtype: float
"""
return self._interest
@interest.setter
def interest(self, interest):
"""
Sets the interest of this LoanPartDetails.
Current interest rate
:param interest: The interest of this LoanPartDetails.
:type: float
"""
self._interest = interest
@property
def use_of_loan(self):
"""
Gets the use_of_loan of this LoanPartDetails.
Use of loan
:return: The use_of_loan of this LoanPartDetails.
:rtype: int
"""
return self._use_of_loan
@use_of_loan.setter
def use_of_loan(self, use_of_loan):
"""
Sets the use_of_loan of this LoanPartDetails.
Use of loan
:param use_of_loan: The use_of_loan of this LoanPartDetails.
:type: int
"""
allowed_values = [0, 1, 2, 3, 4, 5, 6, 7, 8, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, -1]
if use_of_loan not in allowed_values:
raise ValueError(
"Invalid value for `use_of_loan` ({0}), must be one of {1}"
.format(use_of_loan, allowed_values)
)
self._use_of_loan = use_of_loan
@property
def income_verification_status(self):
"""
Gets the income_verification_status of this LoanPartDetails.
Income verification type
:return: The income_verification_status of this LoanPartDetails.
:rtype: int
"""
return self._income_verification_status
@income_verification_status.setter
def income_verification_status(self, income_verification_status):
"""
Sets the income_verification_status of this LoanPartDetails.
Income verification type
:param income_verification_status: The income_verification_status of this LoanPartDetails.
:type: int
"""
allowed_values = [1, 2, 3, 4]
if income_verification_status not in allowed_values:
raise ValueError(
"Invalid value for `income_verification_status` ({0}), must be one of {1}"
.format(income_verification_status, allowed_values)
)
self._income_verification_status = income_verification_status
@property
def loan_id(self):
"""
Gets the loan_id of this LoanPartDetails.
Loan unique identifier
:return: The loan_id of this LoanPartDetails.
:rtype: str
"""
return self._loan_id
@loan_id.setter
def loan_id(self, loan_id):
"""
Sets the loan_id of this LoanPartDetails.
Loan unique identifier
:param loan_id: The loan_id of this LoanPartDetails.
:type: str
"""
self._loan_id = loan_id
@property
def loan_status_code(self):
"""
Gets the loan_status_code of this LoanPartDetails.
Loan status code <para>2 Current</para><para>100 Overdue</para><para>5 60+ days overdue</para><para>4 Repaid</para><para>8 Released</para>
:return: The loan_status_code of this LoanPartDetails.
:rtype: int
"""
return self._loan_status_code
@loan_status_code.setter
def loan_status_code(self, loan_status_code):
"""
Sets the loan_status_code of this LoanPartDetails.
Loan status code <para>2 Current</para><para>100 Overdue</para><para>5 60+ days overdue</para><para>4 Repaid</para><para>8 Released</para>
:param loan_status_code: The loan_status_code of this LoanPartDetails.
:type: int
"""
self._loan_status_code = loan_status_code
@property
def user_name(self):
"""
Gets the user_name of this LoanPartDetails.
Borrower's username
:return: The user_name of this LoanPartDetails.
:rtype: str
"""
return self._user_name
@user_name.setter
def user_name(self, user_name):
"""
Sets the user_name of this LoanPartDetails.
Borrower's username
:param user_name: The user_name of this LoanPartDetails.
:type: str
"""
self._user_name = user_name
@property
def gender(self):
"""
Gets the gender of this LoanPartDetails.
Borrower's Gender
:return: The gender of this LoanPartDetails.
:rtype: int
"""
return self._gender
@gender.setter
def gender(self, gender):
"""
Sets the gender of this LoanPartDetails.
Borrower's Gender
:param gender: The gender of this LoanPartDetails.
:type: int
"""
allowed_values = [0, 1, 2]
if gender not in allowed_values:
raise ValueError(
"Invalid value for `gender` ({0}), must be one of {1}"
.format(gender, allowed_values)
)
self._gender = gender
@property
def date_of_birth(self):
"""
Gets the date_of_birth of this LoanPartDetails.
Borrower's date of birth
:return: The date_of_birth of this LoanPartDetails.
:rtype: datetime
"""
return self._date_of_birth
@date_of_birth.setter
def date_of_birth(self, date_of_birth):
"""
Sets the date_of_birth of this LoanPartDetails.
Borrower's date of birth
:param date_of_birth: The date_of_birth of this LoanPartDetails.
:type: datetime
"""
self._date_of_birth = date_of_birth
@property
def signed_date(self):
"""
Gets the signed_date of this LoanPartDetails.
Loan issued date
:return: The signed_date of this LoanPartDetails.
:rtype: datetime
"""
return self._signed_date
@signed_date.setter
def signed_date(self, signed_date):
"""
Sets the signed_date of this LoanPartDetails.
Loan issued date
:param signed_date: The signed_date of this LoanPartDetails.
:type: datetime
"""
self._signed_date = signed_date
@property
def re_scheduled_on(self):
"""
Gets the re_scheduled_on of this LoanPartDetails.
Last rescheduling date
:return: The re_scheduled_on of this LoanPartDetails.
:rtype: datetime
"""
return self._re_scheduled_on
@re_scheduled_on.setter
def re_scheduled_on(self, re_scheduled_on):
"""
Sets the re_scheduled_on of this LoanPartDetails.
Last rescheduling date
:param re_scheduled_on: The re_scheduled_on of this LoanPartDetails.
:type: datetime
"""
self._re_scheduled_on = re_scheduled_on
@property
def debt_occured_on(self):
"""
Gets the debt_occured_on of this LoanPartDetails.
        Debt occurred on date
:return: The debt_occured_on of this LoanPartDetails.
:rtype: datetime
"""
return self._debt_occured_on
@debt_occured_on.setter
def debt_occured_on(self, debt_occured_on):
"""
Sets the debt_occured_on of this LoanPartDetails.
        Debt occurred on date
:param debt_occured_on: The debt_occured_on of this LoanPartDetails.
:type: datetime
"""
self._debt_occured_on = debt_occured_on
@property
def debt_occured_on_for_secondary(self):
"""
Gets the debt_occured_on_for_secondary of this LoanPartDetails.
        Debt occurred on date
:return: The debt_occured_on_for_secondary of this LoanPartDetails.
:rtype: datetime
"""
return self._debt_occured_on_for_secondary
@debt_occured_on_for_secondary.setter
def debt_occured_on_for_secondary(self, debt_occured_on_for_secondary):
"""
Sets the debt_occured_on_for_secondary of this LoanPartDetails.
        Debt occurred on date
:param debt_occured_on_for_secondary: The debt_occured_on_for_secondary of this LoanPartDetails.
:type: datetime
"""
self._debt_occured_on_for_secondary = debt_occured_on_for_secondary
@property
def loan_duration(self):
"""
Gets the loan_duration of this LoanPartDetails.
        Loan original length
:return: The loan_duration of this LoanPartDetails.
:rtype: int
"""
return self._loan_duration
@loan_duration.setter
def loan_duration(self, loan_duration):
"""
Sets the loan_duration of this LoanPartDetails.
        Loan original length
:param loan_duration: The loan_duration of this LoanPartDetails.
:type: int
"""
self._loan_duration = loan_duration
@property
def next_payment_nr(self):
"""
Gets the next_payment_nr of this LoanPartDetails.
Next scheduled payment number
:return: The next_payment_nr of this LoanPartDetails.
:rtype: int
"""
return self._next_payment_nr
@next_payment_nr.setter
def next_payment_nr(self, next_payment_nr):
"""
Sets the next_payment_nr of this LoanPartDetails.
Next scheduled payment number
:param next_payment_nr: The next_payment_nr of this LoanPartDetails.
:type: int
"""
self._next_payment_nr = next_payment_nr
@property
def next_payment_date(self):
"""
Gets the next_payment_date of this LoanPartDetails.
Next scheduled payment date
:return: The next_payment_date of this LoanPartDetails.
:rtype: datetime
"""
return self._next_payment_date
@next_payment_date.setter
def next_payment_date(self, next_payment_date):
"""
Sets the next_payment_date of this LoanPartDetails.
Next scheduled payment date
:param next_payment_date: The next_payment_date of this LoanPartDetails.
:type: datetime
"""
self._next_payment_date = next_payment_date
@property
def next_payment_sum(self):
"""
Gets the next_payment_sum of this LoanPartDetails.
Next scheduled payment amount
:return: The next_payment_sum of this LoanPartDetails.
:rtype: float
"""
return self._next_payment_sum
@next_payment_sum.setter
def next_payment_sum(self, next_payment_sum):
"""
Sets the next_payment_sum of this LoanPartDetails.
Next scheduled payment amount
:param next_payment_sum: The next_payment_sum of this LoanPartDetails.
:type: float
"""
self._next_payment_sum = next_payment_sum
@property
def nr_of_scheduled_payments(self):
"""
Gets the nr_of_scheduled_payments of this LoanPartDetails.
Total number of scheduled payments
:return: The nr_of_scheduled_payments of this LoanPartDetails.
:rtype: int
"""
return self._nr_of_scheduled_payments
@nr_of_scheduled_payments.setter
def nr_of_scheduled_payments(self, nr_of_scheduled_payments):
"""
Sets the nr_of_scheduled_payments of this LoanPartDetails.
Total number of scheduled payments
:param nr_of_scheduled_payments: The nr_of_scheduled_payments of this LoanPartDetails.
:type: int
"""
self._nr_of_scheduled_payments = nr_of_scheduled_payments
@property
def last_payment_date(self):
"""
Gets the last_payment_date of this LoanPartDetails.
Last scheduled payment date
:return: The last_payment_date of this LoanPartDetails.
:rtype: datetime
"""
return self._last_payment_date
@last_payment_date.setter
def last_payment_date(self, last_payment_date):
"""
Sets the last_payment_date of this LoanPartDetails.
Last scheduled payment date
:param last_payment_date: The last_payment_date of this LoanPartDetails.
:type: datetime
"""
self._last_payment_date = last_payment_date
@property
def principal_repaid(self):
"""
Gets the principal_repaid of this LoanPartDetails.
Total principal repaid amount
:return: The principal_repaid of this LoanPartDetails.
:rtype: float
"""
return self._principal_repaid
@principal_repaid.setter
def principal_repaid(self, principal_repaid):
"""
Sets the principal_repaid of this LoanPartDetails.
Total principal repaid amount
:param principal_repaid: The principal_repaid of this LoanPartDetails.
:type: float
"""
self._principal_repaid = principal_repaid
@property
def interest_repaid(self):
"""
Gets the interest_repaid of this LoanPartDetails.
Total interest repaid amount
:return: The interest_repaid of this LoanPartDetails.
:rtype: float
"""
return self._interest_repaid
@interest_repaid.setter
def interest_repaid(self, interest_repaid):
"""
Sets the interest_repaid of this LoanPartDetails.
Total interest repaid amount
:param interest_repaid: The interest_repaid of this LoanPartDetails.
:type: float
"""
self._interest_repaid = interest_repaid
@property
def late_amount_paid(self):
"""
Gets the late_amount_paid of this LoanPartDetails.
Total late charges paid amount
:return: The late_amount_paid of this LoanPartDetails.
:rtype: float
"""
return self._late_amount_paid
@late_amount_paid.setter
def late_amount_paid(self, late_amount_paid):
"""
Sets the late_amount_paid of this LoanPartDetails.
Total late charges paid amount
:param late_amount_paid: The late_amount_paid of this LoanPartDetails.
:type: float
"""
self._late_amount_paid = late_amount_paid
@property
def principal_remaining(self):
"""
Gets the principal_remaining of this LoanPartDetails.
Remaining principal amount
:return: The principal_remaining of this LoanPartDetails.
:rtype: float
"""
return self._principal_remaining
@property
def principal_late_amount(self):
"""
Gets the principal_late_amount of this LoanPartDetails.
Principal debt amount
:return: The principal_late_amount of this LoanPartDetails.
:rtype: float
"""
return self._principal_late_amount
@principal_late_amount.setter
def principal_late_amount(self, principal_late_amount):
"""
Sets the principal_late_amount of this LoanPartDetails.
Principal debt amount
:param principal_late_amount: The principal_late_amount of this LoanPartDetails.
:type: float
"""
self._principal_late_amount = principal_late_amount
@property
def interest_late_amount(self):
"""
Gets the interest_late_amount of this LoanPartDetails.
Interest debt amount
:return: The interest_late_amount of this LoanPartDetails.
:rtype: float
"""
return self._interest_late_amount
@interest_late_amount.setter
def interest_late_amount(self, interest_late_amount):
"""
Sets the interest_late_amount of this LoanPartDetails.
Interest debt amount
:param interest_late_amount: The interest_late_amount of this LoanPartDetails.
:type: float
"""
self._interest_late_amount = interest_late_amount
@property
def penalty_late_amount(self):
"""
Gets the penalty_late_amount of this LoanPartDetails.
Late charges debt amount
:return: The penalty_late_amount of this LoanPartDetails.
:rtype: float
"""
return self._penalty_late_amount
@penalty_late_amount.setter
def penalty_late_amount(self, penalty_late_amount):
"""
Sets the penalty_late_amount of this LoanPartDetails.
Late charges debt amount
:param penalty_late_amount: The penalty_late_amount of this LoanPartDetails.
:type: float
"""
self._penalty_late_amount = penalty_late_amount
@property
def late_amount_total(self):
"""
Gets the late_amount_total of this LoanPartDetails.
Late amount total
:return: The late_amount_total of this LoanPartDetails.
:rtype: float
"""
return self._late_amount_total
@property
def principal_write_off_amount(self):
"""
Gets the principal_write_off_amount of this LoanPartDetails.
Total amount of principal written off
:return: The principal_write_off_amount of this LoanPartDetails.
:rtype: float
"""
return self._principal_write_off_amount
@principal_write_off_amount.setter
def principal_write_off_amount(self, principal_write_off_amount):
"""
Sets the principal_write_off_amount of this LoanPartDetails.
Total amount of principal written off
:param principal_write_off_amount: The principal_write_off_amount of this LoanPartDetails.
:type: float
"""
self._principal_write_off_amount = principal_write_off_amount
@property
def interest_write_off_amount(self):
"""
Gets the interest_write_off_amount of this LoanPartDetails.
Total amount of interest written off
:return: The interest_write_off_amount of this LoanPartDetails.
:rtype: float
"""
return self._interest_write_off_amount
@interest_write_off_amount.setter
def interest_write_off_amount(self, interest_write_off_amount):
"""
Sets the interest_write_off_amount of this LoanPartDetails.
Total amount of interest written off
:param interest_write_off_amount: The interest_write_off_amount of this LoanPartDetails.
:type: float
"""
self._interest_write_off_amount = interest_write_off_amount
@property
def penalty_write_off_amount(self):
"""
Gets the penalty_write_off_amount of this LoanPartDetails.
Total amount of penalty written off
:return: The penalty_write_off_amount of this LoanPartDetails.
:rtype: float
"""
return self._penalty_write_off_amount
@penalty_write_off_amount.setter
def penalty_write_off_amount(self, penalty_write_off_amount):
"""
Sets the penalty_write_off_amount of this LoanPartDetails.
Total amount of penalty written off
:param penalty_write_off_amount: The penalty_write_off_amount of this LoanPartDetails.
:type: float
"""
self._penalty_write_off_amount = penalty_write_off_amount
@property
def write_off_total(self):
"""
Gets the write_off_total of this LoanPartDetails.
Write off total
:return: The write_off_total of this LoanPartDetails.
:rtype: float
"""
return self._write_off_total
@property
def debt_servicing_cost_main_amount(self):
"""
Gets the debt_servicing_cost_main_amount of this LoanPartDetails.
Total amount of principal debt servicing cost
:return: The debt_servicing_cost_main_amount of this LoanPartDetails.
:rtype: float
"""
return self._debt_servicing_cost_main_amount
@debt_servicing_cost_main_amount.setter
def debt_servicing_cost_main_amount(self, debt_servicing_cost_main_amount):
"""
Sets the debt_servicing_cost_main_amount of this LoanPartDetails.
Total amount of principal debt servicing cost
:param debt_servicing_cost_main_amount: The debt_servicing_cost_main_amount of this LoanPartDetails.
:type: float
"""
self._debt_servicing_cost_main_amount = debt_servicing_cost_main_amount
@property
def debt_servicing_cost_interest_amount(self):
"""
Gets the debt_servicing_cost_interest_amount of this LoanPartDetails.
Total amount of interest debt servicing cost
:return: The debt_servicing_cost_interest_amount of this LoanPartDetails.
:rtype: float
"""
return self._debt_servicing_cost_interest_amount
@debt_servicing_cost_interest_amount.setter
def debt_servicing_cost_interest_amount(self, debt_servicing_cost_interest_amount):
"""
Sets the debt_servicing_cost_interest_amount of this LoanPartDetails.
Total amount of interest debt servicing cost
:param debt_servicing_cost_interest_amount: The debt_servicing_cost_interest_amount of this LoanPartDetails.
:type: float
"""
self._debt_servicing_cost_interest_amount = debt_servicing_cost_interest_amount
@property
def debt_servicing_cost_penalty_amount(self):
"""
Gets the debt_servicing_cost_penalty_amount of this LoanPartDetails.
Total amount of penalty debt servicing cost
:return: The debt_servicing_cost_penalty_amount of this LoanPartDetails.
:rtype: float
"""
return self._debt_servicing_cost_penalty_amount
@debt_servicing_cost_penalty_amount.setter
def debt_servicing_cost_penalty_amount(self, debt_servicing_cost_penalty_amount):
"""
Sets the debt_servicing_cost_penalty_amount of this LoanPartDetails.
Total amount of penalty debt servicing cost
:param debt_servicing_cost_penalty_amount: The debt_servicing_cost_penalty_amount of this LoanPartDetails.
:type: float
"""
self._debt_servicing_cost_penalty_amount = debt_servicing_cost_penalty_amount
@property
def debt_servicing_cost_total(self):
"""
Gets the debt_servicing_cost_total of this LoanPartDetails.
Debt servicing cost total
:return: The debt_servicing_cost_total of this LoanPartDetails.
:rtype: float
"""
return self._debt_servicing_cost_total
@property
def repaid_principal_current_owner(self):
"""
Gets the repaid_principal_current_owner of this LoanPartDetails.
Total principal repaid amount to current note owner
:return: The repaid_principal_current_owner of this LoanPartDetails.
:rtype: float
"""
return self._repaid_principal_current_owner
@repaid_principal_current_owner.setter
def repaid_principal_current_owner(self, repaid_principal_current_owner):
"""
Sets the repaid_principal_current_owner of this LoanPartDetails.
Total principal repaid amount to current note owner
:param repaid_principal_current_owner: The repaid_principal_current_owner of this LoanPartDetails.
:type: float
"""
self._repaid_principal_current_owner = repaid_principal_current_owner
@property
def repaid_interests_current_owner(self):
"""
Gets the repaid_interests_current_owner of this LoanPartDetails.
Total interest repaid amount to current note owner
:return: The repaid_interests_current_owner of this LoanPartDetails.
:rtype: float
"""
return self._repaid_interests_current_owner
@repaid_interests_current_owner.setter
def repaid_interests_current_owner(self, repaid_interests_current_owner):
"""
Sets the repaid_interests_current_owner of this LoanPartDetails.
Total interest repaid amount to current note owner
:param repaid_interests_current_owner: The repaid_interests_current_owner of this LoanPartDetails.
:type: float
"""
self._repaid_interests_current_owner = repaid_interests_current_owner
@property
def late_charges_paid_current_owner(self):
"""
Gets the late_charges_paid_current_owner of this LoanPartDetails.
Late charges paid amount to current note owner
:return: The late_charges_paid_current_owner of this LoanPartDetails.
:rtype: float
"""
return self._late_charges_paid_current_owner
@late_charges_paid_current_owner.setter
def late_charges_paid_current_owner(self, late_charges_paid_current_owner):
"""
Sets the late_charges_paid_current_owner of this LoanPartDetails.
Late charges paid amount to current note owner
:param late_charges_paid_current_owner: The late_charges_paid_current_owner of this LoanPartDetails.
:type: float
"""
self._late_charges_paid_current_owner = late_charges_paid_current_owner
@property
def repaid_total_current_owner(self):
"""
Gets the repaid_total_current_owner of this LoanPartDetails.
Total repaid amount to current note owner
:return: The repaid_total_current_owner of this LoanPartDetails.
:rtype: float
"""
return self._repaid_total_current_owner
@repaid_total_current_owner.setter
def repaid_total_current_owner(self, repaid_total_current_owner):
"""
Sets the repaid_total_current_owner of this LoanPartDetails.
Total repaid amount to current note owner
:param repaid_total_current_owner: The repaid_total_current_owner of this LoanPartDetails.
:type: float
"""
self._repaid_total_current_owner = repaid_total_current_owner
@property
def total_repaid(self):
"""
Gets the total_repaid of this LoanPartDetails.
Total repaid amount
:return: The total_repaid of this LoanPartDetails.
:rtype: float
"""
return self._total_repaid
@total_repaid.setter
def total_repaid(self, total_repaid):
"""
Sets the total_repaid of this LoanPartDetails.
Total repaid amount
:param total_repaid: The total_repaid of this LoanPartDetails.
:type: float
"""
self._total_repaid = total_repaid
@property
def debt_managment_events(self):
"""
Gets the debt_managment_events of this LoanPartDetails.
        Debt management event collection
:return: The debt_managment_events of this LoanPartDetails.
:rtype: list[DebtManagementEvent]
"""
return self._debt_managment_events
@debt_managment_events.setter
def debt_managment_events(self, debt_managment_events):
"""
Sets the debt_managment_events of this LoanPartDetails.
        Debt management event collection
:param debt_managment_events: The debt_managment_events of this LoanPartDetails.
:type: list[DebtManagementEvent]
"""
self._debt_managment_events = debt_managment_events
@property
def loan_transfers(self):
"""
Gets the loan_transfers of this LoanPartDetails.
Collection of all loan payments
:return: The loan_transfers of this LoanPartDetails.
:rtype: list[LoanTransfer]
"""
return self._loan_transfers
@loan_transfers.setter
def loan_transfers(self, loan_transfers):
"""
Sets the loan_transfers of this LoanPartDetails.
Collection of all loan payments
:param loan_transfers: The loan_transfers of this LoanPartDetails.
:type: list[LoanTransfer]
"""
self._loan_transfers = loan_transfers
@property
def scheduled_payments(self):
"""
Gets the scheduled_payments of this LoanPartDetails.
Collection of all loan scheduled payments. Contains previous period values before rescheduling was made
:return: The scheduled_payments of this LoanPartDetails.
:rtype: list[ScheduledPayment]
"""
return self._scheduled_payments
@scheduled_payments.setter
def scheduled_payments(self, scheduled_payments):
"""
Sets the scheduled_payments of this LoanPartDetails.
Collection of all loan scheduled payments. Contains previous period values before rescheduling was made
:param scheduled_payments: The scheduled_payments of this LoanPartDetails.
:type: list[ScheduledPayment]
"""
self._scheduled_payments = scheduled_payments
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
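# --------------------------------------------------------------------------------------
# Editor's illustration (not part of the generated client): a minimal sketch of how the
# serialization helpers above are typically used. It assumes the usual swagger-codegen
# no-argument constructor in which every field starts out as None; the numeric values
# below are made up purely for demonstration.
if __name__ == "__main__":
    example = LoanPartDetails()
    example.principal_repaid = 125.50   # assigned through the property setters above
    example.interest_repaid = 24.50
    example.total_repaid = 150.00
    # to_dict() walks swagger_types and recursively converts nested models and lists.
    print(example.to_dict())
    # to_str() is pformat() of the same dictionary; __repr__ delegates to it.
    print(example.to_str())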
| 34.904325
| 1,343
| 0.663863
|
09463f4ceba2674bc43b5fac3a30cc57727dbdfd
| 3,012
|
py
|
Python
|
lib/tests/ServerTestCase.py
|
Camilo-Mendoza/streamlit-ML
|
be8aafdf9f334b92a6e056e6c4f994da82587f80
|
[
"Apache-2.0"
] | null | null | null |
lib/tests/ServerTestCase.py
|
Camilo-Mendoza/streamlit-ML
|
be8aafdf9f334b92a6e056e6c4f994da82587f80
|
[
"Apache-2.0"
] | 9
|
2021-03-01T20:47:52.000Z
|
2022-02-12T20:49:50.000Z
|
lib/tests/ServerTestCase.py
|
Camilo-Mendoza/streamlit-ML
|
be8aafdf9f334b92a6e056e6c4f994da82587f80
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2018-2019 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import requests
import tornado.testing
import tornado.web
import tornado.websocket
from tornado.concurrent import Future
from streamlit.server.Server import Server
class ServerTestCase(tornado.testing.AsyncHTTPTestCase):
"""Base class for async streamlit.Server testing.
Subclasses should patch 'streamlit.server.Server.ReportSession',
to prevent ReportSessions from being created, and scripts from
being run. (Script running involves creating new threads, which
interfere with other tests if not properly terminated.)
See the "ServerTest" class for example usage.
"""
def get_app(self):
# Create a Server, and patch its _on_stopped function
# to no-op. This prevents it from shutting down the
# ioloop when it stops.
self.server = Server(self.io_loop, '/not/a/script.py', [])
self.server._on_stopped = mock.MagicMock()
app = self.server._create_app()
return app
def tearDown(self):
super(ServerTestCase, self).tearDown()
# Clear the Server singleton for the next test
Server._singleton = None
def start_server_loop(self):
"""Starts the server's loop coroutine.
Returns
-------
Future
A Future that resolves when the loop has started.
You need to yield on this value from within a
'tornado.testing.gen_test' coroutine.
"""
server_started = Future()
self.io_loop.spawn_callback(
self.server._loop_coroutine,
lambda _: server_started.set_result(None))
return server_started
def get_ws_url(self, path):
"""Return a ws:// URL with the given path for our test server."""
# get_url() gives us a result with the 'http' scheme;
# we swap it out for 'ws'.
url = self.get_url(path)
parts = list(requests.utils.urlparse(url))
parts[0] = 'ws'
return requests.utils.urlunparse(tuple(parts))
def ws_connect(self):
"""Open a websocket connection to the server.
Returns
-------
Future
A Future that resolves with the connected websocket client.
You need to yield on this value from within a
'tornado.testing.gen_test' coroutine.
"""
return tornado.websocket.websocket_connect(self.get_ws_url('/stream'))
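# --------------------------------------------------------------------------------------
# Editor's illustration: the class docstring above points at a "ServerTest" class for
# example usage. A minimal subclass following the documented pattern might look like the
# commented sketch below; the patch target comes from the docstring, while the test body
# itself is an assumption, not a copy of the real test file.
#
#   @mock.patch('streamlit.server.Server.ReportSession', mock.MagicMock())
#   class ExampleServerTest(ServerTestCase):
#       @tornado.testing.gen_test
#       def test_websocket_connect(self):
#           yield self.start_server_loop()        # resolves once the loop coroutine starts
#           ws_client = yield self.ws_connect()   # resolves with the connected client
#           self.assertIsNotNone(ws_client)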
| 34.227273
| 78
| 0.669323
|
8c0322741b983325f1644cb066eb81b6673866d8
| 26,989
|
py
|
Python
|
source/pydwf/core/auxiliary/enum_types.py
|
sidneycadot/pydwf
|
cd9eba8b45d990f09095bec62b20115f0757baba
|
[
"MIT"
] | 14
|
2021-05-10T16:19:45.000Z
|
2022-03-13T08:30:12.000Z
|
source/pydwf/core/auxiliary/enum_types.py
|
sidneycadot/pydwf
|
cd9eba8b45d990f09095bec62b20115f0757baba
|
[
"MIT"
] | 22
|
2021-05-01T09:51:09.000Z
|
2021-11-13T12:35:36.000Z
|
source/pydwf/core/auxiliary/enum_types.py
|
sidneycadot/pydwf
|
cd9eba8b45d990f09095bec62b20115f0757baba
|
[
"MIT"
] | 2
|
2021-05-02T12:13:16.000Z
|
2022-03-11T21:15:07.000Z
|
"""This module defines Python equivalents of the enumerations found in the C library header file *dwf.h*.
Note:
The obsolete enumerations *TRIGCOND* and *STS* that are defined in the C header file have not been defined here.
*TRIGCOND* has been replaced by |DwfTriggerSlope:link|; *STS* has been replaced by |DwfState:link|.
"""
import enum
class _FixReprMethod:
"""Mixin class that overrides the __repr__ methods for Enum classes.
Enums in Python have a strange default '__repr__' implementation. This mixin class fixes that.
"""
# pylint: disable=no-member, too-few-public-methods
def __repr__(self):
return "{}.{}".format(self.__class__.__name__, self.name)
@enum.unique
class DwfErrorCode(_FixReprMethod, enum.Enum):
"""Enumeration type for error reporting constants of the DWF API.
This type is used by the |DwfLibrary.getLastError:link| method to report the error condition of the most
recent C API call.
In |pydwf|, it is only used as the type of the :py:attr:`~pydwf.core.auxiliary.exceptions.DwfLibraryError.code`
field of |DwfLibraryError:link| instances.
In the C API, this type is called 'DWFERC', and it is represented as an *int*.
"""
NoErc = 0
"""No error occurred."""
UnknownError = 1
"""Unknown error."""
ApiLockTimeout = 2
"""API waiting on pending operation timed out."""
AlreadyOpened = 3
"""Device already opened."""
NotSupported = 4
"""Device not supported."""
InvalidParameter0 = 0x10
"""Invalid parameter sent in API call."""
InvalidParameter1 = 0x11
"""Invalid parameter sent in API call."""
InvalidParameter2 = 0x12
"""Invalid parameter sent in API call."""
InvalidParameter3 = 0x13
"""Invalid parameter sent in API call."""
InvalidParameter4 = 0x14
"""Invalid parameter sent in API call."""
class DwfEnumFilter(_FixReprMethod, enum.Flag):
"""Enumeration type for device class constants, used during device enumeration.
This type is used by the |DeviceEnum.enumerateDevices:link| method to constrain
the type of devices that will be considered during device enumeration.
In the C API, this type is called 'ENUMFILTER', and it is represented as an *int*.
"""
All = 0
"""Enumerate all available devices."""
Type = 0x80000000
"""Enumerate by type (0x80000000)."""
USB = 0x00000001
"""Enumerate USB devices."""
Network = 0x00000002
"""Enumerate Network devices."""
AXI = 0x00000004
"""Enumerate AXI devices (used when running on an ADP 3x50 device)."""
Remote = 0x10000000
"""Enumerate remote devices (0x10000000)."""
Audio = 0x20000000
"""Enumerate audio devices (0x20000000)."""
Demo = 0x40000000
"""Enumerate demo devices (0x40000000)."""
EExplorer = 1
"""Enumerate only Electronics Explorer devices. Deprecated since DWF version 3.17."""
Discovery = 2
"""Enumerate only Analog Discovery devices. Deprecated since DWF version 3.17."""
Discovery2 = 3
"""Enumerate only Analog Discovery 2 devices. Deprecated since DWF version 3.17."""
DDiscovery = 4
"""Enumerate only Digital Discovery devices. Deprecated since DWF version 3.17."""
@enum.unique
class DwfEnumConfigInfo(_FixReprMethod, enum.Enum):
"""Enumeration type for device configuration parameter type constants.
This type lists the device parameters that can vary between different |device configurations:link|
of the same device.
In the C API, this type is represented as an *int*.
"""
#pylint: disable=line-too-long
TooltipText = -1
"""Tooltip text.
Maximum length: 2048 characters.
Note:
This value is not officially documented. Its existence was revealed in a
`message on the Digilent forum <https://forum.digilentinc.com/topic/21720-small-issue-and-questions-about-device-configurations/#comment-62717>`_.
"""
OtherInfoText = -2
"""Other info text.
Maximum length: 256 characters.
Note:
This value is not officially documented. Its existence was revealed in a
`message on the Digilent forum <https://forum.digilentinc.com/topic/21720-small-issue-and-questions-about-device-configurations/#comment-62717>`_.
"""
AnalogInChannelCount = 1
"""Number of analog input channels."""
AnalogOutChannelCount = 2
"""Number of analog output channels."""
AnalogIOChannelCount = 3
"""Number of analog power supply channels.
Note:
This is a different number than the number of channels reported by the |AnalogIO.channelCount:link|
method.
"""
DigitalInChannelCount = 4
"""Number of digital input channels."""
DigitalOutChannelCount = 5
"""Number of digital output channels."""
DigitalIOChannelCount = 6
"""Number of digital I/O channels."""
AnalogInBufferSize = 7
"""Analog in buffer size, in samples."""
AnalogOutBufferSize = 8
"""Analog out buffer size, in samples."""
DigitalInBufferSize = 9
"""Digital in buffer size, in samples."""
DigitalOutBufferSize = 10
"""Digital out buffer size, in samples."""
@enum.unique
class DwfDeviceID(_FixReprMethod, enum.Enum):
"""Enumeration type for device ID constants.
This type is used by the |DeviceEnum.deviceType:link| method to report on a selected device.
In the C API, this type is called 'DEVID', and it is represented as an *int*.
"""
EExplorer = 1
"""Electronics Explorer devices."""
Discovery = 2
"""Analog Discovery devices."""
Discovery2 = 3
"""Analog Discovery 2 devices."""
DDiscovery = 4
"""Digital Discovery devices."""
ADP3X50 = 6
"""Analog Discovery Pro devices."""
class DwfDeviceVersion(_FixReprMethod, enum.Enum):
"""Enumeration type for device version (i.e., hardware revision) constants.
This type is used by the |DeviceEnum.deviceType:link| method to report the
hardware revision of a selected device.
Note:
The device revision list given here is not complete; it does not cover all devices.
Additionally, the enumeration values :py:attr:`EExplorerC` and :py:attr:`DiscoveryB`
have identical integer values (2).
In the C API, this type is called 'DEVVER', and it is represented as an *int*.
"""
EExplorerC = 2
"""Electronics Explorer devices, revision C."""
EExplorerE = 4
"""Electronics Explorer devices, revision E."""
EExplorerF = 5
"""Electronics Explorer devices, revision F."""
DiscoveryA = 1
"""Discovery devices, revision A."""
DiscoveryB = 2
"""Discovery devices, revision B."""
DiscoveryC = 3
"""Discovery devices, revision C."""
@enum.unique
class DwfDeviceParameter(_FixReprMethod, enum.Enum):
"""Enumeration type for device parameter constants.
Device parameters are miscellaneous integer settings that influence the behavior of a device.
The different device parameters are selected by one of the constant values defined here.
This type is used to select device parameters, either to set/get global defaults using the |DwfLibrary|, or to
to set/get parameter values on a specific, previously opened device |DwfDevice|.
In the C API, this type is called 'DwfParam', and it is represented as an *int*.
"""
KeepOnClose = 1
"""Keep the device running after close.
Warning:
This value is *obsolete*. Use *OnClose* instead.
"""
UsbPower = 2
"""USB power behavior if AUX power is connected.
Possible values:
* 0 — Disable USB power.
* 1 — Keep USB power enabled.
This setting is implemented on the Analog Discovery 2.
"""
LedBrightness = 3
"""Set multicolor LED brightness.
    The Digital Discovery features a multi-color LED. It is normally blue when the device is not
    currently controlled by software, and green when it is.
Setting this parameter from 0 to 100 changes the LED's relative brightness, in percents. This can be
useful, for example, in a lab with sensitive optics that would preferably be completely dark.
On the Analog Discovery 2, this setting has no effect.
"""
OnClose = 4
"""Define behavior on close.
Possible values:
* 0 — On close, continue.
* 1 — On close, stop the device.
* 2 — On close, shut down the device.
"""
AudioOut = 5
"""Enable or disable audio output.
Possible values:
* 0 — Disable audio output.
* 1 — Enable audio output.
This setting is implemented on the Analog Discovery and the Analog Discovery 2.
"""
UsbLimit = 6
"""USB power limit.
The value ranges from 0 to 1000, in mA. The value -1 denotes no limit.
This setting is implemented on the Analog Discovery and the Analog Discovery 2.
"""
AnalogOut = 7
"""Enable or disable analog output.
Possible values:
* 0 — Disable analog output.
* 1 — Enable analog output.
"""
Frequency = 8
"""This parameter is undocumented.
Todo:
The meaning of this parameter needs to be understood.
It is some frequency; comments say unit is Hz.
"""
ExtFreq = 9
"""This parameter is undocumented.
Todo:
The meaning of this parameter needs to be understood.
It is some frequency; comments say unit is Hz.
"""
ClockMode = 10
"""This parameter is undocumented.
Todo:
The meaning of this parameter needs to be understood.
Possible values:
* 0 — internal.
* 1 — output.
* 2 — input.
* 3 — IO.
"""
class DwfState(_FixReprMethod, enum.Enum):
"""Enumeration type for instrument state constants, for instruments that are controlled by an internal
state-machine.
The following instrument APIs are controlled by a state machine:
* |AnalogIn|
* |AnalogOut| — *independent state machine for each channel*
* |DigitalIn|
* |DigitalOut|
* |AnalogImpedance|
This type is used to return the current state from their *status()* methods.
Note:
The enumeration values :py:attr:`Triggered` and :py:attr:`Running` have identical integer values (3).
The state name :py:attr:`Triggered` is used for capture instruments (|AnalogIn|, |DigitalIn|),
while :py:attr:`Running` is used for signal generation instruments (|AnalogOut|, |DigitalOut|).
In the C API, this type is represented as an *unsigned char*.
"""
Ready = 0
"""The instrument is idle, waiting to be configured or started."""
Config = 4
"""The instrument is being configured."""
Prefill = 5
"""The instrument is collecting data prior to arming itself, so it can deliver pre-trigger samples."""
Armed = 1
"""The instrument is collecting samples and waiting for the trigger."""
Wait = 7
"""The signal generation instrument is waiting before its next run."""
Triggered = 3
"""The capture instrument is triggered and collecting data."""
Running = 3
"""The signal generation instrument is running (generating signals)."""
Done = 2
"""The instrument has completed a measurement or signal-generating sequence."""
@enum.unique
class DwfTriggerSource(_FixReprMethod, enum.Enum):
"""Enumeration type for trigger source constants.
This type is used by the |DeviceControl| functionality and by the |AnalogIn|, |AnalogOut|, |DigitalIn|,
and |DigitalOut| instruments.
In the C API, this type is called 'TRIGSRC', and it is represented as an *unsigned char*.
"""
None_ = 0
"""No trigger configured (device starts immediately)."""
PC = 1
"""PC (software) trigger."""
DetectorAnalogIn = 2
"""AnalogIn trigger detector."""
DetectorDigitalIn = 3
"""DigitalIn trigger detector."""
AnalogIn = 4
"""AnalogIn instrument start."""
DigitalIn = 5
"""DigitalIn instrument start."""
DigitalOut = 6
"""DigitalOut instrument start."""
AnalogOut1 = 7
"""|AnalogOut| instrument channel 1 start."""
AnalogOut2 = 8
"""|AnalogOut| instrument channel 2 start."""
AnalogOut3 = 9
"""|AnalogOut| instrument channel 3 start."""
AnalogOut4 = 10
"""|AnalogOut| instrument channel 4 start."""
External1 = 11
"""External trigger #1."""
External2 = 12
"""External trigger #2."""
External3 = 13
"""External trigger #3."""
External4 = 14
"""External trigger #4."""
High = 15
"""High (undocumented)."""
Low = 16
"""Low (undocumented)."""
Clock = 17
"""Clock (undocumented)."""
@enum.unique
class DwfTriggerSlope(_FixReprMethod, enum.Enum):
"""Enumeration type for trigger slope constants.
This type is used by the |AnalogIn|, |AnalogOut|, |DigitalIn|, and |DigitalOut| instruments to select
the trigger slope.
In addition, the |AnalogIn| instrument uses it to select the slope of the sampling clock.
In the C API, this type is represented as an *int*.
"""
Rise = 0
"""Rising trigger slope."""
Fall = 1
"""Falling trigger slope."""
Either = 2
"""Either rising or falling trigger slope."""
@enum.unique
class DwfAcquisitionMode(_FixReprMethod, enum.Enum):
"""Enumeration type for acquisition mode constants.
This type is used by the |AnalogIn| and |DigitalIn| instruments. These instruments
support multiple acquisition modes that are appropriate for different data
acquisition tasks.
In the C API, this type is called 'ACQMODE', and it is represented as an *int*.
"""
Single = 0
"""Perform a single buffer acquisition.
Re-arm the instrument for the next capture after the data is fetched to the host
using the instrument-specific *Status()* method.
Note:
The difference with the :py:attr:`Single1` mode is unclear.
"""
ScanShift = 1
"""Perform a continuous acquisition in FIFO style.
The trigger setting is ignored.
The last sample is at the end of the buffer.
The instrument's *statusSamplesValid()* method gives the number of the acquired samples,
which will increase until reaching the buffer size.
After that, the waveform image is shifted for every new sample.
"""
ScanScreen = 2
"""Perform continuous acquisition circularly writing samples into the buffer.
This is similar to a heart-monitor display.
The trigger setting is ignored.
The instrument's *statusIndexWrite()* method gives the buffer write position.
"""
Record = 3
"""Perform acquisition for the length of time set by the instrument's *recordLengthSet()* method."""
Overs = 4
"""Overscan mode (undocumented)."""
Single1 = 5
"""Perform a single buffer acquisition.
Note:
The difference with the :py:attr:`Single` mode is unclear.
"""
@enum.unique
class DwfAnalogInFilter(_FixReprMethod, enum.Enum):
"""Enumeration type for analog input filter constants.
This type is used by the |AnalogIn| instrument to select a filtering algorithm for the input and trigger channels.
The |AnalogIn| instrument's ADC always captures samples at the maximum possible rate. If data acquisition at
    a lower sampling rate is requested, the resampling can be handled in several different ways.
The most obvious choice is *averaging*. This will suppress high-frequency noise, which is often a good thing,
but often it is also desirable to know that high-frequency noise is present in the signal, and the averaging
may hide that fact.
For that reason, the *decimation* filter is available, which simply selects a single sample captured at high
frequency when resampling to a lower frequency. The signal-to-noise ratio (SNR) will suffer, but the presence
of high-frequency noise (outliers) will be more easily seen in the resampled data.
Todo:
Examine the MinMax filter choice; it is not currently understood.
In the C API, this type is called 'FILTER', and it is represented as an *int*.
"""
Decimate = 0
"""Decimation filter."""
Average = 1
"""Averaging filter."""
MinMax = 2
"""Min/max filter."""
@enum.unique
class DwfAnalogInTriggerType(_FixReprMethod, enum.Enum):
"""Enumeration type for analog input trigger mode constants.
This type is used by the |AnalogIn| instrument to specify the trigger type.
In the C API, this type is called 'TRIGTYPE', and it is represented as an *int*.
"""
Edge = 0
"""Edge trigger type."""
Pulse = 1
"""Pulse trigger type."""
Transition = 2
"""Transition trigger type."""
Window = 3
"""Window trigger type."""
@enum.unique
class DwfAnalogInTriggerLengthCondition(_FixReprMethod, enum.Enum):
"""Enumeration type for analog input trigger length condition constants.
This type is used by the |AnalogIn| instrument to specify the trigger length condition.
In the C API, this type is called 'TRIGLEN', and it is represented as an *int*.
"""
Less = 0
"""Trigger length condition 'less'."""
Timeout = 1
"""Trigger length condition 'timeout'."""
More = 2
"""Trigger length condition 'more'."""
@enum.unique
class DwfAnalogOutFunction(_FixReprMethod, enum.Enum):
"""Enumeration type for analog output waveform-shape function constants.
This type is used by the |AnalogOut| instrument to represent the wave-shape produced on an
analog output channel node. The nine fixed waveform shape options are shown below.
.. only:: html
.. image:: /images/waveforms.gif
.. only:: latex
.. image:: /images/waveforms.pdf
In the C API, this type is called 'FUNC', and it is represented as an *unsigned char*.
"""
DC = 0
"""DC (constant signal) waveform shape. The signal level varies between -1 and 1."""
Sine = 1
"""Sinusoid waveform shape. The signal level varies between -1 and 1."""
Square = 2
"""Square waveform shape. The signal level varies between -1 and 1."""
Triangle = 3
"""Triangle waveform shape. The signal level varies between -1 and 1."""
RampUp = 4
"""Ramp Up waveform shape. The signal level varies between -1 and 1."""
RampDown = 5
"""Ramp Down waveform shape. The signal level varies between -1 and 1."""
Noise = 6
"""Noise waveform shape. The signal level is uniformly distributed between -1 and 1."""
Pulse = 7
"""Pulse waveform shape. The signal level varies between 0 and 1."""
Trapezium = 8
"""Trapezium waveform shape. The signal level varies between -1 and 1."""
SinePower = 9
"""Sinusoid Power waveform shape. The signal level varies between -1 and 1."""
Custom = 30
"""Custom (user-defined) waveform shape. The signal level varies between -1 and 1."""
Play = 31
"""Continuous playback mode. The signal level varies between -1 and 1."""
@enum.unique
class DwfAnalogOutNode(_FixReprMethod, enum.Enum):
"""Enumeration type for analog output node type constants.
This type is used by the |AnalogOut| instrument to represent the node types associated with each output channel.
In the C API, this type is called 'AnalogOutNode' (without the *Dwf* prefix), and it is represented as an *int*.
"""
Carrier = 0
"""Carrier signal node. This node represents the base signal without modulation applied."""
FM = 1
"""Frequency Modulation node."""
AM = 2
"""Amplitude Modulation node."""
@enum.unique
class DwfAnalogOutMode(_FixReprMethod, enum.Enum):
"""Enumeration type for analog out mode constants (voltage or current).
This type is used by the |AnalogOut| instrument to set or retrieve the mode of a channel.
In the C API, this type is represented as an *int*.
"""
Voltage = 0
"""Voltage mode."""
Current = 1
"""Current mode."""
@enum.unique
class DwfAnalogOutIdle(_FixReprMethod, enum.Enum):
"""Enumeration type for analog output idle state constants.
This type is used by the |AnalogOut| instrument to set the idle behavior of an output channel.
In the C API, this type is represented as an *int*.
"""
Disable = 0
"""When idle, disable the output."""
Offset = 1
"""When idle, drive the analog output offset."""
Initial = 2
"""When idle, drive the initial value of the selected waveform shape."""
@enum.unique
class DwfDigitalInClockSource(_FixReprMethod, enum.Enum):
"""Enumeration type for digital input clock source constants.
This type is used by the |DigitalIn| instrument to specify a clock source.
In the C API, this type is represented as an *int*.
"""
Internal = 0
"""Use internal clock source."""
External = 1
"""Use external clock source."""
External2 = 2
"""Use alternate external clock source."""
@enum.unique
class DwfDigitalInSampleMode(_FixReprMethod, enum.Enum):
"""Enumeration type for digital input sample mode constants.
This type is used by the |DigitalIn| instrument to specify a sample mode.
In the C API, this type is represented as an *int*.
"""
Simple = 0
"""Only digital samples (no noise)."""
Noise = 1
"""Alternate samples (noise, sample, noise, sample, …) where noise is more than one transition between two samples.
This setting is available when the sample rate is less than the maximum clock frequency (i.e., the divider is
greater than one). Digital noise can indicate glitches or ringing.
"""
@enum.unique
class DwfDigitalOutOutput(_FixReprMethod, enum.Enum):
"""Enumeration type for digital output mode constants.
This type is used by the |DigitalOut| instrument to specify the electronic behavior of a digital output channel.
In the C API, this type is represented as an *int*.
"""
PushPull = 0
"""Push/Pull."""
OpenDrain = 1
"""Open Drain."""
OpenSource = 2
"""Open Source."""
ThreeState = 3
"""Tristate (for custom and random)."""
@enum.unique
class DwfDigitalOutType(_FixReprMethod, enum.Enum):
"""Enumeration type for digital output type constants.
This type is used by the |DigitalOut| instrument to specify the behavior mode of a digital output channel.
In the C API, this type is represented as an *int*.
"""
Pulse = 0
"""Pulse output."""
Custom = 1
"""Custom output."""
Random = 2
"""Random output."""
ROM = 3
"""ROM (lookup table) output."""
State = 4
"""State machine output."""
Play = 5
"""Continuous playback output."""
@enum.unique
class DwfDigitalOutIdle(_FixReprMethod, enum.Enum):
"""Enumeration type for digital output idle mode constants.
This type is used primarily by the |DigitalOut| instrument to specify the idle behavior mode of a digital
output channel.
In addition to that, it is used by the |DigitalSpi| protocol functionality to specify the idle behavior
of the pins it controls.
In the C API, this type is represented as an *int*.
"""
Init = 0
"""Same as initial value of selected output pattern."""
Low = 1
"""Low signal level."""
High = 2
"""High signal level."""
Zet = 3
"""High impedance."""
@enum.unique
class DwfAnalogIO(_FixReprMethod, enum.Enum):
"""Enumeration type for Analog I/O channel node type constants.
This type is used by the |AnalogIO| functionality to report the node type.
In the C API, this type is called 'ANALOGIO', and it is represented as an *unsigned char*.
"""
Undocumented = 0
"""This value is returned in Analog Pro devices when using 3.16.3 of the DWF library. That is probably a bug."""
Enable = 1
"""The node represent an on/off switch."""
Voltage = 2
"""The node represents a voltage."""
Current = 3
"""The node represents a current."""
Power = 4
"""The node represents a power."""
Temperature = 5
"""The node represents a temperature."""
Dmm = 6
"""The node represents a DMM (digital multimeter) value."""
Range = 7
"""The node represents a range."""
Measure = 8
"""(unknown)"""
Time = 9
"""The node represents a time."""
Frequency = 10
"""The node represents a frequency."""
Resistance = 11
"""The node represents a resistance."""
@enum.unique
class DwfAnalogImpedance(_FixReprMethod, enum.Enum):
"""Enumeration type for analog impedance measurement types.
This type is used by the |AnalogImpedance| measurement functionality to specify a measurement quantity type.
In the C API, this type is represented as an *int*.
"""
Impedance = 0
"""Measure impedance, in Ohms."""
ImpedancePhase = 1
"""Measure impedance phase, in radians."""
Resistance = 2
"""Measure resistance, in Ohms."""
Reactance = 3
"""Measure reactance, in Ohms."""
Admittance = 4
"""Measure admittance, in Siemens."""
AdmittancePhase = 5
"""Measure admittance phase, in radians."""
Conductance = 6
"""Measure conductance, in Siemens."""
Susceptance = 7
"""Measure susceptance, in Siemens."""
SeriesCapacitance = 8
"""Measure series capacitance, in Farad."""
ParallelCapacitance = 9
"""Measure parallel capacitance, in Farad."""
SeriesInductance = 10
"""Measure series inductance, in Henry."""
ParallelInductance = 11
"""Measure parallel inductance, in Henry."""
Dissipation = 12
"""Measure dissipation, as a factor."""
Quality = 13
"""Measure quality, as a factor."""
Vrms = 14
"""Measure Vrms, in Volts."""
Vreal = 15
"""Measure Vreal (real part of complex voltage), in Volts."""
Vimag = 16
"""Measure Vimag (imaginary part of complex voltage), in Volts."""
Irms = 17
"""Measure Irms, in Amps."""
Ireal = 18
"""Measure Ireal (real part of complex current), in Amps."""
Iimag = 19
"""Measure Iimag (imaginary part of complex current), in Amps."""
@enum.unique
class DwfDmm(_FixReprMethod, enum.Enum):
"""Enumeration type for DMM (digital multimeter) measurements.
Note:
This type is currently unused in the API. It is intended for functionality in the new ADP5250 device.
In the C API, this type is called 'DwfDmm', and it is represented as an *int*.
"""
Resistance = 1
"""Resistance measurement."""
Continuity = 2
"""Continuity measurement."""
Diode = 3
"""Diode measurement."""
DCVoltage = 4
"""DC voltage measurement."""
ACVoltage = 5
"""AC voltage measurement."""
DCCurrent = 6
"""DC current measurement."""
ACCurrent = 7
"""AC current measurement."""
DCLowCurrent = 8
"""DC low current measurement."""
ACLowCurrent = 9
"""AC low current measurement."""
Temperature = 10
"""Temperature measurement."""
| 27.124623
| 154
| 0.666716
|
bebc326041fdb6d1b0586916a42face43bc25ad8
| 6,972
|
py
|
Python
|
train.py
|
zijundeng/DM2F-Net
|
d6f231f8a3f1a3c5ca877b30d8778b9bb9b76668
|
[
"MIT"
] | 30
|
2019-07-23T06:44:48.000Z
|
2021-03-27T16:18:55.000Z
|
train.py
|
zijundeng/DM2F-Net
|
d6f231f8a3f1a3c5ca877b30d8778b9bb9b76668
|
[
"MIT"
] | 3
|
2019-10-29T14:04:30.000Z
|
2022-03-30T10:27:08.000Z
|
train.py
|
zijundeng/DM2F-Net
|
d6f231f8a3f1a3c5ca877b30d8778b9bb9b76668
|
[
"MIT"
] | 6
|
2019-10-30T03:04:53.000Z
|
2022-01-02T14:28:20.000Z
|
# coding: utf-8
import argparse
import os
import datetime
from tqdm import tqdm
import torch
from torch import nn
from torch import optim
from torch.backends import cudnn
from torch.utils.data import DataLoader
from model import DM2FNet
from tools.config import TRAIN_ITS_ROOT, TEST_SOTS_ROOT
from datasets import ItsDataset, SotsDataset
from tools.utils import AvgMeter, check_mkdir
from skimage.metrics import peak_signal_noise_ratio, structural_similarity
def parse_args():
parser = argparse.ArgumentParser(description='Train a DM2FNet')
parser.add_argument(
'--gpus', type=str, default='0', help='gpus to use ')
parser.add_argument('--ckpt-path', default='./ckpt', help='checkpoint path')
parser.add_argument(
'--exp-name',
default='RESIDE_ITS',
help='experiment name.')
args = parser.parse_args()
return args
cfgs = {
'use_physical': True,
'iter_num': 40000,
'train_batch_size': 16,
'last_iter': 0,
'lr': 5e-4,
'lr_decay': 0.9,
'weight_decay': 0,
'momentum': 0.9,
'snapshot': '',
'val_freq': 5000,
'crop_size': 256
}
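# Editor's note: the training loop below decays both parameter-group learning rates with
# a polynomial ("poly") schedule driven by this config:
#     lr(iter) = lr * (1 - iter / iter_num) ** lr_decay        (bias parameters use 2x this)
# For illustration, halfway through training with the values above:
#     5e-4 * (1 - 20000.0 / 40000) ** 0.9  ->  approx 2.68e-4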
def main():
net = DM2FNet().cuda().train()
# net = nn.DataParallel(net)
optimizer = optim.Adam([
{'params': [param for name, param in net.named_parameters()
if name[-4:] == 'bias' and param.requires_grad],
'lr': 2 * cfgs['lr']},
{'params': [param for name, param in net.named_parameters()
if name[-4:] != 'bias' and param.requires_grad],
'lr': cfgs['lr'], 'weight_decay': cfgs['weight_decay']}
])
if len(cfgs['snapshot']) > 0:
print('training resumes from \'%s\'' % cfgs['snapshot'])
net.load_state_dict(torch.load(os.path.join(args.ckpt_path,
args.exp_name, cfgs['snapshot'] + '.pth')))
optimizer.load_state_dict(torch.load(os.path.join(args.ckpt_path,
args.exp_name, cfgs['snapshot'] + '_optim.pth')))
optimizer.param_groups[0]['lr'] = 2 * cfgs['lr']
optimizer.param_groups[1]['lr'] = cfgs['lr']
check_mkdir(args.ckpt_path)
check_mkdir(os.path.join(args.ckpt_path, args.exp_name))
open(log_path, 'w').write(str(cfgs) + '\n\n')
train(net, optimizer)
def train(net, optimizer):
curr_iter = cfgs['last_iter']
while curr_iter <= cfgs['iter_num']:
train_loss_record = AvgMeter()
loss_x_jf_record, loss_x_j0_record = AvgMeter(), AvgMeter()
loss_x_j1_record, loss_x_j2_record = AvgMeter(), AvgMeter()
loss_x_j3_record, loss_x_j4_record = AvgMeter(), AvgMeter()
loss_t_record, loss_a_record = AvgMeter(), AvgMeter()
for data in train_loader:
optimizer.param_groups[0]['lr'] = 2 * cfgs['lr'] * (1 - float(curr_iter) / cfgs['iter_num']) \
** cfgs['lr_decay']
optimizer.param_groups[1]['lr'] = cfgs['lr'] * (1 - float(curr_iter) / cfgs['iter_num']) \
** cfgs['lr_decay']
haze, gt_trans_map, gt_ato, gt, _ = data
batch_size = haze.size(0)
haze = haze.cuda()
gt_trans_map = gt_trans_map.cuda()
gt_ato = gt_ato.cuda()
gt = gt.cuda()
optimizer.zero_grad()
x_jf, x_j0, x_j1, x_j2, x_j3, x_j4, t, a = net(haze)
loss_x_jf = criterion(x_jf, gt)
loss_x_j0 = criterion(x_j0, gt)
loss_x_j1 = criterion(x_j1, gt)
loss_x_j2 = criterion(x_j2, gt)
loss_x_j3 = criterion(x_j3, gt)
loss_x_j4 = criterion(x_j4, gt)
loss_t = criterion(t, gt_trans_map)
loss_a = criterion(a, gt_ato)
loss = loss_x_jf + loss_x_j0 + loss_x_j1 + loss_x_j2 + loss_x_j3 + loss_x_j4 \
+ 10 * loss_t + loss_a
loss.backward()
optimizer.step()
# update recorder
train_loss_record.update(loss.item(), batch_size)
loss_x_jf_record.update(loss_x_jf.item(), batch_size)
loss_x_j0_record.update(loss_x_j0.item(), batch_size)
loss_x_j1_record.update(loss_x_j1.item(), batch_size)
loss_x_j2_record.update(loss_x_j2.item(), batch_size)
loss_x_j3_record.update(loss_x_j3.item(), batch_size)
loss_x_j4_record.update(loss_x_j4.item(), batch_size)
loss_t_record.update(loss_t.item(), batch_size)
loss_a_record.update(loss_a.item(), batch_size)
curr_iter += 1
log = '[iter %d], [train loss %.5f], [loss_x_fusion %.5f], [loss_x_phy %.5f], [loss_x_j1 %.5f], ' \
'[loss_x_j2 %.5f], [loss_x_j3 %.5f], [loss_x_j4 %.5f], [loss_t %.5f], [loss_a %.5f], ' \
'[lr %.13f]' % \
(curr_iter, train_loss_record.avg, loss_x_jf_record.avg, loss_x_j0_record.avg,
loss_x_j1_record.avg, loss_x_j2_record.avg, loss_x_j3_record.avg, loss_x_j4_record.avg,
loss_t_record.avg, loss_a_record.avg, optimizer.param_groups[1]['lr'])
print(log)
open(log_path, 'a').write(log + '\n')
if (curr_iter + 1) % cfgs['val_freq'] == 0:
validate(net, curr_iter, optimizer)
if curr_iter > cfgs['iter_num']:
break
def validate(net, curr_iter, optimizer):
print('validating...')
net.eval()
loss_record = AvgMeter()
with torch.no_grad():
for data in tqdm(val_loader):
haze, gt, _ = data
haze = haze.cuda()
gt = gt.cuda()
dehaze = net(haze)
loss = criterion(dehaze, gt)
loss_record.update(loss.item(), haze.size(0))
snapshot_name = 'iter_%d_loss_%.5f_lr_%.6f' % (curr_iter + 1, loss_record.avg, optimizer.param_groups[1]['lr'])
print('[validate]: [iter %d], [loss %.5f]' % (curr_iter + 1, loss_record.avg))
torch.save(net.state_dict(),
os.path.join(args.ckpt_path, args.exp_name, snapshot_name + '.pth'))
torch.save(optimizer.state_dict(),
os.path.join(args.ckpt_path, args.exp_name, snapshot_name + '_optim.pth'))
net.train()
if __name__ == '__main__':
args = parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
cudnn.benchmark = True
torch.cuda.set_device(int(args.gpus))
train_dataset = ItsDataset(TRAIN_ITS_ROOT, True, cfgs['crop_size'])
train_loader = DataLoader(train_dataset, batch_size=cfgs['train_batch_size'], num_workers=4,
shuffle=True, drop_last=True)
val_dataset = SotsDataset(TEST_SOTS_ROOT)
val_loader = DataLoader(val_dataset, batch_size=8)
criterion = nn.L1Loss().cuda()
log_path = os.path.join(args.ckpt_path, args.exp_name, str(datetime.datetime.now()) + '.txt')
main()
| 34.686567
| 115
| 0.590361
|
9142b7ea35029a0e278ff84ce14454906bc2d967
| 1,245
|
py
|
Python
|
mfile/urls.py
|
vikifox/CMDB
|
bac9b7da204c3eee344f55bb2187df38ef3b3d4c
|
[
"Apache-2.0"
] | 16
|
2020-08-13T04:28:50.000Z
|
2021-06-10T06:24:51.000Z
|
mfile/urls.py
|
vikifox/CMDB
|
bac9b7da204c3eee344f55bb2187df38ef3b3d4c
|
[
"Apache-2.0"
] | 1
|
2019-04-15T07:01:42.000Z
|
2019-04-15T07:01:42.000Z
|
mfile/urls.py
|
vikifox/CMDB
|
bac9b7da204c3eee344f55bb2187df38ef3b3d4c
|
[
"Apache-2.0"
] | 2
|
2018-12-05T09:51:34.000Z
|
2019-12-16T16:26:41.000Z
|
"""example_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from django.contrib.auth.views import LoginView,LogoutView
from mfile.views import finder
from django.views.static import serve
from django.conf import settings
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns = [
url(r'^$',finder.as_view(),name='mfile'),
url(r'^elfinder/',include('elfinder.urls'))
]
if settings.DEBUG:
urlpatterns += staticfiles_urlpatterns()
urlpatterns += [
url(r'^media/(?P<path>.*)$', serve, { 'document_root': settings.MEDIA_ROOT, }),
]
| 36.617647
| 87
| 0.719679
|
0e06325f4bcdcc1a7d72eb5ff1f9989d17fba35c
| 8,374
|
py
|
Python
|
fpn/train_end2end.py
|
jyqi/mx-viddet
|
af89ed736362fecdd1ee5e6584d9b221c59ff8c4
|
[
"MIT"
] | null | null | null |
fpn/train_end2end.py
|
jyqi/mx-viddet
|
af89ed736362fecdd1ee5e6584d9b221c59ff8c4
|
[
"MIT"
] | null | null | null |
fpn/train_end2end.py
|
jyqi/mx-viddet
|
af89ed736362fecdd1ee5e6584d9b221c59ff8c4
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2017 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Modified by Haozhi Qi
# --------------------------------------------------------
# Based on:
# MX-RCNN
# Copyright (c) 2016 by Contributors
# Licence under The Apache 2.0 License
# https://github.com/ijkguo/mx-rcnn/
# --------------------------------------------------------
import _init_paths
import cv2
import argparse
import pprint
import os
import sys
from config.config import config, update_config
def parse_args():
parser = argparse.ArgumentParser(description='Train Faster-RCNN network')
# general
parser.add_argument('--cfg', help='experiment configure file name', required=True, type=str)
args, rest = parser.parse_known_args()
# update config
update_config(args.cfg)
# training
parser.add_argument('--frequent', help='frequency of logging', default=config.default.frequent, type=int)
args = parser.parse_args()
return args
args = parse_args()
curr_path = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(curr_path, '../external/mxnet', config.MXNET_VERSION))
import shutil
import numpy as np
import mxnet as mx
from mxboard import SummaryWriter
from symbols import *
from core.loader import PyramidAnchorIterator
from core import callback, metric
from core.module import MutableModule
from utils.create_logger import create_logger
from utils.load_data import load_gt_roidb, merge_roidb, filter_roidb
from utils.load_model import load_param
from utils.PrefetchingIter import PrefetchingIter
from utils.lr_scheduler import WarmupMultiFactorScheduler
def train_net(args, ctx, pretrained, epoch, prefix, begin_epoch, end_epoch, lr, lr_step):
mx.random.seed(3)
np.random.seed(3)
logger, final_output_path = create_logger(config.output_path, args.cfg, config.dataset.image_set)
prefix = os.path.join(final_output_path, prefix)
tboardlog = SummaryWriter(logdir=os.path.join(final_output_path, 'tb'), flush_secs=5)
# load symbol
shutil.copy2(os.path.join(curr_path, 'symbols', config.symbol + '.py'), final_output_path)
sym_instance = eval(config.symbol + '.' + config.symbol)()
sym = sym_instance.get_symbol(config, is_train=True)
feat_pyramid_level = np.log2(config.network.RPN_FEAT_STRIDE).astype(int)
feat_sym = [sym.get_internals()['rpn_cls_score_p' + str(x) + '_output'] for x in feat_pyramid_level]
# setup multi-gpu
batch_size = len(ctx)
input_batch_size = config.TRAIN.BATCH_IMAGES * batch_size
# print config
pprint.pprint(config)
logger.info('training config:{}\n'.format(pprint.pformat(config)))
# load dataset and prepare imdb for training
image_sets = [iset for iset in config.dataset.image_set.split('+')]
roidbs = [load_gt_roidb(config.dataset.dataset, image_set, config.dataset.root_path, config.dataset.dataset_path,
flip=config.TRAIN.FLIP)
for image_set in image_sets]
roidb = merge_roidb(roidbs)
roidb = filter_roidb(roidb, config)
# load training data
train_data = PyramidAnchorIterator(feat_sym, roidb, config, batch_size=input_batch_size, shuffle=config.TRAIN.SHUFFLE,
ctx=ctx, feat_strides=config.network.RPN_FEAT_STRIDE, anchor_scales=config.network.ANCHOR_SCALES,
anchor_ratios=config.network.ANCHOR_RATIOS, aspect_grouping=config.TRAIN.ASPECT_GROUPING,
allowed_border=np.inf)
# infer max shape
max_data_shape = [('data', (config.TRAIN.BATCH_IMAGES, 3, max([v[0] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]
max_data_shape, max_label_shape = train_data.infer_shape(max_data_shape)
max_data_shape.append(('gt_boxes', (config.TRAIN.BATCH_IMAGES, 100, 5)))
    print('providing maximum shape', max_data_shape, max_label_shape)
data_shape_dict = dict(train_data.provide_data_single + train_data.provide_label_single)
pprint.pprint(data_shape_dict)
sym_instance.infer_shape(data_shape_dict)
# load and initialize params
if config.TRAIN.RESUME:
print('continue training from ', begin_epoch)
arg_params, aux_params = load_param(prefix, begin_epoch, convert=True)
else:
arg_params, aux_params = load_param(pretrained, epoch, convert=True)
sym_instance.init_weight(config, arg_params, aux_params)
# check parameter shapes
sym_instance.check_parameter_shapes(arg_params, aux_params, data_shape_dict)
# create solver
fixed_param_prefix = config.network.FIXED_PARAMS
data_names = [k[0] for k in train_data.provide_data_single]
label_names = [k[0] for k in train_data.provide_label_single]
mod = MutableModule(sym, data_names=data_names, label_names=label_names,
logger=logger, context=ctx, max_data_shapes=[max_data_shape for _ in range(batch_size)],
max_label_shapes=[max_label_shape for _ in range(batch_size)], fixed_param_prefix=fixed_param_prefix)
if config.TRAIN.RESUME:
mod._preload_opt_states = '%s-%04d.states'%(prefix, begin_epoch)
# decide training params
# metric
rpn_eval_metric = metric.RPNAccMetric()
rpn_cls_metric = metric.RPNLogLossMetric()
rpn_bbox_metric = metric.RPNL1LossMetric()
rpn_fg_metric = metric.RPNFGFraction(config)
eval_metric = metric.RCNNAccMetric(config)
eval_fg_metric = metric.RCNNFGAccuracy(config)
cls_metric = metric.RCNNLogLossMetric(config)
bbox_metric = metric.RCNNL1LossMetric(config)
eval_metrics = mx.metric.CompositeEvalMetric()
# rpn_eval_metric, rpn_cls_metric, rpn_bbox_metric, eval_metric, cls_metric, bbox_metric
for child_metric in [rpn_eval_metric, rpn_cls_metric, rpn_bbox_metric, rpn_fg_metric, eval_fg_metric, eval_metric, cls_metric, bbox_metric]:
eval_metrics.add(child_metric)
# callback
# batch_end_callback = callback.Speedometer(train_data.batch_size, frequent=args.frequent)
batch_end_callback = [callback.Speedometer(train_data.batch_size, frequent=args.frequent),
callback.MXBoard(tboardlog, frequent=100)]
means = np.tile(np.array(config.TRAIN.BBOX_MEANS), 2 if config.CLASS_AGNOSTIC else config.dataset.NUM_CLASSES)
stds = np.tile(np.array(config.TRAIN.BBOX_STDS), 2 if config.CLASS_AGNOSTIC else config.dataset.NUM_CLASSES)
epoch_end_callback = [mx.callback.module_checkpoint(mod, prefix, period=1, save_optimizer_states=True), callback.do_checkpoint(prefix, means, stds)]
# decide learning rate
base_lr = lr
lr_factor = config.TRAIN.lr_factor
lr_epoch = [float(epoch) for epoch in lr_step.split(',')]
lr_epoch_diff = [epoch - begin_epoch for epoch in lr_epoch if epoch > begin_epoch]
lr = base_lr * (lr_factor ** (len(lr_epoch) - len(lr_epoch_diff)))
lr_iters = [int(epoch * len(roidb) / batch_size) for epoch in lr_epoch_diff]
print('lr', lr, 'lr_epoch_diff', lr_epoch_diff, 'lr_iters', lr_iters)
lr_scheduler = WarmupMultiFactorScheduler(lr_iters, lr_factor, config.TRAIN.warmup, config.TRAIN.warmup_lr, config.TRAIN.warmup_step)
# optimizer
optimizer_params = {'momentum': config.TRAIN.momentum,
'wd': config.TRAIN.wd,
'learning_rate': lr,
'lr_scheduler': lr_scheduler,
'clip_gradient': None}
#
if not isinstance(train_data, PrefetchingIter):
train_data = PrefetchingIter(train_data)
# train
mod.fit(train_data, eval_metric=eval_metrics, epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback, kvstore=config.default.kvstore,
optimizer='sgd', optimizer_params=optimizer_params,
arg_params=arg_params, aux_params=aux_params, begin_epoch=begin_epoch, num_epoch=end_epoch)
def main():
print('Called with argument:', args)
ctx = [mx.gpu(int(i)) for i in config.gpus.split(',')]
train_net(args, ctx, config.network.pretrained, config.network.pretrained_epoch, config.TRAIN.model_prefix,
config.TRAIN.begin_epoch, config.TRAIN.end_epoch, config.TRAIN.lr, config.TRAIN.lr_step)
if __name__ == '__main__':
main()
| 45.264865
| 152
| 0.709219
|
c1cd6be83de1edf00e5353689a3e01683e0f8b0c
| 1,379
|
py
|
Python
|
dialogue-engine/test/programytest/parser/test_factory.py
|
cotobadesign/cotoba-agent-oss
|
3833d56e79dcd7529c3e8b3a3a8a782d513d9b12
|
[
"MIT"
] | 104
|
2020-03-30T09:40:00.000Z
|
2022-03-06T22:34:25.000Z
|
dialogue-engine/test/programytest/parser/test_factory.py
|
cotobadesign/cotoba-agent-oss
|
3833d56e79dcd7529c3e8b3a3a8a782d513d9b12
|
[
"MIT"
] | 25
|
2020-06-12T01:36:35.000Z
|
2022-02-19T07:30:44.000Z
|
dialogue-engine/test/programytest/parser/test_factory.py
|
cotobadesign/cotoba-agent-oss
|
3833d56e79dcd7529c3e8b3a3a8a782d513d9b12
|
[
"MIT"
] | 10
|
2020-04-02T23:43:56.000Z
|
2021-05-14T13:47:01.000Z
|
"""
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
from programy.parser.factory import NodeFactory
class FactoryTests(unittest.TestCase):
def test_init(self):
factory = NodeFactory("Test")
self.assertIsNotNone(factory)
self.assertEqual({}, factory._nodes_config)
self.assertEqual("Test", factory._type)
| 49.25
| 126
| 0.778825
|
1895bcc815ab09c601263457f04cb70db3750ef1
| 209
|
py
|
Python
|
tint/testing/sample_files.py
|
Cxl1013/TINT
|
479c5899713d149003096fbbc900b3c52793394d
|
[
"BSD-2-Clause"
] | 53
|
2017-12-11T14:27:19.000Z
|
2021-11-03T06:16:44.000Z
|
tint/testing/sample_files.py
|
Cxl1013/TINT
|
479c5899713d149003096fbbc900b3c52793394d
|
[
"BSD-2-Clause"
] | 14
|
2017-09-25T16:08:46.000Z
|
2021-11-30T19:25:12.000Z
|
tint/testing/sample_files.py
|
Cxl1013/TINT
|
479c5899713d149003096fbbc900b3c52793394d
|
[
"BSD-2-Clause"
] | 38
|
2017-07-24T13:49:06.000Z
|
2021-11-03T06:16:43.000Z
|
"""
============
Sample Files
============
Sample files for unit tests.
"""
import os
DATA_PATH = os.path.join(os.path.dirname(__file__), 'data')
SAMPLE_GRID_FILE = os.path.join(DATA_PATH, 'test_grid.nc')
| 14.928571
| 59
| 0.631579
|
34d5eafd9e629f1bec348aad49e25ac6a77c8a23
| 4,277
|
py
|
Python
|
benchmark/startQiskit_noisy3047.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_noisy3047.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_noisy3047.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=4
# total number=41
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
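    # bitwise XOR of two equal-length bit strings; the result is reversed before joining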
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
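    # dot product of two bit strings modulo 2, returned as '0' or '1'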
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.cx(input_qubit[0],input_qubit[3]) # number=13
prog.cx(input_qubit[0],input_qubit[3]) # number=17
prog.x(input_qubit[3]) # number=18
prog.cx(input_qubit[0],input_qubit[3]) # number=19
prog.cx(input_qubit[0],input_qubit[3]) # number=15
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[1]) # number=31
prog.cz(input_qubit[2],input_qubit[1]) # number=32
prog.h(input_qubit[1]) # number=33
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=12
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[0]) # number=24
prog.cz(input_qubit[3],input_qubit[0]) # number=25
prog.h(input_qubit[0]) # number=26
prog.h(input_qubit[0]) # number=38
prog.cz(input_qubit[3],input_qubit[0]) # number=39
prog.h(input_qubit[0]) # number=40
prog.z(input_qubit[3]) # number=29
prog.cx(input_qubit[3],input_qubit[0]) # number=30
prog.x(input_qubit[2]) # number=23
prog.cx(input_qubit[3],input_qubit[0]) # number=22
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.y(input_qubit[2]) # number=10
prog.y(input_qubit[2]) # number=11
prog.x(input_qubit[3]) # number=36
prog.cx(input_qubit[3],input_qubit[0]) # number=34
prog.cx(input_qubit[3],input_qubit[0]) # number=35
prog.x(input_qubit[2]) # number=37
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = FakeVigo()
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_noisy3047.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 34.772358
| 140
| 0.653963
|
93cc02a61fcbf1e0ecc0596f34d2f996a33043ee
| 231
|
py
|
Python
|
offline_install/portalocker-2.0.0/portalocker/__about__.py
|
weisenpan/detectron2-master
|
a8e0954c414d63fa09e54aeae60e31aa437acaa3
|
[
"Apache-2.0"
] | null | null | null |
offline_install/portalocker-2.0.0/portalocker/__about__.py
|
weisenpan/detectron2-master
|
a8e0954c414d63fa09e54aeae60e31aa437acaa3
|
[
"Apache-2.0"
] | null | null | null |
offline_install/portalocker-2.0.0/portalocker/__about__.py
|
weisenpan/detectron2-master
|
a8e0954c414d63fa09e54aeae60e31aa437acaa3
|
[
"Apache-2.0"
] | null | null | null |
__package_name__ = 'portalocker'
__author__ = 'Rick van Hattem'
__email__ = 'wolph@wol.ph'
__version__ = '2.0.0'
__description__ = '''Wraps the portalocker recipe for easy usage'''
__url__ = 'https://github.com/WoLpH/portalocker'
| 28.875
| 67
| 0.744589
|
e26e4a1989fd12a46da08aa9ff0d7dfe330613ee
| 881
|
py
|
Python
|
tasks.py
|
lixiang2017/patchwork
|
d653591cc8130765978a91d02c4cd108e76cca06
|
[
"BSD-2-Clause"
] | 157
|
2015-01-07T06:48:34.000Z
|
2022-03-21T10:04:55.000Z
|
tasks.py
|
lixiang2017/patchwork
|
d653591cc8130765978a91d02c4cd108e76cca06
|
[
"BSD-2-Clause"
] | 23
|
2015-08-12T06:34:56.000Z
|
2021-09-13T19:25:31.000Z
|
tasks.py
|
lixiang2017/patchwork
|
d653591cc8130765978a91d02c4cd108e76cca06
|
[
"BSD-2-Clause"
] | 43
|
2016-01-05T05:08:27.000Z
|
2021-11-11T13:11:12.000Z
|
from importlib import import_module
from invocations import docs, travis
from invocations.checks import blacken
from invocations.packaging import release
from invocations.pytest import test, coverage
from invoke import Collection, task
@task
def sanity(c):
"""
Quick sanity check to ensure we're installed successfully. Mostly for CI.
"""
# Doesn't need to literally import everything, but "a handful" will do.
for name in ("environment", "files", "transfers"):
mod = "patchwork.{}".format(name)
import_module(mod)
print("Imported {} successfully".format(mod))
ns = Collection(docs, release, travis, test, coverage, sanity, blacken)
ns.configure(
{
"packaging": {
"sign": True,
"wheel": True,
"check_desc": True,
"changelog_file": "docs/changelog.rst",
}
}
)
| 25.911765
| 77
| 0.649262
|
d57013c3a422214c3904f24a839a29eb6d78ce1b
| 2,570
|
py
|
Python
|
cherrypy/test/test_wsgi_ns.py
|
seshness/bearlol
|
ff89bc1e66a96b6e55538a5cee38370e08e5b682
|
[
"BSD-3-Clause"
] | 2
|
2020-12-28T22:37:45.000Z
|
2021-01-23T18:04:46.000Z
|
cherrypy/test/test_wsgi_ns.py
|
seshness/bearlol
|
ff89bc1e66a96b6e55538a5cee38370e08e5b682
|
[
"BSD-3-Clause"
] | null | null | null |
cherrypy/test/test_wsgi_ns.py
|
seshness/bearlol
|
ff89bc1e66a96b6e55538a5cee38370e08e5b682
|
[
"BSD-3-Clause"
] | null | null | null |
import cherrypy
from cherrypy.test import helper
class WSGI_Namespace_Test(helper.CPWebCase):
def setup_server():
class WSGIResponse(object):
def __init__(self, appresults):
self.appresults = appresults
self.iter = iter(appresults)
def __iter__(self):
return self
def next(self):
return self.iter.next()
def close(self):
if hasattr(self.appresults, "close"):
self.appresults.close()
class ChangeCase(object):
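            # WSGI middleware that applies the string method named by 'to' (e.g. 'upper')
            # to every chunk of the wrapped app's response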
def __init__(self, app, to=None):
self.app = app
self.to = to
def __call__(self, environ, start_response):
res = self.app(environ, start_response)
class CaseResults(WSGIResponse):
def next(this):
return getattr(this.iter.next(), self.to)()
return CaseResults(res)
class Replacer(object):
def __init__(self, app, map={}):
self.app = app
self.map = map
def __call__(self, environ, start_response):
res = self.app(environ, start_response)
class ReplaceResults(WSGIResponse):
def next(this):
line = this.iter.next()
for k, v in self.map.iteritems():
line = line.replace(k, v)
return line
return ReplaceResults(res)
class Root(object):
def index(self):
return "HellO WoRlD!"
index.exposed = True
root_conf = {'wsgi.pipeline': [('replace', Replacer)],
'wsgi.replace.map': {'L': 'X', 'l': 'r'},
}
app = cherrypy.Application(Root())
app.wsgiapp.pipeline.append(('changecase', ChangeCase))
app.wsgiapp.config['changecase'] = {'to': 'upper'}
cherrypy.tree.mount(app, config={'/': root_conf})
setup_server = staticmethod(setup_server)
def test_pipeline(self):
if not cherrypy.server.httpserver:
return self.skip()
self.getPage("/")
# If body is "HEXXO WORXD!", the middleware was applied out of order.
self.assertBody("HERRO WORRD!")
| 31.728395
| 77
| 0.473541
|
abb8f1549151fa72494ab62b8e83933a0bdc8dba
| 19,498
|
py
|
Python
|
venv/lib/python3.8/site-packages/_pytest/terminal.py
|
LachlanAttwood/ConvertMe
|
da544b5cacead3213dab76e9e716222b011d7688
|
[
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/_pytest/terminal.py
|
LachlanAttwood/ConvertMe
|
da544b5cacead3213dab76e9e716222b011d7688
|
[
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/_pytest/terminal.py
|
LachlanAttwood/ConvertMe
|
da544b5cacead3213dab76e9e716222b011d7688
|
[
"MIT"
] | null | null | null |
""" terminal reporting of the full testing process.
This is a good source for looking at the various reporting hooks.
"""
import pytest
import py
import sys
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting", "reporting", after="general")
group._addoption('-v', '--verbose', action="count",
dest="verbose", default=0, help="increase verbosity."),
group._addoption('-q', '--quiet', action="count",
dest="quiet", default=0, help="decrease verbosity."),
group._addoption('-r',
action="store", dest="reportchars", default=None, metavar="chars",
help="show extra test summary info as specified by chars (f)ailed, "
"(E)error, (s)skipped, (x)failed, (X)passed (w)warnings.")
group._addoption('-l', '--showlocals',
action="store_true", dest="showlocals", default=False,
help="show locals in tracebacks (disabled by default).")
group._addoption('--report',
action="store", dest="report", default=None, metavar="opts",
help="(deprecated, use -r)")
group._addoption('--tb', metavar="style",
action="store", dest="tbstyle", default='auto',
choices=['auto', 'long', 'short', 'no', 'line', 'native'],
help="traceback print mode (long/short/line/native/no).")
group._addoption('--fulltrace', '--full-trace',
action="store_true", default=False,
help="don't cut any tracebacks (default is to cut).")
group._addoption('--color', metavar="color",
action="store", dest="color", default='auto',
choices=['yes', 'no', 'auto'],
help="color terminal output (yes/no/auto).")
def pytest_configure(config):
config.option.verbose -= config.option.quiet
reporter = TerminalReporter(config, sys.stdout)
config.pluginmanager.register(reporter, 'terminalreporter')
if config.option.debug or config.option.traceconfig:
def mywriter(tags, args):
msg = " ".join(map(str, args))
reporter.write_line("[traceconfig] " + msg)
config.trace.root.setprocessor("pytest:config", mywriter)
def getreportopt(config):
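    # translate the deprecated --report option and the -r flag into a string of report characters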
reportopts = ""
optvalue = config.option.report
if optvalue:
py.builtin.print_("DEPRECATED: use -r instead of --report option.",
file=py.std.sys.stderr)
if optvalue:
for setting in optvalue.split(","):
setting = setting.strip()
if setting == "skipped":
reportopts += "s"
elif setting == "xfailed":
reportopts += "x"
reportchars = config.option.reportchars
if reportchars:
for char in reportchars:
if char not in reportopts:
reportopts += char
return reportopts
def pytest_report_teststatus(report):
if report.passed:
letter = "."
elif report.skipped:
letter = "s"
elif report.failed:
letter = "F"
if report.when != "call":
letter = "f"
return report.outcome, letter, report.outcome.upper()
class WarningReport:
def __init__(self, code, message, nodeid=None, fslocation=None):
self.code = code
self.message = message
self.nodeid = nodeid
self.fslocation = fslocation
class TerminalReporter:
def __init__(self, config, file=None):
self.config = config
self.verbosity = self.config.option.verbose
self.showheader = self.verbosity >= 0
self.showfspath = self.verbosity >= 0
self.showlongtestinfo = self.verbosity > 0
self._numcollected = 0
self.stats = {}
self.startdir = self.curdir = py.path.local()
if file is None:
file = py.std.sys.stdout
self._tw = self.writer = py.io.TerminalWriter(file)
if self.config.option.color == 'yes':
self._tw.hasmarkup = True
if self.config.option.color == 'no':
self._tw.hasmarkup = False
self.currentfspath = None
self.reportchars = getreportopt(config)
self.hasmarkup = self._tw.hasmarkup
def hasopt(self, char):
char = {'xfailed': 'x', 'skipped': 's'}.get(char, char)
return char in self.reportchars
def write_fspath_result(self, fspath, res):
if fspath != self.currentfspath:
self.currentfspath = fspath
#fspath = self.startdir.bestrelpath(fspath)
self._tw.line()
#relpath = self.startdir.bestrelpath(fspath)
self._tw.write(fspath + " ")
self._tw.write(res)
def write_ensure_prefix(self, prefix, extra="", **kwargs):
if self.currentfspath != prefix:
self._tw.line()
self.currentfspath = prefix
self._tw.write(prefix)
if extra:
self._tw.write(extra, **kwargs)
self.currentfspath = -2
def ensure_newline(self):
if self.currentfspath:
self._tw.line()
self.currentfspath = None
def write(self, content, **markup):
self._tw.write(content, **markup)
def write_line(self, line, **markup):
if not py.builtin._istext(line):
line = py.builtin.text(line, errors="replace")
self.ensure_newline()
self._tw.line(line, **markup)
def rewrite(self, line, **markup):
line = str(line)
self._tw.write("\r" + line, **markup)
def write_sep(self, sep, title=None, **markup):
self.ensure_newline()
self._tw.sep(sep, title, **markup)
def section(self, title, sep="=", **kw):
self._tw.sep(sep, title, **kw)
def line(self, msg, **kw):
self._tw.line(msg, **kw)
def pytest_internalerror(self, excrepr):
for line in py.builtin.text(excrepr).split("\n"):
self.write_line("INTERNALERROR> " + line)
return 1
def pytest_logwarning(self, code, fslocation, message, nodeid):
warnings = self.stats.setdefault("warnings", [])
warning = WarningReport(code=code, fslocation=fslocation,
message=message, nodeid=nodeid)
warnings.append(warning)
def pytest_plugin_registered(self, plugin):
if self.config.option.traceconfig:
msg = "PLUGIN registered: %s" % (plugin,)
# XXX this event may happen during setup/teardown time
# which unfortunately captures our output here
# which garbles our output if we use self.write_line
self.write_line(msg)
def pytest_deselected(self, items):
self.stats.setdefault('deselected', []).extend(items)
def pytest_runtest_logstart(self, nodeid, location):
# ensure that the path is printed before the
# 1st test of a module starts running
fspath = nodeid.split("::")[0]
if self.showlongtestinfo:
line = self._locationline(fspath, *location)
self.write_ensure_prefix(line, "")
elif self.showfspath:
self.write_fspath_result(fspath, "")
def pytest_runtest_logreport(self, report):
rep = report
res = self.config.hook.pytest_report_teststatus(report=rep)
cat, letter, word = res
self.stats.setdefault(cat, []).append(rep)
self._tests_ran = True
if not letter and not word:
# probably passed setup/teardown
return
if self.verbosity <= 0:
if not hasattr(rep, 'node') and self.showfspath:
self.write_fspath_result(rep.fspath, letter)
else:
self._tw.write(letter)
else:
if isinstance(word, tuple):
word, markup = word
else:
if rep.passed:
markup = {'green':True}
elif rep.failed:
markup = {'red':True}
elif rep.skipped:
markup = {'yellow':True}
line = self._locationline(str(rep.fspath), *rep.location)
if not hasattr(rep, 'node'):
self.write_ensure_prefix(line, word, **markup)
#self._tw.write(word, **markup)
else:
self.ensure_newline()
if hasattr(rep, 'node'):
self._tw.write("[%s] " % rep.node.gateway.id)
self._tw.write(word, **markup)
self._tw.write(" " + line)
self.currentfspath = -2
def pytest_collection(self):
if not self.hasmarkup and self.config.option.verbose >= 1:
self.write("collecting ... ", bold=True)
def pytest_collectreport(self, report):
if report.failed:
self.stats.setdefault("error", []).append(report)
elif report.skipped:
self.stats.setdefault("skipped", []).append(report)
items = [x for x in report.result if isinstance(x, pytest.Item)]
self._numcollected += len(items)
if self.hasmarkup:
#self.write_fspath_result(report.fspath, 'E')
self.report_collect()
def report_collect(self, final=False):
if self.config.option.verbose < 0:
return
errors = len(self.stats.get('error', []))
skipped = len(self.stats.get('skipped', []))
if final:
line = "collected "
else:
line = "collecting "
line += str(self._numcollected) + " items"
if errors:
line += " / %d errors" % errors
if skipped:
line += " / %d skipped" % skipped
if self.hasmarkup:
if final:
line += " \n"
self.rewrite(line, bold=True)
else:
self.write_line(line)
def pytest_collection_modifyitems(self):
self.report_collect(True)
@pytest.mark.trylast
def pytest_sessionstart(self, session):
self._sessionstarttime = py.std.time.time()
if not self.showheader:
return
self.write_sep("=", "test session starts", bold=True)
verinfo = ".".join(map(str, sys.version_info[:3]))
msg = "platform %s -- Python %s" % (sys.platform, verinfo)
if hasattr(sys, 'pypy_version_info'):
verinfo = ".".join(map(str, sys.pypy_version_info[:3]))
msg += "[pypy-%s-%s]" % (verinfo, sys.pypy_version_info[3])
msg += " -- py-%s -- pytest-%s" % (py.__version__, pytest.__version__)
if self.verbosity > 0 or self.config.option.debug or \
getattr(self.config.option, 'pastebin', None):
msg += " -- " + str(sys.executable)
self.write_line(msg)
lines = self.config.hook.pytest_report_header(
config=self.config, startdir=self.startdir)
lines.reverse()
for line in flatten(lines):
self.write_line(line)
def pytest_report_header(self, config):
plugininfo = config.pluginmanager._plugin_distinfo
if plugininfo:
l = []
for dist, plugin in plugininfo:
name = dist.project_name
if name.startswith("pytest-"):
name = name[7:]
l.append(name)
return "plugins: %s" % ", ".join(l)
def pytest_collection_finish(self, session):
if self.config.option.collectonly:
self._printcollecteditems(session.items)
if self.stats.get('failed'):
self._tw.sep("!", "collection failures")
for rep in self.stats.get('failed'):
rep.toterminal(self._tw)
return 1
return 0
if not self.showheader:
return
#for i, testarg in enumerate(self.config.args):
# self.write_line("test path %d: %s" %(i+1, testarg))
def _printcollecteditems(self, items):
# to print out items and their parent collectors
# we take care to leave out Instances aka ()
# because later versions are going to get rid of them anyway
if self.config.option.verbose < 0:
if self.config.option.verbose < -1:
counts = {}
for item in items:
name = item.nodeid.split('::', 1)[0]
counts[name] = counts.get(name, 0) + 1
for name, count in sorted(counts.items()):
self._tw.line("%s: %d" % (name, count))
else:
for item in items:
nodeid = item.nodeid
nodeid = nodeid.replace("::()::", "::")
self._tw.line(nodeid)
return
stack = []
indent = ""
for item in items:
needed_collectors = item.listchain()[1:] # strip root node
while stack:
if stack == needed_collectors[:len(stack)]:
break
stack.pop()
for col in needed_collectors[len(stack):]:
stack.append(col)
#if col.name == "()":
# continue
indent = (len(stack) - 1) * " "
self._tw.line("%s%s" % (indent, col))
def pytest_sessionfinish(self, exitstatus, __multicall__):
__multicall__.execute()
self._tw.line("")
if exitstatus in (0, 1, 2, 4):
self.summary_errors()
self.summary_failures()
self.summary_warnings()
self.config.hook.pytest_terminal_summary(terminalreporter=self)
if exitstatus == 2:
self._report_keyboardinterrupt()
del self._keyboardinterrupt_memo
self.summary_deselected()
self.summary_stats()
def pytest_keyboard_interrupt(self, excinfo):
self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True)
def pytest_unconfigure(self):
if hasattr(self, '_keyboardinterrupt_memo'):
self._report_keyboardinterrupt()
def _report_keyboardinterrupt(self):
excrepr = self._keyboardinterrupt_memo
msg = excrepr.reprcrash.message
self.write_sep("!", msg)
if "KeyboardInterrupt" in msg:
if self.config.option.fulltrace:
excrepr.toterminal(self._tw)
else:
excrepr.reprcrash.toterminal(self._tw)
def _locationline(self, collect_fspath, fspath, lineno, domain):
# collect_fspath comes from testid which has a "/"-normalized path
if fspath and fspath.replace("\\", "/") != collect_fspath:
fspath = "%s <- %s" % (collect_fspath, fspath)
if fspath:
line = str(fspath)
if lineno is not None:
lineno += 1
line += "@" + str(lineno)
if domain:
split = str(domain).split('[')
split[0] = split[0].replace('.', '::') # don't replace '.' in params
line += "::" + '['.join(split)
else:
line = "[location]"
return line + " "
def _getfailureheadline(self, rep):
if hasattr(rep, 'location'):
fspath, lineno, domain = rep.location
return domain
else:
return "test session" # XXX?
def _getcrashline(self, rep):
try:
return str(rep.longrepr.reprcrash)
except AttributeError:
try:
return str(rep.longrepr)[:50]
except AttributeError:
return ""
#
# summaries for sessionfinish
#
def getreports(self, name):
l = []
for x in self.stats.get(name, []):
if not hasattr(x, '_pdbshown'):
l.append(x)
return l
def summary_warnings(self):
if self.hasopt("w"):
warnings = self.stats.get("warnings")
if not warnings:
return
self.write_sep("=", "warning summary")
for w in warnings:
self._tw.line("W%s %s %s" % (w.code,
w.fslocation, w.message))
def summary_failures(self):
if self.config.option.tbstyle != "no":
reports = self.getreports('failed')
if not reports:
return
self.write_sep("=", "FAILURES")
for rep in reports:
if self.config.option.tbstyle == "line":
line = self._getcrashline(rep)
self.write_line(line)
else:
msg = self._getfailureheadline(rep)
self.write_sep("_", msg)
self._outrep_summary(rep)
def summary_errors(self):
if self.config.option.tbstyle != "no":
reports = self.getreports('error')
if not reports:
return
self.write_sep("=", "ERRORS")
for rep in self.stats['error']:
msg = self._getfailureheadline(rep)
if not hasattr(rep, 'when'):
# collect
msg = "ERROR collecting " + msg
elif rep.when == "setup":
msg = "ERROR at setup of " + msg
elif rep.when == "teardown":
msg = "ERROR at teardown of " + msg
self.write_sep("_", msg)
self._outrep_summary(rep)
def _outrep_summary(self, rep):
rep.toterminal(self._tw)
for secname, content in rep.sections:
self._tw.sep("-", secname)
if content[-1:] == "\n":
content = content[:-1]
self._tw.line(content)
def summary_stats(self):
session_duration = py.std.time.time() - self._sessionstarttime
keys = ("failed passed skipped deselected "
"xfailed xpassed warnings").split()
for key in self.stats.keys():
if key not in keys:
keys.append(key)
parts = []
for key in keys:
if key: # setup/teardown reports have an empty key, ignore them
val = self.stats.get(key, None)
if val:
parts.append("%d %s" % (len(val), key))
line = ", ".join(parts)
msg = "%s in %.2f seconds" % (line, session_duration)
markup = {'bold': True}
if 'failed' in self.stats or 'error' in self.stats:
markup = {'red': True, 'bold': True}
else:
markup = {'green': True, 'bold': True}
if self.verbosity >= 0:
self.write_sep("=", msg, **markup)
if self.verbosity == -1:
self.write_line(msg, **markup)
def summary_deselected(self):
if 'deselected' in self.stats:
l = []
k = self.config.option.keyword
if k:
l.append("-k%s" % k)
m = self.config.option.markexpr
if m:
l.append("-m %r" % m)
if l:
self.write_sep("=", "%d tests deselected by %r" % (
len(self.stats['deselected']), " ".join(l)), bold=True)
def repr_pythonversion(v=None):
if v is None:
v = sys.version_info
try:
return "%s.%s.%s-%s-%s" % v
except (TypeError, ValueError):
return str(v)
def flatten(l):
for x in l:
if isinstance(x, (list, tuple)):
for y in flatten(x):
yield y
else:
yield x
| 36.92803
| 85
| 0.544364
|
6c1262fbdb6d9df782631aa31451550c92bb7ef2
| 6,569
|
py
|
Python
|
setup.py
|
giammi56/iminuit
|
afd9f4d48d8e9500875f992f663f1a2f609b9575
|
[
"MIT"
] | null | null | null |
setup.py
|
giammi56/iminuit
|
afd9f4d48d8e9500875f992f663f1a2f609b9575
|
[
"MIT"
] | null | null | null |
setup.py
|
giammi56/iminuit
|
afd9f4d48d8e9500875f992f663f1a2f609b9575
|
[
"MIT"
] | null | null | null |
# Use CFLAGS="-g -Og -DDEBUG" python setup.py ... for debugging
import os
import platform
from os.path import dirname, join, exists
from glob import glob
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
from distutils.ccompiler import CCompiler
from distutils.unixccompiler import UnixCCompiler
from distutils.msvccompiler import MSVCCompiler
import distutils.ccompiler
extra_flags = []
if bool(os.environ.get("COVERAGE", False)):
extra_flags += ["--coverage"]
if platform.system() == "Darwin":
extra_flags += ["-stdlib=libc++"]
# turn off warnings raised by Minuit and generated Cython code that need
# to be fixed in the original code bases of Minuit and Cython
compiler_opts = {
CCompiler: {},
UnixCCompiler: {
"extra_compile_args": [
"-std=c++11",
"-Wno-shorten-64-to-32",
"-Wno-parentheses",
"-Wno-unused-variable",
"-Wno-sign-compare",
"-Wno-cpp", # suppresses #warnings from numpy
"-Wno-deprecated-declarations",
]
+ extra_flags,
"extra_link_args": extra_flags,
},
MSVCCompiler: {"extra_compile_args": ["/EHsc"]},
}
class SmartBuildExt(build_ext):
def build_extensions(self):
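        # extend each extension's compile/link flags with the options matching the active compiler class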
c = self.compiler
opts = [v for k, v in compiler_opts.items() if isinstance(c, k)]
for e in self.extensions:
for o in opts:
for attrib, value in o.items():
getattr(e, attrib).extend(value)
build_ext.build_extensions(self)
# prevent setup from recompiling static Minuit2 code again and again
def lazy_compile(
self,
sources,
output_dir=None,
macros=None,
include_dirs=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
depends=None,
):
macros, objects, extra_postargs, pp_opts, build = self._setup_compile(
output_dir, macros, include_dirs, sources, depends, extra_postargs
)
pp_opts += compiler_opts.get(self, {}).get("extra_compile_args", [])
cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
if not exists(obj) or os.stat(obj).st_mtime < os.stat(src).st_mtime:
self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
return objects
# monkey-patching lazy_compile into CCompiler
distutils.ccompiler.CCompiler.compile = lazy_compile
# Static linking
cwd = dirname(__file__)
# We follow the recommendation on how to distribute Cython modules:
# http://docs.cython.org/src/reference/compilation.html#distributing-cython-modules
try:
from Cython.Build import cythonize
USE_CYTHON = True
except ImportError:
USE_CYTHON = False
if exists("src/iminuit/_libiminuit.cpp"):
print("Cython is not available ... using pre-generated cpp file.")
else:
raise SystemExit(
"Looks like you are compiling iminuit from sources. "
"This requires Cython. Run\n\n"
" pip install cython\n\n"
"for a system-wide installation, or\n\n"
" pip install --user cython\n\n"
"for a user-wide installation."
)
ext = ".pyx" if USE_CYTHON else ".cpp"
try:
import numpy
numpy_header = [numpy.get_include()]
except ImportError:
numpy_header = []
# Install missing Minuit2 submodule as needed
if not os.listdir(join(cwd, "extern/Minuit2")):
try:
import subprocess as subp
print("Minuit2 submodule is missing, attempting download...")
subp.check_call(["git", "submodule", "update"])
except subp.CalledProcessError:
raise SystemExit(
"Could not download Minuit2 submodule, run `git submodule update` manually"
)
minuit2_cxx = [
join(cwd, "extern/Minuit2/src", x) + ".cxx"
for x in open(join(cwd, "minuit2_cxx.lst"), "r").read().split("\n")
if x
]
libiminuit = Extension(
"iminuit._libiminuit",
sources=sorted(glob(join(cwd, "src/iminuit/*" + ext)) + minuit2_cxx),
include_dirs=[join(cwd, "extern/Minuit2/inc")] + numpy_header,
define_macros=[
("WARNINGMSG", "1"),
("ROOT_Math_VecTypes", "1"),
("MATH_NO_PLUGIN_MANAGER", "1"),
],
)
extensions = [libiminuit]
if USE_CYTHON:
extensions = cythonize(extensions)
# Getting the version number at this point is a bit tricky in Python:
# https://packaging.python.org/guides/single-sourcing-package-version/?highlight=single%20sourcing
with open(join(cwd, "src/iminuit/version.py")) as fp:
version = {}
exec(fp.read(), version) # this loads __version__
version = version["__version__"]
with open(join(cwd, "README.rst")) as readme_rst:
txt = readme_rst.read()
# skip everything up to the skip marker
skip_marker = ".. skip-marker-do-not-remove"
long_description = txt[txt.index(skip_marker) + len(skip_marker) :].lstrip()
setup(
name="iminuit",
version=version,
description="Jupyter-friendly Python frontend for MINUIT2 in C++",
long_description=long_description,
long_description_content_type="text/x-rst",
author="Piti Ongmongkolkul and the iminuit team",
maintainer="Hans Dembinski",
maintainer_email="hans.dembinski@gmail.com",
url="http://github.com/scikit-hep/iminuit",
project_urls={
"Documentation": "https://iminuit.readthedocs.io",
"Source Code": "http://github.com/scikit-hep/iminuit",
},
packages=["iminuit", "iminuit.tests"],
package_dir={"": "src"},
ext_modules=extensions,
python_requires=">=3.5",
install_requires=["numpy>=1.11.3"],
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: C++",
"Programming Language :: Cython",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Scientific/Engineering :: Physics",
"Topic :: Scientific/Engineering :: Mathematics",
"Intended Audience :: Science/Research",
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)",
"License :: OSI Approved :: MIT License",
],
cmdclass={"build_ext": SmartBuildExt},
)
| 32.359606
| 98
| 0.650023
|
057b5ac645c39da8f8ce5be9de53053ce1161ee0
| 6,229
|
py
|
Python
|
apps/eth/migrations/0001_squashed_0005_transactionreceipt_evm_hash.py
|
kevingduck/transmission
|
c29ae529c02c885cdb0e64a35d7d4750ab1b8001
|
[
"Apache-2.0"
] | 19
|
2018-09-04T14:49:01.000Z
|
2020-06-09T22:13:10.000Z
|
apps/eth/migrations/0001_squashed_0005_transactionreceipt_evm_hash.py
|
kevingduck/transmission
|
c29ae529c02c885cdb0e64a35d7d4750ab1b8001
|
[
"Apache-2.0"
] | 50
|
2018-09-18T17:28:57.000Z
|
2021-01-09T16:18:45.000Z
|
apps/eth/migrations/0001_squashed_0005_transactionreceipt_evm_hash.py
|
kevingduck/transmission
|
c29ae529c02c885cdb0e64a35d7d4750ab1b8001
|
[
"Apache-2.0"
] | 4
|
2019-12-15T13:44:18.000Z
|
2021-06-09T20:39:54.000Z
|
# Generated by Django 3.0.8 on 2020-10-08 22:05
import apps.eth.fields
import django.core.validators
from django.db import migrations, models
import django.db.migrations.operations.special
import django.db.models.deletion
import django_extensions.db.fields.json
import shipchain_common.utils
class Migration(migrations.Migration):
replaces = [('eth', '0001_squashed_091919'), ('eth', '0002_nullable_fields'), ('eth', '0003_deduplicate_events'), ('eth', '0004_event_unique_constraint'), ('eth', '0005_transactionreceipt_evm_hash')]
initial = True
dependencies = [
('shipments', '0001_squashed_091919'),
('jobs', '0001_squashed_091919'),
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='EthAction',
fields=[
('transaction_hash', apps.eth.fields.HashField(default='', max_length=66, primary_key=True, serialize=False, validators=[django.core.validators.RegexValidator(message='Invalid hash.', regex='^0x([A-Fa-f0-9]{64})$')])),
('updated_at', models.DateTimeField(auto_now=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('async_job', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='jobs.AsyncJob')),
('shipment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shipments.Shipment')),
],
),
migrations.CreateModel(
name='Event',
fields=[
('id', models.CharField(default=shipchain_common.utils.random_id, max_length=36, primary_key=True, serialize=False)),
('created_at', models.DateTimeField(auto_now_add=True)),
('address', apps.eth.fields.AddressField(default='0x0', max_length=42, validators=[django.core.validators.RegexValidator(message='Invalid address.', regex='^0x([A-Fa-f0-9]{40})$')])),
('block_number', models.BigIntegerField()),
('transaction_hash', apps.eth.fields.HashField(default='', max_length=66, validators=[django.core.validators.RegexValidator(message='Invalid hash.', regex='^0x([A-Fa-f0-9]{64})$')])),
('transaction_index', models.IntegerField()),
('block_hash', apps.eth.fields.HashField(default='', max_length=66, validators=[django.core.validators.RegexValidator(message='Invalid hash.', regex='^0x([A-Fa-f0-9]{64})$')])),
('log_index', models.IntegerField()),
('removed', models.BooleanField()),
('event_id', models.CharField(max_length=514)),
('return_values', django_extensions.db.fields.json.JSONField(default=dict)),
('event_name', models.CharField(max_length=514)),
('signature', apps.eth.fields.HashField(default='', max_length=66, validators=[django.core.validators.RegexValidator(message='Invalid hash.', regex='^0x([A-Fa-f0-9]{64})$')])),
('raw', django_extensions.db.fields.json.JSONField(default=dict)),
('eth_action', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='eth.EthAction')),
],
),
migrations.CreateModel(
name='Transaction',
fields=[
('eth_action', models.OneToOneField(db_column='hash', on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='eth.EthAction')),
('nonce', models.CharField(max_length=32)),
('to_address', apps.eth.fields.AddressField(default='0x0', max_length=42, validators=[django.core.validators.RegexValidator(message='Invalid address.', regex='^0x([A-Fa-f0-9]{40})$')])),
('value', models.CharField(max_length=32)),
('gas_limit', models.CharField(max_length=32)),
('gas_price', models.CharField(max_length=32)),
('data', models.TextField()),
('chain_id', models.BigIntegerField()),
],
),
migrations.CreateModel(
name='TransactionReceipt',
fields=[
('block_hash', apps.eth.fields.HashField(default='', max_length=66, null=True, validators=[django.core.validators.RegexValidator(message='Invalid hash.', regex='^0x([A-Fa-f0-9]{64})$')])),
('block_number', models.BigIntegerField(null=True)),
('contract_address', apps.eth.fields.AddressField(default='0x0', max_length=42, null=True, validators=[django.core.validators.RegexValidator(message='Invalid address.', regex='^0x([A-Fa-f0-9]{40})$')])),
('cumulative_gas_used', models.IntegerField(null=True)),
('from_address', apps.eth.fields.AddressField(default='0x0', max_length=42, validators=[django.core.validators.RegexValidator(message='Invalid address.', regex='^0x([A-Fa-f0-9]{40})$')])),
('gas_used', models.IntegerField(null=True)),
('logs', django_extensions.db.fields.json.JSONField(default=dict, null=True)),
('logs_bloom', models.CharField(max_length=514, null=True)),
('status', models.BooleanField(null=True)),
('to_address', apps.eth.fields.AddressField(default='0x0', max_length=42, null=True, validators=[django.core.validators.RegexValidator(message='Invalid address.', regex='^0x([A-Fa-f0-9]{40})$')])),
('eth_action', models.OneToOneField(db_column='transaction_hash', on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='eth.EthAction')),
('transaction_index', models.IntegerField(null=True)),
],
),
migrations.AddConstraint(
model_name='event',
constraint=models.UniqueConstraint(fields=('eth_action', 'log_index'), name='unique event'),
),
migrations.AddField(
model_name='transactionreceipt',
name='evm_hash',
field=apps.eth.fields.HashField(default='', max_length=66, null=True, validators=[django.core.validators.RegexValidator(message='Invalid hash.', regex='^0x([A-Fa-f0-9]{64})$')]),
),
]
| 65.568421
| 234
| 0.632686
|
89e9e6c7f00a5a24a4065c8166003486c11ca018
| 9,567
|
py
|
Python
|
myven/lib/python3.8/site-packages/ansible/modules/network/f5/bigip_traffic_group.py
|
baltham/dne-dna-code
|
4a13309a790a670d2f07e635c9264a0c29976c6a
|
[
"MIT"
] | 1
|
2021-04-02T08:08:39.000Z
|
2021-04-02T08:08:39.000Z
|
myven/lib/python3.8/site-packages/ansible/modules/network/f5/bigip_traffic_group.py
|
baltham/dne-dna-code
|
4a13309a790a670d2f07e635c9264a0c29976c6a
|
[
"MIT"
] | null | null | null |
myven/lib/python3.8/site-packages/ansible/modules/network/f5/bigip_traffic_group.py
|
baltham/dne-dna-code
|
4a13309a790a670d2f07e635c9264a0c29976c6a
|
[
"MIT"
] | 1
|
2020-05-03T01:13:16.000Z
|
2020-05-03T01:13:16.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_traffic_group
short_description: Manages traffic groups on BIG-IP
description:
- Supports managing traffic groups and their attributes on a BIG-IP.
version_added: "2.5"
options:
name:
description:
- The name of the traffic group.
required: True
partition:
description:
- Device partition to manage resources on.
default: Common
version_added: 2.5
state:
description:
- When C(present), ensures that the traffic group exists.
- When C(absent), ensures the traffic group is removed.
default: present
choices:
- present
- absent
version_added: 2.5
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create a traffic group
bigip_traffic_group:
name: foo
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
'''
RETURN = r'''
# only common fields returned
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
HAS_DEVEL_IMPORTS = False
try:
# Sideband repository used for dev
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fqdn_name
from library.module_utils.network.f5.common import f5_argument_spec
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
HAS_DEVEL_IMPORTS = True
except ImportError:
# Upstream Ansible
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fqdn_name
from ansible.module_utils.network.f5.common import f5_argument_spec
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
api_map = {
}
api_attributes = [
]
returnables = [
]
updatables = [
]
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class Changes(Parameters):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
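        # default comparison: return the wanted value when it differs from, or is missing in, the current state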
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def partition(self):
raise F5ModuleError(
"Partition cannot be changed for a traffic group. Only /Common is allowed."
)
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = Parameters(params=self.module.params)
self.changes = Changes()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = Changes(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = Changes(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def exists(self):
result = self.client.api.tm.cm.traffic_groups.traffic_group.exists(
name=self.want.name,
partition=self.want.partition
)
return result
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_changed_options()
if self.want.partition.lower().strip('/') != 'common':
raise F5ModuleError(
"Traffic groups can only be created in the /Common partition"
)
if self.module.check_mode:
return True
self.create_on_device()
return True
def create_on_device(self):
params = self.want.api_params()
self.client.api.tm.cm.traffic_groups.traffic_group.create(
name=self.want.name,
partition=self.want.partition,
**params
)
def update_on_device(self):
params = self.want.api_params()
resource = self.client.api.tm.cm.traffic_groups.traffic_group.load(
name=self.want.name,
partition=self.want.partition
)
resource.modify(**params)
def absent(self):
if self.exists():
return self.remove()
return False
def remove_from_device(self):
resource = self.client.api.tm.cm.traffic_groups.traffic_group.load(
name=self.want.name,
partition=self.want.partition
)
if resource:
resource.delete()
def read_current_from_device(self):
resource = self.client.api.tm.cm.traffic_groups.traffic_group.load(
name=self.want.name,
partition=self.want.partition
)
result = resource.attrs
return Parameters(params=result)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
state=dict(default='present', choices=['absent', 'present']),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
if not HAS_F5SDK:
module.fail_json(msg="The python f5-sdk module is required")
try:
client = F5Client(**module.params)
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
module.exit_json(**results)
except F5ModuleError as ex:
cleanup_tokens(client)
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| 28.221239
| 91
| 0.621616
|
d814f7b0291017950bb93fd73c29647f71579dc2
| 2,556
|
py
|
Python
|
tests/unit/test_models.py
|
ixc/wagtail-personalisation
|
956c1bf4f5846ad86470c41df8b8364bc99ab99b
|
[
"MIT"
] | null | null | null |
tests/unit/test_models.py
|
ixc/wagtail-personalisation
|
956c1bf4f5846ad86470c41df8b8364bc99ab99b
|
[
"MIT"
] | 2
|
2021-03-11T01:25:00.000Z
|
2022-02-10T23:17:24.000Z
|
tests/unit/test_models.py
|
ixc/wagtail-personalisation
|
956c1bf4f5846ad86470c41df8b8364bc99ab99b
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, unicode_literals
import datetime
import pytest
from django.db.models import ProtectedError
from tests.factories.page import ContentPageFactory
from tests.factories.segment import SegmentFactory
from tests.site.pages import models
from wagtail_personalisation.models import PersonalisablePageMetadata
from wagtail_personalisation.rules import TimeRule
@pytest.mark.django_db
def test_segment_create():
segment = SegmentFactory()
TimeRule(
start_time=datetime.time(8, 0, 0),
end_time=datetime.time(23, 0, 0),
segment=segment)
@pytest.mark.django_db
def test_metadata_page_has_variants(segmented_page):
assert not segmented_page.personalisation_metadata.is_canonical
assert not segmented_page.personalisation_metadata.has_variants
canonical = segmented_page.personalisation_metadata.canonical_page
assert canonical.personalisation_metadata.is_canonical
assert canonical.personalisation_metadata.has_variants
@pytest.mark.django_db
def test_content_page_model():
page = ContentPageFactory()
qs = models.ContentPage.objects.all()
assert page in qs
@pytest.mark.django_db
def test_variant_can_be_deleted_without_error(segmented_page):
segmented_page.delete()
# Make sure the metadata gets deleted because of models.CASCADE.
with pytest.raises(PersonalisablePageMetadata.DoesNotExist):
segmented_page._personalisable_page_metadata.refresh_from_db()
@pytest.mark.django_db
def test_canonical_page_deletion_is_protected(segmented_page):
# When deleting canonical page without deleting variants, it should return
# an error. All variants should be deleted beforehand.
with pytest.raises(ProtectedError):
segmented_page.personalisation_metadata.canonical_page.delete()
@pytest.mark.django_db
def test_page_protection_when_deleting_segment(segmented_page):
segment = segmented_page.personalisation_metadata.segment
assert len(segment.get_used_pages())
with pytest.raises(ProtectedError):
segment.delete()
@pytest.mark.django_db
def test_sitemap_generation_for_canonical_pages_is_enabled(segmented_page):
canonical = segmented_page.personalisation_metadata.canonical_page
assert canonical.personalisation_metadata.is_canonical
assert canonical.get_sitemap_urls()
@pytest.mark.django_db
def test_sitemap_generation_for_variants_is_disabled(segmented_page):
assert not segmented_page.personalisation_metadata.is_canonical
assert not segmented_page.get_sitemap_urls()
| 33.631579
| 78
| 0.81651
|
bae7ddc1dcb0e1045147077d33b8173211e4d9dd
| 5,179
|
py
|
Python
|
kdaHDFE/kdaHDFE.py
|
sbaker-dev/kdaHDFE
|
8e442f24002deb8b90f6b358777572f271e1df20
|
[
"BSD-3-Clause"
] | null | null | null |
kdaHDFE/kdaHDFE.py
|
sbaker-dev/kdaHDFE
|
8e442f24002deb8b90f6b358777572f271e1df20
|
[
"BSD-3-Clause"
] | null | null | null |
kdaHDFE/kdaHDFE.py
|
sbaker-dev/kdaHDFE
|
8e442f24002deb8b90f6b358777572f271e1df20
|
[
"BSD-3-Clause"
] | null | null | null |
from kdaHDFE import formula_transform, clustered_error, is_nested, robust_err, Result, demean
import statsmodels.api as sm
import pandas as pd
import numpy as np
class HDFE:
def __init__(self, data_frame, formula, robust=False, cm="cgm", ps_def=True, epsilon=1e-8, max_iter=1e6,
mean_squared_error=10):
# Setup the database reference
self.df = data_frame
self.obs = len(self.df)
# Extract variable names from formula, check they all exist in dataframe
self.phenotype, self.covariants, self.fixed_effects, self.clusters = formula_transform(formula)
for variable in self.phenotype + self.covariants + self.fixed_effects + self.clusters:
assert variable in self.df.columns, f"{variable} is not in DataFrame!"
# Some standard variables to be used for demeaning / clustering
self.mean_squared_error = mean_squared_error
self.epsilon = epsilon
self.max_iter = max_iter
self.robust = robust
# todo: IF clusters > 1 then cgm2 is preferred
self.cm = cm
self.ps_def = ps_def
def reg_hdfe(self, rank, demean_data=True):
"""
Run a demeaned version of the data frame via absorption of FE's and adjust results relative to the demeaned data
:param rank: Degrees of freedom after demeaning
:type rank: int
        :param demean_data: Demean based on the variables selected in the formula; defaults to True. If False,
            assumes the data is already demeaned and that any necessary intercept columns exist.
:type demean_data: bool
:return: Results of the regression
:rtype: Result
"""
if demean_data:
demeaned = self._reg_demean()
else:
demeaned = self.df
# Calculate the base unadjusted OLS results, add residuals to result for clustering and update degrees of
# freedom from demeaning
result = sm.OLS(demeaned[self.phenotype], demeaned[self.covariants], missing='drop').fit()
demeaned['resid'] = result.resid # Ever used?
result.df_resid = result.df_resid - rank
std_error, covariant_matrix = self._reg_std(result, rank, demeaned)
return Result(result, std_error, covariant_matrix)
def _reg_demean(self):
"""
        Certain model specifications may require us to add an intercept, for example when demeaning was requested
        but there are no fixed effects to absorb. If we do have fixed effects, demean the data.
        :return: Demeaned (or intercept-augmented) DataFrame
:rtype: pd.DataFrame
"""
# Add a constant if the model lacks any covariants / fe or lacks both covariants and fe but not clusters
if len(self.covariants) == 0 or len(self.fixed_effects) == 0 or \
(len(self.covariants) == 0 and len(self.fixed_effects) == 0 and len(self.clusters) > 0):
# Demean == DataFrame
demeaned = self.df.copy()
demeaned["Const"] = [1.0 for _ in range(len(demeaned))]
self.covariants = self.covariants + ["Const"]
return demeaned
else:
# Demean the whole dataframe
return demean(self.phenotype + self.covariants, self.df, self.fixed_effects, self.obs,
self.epsilon, self.max_iter, self.mean_squared_error)
def _reg_std(self, result, rank, demeaned_df):
"""
        If we have clusters, the standard errors need to be clustered according to the clustering method in self.cm.
        Otherwise, we construct robust or non-robust standard errors from the OLS standard errors, adjusted for the
        degrees of freedom lost to demeaning.
:param result: OLS result
:param rank: rank of degrees of freedom
:param demeaned_df: Demeaned Database for clustering if required
:return: The standard error and the covariance matrix
"""
# Now we need to update the standard errors of the OLS based on robust and clustering
if (len(self.clusters) == 0) & (self.robust is False):
std_error = result.bse * np.sqrt((result.nobs - len(self.covariants)) / (result.nobs - len(self.covariants)
- rank))
covariance_matrix = result.normalized_cov_params * result.scale * result.df_resid / result.df_resid
elif (len(self.clusters) == 0) & (self.robust is True):
covariance_matrix = robust_err(demeaned_df, self.covariants, result.nobs, len(self.covariants), rank)
std_error = np.sqrt(np.diag(covariance_matrix))
else:
nested = is_nested(demeaned_df, self.fixed_effects, self.clusters, self.covariants)
covariance_matrix = clustered_error(demeaned_df, self.covariants, self.clusters, result.nobs,
len(self.covariants), rank, nested=nested, c_method=self.cm,
psdef=self.ps_def)
std_error = np.sqrt(np.diag(covariance_matrix))
return std_error, covariance_matrix
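# --- Hedged illustration (not part of kdaHDFE) ---------------------------------------------------
# A minimal, self-contained sketch of the degrees-of-freedom correction applied in _reg_std when
# there are no clusters and robust errors are off: the plain OLS standard errors are inflated by
# sqrt((n - k) / (n - k - rank)) to account for the `rank` parameters absorbed by demeaning.
# All names below (X, y, rank) are illustrative assumptions, not part of the library.
def _dof_corrected_bse_example():
    rng = np.random.RandomState(0)
    X = sm.add_constant(rng.normal(size=(200, 2)))
    y = X @ np.array([1.0, 0.5, -0.25]) + rng.normal(size=200)
    fit = sm.OLS(y, X).fit()
    rank = 5  # pretend five fixed-effect dummies were absorbed before this regression
    k = X.shape[1]
    # Same adjustment as the non-clustered, non-robust branch of HDFE._reg_std above.
    return fit.bse * np.sqrt((fit.nobs - k) / (fit.nobs - k - rank))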
| 45.831858
| 120
| 0.63912
|
cfe5fd52542800bd7b4d1bc9e2acaf9ee50d5d1f
| 4,140
|
py
|
Python
|
retriever/modifiedBM25/getEntityMatch/entity_match.py
|
dheeraj7596/HashNews
|
2c849bf6bdda40e8caa328754d5ba3cee2ce507f
|
[
"MIT"
] | null | null | null |
retriever/modifiedBM25/getEntityMatch/entity_match.py
|
dheeraj7596/HashNews
|
2c849bf6bdda40e8caa328754d5ba3cee2ce507f
|
[
"MIT"
] | null | null | null |
retriever/modifiedBM25/getEntityMatch/entity_match.py
|
dheeraj7596/HashNews
|
2c849bf6bdda40e8caa328754d5ba3cee2ce507f
|
[
"MIT"
] | 1
|
2021-04-28T21:54:39.000Z
|
2021-04-28T21:54:39.000Z
|
# get entity_matches.json and entity_matches_random.json
import pandas as pd
from collections import defaultdict
import json
import argparse
import time
from datetime import datetime, timedelta
# entity_path = "../data/"
# tweet_path = "ne_tweets_ner.txt"
# news_path = "ne_randomnews.txt"
data_path = "/data1/xiuwen/twitter/"
result_path = "/home/xiuwen/tweetAnalyze/"
# result_path = common_result_path
# news_csv = "randomnews.csv"
# tweet_json = "tweet.json"
def extract_entity_set(df):
ner_set = set()
for en in df.entity.to_list():
ner_set.update(en)
return ner_set
def extract_entity_set_from_json(df, key):
ner = [['id', 'entity']]
ner_set = set()
for index, row in df.iterrows():
temp = [i['ner'].lower() for i in row[key]]
ner_set.update(temp)
        ner.append([row['id'], temp])
return pd.DataFrame(ner[1:], columns=ner[0]), ner_set
def entity_match(t_entity, n_entity):
    # Map each tweet entity to the news entities whose token sets overlap it by at least 65%.
    matches = defaultdict(list)
for t in t_entity:
t_temp = t.split()
for n in n_entity:
n_temp = n.split()
if len([1 for i in t_temp if i in n_temp])/len(t_temp) >= 0.65:
matches[t].append(n)
return matches
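# --- Hedged illustration (not part of the pipeline) ----------------------------------------------
# Minimal sanity check of the token-overlap rule in entity_match(): a tweet entity matches a news
# entity when at least 65% of the tweet entity's tokens also occur in the news entity. The entity
# strings below are invented purely for illustration.
def _entity_match_example():
    demo = entity_match({"joe biden", "nasa"}, {"joe biden jr", "nasa headquarters", "senate"})
    assert demo["joe biden"] == ["joe biden jr"]     # 2/2 tokens shared
    assert demo["nasa"] == ["nasa headquarters"]     # 1/1 token shared
    assert "senate" not in sum(demo.values(), [])    # no tweet entity overlaps "senate"
    return demo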
# parser = argparse.ArgumentParser(description='pass day')
# parser.add_argument('--day', '-d')
# args = parser.parse_args()
if __name__ == '__main__':
# use this for first dataset
# day = datetime(2018, 10, int(args.day)).date()
# if (int(args.day) > 24):
# day = datetime(2020, 5, int(args.day)).date()
# else:
# day = datetime(2020, 6, int(args.day)).date()
# print(str(day))
# rand_news = pd.read_pickle(data_path+"news_random.pkl")
# news = pd.read_pickle(data_path + "tweet_2020/news.pkl")
year = "2020"
news = pd.read_json(data_path+"tweet"+year+"/ne_news.txt", orient="records", lines=True)
# rand_news = pd.read_json(data_path + "ne_news.txt", orient="records", lines=True)
rand_news = pd.read_pickle(data_path + "news_random.pkl")
tweet = pd.read_pickle(data_path + "tweet" + year + "/tweets.pkl")
# news = pd.read_csv(data_path+news_csv, encoding="utf-8", error_bad_lines=False)
# news['publishdate'] = pd.to_datetime(news.publishdate, errors='coerce').dt.date
# news = news.dropna()
# tweets = pd.read_json(data_path + tweet_json)
# tweets.created_at = tweets.created_at.dt.date
# tweets_entity_data = pd.read_json(entity_path + tweet_path, orient="records", lines=True)
# news_entity_data = pd.read_json(entity_path + news_path, orient="records", lines=True)
#
# tweets_index, news_index = get_news_5days_before_tweet(day)
# is_date = tweets.created_at == day
# tweets_this_day = tweets[is_date]
# tweets_index = tweets_this_day.id.to_list()
tweet_entity = extract_entity_set(tweet)
news_pkl, news_entity = extract_entity_set_from_json(news, 'news')
rand_news_entity = extract_entity_set(rand_news)
# rand_pkl, rand_news_entity = extract_entity_set_from_json(rand_news, 'news')
news_pkl.to_pickle(data_path+"tweet"+year+"/news_entity.pkl")
# rand_news.to_pickle(data_path + "news_random.pkl")
# print(len(quries))
# print(len(corpus))
print(len(tweet_entity))
print(len(news_entity))
print(len(rand_news_entity))
# match = 1
# mismatch = -1
# scoring = swalign.NucleotideScoringMatrix(match, mismatch)
# test = list(tweet_entity)[:20]
# sw = swalign.LocalAlignment(scoring)
matches = entity_match(tweet_entity, news_entity)
random_matches = entity_match(tweet_entity, rand_news_entity)
match_json = json.dumps(matches)
rand_match_json = json.dumps(random_matches)
# print(json)
# quries.to_pickle("./result/tweets.pkl")
# corpus.to_pickle("./result/news.pkl")
f = open(result_path+"result"+year+"/entity_matches.json", "w")
f.write(match_json)
f.close()
f = open(result_path + "result"+year+"/entity_matches_random.json", "w")
f.write(rand_match_json)
f.close()
| 36.637168
| 96
| 0.657246
|
89441d5a4c0586d49803d1e74637d42f324020cd
| 5,989
|
py
|
Python
|
pybind/slxos/v16r_1_00b/igmp_snooping/ip/igmp/igmp_snooping/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/igmp_snooping/ip/igmp/igmp_snooping/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/igmp_snooping/ip/igmp/igmp_snooping/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class igmp_snooping(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-igmp-snooping - based on the path /igmp-snooping/ip/igmp/igmp-snooping. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__igmps_enable',)
_yang_name = 'igmp-snooping'
_rest_name = 'snooping'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__igmps_enable = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="igmps-enable", rest_name="enable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IGMP Enable', u'cli-full-command': None, u'cli-suppress-show-conf-path': None, u'alt-name': u'enable'}}, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='empty', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'igmp-snooping', u'ip', u'igmp', u'igmp-snooping']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'ip', u'igmp', u'snooping']
def _get_igmps_enable(self):
"""
Getter method for igmps_enable, mapped from YANG variable /igmp_snooping/ip/igmp/igmp_snooping/igmps_enable (empty)
"""
return self.__igmps_enable
def _set_igmps_enable(self, v, load=False):
"""
Setter method for igmps_enable, mapped from YANG variable /igmp_snooping/ip/igmp/igmp_snooping/igmps_enable (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_igmps_enable is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_igmps_enable() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="igmps-enable", rest_name="enable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IGMP Enable', u'cli-full-command': None, u'cli-suppress-show-conf-path': None, u'alt-name': u'enable'}}, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """igmps_enable must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="igmps-enable", rest_name="enable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IGMP Enable', u'cli-full-command': None, u'cli-suppress-show-conf-path': None, u'alt-name': u'enable'}}, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='empty', is_config=True)""",
})
self.__igmps_enable = t
if hasattr(self, '_set'):
self._set()
def _unset_igmps_enable(self):
self.__igmps_enable = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="igmps-enable", rest_name="enable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IGMP Enable', u'cli-full-command': None, u'cli-suppress-show-conf-path': None, u'alt-name': u'enable'}}, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='empty', is_config=True)
igmps_enable = __builtin__.property(_get_igmps_enable, _set_igmps_enable)
_pyangbind_elements = {'igmps_enable': igmps_enable, }
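# --- Hedged usage sketch (not part of the generated bindings) ------------------------------------
# A typical interaction with this auto-generated class, shown as a sketch only: instantiate it and
# toggle the empty leaf through the generated property. In practice the object sits inside the
# full pybind tree and parent/path wiring is handled by the enclosing containers.
# snoop = igmp_snooping()
# snoop.igmps_enable = True   # 'empty' leaf: assigning a truthy value marks it as present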
| 48.691057
| 494
| 0.711638
|
b0581eba78a64403ab0b340626f84ab97ed016d7
| 1,005
|
py
|
Python
|
pybamm/input/parameters/lithium-ion/electrolytes/lipf6_EC_EMC_3_7_Landesfeind2019/electrolyte_conductivity_EC_EMC_3_7_Landesfeind2019.py
|
YannickNoelStephanKuhn/PyBaMM
|
d90636a755b7b77bbc75ae7bc2728c8ee2fa730a
|
[
"BSD-3-Clause"
] | 1
|
2021-03-06T15:10:34.000Z
|
2021-03-06T15:10:34.000Z
|
pybamm/input/parameters/lithium_ion/electrolytes/lipf6_EC_EMC_3_7_Landesfeind2019/electrolyte_conductivity_EC_EMC_3_7_Landesfeind2019.py
|
masoodtamaddon/PyBaMM
|
a31e2095600bb92e913598ac4d02b2b6b77b31c1
|
[
"BSD-3-Clause"
] | 1
|
2021-01-23T08:54:49.000Z
|
2021-01-23T08:54:49.000Z
|
pybamm/input/parameters/lithium_ion/electrolytes/lipf6_EC_EMC_3_7_Landesfeind2019/electrolyte_conductivity_EC_EMC_3_7_Landesfeind2019.py
|
masoodtamaddon/PyBaMM
|
a31e2095600bb92e913598ac4d02b2b6b77b31c1
|
[
"BSD-3-Clause"
] | 2
|
2020-05-21T23:16:29.000Z
|
2020-06-22T10:11:40.000Z
|
from electrolyte_base_Landesfeind2019 import (
electrolyte_conductivity_base_Landesfeind2019,
)
import numpy as np
def electrolyte_conductivity_EC_EMC_3_7_Landesfeind2019(c_e, T):
"""
Conductivity of LiPF6 in EC:EMC (3:7 w:w) as a function of ion concentration and
temperature. The data comes from [1].
References
----------
.. [1] Landesfeind, J. and Gasteiger, H.A., 2019. Temperature and Concentration
Dependence of the Ionic Transport Properties of Lithium-Ion Battery Electrolytes.
Journal of The Electrochemical Society, 166(14), pp.A3079-A3097.
Parameters
----------
c_e: :class:`pybamm.Symbol`
Dimensional electrolyte concentration
T: :class:`pybamm.Symbol`
Dimensional temperature
Returns
-------
:class:`pybamm.Symbol`
Electrolyte conductivity
"""
coeffs = np.array([5.21e-1, 2.28e2, -1.06, 3.53e-1, -3.59e-3, 1.48e-3])
return electrolyte_conductivity_base_Landesfeind2019(c_e, T, coeffs)
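# --- Hedged usage sketch (not part of PyBaMM) -----------------------------------------------------
# How this parameter function is typically evaluated: c_e (mol.m-3) and T (K) are passed either as
# pybamm Symbols inside a model, or as plain numbers for a quick standalone check. The numeric
# values below are arbitrary assumptions for illustration only.
# sigma_e = electrolyte_conductivity_EC_EMC_3_7_Landesfeind2019(c_e=1000, T=298.15)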
| 30.454545
| 85
| 0.689552
|
1bf4a42baff4f7c431b10e2523657a977dd9e992
| 954
|
py
|
Python
|
django_school/classroom/views/classroom.py
|
Juan-Bogota/Django-Chat-Educatech
|
146701fcf10e949fdc08114bff284a164e870be6
|
[
"MIT"
] | 1
|
2020-06-11T04:00:02.000Z
|
2020-06-11T04:00:02.000Z
|
django_school/classroom/views/classroom.py
|
Juan-Bogota/Django-Chat-Educatech
|
146701fcf10e949fdc08114bff284a164e870be6
|
[
"MIT"
] | 4
|
2021-09-08T02:09:11.000Z
|
2022-03-12T00:34:57.000Z
|
django_school/classroom/views/classroom.py
|
Juan-Bogota/Django-Chat-Educatech
|
146701fcf10e949fdc08114bff284a164e870be6
|
[
"MIT"
] | 2
|
2020-06-16T16:28:21.000Z
|
2020-08-03T15:13:34.000Z
|
# Django
# standard library
from django.shortcuts import redirect, render
from django.views.generic import TemplateView
from django.contrib import messages
from django.contrib.auth import authenticate
"""
- Redirects to the HTML template
- Model view to be controlled by the URLs and based on the models
"""
class SignUpView(TemplateView):
template_name = 'registration/signup.html'
def home(request):
""" Deploy a message depending of the authentication"""
if request.user.is_authenticated:
if request.user.is_teacher:
messages.success(request, 'Welcome Teacher {} {}'.format(request.user.first_name, request.user.last_name))
return redirect('teachers:teachers')
else:
messages.success(request, 'Welcome Student {} {}'.format(request.user.first_name, request.user.last_name))
return redirect('students:students')
return render(request, 'classroom/home.html')
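# --- Hedged wiring sketch (illustrative only, not part of this app) -------------------------------
# One plausible way these views could be hooked up in a urls.py; the module paths and the
# 'teachers'/'students' namespaces are assumptions inferred from the redirects above.
# from django.urls import include, path
# from classroom.views import classroom
#
# urlpatterns = [
#     path('', classroom.home, name='home'),
#     path('accounts/signup/', classroom.SignUpView.as_view(), name='signup'),
#     path('teachers/', include(('classroom.urls_teachers', 'teachers'), namespace='teachers')),
#     path('students/', include(('classroom.urls_students', 'students'), namespace='students')),
# ]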
| 34.071429
| 118
| 0.716981
|
bbb285a72ffc85fa7197ae91658cd9115d75f353
| 67,298
|
py
|
Python
|
sympy/core/basic.py
|
pecan-pine/sympy
|
3219093a5ae34abd549acf0f4c1b67d419bddc2a
|
[
"BSD-3-Clause"
] | 1
|
2020-05-04T11:36:24.000Z
|
2020-05-04T11:36:24.000Z
|
sympy/core/basic.py
|
mmelotti/sympy
|
bea29026d27cc50c2e6a5501b6a70a9629ed3e18
|
[
"BSD-3-Clause"
] | 1
|
2020-04-22T12:45:26.000Z
|
2020-04-22T12:45:26.000Z
|
sympy/core/basic.py
|
mmelotti/sympy
|
bea29026d27cc50c2e6a5501b6a70a9629ed3e18
|
[
"BSD-3-Clause"
] | null | null | null |
"""Base class for all the objects in SymPy"""
from collections import defaultdict
from itertools import chain, zip_longest
from .assumptions import BasicMeta, ManagedProperties
from .cache import cacheit
from .sympify import _sympify, sympify, SympifyError
from .compatibility import iterable, ordered, Mapping
from .singleton import S
from inspect import getmro
def as_Basic(expr):
"""Return expr as a Basic instance using strict sympify
or raise a TypeError; this is just a wrapper to _sympify,
raising a TypeError instead of a SympifyError."""
from sympy.utilities.misc import func_name
try:
return _sympify(expr)
except SympifyError:
raise TypeError(
'Argument must be a Basic object, not `%s`' % func_name(
expr))
class Basic(metaclass=ManagedProperties):
"""
Base class for all SymPy objects.
Notes and conventions
=====================
1) Always use ``.args``, when accessing parameters of some instance:
>>> from sympy import cot
>>> from sympy.abc import x, y
>>> cot(x).args
(x,)
>>> cot(x).args[0]
x
>>> (x*y).args
(x, y)
>>> (x*y).args[1]
y
2) Never use internal methods or variables (the ones prefixed with ``_``):
>>> cot(x)._args # do not use this, use cot(x).args instead
(x,)
3) By "SymPy object" we mean something that can be returned by
``sympify``. But not all objects one encounters using SymPy are
subclasses of Basic. For example, mutable objects are not:
>>> from sympy import Basic, Matrix, sympify
>>> A = Matrix([[1, 2], [3, 4]]).as_mutable()
>>> isinstance(A, Basic)
False
>>> B = sympify(A)
>>> isinstance(B, Basic)
True
"""
__slots__ = ('_mhash', # hash value
'_args', # arguments
'_assumptions'
)
# To be overridden with True in the appropriate subclasses
is_number = False
is_Atom = False
is_Symbol = False
is_symbol = False
is_Indexed = False
is_Dummy = False
is_Wild = False
is_Function = False
is_Add = False
is_Mul = False
is_Pow = False
is_Number = False
is_Float = False
is_Rational = False
is_Integer = False
is_NumberSymbol = False
is_Order = False
is_Derivative = False
is_Piecewise = False
is_Poly = False
is_AlgebraicNumber = False
is_Relational = False
is_Equality = False
is_Boolean = False
is_Not = False
is_Matrix = False
is_Vector = False
is_Point = False
is_MatAdd = False
is_MatMul = False
def __new__(cls, *args):
obj = object.__new__(cls)
obj._assumptions = cls.default_assumptions
obj._mhash = None # will be set by __hash__ method.
obj._args = args # all items in args must be Basic objects
return obj
def copy(self):
return self.func(*self.args)
def __reduce_ex__(self, proto):
""" Pickling support."""
return type(self), self.__getnewargs__(), self.__getstate__()
def __getnewargs__(self):
return self.args
def __getstate__(self):
return {}
def __setstate__(self, state):
for k, v in state.items():
setattr(self, k, v)
def __hash__(self):
# hash cannot be cached using cache_it because infinite recurrence
# occurs as hash is needed for setting cache dictionary keys
h = self._mhash
if h is None:
h = hash((type(self).__name__,) + self._hashable_content())
self._mhash = h
return h
def _hashable_content(self):
"""Return a tuple of information about self that can be used to
compute the hash. If a class defines additional attributes,
like ``name`` in Symbol, then this method should be updated
accordingly to return such relevant attributes.
Defining more than _hashable_content is necessary if __eq__ has
been defined by a class. See note about this in Basic.__eq__."""
return self._args
@property
def assumptions0(self):
"""
Return object `type` assumptions.
For example:
Symbol('x', real=True)
Symbol('x', integer=True)
are different objects. In other words, besides Python type (Symbol in
this case), the initial assumptions are also forming their typeinfo.
Examples
========
>>> from sympy import Symbol
>>> from sympy.abc import x
>>> x.assumptions0
{'commutative': True}
>>> x = Symbol("x", positive=True)
>>> x.assumptions0
{'commutative': True, 'complex': True, 'extended_negative': False,
'extended_nonnegative': True, 'extended_nonpositive': False,
'extended_nonzero': True, 'extended_positive': True, 'extended_real':
True, 'finite': True, 'hermitian': True, 'imaginary': False,
'infinite': False, 'negative': False, 'nonnegative': True,
'nonpositive': False, 'nonzero': True, 'positive': True, 'real':
True, 'zero': False}
"""
return {}
def compare(self, other):
"""
Return -1, 0, 1 if the object is smaller, equal, or greater than other.
Not in the mathematical sense. If the object is of a different type
from the "other" then their classes are ordered according to
the sorted_classes list.
Examples
========
>>> from sympy.abc import x, y
>>> x.compare(y)
-1
>>> x.compare(x)
0
>>> y.compare(x)
1
"""
# all redefinitions of __cmp__ method should start with the
# following lines:
if self is other:
return 0
n1 = self.__class__
n2 = other.__class__
c = (n1 > n2) - (n1 < n2)
if c:
return c
#
st = self._hashable_content()
ot = other._hashable_content()
c = (len(st) > len(ot)) - (len(st) < len(ot))
if c:
return c
for l, r in zip(st, ot):
l = Basic(*l) if isinstance(l, frozenset) else l
r = Basic(*r) if isinstance(r, frozenset) else r
if isinstance(l, Basic):
c = l.compare(r)
else:
c = (l > r) - (l < r)
if c:
return c
return 0
@staticmethod
def _compare_pretty(a, b):
from sympy.series.order import Order
if isinstance(a, Order) and not isinstance(b, Order):
return 1
if not isinstance(a, Order) and isinstance(b, Order):
return -1
if a.is_Rational and b.is_Rational:
l = a.p * b.q
r = b.p * a.q
return (l > r) - (l < r)
else:
from sympy.core.symbol import Wild
p1, p2, p3 = Wild("p1"), Wild("p2"), Wild("p3")
r_a = a.match(p1 * p2**p3)
if r_a and p3 in r_a:
a3 = r_a[p3]
r_b = b.match(p1 * p2**p3)
if r_b and p3 in r_b:
b3 = r_b[p3]
c = Basic.compare(a3, b3)
if c != 0:
return c
return Basic.compare(a, b)
@classmethod
def fromiter(cls, args, **assumptions):
"""
Create a new object from an iterable.
This is a convenience function that allows one to create objects from
any iterable, without having to convert to a list or tuple first.
Examples
========
>>> from sympy import Tuple
>>> Tuple.fromiter(i for i in range(5))
(0, 1, 2, 3, 4)
"""
return cls(*tuple(args), **assumptions)
@classmethod
def class_key(cls):
"""Nice order of classes. """
return 5, 0, cls.__name__
@cacheit
def sort_key(self, order=None):
"""
Return a sort key.
Examples
========
>>> from sympy.core import S, I
>>> sorted([S(1)/2, I, -I], key=lambda x: x.sort_key())
[1/2, -I, I]
>>> S("[x, 1/x, 1/x**2, x**2, x**(1/2), x**(1/4), x**(3/2)]")
[x, 1/x, x**(-2), x**2, sqrt(x), x**(1/4), x**(3/2)]
>>> sorted(_, key=lambda x: x.sort_key())
[x**(-2), 1/x, x**(1/4), sqrt(x), x, x**(3/2), x**2]
"""
# XXX: remove this when issue 5169 is fixed
def inner_key(arg):
if isinstance(arg, Basic):
return arg.sort_key(order)
else:
return arg
args = self._sorted_args
args = len(args), tuple([inner_key(arg) for arg in args])
return self.class_key(), args, S.One.sort_key(), S.One
def __eq__(self, other):
"""Return a boolean indicating whether a == b on the basis of
their symbolic trees.
This is the same as a.compare(b) == 0 but faster.
Notes
=====
If a class that overrides __eq__() needs to retain the
implementation of __hash__() from a parent class, the
interpreter must be told this explicitly by setting __hash__ =
<ParentClass>.__hash__. Otherwise the inheritance of __hash__()
will be blocked, just as if __hash__ had been explicitly set to
None.
References
==========
from http://docs.python.org/dev/reference/datamodel.html#object.__hash__
"""
if self is other:
return True
tself = type(self)
tother = type(other)
if tself is not tother:
try:
other = _sympify(other)
tother = type(other)
except SympifyError:
return NotImplemented
# As long as we have the ordering of classes (sympy.core),
# comparing types will be slow in Python 2, because it uses
# __cmp__. Until we can remove it
# (https://github.com/sympy/sympy/issues/4269), we only compare
# types in Python 2 directly if they actually have __ne__.
if type(tself).__ne__ is not type.__ne__:
if tself != tother:
return False
elif tself is not tother:
return False
return self._hashable_content() == other._hashable_content()
def __ne__(self, other):
"""``a != b`` -> Compare two symbolic trees and see whether they are different
this is the same as:
``a.compare(b) != 0``
but faster
"""
return not self == other
def dummy_eq(self, other, symbol=None):
"""
Compare two expressions and handle dummy symbols.
Examples
========
>>> from sympy import Dummy
>>> from sympy.abc import x, y
>>> u = Dummy('u')
>>> (u**2 + 1).dummy_eq(x**2 + 1)
True
>>> (u**2 + 1) == (x**2 + 1)
False
>>> (u**2 + y).dummy_eq(x**2 + y, x)
True
>>> (u**2 + y).dummy_eq(x**2 + y, y)
False
"""
s = self.as_dummy()
o = _sympify(other)
o = o.as_dummy()
dummy_symbols = [i for i in s.free_symbols if i.is_Dummy]
if len(dummy_symbols) == 1:
dummy = dummy_symbols.pop()
else:
return s == o
if symbol is None:
symbols = o.free_symbols
if len(symbols) == 1:
symbol = symbols.pop()
else:
return s == o
tmp = dummy.__class__()
return s.subs(dummy, tmp) == o.subs(symbol, tmp)
# Note, we always use the default ordering (lex) in __str__ and __repr__,
# regardless of the global setting. See issue 5487.
def __repr__(self):
"""Method to return the string representation.
Return the expression as a string.
"""
from sympy.printing import sstr
return sstr(self, order=None)
def __str__(self):
from sympy.printing import sstr
return sstr(self, order=None)
# We don't define _repr_png_ here because it would add a large amount of
# data to any notebook containing SymPy expressions, without adding
# anything useful to the notebook. It can still enabled manually, e.g.,
# for the qtconsole, with init_printing().
def _repr_latex_(self):
"""
IPython/Jupyter LaTeX printing
To change the behavior of this (e.g., pass in some settings to LaTeX),
use init_printing(). init_printing() will also enable LaTeX printing
for built in numeric types like ints and container types that contain
SymPy objects, like lists and dictionaries of expressions.
"""
from sympy.printing.latex import latex
s = latex(self, mode='plain')
return "$\\displaystyle %s$" % s
_repr_latex_orig = _repr_latex_
def atoms(self, *types):
"""Returns the atoms that form the current object.
By default, only objects that are truly atomic and can't
be divided into smaller pieces are returned: symbols, numbers,
and number symbols like I and pi. It is possible to request
atoms of any type, however, as demonstrated below.
Examples
========
>>> from sympy import I, pi, sin
>>> from sympy.abc import x, y
>>> (1 + x + 2*sin(y + I*pi)).atoms()
{1, 2, I, pi, x, y}
If one or more types are given, the results will contain only
those types of atoms.
>>> from sympy import Number, NumberSymbol, Symbol
>>> (1 + x + 2*sin(y + I*pi)).atoms(Symbol)
{x, y}
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number)
{1, 2}
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number, NumberSymbol)
{1, 2, pi}
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number, NumberSymbol, I)
{1, 2, I, pi}
Note that I (imaginary unit) and zoo (complex infinity) are special
types of number symbols and are not part of the NumberSymbol class.
The type can be given implicitly, too:
>>> (1 + x + 2*sin(y + I*pi)).atoms(x) # x is a Symbol
{x, y}
Be careful to check your assumptions when using the implicit option
since ``S(1).is_Integer = True`` but ``type(S(1))`` is ``One``, a special type
of sympy atom, while ``type(S(2))`` is type ``Integer`` and will find all
integers in an expression:
>>> from sympy import S
>>> (1 + x + 2*sin(y + I*pi)).atoms(S(1))
{1}
>>> (1 + x + 2*sin(y + I*pi)).atoms(S(2))
{1, 2}
Finally, arguments to atoms() can select more than atomic atoms: any
sympy type (loaded in core/__init__.py) can be listed as an argument
and those types of "atoms" as found in scanning the arguments of the
expression recursively:
>>> from sympy import Function, Mul
>>> from sympy.core.function import AppliedUndef
>>> f = Function('f')
>>> (1 + f(x) + 2*sin(y + I*pi)).atoms(Function)
{f(x), sin(y + I*pi)}
>>> (1 + f(x) + 2*sin(y + I*pi)).atoms(AppliedUndef)
{f(x)}
>>> (1 + x + 2*sin(y + I*pi)).atoms(Mul)
{I*pi, 2*sin(y + I*pi)}
"""
if types:
types = tuple(
[t if isinstance(t, type) else type(t) for t in types])
nodes = preorder_traversal(self)
if types:
result = {node for node in nodes if isinstance(node, types)}
else:
result = {node for node in nodes if not node.args}
return result
@property
def free_symbols(self):
"""Return from the atoms of self those which are free symbols.
For most expressions, all symbols are free symbols. For some classes
this is not true. e.g. Integrals use Symbols for the dummy variables
which are bound variables, so Integral has a method to return all
symbols except those. Derivative keeps track of symbols with respect
to which it will perform a derivative; those are
bound variables, too, so it has its own free_symbols method.
Any other method that uses bound variables should implement a
free_symbols method."""
return set().union(*[a.free_symbols for a in self.args])
@property
def expr_free_symbols(self):
return set()
def as_dummy(self):
"""Return the expression with any objects having structurally
bound symbols replaced with unique, canonical symbols within
the object in which they appear and having only the default
assumption for commutativity being True.
Examples
========
>>> from sympy import Integral, Symbol
>>> from sympy.abc import x, y
>>> r = Symbol('r', real=True)
>>> Integral(r, (r, x)).as_dummy()
Integral(_0, (_0, x))
>>> _.variables[0].is_real is None
True
Notes
=====
Any object that has structural dummy variables should have
a property, `bound_symbols` that returns a list of structural
dummy symbols of the object itself.
Lambda and Subs have bound symbols, but because of how they
are cached, they already compare the same regardless of their
bound symbols:
>>> from sympy import Lambda
>>> Lambda(x, x + 1) == Lambda(y, y + 1)
True
"""
def can(x):
d = {i: i.as_dummy() for i in x.bound_symbols}
# mask free that shadow bound
x = x.subs(d)
c = x.canonical_variables
# replace bound
x = x.xreplace(c)
# undo masking
x = x.xreplace({v: k for k, v in d.items()})
return x
return self.replace(
lambda x: hasattr(x, 'bound_symbols'),
lambda x: can(x))
@property
def canonical_variables(self):
"""Return a dictionary mapping any variable defined in
``self.bound_symbols`` to Symbols that do not clash
with any existing symbol in the expression.
Examples
========
>>> from sympy import Lambda
>>> from sympy.abc import x
>>> Lambda(x, 2*x).canonical_variables
{x: _0}
"""
from sympy.core.symbol import Symbol
from sympy.utilities.iterables import numbered_symbols
if not hasattr(self, 'bound_symbols'):
return {}
dums = numbered_symbols('_')
reps = {}
v = self.bound_symbols
# this free will include bound symbols that are not part of
# self's bound symbols
free = {i.name for i in self.atoms(Symbol) - set(v)}
for v in v:
d = next(dums)
if v.is_Symbol:
while v.name == d.name or d.name in free:
d = next(dums)
reps[v] = d
return reps
def rcall(self, *args):
"""Apply on the argument recursively through the expression tree.
This method is used to simulate a common abuse of notation for
        operators. For instance, in SymPy the following will not work:
``(x+Lambda(y, 2*y))(z) == x+2*z``,
however you can use
>>> from sympy import Lambda
>>> from sympy.abc import x, y, z
>>> (x + Lambda(y, 2*y)).rcall(z)
x + 2*z
"""
return Basic._recursive_call(self, args)
@staticmethod
def _recursive_call(expr_to_call, on_args):
"""Helper for rcall method."""
from sympy import Symbol
def the_call_method_is_overridden(expr):
for cls in getmro(type(expr)):
if '__call__' in cls.__dict__:
return cls != Basic
if callable(expr_to_call) and the_call_method_is_overridden(expr_to_call):
if isinstance(expr_to_call, Symbol): # XXX When you call a Symbol it is
return expr_to_call # transformed into an UndefFunction
else:
return expr_to_call(*on_args)
elif expr_to_call.args:
args = [Basic._recursive_call(
sub, on_args) for sub in expr_to_call.args]
return type(expr_to_call)(*args)
else:
return expr_to_call
def is_hypergeometric(self, k):
from sympy.simplify import hypersimp
return hypersimp(self, k) is not None
@property
def is_comparable(self):
"""Return True if self can be computed to a real number
(or already is a real number) with precision, else False.
Examples
========
>>> from sympy import exp_polar, pi, I
>>> (I*exp_polar(I*pi/2)).is_comparable
True
>>> (I*exp_polar(I*pi*2)).is_comparable
False
A False result does not mean that `self` cannot be rewritten
into a form that would be comparable. For example, the
difference computed below is zero but without simplification
it does not evaluate to a zero with precision:
>>> e = 2**pi*(1 + 2**pi)
>>> dif = e - e.expand()
>>> dif.is_comparable
False
>>> dif.n(2)._prec
1
"""
is_extended_real = self.is_extended_real
if is_extended_real is False:
return False
if not self.is_number:
return False
# don't re-eval numbers that are already evaluated since
# this will create spurious precision
n, i = [p.evalf(2) if not p.is_Number else p
for p in self.as_real_imag()]
if not (i.is_Number and n.is_Number):
return False
if i:
# if _prec = 1 we can't decide and if not,
# the answer is False because numbers with
# imaginary parts can't be compared
# so return False
return False
else:
return n._prec != 1
@property
def func(self):
"""
The top-level function in an expression.
The following should hold for all objects::
>> x == x.func(*x.args)
Examples
========
>>> from sympy.abc import x
>>> a = 2*x
>>> a.func
<class 'sympy.core.mul.Mul'>
>>> a.args
(2, x)
>>> a.func(*a.args)
2*x
>>> a == a.func(*a.args)
True
"""
return self.__class__
@property
def args(self):
"""Returns a tuple of arguments of 'self'.
Examples
========
>>> from sympy import cot
>>> from sympy.abc import x, y
>>> cot(x).args
(x,)
>>> cot(x).args[0]
x
>>> (x*y).args
(x, y)
>>> (x*y).args[1]
y
Notes
=====
Never use self._args, always use self.args.
Only use _args in __new__ when creating a new function.
Don't override .args() from Basic (so that it's easy to
change the interface in the future if needed).
"""
return self._args
@property
def _sorted_args(self):
"""
The same as ``args``. Derived classes which don't fix an
order on their arguments should override this method to
produce the sorted representation.
"""
return self.args
def as_content_primitive(self, radical=False, clear=True):
"""A stub to allow Basic args (like Tuple) to be skipped when computing
the content and primitive components of an expression.
See Also
========
sympy.core.expr.Expr.as_content_primitive
"""
return S.One, self
def subs(self, *args, **kwargs):
"""
Substitutes old for new in an expression after sympifying args.
`args` is either:
- two arguments, e.g. foo.subs(old, new)
- one iterable argument, e.g. foo.subs(iterable). The iterable may be
o an iterable container with (old, new) pairs. In this case the
replacements are processed in the order given with successive
patterns possibly affecting replacements already made.
o a dict or set whose key/value items correspond to old/new pairs.
In this case the old/new pairs will be sorted by op count and in
case of a tie, by number of args and the default_sort_key. The
resulting sorted list is then processed as an iterable container
(see previous).
If the keyword ``simultaneous`` is True, the subexpressions will not be
evaluated until all the substitutions have been made.
Examples
========
>>> from sympy import pi, exp, limit, oo
>>> from sympy.abc import x, y
>>> (1 + x*y).subs(x, pi)
pi*y + 1
>>> (1 + x*y).subs({x:pi, y:2})
1 + 2*pi
>>> (1 + x*y).subs([(x, pi), (y, 2)])
1 + 2*pi
>>> reps = [(y, x**2), (x, 2)]
>>> (x + y).subs(reps)
6
>>> (x + y).subs(reversed(reps))
x**2 + 2
>>> (x**2 + x**4).subs(x**2, y)
y**2 + y
To replace only the x**2 but not the x**4, use xreplace:
>>> (x**2 + x**4).xreplace({x**2: y})
x**4 + y
To delay evaluation until all substitutions have been made,
set the keyword ``simultaneous`` to True:
>>> (x/y).subs([(x, 0), (y, 0)])
0
>>> (x/y).subs([(x, 0), (y, 0)], simultaneous=True)
nan
This has the added feature of not allowing subsequent substitutions
to affect those already made:
>>> ((x + y)/y).subs({x + y: y, y: x + y})
1
>>> ((x + y)/y).subs({x + y: y, y: x + y}, simultaneous=True)
y/(x + y)
In order to obtain a canonical result, unordered iterables are
sorted by count_op length, number of arguments and by the
default_sort_key to break any ties. All other iterables are left
unsorted.
>>> from sympy import sqrt, sin, cos
>>> from sympy.abc import a, b, c, d, e
>>> A = (sqrt(sin(2*x)), a)
>>> B = (sin(2*x), b)
>>> C = (cos(2*x), c)
>>> D = (x, d)
>>> E = (exp(x), e)
>>> expr = sqrt(sin(2*x))*sin(exp(x)*x)*cos(2*x) + sin(2*x)
>>> expr.subs(dict([A, B, C, D, E]))
a*c*sin(d*e) + b
The resulting expression represents a literal replacement of the
old arguments with the new arguments. This may not reflect the
limiting behavior of the expression:
>>> (x**3 - 3*x).subs({x: oo})
nan
>>> limit(x**3 - 3*x, x, oo)
oo
If the substitution will be followed by numerical
evaluation, it is better to pass the substitution to
evalf as
>>> (1/x).evalf(subs={x: 3.0}, n=21)
0.333333333333333333333
rather than
>>> (1/x).subs({x: 3.0}).evalf(21)
0.333333333333333314830
as the former will ensure that the desired level of precision is
obtained.
See Also
========
replace: replacement capable of doing wildcard-like matching,
parsing of match, and conditional replacements
xreplace: exact node replacement in expr tree; also capable of
using matching rules
sympy.core.evalf.EvalfMixin.evalf: calculates the given formula to a desired level of precision
"""
from sympy.core.containers import Dict
from sympy.utilities.iterables import sift
from sympy import Dummy, Symbol
unordered = False
if len(args) == 1:
sequence = args[0]
if isinstance(sequence, set):
unordered = True
elif isinstance(sequence, (Dict, Mapping)):
unordered = True
sequence = sequence.items()
elif not iterable(sequence):
from sympy.utilities.misc import filldedent
raise ValueError(filldedent("""
When a single argument is passed to subs
it should be a dictionary of old: new pairs or an iterable
of (old, new) tuples."""))
elif len(args) == 2:
sequence = [args]
else:
raise ValueError("subs accepts either 1 or 2 arguments")
sequence = list(sequence)
for i, s in enumerate(sequence):
if isinstance(s[0], str):
# when old is a string we prefer Symbol
s = Symbol(s[0]), s[1]
try:
s = [sympify(_, strict=not isinstance(_, str))
for _ in s]
except SympifyError:
# if it can't be sympified, skip it
sequence[i] = None
continue
# skip if there is no change
sequence[i] = None if _aresame(*s) else tuple(s)
sequence = list(filter(None, sequence))
if unordered:
sequence = dict(sequence)
atoms, nonatoms = sift(list(sequence),
lambda x: x.is_Atom, binary=True)
sequence = [(k, sequence[k]) for k in
list(reversed(list(ordered(nonatoms)))) + list(ordered(atoms))]
if kwargs.pop('simultaneous', False): # XXX should this be the default for dict subs?
reps = {}
rv = self
kwargs['hack2'] = True
m = Dummy('subs_m')
for old, new in sequence:
com = new.is_commutative
if com is None:
com = True
d = Dummy('subs_d', commutative=com)
# using d*m so Subs will be used on dummy variables
# in things like Derivative(f(x, y), x) in which x
# is both free and bound
rv = rv._subs(old, d*m, **kwargs)
if not isinstance(rv, Basic):
break
reps[d] = new
reps[m] = S.One # get rid of m
return rv.xreplace(reps)
else:
rv = self
for old, new in sequence:
rv = rv._subs(old, new, **kwargs)
if not isinstance(rv, Basic):
break
return rv
@cacheit
def _subs(self, old, new, **hints):
"""Substitutes an expression old -> new.
If self is not equal to old then _eval_subs is called.
If _eval_subs doesn't want to make any special replacement
then a None is received which indicates that the fallback
should be applied wherein a search for replacements is made
amongst the arguments of self.
>>> from sympy import Add
>>> from sympy.abc import x, y, z
Examples
========
Add's _eval_subs knows how to target x + y in the following
so it makes the change:
>>> (x + y + z).subs(x + y, 1)
z + 1
Add's _eval_subs doesn't need to know how to find x + y in
the following:
>>> Add._eval_subs(z*(x + y) + 3, x + y, 1) is None
True
The returned None will cause the fallback routine to traverse the args and
pass the z*(x + y) arg to Mul where the change will take place and the
substitution will succeed:
>>> (z*(x + y) + 3).subs(x + y, 1)
z + 3
** Developers Notes **
An _eval_subs routine for a class should be written if:
1) any arguments are not instances of Basic (e.g. bool, tuple);
2) some arguments should not be targeted (as in integration
variables);
3) if there is something other than a literal replacement
that should be attempted (as in Piecewise where the condition
may be updated without doing a replacement).
If it is overridden, here are some special cases that might arise:
1) If it turns out that no special change was made and all
the original sub-arguments should be checked for
replacements then None should be returned.
2) If it is necessary to do substitutions on a portion of
the expression then _subs should be called. _subs will
handle the case of any sub-expression being equal to old
(which usually would not be the case) while its fallback
will handle the recursion into the sub-arguments. For
example, after Add's _eval_subs removes some matching terms
it must process the remaining terms so it calls _subs
on each of the un-matched terms and then adds them
onto the terms previously obtained.
3) If the initial expression should remain unchanged then
the original expression should be returned. (Whenever an
expression is returned, modified or not, no further
substitution of old -> new is attempted.) Sum's _eval_subs
routine uses this strategy when a substitution is attempted
on any of its summation variables.
"""
def fallback(self, old, new):
"""
Try to replace old with new in any of self's arguments.
"""
hit = False
args = list(self.args)
for i, arg in enumerate(args):
if not hasattr(arg, '_eval_subs'):
continue
arg = arg._subs(old, new, **hints)
if not _aresame(arg, args[i]):
hit = True
args[i] = arg
if hit:
rv = self.func(*args)
hack2 = hints.get('hack2', False)
if hack2 and self.is_Mul and not rv.is_Mul: # 2-arg hack
coeff = S.One
nonnumber = []
for i in args:
if i.is_Number:
coeff *= i
else:
nonnumber.append(i)
nonnumber = self.func(*nonnumber)
if coeff is S.One:
return nonnumber
else:
return self.func(coeff, nonnumber, evaluate=False)
return rv
return self
if _aresame(self, old):
return new
rv = self._eval_subs(old, new)
if rv is None:
rv = fallback(self, old, new)
return rv
def _eval_subs(self, old, new):
"""Override this stub if you want to do anything more than
attempt a replacement of old with new in the arguments of self.
See also
========
_subs
"""
return None
def xreplace(self, rule):
"""
Replace occurrences of objects within the expression.
Parameters
==========
rule : dict-like
Expresses a replacement rule
Returns
=======
xreplace : the result of the replacement
Examples
========
>>> from sympy import symbols, pi, exp
>>> x, y, z = symbols('x y z')
>>> (1 + x*y).xreplace({x: pi})
pi*y + 1
>>> (1 + x*y).xreplace({x: pi, y: 2})
1 + 2*pi
Replacements occur only if an entire node in the expression tree is
matched:
>>> (x*y + z).xreplace({x*y: pi})
z + pi
>>> (x*y*z).xreplace({x*y: pi})
x*y*z
>>> (2*x).xreplace({2*x: y, x: z})
y
>>> (2*2*x).xreplace({2*x: y, x: z})
4*z
>>> (x + y + 2).xreplace({x + y: 2})
x + y + 2
>>> (x + 2 + exp(x + 2)).xreplace({x + 2: y})
x + exp(y) + 2
xreplace doesn't differentiate between free and bound symbols. In the
following, subs(x, y) would not change x since it is a bound symbol,
but xreplace does:
>>> from sympy import Integral
>>> Integral(x, (x, 1, 2*x)).xreplace({x: y})
Integral(y, (y, 1, 2*y))
Trying to replace x with an expression raises an error:
>>> Integral(x, (x, 1, 2*x)).xreplace({x: 2*y}) # doctest: +SKIP
ValueError: Invalid limits given: ((2*y, 1, 4*y),)
See Also
========
replace: replacement capable of doing wildcard-like matching,
parsing of match, and conditional replacements
subs: substitution of subexpressions as defined by the objects
themselves.
"""
value, _ = self._xreplace(rule)
return value
def _xreplace(self, rule):
"""
Helper for xreplace. Tracks whether a replacement actually occurred.
"""
if self in rule:
return rule[self], True
elif rule:
args = []
changed = False
for a in self.args:
_xreplace = getattr(a, '_xreplace', None)
if _xreplace is not None:
a_xr = _xreplace(rule)
args.append(a_xr[0])
changed |= a_xr[1]
else:
args.append(a)
args = tuple(args)
if changed:
return self.func(*args), True
return self, False
@cacheit
def has(self, *patterns):
"""
Test whether any subexpression matches any of the patterns.
Examples
========
>>> from sympy import sin
>>> from sympy.abc import x, y, z
>>> (x**2 + sin(x*y)).has(z)
False
>>> (x**2 + sin(x*y)).has(x, y, z)
True
>>> x.has(x)
True
Note ``has`` is a structural algorithm with no knowledge of
mathematics. Consider the following half-open interval:
>>> from sympy.sets import Interval
>>> i = Interval.Lopen(0, 5); i
Interval.Lopen(0, 5)
>>> i.args
(0, 5, True, False)
>>> i.has(4) # there is no "4" in the arguments
False
>>> i.has(0) # there *is* a "0" in the arguments
True
Instead, use ``contains`` to determine whether a number is in the
interval or not:
>>> i.contains(4)
True
>>> i.contains(0)
False
Note that ``expr.has(*patterns)`` is exactly equivalent to
``any(expr.has(p) for p in patterns)``. In particular, ``False`` is
returned when the list of patterns is empty.
>>> x.has()
False
"""
return any(self._has(pattern) for pattern in patterns)
def _has(self, pattern):
"""Helper for .has()"""
from sympy.core.function import UndefinedFunction, Function
if isinstance(pattern, UndefinedFunction):
return any(f.func == pattern or f == pattern
for f in self.atoms(Function, UndefinedFunction))
pattern = _sympify(pattern)
if isinstance(pattern, BasicMeta):
return any(isinstance(arg, pattern)
for arg in preorder_traversal(self))
_has_matcher = getattr(pattern, '_has_matcher', None)
if _has_matcher is not None:
match = _has_matcher()
return any(match(arg) for arg in preorder_traversal(self))
else:
return any(arg == pattern for arg in preorder_traversal(self))
def _has_matcher(self):
"""Helper for .has()"""
return lambda other: self == other
def replace(self, query, value, map=False, simultaneous=True, exact=None):
"""
Replace matching subexpressions of ``self`` with ``value``.
If ``map = True`` then also return the mapping {old: new} where ``old``
was a sub-expression found with query and ``new`` is the replacement
value for it. If the expression itself doesn't match the query, then
the returned value will be ``self.xreplace(map)`` otherwise it should
be ``self.subs(ordered(map.items()))``.
Traverses an expression tree and performs replacement of matching
subexpressions from the bottom to the top of the tree. The default
approach is to do the replacement in a simultaneous fashion so
changes made are targeted only once. If this is not desired or causes
problems, ``simultaneous`` can be set to False.
In addition, if an expression containing more than one Wild symbol
is being used to match subexpressions and the ``exact`` flag is None
it will be set to True so the match will only succeed if all non-zero
values are received for each Wild that appears in the match pattern.
Setting this to False accepts a match of 0; while setting it True
accepts all matches that have a 0 in them. See example below for
cautions.
The list of possible combinations of queries and replacement values
is listed below:
Examples
========
Initial setup
>>> from sympy import log, sin, cos, tan, Wild, Mul, Add
>>> from sympy.abc import x, y
>>> f = log(sin(x)) + tan(sin(x**2))
1.1. type -> type
obj.replace(type, newtype)
When object of type ``type`` is found, replace it with the
result of passing its argument(s) to ``newtype``.
>>> f.replace(sin, cos)
log(cos(x)) + tan(cos(x**2))
>>> sin(x).replace(sin, cos, map=True)
(cos(x), {sin(x): cos(x)})
>>> (x*y).replace(Mul, Add)
x + y
1.2. type -> func
obj.replace(type, func)
When object of type ``type`` is found, apply ``func`` to its
argument(s). ``func`` must be written to handle the number
of arguments of ``type``.
>>> f.replace(sin, lambda arg: sin(2*arg))
log(sin(2*x)) + tan(sin(2*x**2))
>>> (x*y).replace(Mul, lambda *args: sin(2*Mul(*args)))
sin(2*x*y)
2.1. pattern -> expr
obj.replace(pattern(wild), expr(wild))
Replace subexpressions matching ``pattern`` with the expression
written in terms of the Wild symbols in ``pattern``.
>>> a, b = map(Wild, 'ab')
>>> f.replace(sin(a), tan(a))
log(tan(x)) + tan(tan(x**2))
>>> f.replace(sin(a), tan(a/2))
log(tan(x/2)) + tan(tan(x**2/2))
>>> f.replace(sin(a), a)
log(x) + tan(x**2)
>>> (x*y).replace(a*x, a)
y
Matching is exact by default when more than one Wild symbol
is used: matching fails unless the match gives non-zero
values for all Wild symbols:
>>> (2*x + y).replace(a*x + b, b - a)
y - 2
>>> (2*x).replace(a*x + b, b - a)
2*x
When set to False, the results may be non-intuitive:
>>> (2*x).replace(a*x + b, b - a, exact=False)
2/x
2.2. pattern -> func
obj.replace(pattern(wild), lambda wild: expr(wild))
All behavior is the same as in 2.1 but now a function in terms of
pattern variables is used rather than an expression:
>>> f.replace(sin(a), lambda a: sin(2*a))
log(sin(2*x)) + tan(sin(2*x**2))
3.1. func -> func
obj.replace(filter, func)
Replace subexpression ``e`` with ``func(e)`` if ``filter(e)``
is True.
>>> g = 2*sin(x**3)
>>> g.replace(lambda expr: expr.is_Number, lambda expr: expr**2)
4*sin(x**9)
The expression itself is also targeted by the query but is done in
such a fashion that changes are not made twice.
>>> e = x*(x*y + 1)
>>> e.replace(lambda x: x.is_Mul, lambda x: 2*x)
2*x*(2*x*y + 1)
When matching a single symbol, `exact` will default to True, but
this may or may not be the behavior that is desired:
Here, we want `exact=False`:
>>> from sympy import Function
>>> f = Function('f')
>>> e = f(1) + f(0)
>>> q = f(a), lambda a: f(a + 1)
>>> e.replace(*q, exact=False)
f(1) + f(2)
>>> e.replace(*q, exact=True)
f(0) + f(2)
But here, the nature of matching makes selecting
the right setting tricky:
>>> e = x**(1 + y)
>>> (x**(1 + y)).replace(x**(1 + a), lambda a: x**-a, exact=False)
1
>>> (x**(1 + y)).replace(x**(1 + a), lambda a: x**-a, exact=True)
x**(-x - y + 1)
>>> (x**y).replace(x**(1 + a), lambda a: x**-a, exact=False)
1
>>> (x**y).replace(x**(1 + a), lambda a: x**-a, exact=True)
x**(1 - y)
It is probably better to use a different form of the query
that describes the target expression more precisely:
>>> (1 + x**(1 + y)).replace(
... lambda x: x.is_Pow and x.exp.is_Add and x.exp.args[0] == 1,
... lambda x: x.base**(1 - (x.exp - 1)))
...
x**(1 - y) + 1
See Also
========
subs: substitution of subexpressions as defined by the objects
themselves.
xreplace: exact node replacement in expr tree; also capable of
using matching rules
"""
from sympy.core.symbol import Dummy, Wild
from sympy.simplify.simplify import bottom_up
try:
query = _sympify(query)
except SympifyError:
pass
try:
value = _sympify(value)
except SympifyError:
pass
if isinstance(query, type):
_query = lambda expr: isinstance(expr, query)
if isinstance(value, type):
_value = lambda expr, result: value(*expr.args)
elif callable(value):
_value = lambda expr, result: value(*expr.args)
else:
raise TypeError(
"given a type, replace() expects another "
"type or a callable")
elif isinstance(query, Basic):
_query = lambda expr: expr.match(query)
if exact is None:
exact = (len(query.atoms(Wild)) > 1)
if isinstance(value, Basic):
if exact:
_value = lambda expr, result: (value.subs(result)
if all(result.values()) else expr)
else:
_value = lambda expr, result: value.subs(result)
elif callable(value):
# match dictionary keys get the trailing underscore stripped
# from them and are then passed as keywords to the callable;
# if ``exact`` is True, only accept match if there are no null
# values amongst those matched.
if exact:
_value = lambda expr, result: (value(**
{str(k)[:-1]: v for k, v in result.items()})
if all(val for val in result.values()) else expr)
else:
_value = lambda expr, result: value(**
{str(k)[:-1]: v for k, v in result.items()})
else:
raise TypeError(
"given an expression, replace() expects "
"another expression or a callable")
elif callable(query):
_query = query
if callable(value):
_value = lambda expr, result: value(expr)
else:
raise TypeError(
"given a callable, replace() expects "
"another callable")
else:
raise TypeError(
"first argument to replace() must be a "
"type, an expression or a callable")
mapping = {} # changes that took place
mask = [] # the dummies that were used as change placeholders
def rec_replace(expr):
result = _query(expr)
if result or result == {}:
new = _value(expr, result)
if new is not None and new != expr:
mapping[expr] = new
if simultaneous:
# don't let this change during rebuilding;
# XXX this may fail if the object being replaced
# cannot be represented as a Dummy in the expression
# tree, e.g. an ExprConditionPair in Piecewise
# cannot be represented with a Dummy
com = getattr(new, 'is_commutative', True)
if com is None:
com = True
d = Dummy('rec_replace', commutative=com)
mask.append((d, new))
expr = d
else:
expr = new
return expr
rv = bottom_up(self, rec_replace, atoms=True)
# restore original expressions for Dummy symbols
if simultaneous:
mask = list(reversed(mask))
for o, n in mask:
r = {o: n}
# if a sub-expression could not be replaced with
# a Dummy then this will fail; either filter
# against such sub-expressions or figure out a
# way to carry out simultaneous replacement
# in this situation.
rv = rv.xreplace(r) # if this fails, see above
if not map:
return rv
else:
if simultaneous:
# restore subexpressions in mapping
for o, n in mask:
r = {o: n}
mapping = {k.xreplace(r): v.xreplace(r)
for k, v in mapping.items()}
return rv, mapping
def find(self, query, group=False):
"""Find all subexpressions matching a query. """
query = _make_find_query(query)
results = list(filter(query, preorder_traversal(self)))
if not group:
return set(results)
else:
groups = {}
for result in results:
if result in groups:
groups[result] += 1
else:
groups[result] = 1
return groups
def count(self, query):
"""Count the number of matching subexpressions. """
query = _make_find_query(query)
return sum(bool(query(sub)) for sub in preorder_traversal(self))
def matches(self, expr, repl_dict={}, old=False):
"""
Helper method for match() that looks for a match between Wild symbols
in self and expressions in expr.
Examples
========
>>> from sympy import symbols, Wild, Basic
>>> a, b, c = symbols('a b c')
>>> x = Wild('x')
>>> Basic(a + x, x).matches(Basic(a + b, c)) is None
True
>>> Basic(a + x, x).matches(Basic(a + b + c, b + c))
{x_: b + c}
"""
expr = sympify(expr)
if not isinstance(expr, self.__class__):
return None
if self == expr:
return repl_dict
if len(self.args) != len(expr.args):
return None
d = repl_dict.copy()
for arg, other_arg in zip(self.args, expr.args):
if arg == other_arg:
continue
d = arg.xreplace(d).matches(other_arg, d, old=old)
if d is None:
return None
return d
def match(self, pattern, old=False):
"""
Pattern matching.
Wild symbols match all.
Return ``None`` when expression (self) does not match
with pattern. Otherwise return a dictionary such that::
pattern.xreplace(self.match(pattern)) == self
Examples
========
>>> from sympy import Wild
>>> from sympy.abc import x, y
>>> p = Wild("p")
>>> q = Wild("q")
>>> r = Wild("r")
>>> e = (x+y)**(x+y)
>>> e.match(p**p)
{p_: x + y}
>>> e.match(p**q)
{p_: x + y, q_: x + y}
>>> e = (2*x)**2
>>> e.match(p*q**r)
{p_: 4, q_: x, r_: 2}
>>> (p*q**r).xreplace(e.match(p*q**r))
4*x**2
The ``old`` flag will give the old-style pattern matching where
expressions and patterns are essentially solved to give the
match. Both of the following give None unless ``old=True``:
>>> (x - 2).match(p - x, old=True)
{p_: 2*x - 2}
>>> (2/x).match(p*x, old=True)
{p_: 2/x**2}
"""
pattern = sympify(pattern)
return pattern.matches(self, old=old)
def count_ops(self, visual=None):
"""wrapper for count_ops that returns the operation count."""
from sympy import count_ops
return count_ops(self, visual)
def doit(self, **hints):
"""Evaluate objects that are not evaluated by default like limits,
integrals, sums and products. All objects of this kind will be
evaluated recursively, unless some species were excluded via 'hints'
or unless the 'deep' hint was set to 'False'.
>>> from sympy import Integral
>>> from sympy.abc import x
>>> 2*Integral(x, x)
2*Integral(x, x)
>>> (2*Integral(x, x)).doit()
x**2
>>> (2*Integral(x, x)).doit(deep=False)
2*Integral(x, x)
"""
if hints.get('deep', True):
terms = [term.doit(**hints) if isinstance(term, Basic) else term
for term in self.args]
return self.func(*terms)
else:
return self
def simplify(self, **kwargs):
"""See the simplify function in sympy.simplify"""
from sympy.simplify import simplify
return simplify(self, **kwargs)
def _eval_rewrite(self, pattern, rule, **hints):
if self.is_Atom:
if hasattr(self, rule):
return getattr(self, rule)()
return self
if hints.get('deep', True):
args = [a._eval_rewrite(pattern, rule, **hints)
if isinstance(a, Basic) else a
for a in self.args]
else:
args = self.args
if pattern is None or isinstance(self, pattern):
if hasattr(self, rule):
rewritten = getattr(self, rule)(*args, **hints)
if rewritten is not None:
return rewritten
return self.func(*args) if hints.get('evaluate', True) else self
def _accept_eval_derivative(self, s):
# This method needs to be overridden by array-like objects
return s._visit_eval_derivative_scalar(self)
def _visit_eval_derivative_scalar(self, base):
# Base is a scalar
# Types are (base: scalar, self: scalar)
return base._eval_derivative(self)
def _visit_eval_derivative_array(self, base):
# Types are (base: array/matrix, self: scalar)
# Base is some kind of array/matrix,
# it should have `.applyfunc(lambda x: x.diff(self)` implemented:
return base._eval_derivative_array(self)
def _eval_derivative_n_times(self, s, n):
# This is the default evaluator for derivatives (as called by `diff`
# and `Derivative`), it will attempt a loop to derive the expression
# `n` times by calling the corresponding `_eval_derivative` method,
# while leaving the derivative unevaluated if `n` is symbolic. This
# method should be overridden if the object has a closed form for its
# symbolic n-th derivative.
from sympy import Integer
if isinstance(n, (int, Integer)):
obj = self
for i in range(n):
obj2 = obj._accept_eval_derivative(s)
if obj == obj2 or obj2 is None:
break
obj = obj2
return obj2
else:
return None
def rewrite(self, *args, **hints):
""" Rewrite functions in terms of other functions.
Rewrites expression containing applications of functions
of one kind in terms of functions of different kind. For
example you can rewrite trigonometric functions as complex
exponentials or combinatorial functions as gamma function.
As a pattern this function accepts a list of functions to
rewrite (instances of DefinedFunction class). As a rule
you can use a string or a destination function instance (in
this case rewrite() will use the str() function).
There is also the possibility to pass hints on how to rewrite
the given expressions. For now there is only one such hint
defined called 'deep'. When 'deep' is set to False it will
forbid functions to rewrite their contents.
Examples
========
>>> from sympy import sin, exp
>>> from sympy.abc import x
Unspecified pattern:
>>> sin(x).rewrite(exp)
-I*(exp(I*x) - exp(-I*x))/2
Pattern as a single function:
>>> sin(x).rewrite(sin, exp)
-I*(exp(I*x) - exp(-I*x))/2
Pattern as a list of functions:
>>> sin(x).rewrite([sin, ], exp)
-I*(exp(I*x) - exp(-I*x))/2
"""
if not args:
return self
else:
pattern = args[:-1]
if isinstance(args[-1], str):
rule = '_eval_rewrite_as_' + args[-1]
else:
# rewrite arg is usually a class but can also be a
# singleton (e.g. GoldenRatio) so we check
# __name__ or __class__.__name__
clsname = getattr(args[-1], "__name__", None)
if clsname is None:
clsname = args[-1].__class__.__name__
rule = '_eval_rewrite_as_' + clsname
if not pattern:
return self._eval_rewrite(None, rule, **hints)
else:
if iterable(pattern[0]):
pattern = pattern[0]
pattern = [p for p in pattern if self.has(p)]
if pattern:
return self._eval_rewrite(tuple(pattern), rule, **hints)
else:
return self
_constructor_postprocessor_mapping = {} # type: ignore
@classmethod
def _exec_constructor_postprocessors(cls, obj):
# WARNING: This API is experimental.
# This is an experimental API that introduces constructor
# postprocessors for SymPy Core elements. If an argument of a SymPy
# expression has a `_constructor_postprocessor_mapping` attribute, it will
# be interpreted as a dictionary containing lists of postprocessing
# functions for matching expression node names.
clsname = obj.__class__.__name__
postprocessors = defaultdict(list)
for i in obj.args:
try:
postprocessor_mappings = (
Basic._constructor_postprocessor_mapping[cls].items()
for cls in type(i).mro()
if cls in Basic._constructor_postprocessor_mapping
)
for k, v in chain.from_iterable(postprocessor_mappings):
postprocessors[k].extend([j for j in v if j not in postprocessors[k]])
except TypeError:
pass
for f in postprocessors.get(clsname, []):
obj = f(obj)
return obj
class Atom(Basic):
"""
A parent class for atomic things. An atom is an expression with no subexpressions.
Examples
========
Symbol, Number, Rational, Integer, ...
But not: Add, Mul, Pow, ...
"""
is_Atom = True
__slots__ = ()
def matches(self, expr, repl_dict={}, old=False):
if self == expr:
return repl_dict
def xreplace(self, rule, hack2=False):
return rule.get(self, self)
def doit(self, **hints):
return self
@classmethod
def class_key(cls):
return 2, 0, cls.__name__
@cacheit
def sort_key(self, order=None):
return self.class_key(), (1, (str(self),)), S.One.sort_key(), S.One
def _eval_simplify(self, **kwargs):
return self
@property
def _sorted_args(self):
# this is here as a safeguard against accidentally using _sorted_args
# on Atoms -- they cannot be rebuilt as atom.func(*atom._sorted_args)
# since there are no args. So the calling routine should be checking
# to see that this property is not called for Atoms.
raise AttributeError('Atoms have no args. It might be necessary'
' to make a check for Atoms in the calling code.')
def _aresame(a, b):
"""Return True if a and b are structurally the same, else False.
Examples
========
In SymPy (as in Python) two numbers compare the same if they
have the same underlying base-2 representation even though
they may not be the same type:
>>> from sympy import S
>>> 2.0 == S(2)
True
>>> 0.5 == S.Half
True
This routine was written to provide a query for such cases that
would give false when the types do not match:
>>> from sympy.core.basic import _aresame
>>> _aresame(S(2.0), S(2))
False
"""
from .numbers import Number
from .function import AppliedUndef, UndefinedFunction as UndefFunc
if isinstance(a, Number) and isinstance(b, Number):
return a == b and a.__class__ == b.__class__
for i, j in zip_longest(preorder_traversal(a), preorder_traversal(b)):
if i != j or type(i) != type(j):
if ((isinstance(i, UndefFunc) and isinstance(j, UndefFunc)) or
(isinstance(i, AppliedUndef) and isinstance(j, AppliedUndef))):
if i.class_key() != j.class_key():
return False
else:
return False
return True
def _atomic(e, recursive=False):
"""Return atom-like quantities as far as substitution is
concerned: Derivatives, Functions and Symbols. Don't
return any 'atoms' that are inside such quantities unless
they also appear outside, or unless `recursive` is True.
Examples
========
>>> from sympy import Derivative, Function, cos
>>> from sympy.abc import x, y
>>> from sympy.core.basic import _atomic
>>> f = Function('f')
>>> _atomic(x + y)
{x, y}
>>> _atomic(x + f(y))
{x, f(y)}
>>> _atomic(Derivative(f(x), x) + cos(x) + y)
{y, cos(x), Derivative(f(x), x)}
"""
from sympy import Derivative, Function, Symbol
pot = preorder_traversal(e)
seen = set()
if isinstance(e, Basic):
free = getattr(e, "free_symbols", None)
if free is None:
return {e}
else:
return set()
atoms = set()
for p in pot:
if p in seen:
pot.skip()
continue
seen.add(p)
if isinstance(p, Symbol) and p in free:
atoms.add(p)
elif isinstance(p, (Derivative, Function)):
if not recursive:
pot.skip()
atoms.add(p)
return atoms
class preorder_traversal:
"""
Do a pre-order traversal of a tree.
This iterator recursively yields nodes that it has visited in a pre-order
fashion. That is, it yields the current node then descends through the
tree depth-first to yield all of a node's children's pre-order
traversal.
For an expression, the order of the traversal depends on the order of
.args, which in many cases can be arbitrary.
Parameters
==========
node : sympy expression
The expression to traverse.
keys : (default None) sort key(s)
The key(s) used to sort args of Basic objects. When None, args of Basic
objects are processed in arbitrary order. If key is defined, it will
be passed along to ordered() as the only key(s) to use to sort the
arguments; if ``key`` is simply True then the default keys of ordered
will be used.
Yields
======
subtree : sympy expression
All of the subtrees in the tree.
Examples
========
>>> from sympy import symbols
>>> from sympy.core.basic import preorder_traversal
>>> x, y, z = symbols('x y z')
The nodes are returned in the order that they are encountered unless key
is given; simply passing key=True will guarantee that the traversal is
unique.
>>> list(preorder_traversal((x + y)*z, keys=None)) # doctest: +SKIP
[z*(x + y), z, x + y, y, x]
>>> list(preorder_traversal((x + y)*z, keys=True))
[z*(x + y), z, x + y, x, y]
"""
def __init__(self, node, keys=None):
self._skip_flag = False
self._pt = self._preorder_traversal(node, keys)
def _preorder_traversal(self, node, keys):
yield node
if self._skip_flag:
self._skip_flag = False
return
if isinstance(node, Basic):
if not keys and hasattr(node, '_argset'):
# LatticeOp keeps args as a set. We should use this if we
# don't care about the order, to prevent unnecessary sorting.
args = node._argset
else:
args = node.args
if keys:
if keys != True:
args = ordered(args, keys, default=False)
else:
args = ordered(args)
for arg in args:
yield from self._preorder_traversal(arg, keys)
elif iterable(node):
for item in node:
yield from self._preorder_traversal(item, keys)
def skip(self):
"""
Skip yielding current node's (last yielded node's) subtrees.
Examples
========
>>> from sympy.core import symbols
>>> from sympy.core.basic import preorder_traversal
>>> x, y, z = symbols('x y z')
>>> pt = preorder_traversal((x+y*z)*z)
>>> for i in pt:
... print(i)
... if i == x+y*z:
... pt.skip()
z*(x + y*z)
z
x + y*z
"""
self._skip_flag = True
def __next__(self):
return next(self._pt)
def __iter__(self):
return self
def _make_find_query(query):
"""Convert the argument of Basic.find() into a callable"""
try:
query = _sympify(query)
except SympifyError:
pass
if isinstance(query, type):
return lambda expr: isinstance(expr, query)
elif isinstance(query, Basic):
return lambda expr: expr.match(query) is not None
return query
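# --- Illustrative usage (not part of the original module) -------------------
# A minimal sketch, assuming a standard SymPy installation, of the query
# helpers defined above: replace() with map=True also returns the mapping
# built by rec_replace(), while find() and count() both go through
# _make_find_query().
#
#     from sympy import sin, cos, Wild
#     from sympy.abc import x, y
#
#     expr = sin(x) + sin(y)
#     new, mapping = expr.replace(sin, cos, map=True)
#     # new     -> cos(x) + cos(y)
#     # mapping -> {sin(x): cos(x), sin(y): cos(y)}
#     expr.find(sin(Wild('a')))    # {sin(x), sin(y)}
#     expr.count(sin(Wild('a')))   # 2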
| 32.558297
| 103
| 0.541636
|
622ad4b4f05351c221876743af67071d37ed4608
| 1,098
|
py
|
Python
|
app/widgets/autotooltipdelegate.py
|
apardyl/kraksat-receiver
|
9d9f17853e2a19c657a096f0dc395142df29cc98
|
[
"MIT"
] | 1
|
2019-10-01T19:04:28.000Z
|
2019-10-01T19:04:28.000Z
|
app/widgets/autotooltipdelegate.py
|
apardyl/kraksat-receiver
|
9d9f17853e2a19c657a096f0dc395142df29cc98
|
[
"MIT"
] | null | null | null |
app/widgets/autotooltipdelegate.py
|
apardyl/kraksat-receiver
|
9d9f17853e2a19c657a096f0dc395142df29cc98
|
[
"MIT"
] | null | null | null |
import html
from PyQt5.QtCore import QEvent, Qt
from PyQt5.QtWidgets import QStyledItemDelegate, QToolTip
class AutoToolTipDelegate(QStyledItemDelegate):
"""
Item delegate that automatically displays a ToolTip for an item if it is
too long to be displayed.
"""
def helpEvent(self, event, view, option, index):
if not (event and view):
return False
if event.type() == QEvent.ToolTip and not index.data(Qt.ToolTipRole):
rect = view.visualRect(index)
size = self.sizeHint(option, index)
# Compare actual cell width and text width
if rect.width() < size.width():
text = index.data(Qt.DisplayRole)
QToolTip.showText(event.globalPos(),
'<div>{}</div>'.format(html.escape(text)),
view)
return True
if not super().helpEvent(event, view, option, index):
QToolTip.hideText()
return True
return super().helpEvent(event, view, option, index)
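# --- Illustrative usage (not part of the original module) -------------------
# A minimal sketch, assuming an existing QApplication and a model already set
# on the view; installing the delegate is all that is needed, since
# helpEvent() compares the visual rect with the size hint for each cell.
#
#     from PyQt5.QtWidgets import QTableView
#
#     view = QTableView()
#     view.setItemDelegate(AutoToolTipDelegate(view))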
| 32.294118
| 77
| 0.577413
|
86157feefd3062dbbce8e21d86871d2d51df6771
| 7,126
|
py
|
Python
|
lib/datasets/imdb.py
|
svebk/py-faster-rcnn
|
1d0c40c42930f8e89634c057a0ed902aace395bd
|
[
"BSD-2-Clause"
] | null | null | null |
lib/datasets/imdb.py
|
svebk/py-faster-rcnn
|
1d0c40c42930f8e89634c057a0ed902aace395bd
|
[
"BSD-2-Clause"
] | null | null | null |
lib/datasets/imdb.py
|
svebk/py-faster-rcnn
|
1d0c40c42930f8e89634c057a0ed902aace395bd
|
[
"BSD-2-Clause"
] | null | null | null |
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import os
import os.path as osp
import PIL
from utils.cython_bbox import bbox_overlaps
import numpy as np
import scipy.sparse
from fast_rcnn.config import cfg
class imdb(object):
"""Image database."""
def __init__(self, name):
self._name = name
self._num_classes = 0
self._classes = []
self._image_index = []
self._obj_proposer = 'selective_search'
self._roidb = None
self._roidb_handler = self.default_roidb
# Use this dict for storing dataset specific config options
self.config = {}
@property
def name(self):
return self._name
@property
def num_classes(self):
return len(self._classes)
@property
def classes(self):
return self._classes
@property
def image_index(self):
return self._image_index
@property
def roidb_handler(self):
return self._roidb_handler
@roidb_handler.setter
def roidb_handler(self, val):
self._roidb_handler = val
def set_proposal_method(self, method):
method = eval('self.' + method + '_roidb')
self.roidb_handler = method
@property
def roidb(self):
# A roidb is a list of dictionaries, each with the following keys:
# boxes
# gt_overlaps
# gt_classes
# flipped
if self._roidb is not None:
return self._roidb
self._roidb = self.roidb_handler()
return self._roidb
@property
def cache_path(self):
cache_path = osp.abspath(osp.join(cfg.DATA_DIR, 'cache'))
if not os.path.exists(cache_path):
os.makedirs(cache_path)
return cache_path
@property
def num_images(self):
return len(self.image_index)
def image_path_at(self, i):
raise NotImplementedError
def default_roidb(self):
raise NotImplementedError
def evaluate_detections(self, all_boxes, output_dir=None):
"""
all_boxes is a list of length number-of-classes.
Each list element is a list of length number-of-images.
Each of those list elements is either an empty list []
or a numpy array of detection.
all_boxes[class][image] = [] or np.array of shape #dets x 5
"""
raise NotImplementedError
def append_flipped_images(self):
num_images = self.num_images
widths = [PIL.Image.open(self.image_path_at(i)).size[0]
for i in xrange(num_images)]
for i in xrange(num_images):
boxes = self.roidb[i]['boxes'].copy()
oldx1 = boxes[:, 0].copy()
oldx2 = boxes[:, 2].copy()
boxes[:, 0] = widths[i] - oldx2 - 1
boxes[:, 2] = widths[i] - oldx1 - 1
assert (boxes[:, 2] >= boxes[:, 0]).all()
entry = {'boxes' : boxes,
'gt_overlaps' : self.roidb[i]['gt_overlaps'],
'gt_classes' : self.roidb[i]['gt_classes'],
'flipped' : True}
self.roidb.append(entry)
self._image_index = self._image_index * 2
def evaluate_recall(self, candidate_boxes=None, ar_thresh=0.5):
# Record max overlap value for each gt box
# Return vector of overlap values
gt_overlaps = np.zeros(0)
for i in xrange(self.num_images):
gt_inds = np.where(self.roidb[i]['gt_classes'] > 0)[0]
gt_boxes = self.roidb[i]['boxes'][gt_inds, :]
if candidate_boxes is None:
non_gt_inds = np.where(self.roidb[i]['gt_classes'] == 0)[0]
boxes = self.roidb[i]['boxes'][non_gt_inds, :]
else:
boxes = candidate_boxes[i]
if boxes.shape[0] == 0:
continue
overlaps = bbox_overlaps(boxes.astype(np.float),
gt_boxes.astype(np.float))
# gt_overlaps = np.hstack((gt_overlaps, overlaps.max(axis=0)))
_gt_overlaps = np.zeros((gt_boxes.shape[0]))
for j in xrange(gt_boxes.shape[0]):
argmax_overlaps = overlaps.argmax(axis=0)
max_overlaps = overlaps.max(axis=0)
gt_ind = max_overlaps.argmax()
gt_ovr = max_overlaps.max()
assert(gt_ovr >= 0)
box_ind = argmax_overlaps[gt_ind]
_gt_overlaps[j] = overlaps[box_ind, gt_ind]
assert(_gt_overlaps[j] == gt_ovr)
overlaps[box_ind, :] = -1
overlaps[:, gt_ind] = -1
gt_overlaps = np.hstack((gt_overlaps, _gt_overlaps))
num_pos = gt_overlaps.size
gt_overlaps = np.sort(gt_overlaps)
step = 0.001
thresholds = np.minimum(np.arange(0.5, 1.0 + step, step), 1.0)
recalls = np.zeros_like(thresholds)
for i, t in enumerate(thresholds):
recalls[i] = (gt_overlaps >= t).sum() / float(num_pos)
ar = 2 * np.trapz(recalls, thresholds)
return ar, gt_overlaps, recalls, thresholds
def create_roidb_from_box_list(self, box_list, gt_roidb):
assert len(box_list) == self.num_images, \
'Number of boxes must match number of ground-truth images'
roidb = []
for i in xrange(self.num_images):
boxes = box_list[i]
num_boxes = boxes.shape[0]
overlaps = np.zeros((num_boxes, self.num_classes), dtype=np.float32)
if gt_roidb is not None:
gt_boxes = gt_roidb[i]['boxes']
gt_classes = gt_roidb[i]['gt_classes']
gt_overlaps = bbox_overlaps(boxes.astype(np.float),
gt_boxes.astype(np.float))
argmaxes = gt_overlaps.argmax(axis=1)
maxes = gt_overlaps.max(axis=1)
I = np.where(maxes > 0)[0]
overlaps[I, gt_classes[argmaxes[I]]] = maxes[I]
overlaps = scipy.sparse.csr_matrix(overlaps)
roidb.append({'boxes' : boxes,
'gt_classes' : np.zeros((num_boxes,),
dtype=np.int32),
'gt_overlaps' : overlaps,
'flipped' : False})
return roidb
@staticmethod
def merge_roidbs(a, b):
assert len(a) == len(b)
for i in xrange(len(a)):
a[i]['boxes'] = np.vstack((a[i]['boxes'], b[i]['boxes']))
a[i]['gt_classes'] = np.hstack((a[i]['gt_classes'],
b[i]['gt_classes']))
a[i]['gt_overlaps'] = scipy.sparse.vstack([a[i]['gt_overlaps'],
b[i]['gt_overlaps']])
return a
def competition_mode(self, on):
"""Turn competition mode on or off."""
pass
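# --- Illustrative subclass (not part of the original module) ----------------
# A minimal sketch of the hooks a concrete dataset must fill in; the paths,
# class names and the single ground-truth box below are hypothetical.
#
#     class toy_imdb(imdb):
#         def __init__(self):
#             imdb.__init__(self, 'toy')
#             self._classes = ['__background__', 'widget']
#             self._image_index = ['000001', '000002']
#
#         def image_path_at(self, i):
#             return osp.join('/data/toy/images', self._image_index[i] + '.jpg')
#
#         def default_roidb(self):
#             # one gt box (x1, y1, x2, y2) of class 1 per image
#             return [{'boxes': np.array([[10, 10, 50, 50]], dtype=np.uint16),
#                      'gt_classes': np.array([1], dtype=np.int32),
#                      'gt_overlaps': scipy.sparse.csr_matrix(
#                          np.array([[0., 1.]], dtype=np.float32)),
#                      'flipped': False}
#                     for _ in self._image_index]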
| 35.277228
| 80
| 0.544345
|
9283c4d9e5efbedd7adb3cfd219f84db2d0d7af2
| 520
|
py
|
Python
|
.buildozer/android/platform/build-armeabi-v7a/build/venv/lib/python3.7/site-packages/cython.py
|
VPetras/mobile-test-app
|
6708dade6873ae2fb1ecb13aa70662f95fb42dc6
|
[
"MIT"
] | 6,663
|
2015-01-02T06:06:43.000Z
|
2022-03-31T10:35:02.000Z
|
cython.py
|
holzschu/cython
|
8f6c7f707b28cb8e76ecbf5fcd089c6255dacdb8
|
[
"Apache-2.0"
] | 3,094
|
2015-01-01T15:44:13.000Z
|
2022-03-31T19:49:57.000Z
|
cython.py
|
scoder/cython
|
ddaaa7b8bfe9885b7bed432cd0a5ab8191d112cd
|
[
"Apache-2.0"
] | 1,425
|
2015-01-12T07:21:27.000Z
|
2022-03-30T14:10:40.000Z
|
#!/usr/bin/env python
#
# Cython -- Main Program, generic
#
if __name__ == '__main__':
import os
import sys
# Make sure we import the right Cython
cythonpath, _ = os.path.split(os.path.realpath(__file__))
sys.path.insert(0, cythonpath)
from Cython.Compiler.Main import main
main(command_line = 1)
else:
# Void cython.* directives.
from Cython.Shadow import *
## and bring in the __version__
from Cython import __version__
from Cython import load_ipython_extension
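# --- Illustrative usage (not part of the original module) -------------------
# Run as a script this behaves like the cython command line, e.g.
#
#     python cython.py -a example.pyx
#
# Imported as a module it only re-exports the pure-Python Cython.Shadow
# fallbacks together with __version__ and load_ipython_extension.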
| 20.8
| 61
| 0.684615
|
69913efb05036960daa0985af40721d5f3cb2e2f
| 483
|
py
|
Python
|
.jenkins/remove_invisible_code_block_from_ipynb.py
|
rgommers/tutorials
|
9341570d4d8ed2c77371eac3b8520f7038d731ee
|
[
"BSD-3-Clause"
] | 6,424
|
2017-01-18T17:57:30.000Z
|
2022-03-31T11:43:48.000Z
|
.jenkins/remove_invisible_code_block_from_ipynb.py
|
rgommers/tutorials
|
9341570d4d8ed2c77371eac3b8520f7038d731ee
|
[
"BSD-3-Clause"
] | 1,713
|
2017-01-18T18:50:08.000Z
|
2022-03-31T14:57:25.000Z
|
.jenkins/remove_invisible_code_block_from_ipynb.py
|
rgommers/tutorials
|
9341570d4d8ed2c77371eac3b8520f7038d731ee
|
[
"BSD-3-Clause"
] | 3,932
|
2017-01-18T21:11:46.000Z
|
2022-03-31T10:24:24.000Z
|
import sys
from bs4 import BeautifulSoup
ipynb_file_path = sys.argv[1]
output_file_path = sys.argv[2]
with open(ipynb_file_path, 'r', encoding='utf-8') as ipynb_file:
ipynb_lines = ipynb_file.readlines()
ipynb_out_lines = []
for line in ipynb_lines:
if not '%%%%%%INVISIBLE_CODE_BLOCK%%%%%%' in line:
ipynb_out_lines.append(line)
with open(output_file_path, "w", encoding='utf-8') as output_file:
for line in ipynb_out_lines:
output_file.write(line)
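# --- Illustrative usage (not part of the original module) -------------------
# The script takes the input and output notebook paths as positional
# arguments, e.g.
#
#     python remove_invisible_code_block_from_ipynb.py in.ipynb out.ipynb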
| 25.421053
| 66
| 0.714286
|
e3949948a19c0babfb11d1a390c1c6a5caed8216
| 2,405
|
py
|
Python
|
app/grandchallenge/products/templatetags/products_tags.py
|
nlessmann/grand-challenge.org
|
36abf6ccb40e2fc3fd3ff00e81deabd76f7e1ef8
|
[
"Apache-2.0"
] | 101
|
2018-04-11T14:48:04.000Z
|
2022-03-28T00:29:48.000Z
|
app/grandchallenge/products/templatetags/products_tags.py
|
nlessmann/grand-challenge.org
|
36abf6ccb40e2fc3fd3ff00e81deabd76f7e1ef8
|
[
"Apache-2.0"
] | 1,733
|
2018-03-21T11:56:16.000Z
|
2022-03-31T14:58:30.000Z
|
app/grandchallenge/products/templatetags/products_tags.py
|
nlessmann/grand-challenge.org
|
36abf6ccb40e2fc3fd3ff00e81deabd76f7e1ef8
|
[
"Apache-2.0"
] | 42
|
2018-06-08T05:49:07.000Z
|
2022-03-29T08:43:01.000Z
|
from django import template
from django.template.defaultfilters import stringfilter
from django.templatetags.static import static
from grandchallenge.products.models import Product
register = template.Library()
@register.inclusion_tag("products/partials/navbar.html", takes_context=True)
def navbar(context):
url = context.request.resolver_match.url_name
return {
"items": [
{
"url": "product-list",
"active": url in ["product-list", "product-detail"],
"title": "Products",
},
{
"url": "company-list",
"active": url in ["company-list", "company-detail"],
"title": "Companies",
},
{
"url": "blogs-list",
"active": url in ["blogs-list", "blogs-detail"],
"title": "Blogs",
},
{
"title": "About",
"active": url
in ["project-air", "about", "about-faq", "about-add-product"],
"subitems": [
{
"url": "about",
"active": url == "about",
"title": "About",
},
{
"url": "about-faq",
"active": url == "about-faq",
"title": "FAQ",
},
{
"url": "about-add-product",
"active": url == "about-add-product",
"title": "Add your product",
},
{
"url": "project-air",
"active": url == "project-air",
"title": "Project AIR",
},
],
},
{
"url": "contact",
"active": url == "contact",
"title": "Contact",
},
],
}
@register.simple_tag
def icon(obj, field):
value = getattr(obj, field, None)
icon = Product.ICONS.get(value)
if icon:
return static(f"products/images/{icon}")
@register.filter
@stringfilter
def short(value, max_char):
if len(value) > max_char:
return value[:max_char].rsplit(" ", 1)[0] + " ..."
return value
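# --- Illustrative template usage (not part of the original module) ----------
# A minimal sketch; ``product.description`` and the ``ce_status`` field name
# are hypothetical and only show how the tags above would be called from a
# template that does ``{% load products_tags %}``:
#
#     {% navbar %}
#     <img src="{% icon product 'ce_status' %}" alt="">
#     <p>{{ product.description|short:120 }}</p>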
| 30.0625
| 78
| 0.412474
|
b028018661b0929da5b6a926d65bb750a50efe57
| 444
|
py
|
Python
|
oldtoronto/test/toronto_archives_test.py
|
patcon/oldto
|
44c099550a4e3cfafa85afbaebd3cd6c33325891
|
[
"Apache-2.0"
] | 22
|
2018-04-25T22:03:53.000Z
|
2021-07-13T18:43:23.000Z
|
oldtoronto/test/toronto_archives_test.py
|
patcon/oldto
|
44c099550a4e3cfafa85afbaebd3cd6c33325891
|
[
"Apache-2.0"
] | 17
|
2018-04-30T14:04:08.000Z
|
2022-02-13T19:52:44.000Z
|
oldtoronto/test/toronto_archives_test.py
|
patcon/oldto
|
44c099550a4e3cfafa85afbaebd3cd6c33325891
|
[
"Apache-2.0"
] | 7
|
2018-05-08T23:32:44.000Z
|
2022-01-27T17:49:30.000Z
|
from nose.tools import eq_
from oldtoronto.toronto_archives import get_citation_hierarchy # noqa
def test_get_citation_hierarchy():
eq_([
'Fonds 200, Series 123',
'Fonds 200'
], get_citation_hierarchy('Fonds 200, Series 123, Item 456'))
eq_([
'Fonds 257, Series 12, File 1983',
'Fonds 257, Series 12',
'Fonds 257'
], get_citation_hierarchy('Fonds 257, Series 12, File 1983, 52","'))
| 26.117647
| 73
| 0.646396
|
c2d312239b275568cb1b0a8a43d8134e332a8999
| 15,649
|
py
|
Python
|
google/ads/googleads/v4/services/services/campaign_draft_service/transports/grpc.py
|
batardo/google-ads-python
|
a39748521847e85138fca593f3be2681352ad024
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v4/services/services/campaign_draft_service/transports/grpc.py
|
batardo/google-ads-python
|
a39748521847e85138fca593f3be2681352ad024
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v4/services/services/campaign_draft_service/transports/grpc.py
|
batardo/google-ads-python
|
a39748521847e85138fca593f3be2681352ad024
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google import auth # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v4.resources.types import campaign_draft
from google.ads.googleads.v4.services.types import campaign_draft_service
from google.longrunning import operations_pb2 as operations # type: ignore
from .base import CampaignDraftServiceTransport, DEFAULT_CLIENT_INFO
class CampaignDraftServiceGrpcTransport(CampaignDraftServiceTransport):
"""gRPC backend transport for CampaignDraftService.
Service to manage campaign drafts.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if "operations_client" not in self.__dict__:
self.__dict__["operations_client"] = operations_v1.OperationsClient(
self.grpc_channel
)
# Return the client from cache.
return self.__dict__["operations_client"]
@property
def get_campaign_draft(
self,
) -> Callable[
[campaign_draft_service.GetCampaignDraftRequest],
campaign_draft.CampaignDraft,
]:
r"""Return a callable for the get campaign draft method over gRPC.
Returns the requested campaign draft in full detail.
Returns:
Callable[[~.GetCampaignDraftRequest],
~.CampaignDraft]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_campaign_draft" not in self._stubs:
self._stubs["get_campaign_draft"] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v4.services.CampaignDraftService/GetCampaignDraft",
request_serializer=campaign_draft_service.GetCampaignDraftRequest.serialize,
response_deserializer=campaign_draft.CampaignDraft.deserialize,
)
return self._stubs["get_campaign_draft"]
@property
def mutate_campaign_drafts(
self,
) -> Callable[
[campaign_draft_service.MutateCampaignDraftsRequest],
campaign_draft_service.MutateCampaignDraftsResponse,
]:
r"""Return a callable for the mutate campaign drafts method over gRPC.
Creates, updates, or removes campaign drafts.
Operation statuses are returned.
Returns:
Callable[[~.MutateCampaignDraftsRequest],
~.MutateCampaignDraftsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mutate_campaign_drafts" not in self._stubs:
self._stubs[
"mutate_campaign_drafts"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v4.services.CampaignDraftService/MutateCampaignDrafts",
request_serializer=campaign_draft_service.MutateCampaignDraftsRequest.serialize,
response_deserializer=campaign_draft_service.MutateCampaignDraftsResponse.deserialize,
)
return self._stubs["mutate_campaign_drafts"]
@property
def promote_campaign_draft(
self,
) -> Callable[
[campaign_draft_service.PromoteCampaignDraftRequest],
operations.Operation,
]:
r"""Return a callable for the promote campaign draft method over gRPC.
Promotes the changes in a draft back to the base campaign.
This method returns a Long Running Operation (LRO) indicating if
the Promote is done. Use [Operations.GetOperation] to poll the
LRO until it is done. Only a done status is returned in the
response. See the status in the Campaign Draft resource to
determine if the promotion was successful. If the LRO failed,
use
[CampaignDraftService.ListCampaignDraftAsyncErrors][google.ads.googleads.v4.services.CampaignDraftService.ListCampaignDraftAsyncErrors]
to view the list of error reasons.
Returns:
Callable[[~.PromoteCampaignDraftRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "promote_campaign_draft" not in self._stubs:
self._stubs[
"promote_campaign_draft"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v4.services.CampaignDraftService/PromoteCampaignDraft",
request_serializer=campaign_draft_service.PromoteCampaignDraftRequest.serialize,
response_deserializer=operations.Operation.FromString,
)
return self._stubs["promote_campaign_draft"]
@property
def list_campaign_draft_async_errors(
self,
) -> Callable[
[campaign_draft_service.ListCampaignDraftAsyncErrorsRequest],
campaign_draft_service.ListCampaignDraftAsyncErrorsResponse,
]:
r"""Return a callable for the list campaign draft async
errors method over gRPC.
Returns all errors that occurred during CampaignDraft
promote. Throws an error if called before campaign draft
is promoted. Supports standard list paging.
Returns:
Callable[[~.ListCampaignDraftAsyncErrorsRequest],
~.ListCampaignDraftAsyncErrorsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_campaign_draft_async_errors" not in self._stubs:
self._stubs[
"list_campaign_draft_async_errors"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v4.services.CampaignDraftService/ListCampaignDraftAsyncErrors",
request_serializer=campaign_draft_service.ListCampaignDraftAsyncErrorsRequest.serialize,
response_deserializer=campaign_draft_service.ListCampaignDraftAsyncErrorsResponse.deserialize,
)
return self._stubs["list_campaign_draft_async_errors"]
__all__ = ("CampaignDraftServiceGrpcTransport",)
| 42.409214
| 143
| 0.638635
|
a973ea72975b1781675b132cd22a5699afca9e75
| 1,641
|
py
|
Python
|
image_classification/models/cnn.py
|
AmanDaVinci/research
|
05bbca2fce13ab5d06e43e4f5e0309a87b467d43
|
[
"MIT"
] | null | null | null |
image_classification/models/cnn.py
|
AmanDaVinci/research
|
05bbca2fce13ab5d06e43e4f5e0309a87b467d43
|
[
"MIT"
] | null | null | null |
image_classification/models/cnn.py
|
AmanDaVinci/research
|
05bbca2fce13ab5d06e43e4f5e0309a87b467d43
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
from typing import Any, List, Tuple, Dict, Optional, Callable
class CNN(nn.Module):
def __init__(self, in_channels: int, out_dim: int,
device: torch.device, **kwargs):
super().__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels, out_channels=32, kernel_size=3, padding=1),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
self.conv2 = nn.Sequential(
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d(2)
)
self.fc1 = nn.Linear(in_features=64*6*6, out_features=600)
self.drop = nn.Dropout2d(0.25)
self.fc2 = nn.Linear(in_features=600, out_features=120)
self.fc3 = nn.Linear(in_features=120, out_features=out_dim)
self.criterion = nn.CrossEntropyLoss()
self.device = device
self.to(device)
def forward(self, x: torch.Tensor) -> torch.Tensor:
out = self.conv1(x)
out = self.conv2(out)
out = out.view(out.size(0), -1)
out = self.fc1(out)
out = self.drop(out)
out = self.fc2(out)
out = self.fc3(out)
return out
def step(self, batch: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, float]:
x, y = batch
x = x.to(self.device)
y = y.to(self.device)  # labels must live on the same device as the model
y_pred = self.forward(x)
loss = self.criterion(y_pred, y)
accuracy = (y_pred.argmax(axis=1)==y).float().mean().item()
return loss, accuracy
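# --- Illustrative usage (not part of the original module) -------------------
# A minimal sketch with random dummy data. fc1 expects 64*6*6 flattened
# features, which corresponds to 28x28 inputs (28 -> pool 14 -> conv 12 ->
# pool 6), e.g. a FashionMNIST-like dataset with in_channels=1.
#
#     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#     model = CNN(in_channels=1, out_dim=10, device=device)
#     x = torch.randn(8, 1, 28, 28)
#     y = torch.randint(0, 10, (8,))
#     loss, acc = model.step((x, y))
#     loss.backward()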
| 34.914894
| 91
| 0.573431
|
adc6e3f448d9840e2d8f4186157680cec57e6c4b
| 5,397
|
py
|
Python
|
cinder/volume/drivers/netapp/dataontap/fc_cmode.py
|
alexisries/openstack-cinder
|
7cc6e45c5ddb8bf771bdb01b867628e41761ae11
|
[
"Apache-2.0"
] | 2
|
2019-05-24T14:13:50.000Z
|
2019-05-24T14:21:13.000Z
|
cinder/volume/drivers/netapp/dataontap/fc_cmode.py
|
vexata/cinder
|
7b84c0842b685de7ee012acec40fb4064edde5e9
|
[
"Apache-2.0"
] | 3
|
2020-03-02T01:36:30.000Z
|
2021-12-13T20:27:46.000Z
|
cinder/volume/drivers/netapp/dataontap/fc_cmode.py
|
vexata/cinder
|
7b84c0842b685de7ee012acec40fb4064edde5e9
|
[
"Apache-2.0"
] | 1
|
2020-03-02T01:32:26.000Z
|
2020-03-02T01:32:26.000Z
|
# Copyright (c) - 2014, Clinton Knight. All rights reserved.
# Copyright (c) - 2016 Mike Rooney. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for NetApp Data ONTAP FibreChannel storage systems.
"""
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.netapp.dataontap import block_cmode
from cinder.zonemanager import utils as fczm_utils
@interface.volumedriver
class NetAppCmodeFibreChannelDriver(driver.BaseVD,
driver.ManageableVD):
"""NetApp C-mode FibreChannel volume driver."""
DRIVER_NAME = 'NetApp_FibreChannel_Cluster_direct'
# ThirdPartySystems wiki page
CI_WIKI_NAME = "NetApp_CI"
VERSION = block_cmode.NetAppBlockStorageCmodeLibrary.VERSION
def __init__(self, *args, **kwargs):
super(NetAppCmodeFibreChannelDriver, self).__init__(*args, **kwargs)
self.library = block_cmode.NetAppBlockStorageCmodeLibrary(
self.DRIVER_NAME, 'FC', **kwargs)
def do_setup(self, context):
self.library.do_setup(context)
def check_for_setup_error(self):
self.library.check_for_setup_error()
def create_volume(self, volume):
return self.library.create_volume(volume)
def create_volume_from_snapshot(self, volume, snapshot):
return self.library.create_volume_from_snapshot(volume, snapshot)
def create_cloned_volume(self, volume, src_vref):
return self.library.create_cloned_volume(volume, src_vref)
def delete_volume(self, volume):
self.library.delete_volume(volume)
def create_snapshot(self, snapshot):
self.library.create_snapshot(snapshot)
def delete_snapshot(self, snapshot):
self.library.delete_snapshot(snapshot)
def get_volume_stats(self, refresh=False):
return self.library.get_volume_stats(refresh,
self.get_filter_function(),
self.get_goodness_function())
def get_default_filter_function(self):
return self.library.get_default_filter_function()
def get_default_goodness_function(self):
return self.library.get_default_goodness_function()
def extend_volume(self, volume, new_size):
self.library.extend_volume(volume, new_size)
def ensure_export(self, context, volume):
return self.library.ensure_export(context, volume)
def create_export(self, context, volume, connector):
return self.library.create_export(context, volume)
def remove_export(self, context, volume):
self.library.remove_export(context, volume)
def manage_existing(self, volume, existing_ref):
return self.library.manage_existing(volume, existing_ref)
def manage_existing_get_size(self, volume, existing_ref):
return self.library.manage_existing_get_size(volume, existing_ref)
def unmanage(self, volume):
return self.library.unmanage(volume)
def initialize_connection(self, volume, connector):
conn_info = self.library.initialize_connection_fc(volume, connector)
fczm_utils.add_fc_zone(conn_info)
return conn_info
def terminate_connection(self, volume, connector, **kwargs):
conn_info = self.library.terminate_connection_fc(volume, connector,
**kwargs)
fczm_utils.remove_fc_zone(conn_info)
return conn_info
def get_pool(self, volume):
return self.library.get_pool(volume)
def create_group(self, context, group):
return self.library.create_group(group)
def delete_group(self, context, group, volumes):
return self.library.delete_group(group, volumes)
def update_group(self, context, group, add_volumes=None,
remove_volumes=None):
return self.library.update_group(group, add_volumes=add_volumes,
remove_volumes=remove_volumes)
def create_group_snapshot(self, context, group_snapshot, snapshots):
return self.library.create_group_snapshot(group_snapshot, snapshots)
def delete_group_snapshot(self, context, group_snapshot, snapshots):
return self.library.delete_group_snapshot(group_snapshot, snapshots)
def create_group_from_src(self, context, group, volumes,
group_snapshot=None, snapshots=None,
source_group=None, source_vols=None):
return self.library.create_group_from_src(
group, volumes, group_snapshot=group_snapshot, snapshots=snapshots,
source_group=source_group, source_vols=source_vols)
def failover_host(self, context, volumes, secondary_id=None, groups=None):
return self.library.failover_host(
context, volumes, secondary_id=secondary_id)
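# --- Illustrative configuration (not part of the original module) -----------
# A minimal cinder.conf backend sketch; hostnames and credentials are
# placeholders, and the option names are the standard NetApp driver options
# assumed to apply to this FibreChannel driver.
#
#     [netapp-fc]
#     volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver
#     netapp_storage_family = ontap_cluster
#     netapp_storage_protocol = fc
#     netapp_server_hostname = cluster-mgmt.example.com
#     netapp_login = admin
#     netapp_password = secret
#     netapp_vserver = svm1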
| 39.108696
| 79
| 0.698536
|
95308d4eeb07c6eeb3e00d7bd0014a65e611d07a
| 38,538
|
py
|
Python
|
tests/forms_tests/tests/test_input_formats.py
|
ericholscher/django
|
b9a90b371c90a987ed57f7a4a7cc1274c432b438
|
[
"BSD-3-Clause"
] | 1
|
2015-11-08T11:42:08.000Z
|
2015-11-08T11:42:08.000Z
|
tests/forms_tests/tests/test_input_formats.py
|
ericholscher/django
|
b9a90b371c90a987ed57f7a4a7cc1274c432b438
|
[
"BSD-3-Clause"
] | null | null | null |
tests/forms_tests/tests/test_input_formats.py
|
ericholscher/django
|
b9a90b371c90a987ed57f7a4a7cc1274c432b438
|
[
"BSD-3-Clause"
] | null | null | null |
from datetime import time, date, datetime
from django import forms
from django.test.utils import override_settings
from django.utils.translation import activate, deactivate
from django.test import SimpleTestCase
@override_settings(TIME_INPUT_FORMATS=["%I:%M:%S %p", "%I:%M %p"], USE_L10N=True)
class LocalizedTimeTests(SimpleTestCase):
def setUp(self):
# nl/formats.py has customized TIME_INPUT_FORMATS:
# ('%H:%M:%S', '%H.%M:%S', '%H.%M', '%H:%M')
activate('nl')
def tearDown(self):
deactivate()
def test_timeField(self):
"TimeFields can parse dates in the default format"
f = forms.TimeField()
# Parse a time in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM')
# Parse a time in a valid format, get a parsed result
result = f.clean('13:30:05')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip
text = f.widget._format_value(result)
self.assertEqual(text, '13:30:05')
# Parse a time in a valid, but non-default format, get a parsed result
result = f.clean('13:30')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "13:30:00")
# ISO formats are accepted, even if not specified in formats.py
result = f.clean('13:30:05.000155')
self.assertEqual(result, time(13, 30, 5, 155))
def test_localized_timeField(self):
"Localized TimeFields act as unlocalized widgets"
f = forms.TimeField(localize=True)
# Parse a time in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM')
# Parse a time in a valid format, get a parsed result
result = f.clean('13:30:05')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, '13:30:05')
# Parse a time in a valid format, get a parsed result
result = f.clean('13:30')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "13:30:00")
def test_timeField_with_inputformat(self):
"TimeFields with manually specified input formats can accept those formats"
f = forms.TimeField(input_formats=["%H.%M.%S", "%H.%M"])
# Parse a time in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM')
self.assertRaises(forms.ValidationError, f.clean, '13:30:05')
# Parse a time in a valid format, get a parsed result
result = f.clean('13.30.05')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "13:30:05")
# Parse a time in a valid format, get a parsed result
result = f.clean('13.30')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "13:30:00")
def test_localized_timeField_with_inputformat(self):
"Localized TimeFields with manually specified input formats can accept those formats"
f = forms.TimeField(input_formats=["%H.%M.%S", "%H.%M"], localize=True)
# Parse a time in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM')
self.assertRaises(forms.ValidationError, f.clean, '13:30:05')
# Parse a time in a valid format, get a parsed result
result = f.clean('13.30.05')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "13:30:05")
# Parse a time in a valid format, get a parsed result
result = f.clean('13.30')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "13:30:00")
@override_settings(TIME_INPUT_FORMATS=["%I:%M:%S %p", "%I:%M %p"])
class CustomTimeInputFormatsTests(SimpleTestCase):
def test_timeField(self):
"TimeFields can parse dates in the default format"
f = forms.TimeField()
# Parse a time in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '13:30:05')
# Parse a time in a valid format, get a parsed result
result = f.clean('1:30:05 PM')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip
text = f.widget._format_value(result)
self.assertEqual(text, '01:30:05 PM')
# Parse a time in a valid, but non-default format, get a parsed result
result = f.clean('1:30 PM')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "01:30:00 PM")
def test_localized_timeField(self):
"Localized TimeFields act as unlocalized widgets"
f = forms.TimeField(localize=True)
# Parse a time in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '13:30:05')
# Parse a time in a valid format, get a parsed result
result = f.clean('1:30:05 PM')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, '01:30:05 PM')
# Parse a time in a valid format, get a parsed result
result = f.clean('01:30 PM')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "01:30:00 PM")
def test_timeField_with_inputformat(self):
"TimeFields with manually specified input formats can accept those formats"
f = forms.TimeField(input_formats=["%H.%M.%S", "%H.%M"])
# Parse a time in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM')
self.assertRaises(forms.ValidationError, f.clean, '13:30:05')
# Parse a time in a valid format, get a parsed result
result = f.clean('13.30.05')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "01:30:05 PM")
# Parse a time in a valid format, get a parsed result
result = f.clean('13.30')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "01:30:00 PM")
def test_localized_timeField_with_inputformat(self):
"Localized TimeFields with manually specified input formats can accept those formats"
f = forms.TimeField(input_formats=["%H.%M.%S", "%H.%M"], localize=True)
# Parse a time in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM')
self.assertRaises(forms.ValidationError, f.clean, '13:30:05')
# Parse a time in a valid format, get a parsed result
result = f.clean('13.30.05')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "01:30:05 PM")
# Parse a time in a valid format, get a parsed result
result = f.clean('13.30')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "01:30:00 PM")
class SimpleTimeFormatTests(SimpleTestCase):
def test_timeField(self):
"TimeFields can parse dates in the default format"
f = forms.TimeField()
# Parse a time in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM')
# Parse a time in a valid format, get a parsed result
result = f.clean('13:30:05')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "13:30:05")
# Parse a time in a valid, but non-default format, get a parsed result
result = f.clean('13:30')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "13:30:00")
def test_localized_timeField(self):
"Localized TimeFields in a non-localized environment act as unlocalized widgets"
f = forms.TimeField()
# Parse a time in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM')
# Parse a time in a valid format, get a parsed result
result = f.clean('13:30:05')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "13:30:05")
# Parse a time in a valid format, get a parsed result
result = f.clean('13:30')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "13:30:00")
def test_timeField_with_inputformat(self):
"TimeFields with manually specified input formats can accept those formats"
f = forms.TimeField(input_formats=["%I:%M:%S %p", "%I:%M %p"])
# Parse a time in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '13:30:05')
# Parse a time in a valid format, get a parsed result
result = f.clean('1:30:05 PM')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "13:30:05")
# Parse a time in a valid format, get a parsed result
result = f.clean('1:30 PM')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "13:30:00")
def test_localized_timeField_with_inputformat(self):
"Localized TimeFields with manually specified input formats can accept those formats"
f = forms.TimeField(input_formats=["%I:%M:%S %p", "%I:%M %p"], localize=True)
# Parse a time in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '13:30:05')
# Parse a time in a valid format, get a parsed result
result = f.clean('1:30:05 PM')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "13:30:05")
# Parse a time in a valid format, get a parsed result
result = f.clean('1:30 PM')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "13:30:00")
@override_settings(DATE_INPUT_FORMATS=["%d/%m/%Y", "%d-%m-%Y"], USE_L10N=True)
class LocalizedDateTests(SimpleTestCase):
def setUp(self):
activate('de')
def tearDown(self):
deactivate()
def test_dateField(self):
"DateFields can parse dates in the default format"
f = forms.DateField()
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '21/12/2010')
# ISO formats are accepted, even if not specified in formats.py
self.assertEqual(f.clean('2010-12-21'), date(2010, 12, 21))
# Parse a date in a valid format, get a parsed result
result = f.clean('21.12.2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip
text = f.widget._format_value(result)
self.assertEqual(text, '21.12.2010')
# Parse a date in a valid, but non-default format, get a parsed result
result = f.clean('21.12.10')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010")
def test_localized_dateField(self):
"Localized DateFields act as unlocalized widgets"
f = forms.DateField(localize=True)
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '21/12/2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('21.12.2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, '21.12.2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('21.12.10')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010")
def test_dateField_with_inputformat(self):
"DateFields with manually specified input formats can accept those formats"
f = forms.DateField(input_formats=["%m.%d.%Y", "%m-%d-%Y"])
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '2010-12-21')
self.assertRaises(forms.ValidationError, f.clean, '21/12/2010')
self.assertRaises(forms.ValidationError, f.clean, '21.12.2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('12.21.2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010")
# Parse a date in a valid format, get a parsed result
result = f.clean('12-21-2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010")
def test_localized_dateField_with_inputformat(self):
"Localized DateFields with manually specified input formats can accept those formats"
f = forms.DateField(input_formats=["%m.%d.%Y", "%m-%d-%Y"], localize=True)
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '2010-12-21')
self.assertRaises(forms.ValidationError, f.clean, '21/12/2010')
self.assertRaises(forms.ValidationError, f.clean, '21.12.2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('12.21.2010')
self.assertEqual(result, date(2010, 12, 21))
        # Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010")
# Parse a date in a valid format, get a parsed result
result = f.clean('12-21-2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010")
@override_settings(DATE_INPUT_FORMATS=["%d.%m.%Y", "%d-%m-%Y"])
class CustomDateInputFormatsTests(SimpleTestCase):
def test_dateField(self):
"DateFields can parse dates in the default format"
f = forms.DateField()
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '2010-12-21')
# Parse a date in a valid format, get a parsed result
result = f.clean('21.12.2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip
text = f.widget._format_value(result)
self.assertEqual(text, '21.12.2010')
# Parse a date in a valid, but non-default format, get a parsed result
result = f.clean('21-12-2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010")
def test_localized_dateField(self):
"Localized DateFields act as unlocalized widgets"
f = forms.DateField(localize=True)
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '2010-12-21')
# Parse a date in a valid format, get a parsed result
result = f.clean('21.12.2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, '21.12.2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('21-12-2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010")
def test_dateField_with_inputformat(self):
"DateFields with manually specified input formats can accept those formats"
f = forms.DateField(input_formats=["%m.%d.%Y", "%m-%d-%Y"])
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '21.12.2010')
self.assertRaises(forms.ValidationError, f.clean, '2010-12-21')
# Parse a date in a valid format, get a parsed result
result = f.clean('12.21.2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010")
# Parse a date in a valid format, get a parsed result
result = f.clean('12-21-2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010")
def test_localized_dateField_with_inputformat(self):
"Localized DateFields with manually specified input formats can accept those formats"
f = forms.DateField(input_formats=["%m.%d.%Y", "%m-%d-%Y"], localize=True)
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '21.12.2010')
self.assertRaises(forms.ValidationError, f.clean, '2010-12-21')
# Parse a date in a valid format, get a parsed result
result = f.clean('12.21.2010')
self.assertEqual(result, date(2010, 12, 21))
        # Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010")
# Parse a date in a valid format, get a parsed result
result = f.clean('12-21-2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010")
class SimpleDateFormatTests(SimpleTestCase):
def test_dateField(self):
"DateFields can parse dates in the default format"
f = forms.DateField()
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '21.12.2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('2010-12-21')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "2010-12-21")
# Parse a date in a valid, but non-default format, get a parsed result
result = f.clean('12/21/2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "2010-12-21")
def test_localized_dateField(self):
"Localized DateFields in a non-localized environment act as unlocalized widgets"
f = forms.DateField()
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '21.12.2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('2010-12-21')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "2010-12-21")
# Parse a date in a valid format, get a parsed result
result = f.clean('12/21/2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "2010-12-21")
def test_dateField_with_inputformat(self):
"DateFields with manually specified input formats can accept those formats"
f = forms.DateField(input_formats=["%d.%m.%Y", "%d-%m-%Y"])
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '2010-12-21')
# Parse a date in a valid format, get a parsed result
result = f.clean('21.12.2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "2010-12-21")
# Parse a date in a valid format, get a parsed result
result = f.clean('21-12-2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "2010-12-21")
def test_localized_dateField_with_inputformat(self):
"Localized DateFields with manually specified input formats can accept those formats"
f = forms.DateField(input_formats=["%d.%m.%Y", "%d-%m-%Y"], localize=True)
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '2010-12-21')
# Parse a date in a valid format, get a parsed result
result = f.clean('21.12.2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "2010-12-21")
# Parse a date in a valid format, get a parsed result
result = f.clean('21-12-2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "2010-12-21")
@override_settings(DATETIME_INPUT_FORMATS=["%I:%M:%S %p %d/%m/%Y", "%I:%M %p %d-%m-%Y"], USE_L10N=True)
class LocalizedDateTimeTests(SimpleTestCase):
def setUp(self):
activate('de')
def tearDown(self):
deactivate()
def test_dateTimeField(self):
"DateTimeFields can parse dates in the default format"
f = forms.DateTimeField()
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM 21/12/2010')
# ISO formats are accepted, even if not specified in formats.py
self.assertEqual(f.clean('2010-12-21 13:30:05'), datetime(2010, 12, 21, 13, 30, 5))
# Parse a date in a valid format, get a parsed result
result = f.clean('21.12.2010 13:30:05')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip
text = f.widget._format_value(result)
self.assertEqual(text, '21.12.2010 13:30:05')
# Parse a date in a valid, but non-default format, get a parsed result
result = f.clean('21.12.2010 13:30')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010 13:30:00")
def test_localized_dateTimeField(self):
"Localized DateTimeFields act as unlocalized widgets"
f = forms.DateTimeField(localize=True)
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM 21/12/2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('21.12.2010 13:30:05')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, '21.12.2010 13:30:05')
# Parse a date in a valid format, get a parsed result
result = f.clean('21.12.2010 13:30')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010 13:30:00")
def test_dateTimeField_with_inputformat(self):
"DateTimeFields with manually specified input formats can accept those formats"
f = forms.DateTimeField(input_formats=["%H.%M.%S %m.%d.%Y", "%H.%M %m-%d-%Y"])
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '2010-12-21 13:30:05 13:30:05')
self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM 21/12/2010')
self.assertRaises(forms.ValidationError, f.clean, '13:30:05 21.12.2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('13.30.05 12.21.2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010 13:30:05")
# Parse a date in a valid format, get a parsed result
result = f.clean('13.30 12-21-2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010 13:30:00")
def test_localized_dateTimeField_with_inputformat(self):
"Localized DateTimeFields with manually specified input formats can accept those formats"
f = forms.DateTimeField(input_formats=["%H.%M.%S %m.%d.%Y", "%H.%M %m-%d-%Y"], localize=True)
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '2010-12-21 13:30:05')
self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM 21/12/2010')
self.assertRaises(forms.ValidationError, f.clean, '13:30:05 21.12.2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('13.30.05 12.21.2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
        # Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010 13:30:05")
# Parse a date in a valid format, get a parsed result
result = f.clean('13.30 12-21-2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "21.12.2010 13:30:00")
@override_settings(DATETIME_INPUT_FORMATS=["%I:%M:%S %p %d/%m/%Y", "%I:%M %p %d-%m-%Y"])
class CustomDateTimeInputFormatsTests(SimpleTestCase):
def test_dateTimeField(self):
"DateTimeFields can parse dates in the default format"
f = forms.DateTimeField()
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '2010-12-21 13:30:05')
# Parse a date in a valid format, get a parsed result
result = f.clean('1:30:05 PM 21/12/2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip
text = f.widget._format_value(result)
self.assertEqual(text, '01:30:05 PM 21/12/2010')
# Parse a date in a valid, but non-default format, get a parsed result
result = f.clean('1:30 PM 21-12-2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "01:30:00 PM 21/12/2010")
def test_localized_dateTimeField(self):
"Localized DateTimeFields act as unlocalized widgets"
f = forms.DateTimeField(localize=True)
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '2010-12-21 13:30:05')
# Parse a date in a valid format, get a parsed result
result = f.clean('1:30:05 PM 21/12/2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, '01:30:05 PM 21/12/2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('1:30 PM 21-12-2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "01:30:00 PM 21/12/2010")
def test_dateTimeField_with_inputformat(self):
"DateTimeFields with manually specified input formats can accept those formats"
f = forms.DateTimeField(input_formats=["%m.%d.%Y %H:%M:%S", "%m-%d-%Y %H:%M"])
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '13:30:05 21.12.2010')
self.assertRaises(forms.ValidationError, f.clean, '2010-12-21 13:30:05')
# Parse a date in a valid format, get a parsed result
result = f.clean('12.21.2010 13:30:05')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "01:30:05 PM 21/12/2010")
# Parse a date in a valid format, get a parsed result
result = f.clean('12-21-2010 13:30')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "01:30:00 PM 21/12/2010")
def test_localized_dateTimeField_with_inputformat(self):
"Localized DateTimeFields with manually specified input formats can accept those formats"
f = forms.DateTimeField(input_formats=["%m.%d.%Y %H:%M:%S", "%m-%d-%Y %H:%M"], localize=True)
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '13:30:05 21.12.2010')
self.assertRaises(forms.ValidationError, f.clean, '2010-12-21 13:30:05')
# Parse a date in a valid format, get a parsed result
result = f.clean('12.21.2010 13:30:05')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
        # Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "01:30:05 PM 21/12/2010")
# Parse a date in a valid format, get a parsed result
result = f.clean('12-21-2010 13:30')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "01:30:00 PM 21/12/2010")
class SimpleDateTimeFormatTests(SimpleTestCase):
def test_dateTimeField(self):
"DateTimeFields can parse dates in the default format"
f = forms.DateTimeField()
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '13:30:05 21.12.2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('2010-12-21 13:30:05')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "2010-12-21 13:30:05")
# Parse a date in a valid, but non-default format, get a parsed result
result = f.clean('12/21/2010 13:30:05')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "2010-12-21 13:30:05")
def test_localized_dateTimeField(self):
"Localized DateTimeFields in a non-localized environment act as unlocalized widgets"
f = forms.DateTimeField()
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '13:30:05 21.12.2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('2010-12-21 13:30:05')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "2010-12-21 13:30:05")
# Parse a date in a valid format, get a parsed result
result = f.clean('12/21/2010 13:30:05')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "2010-12-21 13:30:05")
def test_dateTimeField_with_inputformat(self):
"DateTimeFields with manually specified input formats can accept those formats"
f = forms.DateTimeField(input_formats=["%I:%M:%S %p %d.%m.%Y", "%I:%M %p %d-%m-%Y"])
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '2010-12-21 13:30:05')
# Parse a date in a valid format, get a parsed result
result = f.clean('1:30:05 PM 21.12.2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "2010-12-21 13:30:05")
# Parse a date in a valid format, get a parsed result
result = f.clean('1:30 PM 21-12-2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "2010-12-21 13:30:00")
def test_localized_dateTimeField_with_inputformat(self):
"Localized DateTimeFields with manually specified input formats can accept those formats"
f = forms.DateTimeField(input_formats=["%I:%M:%S %p %d.%m.%Y", "%I:%M %p %d-%m-%Y"], localize=True)
# Parse a date in an unaccepted format; get an error
self.assertRaises(forms.ValidationError, f.clean, '2010-12-21 13:30:05')
# Parse a date in a valid format, get a parsed result
result = f.clean('1:30:05 PM 21.12.2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget._format_value(result)
self.assertEqual(text, "2010-12-21 13:30:05")
# Parse a date in a valid format, get a parsed result
result = f.clean('1:30 PM 21-12-2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# Check that the parsed result does a round trip to default format
text = f.widget._format_value(result)
self.assertEqual(text, "2010-12-21 13:30:00")
| 44.449827
| 107
| 0.648892
|
63ca8ec0886983411f04824c6ca872fad9a0d548
| 399
|
py
|
Python
|
misitiotest/asgi.py
|
cristianllanca/django_local_library
|
ba278bef82d53014ae7a84056c91a3f2d24c0d9c
|
[
"MIT"
] | null | null | null |
misitiotest/asgi.py
|
cristianllanca/django_local_library
|
ba278bef82d53014ae7a84056c91a3f2d24c0d9c
|
[
"MIT"
] | null | null | null |
misitiotest/asgi.py
|
cristianllanca/django_local_library
|
ba278bef82d53014ae7a84056c91a3f2d24c0d9c
|
[
"MIT"
] | null | null | null |
"""
ASGI config for misitiotest project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'misitiotest.settings')
application = get_asgi_application()
| 23.470588
| 78
| 0.789474
|
fee36336c5ac71bf5d34da3a0e51aefe7e7e7ad4
| 280
|
py
|
Python
|
tests/conftest.py
|
sapporojones/eyeson_flask
|
f1e5e6a52f58d0bc9c977644d0f544c22b415bf0
|
[
"MIT"
] | 2
|
2021-01-14T05:35:41.000Z
|
2021-01-15T23:35:37.000Z
|
tests/conftest.py
|
sapporojones/eyeson_flask
|
f1e5e6a52f58d0bc9c977644d0f544c22b415bf0
|
[
"MIT"
] | 44
|
2021-07-09T23:15:14.000Z
|
2022-03-28T21:32:21.000Z
|
tests/conftest.py
|
sapporojones/eyeson_flask
|
f1e5e6a52f58d0bc9c977644d0f544c22b415bf0
|
[
"MIT"
] | 1
|
2021-01-14T03:43:17.000Z
|
2021-01-14T03:43:17.000Z
|
"""
Dummy conftest.py for eyeson_flask.
If you don't know what this is for, just leave it empty.
Read more about conftest.py under:
- https://docs.pytest.org/en/stable/fixture.html
- https://docs.pytest.org/en/stable/writing_plugins.html
"""
# import pytest
| 25.454545
| 60
| 0.692857
|
bc90d9fd53204f4187e38ac83ad6091fc1a51056
| 478
|
py
|
Python
|
day4/my_jinja2_ex1.py
|
grenn72/pynet-ons-feb19
|
5aff7dfa6a697214dc24818819a60b46a261d0d3
|
[
"Apache-2.0"
] | null | null | null |
day4/my_jinja2_ex1.py
|
grenn72/pynet-ons-feb19
|
5aff7dfa6a697214dc24818819a60b46a261d0d3
|
[
"Apache-2.0"
] | null | null | null |
day4/my_jinja2_ex1.py
|
grenn72/pynet-ons-feb19
|
5aff7dfa6a697214dc24818819a60b46a261d0d3
|
[
"Apache-2.0"
] | null | null | null |
import jinja2
bgp_vars = {
'local_as': 10,
'peer1_ip': '10.1.20.2',
'peer1_as': 20,
'peer2_ip': '10.1.30.2',
'peer2_as': 30,
}
bgp_template = """
router bgp {{ local_as }}
neighbor {{ peer1_ip }} remote-as {{ peer1_as }}
update-source loopback99
ebgp-multihop 2
address-family ipv4 unicast
neighbor {{ peer2_ip }} remote-as {{ peer2_as }}
address-family ipv4 unicast
"""
t = jinja2.Template(bgp_template)
print(t.render(**bgp_vars))
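# Hedged extension (not part of the original script): the same compiled template
# can be rendered for several routers in a loop; the extra variable sets below
# are assumptions.
more_bgp_vars = [
    {'local_as': 20, 'peer1_ip': '10.1.10.1', 'peer1_as': 10,
     'peer2_ip': '10.1.60.2', 'peer2_as': 60},
    {'local_as': 30, 'peer1_ip': '10.1.10.1', 'peer1_as': 10,
     'peer2_ip': '10.1.70.2', 'peer2_as': 70},
]
for device_vars in more_bgp_vars:
    print(t.render(**device_vars))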
| 19.12
| 50
| 0.629707
|
42cd337814a63ddad219e861d5abcf0973527b87
| 10,753
|
py
|
Python
|
tools/SDKTool/src/ui/tree/applications_tree/ui_explore_tree/explore_result.py
|
Passer-D/GameAISDK
|
a089330a30b7bfe1f6442258a12d8c0086240606
|
[
"Apache-2.0"
] | 1,210
|
2020-08-18T07:57:36.000Z
|
2022-03-31T15:06:05.000Z
|
tools/SDKTool/src/ui/tree/applications_tree/ui_explore_tree/explore_result.py
|
guokaiSama/GameAISDK
|
a089330a30b7bfe1f6442258a12d8c0086240606
|
[
"Apache-2.0"
] | 37
|
2020-08-24T02:48:38.000Z
|
2022-01-30T06:41:52.000Z
|
tools/SDKTool/src/ui/tree/applications_tree/ui_explore_tree/explore_result.py
|
guokaiSama/GameAISDK
|
a089330a30b7bfe1f6442258a12d8c0086240606
|
[
"Apache-2.0"
] | 275
|
2020-08-18T08:35:16.000Z
|
2022-03-31T15:06:07.000Z
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making GameAISDK available.
This source code file is licensed under the GNU General Public License Version 3.
For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package.
Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
"""
import os
import sys
import json
import math
import logging
import cv2
import numpy as np
from matplotlib.font_manager import FontProperties
import matplotlib.pyplot as plt
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QTableWidget, QTableWidgetItem, QHeaderView
from PyQt5.QtCore import Qt
from .graph import UIGraph, UIGRAPH
from ....canvas.ui_canvas import canvas
from ....dialog.tip_dialog import show_warning_tips
from ....main_window.tool_window import ui
from .....common.define import UMING_TTC
from .....common.utils import get_font
from ....utils import cvimg_to_qtimg
platform = sys.platform
plt.rcParams['font.sans-serif'] = get_font()  # ensure Chinese labels display correctly
plt.rcParams['axes.unicode_minus'] = False  # ensure minus signs display correctly
class ExploreResult(object):
def __init__(self, path=None):
self.__logger = logging.getLogger('sdktool')
# self.__canvas = canvas
self.__ui = ui
self.__image_list = []
self.__image_index = -1
self.__explore_ret_path = path
self.__table_widget = None
def set_path(self, path):
self.__explore_ret_path = path
@staticmethod
def load_image_file(ui_graph):
w, h = ui_graph.get_painter_size()
if w > 0 and h > 0:
max_w = max(canvas.geometry().width(), w)
max_h = max(canvas.geometry().height(), h)
img_data = cv2.imread("Resource/White.jpeg")
if img_data is None:
raise Exception('Resource/White.jpeg is not found')
img_data = cv2.resize(img_data, (max_w, max_h))
qtimg = cvimg_to_qtimg(img_data)
canvas.load_pixmap(qtimg)
canvas.add_ui_graph(ui_graph)
canvas.current_model.append(UIGRAPH)
canvas.update()
            # Improve display scaling; otherwise elements look very small when there are many UI elements
if canvas.scale < 1:
canvas.scale = 1.0
canvas.adjustSize()
canvas.update()
def ui_graph(self):
file_list = os.listdir(self.__explore_ret_path)
img_list = [item for item in file_list if item.split('.')[1] in ['jpg']]
json_list = [item for item in file_list if '.json' in item]
if 0 in [len(file_list), len(img_list), len(json_list)]:
show_warning_tips("files count wrong, img count {}, json count{}".format(len(img_list), len(json_list)))
return
image_label_dict = dict()
for item in img_list:
key = item.split('.')[0]
jsonfile = "{}.json".format(key)
if jsonfile in json_list:
image_label_dict[key] = dict()
image_label_dict[key]["image"] = item
image_label_dict[key]["label"] = jsonfile
self.__logger.info("images count %s, json count %s, pairs count %s", len(img_list),
len(json_list), len(image_label_dict.keys()))
ui_graph = UIGraph()
ui_graph.set_canvas_scale(canvas.get_scale())
# create graph
for key, value in image_label_dict.items():
with open("{0}/{1}".format(self.__explore_ret_path, value.get("label")), 'r') as ui_label_file:
content = json.load(ui_label_file)
label_list = content.get("labels")
cur_image = content.get("fileName")
if not label_list:
self.__logger.error("%s label is none", ui_label_file)
continue
for button in label_list:
next_ui = button.get("nextUI")
# labelName = button.get("label")
number = int(button.get("clickNum"))
ui_graph.add_node_button(cur_image, (button.get("x"), button.get("y"),
button.get("w"), button.get("h")), next_ui, number)
# if (nextUI is not '') and (labelName not in ['return']):
if next_ui != '':
# uiGraph.add_edge(value.get("image"), nextUI)
ui_graph.add_edge(cur_image, next_ui)
self.__logger.info("edges num %s node num %s", len(ui_graph.edges()), len(ui_graph.nodes()))
for node in ui_graph.nodes():
img_path = self.__explore_ret_path + '/' + str(node)
ui_graph.add_node_image(node, img_path)
ui_graph.process()
ui_graph.set_text_edit(self.__ui.textEdit)
        # Load the image file
self.load_image_file(ui_graph)
@staticmethod
def _plt_set(y_value, x_label, y_label, title):
y_max_value = math.ceil(max(y_value) * 1.2)
plt.ylim(0, y_max_value)
if platform == 'win32':
plt.xlabel(str(x_label))
plt.ylabel(str(y_label))
plt.title(str(title))
plt.legend()
else:
chinesefont = FontProperties(fname=UMING_TTC)
plt.xlabel(str(x_label), fontproperties=chinesefont)
plt.ylabel(str(y_label), fontproperties=chinesefont)
plt.title(str(title), fontproperties=chinesefont)
plt.legend(prop=chinesefont)
plt.tight_layout()
@staticmethod
def _set_bar(index, height, width, alpha, color, label):
plt.bar(index, height, width, alpha=alpha, color=color, label=label)
@staticmethod
def _set_bar_text(x, y, s, ha='center'):
plt.text(x, y, s, ha=ha)
def coverage(self):
# clear previous figures
plt.figure(1)
json_file = self.__explore_ret_path + '/coverage.json'
if not os.path.exists(json_file):
self.__logger.error("file %s not exists", json_file)
return
try:
with open(json_file) as f:
value = json.load(f)
except IOError as e:
self.__logger.error("load json file %s failed, err: %s", json_file, e)
return
button_value = value.get("button")
scene_value = value.get("scene")
if None in [button_value, scene_value]:
self.__logger.error("read button or scene from file %s failed", json_file)
return
plt.cla()
n_groups = 1
index = np.arange(n_groups)
bar_width = 0.3
_, (ax1, ax2) = plt.subplots(1, 2)
# ax2 = plt.subplot(1, 2, 2)
plt.sca(ax2)
opacity = 0.4
self._set_bar(index, height=button_value.get("sampleNum"), width=bar_width,
alpha=opacity, color='b', label='游戏按钮')
self._set_bar_text(x=index, y=button_value.get("sampleNum"), s=str(button_value.get("sampleNum")), ha='center')
self._set_bar(index + bar_width, button_value.get("coverNum"), bar_width, alpha=opacity,
color='r', label='探索覆盖')
self._set_bar_text(x=index + bar_width, y=button_value.get("coverNum"),
s=str(button_value.get("coverNum")), ha='center')
y_value = (button_value.get("sampleNum"), button_value.get("coverNum"))
self._plt_set(y_value, x_label=str('按钮'), y_label=str('按钮数量'), title=str('按钮覆盖率'))
# ax1 = plt.subplot(1, 2, 1)
plt.sca(ax1)
self._set_bar(index, height=scene_value.get("sampleNum"), width=bar_width,
alpha=opacity, color='g', label='游戏场景')
self._set_bar_text(x=index, y=scene_value.get("sampleNum"), s=str(scene_value.get("sampleNum")), ha='center')
self._set_bar(index + bar_width, height=scene_value.get("coverNum"), width=bar_width,
alpha=opacity, color='y', label='探索覆盖')
self._set_bar_text(x=index + bar_width, y=scene_value.get("coverNum"),
s=str(scene_value.get("coverNum")), ha='center')
y_value = (scene_value.get("sampleNum"), scene_value.get("coverNum"))
self._plt_set(y_value, x_label=str('场景'), y_label=str('场景数量'), title=str('场景覆盖率'))
plt.subplots_adjust(left=0.1, right=0.97, top=0.92, bottom=0.12, wspace=0.31, hspace=0.2)
name = 'coverage.jpg'
plt.savefig(name)
        # Load the image file
frame = QImage(name)
pix = QPixmap.fromImage(frame)
canvas.load_pixmap(pix)
canvas.update()
plt.close(1)
@staticmethod
def _set_widget_item(table_widget, name, row=0, col=0, set_edit=True):
new_item = QTableWidgetItem(name)
if set_edit:
new_item.setData(Qt.EditRole, name)
table_widget.setItem(row, col, new_item)
def ui_coverage(self):
json_file = self.__explore_ret_path + '/coverage.json'
if not os.path.exists(json_file):
self.__logger.error("file %s not exists", json_file)
return
try:
with open(json_file) as f:
value = json.load(f)
except IOError as e:
self.__logger.error("load json file %s failed, err: %s", json_file, e)
return
cover_list = value.get('coverList') or []
self.__table_widget = QTableWidget()
self.__table_widget.setSortingEnabled(True)
self.__table_widget.setRowCount(len(cover_list))
self.__table_widget.setColumnCount(4)
self.__table_widget.setHorizontalHeaderLabels(['图像名', '按钮数', '覆盖数', '覆盖率'])
self.__table_widget.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
index = 0
for item in cover_list:
self._set_widget_item(self.__table_widget, item.get("fileName"), index, 0, set_edit=False)
self._set_widget_item(self.__table_widget, int(item.get("sampleNum")), index, 1)
self._set_widget_item(self.__table_widget, int(item.get("coverNum")), index, 2)
self._set_widget_item(self.__table_widget, str(item.get("coverage")), index, 3)
index += 1
for i in range(self.__ui.graph_view_layout.count()):
item = self.__ui.graph_view_layout.itemAt(i)
item.widget().hide()
# self.__ui.horizontalLayout_4.addWidget(self.__table_widget)
self.__ui.graph_view_layout.addWidget(self.__table_widget)
def reset(self):
if self.__table_widget is not None:
self.__table_widget.deleteLater()
self.__table_widget = None
for i in range(self.__ui.graph_view_layout.count()):
item = self.__ui.graph_view_layout.itemAt(i)
item.widget().show()
| 38.960145
| 119
| 0.606993
|
21bb34d71827935ab5e4b0ac1772bf8d4a2868d8
| 629
|
py
|
Python
|
working-library/background/PictureAPI/views.py
|
FredCof/Fresh-supermarket-Online
|
25c1cb28f5b5dc1f85e53ee7de5b055de438c491
|
[
"Apache-2.0"
] | 2
|
2021-03-12T16:35:27.000Z
|
2021-03-12T16:35:34.000Z
|
working-library/background/PictureAPI/views.py
|
FredCof/Fresh-supermarket-Online
|
25c1cb28f5b5dc1f85e53ee7de5b055de438c491
|
[
"Apache-2.0"
] | 4
|
2021-03-19T13:16:08.000Z
|
2021-06-09T19:26:37.000Z
|
working-library/background/PictureAPI/views.py
|
FredCof/Fresh-supermarket-Online
|
25c1cb28f5b5dc1f85e53ee7de5b055de438c491
|
[
"Apache-2.0"
] | null | null | null |
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from filetype import filetype
from Fresh_market_online.model import User
from LoginAPI.token_module import get_token, out_token
import json
# Create your views here.
@csrf_exempt
def PictureGet(request):
size = request.POST.get("size")
url=request.path
end_pos = url.rfind('/') - 1
start_pos = url.rfind('/', 0, end_pos)
filename = url[start_pos + 1:]
#print(filename)
path = r'./static/goods_pic/' + filename
avatar = open(path, "rb")
return HttpResponse(avatar.read(), content_type='image/png')
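# Hedged variant (not part of the original view): the same response built with a
# sanitised file name and an explicitly closed handle; the function name and
# URL-parameter name are assumptions.
import os

@csrf_exempt
def PictureGetSafe(request, filename):
    path = os.path.join('./static/goods_pic', os.path.basename(filename))
    with open(path, 'rb') as avatar:
        return HttpResponse(avatar.read(), content_type='image/png')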
| 29.952381
| 64
| 0.720191
|
7223cd135afe5a9e6e1c20fa22ccee0967d1d603
| 14,761
|
py
|
Python
|
games/stocks.py
|
jobyid/muzero-general
|
8b8b9a66f5a86ea5f7ab2041d3838dfac514a9db
|
[
"MIT"
] | null | null | null |
games/stocks.py
|
jobyid/muzero-general
|
8b8b9a66f5a86ea5f7ab2041d3838dfac514a9db
|
[
"MIT"
] | null | null | null |
games/stocks.py
|
jobyid/muzero-general
|
8b8b9a66f5a86ea5f7ab2041d3838dfac514a9db
|
[
"MIT"
] | null | null | null |
import datetime
import os
import numpy
import torch
from .abstract_game import AbstractGame
class MuZeroConfig:
def __init__(self):
# More information is available here: https://github.com/werner-duvaud/muzero-general/wiki/Hyperparameter-Optimization
self.seed = 0 # Seed for numpy, torch and the game
self.max_num_gpus = None # Fix the maximum number of GPUs to use. It's usually faster to use a single GPU (set it to 1) if it has enough memory. None will use every GPUs available
### Game
self.observation_shape = (3, 6, 7) # Dimensions of the game observation, must be 3D (channel, height, width). For a 1D array, please reshape it to (1, 1, length of array)
self.action_space = list(range(7)) # Fixed list of all possible actions. You should only edit the length
self.players = list(range(2)) # List of players. You should only edit the length
self.stacked_observations = 0 # Number of previous observations and previous actions to add to the current observation
# Evaluate
self.muzero_player = 0 # Turn Muzero begins to play (0: MuZero plays first, 1: MuZero plays second)
self.opponent = "expert" # Hard coded agent that MuZero faces to assess his progress in multiplayer games. It doesn't influence training. None, "random" or "expert" if implemented in the Game class
### Self-Play
self.num_workers = 1 # Number of simultaneous threads/workers self-playing to feed the replay buffer
self.selfplay_on_gpu = False
self.max_moves = 42 # Maximum number of moves if game is not finished before
self.num_simulations = 200 # Number of future moves self-simulated
self.discount = 1 # Chronological discount of the reward
self.temperature_threshold = None # Number of moves before dropping the temperature given by visit_softmax_temperature_fn to 0 (ie selecting the best action). If None, visit_softmax_temperature_fn is used every time
# Root prior exploration noise
self.root_dirichlet_alpha = 0.3
self.root_exploration_fraction = 0.25
# UCB formula
self.pb_c_base = 19652
self.pb_c_init = 1.25
### Network
self.network = "resnet" # "resnet" / "fullyconnected"
self.support_size = 10 # Value and reward are scaled (with almost sqrt) and encoded on a vector with a range of -support_size to support_size. Choose it so that support_size <= sqrt(max(abs(discounted reward)))
# Residual Network
self.downsample = False # Downsample observations before representation network, False / "CNN" (lighter) / "resnet" (See paper appendix Network Architecture)
self.blocks = 3 # Number of blocks in the ResNet
self.channels = 64 # Number of channels in the ResNet
self.reduced_channels_reward = 2 # Number of channels in reward head
self.reduced_channels_value = 2 # Number of channels in value head
self.reduced_channels_policy = 4 # Number of channels in policy head
self.resnet_fc_reward_layers = [64] # Define the hidden layers in the reward head of the dynamic network
self.resnet_fc_value_layers = [64] # Define the hidden layers in the value head of the prediction network
self.resnet_fc_policy_layers = [64] # Define the hidden layers in the policy head of the prediction network
# Fully Connected Network
self.encoding_size = 32
self.fc_representation_layers = [] # Define the hidden layers in the representation network
self.fc_dynamics_layers = [64] # Define the hidden layers in the dynamics network
self.fc_reward_layers = [64] # Define the hidden layers in the reward network
self.fc_value_layers = [] # Define the hidden layers in the value network
self.fc_policy_layers = [] # Define the hidden layers in the policy network
### Training
self.results_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../results", os.path.basename(__file__)[:-3], datetime.datetime.now().strftime("%Y-%m-%d--%H-%M-%S")) # Path to store the model weights and TensorBoard logs
self.save_model = True # Save the checkpoint in results_path as model.checkpoint
self.training_steps = 100000 # Total number of training steps (ie weights update according to a batch)
self.batch_size = 64 # Number of parts of games to train on at each training step
self.checkpoint_interval = 10 # Number of training steps before using the model for self-playing
self.value_loss_weight = 0.25 # Scale the value loss to avoid overfitting of the value function, paper recommends 0.25 (See paper appendix Reanalyze)
self.train_on_gpu = torch.cuda.is_available() # Train on GPU if available
self.optimizer = "Adam" # "Adam" or "SGD". Paper uses SGD
self.weight_decay = 1e-4 # L2 weights regularization
self.momentum = 0.9 # Used only if optimizer is SGD
# Exponential learning rate schedule
self.lr_init = 0.005 # Initial learning rate
self.lr_decay_rate = 1 # Set it to 1 to use a constant learning rate
self.lr_decay_steps = 10000
### Replay Buffer
self.replay_buffer_size = 10000 # Number of self-play games to keep in the replay buffer
self.num_unroll_steps = 42 # Number of game moves to keep for every batch element
self.td_steps = 42 # Number of steps in the future to take into account for calculating the target value
self.PER = True # Prioritized Replay (See paper appendix Training), select in priority the elements in the replay buffer which are unexpected for the network
self.PER_alpha = 0.5 # How much prioritization is used, 0 corresponding to the uniform case, paper suggests 1
# Reanalyze (See paper appendix Reanalyse)
self.use_last_model_value = True # Use the last model to provide a fresher, stable n-step value (See paper appendix Reanalyze)
self.reanalyse_on_gpu = False
### Adjust the self play / training ratio to avoid over/underfitting
self.self_play_delay = 0 # Number of seconds to wait after each played game
self.training_delay = 0 # Number of seconds to wait after each training step
self.ratio = None # Desired training steps per self played step ratio. Equivalent to a synchronous version, training can take much longer. Set it to None to disable it
def visit_softmax_temperature_fn(self, trained_steps):
"""
Parameter to alter the visit count distribution to ensure that the action selection becomes greedier as training progresses.
The smaller it is, the more likely the best action (ie with the highest visit count) is chosen.
Returns:
Positive float.
"""
return 1
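# Hedged illustration (not part of the original config): a staged schedule is a
# common alternative to the constant temperature returned above; the thresholds
# below are assumptions, not the author's settings.
def example_visit_softmax_temperature(trained_steps, training_steps=100000):
    if trained_steps < 0.5 * training_steps:
        return 1.0
    if trained_steps < 0.75 * training_steps:
        return 0.5
    return 0.25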
class Game(AbstractGame):
"""
Game wrapper.
"""
def __init__(self, seed=None):
self.env = Connect4()
def step(self, action):
"""
Apply action to the game.
Args:
action : action of the action_space to take.
Returns:
The new observation, the reward and a boolean if the game has ended.
"""
observation, reward, done = self.env.step(action)
return observation, reward * 10, done
def to_play(self):
"""
Return the current player.
Returns:
The current player, it should be an element of the players list in the config.
"""
return self.env.to_play()
def legal_actions(self):
"""
        Should return the legal actions at each turn; if that is not available, it can return
        the whole action space. At each turn, the game has to be able to handle one of the returned actions.
        For complex games where calculating legal moves takes too long, the idea is to define the legal actions
        as equal to the action space but to return a negative reward if the action is illegal.
Returns:
An array of integers, subset of the action space.
"""
return self.env.legal_actions()
def reset(self):
"""
Reset the game for a new game.
Returns:
Initial observation of the game.
"""
return self.env.reset()
def render(self):
"""
Display the game observation.
"""
self.env.render()
input("Press enter to take a step ")
def human_to_action(self):
"""
For multiplayer games, ask the user for a legal action
and return the corresponding action number.
Returns:
An integer from the action space.
"""
choice = input(f"Enter the column to play for the player {self.to_play()}: ")
while choice not in [str(action) for action in self.legal_actions()]:
choice = input("Enter another column : ")
return int(choice)
def expert_agent(self):
"""
        Hard-coded agent that MuZero faces to assess its progress in multiplayer games.
        It doesn't influence training.
Returns:
Action as an integer to take in the current game state
"""
return self.env.expert_action()
def action_to_string(self, action_number):
"""
Convert an action number to a string representing the action.
Args:
action_number: an integer from the action space.
Returns:
String representing the action.
"""
return f"Play column {action_number + 1}"
class Stocks:
def __init__(self):
self.player = 1
def stockList(self):
print("stocks")
epics = []
return epics
def to_play(self):
return 1
def get_observation(self):
print('Observations')
def step(self):
print("Review rewards, then move to moves")
def legal_actions(self):
legal = []
return legal
def have_winner(self):
print('Winner')
def take_action(self):
print("Make the move")
class Connect4:
def __init__(self):
self.board = numpy.zeros((6, 7), dtype="int32")
self.player = 1
def to_play(self):
return 0 if self.player == 1 else 1
def reset(self):
self.board = numpy.zeros((6, 7), dtype="int32")
self.player = 1
return self.get_observation()
def step(self, action):
for i in range(6):
if self.board[i][action] == 0:
self.board[i][action] = self.player
break
done = self.have_winner() or len(self.legal_actions()) == 0
reward = 1 if self.have_winner() else 0
self.player *= -1
return self.get_observation(), reward, done
def get_observation(self):
board_player1 = numpy.where(self.board == 1, 1.0, 0.0)
board_player2 = numpy.where(self.board == -1, 1.0, 0.0)
board_to_play = numpy.full((6, 7), self.player, dtype="int32")
return numpy.array([board_player1, board_player2, board_to_play])
def legal_actions(self):
legal = []
for i in range(7):
if self.board[5][i] == 0:
legal.append(i)
return legal
def have_winner(self):
# Horizontal check
for i in range(4):
for j in range(6):
if (
self.board[j][i] == self.player
and self.board[j][i + 1] == self.player
and self.board[j][i + 2] == self.player
and self.board[j][i + 3] == self.player
):
return True
# Vertical check
for i in range(7):
for j in range(3):
if (
self.board[j][i] == self.player
and self.board[j + 1][i] == self.player
and self.board[j + 2][i] == self.player
and self.board[j + 3][i] == self.player
):
return True
# Positive diagonal check
for i in range(4):
for j in range(3):
if (
self.board[j][i] == self.player
and self.board[j + 1][i + 1] == self.player
and self.board[j + 2][i + 2] == self.player
and self.board[j + 3][i + 3] == self.player
):
return True
# Negative diagonal check
for i in range(4):
for j in range(3, 6):
if (
self.board[j][i] == self.player
and self.board[j - 1][i + 1] == self.player
and self.board[j - 2][i + 2] == self.player
and self.board[j - 3][i + 3] == self.player
):
return True
return False
def expert_action(self):
board = self.board
action = numpy.random.choice(self.legal_actions())
for k in range(3):
for l in range(4):
sub_board = board[k : k + 4, l : l + 4]
# Horizontal and vertical checks
for i in range(4):
if abs(sum(sub_board[i, :])) == 3:
ind = numpy.where(sub_board[i, :] == 0)[0][0]
if numpy.count_nonzero(board[:, ind + l]) == i + k:
action = ind + l
if self.player * sum(sub_board[i, :]) > 0:
return action
if abs(sum(sub_board[:, i])) == 3:
action = i + l
if self.player * sum(sub_board[:, i]) > 0:
return action
# Diagonal checks
diag = sub_board.diagonal()
anti_diag = numpy.fliplr(sub_board).diagonal()
if abs(sum(diag)) == 3:
ind = numpy.where(diag == 0)[0][0]
if numpy.count_nonzero(board[:, ind + l]) == ind + k:
action = ind + l
if self.player * sum(diag) > 0:
return action
if abs(sum(anti_diag)) == 3:
ind = numpy.where(anti_diag == 0)[0][0]
if numpy.count_nonzero(board[:, 3 - ind + l]) == ind + k:
action = 3 - ind + l
if self.player * sum(anti_diag) > 0:
return action
return action
def render(self):
print(self.board[::-1])
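# Hedged smoke test (not part of the original file): one random game played
# against itself exercises the Connect4 environment defined above.
if __name__ == "__main__":
    env = Connect4()
    observation = env.reset()
    done = False
    while not done:
        move = int(numpy.random.choice(env.legal_actions()))
        observation, reward, done = env.step(move)
    env.render()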
| 39.787062
| 244
| 0.590543
|
4e4e42b003eadda5673e6a9b32c8f6e73b584986
| 1,666
|
py
|
Python
|
setup.py
|
StackStorm/pyangbind
|
4fb5275e850c5174e67c4691d41cf6f1cd526c62
|
[
"Apache-2.0"
] | 1
|
2016-11-10T05:07:53.000Z
|
2016-11-10T05:07:53.000Z
|
setup.py
|
StackStorm/pyangbind
|
4fb5275e850c5174e67c4691d41cf6f1cd526c62
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
StackStorm/pyangbind
|
4fb5275e850c5174e67c4691d41cf6f1cd526c62
|
[
"Apache-2.0"
] | 3
|
2016-11-01T23:51:35.000Z
|
2018-05-23T10:09:08.000Z
|
from setuptools import setup, find_packages
from pip.req import parse_requirements
from codecs import open
from os import path
thisdir = path.abspath(path.dirname(__file__))
pip_reqs = parse_requirements(path.join(thisdir, "requirements.txt"), session=False)
inst_reqs = [str(ir.req) for ir in pip_reqs]
import pyangbind
with open(path.join(thisdir, "README.rst"), encoding='utf-8') as readme:
long_description = readme.read()
setup(
name='pyangbind-brcd',
# PyangBind uses the same versioning approach as OpenConfig - see
# http://www.openconfig.net/file-cabinet/Semantic_Versioning_for_OpenConfig.pdf?attredirects=0&d=1
version=pyangbind.__version__,
description="PyangBind is a plugin for pyang which converts YANG data" + \
"models into a Python class hierarchy, such that Python " + \
"can be used to manipulate data that conforms with a YANG" + \
" model.",
long_description=long_description,
url="https://github.com/StackStorm/pyangbind",
author="Rob Shakir",
author_email="rjs@rob.sh",
license="Apache",
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Telecommunications Industry',
'Intended Audience :: Developers',
'Topic :: Software Development :: Code Generators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 2 :: Only'
],
include_package_data=True,
keywords="yang pyang",
packages=find_packages(exclude=['lib']),
install_requires=inst_reqs,
zip_safe = False,
)
| 34
| 102
| 0.683073
|
a1f4cd76aca96f41e904b53dce88276a60c944cd
| 3,201
|
py
|
Python
|
tests/compute/test_subgraph.py
|
vipermu/dgl
|
c9ac6c9889423019977e431c8b74a7b6c70cdc01
|
[
"Apache-2.0"
] | 6
|
2020-04-27T16:31:53.000Z
|
2022-03-24T16:27:51.000Z
|
tests/compute/test_subgraph.py
|
vipermu/dgl
|
c9ac6c9889423019977e431c8b74a7b6c70cdc01
|
[
"Apache-2.0"
] | null | null | null |
tests/compute/test_subgraph.py
|
vipermu/dgl
|
c9ac6c9889423019977e431c8b74a7b6c70cdc01
|
[
"Apache-2.0"
] | 4
|
2020-03-17T11:21:56.000Z
|
2020-07-02T09:42:24.000Z
|
import numpy as np
from dgl.graph import DGLGraph
import backend as F
D = 5
def generate_graph(grad=False, add_data=True):
g = DGLGraph()
g.add_nodes(10)
# create a graph where 0 is the source and 9 is the sink
for i in range(1, 9):
g.add_edge(0, i)
g.add_edge(i, 9)
# add a back flow from 9 to 0
g.add_edge(9, 0)
if add_data:
ncol = F.randn((10, D))
ecol = F.randn((17, D))
if grad:
ncol = F.attach_grad(ncol)
ecol = F.attach_grad(ecol)
g.ndata['h'] = ncol
g.edata['l'] = ecol
return g
def test_basics1():
# Test when the graph has no node data and edge data.
g = generate_graph(add_data=False)
eid = [0, 2, 3, 6, 7, 9]
sg = g.edge_subgraph(eid)
sg.copy_from_parent()
sg.ndata['h'] = F.arange(0, sg.number_of_nodes())
sg.edata['h'] = F.arange(0, sg.number_of_edges())
def test_basics():
g = generate_graph()
h = g.ndata['h']
l = g.edata['l']
nid = [0, 2, 3, 6, 7, 9]
sg = g.subgraph(nid)
eid = {2, 3, 4, 5, 10, 11, 12, 13, 16}
assert set(F.zerocopy_to_numpy(sg.parent_eid)) == eid
eid = F.tensor(sg.parent_eid)
# the subgraph is empty initially
assert len(sg.ndata) == 0
assert len(sg.edata) == 0
    # the data is copied after an explicit copy_from_parent()
sg.copy_from_parent()
assert len(sg.ndata) == 1
assert len(sg.edata) == 1
sh = sg.ndata['h']
assert F.allclose(h[nid], sh)
'''
s, d, eid
0, 1, 0
1, 9, 1
0, 2, 2 1
2, 9, 3 1
0, 3, 4 1
3, 9, 5 1
0, 4, 6
4, 9, 7
0, 5, 8
5, 9, 9 3
0, 6, 10 1
6, 9, 11 1 3
0, 7, 12 1
7, 9, 13 1 3
0, 8, 14
8, 9, 15 3
9, 0, 16 1
'''
assert F.allclose(F.gather_row(l, eid), sg.edata['l'])
# update the node/edge features on the subgraph should NOT
# reflect to the parent graph.
sg.ndata['h'] = F.zeros((6, D))
assert F.allclose(h, g.ndata['h'])
def test_map_to_subgraph():
g = DGLGraph()
g.add_nodes(10)
g.add_edges(F.arange(0, 9), F.arange(1, 10))
h = g.subgraph([0, 1, 2, 5, 8])
v = h.map_to_subgraph_nid([0, 8, 2])
assert np.array_equal(F.asnumpy(v), np.array([0, 4, 2]))
def test_merge():
# FIXME: current impl cannot handle this case!!!
# comment out for now to test CI
return
"""
g = generate_graph()
g.set_n_repr({'h' : th.zeros((10, D))})
g.set_e_repr({'l' : th.zeros((17, D))})
# subgraphs
sg1 = g.subgraph([0, 2, 3, 6, 7, 9])
sg1.set_n_repr({'h' : th.ones((6, D))})
sg1.set_e_repr({'l' : th.ones((9, D))})
sg2 = g.subgraph([0, 2, 3, 4])
sg2.set_n_repr({'h' : th.ones((4, D)) * 2})
sg3 = g.subgraph([5, 6, 7, 8, 9])
sg3.set_e_repr({'l' : th.ones((4, D)) * 3})
g.merge([sg1, sg2, sg3])
h = g.ndata['h'][:,0]
l = g.edata['l'][:,0]
assert U.allclose(h, th.tensor([3., 0., 3., 3., 2., 0., 1., 1., 0., 1.]))
assert U.allclose(l,
th.tensor([0., 0., 1., 1., 1., 1., 0., 0., 0., 3., 1., 4., 1., 4., 0., 3., 1.]))
"""
if __name__ == '__main__':
test_basics()
test_basics1()
#test_merge()
| 26.89916
| 92
| 0.524836
|
9b92c0ec66d6bfd48f7c8e9b12af9c7587d1946c
| 139
|
py
|
Python
|
apps/terraform/terraform.py
|
brollin/knausj_talon
|
c7a6f3f5ab7c5696c9b137f8fdf03aae7e259250
|
[
"MIT"
] | 2
|
2021-12-01T03:25:48.000Z
|
2022-03-07T03:45:01.000Z
|
apps/terraform/terraform.py
|
brollin/knausj_talon
|
c7a6f3f5ab7c5696c9b137f8fdf03aae7e259250
|
[
"MIT"
] | 1
|
2022-03-26T15:27:18.000Z
|
2022-03-26T15:27:18.000Z
|
apps/terraform/terraform.py
|
brollin/knausj_talon
|
c7a6f3f5ab7c5696c9b137f8fdf03aae7e259250
|
[
"MIT"
] | null | null | null |
from talon import Module, Context
mod = Module()
mod.tag("terraform_client", desc="tag for enabling terraform commands in your terminal")
| 27.8
| 88
| 0.776978
|
b6de36e29a124e62901136e3dfe7672c3d9dabd4
| 2,967
|
py
|
Python
|
A3C/models/icm.py
|
sadeqa/Super-Mario-Bros-RL
|
e4d0a565359a16684d8617853bf4eb6f8cfc4721
|
[
"MIT"
] | 72
|
2019-01-19T13:45:08.000Z
|
2022-01-07T10:18:53.000Z
|
PPO/a2c_ppo_acktr/icm.py
|
wwxFromTju/Super-Mario-Bros-RL
|
e4d0a565359a16684d8617853bf4eb6f8cfc4721
|
[
"MIT"
] | 4
|
2019-05-03T16:36:23.000Z
|
2019-10-21T09:55:14.000Z
|
PPO/a2c_ppo_acktr/icm.py
|
wwxFromTju/Super-Mario-Bros-RL
|
e4d0a565359a16684d8617853bf4eb6f8cfc4721
|
[
"MIT"
] | 18
|
2019-01-24T23:11:16.000Z
|
2022-03-18T13:52:23.000Z
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def normalized_columns_initializer(weights, std=1.0):
out = torch.randn(weights.size())
out *= std / torch.sqrt(out.pow(2).sum(1, keepdim=True))
return out
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
weight_shape = list(m.weight.data.size())
fan_in = np.prod(weight_shape[1:4])
fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]
w_bound = np.sqrt(6. / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
weight_shape = list(m.weight.data.size())
fan_in = weight_shape[1]
fan_out = weight_shape[0]
w_bound = np.sqrt(6. / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
class ICM(torch.nn.Module):
def __init__(self, num_inputs, num_actions):
super(ICM, self).__init__()
self.downsample = nn.AvgPool2d(2,2) #downsample to 42*42
self.conv1 = nn.Conv2d(num_inputs, 32, 3, stride=2, padding=1)
self.conv2 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
self.conv3 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
self.conv4 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
lin1 = nn.Linear(288*2, 256)
lin2 = nn.Linear(256, num_actions)
lin3 = nn.Linear(num_actions + 288 , 256)
lin4 = nn.Linear(256, 288)
self.apply(weights_init)
lin1.weight.data = normalized_columns_initializer(
lin1.weight.data, 0.01)
lin1.bias.data.fill_(0)
lin2.weight.data = normalized_columns_initializer(
lin2.weight.data, 0.01)
lin2.bias.data.fill_(0)
lin3.weight.data = normalized_columns_initializer(
lin3.weight.data, 0.01)
lin3.bias.data.fill_(0)
lin4.weight.data = normalized_columns_initializer(
lin4.weight.data, 0.01)
lin4.bias.data.fill_(0)
self.inverse_model = nn.Sequential(lin1, nn.ReLU(), lin2)
self.forward_model = nn.Sequential(lin3, nn.ReLU(), lin4)
self.train()
def forward(self, inputs):
st, stp1, at = inputs
xt, xtp1 = self.downsample(st), self.downsample(stp1)
xt, xtp1 = F.elu(self.conv1(xt)), F.elu(self.conv1(xtp1))
xt, xtp1 = F.elu(self.conv2(xt)), F.elu(self.conv2(xtp1))
xt, xtp1 = F.elu(self.conv3(xt)), F.elu(self.conv3(xtp1))
xt, xtp1 = F.elu(self.conv4(xt)), F.elu(self.conv4(xtp1))
xt, xtp1 = xt.view(-1, 288), xtp1.view(-1, 288)
inverse_features, forward_features = torch.cat((xt, xtp1),1), torch.cat((xt, at),1)
return self.inverse_model(inverse_features), self.forward_model(forward_features), xtp1
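# Hedged usage sketch (not part of the original module): the forward-model error
# is typically converted into an intrinsic curiosity reward,
# r_int = eta / 2 * ||phi_hat(s_{t+1}) - phi(s_{t+1})||^2; the helper name and
# eta value below are assumptions.
def example_intrinsic_reward(icm, state, next_state, action_onehot, eta=0.01):
    _, predicted_phi, actual_phi = icm((state, next_state, action_onehot))
    return eta * 0.5 * (predicted_phi - actual_phi).pow(2).sum(dim=1)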
| 38.038462
| 95
| 0.599259
|
2232a2020046bc7d8a4381bd6b87a72e2ba9da6e
| 227
|
py
|
Python
|
Python/BinomialCoefficient.py
|
patres270/AlgoLib
|
8b697e1e9348c559dcabdb6665e1031264c1032a
|
[
"MIT"
] | 19
|
2018-10-01T15:01:43.000Z
|
2022-03-01T01:28:23.000Z
|
Python/BinomialCoefficient.py
|
patres270/AlgoLib
|
8b697e1e9348c559dcabdb6665e1031264c1032a
|
[
"MIT"
] | 95
|
2018-10-01T14:43:45.000Z
|
2018-12-19T14:20:22.000Z
|
Python/BinomialCoefficient.py
|
patres270/AlgoLib
|
8b697e1e9348c559dcabdb6665e1031264c1032a
|
[
"MIT"
] | 93
|
2018-10-01T14:54:28.000Z
|
2020-10-02T08:51:29.000Z
|
""" computing binomial coefficient """
n = int(input("Enter the degree : "))
b = [[1 for j in range(i+1)] for i in range(n+1)]
for i in range(2,n+1):
for j in range(1,i):
b[i][j] = b[i-1][j] + b[i-1][j-1]
print(b)
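# Hedged check (not part of the original script): every entry obeys Pascal's rule
# C(n, k) = C(n-1, k-1) + C(n-1, k), so the table can be verified with math.comb.
import math
assert all(b[i][j] == math.comb(i, j) for i in range(n + 1) for j in range(i + 1))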
| 25.222222
| 49
| 0.546256
|
5ffa2f46ffe8db80d0e2917311c0ad2ae4caf457
| 2,848
|
py
|
Python
|
src/vbLib/GetOSVersion.py
|
mehrdad-shokri/macro_pack
|
bcc39728ae70f99e95998cbb48a8beb9e7697031
|
[
"Apache-2.0"
] | 1,550
|
2017-10-04T13:29:15.000Z
|
2022-03-30T20:53:25.000Z
|
src/vbLib/GetOSVersion.py
|
mehrdad-shokri/macro_pack
|
bcc39728ae70f99e95998cbb48a8beb9e7697031
|
[
"Apache-2.0"
] | 75
|
2017-10-19T18:55:35.000Z
|
2022-02-16T20:17:33.000Z
|
src/vbLib/GetOSVersion.py
|
mehrdad-shokri/macro_pack
|
bcc39728ae70f99e95998cbb48a8beb9e7697031
|
[
"Apache-2.0"
] | 358
|
2017-10-07T21:16:02.000Z
|
2022-03-25T03:36:17.000Z
|
"""
Get the windows OS version
"""
r"""
#If VBA7 Then
Declare PtrSafe Function RtlGetVersion Lib "NTDLL" (ByRef lpVersionInformation As Long) As Long
#Else
Declare Function RtlGetVersion Lib "NTDLL" (ByRef lpVersionInformation As Long) As Long
#End If
Public Function GetOSVersion() As String
Dim tOSVw(&H54) As Long
tOSVw(0) = &H54 * &H4
Call RtlGetVersion(tOSVw(0))
'GetOSVersion = Join(Array(tOSVw(1), tOSVw(2), tOSVw(3)), ".")
GetOSVersion = VersionToName(Join(Array(tOSVw(1), tOSVw(2)), "."))
End Function
Private Function VersionToName(ByRef sVersion As String) As String
Select Case sVersion
Case "5.1": VersionToName = "Windows XP"
Case "5.3": VersionToName = "Windows 2003 (SERVER)"
Case "6.0": VersionToName = "Windows Vista"
Case "6.1": VersionToName = "Windows 7"
Case "6.2": VersionToName = "Windows 8"
Case "6.3": VersionToName = "Windows 8.1"
Case "10.0": VersionToName = "Windows 10"
Case Else: VersionToName = "Unknown"
End Select
End Function
"""
VBA = \
r'''
Public Function GetOSVersion() As String
Dim prodType As String
Dim version As String
Dim desktopProductType As String
desktopProductType = "1"
For Each objItem in GetObject("winmgmts://./root/cimv2").ExecQuery("Select * from Win32_OperatingSystem",,48)
version = objItem.Version
prodType = objItem.ProductType & ""
Next
Select Case Left(version, Instr(version, ".") + 1)
Case "10.0"
If (prodType = desktopProductType) Then
GetOSVersion = "Windows 10"
Else
GetOSVersion = "Windows Server 2016"
End If
Case "6.3"
If (prodType = desktopProductType) Then
GetOSVersion = "Windows 8.1"
Else
GetOSVersion = "Windows Server 2012 R2"
End If
Case "6.2"
If (prodType = desktopProductType) Then
GetOSVersion = "Windows 8"
Else
GetOSVersion = "Windows Server 2012"
End If
Case "6.1"
If (prodType = desktopProductType) Then
GetOSVersion = "Windows 7"
Else
GetOSVersion = "Windows Server 2008 R2"
End If
Case "6.0"
If (prodType = desktopProductType) Then
GetOSVersion = "Windows Vista"
Else
GetOSVersion = "Windows Server 2008"
End If
Case "5.2"
If (prodType = desktopProductType) Then
GetOSVersion = "Windows XP 64-Bit Edition"
ElseIf (Left(Version, 5) = "5.2.3") Then
GetOSVersion = "Windows Server 2003 R2"
Else
GetOSVersion = "Windows Server 2003"
End If
Case "5.1"
GetOSVersion = "Windows XP"
Case "5.0"
GetOSVersion = "Windows 2000"
End Select
End Function
'''
| 29.360825
| 113
| 0.607093
|
7fdbdd0f49f4d6819a63470845c2d579b7974277
| 3,259
|
py
|
Python
|
src/mano-framework/plugins/service-lifecycle-manager/slm/main.py
|
CN-UPB/Cloud-NFV-Orchestration
|
28a6852f529ac73fe28f4448597f455b2d2fe552
|
[
"Apache-2.0"
] | 10
|
2019-01-09T06:32:58.000Z
|
2021-11-16T11:36:22.000Z
|
src/mano-framework/plugins/service-lifecycle-manager/slm/main.py
|
CN-UPB/Cloud-NFV-Orchestration
|
28a6852f529ac73fe28f4448597f455b2d2fe552
|
[
"Apache-2.0"
] | 14
|
2019-11-13T06:51:51.000Z
|
2021-12-09T02:01:29.000Z
|
src/mano-framework/plugins/service-lifecycle-manager/slm/main.py
|
CN-UPB/Cloud-NFV-Orchestration
|
28a6852f529ac73fe28f4448597f455b2d2fe552
|
[
"Apache-2.0"
] | 7
|
2019-02-06T05:46:56.000Z
|
2021-08-21T13:56:07.000Z
|
import logging
from typing import Dict
from appcfg import get_config
from mongoengine import DoesNotExist, connect
from manobase.messaging import Message
from manobase.plugin import ManoBasePlugin
from slm import version
from slm.exceptions import (
DeployRequestValidationError,
InstantiationError,
TerminationError,
)
from slm.slm import ServiceLifecycleManager
from slm.util import create_status_message
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.DEBUG)
config = get_config(__name__)
MONGO_HOST = config["mongo"]
class ServiceLifecycleManagerPlugin(ManoBasePlugin):
"""
Service Lifecycle Manager main class. Instantiate this class to run the SLM.
"""
def __init__(self, *args, **kwargs):
# Connect to MongoDB
LOG.debug(f"Connecting to MongoDB at {MONGO_HOST}")
connect(host=MONGO_HOST)
LOG.info("Connected to MongoDB")
# Map service ids to ServiceLifecycleManager instances
self.managers: Dict[str, ServiceLifecycleManager] = {}
super().__init__(*args, version=version, **kwargs)
def declare_subscriptions(self):
super().declare_subscriptions()
self.conn.register_async_endpoint(
self.on_service_instance_create, "service.instances.create"
)
self.conn.register_async_endpoint(
self.on_service_instance_terminate, "service.instance.terminate"
)
def on_lifecycle_start(self, message: Message):
super().on_lifecycle_start(message)
LOG.info("SLM started and operational.")
async def on_service_instance_create(self, message: Message):
"""
Instantiate a service
"""
try:
manager = ServiceLifecycleManager.from_deploy_request(message, self.conn)
self.managers[manager.service_id] = manager
# Notify gatekeeper
self.conn.notify(
message.topic,
create_status_message(status="INSTANTIATING"),
correlation_id=message.correlation_id,
)
await manager.instantiate()
return create_status_message(
status="READY", payload={"nsr": {"id": manager.service_id}}
)
except (DeployRequestValidationError, InstantiationError) as e:
return create_status_message(error=e)
async def on_service_instance_terminate(self, message: Message):
"""
        Destroy a service instance
"""
service_id = message.payload["instance_id"]
try:
if service_id in self.managers:
manager = self.managers[service_id]
else:
try:
manager = ServiceLifecycleManager.from_database(
service_id, self.conn
)
except DoesNotExist:
raise TerminationError(
f"A service with instance id {service_id} is not known to the SLM"
)
await manager.terminate()
return create_status_message(status="TERMINATED")
except TerminationError as e:
return create_status_message(error=e)
| 31.038095
| 90
| 0.641608
|
10646d9ee5a6e27d44aec1d79c0edb48a9fe510a
| 4,253
|
py
|
Python
|
loss_functions.py
|
mmonfort/moments_models
|
c233603fd47625354ad66c4e89521e21206f4980
|
[
"BSD-2-Clause"
] | 272
|
2018-01-09T05:30:18.000Z
|
2020-03-03T12:30:45.000Z
|
loss_functions.py
|
mmonfort/moments_models
|
c233603fd47625354ad66c4e89521e21206f4980
|
[
"BSD-2-Clause"
] | 20
|
2018-01-11T05:52:51.000Z
|
2020-02-12T05:47:34.000Z
|
loss_functions.py
|
mmonfort/moments_models
|
c233603fd47625354ad66c4e89521e21206f4980
|
[
"BSD-2-Clause"
] | 62
|
2018-01-10T10:51:18.000Z
|
2020-03-04T17:31:43.000Z
|
'''
-- Our loss implementations for wlsep, lsep, warp, bp_mll and bce with optional weighted learning
--
-- If you use these implementations in your paper please cite our paper https://arxiv.org/abs/1911.00232
--
-- scores is the output of the model
-- labels is a binary vector with a 1 indicating a positive class for each batch member
-- Both scores and labels have size BxC with B = batch size and C = number of classes
-- weights is an optional tensor of size C used to weight the learning for unbalanced training sets
-- We used w_i = min(count)/count_i for the weights to train the Multi-Moments model where
count_i is the number of examples in the training set with a positive label for class i
and min(count) is the number of examples with a positive label for the least common class.
--
-- By Mathew Monfort, mmonfort@mit.edu
'''
import torch
from torch.nn import functional as F
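# Illustrative helper for the weighting scheme described in the header comment
# (w_i = min(count) / count_i). The name `class_weights` and the idea of passing
# a 1-D tensor of per-class positive-example counts are assumptions for
# illustration; the losses below only require a weight tensor of size C.
def class_weights(label_counts):
    # label_counts: 1-D float tensor, positive-example count per class
    return label_counts.min() / label_counts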
# https://arxiv.org/abs/1911.00232
def wlsep(scores, labels, weights=None):
mask = ((labels.unsqueeze(1).expand(labels.size(0), labels.size(1), labels.size(1)) -
labels.unsqueeze(2).expand(labels.size(0), labels.size(1), labels.size(1))) > 0).float()
diffs = (scores.unsqueeze(2).expand(labels.size(0), labels.size(1), labels.size(1)) -
scores.unsqueeze(1).expand(labels.size(0), labels.size(1), labels.size(1)))
if weights is not None:
return F.pad(diffs.add(-(1-mask)*1e10),
pad=(0,0,0,1)).logsumexp(dim=1).mul(weights).masked_select(labels.bool()).mean()
else:
return F.pad(diffs.add(-(1-mask)*1e10),
pad=(0,0,0,1)).logsumexp(dim=1).masked_select(labels.bool()).mean()
# http://openaccess.thecvf.com/content_cvpr_2017/html/Li_Improving_Pairwise_Ranking_CVPR_2017_paper.html
def lsep(scores, labels, weights=None):
mask = ((labels.unsqueeze(1).expand(labels.size(0), labels.size(1), labels.size(1)) -
labels.unsqueeze(2).expand(labels.size(0), labels.size(1), labels.size(1))) > 0).float()
diffs = (scores.unsqueeze(2).expand(labels.size(0), labels.size(1), labels.size(1)) -
scores.unsqueeze(1).expand(labels.size(0), labels.size(1), labels.size(1)))
return diffs.exp().mul(mask).sum().add(1).log().mean()
""" https://www.aaai.org/ocs/index.php/IJCAI/IJCAI11/paper/viewPaper/2926
We pre-compute the rank weights (rank_w) into a tensor as below:
rank_w = torch.zeros(num_classes)
sum = 0.
for i in range(num_classes):
sum += 1./(i+1)
rank_w[i] = sum
"""
def warp(scores, labels, rank_w, weights=None):
mask = ((labels.unsqueeze(1).expand(labels.size(0), labels.size(1), labels.size(1)) -
labels.unsqueeze(2).expand(labels.size(0), labels.size(1), labels.size(1))) > 0).float()
diffs = (scores.unsqueeze(2).expand(labels.size(0), labels.size(1), labels.size(1)) -
scores.unsqueeze(1).expand(labels.size(0), labels.size(1), labels.size(1))).add(1)
if weights is not None:
return (diffs.clamp(0,1e10).mul(mask).sum(1).div(mask.sum(1)).mul(weights).masked_select(labels.bool())
.mul(rank_w.index_select(0,scores.sort(descending=True)[1].masked_select(labels.bool()))).mean())
else:
return (diffs.clamp(0,1e10).mul(mask).sum(1).div(mask.sum(1)).masked_select(labels.bool())
.mul(rank_w.index_select(0,scores.sort(descending=True)[1].masked_select(labels.bool()))).mean())
#https://ieeexplore.ieee.org/abstract/document/1683770
def bp_mll(scores, labels, weights=None):
mask = ((labels.unsqueeze(1).expand(labels.size(0), labels.size(1), labels.size(1)) -
labels.unsqueeze(2).expand(labels.size(0), labels.size(1), labels.size(1))) > 0).float()
diffs = (scores.unsqueeze(2).expand(labels.size(0), labels.size(1), labels.size(1)) -
scores.unsqueeze(1).expand(labels.size(0), labels.size(1), labels.size(1)))
if weights is not None:
return diffs.exp().mul(mask).sum(1).mul(weights).sum(1).mean()
else:
return diffs.exp().mul(mask).sum(1).sum(1).mean()
# `bceCriterion` is referenced below but never defined in this file; an
# element-wise criterion such as the following is assumed here.
bceCriterion = torch.nn.BCEWithLogitsLoss(reduction='none')
def bce(output, labels, weights=None):
    if weights is not None:
        return (((1.-weights)*labels + weights*(1.-labels))*
                bceCriterion(output, torch.autograd.Variable(labels))).sum(1).mean()
    else:
        return bceCriterion(output, torch.autograd.Variable(labels)).sum(1).mean()
| 54.525641
| 114
| 0.685163
|
e56c4a9ddb4b3ee812eab414620aad63f2b0dc9a
| 799
|
py
|
Python
|
app/urls.py
|
Sonray/The-share-IP3
|
3783ea73736e8c7a1e3962eb82d0d2a77e92e293
|
[
"MIT"
] | null | null | null |
app/urls.py
|
Sonray/The-share-IP3
|
3783ea73736e8c7a1e3962eb82d0d2a77e92e293
|
[
"MIT"
] | null | null | null |
app/urls.py
|
Sonray/The-share-IP3
|
3783ea73736e8c7a1e3962eb82d0d2a77e92e293
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static
from . import views
urlpatterns = [
url('^$',views.home, name = 'home'),
url(r'^profile/(\w+)',views.profile,name = 'profile'),
url(r'^accounts/profile/$',views.home,name = 'home'),
url(r'^new/project$',views.new_project,name = 'new_project'),
url(r'^search/',views.search,name='search'),
url(r'^project/(\w+)',views.project,name='project'),
url(r'^rate/(\d+)',views.rate,name='rate'),
url(r'^api/profile/$', views.ProfApi.as_view()),
url(r'^api/project/$', views.ProjApi.as_view()),
url(r'^new/profile$',views.new_profile,name = 'new_profile'),
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
| 42.052632
| 81
| 0.670839
|
c840b8429f01edc0ddfd5c0cfcdf0dc8485c5b9f
| 10,891
|
py
|
Python
|
models/vqvae.py
|
lisiyao21/Bailando
|
125b529b3c596b58b9d70f22c05dc3f04a01895c
|
[
"MIT"
] | 31
|
2022-03-21T11:46:33.000Z
|
2022-03-31T17:59:21.000Z
|
models/vqvae.py
|
lisiyao21/Bailando
|
125b529b3c596b58b9d70f22c05dc3f04a01895c
|
[
"MIT"
] | 1
|
2022-03-27T08:44:32.000Z
|
2022-03-28T02:01:55.000Z
|
models/vqvae.py
|
lisiyao21/Bailando
|
125b529b3c596b58b9d70f22c05dc3f04a01895c
|
[
"MIT"
] | 2
|
2022-03-28T09:35:03.000Z
|
2022-03-31T18:18:36.000Z
|
import numpy as np
import torch as t
import torch.nn as nn
from .encdec import Encoder, Decoder, assert_shape
from .bottleneck import NoBottleneck, Bottleneck
from .utils.logger import average_metrics
# from .utils.audio_utils import audio_postprocess
def dont_update(params):
for param in params:
param.requires_grad = False
def update(params):
for param in params:
param.requires_grad = True
def calculate_strides(strides, downs):
return [stride ** down for stride, down in zip(strides, downs)]
# def _loss_fn(loss_fn, x_target, x_pred, hps):
# if loss_fn == 'l1':
# return t.mean(t.abs(x_pred - x_target)) / hps.bandwidth['l1']
# elif loss_fn == 'l2':
# return t.mean((x_pred - x_target) ** 2) / hps.bandwidth['l2']
# elif loss_fn == 'linf':
# residual = ((x_pred - x_target) ** 2).reshape(x_target.shape[0], -1)
# values, _ = t.topk(residual, hps.linf_k, dim=1)
# return t.mean(values) / hps.bandwidth['l2']
# elif loss_fn == 'lmix':
# loss = 0.0
# if hps.lmix_l1:
# loss += hps.lmix_l1 * _loss_fn('l1', x_target, x_pred, hps)
# if hps.lmix_l2:
# loss += hps.lmix_l2 * _loss_fn('l2', x_target, x_pred, hps)
# if hps.lmix_linf:
# loss += hps.lmix_linf * _loss_fn('linf', x_target, x_pred, hps)
# return loss
# else:
# assert False, f"Unknown loss_fn {loss_fn}"
def _loss_fn(x_target, x_pred):
return t.mean(t.abs(x_pred - x_target))
class VQVAE(nn.Module):
def __init__(self, hps, input_dim=72):
super().__init__()
self.hps = hps
input_shape = (hps.sample_length, input_dim)
levels = hps.levels
downs_t = hps.downs_t
strides_t = hps.strides_t
emb_width = hps.emb_width
l_bins = hps.l_bins
mu = hps.l_mu
commit = hps.commit
# spectral = hps.spectral
# multispectral = hps.multispectral
multipliers = hps.hvqvae_multipliers
use_bottleneck = hps.use_bottleneck
if use_bottleneck:
print('We use bottleneck!')
else:
print('We do not use bottleneck!')
if not hasattr(hps, 'dilation_cycle'):
hps.dilation_cycle = None
block_kwargs = dict(width=hps.width, depth=hps.depth, m_conv=hps.m_conv, \
dilation_growth_rate=hps.dilation_growth_rate, \
dilation_cycle=hps.dilation_cycle, \
reverse_decoder_dilation=hps.vqvae_reverse_decoder_dilation)
self.sample_length = input_shape[0]
x_shape, x_channels = input_shape[:-1], input_shape[-1]
self.x_shape = x_shape
self.downsamples = calculate_strides(strides_t, downs_t)
self.hop_lengths = np.cumprod(self.downsamples)
self.z_shapes = z_shapes = [(x_shape[0] // self.hop_lengths[level],) for level in range(levels)]
self.levels = levels
if multipliers is None:
self.multipliers = [1] * levels
else:
assert len(multipliers) == levels, "Invalid number of multipliers"
self.multipliers = multipliers
def _block_kwargs(level):
this_block_kwargs = dict(block_kwargs)
this_block_kwargs["width"] *= self.multipliers[level]
this_block_kwargs["depth"] *= self.multipliers[level]
return this_block_kwargs
encoder = lambda level: Encoder(x_channels, emb_width, level + 1,
downs_t[:level+1], strides_t[:level+1], **_block_kwargs(level))
decoder = lambda level: Decoder(x_channels, emb_width, level + 1,
downs_t[:level+1], strides_t[:level+1], **_block_kwargs(level))
self.encoders = nn.ModuleList()
self.decoders = nn.ModuleList()
for level in range(levels):
self.encoders.append(encoder(level))
self.decoders.append(decoder(level))
if use_bottleneck:
self.bottleneck = Bottleneck(l_bins, emb_width, mu, levels)
else:
self.bottleneck = NoBottleneck(levels)
self.downs_t = downs_t
self.strides_t = strides_t
self.l_bins = l_bins
self.commit = commit
self.reg = hps.reg if hasattr(hps, 'reg') else 0
self.acc = hps.acc if hasattr(hps, 'acc') else 0
self.vel = hps.vel if hasattr(hps, 'vel') else 0
        if self.reg == 0:
print('No motion regularization!')
# self.spectral = spectral
# self.multispectral = multispectral
def preprocess(self, x):
# x: NTC [-1,1] -> NCT [-1,1]
assert len(x.shape) == 3
x = x.permute(0,2,1).float()
return x
def postprocess(self, x):
# x: NTC [-1,1] <- NCT [-1,1]
x = x.permute(0,2,1)
return x
def _decode(self, zs, start_level=0, end_level=None):
# Decode
if end_level is None:
end_level = self.levels
assert len(zs) == end_level - start_level
xs_quantised = self.bottleneck.decode(zs, start_level=start_level, end_level=end_level)
assert len(xs_quantised) == end_level - start_level
# Use only lowest level
decoder, x_quantised = self.decoders[start_level], xs_quantised[0:1]
x_out = decoder(x_quantised, all_levels=False)
x_out = self.postprocess(x_out)
return x_out
def decode(self, zs, start_level=0, end_level=None, bs_chunks=1):
z_chunks = [t.chunk(z, bs_chunks, dim=0) for z in zs]
x_outs = []
for i in range(bs_chunks):
zs_i = [z_chunk[i] for z_chunk in z_chunks]
x_out = self._decode(zs_i, start_level=start_level, end_level=end_level)
x_outs.append(x_out)
return t.cat(x_outs, dim=0)
def _encode(self, x, start_level=0, end_level=None):
# Encode
if end_level is None:
end_level = self.levels
x_in = self.preprocess(x)
xs = []
for level in range(self.levels):
encoder = self.encoders[level]
x_out = encoder(x_in)
xs.append(x_out[-1])
zs = self.bottleneck.encode(xs)
return zs[start_level:end_level]
def encode(self, x, start_level=0, end_level=None, bs_chunks=1):
x_chunks = t.chunk(x, bs_chunks, dim=0)
zs_list = []
for x_i in x_chunks:
zs_i = self._encode(x_i, start_level=start_level, end_level=end_level)
zs_list.append(zs_i)
zs = [t.cat(zs_level_list, dim=0) for zs_level_list in zip(*zs_list)]
return zs
def sample(self, n_samples):
zs = [t.randint(0, self.l_bins, size=(n_samples, *z_shape), device='cuda') for z_shape in self.z_shapes]
return self.decode(zs)
def forward(self, x):
metrics = {}
N = x.shape[0]
# Encode/Decode
x_in = self.preprocess(x)
xs = []
for level in range(self.levels):
encoder = self.encoders[level]
x_out = encoder(x_in)
xs.append(x_out[-1])
zs, xs_quantised, commit_losses, quantiser_metrics = self.bottleneck(xs)
x_outs = []
for level in range(self.levels):
decoder = self.decoders[level]
x_out = decoder(xs_quantised[level:level+1], all_levels=False)
assert_shape(x_out, x_in.shape)
x_outs.append(x_out)
# Loss
# def _spectral_loss(x_target, x_out, self.hps):
# if hps.use_nonrelative_specloss:
# sl = spectral_loss(x_target, x_out, self.hps) / hps.bandwidth['spec']
# else:
# sl = spectral_convergence(x_target, x_out, self.hps)
# sl = t.mean(sl)
# return sl
# def _multispectral_loss(x_target, x_out, self.hps):
# sl = multispectral_loss(x_target, x_out, self.hps) / hps.bandwidth['spec']
# sl = t.mean(sl)
# return sl
recons_loss = t.zeros(()).to(x.device)
regularization = t.zeros(()).to(x.device)
velocity_loss = t.zeros(()).to(x.device)
acceleration_loss = t.zeros(()).to(x.device)
# spec_loss = t.zeros(()).to(x.device)
# multispec_loss = t.zeros(()).to(x.device)
# x_target = audio_postprocess(x.float(), self.hps)
x_target = x.float()
for level in reversed(range(self.levels)):
x_out = self.postprocess(x_outs[level])
# x_out = audio_postprocess(x_out, self.hps)
# this_recons_loss = _loss_fn(loss_fn, x_target, x_out, hps)
this_recons_loss = _loss_fn(x_target, x_out)
# this_spec_loss = _spectral_loss(x_target, x_out, hps)
# this_multispec_loss = _multispectral_loss(x_target, x_out, hps)
metrics[f'recons_loss_l{level + 1}'] = this_recons_loss
# metrics[f'spectral_loss_l{level + 1}'] = this_spec_loss
# metrics[f'multispectral_loss_l{level + 1}'] = this_multispec_loss
recons_loss += this_recons_loss
# spec_loss += this_spec_loss
# multispec_loss += this_multispec_loss
regularization += t.mean((x_out[:, 2:] + x_out[:, :-2] - 2 * x_out[:, 1:-1])**2)
velocity_loss += _loss_fn( x_out[:, 1:] - x_out[:, :-1], x_target[:, 1:] - x_target[:, :-1])
acceleration_loss += _loss_fn(x_out[:, 2:] + x_out[:, :-2] - 2 * x_out[:, 1:-1], x_target[:, 2:] + x_target[:, :-2] - 2 * x_target[:, 1:-1])
# if not hasattr(self.)
commit_loss = sum(commit_losses)
# loss = recons_loss + self.spectral * spec_loss + self.multispectral * multispec_loss + self.commit * commit_loss
loss = recons_loss + commit_loss * self.commit + self.reg * regularization + self.vel * velocity_loss + self.acc * acceleration_loss
with t.no_grad():
# sc = t.mean(spectral_convergence(x_target, x_out, hps))
# l2_loss = _loss_fn("l2", x_target, x_out, hps)
l1_loss = _loss_fn(x_target, x_out)
# linf_loss = _loss_fn("linf", x_target, x_out, hps)
quantiser_metrics = average_metrics(quantiser_metrics)
metrics.update(dict(
recons_loss=recons_loss,
# spectral_loss=spec_loss,
# multispectral_loss=multispec_loss,
# spectral_convergence=sc,
# l2_loss=l2_loss,
l1_loss=l1_loss,
# linf_loss=linf_loss,
commit_loss=commit_loss,
regularization=regularization,
velocity_loss=velocity_loss,
acceleration_loss=acceleration_loss,
**quantiser_metrics))
for key, val in metrics.items():
metrics[key] = val.detach()
return x_out, loss, metrics
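# Shape arithmetic sketch (the concrete numbers are illustrative, not taken
# from any config): with strides_t=(2,), downs_t=(3,) and sample_length=240,
# calculate_strides((2,), (3,)) == [2 ** 3] == [8], the cumulative hop length
# is 8, and z_shapes == [(240 // 8,)] == [(30,)], i.e. each latent sequence is
# 8x shorter than the input motion sequence.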
| 39.748175
| 153
| 0.59214
|
e006bc80a0e6d8d9c5552ca644bac856cb5ce63e
| 1,967
|
py
|
Python
|
hooks/clang_tidy.py
|
rambo/pre-commit-hooks
|
4c6731ed39da9fe55d0d671d51ddd06123e933fc
|
[
"Apache-2.0"
] | null | null | null |
hooks/clang_tidy.py
|
rambo/pre-commit-hooks
|
4c6731ed39da9fe55d0d671d51ddd06123e933fc
|
[
"Apache-2.0"
] | null | null | null |
hooks/clang_tidy.py
|
rambo/pre-commit-hooks
|
4c6731ed39da9fe55d0d671d51ddd06123e933fc
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
"""Wrapper script for clang-tidy."""
#############################################################################
import re
import sys
from hooks.utils import ClangAnalyzerCmd
class ClangTidyCmd(ClangAnalyzerCmd):
"""Class for the clang-tidy command."""
command = "clang-tidy"
lookbehind = "LLVM version "
def __init__(self, args):
super().__init__(self.command, self.lookbehind, args)
self.parse_args(args)
self.edit_in_place = "-fix" in self.args or "--fix-errors" in self.args
self.parse_ddash_args()
# If a compilation database is not used, suppress errors
if "-p" not in self.args:
self.add_if_missing(["--", "-DCMAKE_EXPORT_COMPILE_COMMANDS=ON"])
# Enable all of the checks
self.add_if_missing(["-checks=*"])
def run(self):
"""Run clang-tidy"""
for filename in self.files:
self.run_command(filename)
sys.stdout.write(self.stdout)
# The number of warnings depends on errors in system files
self.stderr = re.sub(r"\d+ warnings and ", "", self.stderr)
# Don't output stderr if it's complaining about problems in system files
no_sysfile_warning = "non-user code" not in self.stderr
# On good clang-tidy checks, it will spew warnings to stderr
if len(self.stdout) > 0 and no_sysfile_warning:
sys.stderr.write(self.stderr)
else:
self.stderr = ""
has_errors = (
"error generated." in self.stderr
or "errors generated." in self.stderr
)
if has_errors: # Change return code if errors are generated
self.returncode = 1
if self.returncode != 0:
sys.exit(self.returncode)
def main(argv=None):
cmd = ClangTidyCmd(argv)
cmd.run()
if __name__ == "__main__":
main()
| 33.913793
| 84
| 0.571429
|
385e03aad268c5cd837d14c1e19b928c009963cf
| 594
|
py
|
Python
|
django_user_agents/templatetags/user_agents.py
|
avallbona/django-user_agents
|
b1fa72ee9f9a2ebff74abfb33b7ec6c7fd224554
|
[
"MIT"
] | 547
|
2015-01-13T15:01:39.000Z
|
2022-03-27T08:35:43.000Z
|
django_user_agents/templatetags/user_agents.py
|
avallbona/django-user_agents
|
b1fa72ee9f9a2ebff74abfb33b7ec6c7fd224554
|
[
"MIT"
] | 34
|
2015-04-21T00:27:59.000Z
|
2022-03-22T06:28:34.000Z
|
django_user_agents/templatetags/user_agents.py
|
avallbona/django-user_agents
|
b1fa72ee9f9a2ebff74abfb33b7ec6c7fd224554
|
[
"MIT"
] | 108
|
2015-01-29T07:37:11.000Z
|
2022-03-24T17:03:40.000Z
|
from django import template
from ..utils import get_and_set_user_agent
register = template.Library()
@register.filter()
def is_mobile(request):
return get_and_set_user_agent(request).is_mobile
@register.filter()
def is_pc(request):
return get_and_set_user_agent(request).is_pc
@register.filter()
def is_tablet(request):
return get_and_set_user_agent(request).is_tablet
@register.filter()
def is_bot(request):
return get_and_set_user_agent(request).is_bot
@register.filter()
def is_touch_capable(request):
return get_and_set_user_agent(request).is_touch_capable
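# Template usage sketch (assumes this app's template tags are installed and the
# request is exposed to the template, e.g. via the request context processor):
#
#   {% load user_agents %}
#   {% if request|is_mobile %} ... mobile-only markup ... {% endif %}
#   {% if request|is_touch_capable %} ... touch-friendly controls ... {% endif %}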
| 18.5625
| 59
| 0.789562
|
a0b8bd638c37d51d1305cb3f6d7cb6beed8bab9a
| 2,532
|
py
|
Python
|
src/visualization/generate_notebook_outputs.py
|
jolfr/capstone-03
|
3095465170ea0e7e4947cff9edb012344e6a9f05
|
[
"MIT"
] | 1
|
2021-12-29T04:32:30.000Z
|
2021-12-29T04:32:30.000Z
|
src/visualization/generate_notebook_outputs.py
|
jolfr/capstone-03
|
3095465170ea0e7e4947cff9edb012344e6a9f05
|
[
"MIT"
] | null | null | null |
src/visualization/generate_notebook_outputs.py
|
jolfr/capstone-03
|
3095465170ea0e7e4947cff9edb012344e6a9f05
|
[
"MIT"
] | null | null | null |
import os
import nbformat
from traitlets.config import Config
from nbconvert import MarkdownExporter
from nbconvert.writers import FilesWriter
notebookDir = r'C:\Users\thoma\projects\datascience\capstone-03\notebooks'
outputDir = r'C:\Users\thoma\projects\datascience\jupyter-presentation-framework\src\notebooks'
def getsectionParams(sec):
sec = sec.replace('-', ' ').replace('_', ' ')
sec = sec.split(' ')
num = sec.pop(0)
newsec = ''
for word in sec:
newsec = newsec + ' ' + word
sec = newsec.lstrip()
return num, sec
def getnotebookparams(nb):
nb = nb.replace('-', ' ').replace('_', ' ').replace('.ipynb', '')
nb = nb.split(' ')
num = nb.pop(0).split('.')[1]
newnb = ''
for word in nb:
newnb = newnb + ' ' + word
nb = newnb.lstrip()
return num, nb
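# Illustration of the naming convention these helpers assume (the example
# names are hypothetical):
#   getsectionParams("1-data_wrangling") -> ("1", "data wrangling")
#   getnotebookparams("1.01-initial-exploration.ipynb") -> ("01", "initial exploration")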
for entry in os.scandir(notebookDir):
if not entry.name.startswith('.'):
for notebook in os.scandir(entry.path):
if (not notebook.name.startswith('.')) and notebook.path.endswith('.ipynb'):
nb_node = nbformat.read(notebook.path, nbformat.NO_CONVERT)
me = MarkdownExporter()
(output, resource) = me.from_notebook_node(nb_node)
c = Config()
c.FilesWriter.build_directory = outputDir
fw = FilesWriter(config=c)
(secNum, section) = getsectionParams(entry.name)
secNum = 'secid : ' + secNum + '\n'
section = 'section : "' + section + '"\n'
(nbNum, title) = getnotebookparams(notebook.name)
nbNum = 'nbid : ' + nbNum + '\n'
title = 'title: "' + title + '"\n'
header = '---\n' + secNum + nbNum + section + title + '---\n'
fw.write(header + output, resource, notebook_name=notebook.name)
elif notebook.name == 'README.md':
with open(notebook.path, 'r', encoding='utf-8') as input_file:
text = input_file.read()
(secNum, section) = getsectionParams(entry.name)
section = 'section : "' + section + '"\n'
secNum = 'secid : ' + secNum + '\n'
title = 'title: readme\n'
header = '---\n' + secNum + section + title + '---\n'
newText = header + text
with open(outputDir + "\\" + entry.name + '.md', 'w', encoding='utf-8') as output_file:
output_file.write(newText)
| 40.83871
| 103
| 0.539494
|
8d27779cb44626964c688e5a003fbdf508ace3d2
| 3,789
|
py
|
Python
|
demo/demo_youtop.py
|
donglaiw/detectron2
|
35efa63d8c6b5a3d3f06c5ee216aef83a8c958e2
|
[
"Apache-2.0"
] | null | null | null |
demo/demo_youtop.py
|
donglaiw/detectron2
|
35efa63d8c6b5a3d3f06c5ee216aef83a8c958e2
|
[
"Apache-2.0"
] | null | null | null |
demo/demo_youtop.py
|
donglaiw/detectron2
|
35efa63d8c6b5a3d3f06c5ee216aef83a8c958e2
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import glob
import multiprocessing as mp
import os
import time
import tqdm
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger
from predictor import VisualizationDemo
# constants
WINDOW_NAME = "COCO detections"
# python demo/demo_dw.py --config-file configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml --input-template frame/image_%05d.png --input-index 1,25 --output seg/_s%05d.png --opts MODEL.WEIGHTS detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl
def setup_cfg(args):
# load config from file and command-line arguments
cfg = get_cfg()
if 'pointrend' in args.config_file:
import sys; sys.path.insert(1, "/n/pfister_lab2/Lab/donglai/lib/pipeline/detectron2/projects/PointRend")
import point_rend
point_rend.add_pointrend_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
# Set score_threshold for builtin models
cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold
cfg.freeze()
return cfg
def get_parser():
parser = argparse.ArgumentParser(description="Detectron2 demo for builtin models")
parser.add_argument(
"--config-file",
default="configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument(
"--input-template",
help="input filename template. e.g., directory/%05d.png",
)
parser.add_argument(
"--input-index",
help="A list of comma separated input image index; ",
)
parser.add_argument(
"--output-template",
help="A file or directory to save output visualizations. "
"If not given, will show output in an OpenCV window.",
)
parser.add_argument(
"--confidence-threshold",
type=float,
default=0.5,
help="Minimum score for instance predictions to be shown",
)
parser.add_argument(
"--opts",
help="Modify config options using the command-line 'KEY VALUE' pairs",
default=[],
nargs=argparse.REMAINDER,
)
return parser
if __name__ == "__main__":
mp.set_start_method("spawn", force=True)
args = get_parser().parse_args()
setup_logger(name="fvcore")
logger = setup_logger()
logger.info("Arguments: " + str(args))
cfg = setup_cfg(args)
demo = VisualizationDemo(cfg)
input_indices = [int(x) for x in args.input_index.split(',')]
output_folder = os.path.dirname(args.output_template)
if not os.path.exists(output_folder):
os.makedirs(output_folder)
for index in tqdm.tqdm(input_indices):
# use PIL, to be consistent with evaluation
output_name = args.output_template % index
if not os.path.exists(output_name):
path = args.input_template % index
img = read_image(path, format="BGR")
start_time = time.time()
predictions, visualized_output = demo.run_on_image(img, mask_only=True)
logger.info(
"{}: {} in {:.2f}s".format(
path,
"detected {} instances".format(len(predictions["instances"]))
if "instances" in predictions
else "finished",
time.time() - start_time,
)
)
visualized_output.save(output_name)
| 34.445455
| 295
| 0.66825
|