"""Test the Bond config flow."""
from __future__ import annotations
from http import HTTPStatus
from typing import Any
from unittest.mock import MagicMock, Mock, patch
from aiohttp import ClientConnectionError, ClientResponseError
from homeassistant import config_entries, core
from homeassistant.components import zeroconf
from homeassistant.components.bond.const import DOMAIN
from homeassistant.config_entries import ConfigEntryState
from homeassistant.const import CONF_ACCESS_TOKEN, CONF_HOST
from .common import (
patch_bond_bridge,
patch_bond_device,
patch_bond_device_ids,
patch_bond_device_properties,
patch_bond_token,
patch_bond_version,
)
from tests.common import MockConfigEntry
async def test_user_form(hass: core.HomeAssistant):
"""Test we get the user initiated form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch_bond_version(
return_value={"bondid": "test-bond-id"}
), patch_bond_device_ids(
return_value=["f6776c11", "f6776c12"]
), patch_bond_bridge(), patch_bond_device_properties(), patch_bond_device(), _patch_async_setup_entry() as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "bond-name"
assert result2["data"] == {
CONF_HOST: "some host",
CONF_ACCESS_TOKEN: "test-token",
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_user_form_with_non_bridge(hass: core.HomeAssistant):
"""Test setup a smart by bond fan."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch_bond_version(
return_value={"bondid": "test-bond-id"}
), patch_bond_device_ids(
return_value=["f6776c11"]
), patch_bond_device_properties(), patch_bond_device(
return_value={
"name": "New Fan",
}
), patch_bond_bridge(
return_value={}
), _patch_async_setup_entry() as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "New Fan"
assert result2["data"] == {
CONF_HOST: "some host",
CONF_ACCESS_TOKEN: "test-token",
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_user_form_invalid_auth(hass: core.HomeAssistant):
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch_bond_version(
return_value={"bond_id": "test-bond-id"}
), patch_bond_bridge(), patch_bond_device_ids(
side_effect=ClientResponseError(Mock(), Mock(), status=401),
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "invalid_auth"}
async def test_user_form_cannot_connect(hass: core.HomeAssistant):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch_bond_version(
side_effect=ClientConnectionError()
), patch_bond_bridge(), patch_bond_device_ids():
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_user_form_old_firmware(hass: core.HomeAssistant):
"""Test we handle unsupported old firmware."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch_bond_version(
return_value={"no_bond_id": "present"}
), patch_bond_bridge(), patch_bond_device_ids():
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "old_firmware"}
async def test_user_form_unexpected_client_error(hass: core.HomeAssistant):
"""Test we handle unexpected client error gracefully."""
await _help_test_form_unexpected_error(
hass,
source=config_entries.SOURCE_USER,
user_input={CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"},
error=ClientResponseError(Mock(), Mock(), status=500),
)
async def test_user_form_unexpected_error(hass: core.HomeAssistant):
"""Test we handle unexpected error gracefully."""
await _help_test_form_unexpected_error(
hass,
source=config_entries.SOURCE_USER,
user_input={CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"},
error=Exception(),
)
async def test_user_form_one_entry_per_device_allowed(hass: core.HomeAssistant):
"""Test that only one entry allowed per unique ID reported by Bond hub device."""
MockConfigEntry(
domain=DOMAIN,
unique_id="already-registered-bond-id",
data={CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"},
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch_bond_version(
return_value={"bondid": "already-registered-bond-id"}
), patch_bond_bridge(), patch_bond_device_ids(), _patch_async_setup_entry() as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"},
)
assert result2["type"] == "abort"
assert result2["reason"] == "already_configured"
await hass.async_block_till_done()
assert len(mock_setup_entry.mock_calls) == 0
async def test_zeroconf_form(hass: core.HomeAssistant):
"""Test we get the discovery form."""
with patch_bond_version(), patch_bond_token():
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=zeroconf.ZeroconfServiceInfo(
host="test-host",
hostname="mock_hostname",
name="test-bond-id.some-other-tail-info",
port=None,
properties={},
type="mock_type",
),
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch_bond_version(
return_value={"bondid": "test-bond-id"}
), patch_bond_bridge(), patch_bond_device_ids(), _patch_async_setup_entry() as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_ACCESS_TOKEN: "test-token"},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "bond-name"
assert result2["data"] == {
CONF_HOST: "test-host",
CONF_ACCESS_TOKEN: "test-token",
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_zeroconf_form_token_unavailable(hass: core.HomeAssistant):
"""Test we get the discovery form and we handle the token being unavailable."""
with patch_bond_version(), patch_bond_token():
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=zeroconf.ZeroconfServiceInfo(
host="test-host",
hostname="mock_hostname",
name="test-bond-id.some-other-tail-info",
port=None,
properties={},
type="mock_type",
),
)
await hass.async_block_till_done()
assert result["type"] == "form"
assert result["errors"] == {}
with patch_bond_version(), patch_bond_bridge(), patch_bond_device_ids(), _patch_async_setup_entry() as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_ACCESS_TOKEN: "test-token"},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "bond-name"
assert result2["data"] == {
CONF_HOST: "test-host",
CONF_ACCESS_TOKEN: "test-token",
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_zeroconf_form_with_token_available(hass: core.HomeAssistant):
"""Test we get the discovery form when we can get the token."""
with patch_bond_version(return_value={"bondid": "test-bond-id"}), patch_bond_token(
return_value={"token": "discovered-token"}
), patch_bond_bridge(
return_value={"name": "discovered-name"}
), patch_bond_device_ids():
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=zeroconf.ZeroconfServiceInfo(
host="test-host",
hostname="mock_hostname",
name="test-bond-id.some-other-tail-info",
port=None,
properties={},
type="mock_type",
),
)
await hass.async_block_till_done()
assert result["type"] == "form"
assert result["errors"] == {}
with _patch_async_setup_entry() as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "discovered-name"
assert result2["data"] == {
CONF_HOST: "test-host",
CONF_ACCESS_TOKEN: "discovered-token",
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_zeroconf_form_with_token_available_name_unavailable(
hass: core.HomeAssistant,
):
"""Test we get the discovery form when we can get the token but the name is unavailable."""
with patch_bond_version(
side_effect=ClientResponseError(Mock(), (), status=HTTPStatus.BAD_REQUEST)
), patch_bond_token(return_value={"token": "discovered-token"}):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=zeroconf.ZeroconfServiceInfo(
host="test-host",
hostname="mock_hostname",
name="test-bond-id.some-other-tail-info",
port=None,
properties={},
type="mock_type",
),
)
await hass.async_block_till_done()
assert result["type"] == "form"
assert result["errors"] == {}
with _patch_async_setup_entry() as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "test-bond-id"
assert result2["data"] == {
CONF_HOST: "test-host",
CONF_ACCESS_TOKEN: "discovered-token",
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_zeroconf_already_configured(hass: core.HomeAssistant):
"""Test starting a flow from discovery when already configured."""
entry = MockConfigEntry(
domain=DOMAIN,
unique_id="already-registered-bond-id",
data={CONF_HOST: "stored-host", CONF_ACCESS_TOKEN: "test-token"},
)
entry.add_to_hass(hass)
with _patch_async_setup_entry() as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=zeroconf.ZeroconfServiceInfo(
host="updated-host",
hostname="mock_hostname",
name="already-registered-bond-id.some-other-tail-info",
port=None,
properties={},
type="mock_type",
),
)
await hass.async_block_till_done()
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert entry.data["host"] == "updated-host"
assert len(mock_setup_entry.mock_calls) == 1
async def test_zeroconf_already_configured_refresh_token(hass: core.HomeAssistant):
"""Test starting a flow from zeroconf when already configured and the token is out of date."""
entry2 = MockConfigEntry(
domain=DOMAIN,
unique_id="not-the-same-bond-id",
data={CONF_HOST: "stored-host", CONF_ACCESS_TOKEN: "correct-token"},
)
entry2.add_to_hass(hass)
entry = MockConfigEntry(
domain=DOMAIN,
unique_id="already-registered-bond-id",
data={CONF_HOST: "stored-host", CONF_ACCESS_TOKEN: "incorrect-token"},
)
entry.add_to_hass(hass)
with patch_bond_version(
side_effect=ClientResponseError(MagicMock(), MagicMock(), status=401)
):
await hass.config_entries.async_setup(entry.entry_id)
assert entry.state is ConfigEntryState.SETUP_ERROR
with _patch_async_setup_entry() as mock_setup_entry, patch_bond_token(
return_value={"token": "discovered-token"}
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=zeroconf.ZeroconfServiceInfo(
host="updated-host",
hostname="mock_hostname",
name="already-registered-bond-id.some-other-tail-info",
port=None,
properties={},
type="mock_type",
),
)
await hass.async_block_till_done()
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert entry.data["host"] == "updated-host"
assert entry.data[CONF_ACCESS_TOKEN] == "discovered-token"
# entry2 should not get changed
assert entry2.data[CONF_ACCESS_TOKEN] == "correct-token"
assert len(mock_setup_entry.mock_calls) == 1
async def test_zeroconf_already_configured_no_reload_same_host(
hass: core.HomeAssistant,
):
"""Test starting a flow from zeroconf when already configured does not reload if the host is the same."""
entry = MockConfigEntry(
domain=DOMAIN,
unique_id="already-registered-bond-id",
data={CONF_HOST: "stored-host", CONF_ACCESS_TOKEN: "correct-token"},
)
entry.add_to_hass(hass)
with _patch_async_setup_entry() as mock_setup_entry, patch_bond_token(
return_value={"token": "correct-token"}
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=zeroconf.ZeroconfServiceInfo(
host="stored-host",
hostname="mock_hostname",
name="already-registered-bond-id.some-other-tail-info",
port=None,
properties={},
type="mock_type",
),
)
await hass.async_block_till_done()
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert len(mock_setup_entry.mock_calls) == 0
async def test_zeroconf_form_unexpected_error(hass: core.HomeAssistant):
"""Test we handle unexpected error gracefully."""
await _help_test_form_unexpected_error(
hass,
source=config_entries.SOURCE_ZEROCONF,
initial_input=zeroconf.ZeroconfServiceInfo(
host="test-host",
hostname="mock_hostname",
name="test-bond-id.some-other-tail-info",
port=None,
properties={},
type="mock_type",
),
user_input={CONF_ACCESS_TOKEN: "test-token"},
error=Exception(),
)
async def _help_test_form_unexpected_error(
hass: core.HomeAssistant,
*,
source: str,
initial_input: dict[str, Any] | None = None,
user_input: dict[str, Any],
error: Exception,
):
"""Test we handle unexpected error gracefully."""
with patch_bond_token():
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": source}, data=initial_input
)
with patch_bond_version(
return_value={"bond_id": "test-bond-id"}
), patch_bond_device_ids(side_effect=error):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "unknown"}
def _patch_async_setup_entry():
return patch(
"homeassistant.components.bond.async_setup_entry",
return_value=True,
)
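# Illustrative sketch, not part of the original suite: the helper above can
# exercise any unexpected exception. Assuming the flow maps every non-401
# ClientResponseError to the generic "unknown" error (as the 500 case in
# test_user_form_unexpected_client_error suggests), a hypothetical 404 test
# could reuse the same helper.
async def test_user_form_unexpected_not_found_error(hass: core.HomeAssistant):
    """Test a non-401 client error (404) is treated as an unknown error."""
    await _help_test_form_unexpected_error(
        hass,
        source=config_entries.SOURCE_USER,
        user_input={CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"},
        error=ClientResponseError(Mock(), Mock(), status=HTTPStatus.NOT_FOUND),
    )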
# module for the <eadheader/> or <control/> portion
import xml.etree.cElementTree as ET
import globals
from messages import error
from mixed_content import mixed_content
import wx
def eadheader(EAD, CSheet):
eadheader_root = EAD[0]
#update GUI progress bar
from wx.lib.pubsub import pub
wx.CallAfter(pub.sendMessage, "update_spread", msg="Reading <eadheader>...")
# <eadheader> @findaidstatus
if "findaidstatus" in eadheader_root.attrib:
CSheet.find('DraftStatus').text = eadheader_root.attrib['findaidstatus']
#<eadid> @countrycode and @url
if 'countrycode' in eadheader_root.find('eadid').attrib:
CSheet.find('CountryCode').text = eadheader_root.find('eadid').attrib['countrycode']
if 'url' in eadheader_root.find('eadid').attrib:
CSheet.find('URL').text = eadheader_root.find('eadid').attrib['url']
#<filedesc>
if eadheader_root.find('filedesc') is None:
error("Your EAD finding aid does not contain a <filedesc> tag. Since this is a required element in EAD2002, EADMachine is unable to convert the file. Please correct your EAD file and try again.", True)
#<titlestmt>
if eadheader_root.find('filedesc/titlestmt') is None:
error("Your EAD finding aid does not contain a <titlestmt> tag. Since this is a required element in EAD2002, EADMachine is unable to convert the file. Please correct your EAD file and try again.", True)
titlestmt_element = ET.Element('titlestmt')
#<titleproper>
if eadheader_root.find('filedesc/titlestmt/titleproper') is None:
error("Your EAD finding aid does not contain a <titleproper> tag. Since this is a required element in EAD2002, EADMachine is unable to convert the file. Please correct your EAD file and try again.", True)
#<date>
if eadheader_root.find('filedesc/titlestmt/titleproper/date') is None:
pass
else:
if 'type' in eadheader_root.find('filedesc/titlestmt/titleproper/date').attrib:
if eadheader_root.find('filedesc/titlestmt/titleproper/date').attrib['type'].lower() == 'bulk':
CSheet.find('DateBulk').text = eadheader_root.find('filedesc/titlestmt/titleproper/date').text
if 'normal' in eadheader_root.find('filedesc/titlestmt/titleproper/date').attrib:
CSheet.find('DateBulkNormal').text = eadheader_root.find('filedesc/titlestmt/titleproper/date').attrib['normal']
else:
CSheet.find('DateInclusive').text = eadheader_root.find('filedesc/titlestmt/titleproper/date').text
if 'normal' in eadheader_root.find('filedesc/titlestmt/titleproper/date').attrib:
CSheet.find('DateInclusiveNormal').text = eadheader_root.find('filedesc/titlestmt/titleproper/date').attrib['normal']
else:
CSheet.find('DateInclusive').text = eadheader_root.find('filedesc/titlestmt/titleproper/date').text
if 'normal' in eadheader_root.find('filedesc/titlestmt/titleproper/date').attrib:
CSheet.find('DateInclusiveNormal').text = eadheader_root.find('filedesc/titlestmt/titleproper/date').attrib['normal']
#<subtitle>
if eadheader_root.find('filedesc/titlestmt/subtitle') is None:
if EAD.find('frontmatter/titlepage/subtitle') is None:
pass
else:
CSheet.find('Subtitle').text = EAD.find('frontmatter/titlepage/subtitle').text
else:
CSheet.find('Subtitle').text = eadheader_root.find('filedesc/titlestmt/subtitle').text
#<author>
if eadheader_root.find('filedesc/titlestmt/author') is None:
if EAD.find('frontmatter/titlepage/author') is None:
pass
else:
CSheet.find('ProcessedBy').text = EAD.find('frontmatter/titlepage/author').text
else:
CSheet.find('ProcessedBy').text = eadheader_root.find('filedesc/titlestmt/author').text
#<sponsor>
if eadheader_root.find('filedesc/titlestmt/sponsor') is None:
if EAD.find('frontmatter/titlepage/sponsor') is None:
pass
else:
CSheet.find('Sponsor').text = EAD.find('frontmatter/titlepage/sponsor').text
else:
CSheet.find('Sponsor').text = eadheader_root.find('filedesc/titlestmt/sponsor').text
# Edition Statement Section
from editionstmt import editionstmt
editionstmt(eadheader_root, CSheet)
# Publication Statement Section
from publicationstmt import publicationstmt
publicationstmt(eadheader_root, CSheet)
# Fall back to the <frontmatter> title page for the publisher name when the
# publication statement above did not supply one.
publisher_name = CSheet.find('Publisher/PublisherName')
titlepage_publisher = EAD.find('frontmatter/titlepage/publisher')
if publisher_name is not None and not publisher_name.text:
    if titlepage_publisher is not None and titlepage_publisher.text:
        publisher_name.text = titlepage_publisher.text
if CSheet.find('PublicationDate').text:
pass
else:
if EAD.find('frontmatter/titlepage/date') is None:
pass
else:
CSheet.find('PublicationDate').text = EAD.find('frontmatter/titlepage/date').text
if 'normal' in EAD.find('frontmatter/titlepage/date').attrib:
CSheet.find('PublicationDateNormal').text = EAD.find('frontmatter/titlepage/date').attrib['normal']
# Series Statement Section
from seriesstmt import seriesstmt
seriesstmt(eadheader_root, CSheet)
# Note Statement Section
if eadheader_root.find('filedesc/notestmt') is None:
pass
else:
for note in eadheader_root.find('filedesc/notestmt').iter('p'):
if note.text:
note_element = ET.Element('NoteStatement')
CSheet.find('NoteStatements').append(note_element)
note_element.text = note.text
# Profile Description
if eadheader_root.find('profiledesc') is None:
pass
else:
# EAD creation and EAD creation date
if eadheader_root.find('profiledesc/creation') is None:
pass
else:
CSheet.find('EADCreator').text = eadheader_root.find('profiledesc/creation').text
if eadheader_root.find('profiledesc/creation/date') is None:
pass
else:
if eadheader_root.find('profiledesc/creation/date').tail:
CSheet.find('EADCreator').text = CSheet.find('EADCreator').text + eadheader_root.find('profiledesc/creation/date').tail
CSheet.find('EADCreationDate').text = eadheader_root.find('profiledesc/creation/date').text
if 'normal' in eadheader_root.find('profiledesc/creation/date').attrib:
CSheet.find('EADCreationDateNormal').text = eadheader_root.find('profiledesc/creation/date').attrib['normal']
# Finding Aid Languages
if eadheader_root.find('profiledesc/langusage') is None:
pass
else:
if eadheader_root.find('profiledesc/langusage/language') is None:
if eadheader_root.find('profiledesc/langusage').text:
CSheet.find('FindingAidLanguages/FALanguage/Lang').text = eadheader_root.find('profiledesc/langusage').text
else:
CSheet.find('FindingAidLanguages').clear()
for lang in eadheader_root.find('profiledesc/langusage'):
if lang.tag == "language":
FALanguage_element = ET.Element('FALanguage')
CSheet.find('FindingAidLanguages').append(FALanguage_element)
Lang_element = ET.Element('Lang')
FALanguage_element.append(Lang_element)
Lang_element.text = lang.text
LangCode_element = ET.Element('LangCode')
FALanguage_element.append(LangCode_element)
if "langcode" in lang.attrib:
LangCode_element.text = lang.attrib['langcode']
Script_element = ET.Element('Script')
FALanguage_element.append(Script_element)
ScriptCode_element = ET.Element('ScriptCode')
FALanguage_element.append(ScriptCode_element)
LangNote_element = ET.Element('LangNote')
FALanguage_element.append(LangNote_element)
if eadheader_root.find('profiledesc/langusage').text:
LangNote_element.text = eadheader_root.find('profiledesc/langusage').text
langusage_tail = eadheader_root.find('profiledesc/langusage').tail
if langusage_tail and langusage_tail.strip():
    LangNote_element.text = (LangNote_element.text or '') + langusage_tail
# Description Rules
if eadheader_root.find('profiledesc/descrules') is None:
pass
else:
CSheet.find('LocalConventions/Convention/Citation').text = mixed_content(eadheader_root.find('profiledesc/descrules'))
# Revisions
if eadheader_root.find('revisiondesc') is None:
pass
else:
revision_root = eadheader_root.find('revisiondesc')
if revision_root.find('change') is None:
if revision_root.find('list') is None:
pass
else: #if list
CSheet.find('Revisions').clear()
for item in revision_root.find('list'):
Event_element = ET.Element('Event')
CSheet.find('Revisions').append(Event_element)
Type_element = ET.Element('Type')
Event_element.append(Type_element)
Date_element = ET.Element('Date')
Event_element.append(Date_element)
DateNormal_element = ET.Element('DateNormal')
Event_element.append(DateNormal_element)
AgentType_element = ET.Element('AgentType')
Event_element.append(AgentType_element)
Agent_element = ET.Element('Agent')
Event_element.append(Agent_element)
Description_element = ET.Element('Description')
Event_element.append(Description_element)
if item.find('date') is None:
pass
else:
Date_element.text = item.find('date').text
if 'normal' in item.find('date').attrib:
DateNormal_element.text = item.find('date').attrib['normal']
if item.text:
Description_element.text = item.text
if item.tail:
Description_element.text = Description_element.text + item.tail
else: # if change
if revision_root.find('list') is None:
CSheet.find('Revisions').clear()
for change in revision_root:
if change.tag == "change":
Event_element = ET.Element('Event')
CSheet.find('Revisions').append(Event_element)
Type_element = ET.Element('Type')
Event_element.append(Type_element)
Date_element = ET.Element('Date')
Event_element.append(Date_element)
DateNormal_element = ET.Element('DateNormal')
Event_element.append(DateNormal_element)
AgentType_element = ET.Element('AgentType')
Event_element.append(AgentType_element)
Agent_element = ET.Element('Agent')
Event_element.append(Agent_element)
Description_element = ET.Element('Description')
Event_element.append(Description_element)
if change.find('date') is None:
pass
else:
Date_element.text = change.find('date').text
if 'normal' in change.find('date').attrib:
DateNormal_element.text = change.find('date').attrib['normal']
if change.find('item') is None:
pass
else:
Description_element.text = change.find('item').text
else: #if list and change
CSheet.find('Revisions').clear()
for change in revision_root.findall('change'):
Event_element = ET.Element('Event')
CSheet.find('Revisions').append(Event_element)
Type_element = ET.Element('Type')
Event_element.append(Type_element)
Date_element = ET.Element('Date')
Event_element.append(Date_element)
DateNormal_element = ET.Element('DateNormal')
Event_element.append(DateNormal_element)
AgentType_element = ET.Element('AgentType')
Event_element.append(AgentType_element)
Agent_element = ET.Element('Agent')
Event_element.append(Agent_element)
Description_element = ET.Element('Description')
Event_element.append(Description_element)
if change.find('date') is None:
pass
else:
Date_element.text = change.find('date').text
if 'normal' in change.find('date').attrib:
DateNormal_element.text = change.find('date').attrib['normal']
if change.find('item') is None:
pass
else:
Description_element.text = change.find('item').text
for item in revision_root.find('list'):
Event_element = ET.Element('Event')
CSheet.find('Revisions').append(Event_element)
Type_element = ET.Element('Type')
Event_element.append(Type_element)
Date_element = ET.Element('Date')
Event_element.append(Date_element)
DateNormal_element = ET.Element('DateNormal')
Event_element.append(DateNormal_element)
AgentType_element = ET.Element('AgentType')
Event_element.append(AgentType_element)
Agent_element = ET.Element('Agent')
Event_element.append(Agent_element)
Description_element = ET.Element('Description')
Event_element.append(Description_element)
if item.find('date') is None:
pass
else:
Date_element.text = item.find('date').text
if 'normal' in item.find('date').attrib:
DateNormal_element.text = item.find('date').attrib['normal']
if item.text:
Description_element.text = item.text
if item.tail:
Description_element.text = Description_element.text + item.tail
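# Hedged refactoring sketch (not wired into the code above; the helper name is
# illustrative): the three revision branches each build the same <Event>
# skeleton by hand. A helper along these lines could create the Event element
# and its fixed children in one place and return them for population.
def _new_revision_event(revisions_element):
    """Append an empty <Event> skeleton to <Revisions> and return its children keyed by tag."""
    event_element = ET.Element('Event')
    revisions_element.append(event_element)
    children = {}
    for tag in ('Type', 'Date', 'DateNormal', 'AgentType', 'Agent', 'Description'):
        child = ET.Element(tag)
        event_element.append(child)
        children[tag] = child
    return children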
from unittest.mock import patch
from hc.payments.models import Subscription
from hc.test import BaseTestCase
class UpdateSubscriptionTestCase(BaseTestCase):
def _setup_mock(self, mock):
""" Set up Braintree calls that the controller will use. """
mock.Subscription.create.return_value.is_success = True
mock.Subscription.create.return_value.subscription.id = "t-sub-id"
def run_update(self, plan_id="P20", nonce="fake-nonce"):
form = {"plan_id": plan_id, "nonce": nonce}
self.client.login(username="alice@example.org", password="password")
return self.client.post("/pricing/update/", form, follow=True)
@patch("hc.payments.models.braintree")
def test_it_works(self, mock):
self._setup_mock(mock)
self.profile.sms_limit = 0
self.profile.sms_sent = 1
self.profile.call_limit = 0
self.profile.calls_sent = 1
self.profile.save()
r = self.run_update()
self.assertRedirects(r, "/accounts/profile/billing/")
self.assertContains(r, "Your billing plan has been updated!")
# Subscription should be filled out:
sub = Subscription.objects.get(user=self.alice)
self.assertEqual(sub.subscription_id, "t-sub-id")
self.assertEqual(sub.plan_id, "P20")
self.assertEqual(sub.plan_name, "Business ($20 / month)")
# User's profile should have higher limits
self.profile.refresh_from_db()
self.assertEqual(self.profile.ping_log_limit, 1000)
self.assertEqual(self.profile.check_limit, 100)
self.assertEqual(self.profile.team_limit, 9)
self.assertEqual(self.profile.sms_limit, 50)
self.assertEqual(self.profile.sms_sent, 0)
self.assertEqual(self.profile.call_limit, 20)
self.assertEqual(self.profile.calls_sent, 0)
# braintree.Subscription.cancel should not have been called
# because there was no previous subscription
self.assertFalse(mock.Subscription.cancel.called)
self.assertTrue(mock.Subscription.create.called)
@patch("hc.payments.models.braintree")
def test_supporter_works(self, mock):
self._setup_mock(mock)
self.profile.sms_limit = 0
self.profile.sms_sent = 1
self.profile.call_limit = 0
self.profile.calls_sent = 1
self.profile.save()
r = self.run_update("S5")
self.assertRedirects(r, "/accounts/profile/billing/")
# Subscription should be filled out:
sub = Subscription.objects.get(user=self.alice)
self.assertEqual(sub.subscription_id, "t-sub-id")
self.assertEqual(sub.plan_id, "S5")
self.assertEqual(sub.plan_name, "Supporter ($5 / month)")
# User's profile should have adjusted limits
self.profile.refresh_from_db()
self.assertEqual(self.profile.ping_log_limit, 1000)
self.assertEqual(self.profile.check_limit, 20)
self.assertEqual(self.profile.team_limit, 2)
self.assertEqual(self.profile.sms_limit, 5)
self.assertEqual(self.profile.sms_sent, 0)
self.assertEqual(self.profile.call_limit, 5)
self.assertEqual(self.profile.calls_sent, 0)
# braintree.Subscription.cancel should not have been called
assert not mock.Subscription.cancel.called
@patch("hc.payments.models.braintree")
def test_yearly_works(self, mock):
self._setup_mock(mock)
self.profile.sms_limit = 0
self.profile.sms_sent = 1
self.profile.call_limit = 0
self.profile.calls_sent = 1
self.profile.save()
r = self.run_update("Y192")
self.assertRedirects(r, "/accounts/profile/billing/")
# Subscription should be filled out:
sub = Subscription.objects.get(user=self.alice)
self.assertEqual(sub.subscription_id, "t-sub-id")
self.assertEqual(sub.plan_id, "Y192")
self.assertEqual(sub.plan_name, "Business ($192 / year)")
# User's profile should have higher limits
self.profile.refresh_from_db()
self.assertEqual(self.profile.ping_log_limit, 1000)
self.assertEqual(self.profile.check_limit, 100)
self.assertEqual(self.profile.team_limit, 9)
self.assertEqual(self.profile.sms_limit, 50)
self.assertEqual(self.profile.sms_sent, 0)
self.assertEqual(self.profile.call_limit, 20)
self.assertEqual(self.profile.calls_sent, 0)
# braintree.Subscription.cancel should not have been called
assert not mock.Subscription.cancel.called
@patch("hc.payments.models.braintree")
def test_plus_works(self, mock):
self._setup_mock(mock)
self.profile.sms_limit = 0
self.profile.sms_sent = 1
self.profile.call_limit = 0
self.profile.calls_sent = 1
self.profile.save()
r = self.run_update("P80")
self.assertRedirects(r, "/accounts/profile/billing/")
# Subscription should be filled out:
sub = Subscription.objects.get(user=self.alice)
self.assertEqual(sub.subscription_id, "t-sub-id")
self.assertEqual(sub.plan_id, "P80")
self.assertEqual(sub.plan_name, "Business Plus ($80 / month)")
# User's profile should have higher limits
self.profile.refresh_from_db()
self.assertEqual(self.profile.ping_log_limit, 1000)
self.assertEqual(self.profile.check_limit, 1000)
self.assertEqual(self.profile.team_limit, 500)
self.assertEqual(self.profile.sms_limit, 500)
self.assertEqual(self.profile.sms_sent, 0)
self.assertEqual(self.profile.call_limit, 100)
self.assertEqual(self.profile.calls_sent, 0)
# braintree.Subscription.cancel should not have been called
assert not mock.Subscription.cancel.called
@patch("hc.payments.models.braintree")
def test_it_cancels(self, mock):
self._setup_mock(mock)
self.sub = Subscription(user=self.alice)
self.sub.subscription_id = "test-id"
self.sub.plan_id = "P20"
self.sub.plan_name = "Business ($20/mo)"
self.sub.save()
self.profile.sms_limit = 1
self.profile.sms_sent = 1
self.profile.call_limit = 1
self.profile.calls_sent = 1
self.profile.save()
r = self.run_update("")
self.assertRedirects(r, "/accounts/profile/billing/")
self.assertContains(r, "Your billing plan has been updated!")
# Subscription should be cleared
sub = Subscription.objects.get(user=self.alice)
self.assertEqual(sub.subscription_id, "")
self.assertEqual(sub.plan_id, "")
self.assertEqual(sub.plan_name, "")
# User's profile should have standard limits
self.profile.refresh_from_db()
self.assertEqual(self.profile.ping_log_limit, 100)
self.assertEqual(self.profile.check_limit, 20)
self.assertEqual(self.profile.team_limit, 2)
self.assertEqual(self.profile.sms_limit, 5)
self.assertEqual(self.profile.call_limit, 0)
self.assertTrue(mock.Subscription.cancel.called)
def test_bad_plan_id(self):
r = self.run_update(plan_id="this-is-wrong")
self.assertEqual(r.status_code, 400)
@patch("hc.payments.models.braintree")
def test_it_cancels_previous_subscription(self, mock):
self._setup_mock(mock)
sub = Subscription(user=self.alice)
sub.subscription_id = "prev-sub"
sub.save()
r = self.run_update()
self.assertRedirects(r, "/accounts/profile/billing/")
self.assertTrue(mock.Subscription.cancel.called)
@patch("hc.payments.models.braintree")
def test_subscription_creation_failure(self, mock):
mock.Subscription.create.return_value.is_success = False
mock.Subscription.create.return_value.message = "sub failure"
r = self.run_update()
self.assertRedirects(r, "/accounts/profile/billing/")
self.assertContains(r, "sub failure")
@patch("hc.payments.models.braintree")
def test_failed_plan_change_resets_limits(self, mock):
# Initial state: the user has a subscription and a high check limit:
sub = Subscription.objects.for_user(self.alice)
sub.subscription_id = "old-sub-id"
sub.save()
self.profile.check_limit = 1000
self.profile.save()
# Simulate a subscription creation failure:
mock.Subscription.create.return_value.is_success = False
mock.Subscription.create.return_value.message = "sub failure"
r = self.run_update()
# It should cancel the current plan
self.assertTrue(mock.Subscription.cancel.called)
# It should clear out the limits:
self.profile.refresh_from_db()
self.assertEqual(self.profile.check_limit, 20)
# And it should show the error message from API:
self.assertRedirects(r, "/accounts/profile/billing/")
self.assertContains(r, "sub failure")
@patch("hc.payments.models.braintree")
def test_it_updates_payment_method(self, mock):
# Initial state: the user has a subscription and a high check limit:
sub = Subscription.objects.for_user(self.alice)
sub.plan_id = "P20"
sub.subscription_id = "old-sub-id"
sub.save()
r = self.run_update()
# It should update the existing subscription
self.assertTrue(mock.Subscription.update.called)
self.assertRedirects(r, "/accounts/profile/billing/")
self.assertContains(r, "Your payment method has been updated!")
from .base import BaseLibLinear, BaseSVC, BaseLibSVM
from ..base import RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin
from ..feature_selection.from_model import _LearntSelectorMixin
class LinearSVC(BaseLibLinear, LinearClassifierMixin, _LearntSelectorMixin,
SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better (to large numbers of
samples).
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'l1' or 'l2' (default='l2')
Specifies the loss function. 'l1' is the hinge loss (standard SVM)
while 'l2' is the squared hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to `coef_`
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria
multi_class: string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
because it is consistent, it is seldom used in practice, rarely leads to
better accuracy and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : int, default: 0
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
Attributes
----------
`coef_` : array, shape = [n_features] if n_classes == 2 \
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that \
follows the internal memory layout of liblinear.
`intercept_` : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
The underlying implementation (liblinear) uses a sparse internal
representation for the data that will incur a memory copy.
**References:**
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
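Examples
--------
A minimal usage sketch on a toy two-class dataset (the data values below
are purely illustrative):

>>> import numpy as np
>>> from sklearn.svm import LinearSVC
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> clf = LinearSVC().fit(X, y)
>>> predictions = clf.predict([[-0.8, -1]])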
"""
def __init__(self, penalty='l2', loss='l2', dual=True, tol=1e-4, C=1.0,
multi_class='ovr', fit_intercept=True, intercept_scaling=1,
class_weight=None, verbose=0, random_state=None):
super(LinearSVC, self).__init__(
penalty=penalty, loss=loss, dual=dual, tol=tol, C=C,
multi_class=multi_class, fit_intercept=fit_intercept,
intercept_scaling=intercept_scaling,
class_weight=class_weight, verbose=verbose,
random_state=random_state)
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples, which makes it hard
to scale to datasets with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each,
see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
.. The narrative documentation is available at http://scikit-learn.org/
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default=0.0)
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 0.0 then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability: boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking: boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
`support_` : array-like, shape = [n_SV]
Index of support vectors.
`support_vectors_` : array-like, shape = [n_SV, n_features]
Support vectors.
`n_support_` : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
`dual_coef_` : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function. \
For multiclass, coefficient for all 1-vs-1 classifiers. \
The layout of the coefficients in the multiclass case is somewhat \
non-trivial. See the section about multi-class classification in the \
SVM section of the User Guide for details.
`coef_` : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`
`intercept_` : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, degree=3,
gamma=0.0, kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison elements.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma=0.0,
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, random_state=None):
super(SVC, self).__init__(
'c_svc', kernel, degree, gamma, coef0, tol, C, 0., 0., shrinking,
probability, cache_size, class_weight, verbose, max_iter,
random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
degree of kernel function
is significant only in poly, rbf, sigmoid
gamma : float, optional (default=0.0)
kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
will be taken.
coef0 : float, optional (default=0.0)
independent term in kernel function. It is only significant
in poly/sigmoid.
probability: boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking: boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
`support_` : array-like, shape = [n_SV]
Index of support vectors.
`support_vectors_` : array-like, shape = [n_SV, n_features]
Support vectors.
`n_support_` : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
`dual_coef_` : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function. \
For multiclass, coefficient for all 1-vs-1 classifiers. \
The layout of the coefficients in the multiclass case is somewhat \
non-trivial. See the section about multi-class classification in \
the SVM section of the User Guide for details.
`coef_` : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`
`intercept_` : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, coef0=0.0, degree=3, gamma=0.0, kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma=0.0,
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, verbose=False, max_iter=-1,
random_state=None):
super(NuSVC, self).__init__(
'nu_svc', kernel, degree, gamma, coef0, tol, 0., nu, 0., shrinking,
probability, cache_size, None, verbose, max_iter, random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Parameters
----------
C : float, optional (default=1.0)
penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
degree of kernel function
is significant only in poly, rbf, sigmoid
gamma : float, optional (default=0.0)
kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
will be taken.
coef0 : float, optional (default=0.0)
independent term in kernel function. It is only significant
in poly/sigmoid.
probability: boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking: boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
`support_` : array-like, shape = [n_SV]
Index of support vectors.
`support_vectors_` : array-like, shape = [nSV, n_features]
Support vectors.
`dual_coef_` : array, shape = [n_classes-1, n_SV]
Coefficients of the support vector in the decision function.
`coef_` : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`
`intercept_` : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma=0.0,
kernel='rbf', max_iter=-1, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
"""
def __init__(self, kernel='rbf', degree=3, gamma=0.0, coef0=0.0, tol=1e-3,
C=1.0, epsilon=0.1, shrinking=True, probability=False,
cache_size=200, verbose=False, max_iter=-1,
random_state=None):
super(SVR, self).__init__(
'epsilon_svr', kernel, degree, gamma, coef0, tol, C, 0., epsilon,
shrinking, probability, cache_size, None, verbose,
max_iter, random_state)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of SVR.
The implementation is based on libsvm.
Parameters
----------
C : float, optional (default=1.0)
penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
degree of kernel function
is significant only in poly, rbf, sigmoid
gamma : float, optional (default=0.0)
kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
will be taken.
coef0 : float, optional (default=0.0)
independent term in kernel function. It is only significant
in poly/sigmoid.
probability: boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking: boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
`support_` : array-like, shape = [n_SV]
Index of support vectors.
`support_vectors_` : array-like, shape = [nSV, n_features]
Support vectors.
`dual_coef_` : array, shape = [n_classes-1, n_SV]
Coefficients of the support vector in the decision function.
`coef_` : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`
`intercept_` : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma=0.0, kernel='rbf',
max_iter=-1, nu=0.1, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma=0.0, coef0=0.0, shrinking=True,
probability=False, tol=1e-3, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(NuSVR, self).__init__(
'nu_svr', kernel, degree, gamma, coef0, tol, C, nu, 0., shrinking,
probability, cache_size, None, verbose, max_iter, random_state)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outliers Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default=0.0)
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 0.0 then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking: boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
`support_` : array-like, shape = [n_SV]
Index of support vectors.
    `support_vectors_` : array-like, shape = [n_SV, n_features]
Support vectors.
`dual_coef_` : array, shape = [n_classes-1, n_SV]
Coefficient of the support vector in the decision function.
`coef_` : array, shape = [n_classes-1, n_features]
        Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
`intercept_` : array, shape = [n_classes-1]
Constants in decision function.
"""
def __init__(self, kernel='rbf', degree=3, gamma=0.0, coef0=0.0, tol=1e-3,
nu=0.5, shrinking=True, cache_size=200, verbose=False,
max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, [], sample_weight=sample_weight,
**params)
return self
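# A minimal usage sketch for OneClassSVM (not part of the original module),
# mirroring the doctest style of the NuSVR example above; it relies on the
# predict() method inherited from BaseLibSVM and is shown only for
# illustration:
#
#     >>> import numpy as np
#     >>> from sklearn.svm import OneClassSVM
#     >>> rng = np.random.RandomState(0)
#     >>> X = 0.3 * rng.randn(100, 2)
#     >>> clf = OneClassSVM(nu=0.1, kernel='rbf', gamma=0.1).fit(X)
#     >>> labels = clf.predict(X)  # +1 for inliers, -1 for outliers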
|
|
# -*- test-case-name: twisted.web.test -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Interface definitions for L{twisted.web}.
@var UNKNOWN_LENGTH: An opaque object which may be used as the value of
L{IBodyProducer.length} to indicate that the length of the entity
body is not known in advance.
"""
from zope.interface import Interface, Attribute
from twisted.python.compat import _PY3
from twisted.internet.interfaces import IPushProducer
if not _PY3:
# Re-enable when cred is ported to Python 3. Fix as part of #6176:
from twisted.cred.credentials import IUsernameDigestHash
class IRequest(Interface):
"""
An HTTP request.
@since: 9.0
"""
method = Attribute("A C{str} giving the HTTP method that was used.")
uri = Attribute(
"A C{str} giving the full encoded URI which was requested (including "
"query arguments).")
path = Attribute(
"A C{str} giving the encoded query path of the request URI.")
args = Attribute(
"A mapping of decoded query argument names as C{str} to "
"corresponding query argument values as C{list}s of C{str}. "
"For example, for a URI with C{'foo=bar&foo=baz&quux=spam'} "
"for its query part, C{args} will be C{{'foo': ['bar', 'baz'], "
"'quux': ['spam']}}.")
received_headers = Attribute(
"Backwards-compatibility access to C{requestHeaders}. Use "
"C{requestHeaders} instead. C{received_headers} behaves mostly "
"like a C{dict} and does not provide access to all header values.")
requestHeaders = Attribute(
"A L{http_headers.Headers} instance giving all received HTTP request "
"headers.")
content = Attribute(
"A file-like object giving the request body. This may be a file on "
"disk, a C{StringIO}, or some other type. The implementation is free "
"to decide on a per-request basis.")
headers = Attribute(
"Backwards-compatibility access to C{responseHeaders}. Use"
"C{responseHeaders} instead. C{headers} behaves mostly like a "
"C{dict} and does not provide access to all header values nor "
"does it allow multiple values for one header to be set.")
responseHeaders = Attribute(
"A L{http_headers.Headers} instance holding all HTTP response "
"headers to be sent.")
def getHeader(key):
"""
Get an HTTP request header.
@type key: C{str}
@param key: The name of the header to get the value of.
@rtype: C{str} or C{NoneType}
@return: The value of the specified header, or C{None} if that header
was not present in the request.
"""
def getCookie(key):
"""
Get a cookie that was sent from the network.
"""
def getAllHeaders():
"""
Return dictionary mapping the names of all received headers to the last
value received for each.
Since this method does not return all header information,
C{requestHeaders.getAllRawHeaders()} may be preferred.
"""
def getRequestHostname():
"""
Get the hostname that the user passed in to the request.
This will either use the Host: header (if it is available) or the
host we are listening on if the header is unavailable.
@returns: the requested hostname
@rtype: C{str}
"""
def getHost():
"""
Get my originally requesting transport's host.
@return: An L{IAddress<twisted.internet.interfaces.IAddress>}.
"""
def getClientIP():
"""
Return the IP address of the client who submitted this request.
@returns: the client IP address or C{None} if the request was submitted
over a transport where IP addresses do not make sense.
@rtype: L{str} or C{NoneType}
"""
def getClient():
"""
Return the hostname of the IP address of the client who submitted this
request, if possible.
This method is B{deprecated}. See L{getClientIP} instead.
@rtype: C{NoneType} or L{str}
@return: The canonical hostname of the client, as determined by
performing a name lookup on the IP address of the client.
"""
def getUser():
"""
Return the HTTP user sent with this request, if any.
If no user was supplied, return the empty string.
@returns: the HTTP user, if any
@rtype: C{str}
"""
def getPassword():
"""
Return the HTTP password sent with this request, if any.
If no password was supplied, return the empty string.
@returns: the HTTP password, if any
@rtype: C{str}
"""
def isSecure():
"""
Return True if this request is using a secure transport.
Normally this method returns True if this request's HTTPChannel
instance is using a transport that implements ISSLTransport.
This will also return True if setHost() has been called
with ssl=True.
@returns: True if this request is secure
@rtype: C{bool}
"""
def getSession(sessionInterface=None):
"""
Look up the session associated with this request or create a new one if
there is not one.
@return: The L{Session} instance identified by the session cookie in
the request, or the C{sessionInterface} component of that session
if C{sessionInterface} is specified.
"""
def URLPath():
"""
        @return: A L{URLPath} instance which identifies the URL for which this
            request was made.
"""
def prePathURL():
"""
@return: At any time during resource traversal, a L{str} giving an
absolute URL to the most nested resource which has yet been
reached.
"""
def rememberRootURL():
"""
Remember the currently-processed part of the URL for later
recalling.
"""
def getRootURL():
"""
Get a previously-remembered URL.
"""
# Methods for outgoing response
def finish():
"""
Indicate that the response to this request is complete.
"""
def write(data):
"""
Write some data to the body of the response to this request. Response
headers are written the first time this method is called, after which
new response headers may not be added.
"""
def addCookie(k, v, expires=None, domain=None, path=None, max_age=None, comment=None, secure=None):
"""
Set an outgoing HTTP cookie.
In general, you should consider using sessions instead of cookies, see
L{twisted.web.server.Request.getSession} and the
L{twisted.web.server.Session} class for details.
"""
def setResponseCode(code, message=None):
"""
Set the HTTP response code.
"""
def setHeader(k, v):
"""
Set an HTTP response header. Overrides any previously set values for
this header.
        @type k: C{str}
        @param k: The name of the header for which to set the value.
        @type v: C{str}
        @param v: The value to set for the named header.
"""
def redirect(url):
"""
Utility function that does a redirect.
The request should have finish() called after this.
"""
def setLastModified(when):
"""
Set the C{Last-Modified} time for the response to this request.
If I am called more than once, I ignore attempts to set Last-Modified
earlier, only replacing the Last-Modified time if it is to a later
value.
If I am a conditional request, I may modify my response code to
L{NOT_MODIFIED<http.NOT_MODIFIED>} if appropriate for the time given.
@param when: The last time the resource being returned was modified, in
seconds since the epoch.
@type when: L{int}, L{long} or L{float}
@return: If I am a C{If-Modified-Since} conditional request and the time
given is not newer than the condition, I return
L{CACHED<http.CACHED>} to indicate that you should write no body.
Otherwise, I return a false value.
"""
def setETag(etag):
"""
Set an C{entity tag} for the outgoing response.
That's "entity tag" as in the HTTP/1.1 I{ETag} header, "used for
comparing two or more entities from the same requested resource."
If I am a conditional request, I may modify my response code to
L{NOT_MODIFIED<http.NOT_MODIFIED>} or
L{PRECONDITION_FAILED<http.PRECONDITION_FAILED>}, if appropriate for the
tag given.
@param etag: The entity tag for the resource being returned.
@type etag: C{str}
@return: If I am a C{If-None-Match} conditional request and the tag
matches one in the request, I return L{CACHED<http.CACHED>} to
indicate that you should write no body. Otherwise, I return a
false value.
"""
def setHost(host, port, ssl=0):
"""
Change the host and port the request thinks it's using.
This method is useful for working with reverse HTTP proxies (e.g. both
Squid and Apache's mod_proxy can do this), when the address the HTTP
client is using is different than the one we're listening on.
For example, Apache may be listening on https://www.example.com, and
then forwarding requests to http://localhost:8080, but we don't want
HTML produced by Twisted to say 'http://localhost:8080', they should
say 'https://www.example.com', so we do::
request.setHost('www.example.com', 443, ssl=1)
"""
class ICredentialFactory(Interface):
"""
A credential factory defines a way to generate a particular kind of
authentication challenge and a way to interpret the responses to these
challenges. It creates
L{ICredentials<twisted.cred.credentials.ICredentials>} providers from
responses. These objects will be used with L{twisted.cred} to authenticate
    and authorize requests.
"""
scheme = Attribute(
"A C{str} giving the name of the authentication scheme with which "
"this factory is associated. For example, C{'basic'} or C{'digest'}.")
def getChallenge(request):
"""
Generate a new challenge to be sent to a client.
        @type request: L{twisted.web.http.Request}
        @param request: The request whose response will include this
            challenge.
@rtype: C{dict}
@return: A mapping from C{str} challenge fields to associated C{str}
values.
"""
def decode(response, request):
"""
Create a credentials object from the given response.
@type response: C{str}
@param response: scheme specific response string
@type request: L{twisted.web.http.Request}
@param request: The request being processed (from which the response
was taken).
@raise twisted.cred.error.LoginFailed: If the response is invalid.
@rtype: L{twisted.cred.credentials.ICredentials} provider
@return: The credentials represented by the given response.
"""
class IBodyProducer(IPushProducer):
"""
Objects which provide L{IBodyProducer} write bytes to an object which
provides L{IConsumer<twisted.internet.interfaces.IConsumer>} by calling its
C{write} method repeatedly.
L{IBodyProducer} providers may start producing as soon as they have an
L{IConsumer<twisted.internet.interfaces.IConsumer>} provider. That is, they
should not wait for a C{resumeProducing} call to begin writing data.
L{IConsumer.unregisterProducer<twisted.internet.interfaces.IConsumer.unregisterProducer>}
must not be called. Instead, the
L{Deferred<twisted.internet.defer.Deferred>} returned from C{startProducing}
must be fired when all bytes have been written.
L{IConsumer.write<twisted.internet.interfaces.IConsumer.write>} may
synchronously invoke any of C{pauseProducing}, C{resumeProducing}, or
C{stopProducing}. These methods must be implemented with this in mind.
@since: 9.0
"""
# Despite the restrictions above and the additional requirements of
# stopProducing documented below, this interface still needs to be an
# IPushProducer subclass. Providers of it will be passed to IConsumer
# providers which only know about IPushProducer and IPullProducer, not
# about this interface. This interface needs to remain close enough to one
# of those interfaces for consumers to work with it.
length = Attribute(
"""
C{length} is a C{int} indicating how many bytes in total this
L{IBodyProducer} will write to the consumer or L{UNKNOWN_LENGTH}
if this is not known in advance.
""")
def startProducing(consumer):
"""
Start producing to the given
L{IConsumer<twisted.internet.interfaces.IConsumer>} provider.
@return: A L{Deferred<twisted.internet.defer.Deferred>} which fires with
C{None} when all bytes have been produced or with a
L{Failure<twisted.python.failure.Failure>} if there is any problem
before all bytes have been produced.
"""
def stopProducing():
"""
In addition to the standard behavior of
L{IProducer.stopProducing<twisted.internet.interfaces.IProducer.stopProducing>}
(stop producing data), make sure the
L{Deferred<twisted.internet.defer.Deferred>} returned by
C{startProducing} is never fired.
"""
class IRenderable(Interface):
"""
An L{IRenderable} is an object that may be rendered by the
L{twisted.web.template} templating system.
"""
def lookupRenderMethod(name):
"""
Look up and return the render method associated with the given name.
@type name: C{str}
@param name: The value of a render directive encountered in the
document returned by a call to L{IRenderable.render}.
@return: A two-argument callable which will be invoked with the request
being responded to and the tag object on which the render directive
was encountered.
"""
def render(request):
"""
Get the document for this L{IRenderable}.
@type request: L{IRequest} provider or C{NoneType}
@param request: The request in response to which this method is being
invoked.
@return: An object which can be flattened.
"""
class ITemplateLoader(Interface):
"""
A loader for templates; something usable as a value for
L{twisted.web.template.Element}'s C{loader} attribute.
"""
def load():
"""
Load a template suitable for rendering.
@return: a C{list} of C{list}s, C{unicode} objects, C{Element}s and
other L{IRenderable} providers.
"""
class IResponse(Interface):
"""
An object representing an HTTP response received from an HTTP server.
@since: 11.1
"""
version = Attribute(
"A three-tuple describing the protocol and protocol version "
"of the response. The first element is of type C{str}, the second "
"and third are of type C{int}. For example, C{('HTTP', 1, 1)}.")
code = Attribute("The HTTP status code of this response, as a C{int}.")
phrase = Attribute(
"The HTTP reason phrase of this response, as a C{str}.")
headers = Attribute("The HTTP response L{Headers} of this response.")
length = Attribute(
"The C{int} number of bytes expected to be in the body of this "
"response or L{UNKNOWN_LENGTH} if the server did not indicate how "
"many bytes to expect. For I{HEAD} responses, this will be 0; if "
"the response includes a I{Content-Length} header, it will be "
"available in C{headers}.")
def deliverBody(protocol):
"""
Register an L{IProtocol<twisted.internet.interfaces.IProtocol>} provider
to receive the response body.
The protocol will be connected to a transport which provides
L{IPushProducer}. The protocol's C{connectionLost} method will be
called with:
- ResponseDone, which indicates that all bytes from the response
have been successfully delivered.
- PotentialDataLoss, which indicates that it cannot be determined
if the entire response body has been delivered. This only occurs
when making requests to HTTP servers which do not set
I{Content-Length} or a I{Transfer-Encoding} in the response.
- ResponseFailed, which indicates that some bytes from the response
were lost. The C{reasons} attribute of the exception may provide
more specific indications as to why.
"""
class _IRequestEncoder(Interface):
"""
An object encoding data passed to L{IRequest.write}, for example for
    compression purposes.
@since: 12.3
"""
def encode(data):
"""
Encode the data given and return the result.
@param data: The content to encode.
@type data: C{str}
@return: The encoded data.
@rtype: C{str}
"""
def finish():
"""
Callback called when the request is closing.
@return: If necessary, the pending data accumulated from previous
C{encode} calls.
@rtype: C{str}
"""
class _IRequestEncoderFactory(Interface):
"""
    A factory for returning L{_IRequestEncoder} instances.
@since: 12.3
"""
def encoderForRequest(request):
"""
If applicable, returns a L{_IRequestEncoder} instance which will encode
the request.
"""
UNKNOWN_LENGTH = u"twisted.web.iweb.UNKNOWN_LENGTH"
__all__ = [
"IUsernameDigestHash", "ICredentialFactory", "IRequest",
"IBodyProducer", "IRenderable", "IResponse", "_IRequestEncoder",
"_IRequestEncoderFactory",
"UNKNOWN_LENGTH"]
|
|
#-*- encoding: utf8 -*-
import ConfigParser
class Opt(object):
"""Opt is a Abstract Base Class which is used to wrap the options in configuration file.
"""
def __init__(self, key, group='default', default=None):
self.group = group
self.key = key
self.default = default
def parse(self, value):
""" The method is a abstract method which should be overrided in derived class.
:param value: The value load from configration file.
:return: The value after being parsed.
"""
raise NotImplementedError("Please implement the Class")
class BoolOpt(Opt):
""" The class is used to parse value to Boolean.
"""
def __init__(self, key, group='default', default=False):
super(BoolOpt, self).__init__(key, group, default)
def parse(self, value):
if isinstance(value, bool):
return value
elif isinstance(value, str):
if value.upper() in ('NO', '0', 'FALSE', 'WRONG'):
return False
else:
return True
elif isinstance(value, int):
return True if value else False
else:
return False
class StrOpt(Opt):
""" The class is used to parse value to String.
"""
def __init__(self, key, group='default', default=''):
super(StrOpt, self).__init__(key, group, default)
def parse(self, value):
if value is None:
return ''
try:
return str(value)
except Exception as e:
return str(self.default)
class IntOpt(Opt):
""" The class is used to parse value to Int.
"""
def __init__(self, key, group='default', default=0):
super(IntOpt, self).__init__(key, group, default)
def parse(self, value):
try:
return int(value)
except Exception as e:
return int(self.default)
class FloatOpt(Opt):
""" The class is used to parse value to Float.
"""
def __init__(self, key, group='default', default=0):
super(FloatOpt, self).__init__(key, group, default)
def parse(self, value):
try:
return float(value)
except Exception as e:
return float(self.default)
class ListOpt(Opt):
""" The class is used to parse value to Python List.
"""
def __init__(self, key, group='default', default=[], sep=','):
super(ListOpt, self).__init__(key, group, default)
self.sep = sep
def parse(self, value):
if value is None:
if self.default is not None:
return self.parse(self.default)
else:
return list()
if isinstance(value, list):
return value
elif isinstance(value, str):
return value.split(self.sep)
else:
rc = list()
rc.append(value)
return rc
class DictOpt(Opt):
""" The class is used to parse value to Python Dict.
"""
def __init__(self, key, group='default', default={}, sep=','):
super(DictOpt, self).__init__(key, group, default)
self.sep = sep
def parse(self, value):
if value is None:
if self.default is not None:
return self.parse(self.default)
else:
return dict()
if isinstance(value, dict):
return value
elif isinstance(value, str):
tmps = value.split(self.sep)
rc = dict()
for tmp in tmps:
try:
key, value = tmp.split(":")
except Exception as e:
key = tmp
value = None
rc[key] = value
return rc
else:
return dict()
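# Illustrative behaviour of the parsers above (not part of the original
# source): with the default separators,
#     ListOpt('l').parse('a,b,c')   -> ['a', 'b', 'c']
#     DictOpt('d').parse('x:1,y:2') -> {'x': '1', 'y': '2'}
# parsed values stay strings; no further type coercion is performed.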
class ConfigOpts(object):
""" The class used to parse the configuration file which is based on python standard module ConfigParser.
The class is not only to load the configuration file, but also support to override the value in
configuration file and parse the value to some known type such as dict, list via register_opts/
register_opt. The unregister_opt is used to eliminate the options(registered by register_opts/
register_opt) in the cache.
"""
def __init__(self):
self.__cache = dict()
self.fp = None
self.path = None
def setup(self, path):
self.path = path
self.fp = None
self._reload()
def _reload(self):
if self.fp:
self.fp = None
self.fp = ConfigParser.ConfigParser()
self.fp.read(self.path)
def _reload_group(self, group):
self.__cache[group] = dict()
ops = self.fp.options(group)
for op in ops:
self.__cache[group][op] = self.fp.get(group, op)
def __getitem__(self, group='default'):
if not self.fp:
raise Exception("Please invoke method setup first!")
if self.fp.has_section(group):
if group not in self.__cache:
self._reload_group(group)
return self.__cache[group]
else:
return None
def __iter__(self):
return self.__cache.__iter__()
def __len__(self):
return len(self.__cache)
def __getattr__(self, group='default'):
return self.__getitem__(group)
def register_opts(self, opts):
"""The method is used to register the options.
:param opts: opts must be a list or tuple and its elements must be derived class of Opt
:return:
"""
for opt in opts:
self.register_opt(opt)
def register_opt(self, opt):
if not isinstance(opt, Opt):
raise TypeError("Options type ERROR")
if opt.group not in self.__cache:
if self.fp.has_section(opt.group):
self._reload_group(opt.group)
else:
return
if not self.fp.has_option(opt.group, opt.key):
self.__cache[opt.group][opt.key] = opt.parse(opt.default)
else:
self.__cache[opt.group][opt.key] = opt.parse(self.fp.get(opt.group, opt.key))
def unregister_opt(self, key, group='default'):
""" The method is used to unregister the options
:param key: key in section of configuration file
:param group: section in configuration file, default is 'default'
:return: True: execute successfully; False: execute failure
"""
if group not in self.__cache:
return True
if key not in self.__cache[group]:
return True
try:
del self.__cache[group][key]
if not self.__cache[group]:
del self.__cache[group]
except Exception as e:
return False
return True
def get(self, key, group='default', default=None):
""" The method is used to get the corresponding value of the key.
:param key: key in section of configuration file
:param group: section in configuration file, default is 'default'
:param default: default value corresponding to the key given by client
:return: the corresponding value of the key.
"""
if not self.fp:
raise Exception("Please invoke method setup first!")
if group not in self.__cache:
self._reload_group(group)
try:
return self.__cache[group][key]
except KeyError as e:
if self.fp.has_option(group, key):
self.__cache[group][key] = self.fp.get(group, key)
else:
self.__cache[group][key] = default
return self.__cache[group][key]
CONF = ConfigOpts()
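# Hypothetical contents of ../etc/bsl.conf that the demo below assumes (not
# part of the original source; 'unknown' is intentionally absent so that the
# registered default is used):
#
#     [default]
#     bool = no
#     abc = 42
#     zip = 3.14
#
#     [skp]
#     s = a,b,c
#     d = x:1,y:2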
if __name__ == "__main__":
CONF['abc']
path = "../etc/bsl.conf"
CONF.setup(path)
opts = [BoolOpt('bool', 'default'),
IntOpt('abc', 'default'),
FloatOpt('zip', 'default'),
ListOpt('s', 'skp'),
DictOpt('d', 'skp'),
ListOpt('unknown', 'skp', default=['a','b','c'])]
CONF.register_opts(opts)
print type(CONF['default']['bool']), CONF['default']['bool']
print type(CONF['default']['abc']), CONF['default']['abc']
print type(CONF['default']['zip']), CONF['default']['zip']
print type(CONF['skp']['s']), CONF['skp']['s']
print type(CONF['skp']['d']), CONF['skp']['d']
print type(CONF['skp']['unknown']), CONF['skp']['unknown']
CONF.unregister_opt('unknown', 'skp')
print CONF.get('unknown', 'skp')
|
|
# Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import itertools
import socket
import sys
import time
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants as n_constants
from neutron_lib import context
from neutron_lib.utils import helpers
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
from osprofiler import profiler
import six
from neutron._i18n import _
from neutron.agent.l2 import l2_agent_extensions_manager as ext_manager
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as agent_sg_rpc
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.handlers import securitygroups_rpc as sg_rpc
from neutron.common import config as common_config
from neutron.common import profiler as setup_profiler
from neutron.common import topics
from neutron.plugins.ml2.drivers.mech_sriov.agent.common import config
from neutron.plugins.ml2.drivers.mech_sriov.agent.common \
import exceptions as exc
from neutron.plugins.ml2.drivers.mech_sriov.agent import eswitch_manager as esm
LOG = logging.getLogger(__name__)
class SriovNicSwitchRpcCallbacks(sg_rpc.SecurityGroupAgentRpcCallbackMixin):
# Set RPC API version to 1.0 by default.
# history
# 1.1 Support Security Group RPC (works with NoopFirewallDriver)
# 1.2 Support DVR (Distributed Virtual Router) RPC (not supported)
# 1.3 Added param devices_to_update to security_groups_provider_updated
# (works with NoopFirewallDriver)
# 1.4 Added support for network_update
target = oslo_messaging.Target(version='1.4')
def __init__(self, context, agent, sg_agent):
super(SriovNicSwitchRpcCallbacks, self).__init__()
self.context = context
self.agent = agent
self.sg_agent = sg_agent
def port_update(self, context, **kwargs):
LOG.debug("port_update received")
port = kwargs.get('port')
vnic_type = port.get(portbindings.VNIC_TYPE)
if vnic_type and vnic_type == portbindings.VNIC_DIRECT_PHYSICAL:
LOG.debug("The SR-IOV agent doesn't handle %s ports.",
portbindings.VNIC_DIRECT_PHYSICAL)
return
# Put the port mac address in the updated_devices set.
# Do not store port details, as if they're used for processing
# notifications there is no guarantee the notifications are
# processed in the same order as the relevant API requests.
mac = port['mac_address']
pci_slot = None
if port.get(portbindings.PROFILE):
pci_slot = port[portbindings.PROFILE].get('pci_slot')
if pci_slot:
self.agent.updated_devices.add((mac, pci_slot))
LOG.debug("port_update RPC received for port: %(id)s with MAC "
"%(mac)s and PCI slot %(pci_slot)s slot",
{'id': port['id'], 'mac': mac, 'pci_slot': pci_slot})
else:
LOG.debug("No PCI Slot for port %(id)s with MAC %(mac)s; "
"skipping", {'id': port['id'], 'mac': mac,
'pci_slot': pci_slot})
def network_update(self, context, **kwargs):
network_id = kwargs['network']['id']
LOG.debug("network_update message received for network "
"%(network_id)s, with ports: %(ports)s",
{'network_id': network_id,
'ports': self.agent.network_ports[network_id]})
for port_data in self.agent.network_ports[network_id]:
self.agent.updated_devices.add(port_data['device'])
@profiler.trace_cls("rpc")
class SriovNicSwitchAgent(object):
def __init__(self, physical_devices_mappings, exclude_devices,
polling_interval):
self.polling_interval = polling_interval
self.network_ports = collections.defaultdict(list)
self.conf = cfg.CONF
self.device_mappings = physical_devices_mappings
self.exclude_devices = exclude_devices
self.setup_eswitch_mgr(physical_devices_mappings,
exclude_devices)
# Stores port update notifications for processing in the main loop
self.updated_devices = set()
self.context = context.get_admin_context_without_session()
self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
self.sg_agent = agent_sg_rpc.SecurityGroupAgentRpc(self.context,
self.sg_plugin_rpc)
self._setup_rpc()
self.ext_manager = self._create_agent_extension_manager(
self.connection)
configurations = {'device_mappings': physical_devices_mappings,
'extensions': self.ext_manager.names()}
#TODO(mangelajo): optimize resource_versions (see ovs agent)
self.agent_state = {
'binary': 'neutron-sriov-nic-agent',
'host': self.conf.host,
'topic': n_constants.L2_AGENT_TOPIC,
'configurations': configurations,
'agent_type': n_constants.AGENT_TYPE_NIC_SWITCH,
'resource_versions': resources.LOCAL_RESOURCE_VERSIONS,
'start_flag': True}
# The initialization is complete; we can start receiving messages
self.connection.consume_in_threads()
# Initialize iteration counter
self.iter_num = 0
def _setup_rpc(self):
self.agent_id = 'nic-switch-agent.%s' % socket.gethostname()
LOG.info("RPC agent_id: %s", self.agent_id)
self.topic = topics.AGENT
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS)
# RPC network init
# Handle updates from service
self.endpoints = [SriovNicSwitchRpcCallbacks(self.context, self,
self.sg_agent)]
# Define the listening consumers for the agent
consumers = [[topics.PORT, topics.UPDATE],
[topics.NETWORK, topics.UPDATE],
[topics.SECURITY_GROUP, topics.UPDATE]]
self.connection = agent_rpc.create_consumers(self.endpoints,
self.topic,
consumers,
start_listening=False)
report_interval = cfg.CONF.AGENT.report_interval
if report_interval:
heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
heartbeat.start(interval=report_interval)
def _report_state(self):
try:
self.state_rpc.report_state(self.context,
self.agent_state)
# we only want to update resource versions on startup
self.agent_state.pop('resource_versions', None)
self.agent_state.pop('start_flag', None)
except Exception:
LOG.exception("Failed reporting state!")
def _create_agent_extension_manager(self, connection):
ext_manager.register_opts(self.conf)
mgr = ext_manager.L2AgentExtensionsManager(self.conf)
mgr.initialize(connection, 'sriov')
return mgr
def setup_eswitch_mgr(self, device_mappings, exclude_devices=None):
exclude_devices = exclude_devices or {}
self.eswitch_mgr = esm.ESwitchManager()
self.eswitch_mgr.discover_devices(device_mappings, exclude_devices)
def scan_devices(self, registered_devices, updated_devices):
curr_devices = self.eswitch_mgr.get_assigned_devices_info()
self.agent_state.get('configurations')['devices'] = len(curr_devices)
device_info = {}
device_info['current'] = curr_devices
device_info['added'] = curr_devices - registered_devices
# we need to clean up after devices are removed
device_info['removed'] = registered_devices - curr_devices
# we don't want to process updates for devices that don't exist
device_info['updated'] = (updated_devices & curr_devices -
device_info['removed'])
return device_info
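    # Worked example (illustrative, not part of the original source): with
    # registered = {A, B}, current = {B, C} and updated = {B, D}, scan_devices
    # yields added = {C}, removed = {A} and, since '-' binds tighter than '&',
    # updated = updated & (current - removed) = {B}; the stale entry D is
    # dropped because it no longer exists on the host.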
def _device_info_has_changes(self, device_info):
return (device_info.get('added')
or device_info.get('updated')
or device_info.get('removed'))
def process_network_devices(self, device_info):
resync_a = False
resync_b = False
self.sg_agent.prepare_devices_filter(device_info.get('added'))
if device_info.get('updated'):
self.sg_agent.refresh_firewall()
# Updated devices are processed the same as new ones, as their
# admin_state_up may have changed. The set union prevents duplicating
# work when a device is new and updated in the same polling iteration.
devices_added_updated = (set(device_info.get('added'))
| set(device_info.get('updated')))
if devices_added_updated:
resync_a = self.treat_devices_added_updated(devices_added_updated)
if device_info.get('removed'):
resync_b = self.treat_devices_removed(device_info['removed'])
# If one of the above operations fails => resync with plugin
return (resync_a | resync_b)
def treat_device(self, device, pci_slot, admin_state_up, spoofcheck=True):
if self.eswitch_mgr.device_exists(device, pci_slot):
try:
self.eswitch_mgr.set_device_spoofcheck(device, pci_slot,
spoofcheck)
except Exception:
LOG.warning("Failed to set spoofcheck for device %s",
device)
LOG.info("Device %(device)s spoofcheck %(spoofcheck)s",
{"device": device, "spoofcheck": spoofcheck})
try:
self.eswitch_mgr.set_device_state(device, pci_slot,
admin_state_up)
except exc.IpCommandOperationNotSupportedError:
LOG.warning("Device %s does not support state change",
device)
except exc.SriovNicError:
LOG.warning("Failed to set device %s state", device)
return False
else:
LOG.info("No device with MAC %s defined on agent.", device)
return False
return True
def _update_network_ports(self, network_id, port_id, mac_pci_slot):
self._clean_network_ports(mac_pci_slot)
self.network_ports[network_id].append({
"port_id": port_id,
"device": mac_pci_slot})
def _clean_network_ports(self, mac_pci_slot):
for netid, ports_list in self.network_ports.items():
for port_data in ports_list:
if mac_pci_slot == port_data['device']:
ports_list.remove(port_data)
if ports_list == []:
self.network_ports.pop(netid)
return port_data['port_id']
def treat_devices_added_updated(self, devices_info):
try:
macs_list = set([device_info[0] for device_info in devices_info])
devices_details_list = self.plugin_rpc.get_devices_details_list(
self.context, macs_list, self.agent_id)
except Exception as e:
LOG.debug("Unable to get port details for devices "
"with MAC addresses %(devices)s: %(e)s",
{'devices': macs_list, 'e': e})
# resync is needed
return True
devices_up = set()
devices_down = set()
for device_details in devices_details_list:
device = device_details['device']
LOG.debug("Port with MAC address %s is added", device)
if 'port_id' in device_details:
LOG.info("Port %(device)s updated. Details: %(details)s",
{'device': device, 'details': device_details})
port_id = device_details['port_id']
profile = device_details['profile']
spoofcheck = device_details.get('port_security_enabled', True)
if self.treat_device(device,
profile.get('pci_slot'),
device_details['admin_state_up'],
spoofcheck):
if device_details['admin_state_up']:
devices_up.add(device)
else:
devices_down.add(device)
self._update_network_ports(device_details['network_id'],
port_id,
(device, profile.get('pci_slot')))
self.ext_manager.handle_port(self.context, device_details)
else:
LOG.info("Device with MAC %s not defined on plugin",
device)
self.plugin_rpc.update_device_list(self.context,
devices_up,
devices_down,
self.agent_id,
self.conf.host)
return False
def treat_devices_removed(self, devices):
resync = False
for device in devices:
mac, pci_slot = device
LOG.info("Removing device with MAC address %(mac)s and "
"PCI slot %(pci_slot)s",
{'mac': mac, 'pci_slot': pci_slot})
try:
port_id = self._clean_network_ports(device)
if port_id:
port = {'port_id': port_id,
'device': mac,
'profile': {'pci_slot': pci_slot}}
self.ext_manager.delete_port(self.context, port)
else:
LOG.warning("port_id to device with MAC "
"%s not found", mac)
dev_details = self.plugin_rpc.update_device_down(self.context,
mac,
self.agent_id,
cfg.CONF.host)
except Exception as e:
LOG.debug("Removing port failed for device with MAC address "
"%(mac)s and PCI slot %(pci_slot)s due to %(exc)s",
{'mac': mac, 'pci_slot': pci_slot, 'exc': e})
resync = True
continue
if dev_details['exists']:
LOG.info("Port with MAC %(mac)s and PCI slot "
"%(pci_slot)s updated.",
{'mac': mac, 'pci_slot': pci_slot})
else:
LOG.debug("Device with MAC %(mac)s and PCI slot "
"%(pci_slot)s not defined on plugin",
{'mac': mac, 'pci_slot': pci_slot})
return resync
def daemon_loop(self):
sync = True
devices = set()
LOG.info("SRIOV NIC Agent RPC Daemon Started!")
while True:
start = time.time()
LOG.debug("Agent rpc_loop - iteration:%d started",
self.iter_num)
if sync:
LOG.info("Agent out of sync with plugin!")
devices.clear()
sync = False
device_info = {}
            # Save the updated devices set to allow a rollback in case a
            # resync is needed, and then clear self.updated_devices.
            # As the greenthread should not yield between these
            # two statements, this should be thread-safe.
updated_devices_copy = self.updated_devices
self.updated_devices = set()
try:
self.eswitch_mgr.discover_devices(self.device_mappings,
self.exclude_devices)
device_info = self.scan_devices(devices, updated_devices_copy)
if self._device_info_has_changes(device_info):
LOG.debug("Agent loop found changes! %s", device_info)
                    # If treating devices fails, we must resync with the
                    # plugin
sync = self.process_network_devices(device_info)
devices = device_info['current']
except Exception:
LOG.exception("Error in agent loop. Devices info: %s",
device_info)
sync = True
# Restore devices that were removed from this set earlier
# without overwriting ones that may have arrived since.
self.updated_devices |= updated_devices_copy
# sleep till end of polling interval
elapsed = (time.time() - start)
if (elapsed < self.polling_interval):
time.sleep(self.polling_interval - elapsed)
else:
LOG.debug("Loop iteration exceeded interval "
"(%(polling_interval)s vs. %(elapsed)s)!",
{'polling_interval': self.polling_interval,
'elapsed': elapsed})
self.iter_num = self.iter_num + 1
class SriovNicAgentConfigParser(object):
def __init__(self):
self.device_mappings = {}
self.exclude_devices = {}
def parse(self):
"""Parses device_mappings and exclude_devices.
Parse and validate the consistency in both mappings
"""
self.device_mappings = helpers.parse_mappings(
cfg.CONF.SRIOV_NIC.physical_device_mappings, unique_keys=False)
self.exclude_devices = config.parse_exclude_devices(
cfg.CONF.SRIOV_NIC.exclude_devices)
self._validate()
def _validate(self):
"""Validate configuration.
        Validate that every network device listed in exclude_devices
        also exists in device_mappings.
"""
dev_net_set = set(itertools.chain.from_iterable(
six.itervalues(self.device_mappings)))
for dev_name in self.exclude_devices.keys():
if dev_name not in dev_net_set:
raise ValueError(_("Device name %(dev_name)s is missing from "
"physical_device_mappings") % {'dev_name':
dev_name})
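# Illustrative (hypothetical) agent configuration consumed by the parser
# above, e.g. in an sriov_agent.ini; the device and PCI address values are
# placeholders, not part of the original source:
#
#     [SRIOV_NIC]
#     physical_device_mappings = physnet1:ens1f0,physnet2:ens1f1
#     exclude_devices = ens1f0:0000:07:00.2;0000:07:00.3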
def main():
common_config.init(sys.argv[1:])
common_config.setup_logging()
try:
config_parser = SriovNicAgentConfigParser()
config_parser.parse()
device_mappings = config_parser.device_mappings
exclude_devices = config_parser.exclude_devices
except ValueError:
LOG.exception("Failed on Agent configuration parse. "
"Agent terminated!")
raise SystemExit(1)
LOG.info("Physical Devices mappings: %s", device_mappings)
LOG.info("Exclude Devices: %s", exclude_devices)
polling_interval = cfg.CONF.AGENT.polling_interval
try:
agent = SriovNicSwitchAgent(device_mappings,
exclude_devices,
polling_interval)
except exc.SriovNicError:
LOG.exception("Agent Initialization Failed")
raise SystemExit(1)
# Start everything.
setup_profiler.setup("neutron-sriov-nic-agent", cfg.CONF.host)
LOG.info("Agent initialized successfully, now running... ")
agent.daemon_loop()
|
|
from globals import *
import life as lfe
import language
import graphics
import bad_numbers
import timers
import items
import alife
import logging
import random
def get_puncture_value(item, target_structure, target_structure_name='object', debug=True):
_damage = (((item['speed']/float(item['max_speed']))*item['damage']['sharp'])*\
(target_structure['max_thickness']/float(target_structure['thickness'])))*\
(item['size']/float(bad_numbers.get_surface_area(target_structure)))
if debug:
		logging.debug('%s is puncturing %s.' % (item['name'], target_structure_name))
logging.debug('%s\'s max speed is %s and is currently traveling at speed %s.' % (item['name'], item['max_speed'], item['speed']))
logging.debug('The %s\'s material has a thickness of %s (with a max of %s).' % (target_structure_name, target_structure['thickness'], target_structure['max_thickness']))
logging.debug('%s has a puncture rating of %s.' % (item['name'], item['damage']['sharp']))
logging.debug('Size of %s: %s, size of %s: %s' % (item['name'], item['size'], target_structure_name, target_structure['size']))
logging.debug('The %s does %s points of damage to the %s.' % (item['name'], _damage, target_structure_name))
return _damage
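# Worked example with hypothetical numbers (not part of the original source):
# a round at speed 100 of max 150, with a sharp damage rating of 8, hitting a
# structure of thickness 2 (max 5), with item size 4 and surface area 20:
#     ((100/150.0) * 8) * (5/2.0) * (4/20.0) ~= 2.67
# points of damage, so faster, sharper rounds against thinner material punch
# through more readily.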
def own_language(life, message):
_mentioned_name = False
_ret_string = ''
for txt in message:
if 'player' in life:
_ret_string += txt.replace('<own>', 'your')
else:
_name = txt.replace('<own>', language.get_name_ownership(life, pronoun=_mentioned_name))
if not _name == txt:
_mentioned_name = True
_ret_string += _name
return _ret_string
def bullet_hit(life, bullet, limb):
_owner = LIFE[bullet['shot_by']]
_actual_limb = lfe.get_limb(life, limb)
_items_to_check = []
_msg = []
#if 'player' in _owner:
# if bullet['aim_at_limb'] == limb:
# _hit = True
# _msg = ['The round hits']
# elif not limb in life['body']:
# return 'The round misses entirely!'
# else:
# _msg = ['The round misses slightly']
# _detailed = True
#
#elif 'player' in life:
# _msg = ['The round hits']
#else:
# _msg = ['%s hits %s\'s %s' % (items.get_name(bullet), life['name'][0], limb)]
for item_uid in lfe.get_items_attached_to_limb(life, limb):
_items_to_check.append({'item': item_uid, 'visible': True})
_item = items.get_item_from_uid(item_uid)
if 'storing' in _item:
for item_in_container_uid in _item['storing']:
_chance_of_hitting_item = _item['capacity']/float(_item['max_capacity'])
if random.uniform(0, 1)<_chance_of_hitting_item:
break
_items_to_check.append({'item': item_in_container_uid, 'visible': False})
for entry in _items_to_check:
_item = items.get_item_from_uid(entry['item'])
_item_damage = get_puncture_value(bullet, _item, target_structure_name=_item['name'])
_item['thickness'] = bad_numbers.clip(_item['thickness']-_item_damage, 0, _item['max_thickness'])
if 'material' in _item and not _item['material'] == 'cloth':
_speed_mod = _item_damage
_can_stop = True
bullet['speed'] *= _speed_mod
bullet['velocity'][0] *= _speed_mod
bullet['velocity'][1] *= _speed_mod
else:
_can_stop = False
if not _item['thickness']:
if _item['uid'] in lfe.get_all_visible_items(life):
if 'player' in _owner:
_msg.append('%s\'s %s is destroyed!' % (' '.join(life['name']), _item['name']))
if _item['type'] == 'explosive':
items.explode(_item)
else:
items.delete_item(_item)
#else:
# if bullet['speed']<=1 and _can_stop:
# #if 'player' in _owner:
# # _msg.append(', lodging itself in %s' % items.get_name(_item))
# #_ret_string = own_language(life, _msg)
#
# if _ret_string.endswith('!'):
# return _ret_string
# else:
# return _ret_string+'.'
# #else:
# # if 'material' in _item:
# # if _item['material'] == 'metal':
# # _msg.append(', puncturing the %s' % _item['name'])
# # else:
# # _msg.append(', ripping through the %s' % _item['name'])
_damage = get_puncture_value(bullet, _actual_limb, target_structure_name=limb)
_actual_limb['thickness'] = bad_numbers.clip(_actual_limb['thickness']-_damage, 0, _actual_limb['max_thickness'])
if not _actual_limb['thickness']:
lfe.sever_limb(life, limb, (0, 0, 0))
_damage_mod = 1-(_actual_limb['thickness']/float(_actual_limb['max_thickness']))
if limb in life['body']:
_msg.append(lfe.add_wound(life, limb, cut=_damage*_damage_mod, impact_velocity=bullet['velocity']))
#_ret_string = own_language(life, _msg)
return ' '.join(_msg)
#if _ret_string.endswith('!'):
# return _ret_string
#else:
# return _ret_string+'.'
def bite(life, target_id, limb):
logging.debug('%s bit %s in the %s.' % (' '.join(life['name']), ' '.join(LIFE[target_id]['name']), limb))
target = LIFE[target_id]
_msg = ['%s' % language.get_introduction(life)]
_bite_strength = random.randint(1, 3)
if bad_numbers.distance(life['pos'], target['pos'])>1:
_msg.append('bites the air')
return ' '.join(_msg)+'.'
_items_to_check = []
for _item in lfe.get_items_attached_to_limb(target, limb):
_items_to_check.append({'item': _item, 'visible': True})
_actual_item = items.get_item_from_uid(_item)
if 'storing' in _actual_item:
for _item_in_container in _actual_item['storing']:
_items_to_check.append({'item': _item_in_container, 'visible': False})
for entry in _items_to_check:
_item = items.get_item_from_uid(entry['item'])
if not 'thickness' in _item:
logging.warning('Item \'%s\' has no set thickness. Guessing...' % _item['name'])
_item['thickness'] = _item['size']/2
_thickness = _item['thickness']
_item['thickness'] = bad_numbers.clip(_item['thickness']-_bite_strength, 0, 100)
_bite_strength -= _thickness
_tear = _item['thickness']-_thickness
_limb_in_context = False
if _item['material'] == 'cloth':
if _thickness and not _item['thickness']:
_msg.append('rips through <own> %s' % _item['name'])
elif _tear<=-3:
_msg.append('rips <own> %s' % _item['name'])
elif _tear<=-2:
_msg.append('tears <own> %s' % _item['name'])
elif _tear<=-1:
_msg.append('slightly tears <own> %s' % _item['name'])
if _bite_strength <= 0 and _item['thickness']:
_msg.append('is stopped by <own> %s' % _item['name'])
return ' '.join(_msg)
#if not lfe.limb_is_cut(target, limb):
if _bite_strength==1:
_msg.append(', cutting <own> %s' % limb)
elif _bite_strength==2:
_msg.append(', tearing <own> %s' % limb)
elif _bite_strength==3:
_msg.append(', ripping open <own> %s' % limb)
if _bite_strength:
lfe.add_wound(target, limb, cut=_bite_strength)
#TODO: How thick is skin?
_bite_strength -= 1
#if not _bite_strength:
# return ' '.join(_msg)
_ret_string = own_language(target, _msg)
return _ret_string+'.'
|
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import traceback
import eventlet
from eventlet.green import threading
from eventlet.green import time
from eventlet import greenpool
from eventlet import semaphore
from oslo_config import cfg
from oslo_context import context
from oslo_log import log as logging
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.i18n import _LW
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class Context(context.RequestContext):
def __init__(self,
user_id=None,
tenant_id=None,
auth_token=None,
service_catalog=None,
username=None,
tenant_name=None,
roles=None,
is_admin=None,
remote_semaphore=None,
resource_uuid=None,
current_instance_info=None,
request_id=None,
auth_plugin=None,
overwrite=True,
**kwargs):
if kwargs:
LOG.warning(_LW('Arguments dropped when creating context: '
'{args}').format(args=kwargs))
super(Context, self).__init__(auth_token=auth_token,
user=user_id,
tenant=tenant_id,
is_admin=is_admin,
resource_uuid=resource_uuid,
request_id=request_id)
self.service_catalog = service_catalog
self.username = username
self.tenant_name = tenant_name
self.remote_semaphore = remote_semaphore or semaphore.Semaphore(
CONF.cluster_remote_threshold)
self.auth_plugin = auth_plugin
self.roles = roles
if overwrite or not hasattr(context._request_store, 'context'):
self.update_store()
if current_instance_info is not None:
self.current_instance_info = current_instance_info
else:
self.current_instance_info = InstanceInfo()
def clone(self):
return Context(
self.user_id,
self.tenant_id,
self.auth_token,
self.service_catalog,
self.username,
self.tenant_name,
self.roles,
self.is_admin,
self.remote_semaphore,
self.resource_uuid,
self.current_instance_info,
self.request_id,
self.auth_plugin,
overwrite=False)
def to_dict(self):
return {
'user_id': self.user_id,
'tenant_id': self.tenant_id,
'auth_token': self.auth_token,
'service_catalog': self.service_catalog,
'username': self.username,
'tenant_name': self.tenant_name,
'user_name': self.username,
'project_name': self.tenant_name,
'is_admin': self.is_admin,
'roles': self.roles,
'resource_uuid': self.resource_uuid,
'request_id': self.request_id,
}
def is_auth_capable(self):
return (self.service_catalog and self.auth_token and self.tenant and
self.user_id)
# NOTE(adrienverge): The Context class uses the 'user' and 'tenant'
# properties internally (inherited from oslo_context), but Sahara code
# often uses 'user_id' and 'tenant_id'.
@property
def user_id(self):
return self.user
@user_id.setter
def user_id(self, value):
self.user = value
@property
def tenant_id(self):
return self.tenant
@tenant_id.setter
def tenant_id(self, value):
self.tenant = value
def get_admin_context():
return Context(is_admin=True)
_CTX_STORE = threading.local()
_CTX_KEY = 'current_ctx'
def has_ctx():
return hasattr(_CTX_STORE, _CTX_KEY)
def ctx():
if not has_ctx():
raise ex.IncorrectStateError(_("Context isn't available here"))
return getattr(_CTX_STORE, _CTX_KEY)
def current():
return ctx()
def set_ctx(new_ctx):
if not new_ctx and has_ctx():
delattr(_CTX_STORE, _CTX_KEY)
if hasattr(context._request_store, 'context'):
delattr(context._request_store, 'context')
if new_ctx:
setattr(_CTX_STORE, _CTX_KEY, new_ctx)
setattr(context._request_store, 'context', new_ctx)
def _wrapper(ctx, thread_description, thread_group, func, *args, **kwargs):
try:
set_ctx(ctx)
func(*args, **kwargs)
except BaseException as e:
LOG.debug(
"Thread {thread} failed with exception: {exception}".format(
thread=thread_description, exception=e))
if thread_group and not thread_group.exc:
thread_group.exc = e
thread_group.exc_stacktrace = traceback.format_exc()
thread_group.failed_thread = thread_description
finally:
if thread_group:
thread_group._on_thread_exit()
set_ctx(None)
def spawn(thread_description, func, *args, **kwargs):
eventlet.spawn(_wrapper, current().clone(), thread_description,
None, func, *args, **kwargs)
class ThreadGroup(object):
"""ThreadGroup object.
    It is advised to use ThreadGroup as a context manager instead
    of instantiating it and calling _wait() manually. __exit__() is
    guaranteed to return only after all child threads are done, even if
    the spawning code has thrown an exception.
"""
def __init__(self, thread_pool_size=1000):
self.tg = greenpool.GreenPool(size=thread_pool_size)
self.exc = None
self.exc_stacktrace = None
self.failed_thread = None
self.threads = 0
self.cv = threading.Condition()
def spawn(self, thread_description, func, *args, **kwargs):
self.tg.spawn(_wrapper, current().clone(), thread_description,
self, func, *args, **kwargs)
with self.cv:
self.threads += 1
def _on_thread_exit(self):
with self.cv:
self.threads -= 1
if self.threads == 0:
self.cv.notifyAll()
# NOTE(dmitryme): A little rationale on why we reimplemented wait():
    # * Eventlet's GreenPool.wait() can hang
# * Oslo's ThreadGroup.wait() can exit before all threads are done
#
def _wait(self):
"""Using of _wait() method.
It is preferred to use the class as a context manager and do not
use _wait() directly, see class docstring for an explanation.
"""
with self.cv:
while self.threads > 0:
self.cv.wait()
if self.exc:
raise ex.ThreadException(self.failed_thread, self.exc,
self.exc_stacktrace)
def __enter__(self):
return self
def __exit__(self, *ex):
if not any(ex):
self._wait()
else:
            # If the spawning code threw an exception, it takes priority
            # over the one thrown inside a child thread (if any)
try:
self._wait()
except Exception:
# that will make __exit__ throw original exception
pass
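# A minimal usage sketch (not part of the original module): the context
# manager waits for both greenthreads on exit and re-raises the first child
# exception, if any. `do_work`, `cluster_a` and `cluster_b` are placeholders.
#
#     with ThreadGroup() as tg:
#         tg.spawn('worker-1', do_work, cluster_a)
#         tg.spawn('worker-2', do_work, cluster_b)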
def sleep(seconds=0):
time.sleep(seconds)
class InstanceInfo(object):
def __init__(self, cluster_id=None, instance_id=None, instance_name=None,
node_group_id=None, step_type=None, step_id=None):
self.cluster_id = cluster_id
self.instance_id = instance_id
self.instance_name = instance_name
self.node_group_id = node_group_id
self.step_type = step_type
self.step_id = step_id
def set_step_type(step_type):
current().current_instance_info.step_type = step_type
class InstanceInfoManager(object):
def __init__(self, instance_info):
self.prev_instance_info = current().current_instance_info
if not instance_info.step_type:
instance_info.step_type = self.prev_instance_info.step_type
if not instance_info.step_id:
instance_info.step_id = self.prev_instance_info.step_id
current().current_instance_info = instance_info
def __enter__(self):
pass
def __exit__(self, *args):
current().current_instance_info = self.prev_instance_info
def set_current_cluster_id(cluster_id):
current().resource_uuid = 'none, cluster: %s' % cluster_id
def set_current_job_execution_id(je_id):
current().resource_uuid = 'none, job_execution: %s' % je_id
class SetCurrentInstanceId(object):
def __init__(self, instance_id):
ctx = current()
self.prev_uuid = ctx.resource_uuid
if ctx.resource_uuid:
ctx.resource_uuid = ctx.resource_uuid.replace('none', instance_id)
context.get_current().resource_uuid = ctx.resource_uuid
def __enter__(self):
pass
def __exit__(self, *ex):
current().resource_uuid = self.prev_uuid
context.get_current().resource_uuid = self.prev_uuid
def set_current_instance_id(instance_id):
return SetCurrentInstanceId(instance_id)
|
|
"""
Test for the harmonization operation
"""
import calendar
from datetime import datetime
from unittest import TestCase
import numpy as np
import pandas as pd
import xarray as xr
from jdcal import gcal2jd
from numpy.testing import assert_array_almost_equal
from cate.core.op import OP_REGISTRY
from cate.core.opimpl import normalize_missing_time, normalize_coord_vars
from cate.ops.normalize import normalize, adjust_spatial_attrs, adjust_temporal_attrs
from cate.util.misc import object_to_qualified_name
# noinspection PyPep8Naming
def assertDatasetEqual(expected, actual):
# this method is functionally equivalent to
# `assert expected == actual`, but it
# checks each aspect of equality separately for easier debugging
assert expected.equals(actual), (expected, actual)
class TestNormalize(TestCase):
def test_normalize_lon_lat_2d(self):
"""
Test nominal execution
"""
dims = ('time', 'y', 'x')
attribs = {'valid_min': 0., 'valid_max': 1.}
t_size = 2
y_size = 3
x_size = 4
a_data = np.random.random_sample((t_size, y_size, x_size))
b_data = np.random.random_sample((t_size, y_size, x_size))
time_data = [1, 2]
lat_data = [[10., 10., 10., 10.],
[20., 20., 20., 20.],
[30., 30., 30., 30.]]
lon_data = [[-10., 0., 10., 20.],
[-10., 0., 10., 20.],
[-10., 0., 10., 20.]]
dataset = xr.Dataset({'a': (dims, a_data, attribs),
'b': (dims, b_data, attribs)
},
{'time': (('time',), time_data),
'lat': (('y', 'x'), lat_data),
'lon': (('y', 'x'), lon_data)
},
{'geospatial_lon_min': -15.,
'geospatial_lon_max': 25.,
'geospatial_lat_min': 5.,
'geospatial_lat_max': 35.
}
)
new_dims = ('time', 'lat', 'lon')
expected = xr.Dataset({'a': (new_dims, a_data, attribs),
'b': (new_dims, b_data, attribs)},
{'time': (('time',), time_data),
'lat': (('lat',), [10., 20., 30.]),
'lon': (('lon',), [-10., 0., 10., 20.]),
},
{'geospatial_lon_min': -15.,
'geospatial_lon_max': 25.,
'geospatial_lat_min': 5.,
'geospatial_lat_max': 35.})
actual = normalize(dataset)
xr.testing.assert_equal(actual, expected)
def test_normalize_lon_lat(self):
"""
Test nominal execution
"""
dataset = xr.Dataset({'first': (['latitude',
'longitude'], [[1, 2, 3],
[2, 3, 4]])})
expected = xr.Dataset({'first': (['lat', 'lon'], [[1, 2, 3],
[2, 3, 4]])})
actual = normalize(dataset)
assertDatasetEqual(actual, expected)
dataset = xr.Dataset({'first': (['lat', 'long'], [[1, 2, 3],
[2, 3, 4]])})
expected = xr.Dataset({'first': (['lat', 'lon'], [[1, 2, 3],
[2, 3, 4]])})
actual = normalize(dataset)
assertDatasetEqual(actual, expected)
dataset = xr.Dataset({'first': (['latitude',
'spacetime'], [[1, 2, 3],
[2, 3, 4]])})
expected = xr.Dataset({'first': (['lat', 'spacetime'], [[1, 2, 3],
[2, 3, 4]])})
actual = normalize(dataset)
assertDatasetEqual(actual, expected)
dataset = xr.Dataset({'first': (['zef', 'spacetime'], [[1, 2, 3],
[2, 3, 4]])})
expected = xr.Dataset({'first': (['zef', 'spacetime'], [[1, 2, 3],
[2, 3, 4]])})
actual = normalize(dataset)
assertDatasetEqual(actual, expected)
def test_normalize_inverted_lat(self):
first = np.zeros([3, 45, 90])
first[0, :, :] = np.eye(45, 90)
ds = xr.Dataset({
'first': (['time', 'lat', 'lon'], first),
'second': (['time', 'lat', 'lon'], np.zeros([3, 45, 90])),
'lat': np.linspace(88, -88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [datetime(2000, x, 1) for x in range(1, 4)]}).chunk(chunks={'time': 1})
first = np.zeros([3, 45, 90])
first[0, :, :] = np.flip(np.eye(45, 90), axis=0)
expected = xr.Dataset({
'first': (['time', 'lat', 'lon'], first),
'second': (['time', 'lat', 'lon'], np.zeros([3, 45, 90])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [datetime(2000, x, 1) for x in range(1, 4)]}).chunk(chunks={'time': 1})
actual = normalize(ds)
xr.testing.assert_equal(actual, expected)
def test_normalize_with_missing_time_dim(self):
ds = xr.Dataset({'first': (['lat', 'lon'], np.zeros([90, 180])),
'second': (['lat', 'lon'], np.zeros([90, 180]))},
coords={'lat': np.linspace(-89.5, 89.5, 90),
'lon': np.linspace(-179.5, 179.5, 180)},
attrs={'time_coverage_start': '20120101',
'time_coverage_end': '20121231'})
norm_ds = normalize(ds)
self.assertIsNot(norm_ds, ds)
self.assertEqual(len(norm_ds.coords), 4)
self.assertIn('lon', norm_ds.coords)
self.assertIn('lat', norm_ds.coords)
self.assertIn('time', norm_ds.coords)
self.assertIn('time_bnds', norm_ds.coords)
self.assertEqual(norm_ds.first.shape, (1, 90, 180))
self.assertEqual(norm_ds.second.shape, (1, 90, 180))
self.assertEqual(norm_ds.coords['time'][0], xr.DataArray(pd.to_datetime('2012-07-01T12:00:00')))
self.assertEqual(norm_ds.coords['time_bnds'][0][0], xr.DataArray(pd.to_datetime('2012-01-01')))
self.assertEqual(norm_ds.coords['time_bnds'][0][1], xr.DataArray(pd.to_datetime('2012-12-31')))
def test_normalize_julian_day(self):
"""
Test Julian Day -> Datetime conversion
"""
tuples = [gcal2jd(2000, x, 1) for x in range(1, 13)]
ds = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.zeros([45, 90, 12])),
'second': (['lat', 'lon', 'time'], np.zeros([45, 90, 12])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [x[0] + x[1] for x in tuples]})
ds.time.attrs['long_name'] = 'time in julian days'
expected = xr.Dataset({
'first': (['time', 'lat', 'lon'], np.zeros([12, 45, 90])),
'second': (['time', 'lat', 'lon'], np.zeros([12, 45, 90])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [datetime(2000, x, 1) for x in range(1, 13)]})
expected.time.attrs['long_name'] = 'time'
actual = normalize(ds)
assertDatasetEqual(actual, expected)
def test_registered(self):
"""
Test as a registered operation
"""
reg_op = OP_REGISTRY.get_op(object_to_qualified_name(normalize))
dataset = xr.Dataset({'first': (['latitude',
'longitude'], [[1, 2, 3],
[2, 3, 4]])})
expected = xr.Dataset({'first': (['lat', 'lon'], [[1, 2, 3],
[2, 3, 4]])})
actual = reg_op(ds=dataset)
assertDatasetEqual(actual, expected)
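# The TestAdjustSpatial cases below assert cell-edge extrema (e.g. -90/90 from
# latitude centers spanning [-88, 88] at a 4 degree spacing). The helper below
# is a minimal, illustrative sketch of that centers-to-edges arithmetic; it is
# not used by the tests and is not part of cate itself.
def _cell_edges(centers):
    """Return (min_edge, max_edge, resolution) for evenly spaced, ascending cell centers."""
    res = abs(centers[1] - centers[0])
    return centers[0] - res / 2, centers[-1] + res / 2, res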
class TestAdjustSpatial(TestCase):
def test_nominal(self):
ds = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.zeros([45, 90, 12])),
'second': (['lat', 'lon', 'time'], np.zeros([45, 90, 12])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [datetime(2000, x, 1) for x in range(1, 13)]})
ds.lon.attrs['units'] = 'degrees_east'
ds.lat.attrs['units'] = 'degrees_north'
ds1 = adjust_spatial_attrs(ds)
# Make sure original dataset is not altered
with self.assertRaises(KeyError):
# noinspection PyStatementEffect
ds.attrs['geospatial_lat_min']
# Make sure expected values are in the new dataset
self.assertEqual(ds1.attrs['geospatial_lat_min'], -90)
self.assertEqual(ds1.attrs['geospatial_lat_max'], 90)
self.assertEqual(ds1.attrs['geospatial_lat_units'], 'degrees_north')
self.assertEqual(ds1.attrs['geospatial_lat_resolution'], 4)
self.assertEqual(ds1.attrs['geospatial_lon_min'], -180)
self.assertEqual(ds1.attrs['geospatial_lon_max'], 180)
self.assertEqual(ds1.attrs['geospatial_lon_units'], 'degrees_east')
self.assertEqual(ds1.attrs['geospatial_lon_resolution'], 4)
self.assertEqual(ds1.attrs['geospatial_bounds'],
'POLYGON((-180.0 -90.0, -180.0 90.0, 180.0 90.0,'
' 180.0 -90.0, -180.0 -90.0))')
# Test existing attributes update
lon_min, lat_min, lon_max, lat_max = -20, -40, 60, 40
indexers = {'lon': slice(lon_min, lon_max),
'lat': slice(lat_min, lat_max)}
ds2 = ds1.sel(**indexers)
ds2 = adjust_spatial_attrs(ds2)
self.assertEqual(ds2.attrs['geospatial_lat_min'], -42)
self.assertEqual(ds2.attrs['geospatial_lat_max'], 42)
self.assertEqual(ds2.attrs['geospatial_lat_units'], 'degrees_north')
self.assertEqual(ds2.attrs['geospatial_lat_resolution'], 4)
self.assertEqual(ds2.attrs['geospatial_lon_min'], -20)
self.assertEqual(ds2.attrs['geospatial_lon_max'], 60)
self.assertEqual(ds2.attrs['geospatial_lon_units'], 'degrees_east')
self.assertEqual(ds2.attrs['geospatial_lon_resolution'], 4)
self.assertEqual(ds2.attrs['geospatial_bounds'],
'POLYGON((-20.0 -42.0, -20.0 42.0, 60.0 42.0, 60.0'
' -42.0, -20.0 -42.0))')
def test_nominal_inverted(self):
# Inverted lat
ds = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.zeros([45, 90, 12])),
'second': (['lat', 'lon', 'time'], np.zeros([45, 90, 12])),
'lat': np.linspace(88, -88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [datetime(2000, x, 1) for x in range(1, 13)]})
ds.lon.attrs['units'] = 'degrees_east'
ds.lat.attrs['units'] = 'degrees_north'
ds1 = adjust_spatial_attrs(ds)
# Make sure original dataset is not altered
with self.assertRaises(KeyError):
# noinspection PyStatementEffect
ds.attrs['geospatial_lat_min']
# Make sure expected values are in the new dataset
self.assertEqual(ds1.attrs['geospatial_lat_min'], -90)
self.assertEqual(ds1.attrs['geospatial_lat_max'], 90)
self.assertEqual(ds1.attrs['geospatial_lat_units'], 'degrees_north')
self.assertEqual(ds1.attrs['geospatial_lat_resolution'], 4)
self.assertEqual(ds1.attrs['geospatial_lon_min'], -180)
self.assertEqual(ds1.attrs['geospatial_lon_max'], 180)
self.assertEqual(ds1.attrs['geospatial_lon_units'], 'degrees_east')
self.assertEqual(ds1.attrs['geospatial_lon_resolution'], 4)
self.assertEqual(ds1.attrs['geospatial_bounds'],
'POLYGON((-180.0 -90.0, -180.0 90.0, 180.0 90.0,'
' 180.0 -90.0, -180.0 -90.0))')
# Test existing attributes update
lon_min, lat_min, lon_max, lat_max = -20, -40, 60, 40
indexers = {'lon': slice(lon_min, lon_max),
'lat': slice(lat_max, lat_min)}
ds2 = ds1.sel(**indexers)
ds2 = adjust_spatial_attrs(ds2)
self.assertEqual(ds2.attrs['geospatial_lat_min'], -42)
self.assertEqual(ds2.attrs['geospatial_lat_max'], 42)
self.assertEqual(ds2.attrs['geospatial_lat_units'], 'degrees_north')
self.assertEqual(ds2.attrs['geospatial_lat_resolution'], 4)
self.assertEqual(ds2.attrs['geospatial_lon_min'], -20)
self.assertEqual(ds2.attrs['geospatial_lon_max'], 60)
self.assertEqual(ds2.attrs['geospatial_lon_units'], 'degrees_east')
self.assertEqual(ds2.attrs['geospatial_lon_resolution'], 4)
self.assertEqual(ds2.attrs['geospatial_bounds'],
'POLYGON((-20.0 -42.0, -20.0 42.0, 60.0 42.0, 60.0'
' -42.0, -20.0 -42.0))')
def test_bnds(self):
ds = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.zeros([45, 90, 12])),
'second': (['lat', 'lon', 'time'], np.zeros([45, 90, 12])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [datetime(2000, x, 1) for x in range(1, 13)]})
ds.lon.attrs['units'] = 'degrees_east'
ds.lat.attrs['units'] = 'degrees_north'
lat_bnds = np.empty([len(ds.lat), 2])
lon_bnds = np.empty([len(ds.lon), 2])
ds['nv'] = [0, 1]
lat_bnds[:, 0] = ds.lat.values - 2
lat_bnds[:, 1] = ds.lat.values + 2
lon_bnds[:, 0] = ds.lon.values - 2
lon_bnds[:, 1] = ds.lon.values + 2
ds['lat_bnds'] = (['lat', 'nv'], lat_bnds)
ds['lon_bnds'] = (['lon', 'nv'], lon_bnds)
ds.lat.attrs['bounds'] = 'lat_bnds'
ds.lon.attrs['bounds'] = 'lon_bnds'
ds1 = adjust_spatial_attrs(ds)
# Make sure original dataset is not altered
with self.assertRaises(KeyError):
# noinspection PyStatementEffect
ds.attrs['geospatial_lat_min']
# Make sure expected values are in the new dataset
self.assertEqual(ds1.attrs['geospatial_lat_min'], -90)
self.assertEqual(ds1.attrs['geospatial_lat_max'], 90)
self.assertEqual(ds1.attrs['geospatial_lat_units'], 'degrees_north')
self.assertEqual(ds1.attrs['geospatial_lat_resolution'], 4)
self.assertEqual(ds1.attrs['geospatial_lon_min'], -180)
self.assertEqual(ds1.attrs['geospatial_lon_max'], 180)
self.assertEqual(ds1.attrs['geospatial_lon_units'], 'degrees_east')
self.assertEqual(ds1.attrs['geospatial_lon_resolution'], 4)
self.assertEqual(ds1.attrs['geospatial_bounds'],
'POLYGON((-180.0 -90.0, -180.0 90.0, 180.0 90.0,'
' 180.0 -90.0, -180.0 -90.0))')
# Test existing attributes update
lon_min, lat_min, lon_max, lat_max = -20, -40, 60, 40
indexers = {'lon': slice(lon_min, lon_max),
'lat': slice(lat_min, lat_max)}
ds2 = ds1.sel(**indexers)
ds2 = adjust_spatial_attrs(ds2)
self.assertEqual(ds2.attrs['geospatial_lat_min'], -42)
self.assertEqual(ds2.attrs['geospatial_lat_max'], 42)
self.assertEqual(ds2.attrs['geospatial_lat_units'], 'degrees_north')
self.assertEqual(ds2.attrs['geospatial_lat_resolution'], 4)
self.assertEqual(ds2.attrs['geospatial_lon_min'], -20)
self.assertEqual(ds2.attrs['geospatial_lon_max'], 60)
self.assertEqual(ds2.attrs['geospatial_lon_units'], 'degrees_east')
self.assertEqual(ds2.attrs['geospatial_lon_resolution'], 4)
self.assertEqual(ds2.attrs['geospatial_bounds'],
'POLYGON((-20.0 -42.0, -20.0 42.0, 60.0 42.0, 60.0'
' -42.0, -20.0 -42.0))')
def test_bnds_inverted(self):
# Inverted lat
ds = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.zeros([45, 90, 12])),
'second': (['lat', 'lon', 'time'], np.zeros([45, 90, 12])),
'lat': np.linspace(88, -88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [datetime(2000, x, 1) for x in range(1, 13)]})
ds.lon.attrs['units'] = 'degrees_east'
ds.lat.attrs['units'] = 'degrees_north'
lat_bnds = np.empty([len(ds.lat), 2])
lon_bnds = np.empty([len(ds.lon), 2])
ds['nv'] = [0, 1]
lat_bnds[:, 0] = ds.lat.values + 2
lat_bnds[:, 1] = ds.lat.values - 2
lon_bnds[:, 0] = ds.lon.values - 2
lon_bnds[:, 1] = ds.lon.values + 2
ds['lat_bnds'] = (['lat', 'nv'], lat_bnds)
ds['lon_bnds'] = (['lon', 'nv'], lon_bnds)
ds.lat.attrs['bounds'] = 'lat_bnds'
ds.lon.attrs['bounds'] = 'lon_bnds'
ds1 = adjust_spatial_attrs(ds)
# Make sure original dataset is not altered
with self.assertRaises(KeyError):
# noinspection PyStatementEffect
ds.attrs['geospatial_lat_min']
# Make sure expected values are in the new dataset
self.assertEqual(ds1.attrs['geospatial_lat_min'], -90)
self.assertEqual(ds1.attrs['geospatial_lat_max'], 90)
self.assertEqual(ds1.attrs['geospatial_lat_units'], 'degrees_north')
self.assertEqual(ds1.attrs['geospatial_lat_resolution'], 4)
self.assertEqual(ds1.attrs['geospatial_lon_min'], -180)
self.assertEqual(ds1.attrs['geospatial_lon_max'], 180)
self.assertEqual(ds1.attrs['geospatial_lon_units'], 'degrees_east')
self.assertEqual(ds1.attrs['geospatial_lon_resolution'], 4)
self.assertEqual(ds1.attrs['geospatial_bounds'],
'POLYGON((-180.0 -90.0, -180.0 90.0, 180.0 90.0,'
' 180.0 -90.0, -180.0 -90.0))')
# Test existing attributes update
lon_min, lat_min, lon_max, lat_max = -20, -40, 60, 40
indexers = {'lon': slice(lon_min, lon_max),
'lat': slice(lat_max, lat_min)}
ds2 = ds1.sel(**indexers)
ds2 = adjust_spatial_attrs(ds2)
self.assertEqual(ds2.attrs['geospatial_lat_min'], -42)
self.assertEqual(ds2.attrs['geospatial_lat_max'], 42)
self.assertEqual(ds2.attrs['geospatial_lat_units'], 'degrees_north')
self.assertEqual(ds2.attrs['geospatial_lat_resolution'], 4)
self.assertEqual(ds2.attrs['geospatial_lon_min'], -20)
self.assertEqual(ds2.attrs['geospatial_lon_max'], 60)
self.assertEqual(ds2.attrs['geospatial_lon_units'], 'degrees_east')
self.assertEqual(ds2.attrs['geospatial_lon_resolution'], 4)
self.assertEqual(ds2.attrs['geospatial_bounds'],
'POLYGON((-20.0 -42.0, -20.0 42.0, 60.0 42.0, 60.0 -42.0, -20.0 -42.0))')
    def test_one_cell_with_bnds(self):
# Only one cell in lat/lon
ds = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.zeros([1, 1, 12])),
'second': (['lat', 'lon', 'time'], np.zeros([1, 1, 12])),
'lat': np.array([52.5]),
'lon': np.array([11.5]),
'lat_bnds': (['lat', 'bnds'], np.array([[52.4, 52.6]])),
'lon_bnds': (['lon', 'bnds'], np.array([[11.4, 11.6]])),
'time': [datetime(2000, x, 1) for x in range(1, 13)]})
ds.lon.attrs['units'] = 'degrees_east'
ds.lat.attrs['units'] = 'degrees_north'
ds1 = adjust_spatial_attrs(ds)
self.assertAlmostEqual(ds1.attrs['geospatial_lat_resolution'], 0.2)
self.assertAlmostEqual(ds1.attrs['geospatial_lat_min'], 52.4)
self.assertAlmostEqual(ds1.attrs['geospatial_lat_max'], 52.6)
self.assertEqual(ds1.attrs['geospatial_lat_units'], 'degrees_north')
self.assertAlmostEqual(ds1.attrs['geospatial_lon_resolution'], 0.2)
self.assertAlmostEqual(ds1.attrs['geospatial_lon_min'], 11.4)
self.assertAlmostEqual(ds1.attrs['geospatial_lon_max'], 11.6)
self.assertEqual(ds1.attrs['geospatial_lon_units'], 'degrees_east')
self.assertEqual(ds1.attrs['geospatial_bounds'],
'POLYGON((11.4 52.4, 11.4 52.6, 11.6 52.6, 11.6 52.4, 11.4 52.4))')
    def test_one_cell_without_bnds(self):
# Only one cell in lat/lon
ds = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.zeros([1, 1, 12])),
'second': (['lat', 'lon', 'time'], np.zeros([1, 1, 12])),
'lat': np.array([52.5]),
'lon': np.array([11.5]),
'time': [datetime(2000, x, 1) for x in range(1, 13)]})
ds.lon.attrs['units'] = 'degrees_east'
ds.lat.attrs['units'] = 'degrees_north'
ds2 = adjust_spatial_attrs(ds)
# Datasets should be the same --> not modified
self.assertIs(ds2, ds)
class TestAdjustTemporal(TestCase):
def test_nominal(self):
ds = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.zeros([45, 90, 12])),
'second': (['lat', 'lon', 'time'], np.zeros([45, 90, 12])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [datetime(2000, x, 1) for x in range(1, 13)]})
ds1 = adjust_temporal_attrs(ds)
# Make sure original dataset is not altered
with self.assertRaises(KeyError):
# noinspection PyStatementEffect
ds.attrs['time_coverage_start']
# Make sure expected values are in the new dataset
self.assertEqual(ds1.attrs['time_coverage_start'],
'2000-01-01T00:00:00.000000000')
self.assertEqual(ds1.attrs['time_coverage_end'],
'2000-12-01T00:00:00.000000000')
self.assertEqual(ds1.attrs['time_coverage_resolution'],
'P1M')
self.assertEqual(ds1.attrs['time_coverage_duration'],
'P336D')
# Test existing attributes update
# noinspection PyTypeChecker
indexers = {'time': slice(datetime(2000, 2, 15), datetime(2000, 6, 15))}
ds2 = ds1.sel(**indexers)
ds2 = adjust_temporal_attrs(ds2)
self.assertEqual(ds2.attrs['time_coverage_start'],
'2000-03-01T00:00:00.000000000')
self.assertEqual(ds2.attrs['time_coverage_end'],
'2000-06-01T00:00:00.000000000')
self.assertEqual(ds2.attrs['time_coverage_resolution'],
'P1M')
self.assertEqual(ds2.attrs['time_coverage_duration'],
'P93D')
def test_wrong_type(self):
ds = xr.Dataset({
'first': (['time', 'lat', 'lon'], np.zeros([12, 45, 90])),
'second': (['time', 'lat', 'lon'], np.zeros([12, 45, 90])),
'lon': (['lon'], np.linspace(-178, 178, 90)),
'lat': (['lat'], np.linspace(-88, 88, 45)),
'time': (['time'], np.linspace(0, 1, 12))})
ds1 = adjust_temporal_attrs(ds)
self.assertIs(ds1, ds)
self.assertNotIn('time_coverage_start', ds1)
self.assertNotIn('time_coverage_end', ds1)
self.assertNotIn('time_coverage_resolution', ds1)
self.assertNotIn('time_coverage_duration', ds1)
def test_bnds(self):
"""Test a case when time_bnds is available"""
time = [datetime(2000, x, 1) for x in range(1, 13)]
ds = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.zeros([45, 90, 12])),
'second': (['lat', 'lon', 'time'], np.zeros([45, 90, 12])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'nv': [0, 1],
'time': time})
month_ends = list()
for x in ds.time.values:
year = int(str(x)[0:4])
month = int(str(x)[5:7])
day = calendar.monthrange(year, month)[1]
month_ends.append(datetime(year, month, day))
ds['time_bnds'] = (['time', 'nv'], list(zip(time, month_ends)))
ds.time.attrs['bounds'] = 'time_bnds'
ds1 = adjust_temporal_attrs(ds)
# Make sure original dataset is not altered
with self.assertRaises(KeyError):
# noinspection PyStatementEffect
ds.attrs['time_coverage_start']
# Make sure expected values are in the new dataset
self.assertEqual(ds1.attrs['time_coverage_start'],
'2000-01-01T00:00:00.000000000')
self.assertEqual(ds1.attrs['time_coverage_end'],
'2000-12-31T00:00:00.000000000')
self.assertEqual(ds1.attrs['time_coverage_resolution'],
'P1M')
self.assertEqual(ds1.attrs['time_coverage_duration'],
'P366D')
def test_single_slice(self):
"""Test a case when the dataset is a single time slice"""
# With bnds
ds = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.zeros([45, 90, 1])),
'second': (['lat', 'lon', 'time'], np.zeros([45, 90, 1])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'nv': [0, 1],
'time': [datetime(2000, 1, 1)]})
ds.time.attrs['bounds'] = 'time_bnds'
ds['time_bnds'] = (['time', 'nv'],
[(datetime(2000, 1, 1), datetime(2000, 1, 31))])
ds1 = adjust_temporal_attrs(ds)
# Make sure original dataset is not altered
with self.assertRaises(KeyError):
# noinspection PyStatementEffect
ds.attrs['time_coverage_start']
# Make sure expected values are in the new dataset
self.assertEqual(ds1.attrs['time_coverage_start'],
'2000-01-01T00:00:00.000000000')
self.assertEqual(ds1.attrs['time_coverage_end'],
'2000-01-31T00:00:00.000000000')
self.assertEqual(ds1.attrs['time_coverage_duration'],
'P31D')
with self.assertRaises(KeyError):
# Resolution is not defined for a single slice
# noinspection PyStatementEffect
ds.attrs['time_coverage_resolution']
# Without bnds
ds = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.zeros([45, 90, 1])),
'second': (['lat', 'lon', 'time'], np.zeros([45, 90, 1])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [datetime(2000, 1, 1)]})
ds1 = adjust_temporal_attrs(ds)
self.assertEqual(ds1.attrs['time_coverage_start'],
'2000-01-01T00:00:00.000000000')
self.assertEqual(ds1.attrs['time_coverage_end'],
'2000-01-01T00:00:00.000000000')
with self.assertRaises(KeyError):
# noinspection PyStatementEffect
ds.attrs['time_coverage_resolution']
with self.assertRaises(KeyError):
# noinspection PyStatementEffect
ds.attrs['time_coverage_duration']
class TestNormalizeCoordVars(TestCase):
def test_ds_with_potential_coords(self):
ds = xr.Dataset({'first': (['lat', 'lon'], np.zeros([90, 180])),
'second': (['lat', 'lon'], np.zeros([90, 180])),
'lat_bnds': (['lat', 'bnds'], np.zeros([90, 2])),
'lon_bnds': (['lon', 'bnds'], np.zeros([180, 2]))},
coords={'lat': np.linspace(-89.5, 89.5, 90),
'lon': np.linspace(-179.5, 179.5, 180)})
new_ds = normalize_coord_vars(ds)
self.assertIsNot(ds, new_ds)
self.assertEqual(len(new_ds.coords), 4)
self.assertIn('lon', new_ds.coords)
self.assertIn('lat', new_ds.coords)
self.assertIn('lat_bnds', new_ds.coords)
self.assertIn('lon_bnds', new_ds.coords)
self.assertEqual(len(new_ds.data_vars), 2)
self.assertIn('first', new_ds.data_vars)
self.assertIn('second', new_ds.data_vars)
def test_ds_with_potential_coords_and_bounds(self):
ds = xr.Dataset({'first': (['lat', 'lon'], np.zeros([90, 180])),
'second': (['lat', 'lon'], np.zeros([90, 180])),
'lat_bnds': (['lat', 'bnds'], np.zeros([90, 2])),
'lon_bnds': (['lon', 'bnds'], np.zeros([180, 2])),
'lat': (['lat'], np.linspace(-89.5, 89.5, 90)),
'lon': (['lon'], np.linspace(-179.5, 179.5, 180))})
new_ds = normalize_coord_vars(ds)
self.assertIsNot(ds, new_ds)
self.assertEqual(len(new_ds.coords), 4)
self.assertIn('lon', new_ds.coords)
self.assertIn('lat', new_ds.coords)
self.assertIn('lat_bnds', new_ds.coords)
self.assertIn('lon_bnds', new_ds.coords)
self.assertEqual(len(new_ds.data_vars), 2)
self.assertIn('first', new_ds.data_vars)
self.assertIn('second', new_ds.data_vars)
def test_ds_with_no_potential_coords(self):
ds = xr.Dataset({'first': (['lat', 'lon'], np.zeros([90, 180])),
'second': (['lat', 'lon'], np.zeros([90, 180]))},
coords={'lat': np.linspace(-89.5, 89.5, 90),
'lon': np.linspace(-179.5, 179.5, 180)},
attrs={'time_coverage_start': '20120101'})
new_ds = normalize_coord_vars(ds)
self.assertIs(ds, new_ds)
class TestNormalizeMissingTime(TestCase):
def test_ds_without_time(self):
ds = xr.Dataset({'first': (['lat', 'lon'], np.zeros([90, 180])),
'second': (['lat', 'lon'], np.zeros([90, 180]))},
coords={'lat': np.linspace(-89.5, 89.5, 90),
'lon': np.linspace(-179.5, 179.5, 180)},
attrs={'time_coverage_start': '20120101',
'time_coverage_end': '20121231'})
new_ds = normalize_missing_time(ds)
self.assertIsNot(ds, new_ds)
self.assertEqual(len(new_ds.coords), 4)
self.assertIn('lon', new_ds.coords)
self.assertIn('lat', new_ds.coords)
self.assertIn('time', new_ds.coords)
self.assertIn('time_bnds', new_ds.coords)
self.assertEqual(new_ds.coords['time'].attrs.get('long_name'), 'time')
self.assertEqual(new_ds.coords['time'].attrs.get('bounds'), 'time_bnds')
self.assertEqual(new_ds.first.shape, (1, 90, 180))
self.assertEqual(new_ds.second.shape, (1, 90, 180))
self.assertEqual(new_ds.coords['time'][0], xr.DataArray(pd.to_datetime('2012-07-01T12:00:00')))
self.assertEqual(new_ds.coords['time'].attrs.get('long_name'), 'time')
self.assertEqual(new_ds.coords['time'].attrs.get('bounds'), 'time_bnds')
self.assertEqual(new_ds.coords['time_bnds'][0][0], xr.DataArray(pd.to_datetime('2012-01-01')))
self.assertEqual(new_ds.coords['time_bnds'][0][1], xr.DataArray(pd.to_datetime('2012-12-31')))
self.assertEqual(new_ds.coords['time_bnds'].attrs.get('long_name'), 'time')
def test_ds_without_bounds(self):
ds = xr.Dataset({'first': (['lat', 'lon'], np.zeros([90, 180])),
'second': (['lat', 'lon'], np.zeros([90, 180]))},
coords={'lat': np.linspace(-89.5, 89.5, 90),
'lon': np.linspace(-179.5, 179.5, 180)},
attrs={'time_coverage_start': '20120101'})
new_ds = normalize_missing_time(ds)
self.assertIsNot(ds, new_ds)
self.assertEqual(len(new_ds.coords), 3)
self.assertIn('lon', new_ds.coords)
self.assertIn('lat', new_ds.coords)
self.assertIn('time', new_ds.coords)
self.assertNotIn('time_bnds', new_ds.coords)
self.assertEqual(new_ds.first.shape, (1, 90, 180))
self.assertEqual(new_ds.second.shape, (1, 90, 180))
self.assertEqual(new_ds.coords['time'][0], xr.DataArray(pd.to_datetime('2012-01-01')))
self.assertEqual(new_ds.coords['time'].attrs.get('long_name'), 'time')
self.assertEqual(new_ds.coords['time'].attrs.get('bounds'), None)
def test_ds_without_time_attrs(self):
ds = xr.Dataset({'first': (['lat', 'lon'], np.zeros([90, 180])),
'second': (['lat', 'lon'], np.zeros([90, 180]))},
coords={'lat': np.linspace(-89.5, 89.5, 90),
'lon': np.linspace(-179.5, 179.5, 180)})
new_ds = normalize_missing_time(ds)
self.assertIs(ds, new_ds)
class Fix360Test(TestCase):
def test_fix_360_lon(self):
# The following simulates a strangely geo-coded soil moisture dataset
# we found at ...
#
lon_size = 360
lat_size = 130
time_size = 12
ds = xr.Dataset({
'first': (['time', 'lat', 'lon'], np.random.random_sample([time_size, lat_size, lon_size])),
'second': (['time', 'lat', 'lon'], np.random.random_sample([time_size, lat_size, lon_size]))},
coords={'lon': np.linspace(1., 360., lon_size),
'lat': np.linspace(-64., 65., lat_size),
'time': [datetime(2000, x, 1) for x in range(1, time_size + 1)]},
attrs=dict(geospatial_lon_min=0.,
geospatial_lon_max=360.,
geospatial_lat_min=-64.5,
geospatial_lat_max=+65.5,
geospatial_lon_resolution=1.,
geospatial_lat_resolution=1.))
new_ds = normalize(ds)
self.assertIsNot(ds, new_ds)
self.assertEqual(ds.dims, new_ds.dims)
self.assertEqual(ds.sizes, new_ds.sizes)
assert_array_almost_equal(new_ds.lon, np.linspace(-179.5, 179.5, 360))
assert_array_almost_equal(new_ds.lat, np.linspace(-64., 65., 130))
assert_array_almost_equal(new_ds.first[..., :180], ds.first[..., 180:])
assert_array_almost_equal(new_ds.first[..., 180:], ds.first[..., :180])
assert_array_almost_equal(new_ds.second[..., :180], ds.second[..., 180:])
assert_array_almost_equal(new_ds.second[..., 180:], ds.second[..., :180])
self.assertEqual(-180., new_ds.attrs['geospatial_lon_min'])
self.assertEqual(+180., new_ds.attrs['geospatial_lon_max'])
self.assertEqual(-64.5, new_ds.attrs['geospatial_lat_min'])
self.assertEqual(+65.5, new_ds.attrs['geospatial_lat_max'])
self.assertEqual(1., new_ds.attrs['geospatial_lon_resolution'])
self.assertEqual(1., new_ds.attrs['geospatial_lat_resolution'])
class NormalizeDimOrder(TestCase):
"""
    Test normalization of a CCI Sea Level-like dataset (dimension order and time coordinates)
"""
def test_no_change(self):
"""
Test nominal operation
"""
lon_size = 360
lat_size = 130
time_size = 12
ds = xr.Dataset({
'first': (['time', 'lat', 'lon'], np.random.random_sample([time_size, lat_size, lon_size])),
'second': (['time', 'lat', 'lon'], np.random.random_sample([time_size, lat_size, lon_size]))},
            coords={'lon': np.linspace(-179.5, 179.5, lon_size),
'lat': np.linspace(-64., 65., lat_size),
'time': [datetime(2000, x, 1) for x in range(1, time_size + 1)]})
ds2 = normalize(ds)
self.assertIs(ds2, ds)
def test_nominal(self):
"""
Test nominal operation
"""
        ds = self.new_cci_sea_level_ds()
ds2 = normalize(ds)
self.assertIsNot(ds2, ds)
self.assertIn('ampl', ds2)
self.assertIn('phase', ds2)
self.assertIn('time', ds2.coords)
self.assertIn('time_bnds', ds2.coords)
self.assertNotIn('time_step', ds2.coords)
self.assertEqual(['time', 'period', 'lat', 'lon'], list(ds2.ampl.dims))
self.assertEqual(['time', 'period', 'lat', 'lon'], list(ds2.phase.dims))
    def new_cci_sea_level_ds(self):
period_size = 2
lon_size = 4
lat_size = 2
dataset = xr.Dataset(dict(ampl=(['lat', 'lon', 'period'], np.ones(shape=(lat_size, lon_size, period_size))),
phase=(['lat', 'lon', 'period'], np.zeros(shape=(lat_size, lon_size, period_size)))),
coords=dict(lon=np.array([-135, -45., 45., 135.]), lat=np.array([-45., 45.]),
time=pd.to_datetime(
['1993-01-15T00:00:00.000000000', '1993-02-15T00:00:00.000000000',
'2015-11-15T00:00:00.000000000', '2015-12-15T00:00:00.000000000'])))
dataset.coords['time'].encoding.update(units='days since 1950-01-01', dtype=np.dtype(np.float32))
dataset.coords['time'].attrs.update(long_name='time', standard_name='time')
dataset.attrs['time_coverage_start'] = '1993-01-01 00:00:00'
dataset.attrs['time_coverage_end'] = '2015-12-31 23:59:59'
return dataset
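if __name__ == '__main__':
    # Minimal, illustrative usage (not part of the test suite): `normalize`
    # renames non-standard coordinate names such as 'latitude'/'longitude'
    # to 'lat'/'lon', as exercised by TestNormalize above.
    _demo = xr.Dataset({'first': (['latitude', 'longitude'], np.zeros((2, 3)))})
    print(normalize(_demo).dims)  # expected to contain 'lat' and 'lon'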
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for computing statistics of samples."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
__all__ = [
"percentile",
]
# TODO(langmore) To make equivalent to numpy.percentile:
# Make work with a sequence of floats or single float for 'q'.
# Make work with "linear", "midpoint" interpolation. (linear should be default)
def percentile(x,
q,
axis=None,
interpolation=None,
keep_dims=False,
validate_args=False,
name=None):
"""Compute the `q`-th percentile of `x`.
Given a vector `x`, the `q`-th percentile of `x` is the value `q / 100` of the
way from the minimum to the maximum in a sorted copy of `x`.
The values and distances of the two nearest neighbors as well as the
`interpolation` parameter will determine the percentile if the normalized
ranking does not match the location of `q` exactly.
This function is the same as the median if `q = 50`, the same as the minimum
if `q = 0` and the same as the maximum if `q = 100`.
```python
# Get 30th percentile with default ('nearest') interpolation.
x = [1., 2., 3., 4.]
percentile(x, q=30.)
==> 2.0
# Get 30th percentile with 'lower' interpolation
x = [1., 2., 3., 4.]
percentile(x, q=30., interpolation='lower')
==> 1.0
# Get 100th percentile (maximum). By default, this is computed over every dim
  x = [[1., 2.],
[3., 4.]]
percentile(x, q=100.)
==> 4.0
# Treat the leading dim as indexing samples, and find the 100th quantile (max)
# over all such samples.
  x = [[1., 2.],
[3., 4.]]
percentile(x, q=100., axis=[0])
==> [3., 4.]
```
Compare to `numpy.percentile`.
Args:
x: Floating point `N-D` `Tensor` with `N > 0`. If `axis` is not `None`,
`x` must have statically known number of dimensions.
q: Scalar `Tensor` in `[0, 100]`. The percentile.
axis: Optional `0-D` or `1-D` integer `Tensor` with constant values.
The axis that hold independent samples over which to return the desired
percentile. If `None` (the default), treat every dimension as a sample
dimension, returning a scalar.
interpolation : {"lower", "higher", "nearest"}. Default: "nearest"
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points `i < j`:
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j`, whichever is nearest.
    keep_dims: Python `bool`. If `True`, the last dimension is kept with size 1.
      If `False`, the last dimension is removed from the output shape.
validate_args: Whether to add runtime checks of argument validity.
If False, and arguments are incorrect, correct behavior is not guaranteed.
name: A Python string name to give this `Op`. Default is "percentile"
Returns:
A `(N - len(axis))` dimensional `Tensor` of same dtype as `x`, or, if
`axis` is `None`, a scalar.
Raises:
ValueError: If argument 'interpolation' is not an allowed type.
"""
name = name or "percentile"
allowed_interpolations = {"lower", "higher", "nearest"}
if interpolation is None:
interpolation = "nearest"
else:
if interpolation not in allowed_interpolations:
raise ValueError("Argument 'interpolation' must be in %s. Found %s" %
(allowed_interpolations, interpolation))
with ops.name_scope(name, [x, q]):
x = ops.convert_to_tensor(x, name="x")
q = math_ops.to_float(q, name="q")
_get_static_ndims(q, expect_ndims=0)
if validate_args:
q = control_flow_ops.with_dependencies([
check_ops.assert_rank(q, 0), check_ops.assert_greater_equal(q, 0.),
check_ops.assert_less_equal(q, 100.)
], q)
if axis is None:
y = array_ops.reshape(x, [-1])
else:
axis = ops.convert_to_tensor(axis, name="axis")
check_ops.assert_integer(axis)
axis_ndims = _get_static_ndims(
axis, expect_static=True, expect_ndims_no_more_than=1)
axis_const = tensor_util.constant_value(axis)
if axis_const is None:
raise ValueError(
"Expected argument 'axis' to be statically available. Found: %s" %
axis)
axis = axis_const
if axis_ndims == 0:
axis = [axis]
axis = [int(a) for a in axis]
x_ndims = _get_static_ndims(
x, expect_static=True, expect_ndims_at_least=1)
axis = _make_static_axis_non_negative(axis, x_ndims)
y = _move_dims_to_flat_end(x, axis, x_ndims)
frac_at_q_or_above = 1. - q / 100.
d = math_ops.to_float(array_ops.shape(y)[-1])
if interpolation == "lower":
index = math_ops.ceil((d - 1) * frac_at_q_or_above)
elif interpolation == "higher":
index = math_ops.floor((d - 1) * frac_at_q_or_above)
elif interpolation == "nearest":
index = math_ops.round((d - 1) * frac_at_q_or_above)
    # Sort the full tensor rather than just the top 'k' entries, so that repeated
    # calls can share a single sort under the hood via common-subexpression
    # elimination (CSE).
sorted_y = _sort_tensor(y)
# result.shape = B
result = sorted_y[..., math_ops.to_int32(index)]
result.set_shape(y.get_shape()[:-1])
if keep_dims:
if axis is None:
# ones_vec = [1, 1,..., 1], total length = len(S) + len(B).
ones_vec = array_ops.ones(
shape=[_get_best_effort_ndims(x)], dtype=dtypes.int32)
result *= array_ops.ones(ones_vec, dtype=x.dtype)
else:
result = _insert_back_keep_dims(result, axis)
return result
def _get_static_ndims(x,
expect_static=False,
expect_ndims=None,
expect_ndims_no_more_than=None,
expect_ndims_at_least=None):
"""Get static number of dimensions and assert that some expectations are met.
This function returns the number of dimensions "ndims" of x, as a Python int.
The optional expect arguments are used to check the ndims of x, but this is
only done if the static ndims of x is not None.
Args:
x: A Tensor.
expect_static: Expect `x` to have statically defined `ndims`.
expect_ndims: Optional Python integer. If provided, assert that x has
number of dimensions equal to this.
expect_ndims_no_more_than: Optional Python integer. If provided, assert
that x has no more than this many dimensions.
expect_ndims_at_least: Optional Python integer. If provided, assert that
x has at least this many dimensions.
Returns:
ndims: A Python integer.
Raises:
ValueError: If any of the expectations above are violated.
"""
ndims = x.get_shape().ndims
if ndims is None:
shape_const = tensor_util.constant_value(array_ops.shape(x))
if shape_const is not None:
      ndims = shape_const.size  # rank of x = number of entries in its shape vector
if ndims is None:
if expect_static:
raise ValueError(
"Expected argument 'x' to have statically defined 'ndims'. Found: " %
x)
return
if expect_ndims is not None:
ndims_message = ("Expected argument 'x' to have ndims %s. Found tensor %s"
% (expect_ndims, x))
if ndims != expect_ndims:
raise ValueError(ndims_message)
if expect_ndims_at_least is not None:
ndims_at_least_message = (
"Expected argument 'x' to have ndims >= %d. Found tensor %s" % (
expect_ndims_at_least, x))
if ndims < expect_ndims_at_least:
raise ValueError(ndims_at_least_message)
if expect_ndims_no_more_than is not None:
ndims_no_more_than_message = (
"Expected argument 'x' to have ndims <= %d. Found tensor %s" % (
expect_ndims_no_more_than, x))
if ndims > expect_ndims_no_more_than:
raise ValueError(ndims_no_more_than_message)
return ndims
def _get_best_effort_ndims(x,
expect_ndims=None,
expect_ndims_at_least=None,
expect_ndims_no_more_than=None):
"""Get static ndims if possible. Fallback on `tf.rank(x)`."""
ndims_static = _get_static_ndims(
x,
expect_ndims=expect_ndims,
expect_ndims_at_least=expect_ndims_at_least,
expect_ndims_no_more_than=expect_ndims_no_more_than)
if ndims_static is not None:
return ndims_static
return array_ops.rank(x)
def _insert_back_keep_dims(x, axis):
"""Insert the dims in `axis` back as singletons after being removed.
Args:
x: `Tensor`.
axis: Python list of integers.
Returns:
`Tensor` with same values as `x`, but additional singleton dimensions.
"""
for i in sorted(axis):
x = array_ops.expand_dims(x, axis=i)
return x
def _make_static_axis_non_negative(axis, ndims):
"""Convert possibly negatively indexed axis to non-negative.
Args:
axis: Iterable over Python integers.
ndims: Number of dimensions into which axis indexes.
Returns:
A list of non-negative Python integers.
Raises:
ValueError: If values in `axis` are too big/small to index into `ndims`.
"""
non_negative_axis = []
for d in axis:
if d >= 0:
if d >= ndims:
raise ValueError("dim %d not in the interval [0, %d]." % (d, ndims - 1))
non_negative_axis.append(d)
else:
if d < -1 * ndims:
raise ValueError(
"Negatively indexed dim %d not in the interval [-%d, -1]" % (d,
ndims))
non_negative_axis.append(ndims + d)
return non_negative_axis
def _move_dims_to_flat_end(x, axis, x_ndims):
"""Move dims corresponding to `axis` in `x` to the end, then flatten.
Args:
x: `Tensor` with shape `[B0,B1,...,Bb]`.
axis: Python list of indices into dimensions of `x`.
x_ndims: Python integer holding number of dimensions in `x`.
Returns:
    `Tensor` with the values of `x`, with the dimensions in `axis` moved to the
    end and flattened into a single dimension.
"""
# Suppose x.shape = [a, b, c, d]
# Suppose axis = [1, 3]
# front_dims = [0, 2] in example above.
front_dims = sorted(set(range(x_ndims)).difference(axis))
# x_permed.shape = [a, c, b, d]
x_permed = array_ops.transpose(x, perm=front_dims + list(axis))
if x.get_shape().is_fully_defined():
x_shape = x.get_shape().as_list()
# front_shape = [a, c], end_shape = [b * d]
front_shape = [x_shape[i] for i in front_dims]
end_shape = [np.prod([x_shape[i] for i in axis])]
full_shape = front_shape + end_shape
else:
front_shape = array_ops.shape(x_permed)[:x_ndims - len(axis)]
end_shape = [-1]
full_shape = array_ops.concat([front_shape, end_shape], axis=0)
return array_ops.reshape(x_permed, shape=full_shape)
def _sort_tensor(tensor):
"""Use `top_k` to sort a `Tensor` along the last dimension."""
sorted_, _ = nn_ops.top_k(tensor, k=array_ops.shape(tensor)[-1])
return sorted_
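if __name__ == "__main__":
  # Minimal sanity-check sketch (assumes a TF 1.x graph/session runtime; not
  # part of this module's API): with the default "nearest" interpolation,
  # `percentile` should agree with numpy's nearest-rank percentile.
  from tensorflow.python.client import session as _session_lib

  _x = np.array([1., 2., 3., 4.], dtype=np.float32)
  with ops.Graph().as_default(), _session_lib.Session() as _sess:
    print(_sess.run(percentile(_x, q=30.)))  # expected: 2.0
  print(np.percentile(_x, 30., interpolation="nearest"))  # also 2.0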
|
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import speech_recognition
from os.path import dirname, join
from speech_recognition import WavFile, AudioData
from mycroft.client.speech.listener import (AudioConsumer, RecognizerLoop,
AUDIO_DATA, STREAM_START,
STREAM_DATA, STREAM_STOP)
from mycroft.stt import MycroftSTT
from queue import Queue
class MockRecognizer(object):
def __init__(self):
self.transcriptions = []
def recognize_mycroft(self, audio, key=None,
language=None, show_all=False):
if len(self.transcriptions) > 0:
return self.transcriptions.pop(0)
else:
raise speech_recognition.UnknownValueError()
def set_transcriptions(self, transcriptions):
self.transcriptions = transcriptions
class AudioConsumerTest(unittest.TestCase):
"""
AudioConsumerTest
"""
def setUp(self):
self.loop = RecognizerLoop()
self.queue = Queue()
self.recognizer = MockRecognizer()
self.consumer = AudioConsumer(
self.loop.state, self.queue, self.loop, MycroftSTT(),
self.loop.wakeup_recognizer,
self.loop.wakeword_recognizer)
def __create_sample_from_test_file(self, sample_name):
root_dir = dirname(dirname(dirname(__file__)))
filename = join(
root_dir, 'unittests', 'client',
'data', sample_name + '.wav')
wavfile = WavFile(filename)
with wavfile as source:
return AudioData(
source.stream.read(), wavfile.SAMPLE_RATE,
wavfile.SAMPLE_WIDTH)
def test_word_extraction(self):
"""
This is intended to test the extraction of the word: ``mycroft``.
The values for ``ideal_begin`` and ``ideal_end`` were found using an
audio tool like Audacity and they represent a sample value position of
        the audio. ``tolerance`` is an acceptable margin of error for the distance
        between the ideal and actual values found by the ``WordExtractor``
"""
# TODO: implement WordExtractor test without relying on the listener
return
audio = self.__create_sample_from_test_file('weather_mycroft')
self.queue.put((AUDIO_DATA, audio))
tolerance = 4000
ideal_begin = 70000
ideal_end = 92000
monitor = {}
self.recognizer.set_transcriptions(["what's the weather next week"])
def wakeword_callback(message):
monitor['pos_begin'] = message.get('pos_begin')
monitor['pos_end'] = message.get('pos_end')
self.loop.once('recognizer_loop:wakeword', wakeword_callback)
self.consumer.read()
actual_begin = monitor.get('pos_begin')
self.assertIsNotNone(actual_begin)
diff = abs(actual_begin - ideal_begin)
self.assertTrue(
diff <= tolerance,
str(diff) + " is not less than " + str(tolerance))
actual_end = monitor.get('pos_end')
self.assertIsNotNone(actual_end)
diff = abs(actual_end - ideal_end)
self.assertTrue(
diff <= tolerance,
str(diff) + " is not less than " + str(tolerance))
    @unittest.skip('Disabled while unittests are brought up to date')
def test_wakeword_in_beginning(self):
tag = AUDIO_DATA
data = self.__create_sample_from_test_file('weather_mycroft')
self.queue.put((tag, data))
self.recognizer.set_transcriptions(["what's the weather next week"])
monitor = {}
def callback(message):
monitor['utterances'] = message.get('utterances')
self.loop.once('recognizer_loop:utterance', callback)
self.consumer.read()
utterances = monitor.get('utterances')
self.assertIsNotNone(utterances)
self.assertTrue(len(utterances) == 1)
self.assertEqual("what's the weather next week", utterances[0])
    @unittest.skip('Disabled while unittests are brought up to date')
def test_wakeword(self):
self.queue.put((AUDIO_DATA,
self.__create_sample_from_test_file('mycroft')))
self.recognizer.set_transcriptions(["silence"])
monitor = {}
def callback(message):
monitor['utterances'] = message.get('utterances')
self.loop.once('recognizer_loop:utterance', callback)
self.consumer.read()
utterances = monitor.get('utterances')
self.assertIsNotNone(utterances)
self.assertTrue(len(utterances) == 1)
self.assertEqual("silence", utterances[0])
def test_ignore_wakeword_when_sleeping(self):
self.queue.put((AUDIO_DATA,
self.__create_sample_from_test_file('mycroft')))
self.recognizer.set_transcriptions(["not detected"])
self.loop.sleep()
monitor = {}
def wakeword_callback(message):
monitor['wakeword'] = message.get('utterance')
self.loop.once('recognizer_loop:wakeword', wakeword_callback)
self.consumer.read()
self.assertIsNone(monitor.get('wakeword'))
self.assertTrue(self.loop.state.sleeping)
def test_wakeup(self):
tag = AUDIO_DATA
data = self.__create_sample_from_test_file('mycroft_wakeup')
self.queue.put((tag, data))
self.loop.sleep()
self.consumer.read()
self.assertFalse(self.loop.state.sleeping)
    @unittest.skip('Disabled while unittests are brought up to date')
def test_stop(self):
self.queue.put((AUDIO_DATA,
self.__create_sample_from_test_file('mycroft')))
self.consumer.read()
self.queue.put((AUDIO_DATA,
self.__create_sample_from_test_file('stop')))
self.recognizer.set_transcriptions(["stop"])
monitor = {}
def utterance_callback(message):
monitor['utterances'] = message.get('utterances')
self.loop.once('recognizer_loop:utterance', utterance_callback)
self.consumer.read()
utterances = monitor.get('utterances')
self.assertIsNotNone(utterances)
self.assertTrue(len(utterances) == 1)
self.assertEqual("stop", utterances[0])
    @unittest.skip('Disabled while unittests are brought up to date')
def test_record(self):
self.queue.put((AUDIO_DATA,
self.__create_sample_from_test_file('mycroft')))
self.consumer.read()
self.queue.put((AUDIO_DATA,
self.__create_sample_from_test_file('record')))
self.recognizer.set_transcriptions(["record"])
monitor = {}
def utterance_callback(message):
monitor['utterances'] = message.get('utterances')
self.loop.once('recognizer_loop:utterance', utterance_callback)
self.consumer.read()
utterances = monitor.get('utterances')
self.assertIsNotNone(utterances)
self.assertTrue(len(utterances) == 1)
self.assertEqual("record", utterances[0])
|
|
import json
import logging
from django.core.management.base import BaseCommand
from django.db import transaction
from osf.models import PreprintProvider, PreprintService, Subject
from osf.models.preprint_provider import rules_to_subjects
from scripts import utils as script_utils
from osf.models.validators import validate_subject_hierarchy
from website.preprints.tasks import on_preprint_updated
logger = logging.getLogger(__name__)
BEPRESS_PROVIDER = None
def validate_input(custom_provider, data, copy=False, add_missing=False):
# This function may be run outside of this command (e.g. in the admin app) so we
# need to make sure that BEPRESS_PROVIDER is set
global BEPRESS_PROVIDER
BEPRESS_PROVIDER = PreprintProvider.objects.filter(_id='osf').first()
logger.info('Validating data')
includes = data.get('include', [])
excludes = data.get('exclude', [])
customs = data.get('custom', {})
merges = data.get('merge', {})
if copy:
included_subjects = rules_to_subjects(custom_provider.subjects_acceptable)
else:
assert not set(includes) & set(excludes), 'There must be no overlap between includes and excludes'
for text in includes:
assert Subject.objects.filter(provider=BEPRESS_PROVIDER, text=text).exists(), 'Unable to find included subject with text {}'.format(text)
included_subjects = Subject.objects.filter(provider=BEPRESS_PROVIDER, text__in=includes).include_children()
logger.info('Successfully validated `include`')
for text in excludes:
try:
Subject.objects.get(provider=BEPRESS_PROVIDER, text=text)
except Subject.DoesNotExist:
raise RuntimeError('Unable to find excluded subject with text {}'.format(text))
assert included_subjects.filter(text=text).exists(), 'Excluded subject with text {} was not included'.format(text)
included_subjects = included_subjects.exclude(text__in=excludes)
logger.info('Successfully validated `exclude`')
for cust_name, map_dict in customs.iteritems():
assert not included_subjects.filter(text=cust_name).exists(), 'Custom text {} already exists in mapped set'.format(cust_name)
assert Subject.objects.filter(provider=BEPRESS_PROVIDER, text=map_dict.get('bepress')).exists(), 'Unable to find specified BePress subject with text {}'.format(map_dict.get('bepress'))
if map_dict.get('parent'): # Null parent possible
assert map_dict['parent'] in set(customs.keys()) | set(included_subjects.values_list('text', flat=True)), 'Unable to find specified parent with text {} in mapped set'.format(map_dict['parent'])
# TODO: hierarchy length validation? Probably more trouble than worth here, done on .save
logger.info('Successfully validated `custom`')
included_subjects = included_subjects | Subject.objects.filter(text__in=[map_dict['bepress'] for map_dict in customs.values()])
for merged_from, merged_into in merges.iteritems():
assert not included_subjects.filter(text=merged_from).exists(), 'Cannot merge subject "{}" that will be included'.format(merged_from)
assert merged_into in set(included_subjects.values_list('text', flat=True)) | set(customs.keys()), 'Unable to determine merge target for "{}"'.format(merged_into)
included_subjects = included_subjects | Subject.objects.filter(text__in=merges.keys())
missing_subjects = Subject.objects.filter(id__in=set([hier[-1].id for ps in PreprintService.objects.filter(provider=custom_provider) for hier in ps.subject_hierarchy])).exclude(id__in=included_subjects.values_list('id', flat=True))
if not add_missing:
assert not missing_subjects.exists(), 'Incomplete mapping -- following subjects in use but not included:\n{}'.format(list(missing_subjects.values_list('text', flat=True)))
assert custom_provider.share_title not in [None, '', 'bepress'], 'share title not set; please set the share title on this provider before creating a custom taxonomy.'
logger.info('Successfully validated mapping completeness')
return list(missing_subjects) if add_missing else None
def create_subjects_recursive(custom_provider, root_text, exclude_texts, parent=None):
logger.info('Duplicating BePress subject {} on {}'.format(root_text, custom_provider._id))
bepress_subj = Subject.objects.get(provider=BEPRESS_PROVIDER, text=root_text)
custom_subj = Subject(text=root_text, parent=parent, bepress_subject=bepress_subj, provider=custom_provider)
custom_subj.save()
# This is not a problem now, as all excluded subjects are leafs, but it could be problematic if non-leafs had their children excluded.
# It could also be problematic if they didn't, if any of those children are used by existing preprints.
# TODO: Determine correct resolution
for child_text in bepress_subj.children.exclude(text__in=exclude_texts).values_list('text', flat=True):
create_subjects_recursive(custom_provider, child_text, exclude_texts, parent=custom_subj)
def create_from_subjects_acceptable(custom_provider, add_missing=False, missing=None):
tries = 0
subjects_to_copy = list(rules_to_subjects(custom_provider.subjects_acceptable))
if missing and add_missing:
subjects_to_copy = subjects_to_copy + missing
while len(subjects_to_copy):
previous_len = len(subjects_to_copy)
tries += 1
if tries == 10:
raise RuntimeError('Unable to map subjects acceptable with 10 iterations -- subjects remaining: {}'.format(subjects_to_copy))
for subj in list(subjects_to_copy):
if map_custom_subject(custom_provider, subj.text, subj.parent.text if subj.parent else None, subj.text):
subjects_to_copy.remove(subj)
elif add_missing and subj.parent and subj.parent not in subjects_to_copy:
# Dirty
subjects_to_copy.append(subj.parent)
previous_len += 1
else:
logger.warn('Failed. Retrying next iteration')
new_len = len(subjects_to_copy)
if new_len == previous_len:
raise RuntimeError('Unable to map any custom subjects on iteration -- subjects remaining: {}'.format(subjects_to_copy))
def do_create_subjects(custom_provider, includes, excludes, copy=False, add_missing=False, missing=None):
if copy:
create_from_subjects_acceptable(custom_provider, add_missing=add_missing, missing=missing)
else:
for root_text in includes:
create_subjects_recursive(custom_provider, root_text, excludes)
def map_custom_subject(custom_provider, name, parent, mapping):
logger.info('Attempting to create subject {} on {} from {} with {}'.format(name, custom_provider._id, mapping, 'parent {}'.format(parent) if parent else 'no parent'))
if parent:
parent_subject = Subject.objects.filter(provider=custom_provider, text=parent).first()
else:
parent_subject = None
bepress_subject = Subject.objects.get(provider=BEPRESS_PROVIDER, text=mapping)
if parent and not parent_subject:
return False
custom_subject = Subject(provider=custom_provider, text=name, parent=parent_subject, bepress_subject=bepress_subject)
custom_subject.save()
return True
def do_custom_mapping(custom_provider, customs):
tries = 0
unmapped_customs = customs
while len(unmapped_customs):
previous_len = len(unmapped_customs)
tries += 1
if tries == 10:
raise RuntimeError('Unable to map custom subjects with 10 iterations -- invalid input')
successes = []
for cust_name, map_dict in unmapped_customs.iteritems():
if map_custom_subject(custom_provider, cust_name, map_dict.get('parent'), map_dict.get('bepress')):
successes.append(cust_name)
else:
logger.warn('Failed. Retrying next iteration')
[unmapped_customs.pop(key) for key in successes]
new_len = len(unmapped_customs)
if new_len == previous_len:
raise RuntimeError('Unable to map any custom subjects on iteration -- invalid input')
def map_preprints_to_custom_subjects(custom_provider, merge_dict, dry_run=False):
for preprint in PreprintService.objects.filter(provider=custom_provider):
logger.info('Preparing to migrate preprint {}'.format(preprint.id))
old_hier = preprint.subject_hierarchy
subjects_to_map = [hier[-1] for hier in old_hier]
merged_subject_ids = set(Subject.objects.filter(provider=custom_provider, text__in=[merge_dict[k] for k in set(merge_dict.keys()) & set([s.text for s in subjects_to_map])]).values_list('id', flat=True))
subject_ids_to_map = set(s.id for s in subjects_to_map if s.text not in merge_dict.keys())
aliased_subject_ids = set(Subject.objects.filter(bepress_subject__id__in=subject_ids_to_map, provider=custom_provider).values_list('id', flat=True)) | merged_subject_ids
aliased_hiers = [s.object_hierarchy for s in Subject.objects.filter(id__in=aliased_subject_ids)]
old_subjects = list(preprint.subjects.values_list('id', flat=True))
preprint.subjects.clear()
for hier in aliased_hiers:
validate_subject_hierarchy([s._id for s in hier])
for s in hier:
preprint.subjects.add(s)
# Update preprint in SHARE
if not dry_run:
on_preprint_updated(preprint._id, old_subjects=old_subjects, update_share=True)
preprint.reload()
new_hier = [s.object_hierarchy for s in preprint.subjects.exclude(children__in=preprint.subjects.all())]
logger.info('Successfully migrated preprint {}.\n\tOld hierarchy:{}\n\tNew hierarchy:{}'.format(preprint.id, old_hier, new_hier))
def migrate(provider=None, share_title=None, data=None, dry_run=False, copy=False, add_missing=False):
# This function may be run outside of this command (e.g. in the admin app) so we
# need to make sure that BEPRESS_PROVIDER is set
global BEPRESS_PROVIDER
if not BEPRESS_PROVIDER:
BEPRESS_PROVIDER = PreprintProvider.objects.filter(_id='osf').first()
custom_provider = PreprintProvider.objects.filter(_id=provider).first()
assert custom_provider, 'Unable to find specified provider: {}'.format(provider)
assert custom_provider.id != BEPRESS_PROVIDER.id, 'Cannot add custom mapping to BePress provider'
    assert not custom_provider.subjects.exists(), 'Provider already has a custom taxonomy'
if custom_provider.share_title in [None, '', 'bepress']:
if not share_title:
raise RuntimeError('`--share-title` is required if not already set on the provider')
custom_provider.share_title = share_title
custom_provider.save()
missing = validate_input(custom_provider, data, copy=copy, add_missing=add_missing)
do_create_subjects(custom_provider, data['include'], data.get('exclude', []), copy=copy, add_missing=add_missing, missing=missing)
do_custom_mapping(custom_provider, data.get('custom', {}))
map_preprints_to_custom_subjects(custom_provider, data.get('merge', {}), dry_run=dry_run)
class Command(BaseCommand):
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument(
'--dry',
action='store_true',
dest='dry_run',
help='Run migration and roll back changes to db',
)
parser.add_argument(
'--data',
action='store',
dest='data',
            help='JSON object of the form {\n"include": [<list of subject texts to include at top level, children implicit>],'
                 '\n"exclude": [<list of children to exclude from included trees>],'
                 '\n"custom": {"<Custom Name>": {"parent": "<Parent text>", "bepress": "<BePress Name>"}, ...},'
                 '\n"merge": {"<Merged from (BePress)>": "<Merged into (custom)>", ...}}',
)
parser.add_argument(
'--provider',
action='store',
dest='provider',
required=True,
help='_id of the PreprintProvider object, e.g. "osf". Provider is expected to not already have a custom taxonomy.'
)
parser.add_argument(
'--from-subjects-acceptable',
action='store_true',
dest='from_subjects_acceptable',
help='Specifies that the provider\'s `subjects_acceptable` be copied. `data.include` and `exclude` are ignored, the other keys may still be used'
)
parser.add_argument(
'--add-missing',
action='store_true',
dest='add_missing',
help='Adds "used-but-not-included" subjects.'
)
parser.add_argument(
'--share-title',
action='store',
type=str,
dest='share_title',
help='Sets <provider>.share_title. Ignored if already set on provider, required if not.'
)
def handle(self, *args, **options):
global BEPRESS_PROVIDER
BEPRESS_PROVIDER = PreprintProvider.objects.filter(_id='osf').first()
dry_run = options.get('dry_run')
provider = options['provider']
data = json.loads(options['data'] or '{}')
share_title = options.get('share_title')
copy = options.get('from_subjects_acceptable')
add_missing = options.get('add_missing')
if copy:
data['include'] = list(Subject.objects.filter(provider=BEPRESS_PROVIDER, parent__isnull=True).values_list('text', flat=True))
if not dry_run:
script_utils.add_file_logger(logger, __file__)
with transaction.atomic():
migrate(provider=provider, share_title=share_title, data=data, dry_run=dry_run, copy=copy, add_missing=add_missing)
if dry_run:
raise RuntimeError('Dry run, transaction rolled back.')
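# A minimal sketch of a `--data` payload; the provider id, share title and
# subject names below are purely illustrative and not taken from a real
# taxonomy:
#
#   python manage.py <this_command> --provider example --share-title "Example Preprints" --data '<json>'
#
# where '<json>' serializes something like:
EXAMPLE_DATA = {
    'include': ['Engineering'],            # top-level BePress subjects to copy (children implied)
    'exclude': ['Aerospace Engineering'],  # children pruned from the included trees
    'custom': {
        # custom subject text -> its parent in the new taxonomy plus the BePress subject it aliases
        'Rocketry': {'parent': 'Engineering', 'bepress': 'Aerospace Engineering'},
    },
    'merge': {'Architecture': 'Engineering'},  # BePress subject folded into an included/custom subject
}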
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Pooling layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras.engine import InputSpec
from tensorflow.python.keras._impl.keras.engine import Layer
from tensorflow.python.keras._impl.keras.utils import conv_utils
from tensorflow.python.layers import pooling as tf_pooling_layers
class MaxPooling1D(tf_pooling_layers.MaxPooling1D, Layer):
"""Max pooling operation for temporal data.
Arguments:
pool_size: Integer, size of the max pooling windows.
strides: Integer, or None. Factor by which to downscale.
E.g. 2 will halve the input.
If None, it will default to `pool_size`.
padding: One of `"valid"` or `"same"` (case-insensitive).
Input shape:
3D tensor with shape: `(batch_size, steps, features)`.
Output shape:
3D tensor with shape: `(batch_size, downsampled_steps, features)`.
"""
def __init__(self, pool_size=2, strides=None, padding='valid', **kwargs):
if strides is None:
strides = pool_size
super(MaxPooling1D, self).__init__(pool_size, strides, padding, **kwargs)
def get_config(self):
config = {
'strides': self.strides,
'pool_size': self.pool_size,
'padding': self.padding
}
base_config = super(MaxPooling1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class AveragePooling1D(tf_pooling_layers.AveragePooling1D, Layer):
"""Average pooling for temporal data.
Arguments:
      pool_size: Integer, size of the average pooling windows.
strides: Integer, or None. Factor by which to downscale.
E.g. 2 will halve the input.
If None, it will default to `pool_size`.
padding: One of `"valid"` or `"same"` (case-insensitive).
Input shape:
3D tensor with shape: `(batch_size, steps, features)`.
Output shape:
3D tensor with shape: `(batch_size, downsampled_steps, features)`.
"""
def __init__(self, pool_size=2, strides=None, padding='valid', **kwargs):
if strides is None:
strides = pool_size
super(AveragePooling1D, self).__init__(pool_size, strides, padding,
**kwargs)
def get_config(self):
config = {
'strides': self.strides,
'pool_size': self.pool_size,
'padding': self.padding
}
base_config = super(AveragePooling1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class MaxPooling2D(tf_pooling_layers.MaxPooling2D, Layer):
"""Max pooling operation for spatial data.
Arguments:
pool_size: integer or tuple of 2 integers,
factors by which to downscale (vertical, horizontal).
          (2, 2) will halve the input in both spatial dimensions.
If only one integer is specified, the same window length
will be used for both dimensions.
strides: Integer, tuple of 2 integers, or None.
Strides values.
If None, it will default to `pool_size`.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
4D tensor with shape:
`(batch_size, rows, cols, channels)`
- If `data_format='channels_first'`:
4D tensor with shape:
`(batch_size, channels, rows, cols)`
Output shape:
- If `data_format='channels_last'`:
4D tensor with shape:
`(batch_size, pooled_rows, pooled_cols, channels)`
- If `data_format='channels_first'`:
4D tensor with shape:
`(batch_size, channels, pooled_rows, pooled_cols)`
"""
def __init__(self,
pool_size=(2, 2),
strides=None,
padding='valid',
data_format=None,
**kwargs):
if data_format is None:
data_format = K.image_data_format()
if strides is None:
strides = pool_size
super(MaxPooling2D, self).__init__(pool_size, strides, padding, data_format,
**kwargs)
def get_config(self):
config = {
'pool_size': self.pool_size,
'padding': self.padding,
'strides': self.strides,
'data_format': self.data_format
}
base_config = super(MaxPooling2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
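# Minimal usage sketch (assumes the usual Keras layer-call workflow around this
# class; illustrative only):
#
#   pool = MaxPooling2D(pool_size=(2, 2), padding='valid',
#                       data_format='channels_last')
#   # Applied to a `(batch, 32, 32, 3)` input, the output has shape
#   # `(batch, 16, 16, 3)`: both spatial dimensions are halved, channels kept.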
class AveragePooling2D(tf_pooling_layers.AveragePooling2D, Layer):
"""Average pooling operation for spatial data.
Arguments:
pool_size: integer or tuple of 2 integers,
factors by which to downscale (vertical, horizontal).
          (2, 2) will halve the input in both spatial dimensions.
If only one integer is specified, the same window length
will be used for both dimensions.
strides: Integer, tuple of 2 integers, or None.
Strides values.
If None, it will default to `pool_size`.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
4D tensor with shape:
`(batch_size, rows, cols, channels)`
- If `data_format='channels_first'`:
4D tensor with shape:
`(batch_size, channels, rows, cols)`
Output shape:
- If `data_format='channels_last'`:
4D tensor with shape:
`(batch_size, pooled_rows, pooled_cols, channels)`
- If `data_format='channels_first'`:
4D tensor with shape:
`(batch_size, channels, pooled_rows, pooled_cols)`
"""
def __init__(self,
pool_size=(2, 2),
strides=None,
padding='valid',
data_format=None,
**kwargs):
if data_format is None:
data_format = K.image_data_format()
if strides is None:
strides = pool_size
super(AveragePooling2D, self).__init__(pool_size, strides, padding,
data_format, **kwargs)
def get_config(self):
config = {
'pool_size': self.pool_size,
'padding': self.padding,
'strides': self.strides,
'data_format': self.data_format
}
base_config = super(AveragePooling2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class MaxPooling3D(tf_pooling_layers.MaxPooling3D, Layer):
"""Max pooling operation for 3D data (spatial or spatio-temporal).
Arguments:
pool_size: tuple of 3 integers,
factors by which to downscale (dim1, dim2, dim3).
(2, 2, 2) will halve the size of the 3D input in each dimension.
strides: tuple of 3 integers, or None. Strides values.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`
"""
def __init__(self,
pool_size=(2, 2, 2),
strides=None,
padding='valid',
data_format=None,
**kwargs):
if data_format is None:
data_format = K.image_data_format()
if strides is None:
strides = pool_size
super(MaxPooling3D, self).__init__(pool_size, strides, padding, data_format,
**kwargs)
def get_config(self):
config = {
'pool_size': self.pool_size,
'padding': self.padding,
'strides': self.strides,
'data_format': self.data_format
}
base_config = super(MaxPooling3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class AveragePooling3D(tf_pooling_layers.AveragePooling3D, Layer):
"""Average pooling operation for 3D data (spatial or spatio-temporal).
Arguments:
pool_size: tuple of 3 integers,
factors by which to downscale (dim1, dim2, dim3).
(2, 2, 2) will halve the size of the 3D input in each dimension.
strides: tuple of 3 integers, or None. Strides values.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`
"""
def __init__(self,
pool_size=(2, 2, 2),
strides=None,
padding='valid',
data_format=None,
**kwargs):
if data_format is None:
data_format = K.image_data_format()
if strides is None:
strides = pool_size
super(AveragePooling3D, self).__init__(pool_size, strides, padding,
data_format, **kwargs)
def get_config(self):
config = {
'pool_size': self.pool_size,
'padding': self.padding,
'strides': self.strides,
'data_format': self.data_format
}
base_config = super(AveragePooling3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class _GlobalPooling1D(Layer):
"""Abstract class for different global pooling 1D layers.
"""
def __init__(self, **kwargs):
super(_GlobalPooling1D, self).__init__(**kwargs)
self.input_spec = InputSpec(ndim=3)
def _compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
return tensor_shape.TensorShape([input_shape[0], input_shape[2]])
def call(self, inputs):
raise NotImplementedError
class GlobalAveragePooling1D(_GlobalPooling1D):
"""Global average pooling operation for temporal data.
Input shape:
3D tensor with shape: `(batch_size, steps, features)`.
Output shape:
2D tensor with shape:
`(batch_size, features)`
"""
def call(self, inputs):
return K.mean(inputs, axis=1)
class GlobalMaxPooling1D(_GlobalPooling1D):
"""Global max pooling operation for temporal data.
Input shape:
3D tensor with shape: `(batch_size, steps, features)`.
Output shape:
2D tensor with shape:
`(batch_size, features)`
"""
def call(self, inputs):
return K.max(inputs, axis=1)
class _GlobalPooling2D(Layer):
"""Abstract class for different global pooling 2D layers.
"""
def __init__(self, data_format=None, **kwargs):
super(_GlobalPooling2D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
self.input_spec = InputSpec(ndim=4)
def _compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_last':
return tensor_shape.TensorShape([input_shape[0], input_shape[3]])
else:
return tensor_shape.TensorShape([input_shape[0], input_shape[1]])
def call(self, inputs):
raise NotImplementedError
def get_config(self):
config = {'data_format': self.data_format}
base_config = super(_GlobalPooling2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class GlobalAveragePooling2D(_GlobalPooling2D):
"""Global average pooling operation for spatial data.
Arguments:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
4D tensor with shape:
`(batch_size, rows, cols, channels)`
- If `data_format='channels_first'`:
4D tensor with shape:
`(batch_size, channels, rows, cols)`
Output shape:
2D tensor with shape:
`(batch_size, channels)`
"""
def call(self, inputs):
if self.data_format == 'channels_last':
return K.mean(inputs, axis=[1, 2])
else:
return K.mean(inputs, axis=[2, 3])
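# Worked example (illustrative only): with `channels_last`, global average
# pooling reduces over the two spatial axes, so a single-channel 2x2 feature
# map [[1, 2], [3, 4]] collapses to 2.5 for that channel, turning a
# `(batch_size, rows, cols, channels)` tensor into `(batch_size, channels)`.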
class GlobalMaxPooling2D(_GlobalPooling2D):
"""Global max pooling operation for spatial data.
Arguments:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
4D tensor with shape:
`(batch_size, rows, cols, channels)`
- If `data_format='channels_first'`:
4D tensor with shape:
`(batch_size, channels, rows, cols)`
Output shape:
2D tensor with shape:
`(batch_size, channels)`
"""
def call(self, inputs):
if self.data_format == 'channels_last':
return K.max(inputs, axis=[1, 2])
else:
return K.max(inputs, axis=[2, 3])
class _GlobalPooling3D(Layer):
"""Abstract class for different global pooling 3D layers.
"""
def __init__(self, data_format=None, **kwargs):
super(_GlobalPooling3D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
self.input_spec = InputSpec(ndim=5)
def _compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_last':
return tensor_shape.TensorShape([input_shape[0], input_shape[4]])
else:
return tensor_shape.TensorShape([input_shape[0], input_shape[1]])
def call(self, inputs):
raise NotImplementedError
def get_config(self):
config = {'data_format': self.data_format}
base_config = super(_GlobalPooling3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class GlobalAveragePooling3D(_GlobalPooling3D):
"""Global Average pooling operation for 3D data.
Arguments:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
2D tensor with shape:
`(batch_size, channels)`
"""
def call(self, inputs):
if self.data_format == 'channels_last':
return K.mean(inputs, axis=[1, 2, 3])
else:
return K.mean(inputs, axis=[2, 3, 4])
class GlobalMaxPooling3D(_GlobalPooling3D):
"""Global Max pooling operation for 3D data.
Arguments:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
2D tensor with shape:
`(batch_size, channels)`
"""
def call(self, inputs):
if self.data_format == 'channels_last':
return K.max(inputs, axis=[1, 2, 3])
else:
return K.max(inputs, axis=[2, 3, 4])
# Aliases
AvgPool1D = AveragePooling1D
MaxPool1D = MaxPooling1D
AvgPool2D = AveragePooling2D
MaxPool2D = MaxPooling2D
AvgPool3D = AveragePooling3D
MaxPool3D = MaxPooling3D
GlobalMaxPool1D = GlobalMaxPooling1D
GlobalMaxPool2D = GlobalMaxPooling2D
GlobalMaxPool3D = GlobalMaxPooling3D
GlobalAvgPool1D = GlobalAveragePooling1D
GlobalAvgPool2D = GlobalAveragePooling2D
GlobalAvgPool3D = GlobalAveragePooling3D
|
|
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import sys
from requests import RequestException
from requests.structures import CaseInsensitiveDict
from time import sleep
import unittest
import testtools
import mock
import six
from six.moves import reload_module
from six.moves.urllib.parse import urlparse, ParseResult
from swiftclient import client as c
from swiftclient import shell as s
from swiftclient.utils import EMPTY_ETAG
def fake_get_auth_keystone(expected_os_options=None, exc=None,
storage_url='http://url/', token='token',
**kwargs):
def fake_get_auth_keystone(auth_url,
user,
key,
actual_os_options, **actual_kwargs):
if exc:
raise exc('test')
# TODO: some way to require auth_url, user and key?
if expected_os_options and actual_os_options != expected_os_options:
return "", None
if 'required_kwargs' in kwargs:
for k, v in kwargs['required_kwargs'].items():
if v != actual_kwargs.get(k):
return "", None
if auth_url.startswith("https") and \
auth_url.endswith("invalid-certificate") and \
not actual_kwargs['insecure']:
from swiftclient import client as c
raise c.ClientException("invalid-certificate")
if auth_url.startswith("https") and \
auth_url.endswith("self-signed-certificate") and \
not actual_kwargs['insecure'] and \
actual_kwargs['cacert'] is None:
from swiftclient import client as c
raise c.ClientException("unverified-certificate")
return storage_url, token
return fake_get_auth_keystone
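# Minimal usage sketch (the exact patch target is an assumption; tests may wire
# the fake in differently):
#
#   with mock.patch('swiftclient.client.get_auth_keystone',
#                   fake_get_auth_keystone(expected_os_options={'tenant_name': 't'})):
#       ...  # code under test that authenticates against keystone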
class StubResponse(object):
"""
Placeholder structure for use with fake_http_connect's code_iter to modify
response attributes (status, body, headers) on a per-request basis.
"""
def __init__(self, status=200, body='', headers=None):
self.status = status
self.body = body
self.headers = headers or {}
def fake_http_connect(*code_iter, **kwargs):
"""
    Generate a callable which yields a series of stubbed responses. Because
    swiftclient reuses an HTTP connection across pipelined requests, this fake
    does not strictly mock a single HTTP connection; rather, it stubs each HTTP
    response (i.e. each call to the connection's getresponse).
"""
class FakeConn(object):
def __init__(self, status, etag=None, body='', timestamp='1',
headers=None):
self.status = status
self.reason = 'Fake'
self.host = '1.2.3.4'
self.port = '1234'
self.sent = 0
self.received = 0
self.etag = etag
self.body = body
self.timestamp = timestamp
self._is_closed = True
self.headers = headers or {}
def connect(self):
self._is_closed = False
def close(self):
self._is_closed = True
def isclosed(self):
return self._is_closed
def getresponse(self):
if kwargs.get('raise_exc'):
raise Exception('test')
return self
def getexpect(self):
if self.status == -2:
raise RequestException()
if self.status == -3:
return FakeConn(507)
return FakeConn(100)
def getheaders(self):
if self.headers:
return self.headers.items()
headers = {'content-length': len(self.body),
'content-type': 'x-application/test',
'x-timestamp': self.timestamp,
'last-modified': self.timestamp,
'x-object-meta-test': 'testing',
'etag':
self.etag or '"%s"' % EMPTY_ETAG,
'x-works': 'yes',
'x-account-container-count': 12345}
if not self.timestamp:
del headers['x-timestamp']
try:
if next(container_ts_iter) is False:
headers['x-container-timestamp'] = '1'
except StopIteration:
pass
if 'slow' in kwargs:
headers['content-length'] = '4'
if 'headers' in kwargs:
headers.update(kwargs['headers'])
if 'auth_v1' in kwargs:
headers.update(
{'x-storage-url': 'storageURL',
'x-auth-token': 'someauthtoken'})
return headers.items()
def read(self, amt=None):
if 'slow' in kwargs:
if self.sent < 4:
self.sent += 1
sleep(0.1)
return ' '
rv = self.body[:amt]
self.body = self.body[amt:]
return rv
def send(self, amt=None):
if 'slow' in kwargs:
if self.received < 4:
self.received += 1
sleep(0.1)
def getheader(self, name, default=None):
return dict(self.getheaders()).get(name.lower(), default)
timestamps_iter = iter(kwargs.get('timestamps') or ['1'] * len(code_iter))
etag_iter = iter(kwargs.get('etags') or [None] * len(code_iter))
x = kwargs.get('missing_container', [False] * len(code_iter))
if not isinstance(x, (tuple, list)):
x = [x] * len(code_iter)
container_ts_iter = iter(x)
code_iter = iter(code_iter)
def connect(*args, **ckwargs):
if 'give_content_type' in kwargs:
if len(args) >= 7 and 'Content-Type' in args[6]:
kwargs['give_content_type'](args[6]['Content-Type'])
else:
kwargs['give_content_type']('')
if 'give_connect' in kwargs:
kwargs['give_connect'](*args, **ckwargs)
status = next(code_iter)
if isinstance(status, StubResponse):
fake_conn = FakeConn(status.status, body=status.body,
headers=status.headers)
else:
etag = next(etag_iter)
timestamp = next(timestamps_iter)
fake_conn = FakeConn(status, etag, body=kwargs.get('body', ''),
timestamp=timestamp)
if fake_conn.status <= 0:
raise RequestException()
fake_conn.connect()
return fake_conn
connect.code_iter = code_iter
return connect
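# Minimal usage sketch (illustrative only): each call to the returned callable
# consumes the next entry of code_iter and returns a FakeConn whose
# getresponse() returns itself.
#
#   connect = fake_http_connect(200, 404, body='data')
#   resp = connect('1.2.3.4', 1234).getresponse()  # resp.status == 200
#   resp = connect('1.2.3.4', 1234).getresponse()  # resp.status == 404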
class MockHttpTest(testtools.TestCase):
def setUp(self):
super(MockHttpTest, self).setUp()
self.fake_connect = None
self.request_log = []
def fake_http_connection(*args, **kwargs):
self.validateMockedRequestsConsumed()
self.request_log = []
self.fake_connect = fake_http_connect(*args, **kwargs)
_orig_http_connection = c.http_connection
query_string = kwargs.get('query_string')
storage_url = kwargs.get('storage_url')
auth_token = kwargs.get('auth_token')
exc = kwargs.get('exc')
on_request = kwargs.get('on_request')
def wrapper(url, proxy=None, cacert=None, insecure=False,
ssl_compression=True):
if storage_url:
self.assertEqual(storage_url, url)
parsed, _conn = _orig_http_connection(url, proxy=proxy)
class RequestsWrapper(object):
pass
conn = RequestsWrapper()
def request(method, url, *args, **kwargs):
try:
conn.resp = self.fake_connect()
except StopIteration:
self.fail('Unexpected %s request for %s' % (
method, url))
self.request_log.append((parsed, method, url, args,
kwargs, conn.resp))
conn.host = conn.resp.host
conn.isclosed = conn.resp.isclosed
conn.resp.has_been_read = False
_orig_read = conn.resp.read
def read(*args, **kwargs):
conn.resp.has_been_read = True
return _orig_read(*args, **kwargs)
conn.resp.read = read
if on_request:
status = on_request(method, url, *args, **kwargs)
conn.resp.status = status
if auth_token:
headers = args[1]
self.assertTrue('X-Auth-Token' in headers)
actual_token = headers.get('X-Auth-Token')
self.assertEqual(auth_token, actual_token)
if query_string:
self.assertTrue(url.endswith('?' + query_string))
if url.endswith('invalid_cert') and not insecure:
from swiftclient import client as c
raise c.ClientException("invalid_certificate")
if exc:
raise exc
return conn.resp
def putrequest(path, data=None, headers=None, **kwargs):
request('PUT', path, data, headers, **kwargs)
conn.request = request
conn.putrequest = putrequest
def getresponse():
return conn.resp
conn.getresponse = getresponse
return parsed, conn
return wrapper
self.fake_http_connection = fake_http_connection
def iter_request_log(self):
for parsed, method, path, args, kwargs, resp in self.request_log:
parts = parsed._asdict()
parts['path'] = path
full_path = ParseResult(**parts).geturl()
args = list(args)
log = dict(zip(('body', 'headers'), args))
log.update({
'method': method,
'full_path': full_path,
'parsed_path': urlparse(full_path),
'path': path,
'headers': CaseInsensitiveDict(log.get('headers')),
'resp': resp,
'status': resp.status,
})
yield log
orig_assertEqual = unittest.TestCase.assertEqual
def assert_request_equal(self, expected, real_request):
method, path = expected[:2]
if urlparse(path).scheme:
match_path = real_request['full_path']
else:
match_path = real_request['path']
self.assertEqual((method, path), (real_request['method'],
match_path))
if len(expected) > 2:
body = expected[2]
real_request['expected'] = body
err_msg = 'Body mismatch for %(method)s %(path)s, ' \
'expected %(expected)r, and got %(body)r' % real_request
self.orig_assertEqual(body, real_request['body'], err_msg)
if len(expected) > 3:
headers = expected[3]
for key, value in headers.items():
real_request['key'] = key
real_request['expected_value'] = value
real_request['value'] = real_request['headers'].get(key)
err_msg = (
'Header mismatch on %(key)r, '
'expected %(expected_value)r and got %(value)r '
'for %(method)s %(path)s %(headers)r' % real_request)
self.orig_assertEqual(value, real_request['value'],
err_msg)
def assertRequests(self, expected_requests):
"""
        Make sure requests were made as expected. Provide a list of expected
        requests, typically in the form [(method, path), ...].
"""
real_requests = self.iter_request_log()
for expected in expected_requests:
real_request = next(real_requests)
self.assert_request_equal(expected, real_request)
def assert_request(self, expected_request):
"""
        Make sure a single request was made as expected. Provide the expected
        request in the form (method, path[, body[, headers]]).
"""
real_requests = self.iter_request_log()
for real_request in real_requests:
try:
self.assert_request_equal(expected_request, real_request)
break
except AssertionError:
pass
else:
raise AssertionError(
"Expected request %s not found in actual requests %s"
% (expected_request, self.request_log)
)
def validateMockedRequestsConsumed(self):
if not self.fake_connect:
return
unused_responses = list(self.fake_connect.code_iter)
if unused_responses:
self.fail('Unused responses %r' % (unused_responses,))
def tearDown(self):
self.validateMockedRequestsConsumed()
super(MockHttpTest, self).tearDown()
# TODO: this nuke from orbit clean up seems to be encouraging
# un-hygienic mocking on the swiftclient.client module; which may lead
# to some unfortunate test order dependency bugs by way of the broken
# window theory if any other modules are similarly patched
reload_module(c)
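# Minimal usage sketch for subclasses (the patch target shown is an assumption;
# the point is that fake_http_connection(...) stands in for swiftclient's
# http_connection for the duration of a test):
#
#   class MyClientTest(MockHttpTest):
#       def test_something(self):
#           with mock.patch('swiftclient.client.http_connection',
#                           self.fake_http_connection(200)):
#               ...  # exercise swiftclient code; responses come from the fake
#           # self.assertRequests([...]) can then verify what was sent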
class CaptureStreamBuffer(object):
"""
    CaptureStreamBuffer is used for testing raw byte writing on Python 3.
    Anything written here is converted to text (one character per byte) and
    written to the parent CaptureStream.
"""
def __init__(self, captured_stream):
self._captured_stream = captured_stream
def write(self, bytes_data):
# No encoding, just convert the raw bytes into a str for testing
# The below call also validates that we have a byte string.
self._captured_stream.write(
''.join(map(chr, bytes_data))
)
class CaptureStream(object):
def __init__(self, stream):
self.stream = stream
self._capture = six.StringIO()
self._buffer = CaptureStreamBuffer(self)
self.streams = [self.stream, self._capture]
@property
def buffer(self):
if six.PY3:
return self._buffer
else:
raise AttributeError(
'Output stream has no attribute "buffer" in Python2')
def flush(self):
pass
def write(self, *args, **kwargs):
for stream in self.streams:
stream.write(*args, **kwargs)
def writelines(self, *args, **kwargs):
for stream in self.streams:
stream.writelines(*args, **kwargs)
def getvalue(self):
return self._capture.getvalue()
def clear(self):
self._capture.truncate(0)
self._capture.seek(0)
class CaptureOutput(object):
def __init__(self, suppress_systemexit=False):
self._out = CaptureStream(sys.stdout)
self._err = CaptureStream(sys.stderr)
self.patchers = []
WrappedOutputManager = functools.partial(s.OutputManager,
print_stream=self._out,
error_stream=self._err)
if suppress_systemexit:
self.patchers += [
mock.patch('swiftclient.shell.OutputManager.get_error_count',
return_value=0)
]
self.patchers += [
mock.patch('swiftclient.shell.OutputManager',
WrappedOutputManager),
mock.patch('sys.stdout', self._out),
mock.patch('sys.stderr', self._err),
]
def __enter__(self):
for patcher in self.patchers:
patcher.start()
return self
def __exit__(self, *args, **kwargs):
for patcher in self.patchers:
patcher.stop()
@property
def out(self):
return self._out.getvalue()
@property
def err(self):
return self._err.getvalue()
def clear(self):
self._out.clear()
self._err.clear()
    # act like the string captured from stdout
def __str__(self):
return self.out
def __len__(self):
return len(self.out)
def __eq__(self, other):
return self.out == other
def __getattr__(self, name):
return getattr(self.out, name)
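# Minimal usage sketch (illustrative only): CaptureOutput is a context manager
# that tees sys.stdout / sys.stderr (and the shell's OutputManager streams)
# into in-memory buffers so tests can assert on what was printed.
#
#   with CaptureOutput() as output:
#       ...  # run the shell code under test
#   # output.out and output.err now hold the captured text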
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.ops.linalg_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.platform import test
def _AddTest(test_class, op_name, testcase_name, fn):
test_name = "_".join(["test", op_name, testcase_name])
if hasattr(test_class, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test_class, test_name, fn)
def _RandomPDMatrix(n, rng, dtype=np.float64):
"""Random positive definite matrix."""
temp = rng.randn(n, n).astype(dtype)
if dtype in [np.complex64, np.complex128]:
temp.imag = rng.randn(n, n)
return np.conj(temp).dot(temp.T)
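# Why this is positive definite (a short justification, not part of the test):
# np.conj(temp).dot(temp.T) equals (temp @ temp^H)^T, which is Hermitian
# positive semi-definite for any temp and almost surely positive definite when
# temp is a random Gaussian square matrix (full rank with probability one).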
class CholeskySolveTest(test.TestCase):
def setUp(self):
self.rng = np.random.RandomState(0)
@test_util.run_deprecated_v1
def test_works_with_five_different_random_pos_def_matrices(self):
for n in range(1, 6):
for np_type, atol in [(np.float32, 0.05), (np.float64, 1e-5)]:
with self.session(use_gpu=True):
# Create 2 x n x n matrix
array = np.array(
[_RandomPDMatrix(n, self.rng),
_RandomPDMatrix(n, self.rng)]).astype(np_type)
chol = linalg_ops.cholesky(array)
for k in range(1, 3):
rhs = self.rng.randn(2, n, k).astype(np_type)
x = linalg_ops.cholesky_solve(chol, rhs)
self.assertAllClose(
rhs, math_ops.matmul(array, x).eval(), atol=atol)
class LogdetTest(test.TestCase):
def setUp(self):
self.rng = np.random.RandomState(42)
@test_util.run_deprecated_v1
def test_works_with_five_different_random_pos_def_matrices(self):
for n in range(1, 6):
for np_dtype, atol in [(np.float32, 0.05), (np.float64, 1e-5),
(np.complex64, 0.05), (np.complex128, 1e-5)]:
matrix = _RandomPDMatrix(n, self.rng, np_dtype)
_, logdet_np = np.linalg.slogdet(matrix)
with self.session(use_gpu=True):
# Create 2 x n x n matrix
# matrix = np.array(
# [_RandomPDMatrix(n, self.rng, np_dtype),
# _RandomPDMatrix(n, self.rng, np_dtype)]).astype(np_dtype)
logdet_tf = linalg.logdet(matrix)
self.assertAllClose(logdet_np, self.evaluate(logdet_tf), atol=atol)
def test_works_with_underflow_case(self):
for np_dtype, atol in [(np.float32, 0.05), (np.float64, 1e-5),
(np.complex64, 0.05), (np.complex128, 1e-5)]:
matrix = (np.eye(20) * 1e-6).astype(np_dtype)
_, logdet_np = np.linalg.slogdet(matrix)
with self.session(use_gpu=True):
logdet_tf = linalg.logdet(matrix)
self.assertAllClose(logdet_np, self.evaluate(logdet_tf), atol=atol)
class SlogdetTest(test.TestCase):
def setUp(self):
self.rng = np.random.RandomState(42)
@test_util.run_deprecated_v1
def test_works_with_five_different_random_pos_def_matrices(self):
for n in range(1, 6):
for np_dtype, atol in [(np.float32, 0.05), (np.float64, 1e-5),
(np.complex64, 0.05), (np.complex128, 1e-5)]:
matrix = _RandomPDMatrix(n, self.rng, np_dtype)
sign_np, log_abs_det_np = np.linalg.slogdet(matrix)
with self.session(use_gpu=True):
sign_tf, log_abs_det_tf = linalg.slogdet(matrix)
self.assertAllClose(
log_abs_det_np, self.evaluate(log_abs_det_tf), atol=atol)
self.assertAllClose(sign_np, self.evaluate(sign_tf), atol=atol)
def test_works_with_underflow_case(self):
for np_dtype, atol in [(np.float32, 0.05), (np.float64, 1e-5),
(np.complex64, 0.05), (np.complex128, 1e-5)]:
matrix = (np.eye(20) * 1e-6).astype(np_dtype)
sign_np, log_abs_det_np = np.linalg.slogdet(matrix)
with self.session(use_gpu=True):
sign_tf, log_abs_det_tf = linalg.slogdet(matrix)
self.assertAllClose(
log_abs_det_np, self.evaluate(log_abs_det_tf), atol=atol)
self.assertAllClose(sign_np, self.evaluate(sign_tf), atol=atol)
class AdjointTest(test.TestCase):
def test_compare_to_numpy(self):
    for dtype in np.float32, np.float64, np.complex64, np.complex128:
matrix_np = np.array([[1 + 1j, 2 + 2j, 3 + 3j], [4 + 4j, 5 + 5j,
6 + 6j]]).astype(dtype)
expected_transposed = np.conj(matrix_np.T)
with self.session():
matrix = ops.convert_to_tensor(matrix_np)
transposed = linalg.adjoint(matrix)
self.assertEqual((3, 2), transposed.get_shape())
self.assertAllEqual(expected_transposed, self.evaluate(transposed))
class EyeTest(parameterized.TestCase, test.TestCase):
def testShapeInferenceNoBatch(self):
self.assertEqual((2, 2), linalg_ops.eye(num_rows=2).shape)
self.assertEqual((2, 3), linalg_ops.eye(num_rows=2, num_columns=3).shape)
def testShapeInferenceStaticBatch(self):
batch_shape = (2, 3)
self.assertEqual(
(2, 3, 2, 2),
linalg_ops.eye(num_rows=2, batch_shape=batch_shape).shape)
self.assertEqual(
(2, 3, 2, 3),
linalg_ops.eye(
num_rows=2, num_columns=3, batch_shape=batch_shape).shape)
@parameterized.named_parameters(
("DynamicRow",
lambda: array_ops.placeholder_with_default(2, shape=None),
lambda: None),
("DynamicRowStaticColumn",
lambda: array_ops.placeholder_with_default(2, shape=None),
lambda: 3),
("StaticRowDynamicColumn",
lambda: 2,
lambda: array_ops.placeholder_with_default(3, shape=None)),
("DynamicRowDynamicColumn",
lambda: array_ops.placeholder_with_default(2, shape=None),
lambda: array_ops.placeholder_with_default(3, shape=None)))
def testShapeInferenceStaticBatchWith(self, num_rows_fn, num_columns_fn):
num_rows = num_rows_fn()
num_columns = num_columns_fn()
batch_shape = (2, 3)
identity_matrix = linalg_ops.eye(
num_rows=num_rows,
num_columns=num_columns,
batch_shape=batch_shape)
self.assertEqual(4, identity_matrix.shape.ndims)
self.assertEqual((2, 3), identity_matrix.shape[:2])
if num_rows is not None and not isinstance(num_rows, ops.Tensor):
self.assertEqual(2, identity_matrix.shape[-2])
if num_columns is not None and not isinstance(num_columns, ops.Tensor):
self.assertEqual(3, identity_matrix.shape[-1])
@parameterized.parameters(
itertools.product(
# num_rows
[0, 1, 2, 5],
# num_columns
[None, 0, 1, 2, 5],
# batch_shape
[None, [], [2], [2, 3]],
# dtype
[
dtypes.int32,
dtypes.int64,
dtypes.float32,
dtypes.float64,
dtypes.complex64,
dtypes.complex128
])
)
def test_eye_no_placeholder(self, num_rows, num_columns, batch_shape, dtype):
eye_np = np.eye(num_rows, M=num_columns, dtype=dtype.as_numpy_dtype)
if batch_shape is not None:
eye_np = np.tile(eye_np, batch_shape + [1, 1])
eye_tf = self.evaluate(linalg_ops.eye(
num_rows,
num_columns=num_columns,
batch_shape=batch_shape,
dtype=dtype))
self.assertAllEqual(eye_np, eye_tf)
@parameterized.parameters(
itertools.product(
# num_rows
[0, 1, 2, 5],
# num_columns
[0, 1, 2, 5],
# batch_shape
[[], [2], [2, 3]],
# dtype
[
dtypes.int32,
dtypes.int64,
dtypes.float32,
dtypes.float64,
dtypes.complex64,
dtypes.complex128
])
)
@test_util.run_deprecated_v1
def test_eye_with_placeholder(
self, num_rows, num_columns, batch_shape, dtype):
eye_np = np.eye(num_rows, M=num_columns, dtype=dtype.as_numpy_dtype)
eye_np = np.tile(eye_np, batch_shape + [1, 1])
num_rows_placeholder = array_ops.placeholder(
dtypes.int32, name="num_rows")
num_columns_placeholder = array_ops.placeholder(
dtypes.int32, name="num_columns")
batch_shape_placeholder = array_ops.placeholder(
dtypes.int32, name="batch_shape")
eye = linalg_ops.eye(
num_rows_placeholder,
num_columns=num_columns_placeholder,
batch_shape=batch_shape_placeholder,
dtype=dtype)
with self.session(use_gpu=True) as sess:
eye_tf = sess.run(
eye,
feed_dict={
num_rows_placeholder: num_rows,
num_columns_placeholder: num_columns,
batch_shape_placeholder: batch_shape
})
self.assertAllEqual(eye_np, eye_tf)
class _MatrixRankTest(object):
def test_batch_default_tolerance(self):
x_ = np.array(
[
[
[2, 3, -2], # = row2+row3
[-1, 1, -2],
[3, 2, 0]
],
[
[0, 2, 0], # = 2*row2
[0, 1, 0],
[0, 3, 0]
], # = 3*row2
[[1, 0, 0], [0, 1, 0], [0, 0, 1]]
],
self.dtype)
x = array_ops.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
self.assertAllEqual([2, 1, 3], self.evaluate(linalg.matrix_rank(x)))
def test_custom_tolerance_broadcasts(self):
q = linalg.qr(random_ops.random_uniform([3, 3], dtype=self.dtype))[0]
e = constant_op.constant([0.1, 0.2, 0.3], dtype=self.dtype)
a = linalg.solve(q, linalg.transpose(a=e * q), adjoint=True)
self.assertAllEqual([3, 2, 1, 0],
self.evaluate(
linalg.matrix_rank(
a, tol=[[0.09], [0.19], [0.29], [0.31]])))
def test_nonsquare(self):
x_ = np.array(
[
[
[2, 3, -2, 2], # = row2+row3
[-1, 1, -2, 4],
[3, 2, 0, -2]
],
[
[0, 2, 0, 6], # = 2*row2
[0, 1, 0, 3],
[0, 3, 0, 9]
]
], # = 3*row2
self.dtype)
x = array_ops.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
self.assertAllEqual([2, 1], self.evaluate(linalg.matrix_rank(x)))
@test_util.run_all_in_graph_and_eager_modes
class MatrixRankStatic32Test(test.TestCase, _MatrixRankTest):
dtype = np.float32
use_static_shape = True
@test_util.run_all_in_graph_and_eager_modes
class MatrixRankDynamic64Test(test.TestCase, _MatrixRankTest):
dtype = np.float64
use_static_shape = False
class _PinvTest(object):
def expected_pinv(self, a, rcond):
"""Calls `np.linalg.pinv` but corrects its broken batch semantics."""
if a.ndim < 3:
return np.linalg.pinv(a, rcond)
if rcond is None:
rcond = 10. * max(a.shape[-2], a.shape[-1]) * np.finfo(a.dtype).eps
s = np.concatenate([a.shape[:-2], [a.shape[-1], a.shape[-2]]])
a_pinv = np.zeros(s, dtype=a.dtype)
for i in np.ndindex(a.shape[:(a.ndim - 2)]):
a_pinv[i] = np.linalg.pinv(
a[i], rcond=rcond if isinstance(rcond, float) else rcond[i])
return a_pinv
def test_symmetric(self):
a_ = self.dtype([[1., .4, .5], [.4, .2, .25], [.5, .25, .35]])
a_ = np.stack([a_ + 1., a_], axis=0) # Batch of matrices.
a = array_ops.placeholder_with_default(
a_, shape=a_.shape if self.use_static_shape else None)
if self.use_default_rcond:
rcond = None
else:
      rcond = self.dtype([0., 0.01])  # The single smallest component is forced to zero.
expected_a_pinv_ = self.expected_pinv(a_, rcond)
a_pinv = linalg.pinv(a, rcond, validate_args=True)
a_pinv_ = self.evaluate(a_pinv)
self.assertAllClose(expected_a_pinv_, a_pinv_, atol=2e-5, rtol=2e-5)
if not self.use_static_shape:
return
self.assertAllEqual(expected_a_pinv_.shape, a_pinv.shape)
def test_nonsquare(self):
a_ = self.dtype([[1., .4, .5, 1.], [.4, .2, .25, 2.], [.5, .25, .35, 3.]])
a_ = np.stack([a_ + 0.5, a_], axis=0) # Batch of matrices.
a = array_ops.placeholder_with_default(
a_, shape=a_.shape if self.use_static_shape else None)
if self.use_default_rcond:
rcond = None
else:
# Smallest 2 components are forced to zero.
rcond = self.dtype([0., 0.25])
expected_a_pinv_ = self.expected_pinv(a_, rcond)
a_pinv = linalg.pinv(a, rcond, validate_args=True)
a_pinv_ = self.evaluate(a_pinv)
self.assertAllClose(expected_a_pinv_, a_pinv_, atol=1e-5, rtol=1e-4)
if not self.use_static_shape:
return
self.assertAllEqual(expected_a_pinv_.shape, a_pinv.shape)
@test_util.run_all_in_graph_and_eager_modes
class PinvTestDynamic32DefaultRcond(test.TestCase, _PinvTest):
dtype = np.float32
use_static_shape = False
use_default_rcond = True
@test_util.run_all_in_graph_and_eager_modes
class PinvTestStatic64DefaultRcond(test.TestCase, _PinvTest):
dtype = np.float64
use_static_shape = True
use_default_rcond = True
@test_util.run_all_in_graph_and_eager_modes
class PinvTestDynamic32CustomtRcond(test.TestCase, _PinvTest):
dtype = np.float32
use_static_shape = False
use_default_rcond = False
@test_util.run_all_in_graph_and_eager_modes
class PinvTestStatic64CustomRcond(test.TestCase, _PinvTest):
dtype = np.float64
use_static_shape = True
use_default_rcond = False
def make_tensor_hiding_attributes(value, hide_shape, hide_value=True):
if not hide_value:
return ops.convert_to_tensor(value)
shape = None if hide_shape else getattr(value, "shape", None)
return array_ops.placeholder_with_default(value, shape=shape)
class _LUReconstruct(object):
dtype = np.float32
use_static_shape = True
def test_non_batch(self):
x_ = np.array([[3, 4], [1, 2]], dtype=self.dtype)
x = array_ops.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
y = linalg.lu_reconstruct(*linalg.lu(x), validate_args=True)
y_ = self.evaluate(y)
if self.use_static_shape:
self.assertAllEqual(x_.shape, y.shape)
self.assertAllClose(x_, y_, atol=0., rtol=1e-3)
def test_batch(self):
x_ = np.array([
[[3, 4], [1, 2]],
[[7, 8], [3, 4]],
], dtype=self.dtype)
x = array_ops.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
y = linalg.lu_reconstruct(*linalg.lu(x), validate_args=True)
y_ = self.evaluate(y)
if self.use_static_shape:
self.assertAllEqual(x_.shape, y.shape)
self.assertAllClose(x_, y_, atol=0., rtol=1e-3)
@test_util.run_all_in_graph_and_eager_modes
class LUReconstructStatic(test.TestCase, _LUReconstruct):
use_static_shape = True
@test_util.run_all_in_graph_and_eager_modes
class LUReconstructDynamic(test.TestCase, _LUReconstruct):
use_static_shape = False
class _LUMatrixInverse(object):
dtype = np.float32
use_static_shape = True
def test_non_batch(self):
x_ = np.array([[1, 2], [3, 4]], dtype=self.dtype)
x = array_ops.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
y = linalg.lu_matrix_inverse(*linalg.lu(x), validate_args=True)
y_ = self.evaluate(y)
if self.use_static_shape:
self.assertAllEqual(x_.shape, y.shape)
self.assertAllClose(np.linalg.inv(x_), y_, atol=0., rtol=1e-3)
def test_batch(self):
x_ = np.array([
[[1, 2], [3, 4]],
[[7, 8], [3, 4]],
[[0.25, 0.5], [0.75, -2.]],
],
dtype=self.dtype)
x = array_ops.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
y = linalg.lu_matrix_inverse(*linalg.lu(x), validate_args=True)
y_ = self.evaluate(y)
if self.use_static_shape:
self.assertAllEqual(x_.shape, y.shape)
self.assertAllClose(np.linalg.inv(x_), y_, atol=0., rtol=1e-3)
@test_util.run_all_in_graph_and_eager_modes
class LUMatrixInverseStatic(test.TestCase, _LUMatrixInverse):
use_static_shape = True
@test_util.run_all_in_graph_and_eager_modes
class LUMatrixInverseDynamic(test.TestCase, _LUMatrixInverse):
use_static_shape = False
class _LUSolve(object):
dtype = np.float32
use_static_shape = True
def test_non_batch(self):
x_ = np.array([[1, 2], [3, 4]], dtype=self.dtype)
x = array_ops.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
rhs_ = np.array([[1, 1]], dtype=self.dtype).T
rhs = array_ops.placeholder_with_default(
rhs_, shape=rhs_.shape if self.use_static_shape else None)
lower_upper, perm = linalg.lu(x)
y = linalg.lu_solve(lower_upper, perm, rhs, validate_args=True)
y_, perm_ = self.evaluate([y, perm])
self.assertAllEqual([1, 0], perm_)
expected_ = np.linalg.solve(x_, rhs_)
if self.use_static_shape:
self.assertAllEqual(expected_.shape, y.shape)
self.assertAllClose(expected_, y_, atol=0., rtol=1e-3)
def test_batch_broadcast(self):
x_ = np.array([
[[1, 2], [3, 4]],
[[7, 8], [3, 4]],
[[0.25, 0.5], [0.75, -2.]],
],
dtype=self.dtype)
x = array_ops.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
rhs_ = np.array([[1, 1]], dtype=self.dtype).T
rhs = array_ops.placeholder_with_default(
rhs_, shape=rhs_.shape if self.use_static_shape else None)
lower_upper, perm = linalg.lu(x)
y = linalg.lu_solve(lower_upper, perm, rhs, validate_args=True)
y_, perm_ = self.evaluate([y, perm])
self.assertAllEqual([[1, 0], [0, 1], [1, 0]], perm_)
expected_ = np.linalg.solve(x_, rhs_[np.newaxis])
if self.use_static_shape:
self.assertAllEqual(expected_.shape, y.shape)
self.assertAllClose(expected_, y_, atol=0., rtol=1e-3)
@test_util.run_all_in_graph_and_eager_modes
class LUSolveStatic(test.TestCase, _LUSolve):
use_static_shape = True
@test_util.run_all_in_graph_and_eager_modes
class LUSolveDynamic(test.TestCase, _LUSolve):
use_static_shape = False
if __name__ == "__main__":
test.main()
|
|
"""Tango Parser.
The objective of the preliminary parsing phase is to
(try to) uncover the overall structure of the input document.
In particular it handles :
- line comments
- latex-style commands \cmd[<opt>]{<arg1>}{...}{<argN>}
- special preformated commands \cmd[<opt>]{{{ <preformated>}}}
- latex-style environments \begin{env}[<opt>]{<arg1>}{...}{<argN>} <body> \end{env}
- latex-style sectionning commands \part \chapter \section \subsection \subsubsection \paragraph
"""
import tangolib.eregex as ere
import tangolib.lexer as lexer
from tangolib.markup import Document, Section, Command, CommandArg, \
Environment, Text, Newlines, Spaces, Preformated, SubDocument, EnvArg
import tangolib.template as template
from tangolib.macros import DefCommand, DefEnvironment
from tangolib.cmdparse import GLOBAL_COMMAND_LINE_ARGUMENTS
import tangolib.globalvars as globvars
class ParseError(Exception):
pass
def depth_of_section(section_type):
if section_type == 'part':
return -1
elif section_type == 'chapter':
return 0
elif section_type == 'section':
return 1
elif section_type == 'subsection':
return 2
elif section_type == 'subsubsection':
return 3
elif section_type == 'paragraph':
return 4
else:
raise ValueError("Not a valid section type: {}".format(section_type))
def section_of_depth(section_depth):
if section_depth == -1:
return "part"
elif section_depth == 0:
return "chapter"
elif section_depth == 1:
return "section"
elif section_depth == 2:
return "subsection"
elif section_depth == 3:
return "subsubsection"
elif section_depth == 4:
return "paragraph"
else:
raise ValueError("Not a valid section depth: {}".format(section_depth))
# extended regular expressions constants
REGEX_IDENT_STR = r"[a-zA-Z_][a-zA-Z_0-9]*"
REGEX_PROTECTED = ere.ERegex(r"\\{|\\}")
REGEX_LINE_COMMENT = ere.ERegex('%') + ere.zero_or_more(ere.any_char()) + ere.str_end()
REGEX_ENV_HEADER = ere.ERegex(r"\\begin{(" + REGEX_IDENT_STR + r")}(?:\[([^\]]+)\])?")
REGEX_ENV_FOOTER = ere.ERegex(r"\\end{(" + REGEX_IDENT_STR + r")}")
REGEX_SECTION = ere.ERegex(r"\\(part|chapter|section|subsection|subsubsection|paragraph)(?:\[([^\]]+)\])?{([^}]+)}")
REGEX_MDSECTION = ere.ERegex(r"^(=+)\s+([^=]+)\s+(=*)(.*)$")
REGEX_CMD_PRE_HEADER = ere.ERegex(r"\\(" + REGEX_IDENT_STR + r")(?:\[([^\]]+)\])?{{{")
REGEX_CMD_HEADER = ere.ERegex(r"\\(" + REGEX_IDENT_STR + r")(?:\[([^\]]+)\])?")
REGEX_SPACE_STR = r"[^\S\n\f\r]"
REGEX_SPACE = ere.ERegex(REGEX_SPACE_STR)
REGEX_SPACES = ere.ERegex("({})+".format(REGEX_SPACE_STR))
REGEX_MDLIST_OPEN = ere.ERegex("(?:^{0}*\\n)+({0}+)([-+*\\d](?:\\.)?){0}".format(REGEX_SPACE_STR))
REGEX_MDLIST_ITEM = ere.ERegex("^({0}+)([-+*\\d](?:\\.)?){0}".format(REGEX_SPACE_STR))
REGEX_INLINE_PREFORMATED = ere.ERegex("`([^`]*)`")
REGEX_EMPH_STAR = ere.ERegex(r"(\*)(?=[^*]+\*)")
REGEX_STRONG_STAR = ere.ERegex(r"(\*)\*(?=[^*]+\*\*)")
REGEX_EMPH_UNDER = ere.ERegex(r"(_)(?=[^_]+_)")
REGEX_STRONG_UNDER = ere.ERegex(r"(_)_(?=[^_]+__)")
REGEX_DEF_CMD_HEADER = ere.ERegex(r"\\defCommand{\\(" + REGEX_IDENT_STR + r")}(?:\[([0-9]+)\])?")
REGEX_DEF_CMD_HEADER_SHORT = ere.ERegex(r"\\defCmd{\\(" + REGEX_IDENT_STR + r")}(?:\[([0-9]+)\])?")
REGEX_DEF_ENV_HEADER = ere.ERegex(r"\\defEnvironment{(" + REGEX_IDENT_STR + r")}(?:\[([0-9]+)\])?")
REGEX_DEF_ENV_HEADER_SHORT = ere.ERegex(r"\\defEnv{(" + REGEX_IDENT_STR + r")}(?:\[([0-9]+)\])?")
REGEX_MACRO_CMD_ARG = ere.ERegex(r"\\macroCommandArgument\[([0-9]+)\]")
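# For illustration only (not used by the parser): REGEX_SECTION recognizes
# latex-style headings such as
#
#   \section[short title]{Long Title}
#
# with group(1) == 'section', group(2) == 'short title' (None when the optional
# part is absent) and group(3) == 'Long Title', while REGEX_MDSECTION matches
# markdown-style headings such as '== Long Title ==', whose depth is the number
# of leading '=' characters.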
# main parser class
class Parser:
def __init__(self):
self.recognizers = []
self.prepare_recognizers()
def prepare_recognizers(self):
self.recognizers.append(lexer.Regexp("protected", REGEX_PROTECTED))
self.recognizers.append(lexer.EndOfInput("end_of_input"))
self.recognizers.append(lexer.Regexp("line_comment", REGEX_LINE_COMMENT))
self.recognizers.append(lexer.Regexp("def_env_header", REGEX_DEF_ENV_HEADER))
self.recognizers.append(lexer.Regexp("def_env_header", REGEX_DEF_ENV_HEADER_SHORT))
self.recognizers.append(lexer.Regexp("def_cmd_header", REGEX_DEF_CMD_HEADER))
self.recognizers.append(lexer.Regexp("def_cmd_header", REGEX_DEF_CMD_HEADER_SHORT))
self.recognizers.append(lexer.Regexp("macro_cmd_arg", REGEX_MACRO_CMD_ARG))
self.recognizers.append(lexer.Regexp("env_header", REGEX_ENV_HEADER))
self.recognizers.append(lexer.Regexp("env_footer", REGEX_ENV_FOOTER))
self.recognizers.append(lexer.Regexp("section", REGEX_SECTION))
self.recognizers.append(lexer.Regexp("mdsection", REGEX_MDSECTION, re_flags=ere.MULTILINE))
self.recognizers.append(lexer.Regexp("inline_preformated", REGEX_INLINE_PREFORMATED))
self.recognizers.append(lexer.Regexp("strong", REGEX_STRONG_STAR))
self.recognizers.append(lexer.Regexp("strong", REGEX_STRONG_UNDER))
self.recognizers.append(lexer.Regexp("emph", REGEX_EMPH_STAR))
self.recognizers.append(lexer.Regexp("emph", REGEX_EMPH_UNDER))
# markdown lists
self.recognizers.append(lexer.Regexp("mdlist_open", REGEX_MDLIST_OPEN, re_flags=ere.MULTILINE))
self.recognizers.append(lexer.Regexp("mdlist_item", REGEX_MDLIST_ITEM, re_flags=ere.MULTILINE))
self.recognizers.append(lexer.Regexp("cmd_pre_header", REGEX_CMD_PRE_HEADER))
self.recognizers.append(lexer.Regexp("cmd_header", REGEX_CMD_HEADER))
self.recognizers.append(lexer.Char("open_curly", '{'))
self.recognizers.append(lexer.Char("close_curly", '}'))
self.recognizers.append(lexer.CharIn("newline", "\n", "\r"))
self.recognizers.append(lexer.Regexp("spaces", REGEX_SPACES))
class UnparsedContent:
def __init__(self):
self.content = ""
self.start_pos = None
self.end_pos = None
def append_char(self, lexer):
if self.start_pos is None:
self.start_pos = lexer.pos
self.content += lexer.next_char()
self.end_pos = lexer.pos
def append_str(self, str_, start_pos, end_pos):
if self.start_pos is None:
self.start_pos = start_pos
self.content += str_
self.end_pos = end_pos
def flush(self, parent):
if self.content != "":
parent.append(Text(parent.doc, self.content, self.start_pos, self.end_pos))
self.content = ""
self.start_pos = None
self.end_pos = None
def __repr__(self):
return "UnparsedContent({},start_pos={},end_pos={})".format(repr(self.content), self.start_pos, self.end_pos)
def parse(self, doc, macro_cmd_arguments=None):
# BREAKPOINT >>> # import pdb; pdb.set_trace() # <<< BREAKPOINT #
element_stack = []
current_element = doc
continue_parse = True
unparsed_content = Parser.UnparsedContent()
lex = doc.lex
while continue_parse:
tok = lex.next_token()
if tok is None:
next_char = lex.peek_char()
# When closing an emphasis
if next_char in { '*', '_' } and current_element.markup_type == "command" \
and current_element.cmd_name in { "emph" , "strong" }:
if current_element.cmd_name == "emph":
if current_element.cmd_opts['emph_type'] == next_char:
lex.next_char() # consume
unparsed_content.flush(current_element)
current_element = element_stack.pop()
else:
unparsed_content.append_char(lex)
elif current_element.cmd_name == "strong":
if current_element.cmd_opts['strong_type'] == next_char:
lex.next_chars(2) # consume two
unparsed_content.flush(current_element)
current_element = element_stack.pop()
else:
unparsed_content.append_char(lex)
else:
unparsed_content.append_char(lex)
else: # in the other case just append the character
unparsed_content.append_char(lex)
###############################################
### End of input ###
###############################################
elif tok.token_type == "end_of_input":
unparsed_content.flush(current_element)
while current_element.markup_type not in { "document", "subdoc", "macrocmddoc", "macroenvdoc", "macroenvfooterdoc" }:
if current_element.markup_type == "command":
raise ParseError(current_element.start_pos, tok.start_pos, "Unfinished command before end of document")
elif current_element.markup_type == "environment":
raise ParseError(current_element.start_pos, tok.start_pos, "Unfinished environment before end of document")
else:
# ok to close
current_element.end_pos = tok.start_pos
current_element = element_stack.pop()
continue_parse = False
### Line comment ###
elif tok.token_type == "line_comment":
pass # just skip this
###############################################
### Environments ###
###############################################
elif tok.token_type == "env_header":
unparsed_content.flush(current_element)
env = Environment(doc, tok.value.group(1), tok.value.group(2), tok.start_pos, tok.end_pos)
current_element.append(env)
element_stack.append(current_element)
current_element = env
# check if the environment has at least an argument
ntok = lex.next_token()
if ntok is None:
                    pass # special case: no more tokens (environment header at end of input)
elif ntok.token_type == "open_curly":
env.parsing_argument = True
lex.putback(ntok) # need the bracket for argument parsing
else:
env.parsing_argument = False
                    lex.putback(ntok) # environment without argument
# start of argument (or dummy bracket somewhere)
elif current_element.markup_type == "environment" and tok.token_type == "open_curly" and hasattr(current_element,"parsing_argument") and current_element.parsing_argument:
# first argument
env_arg = EnvArg(doc,current_element, tok.start_pos)
current_element.add_argument(env_arg)
element_stack.append(current_element)
current_element = env_arg
# end of argument (or dummy bracket somewhere)
elif current_element.markup_type == "env_arg" and tok.token_type == "close_curly":
unparsed_content.flush(current_element)
current_element.end_pos = tok.end_pos
# Pop parent element (environment)
current_element = element_stack.pop()
# check if the environment has at least a further argument
ntok = lex.next_token()
if ntok is None: # no more token ? ==> ERROR !
raise ParseError(tok.start_pos, tok.end_pos, "Missing closing environment at end of input")
elif ntok.token_type == "open_curly":
current_element.parsing_argument = True
lex.putback(ntok)
else:
current_element.parsing_argument = False
# keep the environment as current element for further argument or the body
                    lex.putback(ntok) # not an argument: put the token back and continue with the environment body
elif tok.token_type == "env_footer":
if current_element.markup_type != "environment":
raise ParseError(tok.start_pos, tok.end_pos, "Cannot close environment")
if current_element.env_name != tok.value.group(1):
raise ParseError(tok.start_pos, tok.end_pos, "Mismatch environment '{}' (expecting '{}')".format(tok.group(1), current_element.env_name))
unparsed_content.flush(current_element)
current_element.footer_start_pos = tok.start_pos
current_element.end_pos = tok.end_pos
# Pop parent element
current_element = element_stack.pop()
###############################################
### Commands ###
###############################################
elif tok.token_type == "cmd_header":
unparsed_content.flush(current_element)
cmd = Command(doc, tok.value.group(1), tok.value.group(2), tok.start_pos, tok.end_pos)
current_element.append(cmd)
                # check if the command has at least an argument
ntok = lex.next_token()
if ntok is None:
pass # special case: no more tokens (last command)
elif ntok.token_type == "open_curly":
element_stack.append(current_element)
current_element = cmd
lex.putback(ntok) # need the bracket for argument parsing
else:
lex.putback(ntok) # command without argument
# start of argument (or dummy bracket somewhere)
elif current_element.markup_type == "command" and tok.token_type == "open_curly":
# first argument
cmd_arg = CommandArg(doc,current_element, tok.start_pos)
current_element.add_argument(cmd_arg)
element_stack.append(current_element)
current_element = cmd_arg
# end of argument (or dummy bracket somewhere)
elif current_element.markup_type == "command_arg" and tok.token_type == "close_curly":
unparsed_content.flush(current_element)
current_element.end_pos = tok.end_pos
# Pop parent element (command)
current_element = element_stack.pop()
                # check if the command has at least an argument
ntok = lex.next_token()
if ntok is None:
current_element = element_stack.pop() # special case: no more tokens (last command, pop it)
elif ntok.token_type == "open_curly":
# keep the command as current element
lex.putback(ntok) # need the bracket for argument parsing
else:
# pop the command
current_element = element_stack.pop()
lex.putback(ntok) # command without more argument
elif tok.token_type == "cmd_pre_header":
unparsed_content.flush(current_element)
cmd = Command(doc, tok.value.group(1), tok.value.group(2), tok.start_pos, tok.end_pos, preformated=True)
current_element.append(cmd)
preformated = ""
eat_preformated = True
while eat_preformated:
footer = lex.peek_chars(3)
if footer is None:
raise ParseError(tok.start_pos, lex.pos, "Preformated command unfinished (missing '}}}')")
elif footer == "}}}":
cmd.content = preformated
eat_preformated = False
lex.next_chars(3)
else:
preformated += lex.next_char()
elif tok.token_type == "open_curly":
unparsed_content.append_str("{", tok.start_pos, tok.end_pos)
###############################################
### Sections (latex-style or markdown-style) ###
###############################################
elif tok.token_type == "section" or tok.token_type == "mdsection":
if tok.token_type == "section":
# latex section markup
section_title = tok.value.group(2)
section_depth = depth_of_section(tok.value.group(1))
elif tok.token_type == "mdsection":
# markdown section markup
section_title = tok.value.group(2)
section_depth = len(tok.value.group(1))
if tok.value.group(3) != "" and tok.value.group(3) != tok.value.group(1):
raise ParseError(tok.start_pos.next_char(tok.value.start(3)), tok.start_pos.next_char(tok.value.end(3)), 'Wrong section marker: should be "" or "{}"'.format(tok.value.group(1)))
if tok.value.group(4) != "" and not tok.value.group(4).isspace():
raise ParseError(tok.start_pos.next_char(tok.value.start(4)), tok.start_pos.next_char(tok.value.end(4)), "Unexpected text '{}' after section markup".format(tok.value.group(4)))
if current_element.markup_type == "command":
raise ParseError(current_element.start_pos, tok.start_pos, "Unfinished command before section")
elif current_element.markup_type == "environment":
raise ParseError(current_element.start_pos, tok.start_pos, "Unfinished environment before section")
# ok to parse new section
unparsed_content.flush(current_element)
# close all sections of greater or equal depth
while current_element.markup_type == "section" \
and current_element.section_depth >= section_depth: # TODO: fix probably needed for mixing inclusion and sectioning
current_element.end_pos = tok.start_pos
current_element = element_stack.pop()
section = Section(doc, section_title, section_of_depth(section_depth), section_depth, tok.start_pos, tok.end_pos)
current_element.append(section)
element_stack.append(current_element)
current_element = section
############################################
### Markdown-style itemize and enumerate ###
############################################
elif tok.token_type == "mdlist_open" or tok.token_type == "mdlist_item":
mditem_indent = len(tok.value.group(1))
mditem_style = "itemize" if (tok.value.group(2)[0] == '-' or tok.value.group(2)[0] == '+') else "enumerate"
unparsed_content.flush(current_element)
continue_closing = True
while continue_closing:
dig_once_more = False
# TODO: fix probably needed for closing with subdocument
while current_element.markup_type not in { "command", "environment", "section", "document", "subdoc" }:
current_element = element_stack.pop()
if (current_element.markup_type == "environment") and (current_element.env_name in { "itemize", "enumerate" }) \
and (current_element.env_name == mditem_style):
try:
if current_element.markdown_style:
pass # ok
except AttributeError:
raise ParseError(current_element.start_pos, tok.start_pos, "Mixing latex-style and markdown-style lists is forbidden")
if current_element.markdown_indent == mditem_indent:
# add a further item at the same level
#### element_stack.append(current_element)
mditem = Command(doc, "item", None, tok.start_pos, tok.end_pos)
mditem.markdown_style = True
current_element.append(mditem)
#### current_element = mditem
continue_closing = False
elif current_element.markdown_indent > mditem_indent:
# close one
current_element = element_stack.pop()
continue_closing = True
else: # dig one level more
dig_once_more = True
continue_closing = False
else:
dig_once_more = True
continue_closing = False
if dig_once_more:
mdlist = Environment(doc, mditem_style, None, tok.start_pos, tok.end_pos)
mdlist.markdown_style = True
mdlist.markdown_indent = mditem_indent
current_element.append(mdlist)
element_stack.append(current_element)
current_element = mdlist
mditem = Command(doc, "item", None, tok.start_pos, tok.end_pos)
mditem.markdown_style = True
current_element.append(mditem)
#### element_stack.append(current_element)
#### current_element = mditem
# loop if continue_closing == True
# and we're done
###########################################
### Inline preformated ###
###########################################
elif tok.token_type == "inline_preformated":
unparsed_content.flush(current_element)
preformated = Preformated(doc, tok.value.group(1), "inline", tok.start_pos, tok.end_pos)
current_element.append(preformated)
### Emphasis (normal) ###
elif tok.token_type == "emph":
unparsed_content.flush(current_element)
cmd = Command(doc, "emph", {'emph_type': tok.value.group(1) }, tok.start_pos, tok.end_pos)
current_element.append(cmd)
element_stack.append(current_element)
current_element = cmd
### Strong emphasis ###
elif tok.token_type == "strong":
unparsed_content.flush(current_element)
cmd = Command(doc, "strong", {'strong_type': tok.value.group(1) }, tok.start_pos, tok.end_pos)
current_element.append(cmd)
element_stack.append(current_element)
current_element = cmd
######################################################
### Macros: commands and environments definitions ###
######################################################
### command definition
elif tok.token_type == "def_cmd_header":
unparsed_content.flush(current_element)
def_cmd_name = tok.value.group(1)
def_cmd_arity = 0
if tok.value.group(2) is not None:
def_cmd_arity = int(tok.value.group(2))
tok2 = lex.next_token()
if tok2.token_type != "open_curly":
raise ParseError(tok.end_pos, tok.end_pos.next_char(), "Missing '{' for \\defCommand body")
# prepare the template string
def_cmd_lex_start_pos = lex.pos
def_cmd_lex_str = ""
nb_curly = 1
while nb_curly > 0:
ch = lex.next_char()
if ch is None:
raise ParseError(def_cmd_lex_start_pos, lex.pos, "Unexpected end of input while parsing \\defCommand body")
elif ch == '}':
nb_curly -= 1
if nb_curly > 0:
def_cmd_lex_str += ch
else:
if ch == '{':
nb_curly += 1
def_cmd_lex_str += ch
def_cmd_tpl = template.Template(def_cmd_lex_str,
globvars.TANGO_EVAL_GLOBAL_ENV,
escape_var='#',
escape_inline='@',
escape_block='@',
escape_block_open='{',
escape_block_close='}',
escape_emit_function='emit',
filename='<defCommand:{}>'.format(def_cmd_name),
base_pos=def_cmd_lex_start_pos)
# register the command
doc.register_def_command(def_cmd_name, DefCommand(doc, def_cmd_name, def_cmd_arity, tok.start_pos, tok.end_pos, def_cmd_tpl))
### macro-command argument
elif tok.token_type == "macro_cmd_arg": ### XXX: dead code ?
unparsed_content.flush(current_element)
arg_num = int(tok.value.group(1))
command_arg_markup = macro_cmd_arguments[arg_num]
current_element.append(command_arg_markup)
element_stack.append(current_element)
current_element = command_arg_markup
### environment definition
elif tok.token_type == "def_env_header":
unparsed_content.flush(current_element)
def_env_name = tok.value.group(1)
def_env_arity = 0
if tok.value.group(2) is not None:
def_env_arity = int(tok.value.group(2))
tok2 = lex.next_token()
if tok2.token_type != "open_curly":
raise ParseError(tok.end_pos, tok.end_pos.next_char(), "Missing '{' for \\defEnvironment header body")
# prepare the template string for the header part
def_env_header_lex_start_pos = lex.pos
def_env_header_lex_str = ""
nb_curly = 1
while nb_curly > 0:
ch = None
try:
ch = lex.next_char()
if ch is None:
raise ParseError(def_env_header_lex_start_pos, lex.pos, "Unexpected end of input while parsing \\defEnvironment header body")
except:
raise ParseError(def_env_header_lex_start_pos, lex.pos, "Unexpected end of input while parsing \\defEnvironment header body")
if ch == '}':
nb_curly -= 1
if nb_curly > 0:
def_env_header_lex_str += ch
else:
if ch == '{':
nb_curly += 1
def_env_header_lex_str += ch
def_env_header_tpl = template.Template(def_env_header_lex_str,
globvars.TANGO_EVAL_GLOBAL_ENV,
escape_var='#',
escape_inline='@',
escape_block='@',
escape_block_open='{',
escape_block_close='}',
escape_emit_function='emit',
filename='<defEnvironment:{}>'.format(def_env_name),
base_pos=def_env_header_lex_start_pos)
# prepare the template string for the footer part
tok2 = lex.next_token()
if tok2.token_type != "open_curly":
raise ParseError(tok.end_pos, tok.end_pos.next_char(), "Missing '{' for \\defEnvironment footer body")
def_env_footer_lex_start_pos = lex.pos
def_env_footer_lex_str = ""
nb_curly = 1
while nb_curly > 0:
ch = None
try:
ch = lex.next_char()
if ch is None:
raise ParseError(def_env_footer_lex_start_pos, lex.pos, "Unexpected end of input while parsing \\defEnvironment footer body")
except:
raise ParseError(def_env_footer_lex_start_pos, lex.pos, "Unexpected end of input while parsing \\defEnvironment footer body")
if ch == '}':
nb_curly -= 1
if nb_curly > 0:
def_env_footer_lex_str += ch
else:
if ch == '{':
nb_curly += 1
def_env_footer_lex_str += ch
def_env_footer_tpl = template.Template(def_env_footer_lex_str,
globvars.TANGO_EVAL_GLOBAL_ENV,
escape_var='#',
escape_inline='@',
escape_block='@',
escape_block_open='{',
escape_block_close='}',
escape_emit_function='emit',
filename='<defEnvironment:{}>'.format(def_env_name),
base_pos=def_env_footer_lex_start_pos)
# register the environment
doc.register_def_environment(def_env_name, DefEnvironment(doc, def_env_name, def_env_arity, def_env_header_lex_start_pos, lex.pos, def_env_header_tpl, def_env_footer_tpl))
###########################################
### Special characters (newlines, etc.) ###
###########################################
elif tok.token_type == "protected":
unparsed_content.append_str(tok.value[1:], tok.start_pos, tok.end_pos)
elif tok.token_type == "newline":
unparsed_content.flush(current_element)
newlines = tok.value
while lex.peek_char() == "\n" or lex.peek_char() == "\r":
newlines += lex.next_char()
## Special treatment for markdown lists
if len(newlines) >= 2 or lex.at_eof():
# check if we need to finish some markdown list
element_stack_copy = element_stack[:]
element_stack_copy.append(current_element)
top_mdlist = None
while element_stack_copy:
current_element_copy = element_stack_copy.pop()
if current_element_copy.markup_type in { "command", "environment", "section", "document" } \
and not hasattr(current_element_copy, "markdown_style"):
element_stack_copy = []
elif current_element_copy.markup_type == "environment":
top_mdlist = current_element_copy
if top_mdlist: # found a markdown list to close
while current_element is not top_mdlist:
current_element = element_stack.pop()
# close the top markdown list
current_element = element_stack.pop()
current_element.append(Newlines(doc, newlines, tok.start_pos, tok.end_pos))
elif tok.token_type == "spaces":
unparsed_content.flush(current_element)
current_element.append(Spaces(doc, tok.value.group(0), tok.start_pos, tok.end_pos))
else:
# unrecognized token type
raise ParseError(tok.start_pos, tok.end_pos, "Unrecognized token type: {}".format(tok.token_type))
# at the end of input
unparsed_content.flush(current_element)
while current_element != doc:
if current_element.markup_type == "command":
raise ParseError(current_element.start_pos, tok.start_pos, "Unfinished command before end of document")
elif current_element.markup_type == "environment":
raise ParseError(current_element.start_pos, tok.start_pos, "Unfinished environment before end of document")
else:
# ok to close
current_element.end_pos = tok.start_pos
current_element = element_stack.pop()
# preparsing finished
return doc
def prepare_string_lexer(self, input):
tokens = lexer.Tokenizer(lexer.StringTokenizer(input))
lex = lexer.Lexer(tokens, *self.recognizers)
return lex
def parse_from_string(self, input, filename="<string>"):
self.filename = filename
lex = self.prepare_string_lexer(input)
doc = Document(self.filename, lex)
return self.parse(doc)
def parse_from_file(self, filename):
f = open(filename, "r")
input = f.read()
f.close()
doc = self.parse_from_string(input, filename)
return doc
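# Illustrative usage sketch (comment only; the name of the enclosing parser
# class is not shown in this excerpt, so "Parser" below is a hypothetical
# placeholder):
#
#   parser = Parser()
#   doc = parser.parse_from_file("document.txt")
#   # or, for in-memory input:
#   doc = parser.parse_from_string("Some plain text content.",
#                                  filename="<example>")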
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RNN helpers for TensorFlow models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
def rnn(cell, inputs, initial_state=None, dtype=None,
sequence_length=None, scope=None):
"""Creates a recurrent neural network specified by RNNCell "cell".
The simplest form of RNN network generated is:
state = cell.zero_state(...)
outputs = []
for input_ in inputs:
output, state = cell(input_, state)
outputs.append(output)
return (outputs, state)
However, a few other options are available:
An initial state can be provided.
If the sequence_length vector is provided, dynamic calculation is performed.
This method of calculation does not compute the RNN steps past the maximum
sequence length of the minibatch (thus saving computational time),
and properly propagates the state at an example's sequence length
to the final state output.
The dynamic calculation performed is, at time t for batch row b,
(output, state)(b, t) =
(t >= sequence_length(b))
? (zeros(cell.output_size), states(b, sequence_length(b) - 1))
: cell(input(b, t), state(b, t - 1))
Args:
cell: An instance of RNNCell.
inputs: A length T list of inputs, each a tensor of shape
[batch_size, cell.input_size].
initial_state: (optional) An initial state for the RNN. This must be
a tensor of appropriate type and shape [batch_size x cell.state_size].
dtype: (optional) The data type for the initial state. Required if
initial_state is not provided.
sequence_length: Specifies the length of each sequence in inputs.
An int32 or int64 vector (tensor) size [batch_size]. Values in [0, T).
scope: VariableScope for the created subgraph; defaults to "RNN".
Returns:
A pair (outputs, state) where:
outputs is a length T list of outputs (one for each input)
state is the final state
Raises:
TypeError: If "cell" is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
if not isinstance(cell, rnn_cell.RNNCell):
raise TypeError("cell must be an instance of RNNCell")
if not isinstance(inputs, list):
raise TypeError("inputs must be a list")
if not inputs:
raise ValueError("inputs must not be empty")
outputs = []
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "RNN") as varscope:
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
fixed_batch_size = inputs[0].get_shape().with_rank_at_least(1)[0]
if fixed_batch_size.value:
batch_size = fixed_batch_size.value
else:
batch_size = array_ops.shape(inputs[0])[0]
if initial_state is not None:
state = initial_state
else:
if not dtype:
raise ValueError("If no initial_state is provided, dtype must be.")
state = cell.zero_state(batch_size, dtype)
if sequence_length is not None:
sequence_length = math_ops.to_int32(sequence_length)
if sequence_length is not None: # Prepare variables
zero_output = array_ops.zeros(
array_ops.pack([batch_size, cell.output_size]), inputs[0].dtype)
zero_output.set_shape(
tensor_shape.TensorShape([fixed_batch_size.value, cell.output_size]))
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)
for time, input_ in enumerate(inputs):
if time > 0: vs.get_variable_scope().reuse_variables()
# pylint: disable=cell-var-from-loop
call_cell = lambda: cell(input_, state)
# pylint: enable=cell-var-from-loop
if sequence_length is not None:
(output, state) = _rnn_step(
time, sequence_length, min_sequence_length, max_sequence_length,
zero_output, state, call_cell)
else:
(output, state) = call_cell()
outputs.append(output)
return (outputs, state)
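# Minimal usage sketch for rnn() (comment only; assumes the TF 0.x-era API
# imported above, e.g. rnn_cell.BasicLSTMCell):
#
#   cell = rnn_cell.BasicLSTMCell(128)
#   # inputs: a length-T Python list of [batch_size, input_size] tensors
#   outputs, final_state = rnn(cell, inputs, dtype=dtypes.float32,
#                              sequence_length=seq_lens)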
def state_saving_rnn(cell, inputs, state_saver, state_name,
sequence_length=None, scope=None):
"""RNN that accepts a state saver for time-truncated RNN calculation.
Args:
cell: An instance of RNNCell.
inputs: A length T list of inputs, each a tensor of shape
[batch_size, cell.input_size].
state_saver: A state saver object with methods `state` and `save_state`.
state_name: The name to use with the state_saver.
sequence_length: (optional) An int32/int64 vector size [batch_size].
See the documentation for rnn() for more details about sequence_length.
scope: VariableScope for the created subgraph; defaults to "RNN".
Returns:
A pair (outputs, state) where:
outputs is a length T list of outputs (one for each input)
state is the final state
Raises:
TypeError: If "cell" is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
initial_state = state_saver.state(state_name)
(outputs, state) = rnn(cell, inputs, initial_state=initial_state,
sequence_length=sequence_length, scope=scope)
save_state = state_saver.save_state(state_name, state)
with ops.control_dependencies([save_state]):
outputs[-1] = array_ops.identity(outputs[-1])
return (outputs, state)
def _rnn_step(
time, sequence_length, min_sequence_length, max_sequence_length,
zero_output, state, call_cell):
"""Calculate one step of a dynamic RNN minibatch.
Returns an (output, state) pair conditioned on the sequence_lengths.
The pseudocode is something like:
if t >= max_sequence_length:
return (zero_output, state)
if t < min_sequence_length:
return call_cell()
# Selectively output zeros or output, old state or new state depending
# on if we've finished calculating each row.
new_output, new_state = call_cell()
final_output = np.vstack([
zero_output if time >= sequence_lengths[r] else new_output_r
for r, new_output_r in enumerate(new_output)
])
final_state = np.vstack([
state[r] if time >= sequence_lengths[r] else new_state_r
for r, new_state_r in enumerate(new_state)
])
return (final_output, final_state)
Args:
time: Python int, the current time step
sequence_length: int32 `Tensor` vector of size [batch_size]
min_sequence_length: int32 `Tensor` scalar, min of sequence_length
max_sequence_length: int32 `Tensor` scalar, max of sequence_length
zero_output: `Tensor` vector of shape [output_size]
state: `Tensor` matrix of shape [batch_size, state_size]
call_cell: lambda returning tuple of (new_output, new_state) where
new_output is a `Tensor` matrix of shape [batch_size, output_size]
new_state is a `Tensor` matrix of shape [batch_size, state_size]
Returns:
A tuple of (final_output, final_state) as given by the pseudocode above:
final_output is a `Tensor` matrix of shape [batch_size, output_size]
final_state is a `Tensor` matrix of shape [batch_size, state_size]
"""
# Step 1: determine whether we need to call_cell or not
empty_update = lambda: (zero_output, state)
state_shape = state.get_shape()
output, new_state = control_flow_ops.cond(
time < max_sequence_length, call_cell, empty_update)
# Step 2: determine whether we need to copy through state and/or outputs
existing_output_state = lambda: (output, new_state)
def copy_through():
# Use broadcasting select to determine which values should get
# the previous state & zero output, and which values should get
# a calculated state & output.
copy_cond = (time >= sequence_length)
return (math_ops.select(copy_cond, zero_output, output),
math_ops.select(copy_cond, state, new_state))
(output, state) = control_flow_ops.cond(
time < min_sequence_length, existing_output_state, copy_through)
output.set_shape(zero_output.get_shape())
state.set_shape(state_shape)
return (output, state)
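# The selection logic above can be illustrated with a small pure-NumPy sketch
# (exposition only; not used anywhere in this module). It mirrors the
# pseudocode in the _rnn_step docstring: rows whose sequence has ended emit
# zeros and keep their previous state.
def _rnn_step_numpy_sketch(time, sequence_length, new_output, new_state,
                           zero_output, old_state):
  """Pure-NumPy analogue of one masked RNN step (illustrative only)."""
  import numpy as np
  finished = time >= np.asarray(sequence_length)       # [batch_size] bools
  final_output = np.where(finished[:, None], zero_output, new_output)
  final_state = np.where(finished[:, None], old_state, new_state)
  return final_output, final_state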
def _reverse_seq(input_seq, lengths):
"""Reverse a list of Tensors up to specified lengths.
Args:
input_seq: Sequence of seq_len tensors of dimension (batch_size, depth)
lengths: A tensor of dimension batch_size, containing lengths for each
sequence in the batch. If "None" is specified, simply reverses
the list.
Returns:
time-reversed sequence
"""
if lengths is None:
return list(reversed(input_seq))
for input_ in input_seq:
input_.set_shape(input_.get_shape().with_rank(2))
# Join into (time, batch_size, depth)
s_joined = array_ops.pack(input_seq)
# Reverse along dimension 0
s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
# Split again into list
result = array_ops.unpack(s_reversed)
return result
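# A NumPy analogue of the reverse-up-to-length behaviour above (illustrative
# only): each batch column's first `lengths[b]` steps are reversed and the
# remaining steps are left untouched, matching reverse_sequence semantics.
def _reverse_seq_numpy_sketch(seq, lengths):
  """seq: [time, batch, depth] ndarray; lengths: iterable of per-row lengths."""
  import numpy as np
  out = np.array(seq, copy=True)
  for b, length in enumerate(lengths):
    out[:length, b] = out[:length, b][::-1]
  return out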
def bidirectional_rnn(cell_fw, cell_bw, inputs,
initial_state_fw=None, initial_state_bw=None,
dtype=None, sequence_length=None, scope=None):
"""Creates a bidirectional recurrent neural network.
Similar to the unidirectional case above (rnn) but takes input and builds
independent forward and backward RNNs with the final forward and backward
outputs depth-concatenated, such that the output will have the format
[time][batch][cell_fw.output_size + cell_bw.output_size]. The input_size of
forward and backward cell must match. The initial state for both directions
is zero by default (but can be set optionally) and no intermediate states are
ever returned -- the network is fully unrolled for the given (passed in)
length(s) of the sequence(s) or completely unrolled if length(s) is not given.
Args:
cell_fw: An instance of RNNCell, to be used for forward direction.
cell_bw: An instance of RNNCell, to be used for backward direction.
inputs: A length T list of inputs, each a tensor of shape
[batch_size, cell.input_size].
initial_state_fw: (optional) An initial state for the forward RNN.
This must be a tensor of appropriate type and shape
[batch_size x cell.state_size].
initial_state_bw: (optional) Same as for initial_state_fw.
dtype: (optional) The data type for the initial state. Required if either
of the initial states are not provided.
sequence_length: (optional) An int32/int64 vector, size [batch_size],
containing the actual lengths for each of the sequences.
scope: VariableScope for the created subgraph; defaults to "BiRNN"
Returns:
A tuple (outputs, output_state_fw, output_state_bw) where:
outputs is a length T list of outputs (one for each input), which
are depth-concatenated forward and backward outputs
output_state_fw is the final state of the forward rnn
output_state_bw is the final state of the backward rnn
Raises:
TypeError: If "cell_fw" or "cell_bw" is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
if not isinstance(cell_fw, rnn_cell.RNNCell):
raise TypeError("cell_fw must be an instance of RNNCell")
if not isinstance(cell_bw, rnn_cell.RNNCell):
raise TypeError("cell_bw must be an instance of RNNCell")
if not isinstance(inputs, list):
raise TypeError("inputs must be a list")
if not inputs:
raise ValueError("inputs must not be empty")
name = scope or "BiRNN"
# Forward direction
with vs.variable_scope(name + "_FW") as fw_scope:
output_fw, output_state_fw = rnn(cell_fw, inputs, initial_state_fw, dtype,
sequence_length, scope=fw_scope)
# Backward direction
with vs.variable_scope(name + "_BW") as bw_scope:
tmp, output_state_bw = rnn(cell_bw, _reverse_seq(inputs, sequence_length),
initial_state_bw, dtype, sequence_length, scope=bw_scope)
output_bw = _reverse_seq(tmp, sequence_length)
# Concat each of the forward/backward outputs
outputs = [array_ops.concat(1, [fw, bw])
for fw, bw in zip(output_fw, output_bw)]
return (outputs, output_state_fw, output_state_bw)
def dynamic_rnn(cell, inputs, sequence_length, initial_state=None, dtype=None,
parallel_iterations=None, swap_memory=False, time_major=False,
scope=None):
"""Creates a recurrent neural network specified by RNNCell "cell".
This function is functionally identical to the function `rnn` above, but
performs fully dynamic unrolling of `inputs`.
Unlike `rnn`, the input `inputs` is not a Python list of `Tensors`. Instead,
it is a single `Tensor` where the maximum time is either the first or second
dimension (see the parameter `time_major`). The corresponding output is
a single `Tensor` having the same number of time steps and batch size.
The parameter `sequence_length` is required and dynamic calculation is
automatically performed.
Args:
cell: An instance of RNNCell.
inputs: The RNN inputs.
If time_major == False (default), this must be a tensor of shape:
`[batch_size, max_time, cell.input_size]`.
If time_major == True, this must be a tensor of shape:
`[max_time, batch_size, cell.input_size]`.
sequence_length: An int32/int64 vector (tensor) size [batch_size].
initial_state: (optional) An initial state for the RNN. This must be
a tensor of appropriate type and shape [batch_size x cell.state_size].
dtype: (optional) The data type for the initial state. Required if
initial_state is not provided.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Swap the tensors produced in forward inference but needed
for back prop from GPU to CPU.
time_major: The shape format of the `inputs` and `outputs` Tensors.
If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using time_major = True is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
scope: VariableScope for the created subgraph; defaults to "RNN".
Returns:
A pair (outputs, state) where:
outputs: The RNN output `Tensor`.
If time_major == False (default), this will be a `Tensor` shaped:
`[batch_size, max_time, cell.output_size]`.
If time_major == True, this will be a `Tensor` shaped:
`[max_time, batch_size, cell.output_size]`.
state: The final state, shaped:
`[batch_size, cell.state_size]`.
Raises:
TypeError: If "cell" is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
if not isinstance(cell, rnn_cell.RNNCell):
raise TypeError("cell must be an instance of RNNCell")
# By default, time_major==False and inputs are batch-major: shaped
# [batch, time, depth]
# For internal calculations, we transpose to [time, batch, depth]
if not time_major:
inputs = array_ops.transpose(inputs, [1, 0, 2]) # (B,T,D) => (T,B,D)
parallel_iterations = parallel_iterations or 32
sequence_length = math_ops.to_int32(sequence_length)
sequence_length = array_ops.identity(sequence_length, name="sequence_length")
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "RNN") as varscope:
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
input_shape = array_ops.shape(inputs)
batch_size = input_shape[1]
if initial_state is not None:
state = initial_state
else:
if not dtype:
raise ValueError("If no initial_state is provided, dtype must be.")
state = cell.zero_state(batch_size, dtype)
def _assert_has_shape(x, shape):
x_shape = array_ops.shape(x)
packed_shape = array_ops.pack(shape)
return logging_ops.Assert(
math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)),
["Expected shape for Tensor %s is " % x.name,
packed_shape, " but saw shape: ", x_shape])
# Perform some shape validation
with ops.control_dependencies(
[_assert_has_shape(sequence_length, [batch_size])]):
sequence_length = array_ops.identity(sequence_length, name="CheckSeqLen")
(outputs, final_state) = _dynamic_rnn_loop(
cell, inputs, state, sequence_length,
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
# Outputs of _dynamic_rnn_loop are always shaped [time, batch, depth].
# If we are performing batch-major calculations, transpose output back
# to shape [batch, time, depth]
if not time_major:
outputs = array_ops.transpose(outputs, [1, 0, 2]) # (T,B,D) => (B,T,D)
return (outputs, final_state)
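# Minimal usage sketch for dynamic_rnn() (comment only; batch-major inputs):
#
#   cell = rnn_cell.GRUCell(64)
#   # inputs: [batch_size, max_time, input_size], seq_lens: [batch_size]
#   outputs, state = dynamic_rnn(cell, inputs, seq_lens, dtype=dtypes.float32)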
def _dynamic_rnn_loop(cell, inputs, initial_state, sequence_length,
parallel_iterations, swap_memory):
"""Internal implementation of Dynamic RNN.
Args:
cell: An instance of RNNCell.
inputs: A `Tensor` of shape [time, batch_size, depth].
initial_state: A `Tensor` of shape [batch_size, depth].
sequence_length: An `int32` `Tensor` of shape [batch_size].
parallel_iterations: Positive Python int.
swap_memory: A Python boolean
Returns:
Tuple (final_outputs, final_state).
final_outputs:
A `Tensor` of shape `[time, batch_size, depth]`.
final_state:
A `Tensor` of shape [batch_size, depth].
"""
state = initial_state
assert isinstance(parallel_iterations, int), "parallel_iterations must be int"
# Construct an initial output
input_shape = array_ops.shape(inputs)
(time_steps, batch_size, unused_depth) = array_ops.unpack(input_shape, 3)
inputs_got_shape = inputs.get_shape().with_rank(3)
(const_time_steps, const_batch_size, const_depth) = inputs_got_shape.as_list()
# Prepare dynamic conditional copying of state & output
zero_output = array_ops.zeros(
array_ops.pack([batch_size, cell.output_size]), inputs.dtype)
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)
time = array_ops.constant(0, dtype=dtypes.int32, name="time")
with ops.op_scope([], "dynamic_rnn") as scope:
base_name = scope
output_ta = tensor_array_ops.TensorArray(
dtype=inputs.dtype, size=time_steps,
tensor_array_name=base_name + "output")
input_ta = tensor_array_ops.TensorArray(
dtype=inputs.dtype, size=time_steps,
tensor_array_name=base_name + "input")
input_ta = input_ta.unpack(inputs)
def _time_step(time, state, output_ta_t):
"""Take a time step of the dynamic RNN.
Args:
time: int32 scalar Tensor.
state: Vector.
output_ta_t: `TensorArray`, the output with existing flow.
Returns:
The tuple (time + 1, new_state, output_ta_t with updated flow).
"""
input_t = input_ta.read(time)
# Restore some shape information
input_t.set_shape([const_batch_size, const_depth])
(output, new_state) = _rnn_step(
time, sequence_length, min_sequence_length, max_sequence_length,
zero_output, state, lambda: cell(input_t, state))
output_ta_t = output_ta_t.write(time, output)
return (time + 1, new_state, output_ta_t)
(unused_final_time, final_state, output_final_ta) = control_flow_ops.While(
cond=lambda time, _1, _2: time < time_steps,
body=_time_step,
loop_vars=(time, state, output_ta),
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
final_outputs = output_final_ta.pack()
# Restore some shape information
final_outputs.set_shape([
const_time_steps, const_batch_size, cell.output_size])
return (final_outputs, final_state)
|
|
from sympy import Integer
from threading import RLock
from pyglet.gl import *
from plot_object import PlotObject
from plot_axes import PlotAxes
from plot_window import PlotWindow
from plot_mode import PlotMode
import plot_modes
from time import sleep
from os import getcwd, listdir
from util import parse_option_string
class Plot(object):
"""
Plot Examples
=============
See examples/plotting.py for many more examples.
>>> from sympy import symbols, Plot
>>> x,y,z = symbols('xyz')
>>> Plot(x*y**3-y*x**3)
>>> p = Plot()
>>> p[1] = x*y
>>> p[1].color = z, (0.4,0.4,0.9), (0.9,0.4,0.4)
>>> p = Plot()
>>> p[1] = x**2+y**2
>>> p[2] = -x**2-y**2
Variable Intervals
==================
The basic format is [var, min, max, steps], but the
syntax is flexible and arguments left out are taken
from the defaults for the current coordinate mode:
>>> Plot(x**2) # implies [x,-5,5,100]
>>> Plot(x**2, [], []) # [x,-1,1,40], [y,-1,1,40]
>>> Plot(x**2-y**2, [100], [100]) # [x,-1,1,100], [y,-1,1,100]
>>> Plot(x**2, [x,-13,13,100])
>>> Plot(x**2, [-13,13]) # [x,-13,13,100]
>>> Plot(x**2, [x,-13,13]) # [x,-13,13,100]
>>> Plot(1*x, [], [x], mode='cylindrical')
... # [unbound_theta,0,2*Pi,40], [x,-1,1,20]
Coordinate Modes
================
Plot supports several curvilinear coordinate modes, and
they are independent for each plotted function. You can specify
a coordinate mode explicitly with the 'mode' named argument,
but it can be automatically determined for cartesian or
parametric plots, and therefore must only be specified for
polar, cylindrical, and spherical modes.
Specifically, Plot(function arguments) and Plot[n] =
(function arguments) will interpret your arguments as a
cartesian plot if you provide one function and a parametric
plot if you provide two or three functions. Similarly, the
arguments will be interpreted as a curve if one variable is
used, and a surface if two are used.
Supported mode names by number of variables:
1: parametric, cartesian, polar
2: parametric, cartesian, cylindrical = polar, spherical
>>> Plot(1, mode='spherical')
Calculator-like Interface
=========================
>>> p = Plot(visible=False)
>>> f = x**2
>>> p[1] = f
>>> p[2] = f.diff(x)
>>> p[3] = f.diff(x).diff(x)
>>> p
[1]: x**2, 'mode=cartesian'
[2]: 2*x, 'mode=cartesian'
[3]: 2, 'mode=cartesian'
>>> p.show()
>>> p.clear()
>>> p
<blank plot>
>>> p[1] = x**2+y**2
>>> p[1].style = 'solid'
>>> p[2] = -x**2-y**2
>>> p[2].style = 'wireframe'
>>> p[1].color = z, (0.4,0.4,0.9), (0.9,0.4,0.4)
>>> p[1].style = 'both'
>>> p[2].style = 'both'
>>> p.close()
Plot Window Keyboard Controls
=============================
Screen Rotation:
X,Y axis Arrow Keys, A,S,D,W, Numpad 4,6,8,2
Z axis Q,E, Numpad 7,9
Model Rotation:
Z axis Z,C, Numpad 1,3
Zoom: R,F, PgUp,PgDn, Numpad +,-
Reset Camera: X, Numpad 5
Camera Presets:
XY F1
XZ F2
YZ F3
Perspective F4
Sensitivity Modifier: SHIFT
Axes Toggle:
Visible F5
Colors F6
Close Window: ESCAPE
=============================
"""
def __init__(self, *fargs, **win_args):
"""
Positional Arguments
====================
Any given positional arguments are used to
initialize a plot function at index 1. In
other words...
>>> from sympy.core import Symbol
>>> x = Symbol('x')
>>> p = Plot(x**2, visible=False)
...is equivalent to...
>>> p = Plot(visible=False)
>>> p[1] = x**2
Note that in earlier versions of the plotting
module, you were able to specify multiple
functions in the initializer. This functionality
has been dropped in favor of better automatic
plot_mode detection.
Named Arguments
===============
axes
An option string of the form
"key1=value1; key2 = value2" which
can use the following options:
style = ordinate
none OR frame OR box OR ordinate
stride = 0.25
val OR (val_x, val_y, val_z)
overlay = True (draw on top of plot)
True OR False
colored = False (False uses Black,
True uses colors
R,G,B = X,Y,Z)
True OR False
label_axes = False (display axis names
at endpoints)
True OR False
visible = True (show immediately)
True OR False
The following named arguments are passed as
arguments to window initialization:
antialiasing = True
True OR False
ortho = False
True OR False
invert_mouse_zoom = False
True OR False
"""
self._win_args = win_args
self._window = None
self._render_lock = RLock()
self._functions = {}
self._pobjects = []
self._screenshot = ScreenShot(self)
axe_options = parse_option_string(win_args.pop('axes', ''))
self.axes = PlotAxes(**axe_options)
self._pobjects.append(self.axes)
self[0] = fargs
if win_args.get('visible', True):
self.show()
## Window Interfaces
def show(self):
"""
Creates and displays a plot window, or activates it
(gives it focus) if it has already been created.
"""
if self._window and not self._window.has_exit:
self._window.activate()
else:
self._win_args['visible'] = True
self.axes.reset_resources()
self._window = PlotWindow(self, **self._win_args)
def close(self):
"""
Closes the plot window.
"""
if self._window:
self._window.close()
def saveimage(self, outfile=None, format='', size=(600, 500)):
"""
Saves a screen capture of the plot window to an
image file.
If outfile is given, it can either be a path
or a file object. Otherwise a png image will
be saved to the current working directory.
If the format is omitted, it is determined from
the filename extension.
"""
self._screenshot.save(outfile, format, size)
## Function List Interfaces
def clear(self):
"""
Clears the function list of this plot.
"""
self._render_lock.acquire()
self._functions = {}
self.adjust_all_bounds()
self._render_lock.release()
def __getitem__(self, i):
"""
Returns the function at position i in the
function list.
"""
return self._functions[i]
def __setitem__(self, i, args):
"""
Parses and adds a PlotMode to the function
list.
"""
if not (isinstance(i, (int, Integer)) and i >= 0):
raise ValueError("Function index must "
"be an integer >= 0.")
if isinstance(args, PlotObject):
f = args
else:
if not isinstance(args, (list, tuple)):
args = [args]
if len(args) == 0:
return # no arguments given
kwargs = dict(bounds_callback=self.adjust_all_bounds)
f = PlotMode(*args, **kwargs)
if f:
self._render_lock.acquire()
self._functions[i] = f
self._render_lock.release()
else:
raise ValueError("Failed to parse '%s'."
% ', '.join(str(a) for a in args))
def __delitem__(self, i):
"""
Removes the function in the function list at
position i.
"""
self._render_lock.acquire()
del self._functions[i]
self.adjust_all_bounds()
self._render_lock.release()
def firstavailableindex(self):
"""
Returns the first unused index in the function list.
"""
i = 0
self._render_lock.acquire()
while i in self._functions: i += 1
self._render_lock.release()
return i
def append(self, *args):
"""
Parses and adds a PlotMode to the function
list at the first available index.
"""
self.__setitem__(self.firstavailableindex(), args)
def __len__(self):
"""
Returns the number of functions in the function list.
"""
return len(self._functions)
def __iter__(self):
"""
Allows iteration of the function list.
"""
return self._functions.itervalues()
def __repr__(self):
return str(self)
def __str__(self):
"""
Returns a string containing a new-line separated
list of the functions in the function list.
"""
s = ""
if len(self._functions) == 0:
s += "<blank plot>"
else:
self._render_lock.acquire()
s += "\n".join(["%s[%i]: %s" % ("", i, str(self._functions[i]))
for i in self._functions])
self._render_lock.release()
return s
def adjust_all_bounds(self):
self._render_lock.acquire()
self.axes.reset_bounding_box()
for f in self._functions:
self.axes.adjust_bounds(self._functions[f].bounds)
self._render_lock.release()
def wait_for_calculations(self):
sleep(0)
self._render_lock.acquire()
for f in self._functions:
a = self._functions[f]._get_calculating_verts
b = self._functions[f]._get_calculating_cverts
while a() or b(): sleep(0)
self._render_lock.release()
class ScreenShot:
def __init__(self, plot):
self._plot = plot
self.screenshot_requested = False
self.outfile = None
self.format = ''
self.invisibleMode = False
self.flag = 0
def __nonzero__(self):
if self.screenshot_requested:
return 1
return 0
def _execute_saving(self):
if self.flag <3:
self.flag += 1
return
size_x, size_y = self._plot._window.get_size()
size = size_x*size_y*4*sizeof(c_ubyte)
image = create_string_buffer(size)
glReadPixels(0,0,size_x,size_y, GL_RGBA, GL_UNSIGNED_BYTE, image)
from PIL import Image
im = Image.frombuffer('RGBA',(size_x,size_y),image.raw, 'raw', 'RGBA', 0, 1)
if type(self.outfile) in (str, unicode):
im.transpose(Image.FLIP_TOP_BOTTOM).save(self.outfile)
elif type(self.outfile)==file:
im.transpose(Image.FLIP_TOP_BOTTOM).save(self.outfile, self.format)
self.flag = 0
self.screenshot_requested = False
if self.invisibleMode:
self._plot._window.close()
def save(self, outfile=None, format='', size=(600, 500)):
self.outfile = outfile
self.format = format
self.size = size
if not self._plot._window or self._plot._window.has_exit:
self._plot._win_args['visible'] = False
self._plot._win_args['width'] = size[0]
self._plot._win_args['height'] = size[1]
self._plot.axes.reset_resources()
self._plot._window = PlotWindow(self._plot, **self._plot._win_args)
self.invisibleMode = True
if type(self.outfile) in (str, unicode):
self.screenshot_requested = True
elif type(self.outfile)==file and self.format:
self.screenshot_requested = True
elif self.outfile==None:
self.outfile=self._create_unique_path()
self.screenshot_requested = True
print self.outfile
def _create_unique_path(self):
cwd = getcwd()
l = listdir(cwd)
path = ''
i=0
while True:
if not 'plot_%s.png'%i in l:
path = cwd+'/plot_%s.png'%i
break
i+=1
return path
|
|
import os
import sys
import zipfile
import tarfile
import glob
from CityscapeMask2Pascal import cityscapeMask2Pascal
from xml.etree import ElementTree as et
#---------------------------------------------------------------------------------------------
def createFolder(directory):
try:
if not os.path.exists(directory):
print("Make dir {}".format(directory))
os.makedirs(directory)
except OSError:
print()
print("=== Atention ===")
print ('Error: Creating directory. ' + directory)
sys.exit()
#---------------------------------------------------------------------------------------------
def CityscapeDatasetAnn(databaseName, cityscapeFolder, zipFolder, tmpFolder):
print()
print("{} annotations".format(databaseName))
zipFile = "gtFine_trainvaltest.zip"
if not os.path.isfile(os.path.join(zipFolder,zipFile)):
print()
print("=== Atention ===")
print("Please, first you need download {} to {}".format(zipFile, zipFolder) )
print("https://www.cityscapes-dataset.com/downloads/" )
sys.exit()
extractedFolder = os.path.join(tmpFolder,"gtFine/")
if os.path.exists(extractedFolder):
print("ZIP was already extracted to {}".format(extractedFolder))
else:
print("Extracting {} to {}".format(os.path.join(zipFolder,zipFile), tmpFolder))
zip_ref = zipfile.ZipFile(os.path.join(zipFolder,zipFile), 'r')
zip_ref.extractall(tmpFolder)
zip_ref.close()
for folderType in ["train","val"]:
extractedFolderAnn = os.path.join(extractedFolder, folderType)
outputFolderAnn = os.path.join(cityscapeFolder,folderType+".ann")
createFolder(outputFolderAnn)
imagesFolderType = os.path.join(cityscapeFolder,folderType)
cityscapeMask2Pascal(databaseName, extractedFolderAnn, outputFolderAnn, imagesFolderType)
#---------------------------------------------------------------------------------------------
def CityscapeDataset(datasetFolder, zipFolder, tmpFolder):
print("=====================================================================")
print("Cityscape dataset")
print("=====================================================================")
zipFile = "leftImg8bit_trainvaltest.zip"
if not os.path.isfile(os.path.join(zipFolder,zipFile)):
print()
print("=== Atention ===")
print("Please, first you need download {} to {}".format(zipFile, zipFolder) )
print("https://www.cityscapes-dataset.com/downloads/" )
sys.exit()
extractedFolder = os.path.join(tmpFolder,"leftImg8bit/")
if os.path.exists(extractedFolder):
print("ZIP was already extracted to {}".format(extractedFolder))
else:
print("Extracting {} to {}".format(os.path.join(zipFolder,zipFile), tmpFolder))
zip_ref = zipfile.ZipFile(os.path.join(zipFolder,zipFile), 'r')
zip_ref.extractall(tmpFolder)
zip_ref.close()
cityscapeFolder = os.path.join(datasetFolder,"Cityscape")
print("Cityscape dataset Folder {}".format(cityscapeFolder))
createFolder(cityscapeFolder)
for folderType in ["train","val"]:
cityscapefolderType = os.path.join(cityscapeFolder,folderType)
createFolder(cityscapefolderType)
print("Moving file from {} to {}".format(extractedFolder+folderType, cityscapefolderType))
for fileName in glob.glob(extractedFolder+folderType+'/**/*.png'):
destiny = os.path.join( cityscapefolderType, os.path.split(fileName)[1])
#print("copy file from {} to {}".format(fileName,destiny))
os.rename(fileName, destiny)
#-- Annotations
CityscapeDatasetAnn("Cityscape",cityscapeFolder, zipFolder, tmpFolder)
#---------------------------------------------------------------------------------------------
def FoggyCityscapeDataset(datasetFolder, zipFolder, tmpFolder):
print("=====================================================================")
print("Foggy Cityscape dataset")
print("=====================================================================")
zipFile = "leftImg8bit_trainvaltest_foggy.zip"
if not os.path.isfile(os.path.join(zipFolder,zipFile)):
print()
print("=== Atention ===")
print("Please, first you need download {} to {}".format(zipFile, zipFolder) )
print("https://www.cityscapes-dataset.com/downloads/" )
sys.exit()
extractedFolder = os.path.join(tmpFolder,"leftImg8bit_foggy/")
if os.path.exists(extractedFolder):
print("ZIP was already extracted to {}".format(tmpFolder))
else:
print("Extracting {} to {}".format(os.path.join(zipFolder,zipFile), tmpFolder))
zip_ref = zipfile.ZipFile(os.path.join(zipFolder,zipFile), 'r')
zip_ref.extractall(tmpFolder)
zip_ref.close()
for intensity in ["0.02"]:
print("Intensity {}".format(intensity))
foggyCityscapeFolder = os.path.join(datasetFolder,"FoggyCityscape",intensity)
print("foggy Cityscape dataset intensity {} Folder {}".format(intensity, foggyCityscapeFolder))
createFolder(foggyCityscapeFolder)
for folderType in ["train","val"]:
foggyCityscapefolderType = os.path.join(foggyCityscapeFolder,folderType)
createFolder(foggyCityscapefolderType)
print("Moving file from {} to {}".format(extractedFolder+folderType,foggyCityscapefolderType))
for fileName in glob.glob(extractedFolder+folderType+'/**/*'+intensity+'.png'):
destiny = os.path.join( foggyCityscapefolderType, os.path.split(fileName)[1])
#print("copy file from {} to {}".format(fileName,destiny))
os.rename(fileName, destiny)
foggyCityscapeFolderfolderTypeAnn = os.path.join(foggyCityscapeFolder,folderType+".ann")
createFolder(foggyCityscapeFolderfolderTypeAnn)
#-- Annotations
CityscapeDatasetAnn("FoggyCityscape",foggyCityscapeFolder, zipFolder, tmpFolder)
#---------------------------------------------------------------------------------------------
def KittiDataset(datasetFolder, zipFolder, tmpFolder):
print("=====================================================================")
print("Kitti dataset")
print("=====================================================================")
tmpFolder = os.path.join(tmpFolder,"kitti")
zipFiles = ["data_object_image_2.zip","data_object_label_2.zip","vod-converter-master.zip"]
first = True
for zipFile in zipFiles:
if not os.path.isfile(os.path.join(zipFolder,zipFile)):
if first:
print()
print("=== Atention ===")
print("Please, first you need download {} to {}".format(zipFile, zipFolder) )
first = False
print("http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d")
print("https://github.com/umautobots/vod-converter")
if not first:
sys.exit()
extractedFolder = os.path.join(tmpFolder,"training")
if os.path.exists(extractedFolder):
print("ZIP was already extracted to {}".format(extractedFolder))
else:
zipFiles = ["data_object_image_2.zip","data_object_label_2.zip","vod-converter-master.zip"]
for zipFile in zipFiles:
print("Extracting {} to {}".format(os.path.join(zipFolder,zipFile), tmpFolder))
zip_ref = zipfile.ZipFile(os.path.join(zipFolder,zipFile), 'r')
zip_ref.extractall(tmpFolder)
zip_ref.close()
print("Creating file train.txt")
text_value = ""
for fileName in glob.glob(extractedFolder+'/**/*.png'):
text_value = text_value + os.path.splitext(os.path.basename(fileName))[0] + "\n"
text_file = open(os.path.join(tmpFolder,"train.txt"), "w")
text_file.write(text_value)
text_file.close()
print("Annotation convert")
sys.path.insert(0,os.path.join(tmpFolder,"vod-converter-master"))
sys.path.insert(0,os.path.join(tmpFolder,"vod-converter-master","vod_converter"))
from vod_converter import main as kittiConverter
kittiConverter.main(from_path=tmpFolder, from_key="kitti",
to_path=tmpFolder, to_key="voc",
select_only_known_labels=False,
filter_images_without_labels=False)
kittiFolder = os.path.join(datasetFolder,"Kitti")
createFolder(kittiFolder)
folderType = "train"
kittifolderType = os.path.join(kittiFolder,folderType)
createFolder(kittifolderType)
extractedFolder = os.path.join(tmpFolder,"VOC2012","JPEGImages")
print("Moving file from {} to {}".format(extractedFolder, kittifolderType))
for fileName in glob.glob(extractedFolder+'/*.png'):
destiny = os.path.join( kittifolderType, os.path.split(fileName)[1])
os.rename(fileName, destiny)
kittifolderType = kittifolderType + ".ann"
createFolder(kittifolderType)
extractedFolder = os.path.join(tmpFolder,"VOC2012","Annotations")
print("Moving file from {} to {}".format(extractedFolder, kittifolderType))
for fileName in glob.glob(extractedFolder+'/*.xml'):
destiny = os.path.join( kittifolderType, os.path.split(fileName)[1])
os.rename(fileName, destiny)
#---------------------------------------------------------------------------------------------
def Sim10KDataset(datasetFolder, zipFolder, tmpFolder):
print("=====================================================================")
print("Sim10K dataset")
print("=====================================================================")
tmpFolder = os.path.join(tmpFolder,"Sim10K")
zipFiles = ["repro_10k_annotations.tgz","repro_10k_images.tgz"]
first = True
for zipFile in zipFiles:
if not os.path.isfile(os.path.join(zipFolder,zipFile)):
if first:
print()
print("=== Atention ===")
print("Please, first you need download {} to {}".format(zipFile, zipFolder) )
first = False
print("https://fcav.engin.umich.edu/sim-dataset/")
if not first:
sys.exit()
extractedFolder = os.path.join(tmpFolder,"VOC2012")
if os.path.exists(extractedFolder):
print("ZIP was already extracted to {}".format(extractedFolder))
else:
zipFiles = ["repro_10k_images.tgz","repro_10k_annotations.tgz"]
for zipFile in zipFiles:
print("Extracting {} to {}".format(os.path.join(zipFolder,zipFile), tmpFolder))
zip_ref = tarfile.open(os.path.join(zipFolder,zipFile), 'r')
zip_ref.extractall(tmpFolder)
zip_ref.close()
extractedFolder = os.path.join(tmpFolder,"VOC2012","Annotations")
print("Adjust filename proper into xml files")
for xml_file in glob.glob(extractedFolder+'/*.xml'):
tree = et.parse(xml_file)
tree.find('.//filename').text = os.path.split(xml_file)[1].split(".")[0]+".jpg"
tree.write(xml_file)
Sim10kFolder = os.path.join(datasetFolder,"Sim10k")
createFolder(Sim10kFolder)
count = 0
for folderType in ["train","val"]:
Sim10kFolderType = os.path.join(Sim10kFolder,folderType)
createFolder(Sim10kFolderType)
Sim10kFolderType = Sim10kFolderType + ".ann"
createFolder(Sim10kFolderType)
extractedFolder = os.path.join(tmpFolder,"VOC2012","JPEGImages")
print("Moving file from {} to {}".format(extractedFolder, Sim10kFolderType))
for fileName in glob.glob(extractedFolder+'/*.jpg'):
destiny = os.path.join( Sim10kFolderType, os.path.split(fileName)[1])
os.rename(fileName, destiny)
extractedFolder = os.path.join(tmpFolder,"VOC2012","Annotations")
print("Moving file from {} to {}".format(extractedFolder, Sim10kFolderType))
for fileName in glob.glob(extractedFolder+'/*.xml'):
destiny = os.path.join( Sim10kFolderType, os.path.split(fileName)[1])
os.rename(fileName, destiny)
#---------------------------------------------------------------------------------------------
if __name__ == "__main__":
print("=====================================================================")
datasetFolder = "F:/datasets/"
print("Set Datasets folder = {}".format(datasetFolder))
print("=====================================================================")
if sys.version_info.major<3 or (sys.version_info.major==3 and sys.version_info.minor<7) :
print("This code need python version >= 3.7.X")
sys.exit()
zipFolder = os.path.join(datasetFolder,"zip")
print("ZIP Folder {}".format(zipFolder))
createFolder(zipFolder)
tmpFolder = os.path.join(datasetFolder,"tmp")
print("TMP Folder {}".format(tmpFolder))
createFolder(tmpFolder)
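# Expected contents of the zip folder before running (comment only; these
# file names are the ones checked by the functions above):
#   gtFine_trainvaltest.zip, leftImg8bit_trainvaltest.zip,
#   leftImg8bit_trainvaltest_foggy.zip,
#   data_object_image_2.zip, data_object_label_2.zip, vod-converter-master.zip,
#   repro_10k_images.tgz, repro_10k_annotations.tgz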
print()
#CityscapeDataset(datasetFolder, zipFolder, tmpFolder)
print()
#FoggyCityscapeDataset(datasetFolder, zipFolder, tmpFolder)
print()
#KittiDataset(datasetFolder, zipFolder, tmpFolder)
print()
Sim10KDataset(datasetFolder, zipFolder, tmpFolder)
|
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases() # NOQA
from logging import getLogger
import chainer
from chainer import functions as F
from chainer.initializers import LeCunNormal
from chainer import links as L
import numpy as np
from chainerrl import distribution
from chainerrl.functions.bound_by_tanh import bound_by_tanh
from chainerrl import links
from chainerrl.policy import Policy
logger = getLogger(__name__)
class FCGaussianPolicy(chainer.ChainList, Policy):
"""Gaussian policy that consists of fully-connected layers.
This model has two output layers: the mean layer and the variance layer.
The mean of the Gaussian is computed as follows:
Let y be the output of the mean layer.
If bound_mean=False:
mean = y
If bound_mean=True:
mean = min_action + (tanh(y) + 1) * (max_action - min_action) / 2
The variance of the Gaussian is computed as follows:
Let y be the output of the variance layer.
variance = softplus(y) + min_var
Args:
n_input_channels (int): Number of input channels.
action_size (int): Number of dimensions of the action space.
n_hidden_layers (int): Number of hidden layers.
n_hidden_channels (int): Number of hidden channels.
min_action (ndarray): Minimum action. Used only when bound_mean=True.
max_action (ndarray): Maximum action. Used only when bound_mean=True.
var_type (str): Type of parameterization of variance. It must be
'spherical' or 'diagonal'.
nonlinearity (callable): Nonlinearity placed between layers.
mean_wscale (float): Scale of weight initialization of the mean layer.
var_wscale (float): Scale of weight initialization of the variance
layer.
var_bias (float): The initial value of the bias parameter for the
variance layer.
min_var (float): Minimum value of the variance.
"""
def __init__(self, n_input_channels, action_size,
n_hidden_layers=0, n_hidden_channels=None,
min_action=None, max_action=None, bound_mean=False,
var_type='spherical', nonlinearity=F.relu,
mean_wscale=1, var_wscale=1, var_bias=0,
min_var=0):
self.n_input_channels = n_input_channels
self.action_size = action_size
self.n_hidden_layers = n_hidden_layers
self.n_hidden_channels = n_hidden_channels
self.min_action = min_action
self.max_action = max_action
self.bound_mean = bound_mean
self.nonlinearity = nonlinearity
self.min_var = min_var
var_size = {'spherical': 1, 'diagonal': action_size}[var_type]
self.hidden_layers = []
if n_hidden_layers > 0:
self.hidden_layers.append(
L.Linear(n_input_channels, n_hidden_channels))
for _ in range(n_hidden_layers - 1):
self.hidden_layers.append(
L.Linear(n_hidden_channels, n_hidden_channels))
self.mean_layer = L.Linear(n_hidden_channels, action_size,
initialW=LeCunNormal(mean_wscale))
self.var_layer = L.Linear(n_hidden_channels, var_size,
initialW=LeCunNormal(var_wscale),
initial_bias=var_bias)
else:
self.mean_layer = L.Linear(n_input_channels, action_size,
initialW=LeCunNormal(mean_wscale))
self.var_layer = L.Linear(n_input_channels, var_size,
initialW=LeCunNormal(var_wscale),
initial_bias=var_bias)
super().__init__(
self.mean_layer, self.var_layer, *self.hidden_layers)
def compute_mean_and_var(self, x):
h = x
for layer in self.hidden_layers:
h = self.nonlinearity(layer(h))
mean = self.mean_layer(h)
if self.bound_mean:
mean = bound_by_tanh(mean, self.min_action, self.max_action)
var = F.broadcast_to(F.softplus(self.var_layer(h)), mean.shape) + \
self.min_var
return mean, var
def __call__(self, x):
mean, var = self.compute_mean_and_var(x)
return distribution.GaussianDistribution(mean, var=var)
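# Illustrative usage sketch (comment only; sizes are arbitrary):
#
#   policy = FCGaussianPolicy(n_input_channels=4, action_size=2,
#                             n_hidden_layers=2, n_hidden_channels=64)
#   obs = np.zeros((1, 4), dtype=np.float32)
#   dist = policy(obs)              # chainerrl GaussianDistribution
#   action = dist.sample()          # draw an action
#   log_prob = dist.log_prob(action)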
class FCGaussianPolicyWithStateIndependentCovariance(
chainer.Chain, Policy):
"""Gaussian policy that consists of FC layers with parametrized covariance.
This model has one output layer: the mean layer.
The mean of the Gaussian is computed in the same way as FCGaussianPolicy.
Args:
n_input_channels (int): Number of input channels.
action_size (int): Number of dimensions of the action space.
n_hidden_layers (int): Number of hidden layers.
n_hidden_channels (int): Number of hidden channels.
min_action (ndarray): Minimum action. Used only when bound_mean=True.
max_action (ndarray): Maximum action. Used only when bound_mean=True.
var_type (str): Type of parameterization of variance. It must be
'spherical' or 'diagonal'.
nonlinearity (callable): Nonlinearity placed between layers.
mean_wscale (float): Scale of weight initialization of the mean layer.
var_func (callable): Callable that computes the variance from the var
parameter. It should always return positive values.
        var_param_init (float): Initial value of the var parameter.
"""
def __init__(self, n_input_channels, action_size,
n_hidden_layers=0, n_hidden_channels=None,
min_action=None, max_action=None, bound_mean=False,
var_type='spherical',
nonlinearity=F.relu,
mean_wscale=1,
var_func=F.softplus,
var_param_init=0,
):
self.n_input_channels = n_input_channels
self.action_size = action_size
self.n_hidden_layers = n_hidden_layers
self.n_hidden_channels = n_hidden_channels
self.min_action = min_action
self.max_action = max_action
self.bound_mean = bound_mean
self.nonlinearity = nonlinearity
self.var_func = var_func
var_size = {'spherical': 1, 'diagonal': action_size}[var_type]
layers = []
layers.append(L.Linear(n_input_channels, n_hidden_channels))
for _ in range(n_hidden_layers - 1):
layers.append(self.nonlinearity)
layers.append(L.Linear(n_hidden_channels, n_hidden_channels))
layers.append(self.nonlinearity)
# The last layer is used to compute the mean
layers.append(
L.Linear(n_hidden_channels, action_size,
initialW=LeCunNormal(mean_wscale)))
if self.bound_mean:
layers.append(lambda x: bound_by_tanh(
x, self.min_action, self.max_action))
super().__init__()
with self.init_scope():
self.hidden_layers = links.Sequence(*layers)
self.var_param = chainer.Parameter(
initializer=var_param_init, shape=(var_size,))
def __call__(self, x):
mean = self.hidden_layers(x)
var = F.broadcast_to(self.var_func(self.var_param), mean.shape)
return distribution.GaussianDistribution(mean, var)
class FCGaussianPolicyWithFixedCovariance(links.Sequence, Policy):
"""Gaussian policy that consists of FC layers with fixed covariance.
    This model has one output layer: the mean layer.
The mean of the Gaussian is computed in the same way as FCGaussianPolicy.
The variance of the Gaussian must be specified as an argument.
Args:
n_input_channels (int): Number of input channels.
action_size (int): Number of dimensions of the action space.
var (float or ndarray): Variance of the Gaussian distribution.
n_hidden_layers (int): Number of hidden layers.
n_hidden_channels (int): Number of hidden channels.
min_action (ndarray): Minimum action. Used only when bound_mean=True.
max_action (ndarray): Maximum action. Used only when bound_mean=True.
nonlinearity (callable): Nonlinearity placed between layers.
mean_wscale (float): Scale of weight initialization of the mean layer.
"""
def __init__(self, n_input_channels, action_size, var,
n_hidden_layers=0, n_hidden_channels=None,
min_action=None, max_action=None, bound_mean=False,
nonlinearity=F.relu, mean_wscale=1):
self.n_input_channels = n_input_channels
self.action_size = action_size
self.n_hidden_layers = n_hidden_layers
self.n_hidden_channels = n_hidden_channels
self.min_action = min_action
self.max_action = max_action
self.bound_mean = bound_mean
self.nonlinearity = nonlinearity
if np.isscalar(var):
self.var = np.full(action_size, var, dtype=np.float32)
else:
self.var = var
layers = []
if n_hidden_layers > 0:
# Input to hidden
layers.append(L.Linear(n_input_channels, n_hidden_channels))
layers.append(self.nonlinearity)
for _ in range(n_hidden_layers - 1):
# Hidden to hidden
layers.append(L.Linear(n_hidden_channels, n_hidden_channels))
layers.append(self.nonlinearity)
# The last layer is used to compute the mean
layers.append(
L.Linear(n_hidden_channels, action_size,
initialW=LeCunNormal(mean_wscale)))
else:
# There's only one layer for computing the mean
layers.append(
L.Linear(n_input_channels, action_size,
initialW=LeCunNormal(mean_wscale)))
if self.bound_mean:
layers.append(lambda x: bound_by_tanh(
x, self.min_action, self.max_action))
def get_var_array(shape):
self.var = self.xp.asarray(self.var)
return self.xp.broadcast_to(self.var, shape)
layers.append(lambda x: distribution.GaussianDistribution(
x, get_var_array(x.shape)))
super().__init__(*layers)
class GaussianHeadWithStateIndependentCovariance(chainer.Chain):
"""Gaussian head with state-independent learned covariance.
This link is intended to be attached to a neural network that outputs
the mean of a Gaussian policy. The only learnable parameter this link has
determines the variance in a state-independent way.
State-independent parameterization of the variance of a Gaussian policy
is often used with PPO and TRPO, e.g., in https://arxiv.org/abs/1709.06560.
Args:
action_size (int): Number of dimensions of the action space.
var_type (str): Type of parameterization of variance. It must be
'spherical' or 'diagonal'.
var_func (callable): Callable that computes the variance from the var
parameter. It should always return positive values.
        var_param_init (float): Initial value of the var parameter.
"""
def __init__(
self,
action_size,
var_type='spherical',
var_func=F.softplus,
var_param_init=0,
):
self.var_func = var_func
var_size = {'spherical': 1, 'diagonal': action_size}[var_type]
super().__init__()
with self.init_scope():
self.var_param = chainer.Parameter(
initializer=var_param_init, shape=(var_size,))
def __call__(self, mean):
"""Return a Gaussian with given mean.
Args:
mean (chainer.Variable or ndarray): Mean of Gaussian.
Returns:
chainerrl.distribution.Distribution: Gaussian whose mean is the
mean argument and whose variance is computed from the parameter
of this link.
"""
var = F.broadcast_to(self.var_func(self.var_param), mean.shape)
return distribution.GaussianDistribution(mean, var)
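# A minimal, runnable sketch (added for illustration, not part of the original
# module): pair a one-layer mean network with the state-independent-covariance
# head defined above. The observation and action sizes are hypothetical.
if __name__ == '__main__':
    obs_size, action_size = 4, 2
    mean_net = L.Linear(obs_size, action_size)
    head = GaussianHeadWithStateIndependentCovariance(
        action_size, var_type='diagonal')
    obs = np.zeros((1, obs_size), dtype=np.float32)
    # The head turns the mean output into a full Gaussian action distribution.
    action_distrib = head(mean_net(obs))
    print(action_distrib.sample().shape)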
|
|
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
# Copyright (c) 2016 Mike Rooney. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from lxml import etree
import mock
import six
import time
from cinder import exception
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap.client import (
fakes as fake_client)
import cinder.tests.unit.volume.drivers.netapp.dataontap.fakes as fake
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.client import client_base
CONNECTION_INFO = {'hostname': 'hostname',
'transport_type': 'https',
'port': 443,
'username': 'admin',
'password': 'passw0rd'}
class NetAppBaseClientTestCase(test.TestCase):
def setUp(self):
super(NetAppBaseClientTestCase, self).setUp()
self.mock_object(client_base, 'LOG')
self.mock_object(client_base.Client, '_init_ssh_client')
self.client = client_base.Client(**CONNECTION_INFO)
self.client.connection = mock.MagicMock()
self.client.connection.get_api_version.return_value = (1, 100)
self.client.ssh_client = mock.MagicMock()
self.connection = self.client.connection
self.fake_volume = six.text_type(uuid.uuid4())
self.fake_lun = six.text_type(uuid.uuid4())
self.fake_size = '1024'
self.fake_metadata = {'OsType': 'linux', 'SpaceReserved': 'true'}
self.mock_send_request = self.mock_object(self.client, 'send_request')
def test_get_ontapi_version(self):
version_response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<major-version>1</major-version>
<minor-version>19</minor-version>
</results>"""))
self.connection.invoke_successfully.return_value = version_response
major, minor = self.client.get_ontapi_version(cached=False)
self.assertEqual('1', major)
self.assertEqual('19', minor)
def test_get_ontapi_version_cached(self):
self.connection.get_api_version.return_value = (1, 20)
major, minor = self.client.get_ontapi_version()
self.assertEqual(1, self.connection.get_api_version.call_count)
self.assertEqual(1, major)
self.assertEqual(20, minor)
def test_check_is_naelement(self):
element = netapp_api.NaElement('name')
self.assertIsNone(self.client.check_is_naelement(element))
self.assertRaises(ValueError, self.client.check_is_naelement, None)
def test_create_lun(self):
expected_path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
with mock.patch.object(netapp_api.NaElement,
'create_node_with_children',
) as mock_create_node:
self.client.create_lun(self.fake_volume,
self.fake_lun,
self.fake_size,
self.fake_metadata)
mock_create_node.assert_called_once_with(
'lun-create-by-size',
**{'path': expected_path,
'size': self.fake_size,
'ostype': self.fake_metadata['OsType'],
'space-reservation-enabled':
self.fake_metadata['SpaceReserved']})
self.connection.invoke_successfully.assert_called_once_with(
mock.ANY, True)
def test_create_lun_exact_size(self):
expected_path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
self.connection.get_api_version.return_value = (1, 110)
with mock.patch.object(netapp_api.NaElement,
'create_node_with_children',
) as mock_create_node:
self.client.create_lun(self.fake_volume,
self.fake_lun,
self.fake_size,
self.fake_metadata)
mock_create_node.assert_called_once_with(
'lun-create-by-size',
**{'path': expected_path,
'size': self.fake_size,
'ostype': self.fake_metadata['OsType'],
'use-exact-size': 'true',
'space-reservation-enabled':
self.fake_metadata['SpaceReserved']})
self.connection.invoke_successfully.assert_called_once_with(
mock.ANY, True)
def test_create_lun_with_qos_policy_group_name(self):
expected_path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
expected_qos_group_name = 'qos_1'
mock_request = mock.Mock()
with mock.patch.object(netapp_api.NaElement,
'create_node_with_children',
return_value=mock_request
) as mock_create_node:
self.client.create_lun(
self.fake_volume,
self.fake_lun,
self.fake_size,
self.fake_metadata,
qos_policy_group_name=expected_qos_group_name)
mock_create_node.assert_called_once_with(
'lun-create-by-size',
**{'path': expected_path, 'size': self.fake_size,
'ostype': self.fake_metadata['OsType'],
'space-reservation-enabled':
self.fake_metadata['SpaceReserved']})
mock_request.add_new_child.assert_called_once_with(
'qos-policy-group', expected_qos_group_name)
self.connection.invoke_successfully.assert_called_once_with(
mock.ANY, True)
def test_create_lun_raises_on_failure(self):
self.connection.invoke_successfully = mock.Mock(
side_effect=netapp_api.NaApiError)
self.assertRaises(netapp_api.NaApiError,
self.client.create_lun,
self.fake_volume,
self.fake_lun,
self.fake_size,
self.fake_metadata)
def test_destroy_lun(self):
path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
with mock.patch.object(netapp_api.NaElement,
'create_node_with_children',
) as mock_create_node:
self.client.destroy_lun(path)
mock_create_node.assert_called_once_with(
'lun-destroy',
**{'path': path})
self.connection.invoke_successfully.assert_called_once_with(
mock.ANY, True)
def test_destroy_lun_force(self):
path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
mock_request = mock.Mock()
with mock.patch.object(netapp_api.NaElement,
'create_node_with_children',
return_value=mock_request
) as mock_create_node:
self.client.destroy_lun(path)
mock_create_node.assert_called_once_with('lun-destroy',
**{'path': path})
mock_request.add_new_child.assert_called_once_with('force', 'true')
self.connection.invoke_successfully.assert_called_once_with(
mock.ANY, True)
def test_map_lun(self):
path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
igroup = 'igroup'
expected_lun_id = 'my_lun'
mock_response = mock.Mock()
self.connection.invoke_successfully.return_value = mock_response
mock_response.get_child_content.return_value = expected_lun_id
with mock.patch.object(netapp_api.NaElement,
'create_node_with_children',
) as mock_create_node:
actual_lun_id = self.client.map_lun(path, igroup)
mock_create_node.assert_called_once_with(
'lun-map',
**{'path': path, 'initiator-group': igroup})
self.connection.invoke_successfully.assert_called_once_with(
mock.ANY, True)
self.assertEqual(expected_lun_id, actual_lun_id)
def test_map_lun_with_lun_id(self):
path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
igroup = 'igroup'
expected_lun_id = 'my_lun'
mock_response = mock.Mock()
self.connection.invoke_successfully.return_value = mock_response
mock_response.get_child_content.return_value = expected_lun_id
with mock.patch.object(netapp_api.NaElement,
'create_node_with_children',
) as mock_create_node:
actual_lun_id = self.client.map_lun(path, igroup,
lun_id=expected_lun_id)
mock_create_node.assert_called_once_with(
'lun-map',
**{'path': path, 'initiator-group': igroup})
self.connection.invoke_successfully.assert_called_once_with(
mock.ANY, True)
self.assertEqual(expected_lun_id, actual_lun_id)
def test_map_lun_with_api_error(self):
path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
igroup = 'igroup'
self.connection.invoke_successfully.side_effect =\
netapp_api.NaApiError()
with mock.patch.object(netapp_api.NaElement,
'create_node_with_children',
) as mock_create_node:
self.assertRaises(netapp_api.NaApiError, self.client.map_lun,
path, igroup)
mock_create_node.assert_called_once_with(
'lun-map',
**{'path': path, 'initiator-group': igroup})
self.connection.invoke_successfully.assert_called_once_with(
mock.ANY, True)
def test_unmap_lun(self):
path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
igroup = 'igroup'
mock_response = mock.Mock()
self.connection.invoke_successfully.return_value = mock_response
with mock.patch.object(netapp_api.NaElement,
'create_node_with_children',
) as mock_create_node:
self.client.unmap_lun(path, igroup)
mock_create_node.assert_called_once_with(
'lun-unmap',
**{'path': path, 'initiator-group': igroup})
self.connection.invoke_successfully.assert_called_once_with(
mock.ANY, True)
def test_unmap_lun_with_api_error(self):
path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
igroup = 'igroup'
self.connection.invoke_successfully.side_effect =\
netapp_api.NaApiError()
with mock.patch.object(netapp_api.NaElement,
'create_node_with_children',
) as mock_create_node:
self.assertRaises(netapp_api.NaApiError, self.client.unmap_lun,
path, igroup)
mock_create_node.assert_called_once_with(
'lun-unmap',
**{'path': path, 'initiator-group': igroup})
def test_unmap_lun_already_unmapped(self):
path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
igroup = 'igroup'
EINVALIDINPUTERROR = '13115'
self.connection.invoke_successfully.side_effect =\
netapp_api.NaApiError(code=EINVALIDINPUTERROR)
with mock.patch.object(netapp_api.NaElement,
'create_node_with_children',
) as mock_create_node:
self.client.unmap_lun(path, igroup)
mock_create_node.assert_called_once_with(
'lun-unmap',
**{'path': path, 'initiator-group': igroup})
self.connection.invoke_successfully.assert_called_once_with(
mock.ANY, True)
def test_unmap_lun_lun_not_mapped_in_group(self):
path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
igroup = 'igroup'
EVDISK_ERROR_NO_SUCH_LUNMAP = '9016'
self.connection.invoke_successfully.side_effect =\
netapp_api.NaApiError(code=EVDISK_ERROR_NO_SUCH_LUNMAP)
with mock.patch.object(netapp_api.NaElement,
'create_node_with_children',
) as mock_create_node:
self.client.unmap_lun(path, igroup)
mock_create_node.assert_called_once_with(
'lun-unmap',
**{'path': path, 'initiator-group': igroup})
self.connection.invoke_successfully.assert_called_once_with(
mock.ANY, True)
def test_create_igroup(self):
igroup = 'igroup'
with mock.patch.object(netapp_api.NaElement,
'create_node_with_children',
) as mock_create_node:
self.client.create_igroup(igroup)
mock_create_node.assert_called_once_with(
'igroup-create',
**{'initiator-group-name': igroup,
'initiator-group-type': 'iscsi',
'os-type': 'default'})
self.connection.invoke_successfully.assert_called_once_with(
mock.ANY, True)
def test_add_igroup_initiator(self):
igroup = 'igroup'
initiator = 'initiator'
with mock.patch.object(netapp_api.NaElement,
'create_node_with_children',
) as mock_create_node:
self.client.add_igroup_initiator(igroup, initiator)
mock_create_node.assert_called_once_with(
'igroup-add',
**{'initiator-group-name': igroup,
'initiator': initiator})
self.connection.invoke_successfully.assert_called_once_with(
mock.ANY, True)
def test_do_direct_resize(self):
path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
new_size = 1024
mock_request = mock.Mock()
with mock.patch.object(netapp_api.NaElement,
'create_node_with_children',
return_value=mock_request
) as mock_create_node:
self.client.do_direct_resize(path, new_size)
mock_create_node.assert_called_once_with(
'lun-resize',
**{'path': path,
'size': new_size})
mock_request.add_new_child.assert_called_once_with(
'force', 'true')
self.connection.invoke_successfully.assert_called_once_with(
mock.ANY, True)
def test_do_direct_resize_not_forced(self):
path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
new_size = 1024
mock_request = mock.Mock()
with mock.patch.object(netapp_api.NaElement,
'create_node_with_children',
return_value=mock_request
) as mock_create_node:
self.client.do_direct_resize(path, new_size, force=False)
mock_create_node.assert_called_once_with(
'lun-resize',
**{'path': path,
'size': new_size})
self.assertFalse(mock_request.add_new_child.called)
self.connection.invoke_successfully.assert_called_once_with(
mock.ANY, True)
def test_get_lun_geometry(self):
expected_keys = set(['size', 'bytes_per_sector', 'sectors_per_track',
'tracks_per_cylinder', 'cylinders', 'max_resize'])
path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
mock_response = mock.Mock()
self.connection.invoke_successfully.return_value = mock_response
geometry = self.client.get_lun_geometry(path)
self.assertEqual(expected_keys, set(geometry.keys()))
def test_get_lun_geometry_with_api_error(self):
path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
self.connection.invoke_successfully.side_effect =\
netapp_api.NaApiError()
geometry = self.client.get_lun_geometry(path)
self.assertEqual({}, geometry)
def test_get_volume_options(self):
fake_response = netapp_api.NaElement('volume')
fake_response.add_node_with_children('options', test='blah')
self.connection.invoke_successfully.return_value = fake_response
options = self.client.get_volume_options('volume')
self.assertEqual(1, len(options))
def test_get_volume_options_with_no_options(self):
fake_response = netapp_api.NaElement('options')
self.connection.invoke_successfully.return_value = fake_response
options = self.client.get_volume_options('volume')
self.assertEqual([], options)
def test_move_lun(self):
path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
new_path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
fake_response = netapp_api.NaElement('options')
self.connection.invoke_successfully.return_value = fake_response
self.client.move_lun(path, new_path)
self.connection.invoke_successfully.assert_called_once_with(
mock.ANY, True)
def test_get_igroup_by_initiators(self):
self.assertRaises(NotImplementedError,
self.client.get_igroup_by_initiators,
fake.FC_FORMATTED_INITIATORS)
def test_get_fc_target_wwpns(self):
self.assertRaises(NotImplementedError,
self.client.get_fc_target_wwpns)
def test_has_luns_mapped_to_initiator(self):
initiator = fake.FC_FORMATTED_INITIATORS[0]
version_response = netapp_api.NaElement(
etree.XML("""
<results status="passed">
<lun-maps>
<lun-map-info>
<path>/vol/cinder1/volume-9be956b3-9854-4a5c-a7f5-13a16da52c9c</path>
<initiator-group>openstack-4b57a80b-ebca-4d27-bd63-48ac5408d08b
</initiator-group>
<lun-id>0</lun-id>
</lun-map-info>
<lun-map-info>
<path>/vol/cinder1/volume-ac90433c-a560-41b3-9357-7f3f80071eb5</path>
<initiator-group>openstack-4b57a80b-ebca-4d27-bd63-48ac5408d08b
</initiator-group>
<lun-id>1</lun-id>
</lun-map-info>
</lun-maps>
</results>"""))
self.connection.invoke_successfully.return_value = version_response
self.assertTrue(self.client._has_luns_mapped_to_initiator(initiator))
def test_has_luns_mapped_to_initiator_not_mapped(self):
initiator = fake.FC_FORMATTED_INITIATORS[0]
version_response = netapp_api.NaElement(
etree.XML("""
<results status="passed">
<lun-maps />
</results>"""))
self.connection.invoke_successfully.return_value = version_response
self.assertFalse(self.client._has_luns_mapped_to_initiator(initiator))
@mock.patch.object(client_base.Client, '_has_luns_mapped_to_initiator')
def test_has_luns_mapped_to_initiators(self,
mock_has_luns_mapped_to_initiator):
initiators = fake.FC_FORMATTED_INITIATORS
mock_has_luns_mapped_to_initiator.return_value = True
self.assertTrue(self.client.has_luns_mapped_to_initiators(initiators))
@mock.patch.object(client_base.Client, '_has_luns_mapped_to_initiator')
def test_has_luns_mapped_to_initiators_not_mapped(
self, mock_has_luns_mapped_to_initiator):
initiators = fake.FC_FORMATTED_INITIATORS
mock_has_luns_mapped_to_initiator.return_value = False
self.assertFalse(self.client.has_luns_mapped_to_initiators(initiators))
def test_get_performance_counter_info(self):
self.mock_send_request.return_value = netapp_api.NaElement(
fake_client.PERF_OBJECT_COUNTER_LIST_INFO_WAFL_RESPONSE)
result = self.client.get_performance_counter_info('wafl',
'cp_phase_times')
expected = {
'name': 'cp_phase_times',
'base-counter': 'total_cp_msecs',
'labels': fake_client.PERF_OBJECT_COUNTER_TOTAL_CP_MSECS_LABELS,
}
self.assertEqual(expected, result)
perf_object_counter_list_info_args = {'objectname': 'wafl'}
self.mock_send_request.assert_called_once_with(
'perf-object-counter-list-info',
perf_object_counter_list_info_args, enable_tunneling=False)
def test_get_performance_counter_info_not_found(self):
self.mock_send_request.return_value = netapp_api.NaElement(
fake_client.PERF_OBJECT_COUNTER_LIST_INFO_WAFL_RESPONSE)
self.assertRaises(exception.NotFound,
self.client.get_performance_counter_info,
'wafl',
'invalid')
def test_delete_snapshot(self):
api_args = {
'volume': fake.SNAPSHOT['volume_id'],
'snapshot': fake.SNAPSHOT['name'],
}
self.mock_object(self.client, 'send_request')
self.client.delete_snapshot(api_args['volume'],
api_args['snapshot'])
asserted_api_args = {
'volume': api_args['volume'],
'snapshot': api_args['snapshot'],
}
self.client.send_request.assert_called_once_with('snapshot-delete',
asserted_api_args)
def test_create_cg_snapshot(self):
self.mock_object(self.client, '_start_cg_snapshot',
return_value=fake.CONSISTENCY_GROUP_ID)
self.mock_object(self.client, '_commit_cg_snapshot')
self.client.create_cg_snapshot([fake.CG_VOLUME_NAME],
fake.CG_SNAPSHOT_NAME)
self.client._commit_cg_snapshot.assert_called_once_with(
fake.CONSISTENCY_GROUP_ID)
def test_create_cg_snapshot_no_id(self):
self.mock_object(self.client, '_start_cg_snapshot', return_value=None)
self.assertRaises(exception.VolumeBackendAPIException,
self.client.create_cg_snapshot,
[fake.CG_VOLUME_NAME],
fake.CG_SNAPSHOT_NAME)
def test_start_cg_snapshot(self):
snapshot_init = {
'snapshot': fake.CG_SNAPSHOT_NAME,
'timeout': 'relaxed',
'volumes': [{'volume-name': fake.CG_VOLUME_NAME}],
}
self.mock_object(self.client, 'send_request')
self.client._start_cg_snapshot([fake.CG_VOLUME_NAME],
snapshot_init['snapshot'])
self.client.send_request.assert_called_once_with('cg-start',
snapshot_init)
def test_commit_cg_snapshot(self):
snapshot_commit = {'cg-id': fake.CG_VOLUME_ID}
self.mock_object(self.client, 'send_request')
self.client._commit_cg_snapshot(snapshot_commit['cg-id'])
self.client.send_request.assert_called_once_with(
'cg-commit', {'cg-id': snapshot_commit['cg-id']})
def test_wait_for_busy_snapshot_raise_exception(self):
BUSY_SNAPSHOT = dict(fake.SNAPSHOT)
BUSY_SNAPSHOT['busy'] = True
# Need to mock sleep as it is called by @utils.retry
self.mock_object(time, 'sleep')
mock_get_snapshot = self.mock_object(self.client, 'get_snapshot',
return_value=BUSY_SNAPSHOT)
self.assertRaises(exception.SnapshotIsBusy,
self.client.wait_for_busy_snapshot,
fake.FLEXVOL, fake.SNAPSHOT_NAME)
calls = [
mock.call(fake.FLEXVOL, fake.SNAPSHOT_NAME),
mock.call(fake.FLEXVOL, fake.SNAPSHOT_NAME),
mock.call(fake.FLEXVOL, fake.SNAPSHOT_NAME),
]
mock_get_snapshot.assert_has_calls(calls)
def test_rename_snapshot(self):
self.mock_object(self.client, 'send_request')
self.client.rename_snapshot(
fake.SNAPSHOT['volume_id'], fake.SNAPSHOT_NAME,
client_base.DELETED_PREFIX + fake.SNAPSHOT_NAME)
api_args = {
'volume': fake.SNAPSHOT['volume_id'],
'current-name': fake.SNAPSHOT_NAME,
'new-name':
client_base.DELETED_PREFIX + fake.SNAPSHOT_NAME,
}
self.client.send_request.assert_called_once_with(
'snapshot-rename', api_args)
|
|
import re
import os.path
from django.core.urlresolvers import reverse
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _, ugettext
from guardian.shortcuts import assign, get_objects_for_user
from taggit.managers import TaggableManager
from projects.models import Project
from projects import constants
from .constants import BUILD_STATE, BUILD_TYPES, VERSION_TYPES
class VersionManager(models.Manager):
def _filter_queryset(self, user, project, privacy_level, only_active):
if isinstance(privacy_level, basestring):
privacy_level = (privacy_level,)
queryset = Version.objects.filter(privacy_level__in=privacy_level)
# Remove this so we can use public() for all active public projects
#if not user and not project:
#return queryset
if user and user.is_authenticated():
# Add in possible user-specific views
user_queryset = get_objects_for_user(user, 'builds.view_version')
queryset = user_queryset | queryset
elif user:
# Hack around get_objects_for_user not supporting global perms
global_access = user.has_perm('builds.view_version')
if global_access:
queryset = Version.objects.all()
if project:
# Filter by project if requested
queryset = queryset.filter(project=project)
if only_active:
queryset = queryset.filter(active=True)
return queryset
def active(self, user=None, project=None, *args, **kwargs):
queryset = self._filter_queryset(
user,
project,
privacy_level=(constants.PUBLIC, constants.PROTECTED,
constants.PRIVATE),
only_active=True,
)
return queryset.filter(*args, **kwargs)
def public(self, user=None, project=None, only_active=True, *args,
**kwargs):
queryset = self._filter_queryset(
user,
project,
            privacy_level=(constants.PUBLIC,),
only_active=only_active
)
return queryset.filter(*args, **kwargs)
def protected(self, user=None, project=None, only_active=True, *args,
**kwargs):
queryset = self._filter_queryset(
user,
project,
privacy_level=(constants.PUBLIC, constants.PROTECTED),
only_active=only_active
)
return queryset.filter(*args, **kwargs)
def private(self, user=None, project=None, only_active=True, *args,
**kwargs):
queryset = self._filter_queryset(
user,
project,
            privacy_level=(constants.PRIVATE,),
only_active=only_active
)
return queryset.filter(*args, **kwargs)
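# Illustrative sketch (added for clarity, not part of the original module):
# the manager methods above are normally reached through ``Version.objects``;
# ``request`` and ``project`` below are hypothetical.
#
#     visible = Version.objects.public(user=request.user, project=project)
#     everything = Version.objects.private(user=request.user, project=project,
#                                          only_active=False)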
class Version(models.Model):
project = models.ForeignKey(Project, verbose_name=_('Project'),
related_name='versions')
type = models.CharField(
_('Type'), max_length=20,
choices=VERSION_TYPES, default='unknown',
)
# used by the vcs backend
identifier = models.CharField(_('Identifier'), max_length=255)
verbose_name = models.CharField(_('Verbose Name'), max_length=255)
slug = models.CharField(_('Slug'), max_length=255)
supported = models.BooleanField(_('Supported'), default=True)
active = models.BooleanField(_('Active'), default=False)
built = models.BooleanField(_('Built'), default=False)
uploaded = models.BooleanField(_('Uploaded'), default=False)
privacy_level = models.CharField(
_('Privacy Level'), max_length=20, choices=constants.PRIVACY_CHOICES,
default='public', help_text=_("Level of privacy for this Version.")
)
tags = TaggableManager(blank=True)
objects = VersionManager()
class Meta:
unique_together = [('project', 'slug')]
ordering = ['-verbose_name']
permissions = (
# Translators: Permission around whether a user can view the
# version
('view_version', _('View Version')),
)
def __unicode__(self):
return ugettext(u"Version %(version)s of %(project)s (%(pk)s)" % {
'version': self.verbose_name,
'project': self.project,
'pk': self.pk
})
def get_absolute_url(self):
if not self.built and not self.uploaded:
return ''
return self.project.get_docs_url(version_slug=self.slug)
def save(self, *args, **kwargs):
"""
Add permissions to the Version for all owners on save.
"""
obj = super(Version, self).save(*args, **kwargs)
for owner in self.project.users.all():
assign('view_version', owner, self)
self.project.sync_supported_versions()
return obj
@property
def remote_slug(self):
if self.slug == 'latest':
if self.project.default_branch:
return self.project.default_branch
else:
return self.project.vcs_repo().fallback_branch
else:
return self.slug
def get_subdomain_url(self):
use_subdomain = getattr(settings, 'USE_SUBDOMAIN', False)
if use_subdomain:
return "/%s/%s/" % (
self.project.language,
self.slug,
)
else:
return reverse('docs_detail', kwargs={
'project_slug': self.project.slug,
'lang_slug': self.project.language,
'version_slug': self.slug,
'filename': ''
})
def get_subproject_url(self):
return "/projects/%s/%s/%s/" % (
self.project.slug,
self.project.language,
self.slug,
)
def get_downloads(self, pretty=False):
project = self.project
data = {}
if pretty:
if project.has_pdf(self.slug):
data['PDF'] = project.get_pdf_url(self.slug)
if project.has_htmlzip(self.slug):
data['HTML'] = project.get_htmlzip_url(self.slug)
if project.has_epub(self.slug):
data['Epub'] = project.get_epub_url(self.slug)
else:
if project.has_pdf(self.slug):
data['pdf_url'] = project.get_pdf_url(self.slug)
if project.has_htmlzip(self.slug):
data['htmlzip_url'] = project.get_htmlzip_url(self.slug)
if project.has_epub(self.slug):
data['epub_url'] = project.get_epub_url(self.slug)
#if project.has_manpage(self.slug):
#data['manpage_url'] = project.get_manpage_url(self.slug)
if project.has_dash(self.slug):
data['dash_url'] = project.get_dash_url(self.slug)
data['dash_feed_url'] = project.get_dash_feed_url(self.slug)
return data
def get_conf_py_path(self):
# Hack this for now.
return "/docs/"
conf_py_path = self.project.conf_file(self.slug)
conf_py_path = conf_py_path.replace(
self.project.checkout_path(self.slug), '')
return conf_py_path.replace('conf.py', '')
def get_build_path(self):
'''Return version build path if path exists, otherwise `None`'''
path = self.project.checkout_path(version=self.slug)
if os.path.exists(path):
return path
return None
def get_github_url(self, docroot, filename, source_suffix='.rst'):
GITHUB_REGEXS = [
re.compile('github.com/(.+)/(.+)(?:\.git){1}'),
re.compile('github.com/(.+)/(.+)'),
re.compile('github.com:(.+)/(.+).git'),
]
GITHUB_URL = 'https://github.com/{user}/{repo}/blob/{version}{docroot}{path}{source_suffix}'
repo_url = self.project.repo
if 'github' not in repo_url:
return ''
if not docroot:
return ''
for regex in GITHUB_REGEXS:
match = regex.search(repo_url)
if match:
user, repo = match.groups()
break
else:
return ''
repo = repo.rstrip('/')
return GITHUB_URL.format(
user=user,
repo=repo,
version=self.remote_slug,
docroot=docroot,
path=filename,
source_suffix=source_suffix,
)
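    # Illustrative example (added for clarity, not part of the original
    # module): the GITHUB_REGEXS patterns above extract the user and the
    # repository name from a clone URL, e.g.
    #
    #     >>> import re
    #     >>> re.compile('github.com/(.+)/(.+)(?:\.git){1}').search(
    #     ...     'git://github.com/rtfd/readthedocs.org.git').groups()
    #     ('rtfd', 'readthedocs.org')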
def get_bitbucket_url(self, docroot, filename, source_suffix='.rst'):
BB_REGEXS = [
re.compile('bitbucket.org/(.+)/(.+).git'),
re.compile('bitbucket.org/(.+)/(.+)/'),
re.compile('bitbucket.org/(.+)/(.+)'),
]
BB_URL = 'https://bitbucket.org/{user}/{repo}/src/{version}{docroot}{path}{source_suffix}'
repo_url = self.project.repo
if 'bitbucket' not in repo_url:
return ''
if not docroot:
return ''
for regex in BB_REGEXS:
match = regex.search(repo_url)
if match:
user, repo = match.groups()
break
else:
return ''
repo = repo.rstrip('/')
return BB_URL.format(
user=user,
repo=repo,
version=self.remote_slug,
docroot=docroot,
path=filename,
source_suffix=source_suffix,
)
class VersionAlias(models.Model):
project = models.ForeignKey(Project, verbose_name=_('Project'),
related_name='aliases')
from_slug = models.CharField(_('From slug'), max_length=255, default='')
to_slug = models.CharField(_('To slug'), max_length=255, default='',
blank=True)
largest = models.BooleanField(_('Largest'), default=False)
def __unicode__(self):
return ugettext(u"Alias for %(project)s: %(from)s -> %(to)s" % {
'project': self.project,
            'from': self.from_slug,
'to': self.to_slug,
})
class Build(models.Model):
project = models.ForeignKey(Project, verbose_name=_('Project'),
related_name='builds')
version = models.ForeignKey(Version, verbose_name=_('Version'), null=True,
related_name='builds')
type = models.CharField(_('Type'), max_length=55, choices=BUILD_TYPES,
default='html')
state = models.CharField(_('State'), max_length=55, choices=BUILD_STATE,
default='finished')
date = models.DateTimeField(_('Date'), auto_now_add=True)
success = models.BooleanField(_('Success'))
setup = models.TextField(_('Setup'), null=True, blank=True)
setup_error = models.TextField(_('Setup error'), null=True, blank=True)
output = models.TextField(_('Output'), default='', blank=True)
error = models.TextField(_('Error'), default='', blank=True)
exit_code = models.IntegerField(_('Exit code'), max_length=3, null=True,
blank=True)
class Meta:
ordering = ['-date']
get_latest_by = 'date'
def __unicode__(self):
return ugettext(u"Build %(project)s for %(usernames)s (%(pk)s)" % {
'project': self.project,
'usernames': ' '.join(self.project.users.all()
.values_list('username', flat=True)),
'pk': self.pk,
})
@models.permalink
def get_absolute_url(self):
return ('builds_detail', [self.project.slug, self.pk])
|
|
import json
from datetime import datetime, timedelta
from random import random
import mock
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core import mail
from nose.tools import eq_
from rest_framework.test import APIClient
from kitsune.sumo.helpers import urlparams
from kitsune.sumo.tests import TestCase
from kitsune.sumo.urlresolvers import reverse
from kitsune.questions.tests import question, answer, answervote
from kitsune.users import api
from kitsune.users.models import Profile
from kitsune.users.tests import user, profile, setting
class UsernamesTests(TestCase):
"""Test the usernames API method."""
url = reverse('users.api.usernames', locale='en-US')
def setUp(self):
self.u = user(username='testUser', save=True)
self.client.login(username=self.u.username, password='testpass')
def tearDown(self):
self.client.logout()
def test_no_query(self):
res = self.client.get(self.url)
eq_(200, res.status_code)
eq_('[]', res.content)
def test_query_old(self):
res = self.client.get(urlparams(self.url, term='a'))
eq_(200, res.status_code)
data = json.loads(res.content)
eq_(0, len(data))
def test_query_current(self):
res = self.client.get(urlparams(self.url, term=self.u.username[0]))
eq_(200, res.status_code)
data = json.loads(res.content)
eq_(1, len(data))
def test_post(self):
res = self.client.post(self.url)
eq_(405, res.status_code)
def test_logged_out(self):
self.client.logout()
res = self.client.get(self.url, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
eq_(403, res.status_code)
class TestUserSerializer(TestCase):
def setUp(self):
self.request = mock.Mock()
self.data = {
'username': 'bobb',
'display_name': 'Bobbert the Seventh',
'password': 'testpass',
'email': 'bobb@example.com',
}
def test_user_created(self):
# There is at least one user in existence due to migrations
number_users = User.objects.count()
serializer = api.ProfileSerializer(data=self.data)
assert serializer.is_valid()
serializer.save()
eq_(User.objects.count(), number_users + 1)
eq_(Profile.objects.count(), 1)
def test_password(self):
serializer = api.ProfileSerializer(data=self.data)
assert serializer.is_valid()
serializer.save()
assert serializer.object.user.password != 'testpass'
assert serializer.object.user.check_password('testpass')
def test_automatic_display_name(self):
del self.data['display_name']
serializer = api.ProfileSerializer(data=self.data)
assert serializer.is_valid()
eq_(serializer.object.name, 'bobb')
def test_no_duplicate_emails(self):
user(email=self.data['email'], save=True)
serializer = api.ProfileSerializer(data=self.data)
eq_(serializer.errors, {
'email': ['A user with that email address already exists.'],
})
assert not serializer.is_valid()
def test_users_with_emails_are_inactive(self):
serializer = api.ProfileSerializer(data=self.data)
        assert serializer.is_valid()
serializer.save()
eq_(serializer.object.user.is_active, False)
def test_users_with_emails_get_confirmation_email(self):
serializer = api.ProfileSerializer(data=self.data)
assert serializer.is_valid()
serializer.save()
eq_(len(mail.outbox), 1)
eq_(mail.outbox[0].subject, 'Please confirm your email address')
def test_cant_update_username(self):
p = profile()
p.user.username = 'notbobb'
p.user.save()
serializer = api.ProfileSerializer(data=self.data, instance=p)
eq_(serializer.is_valid(), False)
eq_(serializer.errors, {
'username': [u"Can't change this field."],
})
def test_username_bad_chars(self):
# New users shouldn't be able to have '@' in their username.
self.data['username'] = 'bobb@example.com'
serializer = api.ProfileSerializer(data=self.data)
eq_(serializer.is_valid(), False)
eq_(serializer.errors, {'username':
[u'Usernames may only be letters, numbers, "." and "-".']})
def test_username_too_long(self):
# Max length is 30
self.data['username'] = 'B' * 31
serializer = api.ProfileSerializer(data=self.data)
eq_(serializer.is_valid(), False)
eq_(serializer.errors, {'username':
[u'Usernames may only be letters, numbers, "." and "-".']})
def test_username_too_short(self):
# Min length is 4 chars.
self.data['username'] = 'bob'
serializer = api.ProfileSerializer(data=self.data)
eq_(serializer.is_valid(), False)
eq_(serializer.errors, {'username':
[u'Usernames may only be letters, numbers, "." and "-".']})
def test_helpfulness(self):
p = profile()
u = p.user
a1 = answer(creator=u, save=True)
a2 = answer(creator=u, save=True)
answervote(answer=a1, helpful=True, save=True)
answervote(answer=a2, helpful=True, save=True)
answervote(answer=a2, helpful=True, save=True)
# Some red herrings.
answervote(creator=u, save=True)
answervote(answer=a1, helpful=False, save=True)
serializer = api.ProfileSerializer(instance=p)
eq_(serializer.data['helpfulness'], 3)
def test_counts(self):
p = profile()
u = p.user
q = question(creator=u, save=True)
answer(creator=u, save=True)
q.solution = answer(question=q, creator=u, save=True)
q.save()
serializer = api.ProfileSerializer(instance=p)
eq_(serializer.data['question_count'], 1)
eq_(serializer.data['answer_count'], 2)
eq_(serializer.data['solution_count'], 1)
def test_last_answer_date(self):
p = profile()
u = p.user
answer(creator=u, save=True)
serializer = api.ProfileSerializer(instance=p)
eq_(serializer.data['last_answer_date'], u.answers.last().created)
class TestUserView(TestCase):
def setUp(self):
self.client = APIClient()
def test_only_self_edits(self):
p1 = profile()
p2 = profile()
self.client.force_authenticate(user=p2.user)
url = reverse('user-detail', args=[p1.user.username])
res = self.client.patch(url, {})
# u2 should not have permission to edit u1's user.
eq_(res.status_code, 403)
def test_cant_delete(self):
p = profile()
self.client.force_authenticate(user=p.user)
url = reverse('user-detail', args=[p.user.username])
res = self.client.delete(url)
eq_(res.status_code, 405)
def test_generator_on_stage(self):
# There is at least one user made during tests.
old_user_count = User.objects.count()
res = self.client.post(reverse('user-generate'))
eq_(res.status_code, 200)
eq_(User.objects.count(), old_user_count + 1)
new_user = User.objects.order_by('-id')[0]
eq_(res.data['user']['username'], new_user.username)
assert 'password' in res.data
assert 'token' in res.data
def test_generated_users_tagged(self):
res = self.client.post(reverse('user-generate'))
eq_(res.status_code, 200)
assert {'name': 'autogenerated', 'value': 'true'} in res.data['user']['settings']
def test_weekly_solutions(self):
eight_days_ago = datetime.now() - timedelta(days=8)
# ``a1`` is a solution in the right range.
# ``a2`` is a solution, but it is too old.
# The third answer is not a solution.
a1 = answer(save=True)
a1.question.solution = a1
a1.question.save()
a2 = answer(created=eight_days_ago, save=True)
a2.question.solution = a2
a2.question.save()
answer(save=True)
res = self.client.get(reverse('user-weekly-solutions'))
eq_(res.status_code, 200)
eq_(len(res.data), 1)
eq_(res.data[0]['username'], a1.creator.username)
def test_email_visible_when_signed_in(self):
p = profile()
url = reverse('user-detail', args=[p.user.username])
self.client.force_authenticate(user=p.user)
res = self.client.get(url)
eq_(res.data['email'], p.user.email)
def test_email_not_visible_when_signed_out(self):
p = profile()
url = reverse('user-detail', args=[p.user.username])
res = self.client.get(url)
assert 'email' not in res.data
def test_set_setting_add(self):
p = profile()
self.client.force_authenticate(user=p.user)
url = reverse('user-set-setting', args=[p.user.username])
res = self.client.post(url, {'name': 'foo', 'value': 'bar'})
eq_(res.status_code, 200)
eq_(p.settings.get(name='foo').value, 'bar')
def test_set_setting_update(self):
p = profile()
self.client.force_authenticate(user=p.user)
s = setting(user=p.user, name='favorite_fruit', value='apple', save=True)
url = reverse('user-set-setting', args=[p.user.username])
res = self.client.post(url, {'name': s.name, 'value': 'banana'})
eq_(res.status_code, 200)
eq_(p.settings.get(name=s.name).value, 'banana')
def test_delete_setting_exists_with_post(self):
p = profile()
self.client.force_authenticate(user=p.user)
s = setting(user=p.user, save=True)
url = reverse('user-delete-setting', args=[p.user.username])
res = self.client.post(url, {'name': s.name})
eq_(res.status_code, 204)
eq_(p.settings.filter(name=s.name).count(), 0)
def test_delete_setting_exists_with_delete(self):
p = profile()
self.client.force_authenticate(user=p.user)
s = setting(user=p.user, save=True)
url = reverse('user-delete-setting', args=[p.user.username])
res = self.client.delete(url, {'name': s.name})
eq_(res.status_code, 204)
eq_(p.settings.filter(name=s.name).count(), 0)
def test_delete_setting_404(self):
p = profile()
self.client.force_authenticate(user=p.user)
url = reverse('user-delete-setting', args=[p.user.username])
        res = self.client.post(url, {'name': 'nonexistent'})
eq_(res.status_code, 404)
def test_is_active(self):
p = profile()
url = reverse('user-detail', args=[p.user.username])
res = self.client.get(url)
assert 'is_active' in res.data
@mock.patch.object(Site.objects, 'get_current')
def test_request_password_reset(self, get_current):
get_current.return_value.domain = 'testserver'
p = profile()
url = reverse('user-request-password-reset', args=[p.user.username])
res = self.client.get(url)
eq_(res.status_code, 204)
eq_(1, len(mail.outbox))
def test_avatar_size(self):
p = profile()
url = reverse('user-detail', args=[p.user.username])
res = self.client.get(url)
assert '?s=48' in res.data['avatar']
res = self.client.get(url, {'avatar_size': 128})
assert '?s=128' in res.data['avatar']
    def test_create_user(self):
# There is at least one user in existence due to migrations
number_users = User.objects.count()
username = 'kris-{}'.format(random())
url = reverse('user-list')
res = self.client.post(url, {
'username': username,
'password': 'testpass',
'email': 'kris@example.com'
})
eq_(res.status_code, 201)
eq_(User.objects.count(), number_users + 1)
u = User.objects.order_by('-id')[0]
eq_(u.username, username)
eq_(u.email, 'kris@example.com')
eq_(u.is_active, False)
def test_invalid_email(self):
username = 'sarah-{}'.format(random())
url = reverse('user-list')
res = self.client.post(url, {
'username': username,
'password': 'testpass',
'email': 'sarah', # invalid
})
eq_(res.status_code, 400)
eq_(res.data, {'email': [u'Enter a valid email address.']})
def test_invalid_username(self):
url = reverse('user-list')
res = self.client.post(url, {
'username': '&', # invalid
'password': 'testpass',
'email': 'lucy@example.com',
})
eq_(res.status_code, 400)
eq_(res.data, {'username': [u'Usernames may only be letters, numbers, "." and "-".']})
def test_too_short_username(self):
url = reverse('user-list')
res = self.client.post(url, {
'username': 'a', # too short
'password': 'testpass',
'email': 'lucy@example.com',
})
eq_(res.status_code, 400)
eq_(res.data, {'username': [u'Usernames may only be letters, numbers, "." and "-".']})
def test_too_long_username(self):
url = reverse('user-list')
res = self.client.post(url, {
'username': 'a' * 100, # too long
'password': 'testpass',
'email': 'lucy@example.com',
})
eq_(res.status_code, 400)
eq_(res.data, {'username': [u'Usernames may only be letters, numbers, "." and "-".']})
|
|
import cv2
import xml.etree.ElementTree as ET
import os
from rod import *
import pprint
pp = pprint.PrettyPrinter(depth=6)
def xml_load_point(point):
return (int(point.split(',')[0]), int(point.split(',')[1]))
def xml_load_rect(rect):
return (int(rect.find("X").text), int(rect.find("Y").text), int(rect.find("W").text), int(rect.find("H").text))
def xml_load_rgb(text):
tokens = text.split(",")
return (int(tokens[0]), int(tokens[1]), int(tokens[2]))
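# Illustrative examples (added for clarity, not part of the original script):
#
#     >>> xml_load_point('12,34')
#     (12, 34)
#     >>> xml_load_rgb('255,0,128')
#     (255, 0, 128)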
global refPt
refPt = (0,0)
def click_callback(event, x, y, flags, param):
global refPt
if event == cv2.EVENT_LBUTTONDOWN:
refPt = (x, y)
cropping = True
class Recording(object):
def __init__(self, recording_file, models, blackouts, crop_to_rod):
self.recording_file = recording_file
self.tree = ET.parse(recording_file)
self.root = self.tree.getroot()
base_path = os.path.dirname(recording_file)
# Blackouts to hide any rods
self.blackouts = blackouts # List of rod names to hide
# Crops
self.crop = None
self.crop_to_rod = crop_to_rod
if self.crop_to_rod is None:
if self.root.find("CROP") is not None:
self.crop = xml_load_rect(self.root.find("CROP").find("RECT"))
# Files
self.file_avi = os.path.join(base_path, self.root.find("RECORDING").find("AVI").text)
self.frame_start = int(self.root.find("RECORDING").find("STARTFRAME").text)
self.frame_end = int(self.root.find("RECORDING").find("ENDFRAME").text)
# Rod tracking settings
rod_tracking_alignment = None
rod_tracking_gap_colour = None
rod_tracking_gap_colour_distance = None
rod_tracking_gap_min_size = None
rod_tracking_rod_width = None
self.line_detection_frequency = 1
self.model = None
if self.root.find("RODS").find("TRACKING").find("MODEL") is not None:
# Load the corresponding tracking model
self.model = models[self.root.find("RODS").find("TRACKING").find("MODEL").text]
else:
# Use standard line-detection based rod tracking
rod_tracking_alignment = self.root.find("RODS").find("TRACKING").find("ALIGNMENT").text
rod_tracking_gap_colour = xml_load_rgb(self.root.find("RODS").find("TRACKING").find("GAP_COLOUR").text)
rod_tracking_gap_colour_distance = int(self.root.find("RODS").find("TRACKING").find("GAP_COLOUR_DISTANCE").text)
rod_tracking_gap_min_size = int(self.root.find("RODS").find("TRACKING").find("GAP_MIN_SIZE").text)
rod_tracking_rod_width = int(self.root.find("RODS").find("TRACKING").find("ROD_WIDTH").text)
if self.root.find("RODS").find("TRACKING").find("LINE_DETECTION_FREQUENCY") is None:
self.line_detection_frequency = 30
else:
self.line_detection_frequency = int(self.root.find("RODS").find("TRACKING").find("LINE_DETECTION_FREQUENCY").text)
# Video file
self.cap = cv2.VideoCapture(self.file_avi)
self.num_frames = self.cap.get(cv2.CAP_PROP_FRAME_COUNT)
self.frame = 0
self.has_more = False
# Rods
self.rods = {}
for rod in self.root.find("RODS").iter("ROD"):
rod_left = None
rod_right = None
if self.model is not None:
if rod.find("TRACKING") is None or rod.find("TRACKING").find("LEFT") is None:
rod_left = None
else:
rod_left = xml_load_point(rod.find("TRACKING").find("LEFT").text)
if rod.find("TRACKING") is None or rod.find("TRACKING").find("RIGHT") is None:
rod_right = None
else:
rod_right = xml_load_point(rod.find("TRACKING").find("RIGHT").text)
rod_name = rod.find("NAME").text
self.rods[rod_name] = Rod( xml_load_rect(rod.find("RECT")),rod_name, rod_tracking_gap_colour, rod_tracking_gap_colour_distance, rod_tracking_gap_min_size, rod_tracking_rod_width, self.line_detection_frequency, rod_left, rod_right, self.model)
def __del__(self):
self.cap.release()
def process(self):
# Extract the rod positions
pass
def initialize(self):
self._initialize_rod_gaps()
self.cap.set(1,self.frame_start)
self.frame = self.frame_start
self.has_more = True
def get_next_frame(self):
        # Returns a 4-tuple:
        #   (cropped frame, frame with markup, {rod name: rod position}, failure count)
if( self.cap.isOpened() and self.has_more ):
font = cv2.FONT_HERSHEY_SIMPLEX
ret, frame = self.cap.read()
if ret==True and self.frame < self.frame_end:
self.frame += 1
frame_with_markup = frame.copy()
# Only do edge-processing if we aren't using a ML model to extract position
if self.model is None:
gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray,50,150,apertureSize = 3)
else:
edges = None
# Draw the crop box
if self.crop is not None:
box = self.crop
cv2.rectangle(frame_with_markup, (box[0], box[1]),
(box[0] + box[2], box[1] + box[3]),
(0,255,0) );
# Process each rod
rod_positions = {}
failure_count = 0
for rod_name, rod in self.rods.items():
# Update rod tracking
if rod.rod_left is None and self.model is None:
# Update from the rod line
if self.frame % self.line_detection_frequency == 0:
rod.update_rod_line(edges)
# Get this rod position
if rod.rod_line is not None or rod.model is not None:
(new_x, line, success) = rod.track_rod_position(frame)
if( not success ):
failure_count += 1
rod_positions[rod_name] = new_x
# Update the graphics
box = rod.box
if rod_name in self.blackouts:
# Black box to indicate it is being cut out
cv2.rectangle(frame_with_markup, (box[0], box[1]),
(box[0] + box[2], box[1] + box[3]),
(0,0,0), 2 );
else:
# Show regular box
cv2.rectangle(frame_with_markup, (box[0], box[1]),
(box[0] + box[2], box[1] + box[3]),
(255,0,0), 3 );
# Draw the rod line
rod_line = rod.rod_line
if rod_line is not None:
cv2.line(frame_with_markup,rod_line[0],rod_line[1],(0,0,255),2)
if line is not None:
rod_offence_last_frame_x = new_x
cv2.line(frame_with_markup, line[0], line[1], (255,0,255), thickness=4)
# Draw this rod position
cv2.putText(frame_with_markup,'Pos %.2f' % (new_x),(box[0]+30, box[1]+30), font, 1,(255,255,255),1,cv2.LINE_AA)
# Black out the rods
for blackout in self.blackouts:
frame = self.rods[blackout].blackout(frame)
# Crop the frame
if self.crop is None:
frame_cropped = frame
else:
frame_cropped = frame[self.crop[1]:(self.crop[1]+self.crop[3]), self.crop[0]:(self.crop[0]+self.crop[2])]
# Crop to the specific rod
if self.crop_to_rod is not None and self.crop_to_rod in self.rods:
frame_cropped = self.rods[self.crop_to_rod].get_rod_region(frame_cropped)
#cv2.imshow('image',frame_cropped)
#key = cv2.waitKey(1)
# Add text
cv2.putText(frame_with_markup,'%i - %i,%i: rgb: %i,%i,%i' % (self.frame, refPt[0],refPt[1],frame[refPt[1]][refPt[0]][0],frame[refPt[1]][refPt[0]][1],frame[refPt[1]][refPt[0]][2]),(10,50), font, 1,(255,255,255),1,cv2.LINE_AA)
return (frame_cropped, frame_with_markup, rod_positions, failure_count)
else:
print("frame read failure")
self.has_more = False
return (None, None, None, 0)
return (None, None, None, 0)
def _initialize_rod_gaps(self):
print("Loading rod gap tracking sizes...")
key_frame_count = 30
#key_frame_count = 5
small_frame_set = []
for i in range(key_frame_count):
frame = i * (self.num_frames/key_frame_count)
self.cap.set(1,frame)
ret, frame = self.cap.read()
small_frame_set.append(frame)
# Load the rod gap sizes to track from a small subset of the frames
for rod_name, rod in self.rods.items():
# Find the gap size
size = rod.find_gap_size(small_frame_set)
print("\t%s: gap size %i" % (rod_name, size))
def play(self):
self.initialize()
# Extract the rod positions
cv2.namedWindow("image")
global refPt
(frame, frame_with_markup, rod_positions, failure_count) = self.get_next_frame()
while frame is not None:
cv2.setMouseCallback("image", click_callback)
cv2.imshow('image',frame_with_markup)
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
break
elif key == ord(' '):
cv2.waitKey()
(frame, frame_with_markup, rod_positions, failure_count) = self.get_next_frame()
print("Finished after processing %i frames." % self.frame)
cv2.destroyAllWindows()
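# Hedged usage sketch (added for illustration, not part of the original
# script; the recording XML path, the empty model dictionary and the empty
# blackout list below are hypothetical):
#
#     recording = Recording('recording.xml', models={}, blackouts=[],
#                           crop_to_rod=None)
#     recording.play()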
|
|
#!/usr/bin/env python
'''
harvest.py
A set of functions to support downloading and synchronizing a WAF
'''
from catalog_harvesting.waf_parser import WAFParser
from catalog_harvesting.erddap_waf_parser import ERDDAPWAFParser
from catalog_harvesting.csw import download_csw
from catalog_harvesting import get_logger, get_redis_connection
from catalog_harvesting.records import parse_records
from catalog_harvesting.ckan_api import get_harvest_info, create_harvest_job
from catalog_harvesting.notify import Mail, Message, MAIL_DEFAULT_SENDER
from hashlib import sha1
from pymongo import MongoClient
from datetime import datetime
from base64 import b64encode
import requests
import os
import time
import redis
def download_from_db(conn_string, dest):
    '''
    Download several WAFs using collections from MongoDB as a source
    :param str conn_string: MongoDB connection string, for example
        'mongodb://localhost:27017/catalog'; the database name is taken from
        the path component and defaults to 'default' when it is omitted
    :param str dest: Write directory destination
    '''
tokens = conn_string.split('/')
if len(tokens) > 3:
db_name = tokens[3]
else:
db_name = 'default'
db = MongoClient(conn_string)[db_name]
for harvest in list(db.Harvests.find({"publish": True})):
try:
download_harvest(db, harvest, dest)
except KeyboardInterrupt:
# exit on SIGINT
raise
except:
get_logger().exception("Failed to harvest")
get_logger().error(harvest)
def download_harvest(db, harvest, dest):
    '''
    Downloads a harvest from the mongo db and updates the harvest with the
    latest harvest date.
    :param db: Mongo DB Client
    :param dict harvest: A dictionary returned from the mongo collection for
                         harvests.
    :param str dest: Directory into which the harvested documents are written
    '''
src = harvest['url']
get_logger().info('harvesting: %s' % src)
db.Harvests.update({"_id": harvest['_id']}, {
"$set": {
"last_harvest_dt": "harvesting",
"last_harvest_status": None
}
})
try:
provider_str = harvest['organization']
path = os.path.join(dest, provider_str)
if harvest['harvest_type'] == 'WAF':
records, errors = download_waf(db, harvest, src, path)
elif harvest['harvest_type'] == 'ERDDAP-WAF':
records, errors = download_erddap_waf(db, harvest, src, path)
elif harvest['harvest_type'] == 'CSW':
records, errors = download_csw(db, harvest, src, path)
else:
raise TypeError('harvest_type "{}" is not supported; use WAF or CSW'.format(harvest['harvest_type']))
db.Harvests.update({"_id": harvest['_id']}, {
"$set": {
"last_harvest_dt": datetime.utcnow(),
"last_record_count": records,
"last_good_count": (records - errors),
"last_bad_count": errors,
"last_harvest_status": "ok"
}
})
trigger_ckan_harvest(db, harvest)
except:
send_notifications(db, harvest)
get_logger().exception("Failed to successfully harvest %s",
harvest['url'])
db.Harvests.update({"_id": harvest['_id']}, {
"$set": {
"last_harvest_dt": datetime.utcnow(),
"last_harvest_status": "fail"
}
})
def delete_harvest(db, harvest):
'''
Deletes a harvest, all associated attempts and records
:param db: MongoDB Client
:param dict harvest: A dictionary returned from the mongo collection for
harvests.
'''
try:
# Remove attempts
records = list(db.Records.find({"harvest_id": harvest['_id']}))
for record in records:
if os.path.exists(record['location']):
get_logger().info("Removing %s", record['location'])
os.remove(record['location'])
db.Records.remove({"harvest_id": harvest['_id']})
db.Attempts.remove({"parent_harvest": harvest['_id']})
db.Harvests.remove({"_id": harvest['_id']})
except:
get_logger().exception("Could not successfully delete harvest")
def send_notifications(db, harvest):
'''
Send an email to all users belonging to the organization of the harvest
notifying them that the harvest failed.
:param db: Mongo DB Client
:param dict harvest: A dictionary returned from the mongo collection for
harvests.
'''
users = db.users.find({"profile.organization": harvest['organization']})
mail = Mail()
emails = []
for user in list(users):
user_emails = user['emails']
if user_emails and user_emails[0]['address']:
emails.append(user_emails[0]['address'])
recipients = [email for email in emails if throttle_email(email)]
# If there are no recipients, obviously don't send an email
if not recipients:
return
for recipient in recipients:
get_logger().info("Sending a notification to %s", recipient)
msg = Message("Failed to correctly harvest",
sender=MAIL_DEFAULT_SENDER or "admin@ioos.us",
                      recipients=[recipient])
body = ("We were unable to harvest from the harvest source {url}. "
"Please verify that the source URL is correct and contains "
"valid XML Documents. \n\n"
"Thanks!\nIOOS Catalog Harvester".format(url=harvest['url']))
msg.body = body
mail.send(msg)
def throttle_email(email, timeout=3600):
'''
    Returns True if no email has been sent to this address within the last
    timeout seconds (and records this attempt); returns False otherwise.
:param str email: Email address of the recipient
:param int timeout: Seconds to wait until the next email can be sent
'''
host, port, db = get_redis_connection()
redis_pool = redis.ConnectionPool(host=host, port=port, db=db)
rc = redis.Redis(connection_pool=redis_pool)
key = 'harvesting:notifications:' + b64encode(email)
value = rc.get(key)
if value is None:
rc.setex(key, 1, timeout)
return True
return False
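# A minimal sketch of the intended behaviour (assumes a reachable Redis at
# whatever host/port/db get_redis_connection() returns, and a hypothetical
# address): the first call stores a flag with a one-hour TTL and returns True,
# so the caller sends the email; repeat calls within that hour find the flag
# and return False, so the address is skipped.
#
#   throttle_email('ops@example.com')  # -> True, notification goes out
#   throttle_email('ops@example.com')  # -> False until the key expires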
def trigger_ckan_harvest(db, harvest):
'''
Initiates a CKAN Harvest
:param db: Mongo DB Client
:param dict harvest: A dictionary returned from the mongo collection for
harvests.
'''
try:
ckan_harvest = get_harvest_info(db, harvest)
ckan_harvest_id = ckan_harvest['id']
create_harvest_job(ckan_harvest_id)
    except Exception:
get_logger().exception("Failed to initiate CKAN Harvest")
def download_waf(db, harvest, src, dest):
'''
Downloads a WAF's contents to a destination
:param db: Mongo DB Client
:param dict harvest: A dictionary returned from the mongo collection for
harvests.
:param url src: URL to the WAF
:param str dest: Folder to download to
'''
if not os.path.exists(dest):
os.makedirs(dest)
waf_parser = WAFParser(src)
old_records = list(db.Records.find({"harvest_id": harvest['_id']}))
db.Records.remove({"harvest_id": harvest['_id']})
new_records = []
count = 0
errors = 0
for link in waf_parser.parse():
get_logger().info("Downloading %s", link)
try:
link_hash = sha1(link.encode('utf-8')).hexdigest()
doc_name = link_hash + '.xml'
local_filename = os.path.join(dest, doc_name)
get_logger().info("Saving to %s", local_filename)
download_file(link, local_filename)
rec = parse_records(db, harvest, link, local_filename)
new_records.append(rec)
if len(rec['validation_errors']):
errors += 1
count += 1
except KeyboardInterrupt:
raise
except Exception:
errors += 1
get_logger().exception("Failed to download")
continue
purge_old_records(new_records, old_records)
return count, errors
def download_erddap_waf(db, harvest, src, dest):
'''
Downloads a WAF's from ERDDAP to a destination
:param db: Mongo DB Client
:param dict harvest: A dictionary returned from the mongo collection for
harvests.
:param url src: URL to the WAF
:param str dest: Folder to download to
'''
if not os.path.exists(dest):
os.makedirs(dest)
waf_parser = ERDDAPWAFParser(src)
old_records = list(db.Records.find({"harvest_id": harvest['_id']}))
db.Records.remove({"harvest_id": harvest['_id']})
new_records = []
count = 0
errors = 0
for link in waf_parser.parse():
get_logger().info("Downloading %s", link)
try:
doc_name = link.split('/')[-1]
local_filename = os.path.join(dest, doc_name)
# CKAN only looks for XML documents for the harvester
if not local_filename.endswith('.xml'):
local_filename += '.xml'
download_file(link, local_filename)
rec = parse_records(db, harvest, link, local_filename)
new_records.append(rec)
if len(rec['validation_errors']):
errors += 1
count += 1
except KeyboardInterrupt:
raise
except Exception:
errors += 1
get_logger().exception("Failed to download")
continue
purge_old_records(new_records, old_records)
return count, errors
def download_file(url, location):
'''
Downloads a file from a URL and writes it to location
:param str url: URL to download document
:param str location: Full filename to write to
'''
r = requests.get(url, stream=True)
with open(location, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024 * 1024):
if chunk:
f.write(chunk)
return location
def force_clean(path, max_days=3):
'''
Deletes any files in path that end in .xml and are older than the specified
number of days
:param str path: Path to a folder to clean
:param int max_days: Maximum number of days to keep an old record before
removing it.
'''
now = time.time()
for root, dirs, files in os.walk(path):
for filename in files:
filepath = os.path.join(root, filename)
if not filename.endswith('.xml'):
continue
file_st = os.stat(filepath)
mtime = file_st.st_mtime
if (now - mtime) > (24 * 3600 * max_days):
get_logger().info("Removing %s", filepath)
os.remove(filepath)
def purge_old_records(new_records, old_records):
'''
Deletes any records in old_records that aren't in new_records
:param list new_records: List of records
:param list old_records: List of records
'''
get_logger().info("Purging old records from WAF")
new_files = [r['location'] for r in new_records if 'location' in r]
removal = [r for r in old_records if 'location' in r and r['location'] not in new_files]
for record in removal:
if 'location' not in record:
continue
if os.path.exists(record['location']):
get_logger().info("Removing %s", record['location'])
os.remove(record['location'])
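# Example entry point for this module (hypothetical connection string and
# destination directory): the database name is read from the URL path, so
# 'catalog' below, and downloaded records land under <dest>/<organization>/.
#
#   download_from_db('mongodb://localhost:27017/catalog', '/var/lib/harvest')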
|
|
#=========================================================================
# ast_visitor_test.py
#=========================================================================
# Tests for Python AST Visitors.
import pytest
from ast_visitor import *
from ..ast_helpers import get_method_ast, print_simple_ast
from pymtl import InPort, OutPort, Wire
#-------------------------------------------------------------------------
# AST Visitor Checker: Function Decorator
#-------------------------------------------------------------------------
# This decorator takes two parameters: a list of expected signals detected
# as loads by the visitor, and a list of expected signals detected as
# stores by the visitor. Assertions verify that the expected signals
# match the actual signals detected.
# TODO: the order in which you name the signals in each list matters,
# should fix this later.
def check_ast( ld, st ):
def check_decorator( func ):
tree, src = get_method_ast( func )
print()
#import debug_utils
#debug_utils.print_ast( tree )
load, store = DetectLoadsAndStores().enter( tree )
print( "LOADS ", load, "want:", ld )
print( "STORES", store, "want:", st )
assert ld == load
assert st == store
return func
return check_decorator
#-------------------------------------------------------------------------
# Simple Assignments
#-------------------------------------------------------------------------
def test_assign():
@check_ast( ['s.in_'], ['s.out.v'] )
def assign( s ):
s.out.v = s.in_
def test_assign_op():
@check_ast( ['s.a.v', 's.b', 's.c'], ['s.out.v'] )
def assign_op( s ):
s.out.v = s.a.v + s.b + s.c
def test_assign_temp():
@check_ast( ['s.in_', 'x'], ['x', 's.out.v'] )
def assign_temp( s ):
x = s.in_
s.out.v = x
#-------------------------------------------------------------------------
# Bit Indexing
#-------------------------------------------------------------------------
def test_rd_bit_idx_const():
@check_ast( ['s.a.v[?]', 's.b[?]'], ['s.out.v'] )
def rd_bit_idx_const( s ):
s.out.v = s.a.v[ 0 ] + s.b[ 1 ]
def test_rd_bit_idx_var():
@check_ast( ['s.c', 's.a.v[?]', 's.d', 's.b[?]'], ['s.out.v'] )
def rd_bit_idx_var( s ):
s.out.v = s.a.v[ s.c ] & s.b[ s.d ]
def test_rd_bit_idx_slice_const():
@check_ast( ['s.a.v[?]', 's.b[?]'], ['s.out.v'] )
def rd_bit_idx_slice_const( s ):
s.out.v = s.a.v[ 0:2 ] & s.b[ 4:8 ]
def test_rd_bit_idx_slice_var():
@check_ast( ['s.s0', 's.s1', 's.a.v[?]'], ['s.out.v'] )
def rd_bit_idx_slice_var( s ):
s.out.v = s.a.v[ s.s0:s.s1 ]
def test_wr_bit_idx_const():
@check_ast( ['s.in0', 's.in1'], ['s.out.v[?]'] )
def wr_bit_idx_const( s ):
s.out.v[ 0 ] = s.in0 + s.in1
def test_wr_bit_idx_var():
@check_ast( ['s.c', 's.in0', 's.in1'], ['s.out.v[?]'] )
def wr_bit_idx_var( s ):
s.out.v[ s.c ] = s.in0 + s.in1
def test_wr_bit_idx_slice_const():
@check_ast( ['s.in0[?]'], ['s.out.v[?]'] )
def wr_bit_idx_slice_const( s ):
s.out.v[ 0:1 ] = s.in0[ 3:4 ]
def test_wr_bit_idx_slice_var():
@check_ast( ['s.s0', 's.s1', 's.a.v'], ['s.out.v[?]'] )
def wr_bit_idx_slice_var( s ):
s.out.v[ s.s0:s.s1 ] = s.a.v
#-------------------------------------------------------------------------
# List Indexing
#-------------------------------------------------------------------------
def test_rd_list_idx_const():
@check_ast( ['s.a[?].v', 's.b[?].v'], ['s.out.v'] )
def rd_list_idx_const( s ):
s.out.v = s.a[ 0 ].v + s.b[ 1 ].v
def test_rd_list_idx_var():
@check_ast( ['s.c', 's.a[?].v', 's.d', 's.b[?]'], ['s.out.v'] )
def rd_list_idx_var( s ):
s.out.v = s.a[ s.c ].v & s.b[ s.d ]
def test_rd_list_idx_slice_const():
@check_ast( ['s.a[?].v', 's.b[?].v'], ['s.out.v'] )
def rd_list_idx_slice_const( s ):
s.out.v = s.a[ 0:2 ].v & s.b[ 4:8 ].v
def test_rd_list_idx_slice_var():
@check_ast( ['s.s0', 's.s1', 's.a[?].v'], ['s.out.v'] )
def rd_list_idx_slice_var( s ):
s.out.v = s.a[ s.s0:s.s1 ].v
def test_wr_list_idx_const():
@check_ast( ['s.in0', 's.in1'], ['s.out[?].v'] )
def wr_list_idx_const( s ):
s.out[ 0 ].v = s.in0 + s.in1
def test_wr_list_idx_var():
@check_ast( ['s.c', 's.in0', 's.in1'], ['s.out[?].v'] )
def wr_list_idx_var( s ):
s.out[ s.c ].v = s.in0 + s.in1
def test_wr_list_idx_slice_const():
@check_ast( ['s.in0[?]'], ['s.out[?].v'] )
def wr_list_idx_slice_const( s ):
s.out[ 0:1 ].v = s.in0[ 3:4 ]
def test_wr_list_idx_slice_var():
@check_ast( ['s.s0', 's.s1', 's.a.v'], ['s.out[?].v'] )
def wr_list_idx_slice_var( s ):
s.out[ s.s0:s.s1 ].v = s.a.v
#-------------------------------------------------------------------------
# If Statements
#-------------------------------------------------------------------------
def test_if_else():
# TODO: prevent duplication?
@check_ast( ['s.if0', 's.in0', 's.in1'], ['s.out.v', 's.out.v'] )
def if_else( s ):
if s.if0:
s.out.v = s.in0
else:
s.out.v = s.in1
def test_if_elif_else():
@check_ast( ['s.if0', 's.in0', 's.if1', 's.in1', 's.in2'],
['s.out.v']*3 )
def if_elif_else( s ):
if s.if0:
s.out.v = s.in0
elif s.if1:
s.out.v = s.in1
else:
s.out.v = s.in2
def test_if_elif_and():
@check_ast( ['s.if0', 's.if1', 's.in0', 's.if0', 's.if2', 's.in1', 's.in2'],
['s.out.v']*3 )
def if_elif_and( s ):
if s.if0 and s.if1:
s.out.v = s.in0
elif s.if0 and s.if2:
s.out.v = s.in1
else:
s.out.v = s.in2
def test_if_elif_elif():
@check_ast( ['s.if0', 's.in0', 's.if1', 's.in1', 's.if2', 's.in2'],
['s.out.v']*3 )
def if_elif_else( s ):
if s.if0:
s.out.v = s.in0
elif s.if1:
s.out.v = s.in1
elif s.if2:
s.out.v = s.in2
def test_nested_if():
@check_ast( ['s.if0', 's.if1', 's.in0', 's.if2', 's.in1' ],
['s.out.v']*2 )
def logic( s ):
if s.if0:
if s.if1:
s.out.v = s.in0
elif s.if2:
s.out.v = s.in1
def test_nested_else():
@check_ast( ['s.if0', 's.in0', 's.if1', 's.in0', 's.if2', 's.in1' ],
['s.out.v']*3 )
def logic( s ):
if s.if0:
s.out.v = s.in0
else:
if s.if1:
s.out.v = s.in0
elif s.if2:
s.out.v = s.in1
def test_nested_elif():
@check_ast( ['s.if0', 's.in0', 's.if3', 's.if1', 's.in0', 's.if2', 's.in1' ],
['s.out.v']*3 )
def logic( s ):
if s.if0:
s.out.v = s.in0
elif s.if3:
if s.if1:
s.out.v = s.in0
elif s.if2:
s.out.v = s.in1
#-------------------------------------------------------------------------
# AST Visitor Checker: Function Decorator
#-------------------------------------------------------------------------
def next( func ):
tree, src = get_method_ast( func )
#print_simple_ast( tree )
DetectMissingValueNext( func, 'next' ).visit( tree )
def value( func ):
tree, src = get_method_ast( func )
#print_simple_ast( tree )
DetectMissingValueNext( func, 'value' ).visit( tree )
class Temp( object ):
def __init__( s ):
s.i0, s.i1, s.i2 = InPort [3](1)
s.o0, s.o1, s.o2 = OutPort[3](1)
s.out = OutPort[3](1)
def test_noerror_next():
s = Temp()
@next
def logic():
s.i0.next = 5
def test_error_next():
s = Temp()
with pytest.raises( PyMTLError ):
@next
def logic():
s.i0 = 5
def test_noerror_value():
s = Temp()
@value
def logic():
s.i0.next = 5
def test_error_value():
s = Temp()
with pytest.raises( PyMTLError ):
@value
def logic():
s.i0 = 5
def test_noerror_tuple():
s = Temp()
@next
def logic():
s.o0.next,s.o1.next = 5, 6
@value
def logic():
s.o0.value,s.o1.value = 5, 6
@next
def logic():
s.o0.next,s.o1.next,s.o2.next = 5, 6, 7
@next
def logic():
s.o0.value,s.o1.value,s.o2.value = 5, 6, 7
def test_error_tuple():
s = Temp()
with pytest.raises( PyMTLError ):
@next
def logic():
s.o0, s.o1.next = 5, 6
with pytest.raises( PyMTLError ):
@value
def logic():
s.o0, s.o1.value = 5, 6
with pytest.raises( PyMTLError ):
@next
def logic():
s.o0, s.o1 = 5, 6
with pytest.raises( PyMTLError ):
@value
def logic():
s.o0.value, s.o1 = 5, 6
with pytest.raises( PyMTLError ):
@next
def logic():
s.o0.next, s.o1, s.o2.next = 5, 6, 7
with pytest.raises( PyMTLError ):
@value
def logic():
s.o0.value, s.o1.value, s.o2 = 5, 6, 7
def test_noerror_list():
s = Temp()
@next
def logic():
[s.o0.next,s.o1.next] = 5, 6
@value
def logic():
[s.o0.value,s.o1.value] = 5, 6
def test_error_list():
s = Temp()
with pytest.raises( PyMTLError ):
@next
def logic():
[s.o0,s.o1.next] = 5, 6
with pytest.raises( PyMTLError ):
@value
def logic():
[s.o0.value,s.o1] = 5, 6
def test_noerror_tuple_packs():
s = Temp()
@next
def logic():
s.o0.next, (s.o1.next, s.o2.next) = 5, (6, 7)
@value
def logic():
(s.o0.value, s.o1.value), s.o2.value = (5, 6), 7
def test_error_tuple_packs():
s = Temp()
with pytest.raises( PyMTLError ):
@next
def logic():
s.o0.next, (s.o1.next, s.o2) = 5, (6, 7)
with pytest.raises( PyMTLError ):
@value
def logic():
(s.o0, s.o1.value), s.o2.value = (5, 6), 7
def test_noerror_list_packs():
s = Temp()
@next
def logic():
s.o0.next, [s.o1.next, s.o2.next] = 5, (6, 7)
@value
def logic():
[s.o0.value, s.o1.value], s.o2.value = (5, 6), 7
def test_error_list_packs():
s = Temp()
with pytest.raises( PyMTLError ):
@next
def logic():
s.o0.next, [s.o1.next, s.o2] = 5, (6, 7)
with pytest.raises( PyMTLError ):
@value
def logic():
[s.o0, s.o1.value], s.o2.value = (5, 6), 7
def test_noerror_mixed_packs():
s = Temp()
@next
def logic():
[s.o0.next], (s.o1.next, s.o2.next) = [5], (6, 7)
@value
def logic():
[s.o0.value, s.o1.value], (s.o2.value,) = (5, 6), [7]
@next
def logic():
[s.o0.next], ([s.o1.next], s.o2.next) = [5], ([6], 7)
@value
def logic():
[s.o0.value, (s.o1.value)], (s.o2.value,) = (5, 6), [7]
def test_error_mixed_packs():
s = Temp()
with pytest.raises( PyMTLError ):
@next
def logic():
[s.o0], (s.o1.next, s.o2.next) = [5], (6, 7)
with pytest.raises( PyMTLError ):
@value
def logic():
[s.o0.value, s.o1], (s.o2,) = (5, 6), [7]
with pytest.raises( PyMTLError ):
@next
def logic():
[s.o0.next], ([s.o1], s.o2.next) = [5], ([6], 7)
with pytest.raises( PyMTLError ):
@value
def logic():
[s.o0.value, (s.o1)], (s.o2.value,) = (5, 6), [7]
def test_noerror_subscript():
s = Temp()
@next
def logic():
s.out[0].next = 5
@value
def logic():
s.out[0].value = 5
def test_error_subscript():
s = Temp()
with pytest.raises( PyMTLError ):
@next
def logic():
s.out[0] = 5
with pytest.raises( PyMTLError ):
@value
def logic():
s.out[0] = 5
def test_noerror_subscript_tuple():
s = Temp()
@next
def logic():
s.out[0].next, s.out[1].next = 5,6
@value
def logic():
s.out[0].value, s.out[1].value = 5,6
def test_error_subscript_tuple():
s = Temp()
with pytest.raises( PyMTLError ):
@next
def logic():
s.out[0].next, s.out[1] = 5,6
with pytest.raises( PyMTLError ):
@value
def logic():
s.out[0], s.out[1].value = 5,6
|
|
'''
Estimate the impact of beam smearing by running a standard deviation filter
over the peak velocity surface (since we use it for stacking) at different
resolutions.
'''
import numpy as np
import scipy.ndimage as nd
from astropy.io import fits
import astropy.units as u
from spectral_cube import Projection
import matplotlib.pyplot as plt
from turbustat.statistics import PDF
from scipy import stats
from scipy.stats import binned_statistic
import seaborn as sb
from corner import hist2d
from galaxy_params import gal_feath as gal
from paths import (fourteenB_wGBT_HI_file_dict, fourteenB_HI_data_wGBT_path,
allfigs_path)
from plotting_styles import onecolumn_figure, default_figure
def window_stdev(X, window_size):
'''
Standard deviation window.
From: https://nickc1.github.io/python,/matlab/2016/05/17/Standard-Deviation-(Filters)-in-Matlab-and-Python.html
'''
r, c = X.shape
X += np.random.rand(r, c) * 1e-6
c1 = nd.uniform_filter(X, window_size, mode='reflect')
c2 = nd.uniform_filter(X * X, window_size, mode='reflect')
return np.sqrt(c2 - c1 * c1)
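# The filter above evaluates Var(X) = E[X**2] - (E[X])**2 over a sliding
# window: c1 is the windowed mean of X and c2 the windowed mean of X**2.
# A rough cross-check against a brute-force window (illustrative only, a
# small random array rather than the maps used below):
#
#   X = np.random.rand(64, 64)
#   fast = window_stdev(X.copy(), 5)
#   slow = nd.generic_filter(X, np.std, size=5)
#   # fast and slow agree to ~1e-6 away from the reflected edges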
default_figure()
peakvels = \
Projection.from_hdu(fits.open(fourteenB_wGBT_HI_file_dict['PeakVels']))
mom0 = Projection.from_hdu(fits.open(fourteenB_wGBT_HI_file_dict['Moment0']))
# Set the window size to be the FWHM of the beam
# Pixels are essentially square in WCS here.
window_size = mom0.beam.major.to(u.deg) / \
(peakvels.wcs.wcs.cdelt[-1] * u.deg)
window_size = int(np.ceil(window_size))
peakvels_val = peakvels.value.copy()
peakvels_val[np.isnan(peakvels_val)] = 0.0
stddev = window_stdev(peakvels_val, window_size)
# Now we need to mask out values near edges of NaNs
mask = np.isfinite(peakvels)
mask = nd.binary_erosion(mask, iterations=window_size)
# plt.imshow(stddev, origin='lower', vmax=1.e4)
# plt.contour(mask, colors='r')
onecolumn_figure()
sb.set_palette('colorblind')
col_pal = sb.color_palette('colorblind')
_ = plt.hist(np.log10(stddev[mask] / 1000.), bins='auto', alpha=0.4, label='19"')
peakvels_38 = Projection.from_hdu(fits.open(fourteenB_HI_data_wGBT_path("smooth_2beam/M33_14B-088_HI.clean.image.GBT_feathered.38arcsec.peakvels.fits")))
mom0_38 = Projection.from_hdu(fits.open(fourteenB_HI_data_wGBT_path("smooth_2beam/M33_14B-088_HI.clean.image.GBT_feathered.38arcsec.mom0.fits")))
# Set the window size to be the FWHM of the beam
# Pixels are essentially square in WCS here.
window_size_38 = mom0_38.beam.major.to(u.deg) / \
(peakvels_38.wcs.wcs.cdelt[-1] * u.deg)
window_size_38 = int(np.ceil(window_size_38))
peakvels_val_38 = peakvels_38.value.copy()
peakvels_val_38[np.isnan(peakvels_val_38)] = 0.0
stddev_38 = window_stdev(peakvels_val_38, window_size_38)
# Now we need to mask out values near edges of NaNs
mask_38 = np.isfinite(peakvels_38)
mask_38 = nd.binary_erosion(mask_38, iterations=window_size_38)
mask_38[np.isnan(stddev_38)] = False
mask_38[stddev_38 == 0.0] = False
# Only compare where the original resolution values are defined
mask_38[~mask] = False
_ = plt.hist(np.log10(stddev_38[mask_38] / 1000.), bins='auto', alpha=0.4, label='38"')
peakvels_95 = Projection.from_hdu(fits.open(fourteenB_HI_data_wGBT_path("smooth_5beam/M33_14B-088_HI.clean.image.GBT_feathered.95arcsec.peakvels.fits")))
mom0_95 = Projection.from_hdu(fits.open(fourteenB_HI_data_wGBT_path("smooth_5beam/M33_14B-088_HI.clean.image.GBT_feathered.95arcsec.mom0.fits")))
# Set the window size to be the FWHM of the beam
# Pixels are essentially square in WCS here.
window_size_95 = mom0_95.beam.major.to(u.deg) / \
(peakvels_95.wcs.wcs.cdelt[-1] * u.deg)
window_size_95 = int(np.ceil(window_size_95))
peakvels_val_95 = peakvels_95.value.copy()
peakvels_val_95[np.isnan(peakvels_val_95)] = 0.0
stddev_95 = window_stdev(peakvels_val_95, window_size_95)
# Now we need to mask out values near edges of NaNs
mask_95 = np.isfinite(peakvels_95)
mask_95 = nd.binary_erosion(mask_95, iterations=window_size_95)
mask_95[np.isnan(stddev_95)] = False
mask_95[stddev_95 == 0.0] = False
mask_95[~mask] = False
_ = plt.hist(np.log10(stddev_95[mask_95] / 1000.), bins='auto', alpha=0.4, label='95"')
plt.legend(frameon=True)
plt.xlim([-1., 2.])
plt.axvline(np.log10(0.2), linestyle='--', linewidth=3, alpha=0.6,
color=col_pal[3])
# Fit some log-normals (close-ish to the shape) to get the peak location
# and estimate dispersion.
print("Fitting log-normals")
stddev_masked = stddev.copy()
stddev_masked[~mask] = np.NaN
# pdf = PDF(stddev_masked).run(verbose=False)
stddev_masked_38 = stddev_38.copy()
stddev_masked_38[~mask_38] = np.NaN
# pdf_38 = PDF(stddev_masked_38).run(verbose=False)
stddev_masked_95 = stddev_95.copy()
stddev_masked_95[~mask_95] = np.NaN
# pdf_95 = PDF(stddev_masked_95).run(verbose=False)
# def lognorm_mean(params):
# return float(stats.lognorm.stats(s=params[0], scale=params[1],
# moments='m'))
# print("Mean: 19'' {0}; 38'' {1}; 95'' {2} km/s"
# .format(lognorm_mean(pdf.model_params),
# lognorm_mean(pdf_38.model_params),
# lognorm_mean(pdf_95.model_params)))
# print("Dispersion: 19'' {0}; 38'' {1}; 95'' {2} km/s"
# .format(pdf.model_params[1], pdf_38.model_params[1],
# pdf_95.model_params[1]))
# plt.axvline(np.log10(lognorm_mean(pdf.model_params) / 1.e3), color=col_pal[0])
# plt.axvline(np.log10(lognorm_mean(pdf_38.model_params) / 1.e3), color=col_pal[1])
# plt.axvline(np.log10(lognorm_mean(pdf_95.model_params) / 1.e3), color=col_pal[2])
plt.axvline(np.log10(np.nanmedian(stddev_masked) / 1.e3), color=col_pal[0])
plt.axvline(np.log10(np.nanmedian(stddev_masked_38) / 1.e3), color=col_pal[1])
plt.axvline(np.log10(np.nanmedian(stddev_masked_95) / 1.e3), color=col_pal[2])
print("Median: 19'' {0}; 38'' {1}; 95'' {2} m/s"
.format(np.nanmedian(stddev_masked),
np.nanmedian(stddev_masked_38),
np.nanmedian(stddev_masked_95)))
# Median: 19'' 1200.64344986; 38'' 1040.18519281; 95'' 2125.5324707 m/s
plt.grid()
plt.xlabel("log Standard Deviation (km/s)")
plt.tight_layout()
plt.savefig(allfigs_path('HI_properties/peakvel_stddevfilter_histograms.pdf'))
plt.savefig(allfigs_path('HI_properties/peakvel_stddevfilter_histograms.png'))
plt.close()
# Trends against each other?
# comb_mask_38 = np.logical_and(np.isfinite(stddev_masked),
# np.isfinite(stddev_masked_38))
# hist2d(stddev_masked[comb_mask_38], stddev_masked_38[comb_mask_38])
# comb_mask_95 = np.logical_and(np.isfinite(stddev_masked),
# np.isfinite(stddev_masked_95))
# hist2d(stddev_masked[comb_mask_95], stddev_masked_95[comb_mask_95])
# Against radius?
radius = gal.radius(header=mom0.header).to(u.kpc)
# hist2d(radius.value[comb_mask_38],
# np.log10((stddev_masked_38 / stddev_masked)[comb_mask_38]))
# hist2d(radius.value[comb_mask_95],
# np.log10((stddev_masked_95 / stddev_masked)[comb_mask_95]))
# Mild increase with radius for the inner 2 kpc.
# Create radial profiles of avg std
beam_pix = 41.
rad_bins = np.arange(0, 7.5, 0.5)
med_bin, bin_edges, cts = binned_statistic(radius.value[mask],
stddev_masked[mask] / 1.e3,
bins=rad_bins,
statistic=np.mean)
# The last bin label corresponds to points that fall outside the largest bin
bin_cts = np.array([np.sum(cts == bin_lab) for bin_lab in np.unique(cts)[:-1]])
std_bin = binned_statistic(radius.value[mask],
stddev_masked[mask] / 1.e3,
bins=rad_bins,
statistic=np.std)[0]
# Correct by number of independent samples
# std_bin /= np.sqrt(bin_cts / beam_pix)
med_bin_38, bin_edges, cts = binned_statistic(radius.value[mask_38],
stddev_masked_38[mask_38] / 1.e3,
bins=rad_bins,
statistic=np.mean)
bin_cts_38 = np.array([np.sum(cts == bin_lab)
for bin_lab in np.unique(cts)[:-1]])
std_bin_38 = binned_statistic(radius.value[mask_38],
stddev_masked_38[mask_38] / 1.e3,
bins=rad_bins,
statistic=np.std)[0]
# std_bin_38 /= np.sqrt(bin_cts_38 / beam_pix)
med_bin_95, bin_edges, cts = binned_statistic(radius.value[mask_95],
stddev_masked_95[mask_95] / 1.e3,
bins=rad_bins,
statistic=np.mean)
bin_cts_95 = np.array([np.sum(cts == bin_lab)
for bin_lab in np.unique(cts)[:-1]])
std_bin_95 = binned_statistic(radius.value[mask_95],
stddev_masked_95[mask_95] / 1.e3,
bins=rad_bins,
statistic=np.std)[0]
# std_bin_95 /= np.sqrt(bin_cts_95 / beam_pix)
bin_cents = (bin_edges[1:] + bin_edges[:-1]) / 2.
plt.axhline(0.2, color=col_pal[3], linestyle='--', linewidth=4, alpha=0.75)
plt.axhline(2.6, color=col_pal[5], linestyle=':', linewidth=4, alpha=0.75)
plt.errorbar(bin_cents, med_bin, fmt='o-', drawstyle='steps-mid',
yerr=std_bin, label='80 pc (19")')
plt.errorbar(bin_cents, med_bin_38, fmt='D-', drawstyle='steps-mid',
yerr=std_bin_38, label='160 pc (38")')
plt.errorbar(bin_cents, med_bin_95, fmt='^-', drawstyle='steps-mid',
yerr=std_bin_95, label='380 pc (95")')
plt.legend(frameon=True)
plt.grid()
plt.ylabel("Standard deviation of\n peak velocity (km/s)")
plt.xlabel("Radius (kpc)")
plt.tight_layout()
plt.savefig(allfigs_path('HI_properties/peakvel_stddevfilter_radprofile.pdf'))
plt.savefig(allfigs_path('HI_properties/peakvel_stddevfilter_radprofile.png'))
plt.close()
# Calculate the bin-averaged velocity dispersions from the radial profiles
pix_area = np.sum(radius < 7 * u.kpc)
med_bin_area_avg = (bin_cts * med_bin).sum() / pix_area
eight5_bin_area_avg = (bin_cts * (med_bin + std_bin)).sum() / pix_area
fifteen_bin_area_avg = (bin_cts * (med_bin - std_bin)).sum() / pix_area
med_bin_38_area_avg = (bin_cts * med_bin_38).sum() / pix_area
eight5_bin_38_area_avg = (bin_cts * (med_bin_38 + std_bin_38)).sum() / pix_area
fifteen_bin_38_area_avg = (bin_cts * (med_bin_38 - std_bin_38)).sum() / pix_area
med_bin_95_area_avg = (bin_cts * med_bin_95).sum() / pix_area
eight5_bin_95_area_avg = (bin_cts * (med_bin_95 + std_bin_95)).sum() / pix_area
fifteen_bin_95_area_avg = (bin_cts * (med_bin_95 - std_bin_95)).sum() / pix_area
print("Avg. broadening: {0}. Limits: {1}, {2}".format(med_bin_area_avg,
fifteen_bin_area_avg,
eight5_bin_area_avg))
print("38''Avg. broadening: {0}. Limits: {1}, {2}".format(med_bin_38_area_avg,
fifteen_bin_38_area_avg,
eight5_bin_38_area_avg))
print("95''Avg. broadening: {0}. Limits: {1}, {2}".format(med_bin_95_area_avg,
fifteen_bin_95_area_avg,
eight5_bin_95_area_avg))
# Can the line width increases at 95'' be entirely due to beam smearing?
hi_width_95 = 8.9
co_width_95 = 7.3
hi_width_38 = 8.0
co_width_38 = 6.0
hi_width_95_bc = np.sqrt(hi_width_95**2 - med_bin_95_area_avg**2)
hi_width_95_bc_err = (2 / hi_width_95_bc) * \
(hi_width_95 * 0.1 + med_bin_95_area_avg * 1.0)
co_width_95_bc = np.sqrt(co_width_95**2 - med_bin_95_area_avg**2)
co_width_95_bc_err = (2 / co_width_95_bc) * \
(co_width_95 * 1.3 + med_bin_95_area_avg * 1.0)
print("Beam smear-corr 95'' HI: {0}+/-{1}".format(hi_width_95_bc,
hi_width_95_bc_err))
print("Beam smear-corr 95'' CO: {0}+/-{1}".format(co_width_95_bc,
co_width_95_bc_err))
|
|
from django.contrib import admin
from django import forms
from django.forms.models import ModelForm, ModelChoiceField
from django.forms import ValidationError
from mptt.admin import MPTTModelAdmin, MPTTAdminForm
from mptt.fields import TreeNodeChoiceField
from pidman.admin import admin_site
from pidman.pid.ark_utils import normalize_ark, invalid_qualifier_characters
from pidman.pid.models import ExtSystem, Pid, Proxy, Target, Policy, Domain
class TargetInlineForm(ModelForm):
"""Base Target inline form for use in editing ARKs and PURLs."""
class Meta:
model = Target
fields = ('qualify', 'uri', 'proxy', 'active')
def clean_qualify(self):
# check for any characters not allowed in the qualifier
invalid_chars = invalid_qualifier_characters(self.cleaned_data["qualify"])
if invalid_chars:
raise ValidationError("Not permitted: " + ', '.join(invalid_chars))
# normalize according to how the ARK will be resolved
return normalize_ark(self.cleaned_data["qualify"])
class TargetInline(admin.TabularInline):
model = Target
fields = ('qualify', 'uri', 'proxy', 'active')
# no max, default number of extra fields
form = TargetInlineForm
can_delete = True # allow ARK target deletion (e.g., qualifiers)
# NOTE: should be possible to extend inline template here
# to display link status - last checked / message / etc
class PurlTargetInline(TargetInline):
verbose_name_plural = "Target"
max_num = 1
can_delete = False # do not allow PURL target deletion (only one target)
fields = ('uri', 'proxy', 'active')
class PidAdminForm(ModelForm):
domain = TreeNodeChoiceField(queryset=Domain.objects.all())
class Meta:
model = Pid
exclude = []
class PidAdmin(admin.ModelAdmin):
# browse display: type (ark/purl), domain/collection, name/description, and pid url (not target url)
# note: including pid for link to edit page, since name is optional and not always present
# including dates in list display for sorting purposes
# sort columns by: type, domain/collection, name, (pid url?), date created/modified ascending/descending
list_display = ('pid', 'truncated_name', 'type', 'created_at', 'updated_at',
'domain', "primary_target_uri", "is_active")
#, 'linkcheck_status')
# filters: collection/domain, creator/user, type (ark/purl), date ranges (created or modified)
list_filter = (
'type', ('domain', admin.RelatedOnlyFieldListFilter),
'ext_system',
('creator', admin.RelatedOnlyFieldListFilter),
'created_at', 'updated_at')
list_select_related = True # may want to limit this some
form = PidAdminForm
# now possible in django 1.1 - fields to use here?
# list_editable = ('name', 'domain')
date_hierarchy = 'created_at'
search_fields = ['name', 'pid', 'ext_system_key', 'target__uri']
# keep pid type in a separate fieldset in order to suppress it on edit
fieldset_pidtype = ('Pid Type', {
'fields': ('type',),
'description': "Select type of pid to create."
})
fieldset_pidinfo = ("Pid Information", {
'fields': ('name', 'domain', ('ext_system', 'ext_system_key'), 'policy')
})
fieldsets = (fieldset_pidtype, fieldset_pidinfo)
# by default, use purl target inline; if saved as an ark, will use TargetInline
inlines = [PurlTargetInline]
class Media:
css = {
"all": ("css/font-awesome.min.css",)
}
def get_inline_instances(self, request, obj=None):
# get target inline class based on the object
# when adding a new object, restrict targets to purl type until saved
# once the object is saved, display purl or ark target inline
# edit form based on pid type
inlines = list(self.inlines) # make a new copy of inline config
if obj is not None and obj.type == 'Ark':
inlines[0] = TargetInline
return [inline(self.model, self.admin_site) for inline in inlines]
# set creator and editor to current user before saving
def save_model(self, request, obj, form, change):
obj.editor = request.user
if not (change):
obj.creator = request.user
obj.save()
    # disallow deleting Pids; set targets to inactive instead
def has_delete_permission(self, request, obj=None):
return False
def get_queryset(self, request):
# extend queryset to prefetch targets & linkcheck status,
# which are used in the change list display
pidqs = super(PidAdmin, self).get_queryset(request)
pidqs = pidqs.prefetch_related('target_set', 'domain', 'creator')
# 'target_set__linkcheck__url')
return pidqs
def formfield_for_dbfield(self, db_field, **kwargs):
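        # Cache the evaluated domain choices on the request so the Domain
        # queryset is evaluated at most once per request rather than once
        # for every form field that needs it.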
request = kwargs['request']
formfield = super(PidAdmin, self).formfield_for_dbfield(db_field, **kwargs)
if db_field.name == 'domain':
choices = getattr(request, '_domain_choices_cache', None)
if choices is None:
request._domain_choices_cache = choices = list(formfield.choices)
formfield.choices = choices
return formfield
class ExtSystemAdmin(admin.ModelAdmin):
list_display = ('name', 'key_field', 'updated_at')
class ProxyAdmin(admin.ModelAdmin):
list_display = ('name', 'transform', 'updated_at')
class PolicyAdmin(admin.ModelAdmin):
list_display = ('commitment', 'created_at')
# class DomainAdminForm(forms.ModelForm):
class DomainAdminForm(MPTTAdminForm):
# restrict list of domains allowed to be parents to those domains
# without a parent (1 level deep only)
parent = TreeNodeChoiceField(queryset=Domain.objects.filter(parent__isnull=True),
required=False)
class Meta:
model = Domain
exclude = []
def clean_parent(self):
parent = self.cleaned_data["parent"]
if parent:
# check parent id - cannot point to self
if parent.id == self.instance.id:
raise ValidationError("Not permitted: a domain can not be its own parent",
code='invalid')
# restrict hierarchy to one level
elif parent.parent:
raise ValidationError("Domain hierarchy restricted to depth of 1; " +
parent.name + " is a collection of " + parent.parent.name,
code='invalid')
return parent
def clean(self):
# policy is optional by default, but top-level domains must have one (can't inherit from parent)
        if not self.cleaned_data.get('parent', None) and not self.cleaned_data.get('policy'):
raise ValidationError("Policy is required for top-level domains")
return self.cleaned_data
class CollectionInline(admin.TabularInline):
model = Domain
verbose_name = "Domain"
verbose_name_plural = "Collections"
# parent = TreeNodeChoiceField(queryset=Domain.objects.all(),
# level_indicator=u'+--')
class DomainAdmin(MPTTModelAdmin):
form = DomainAdminForm
mptt_level_indent = 20
list_display = ('name', 'num_pids', 'subdomain_count', 'show_policy')
inlines = [CollectionInline]
def get_queryset(self, request):
# extend queryset to prefetch policy and parent,
# which are used in the change list display
domains = super(DomainAdmin, self).get_queryset(request)
domains = domains.prefetch_related('policy', 'parent')
return domains
admin_site.register(Pid, PidAdmin)
admin_site.register(Proxy, ProxyAdmin)
admin_site.register(ExtSystem, ExtSystemAdmin)
admin_site.register(Policy, PolicyAdmin)
admin_site.register(Domain, DomainAdmin)
|
|
import json
from collections import OrderedDict
from datetime import datetime
from dateutil.relativedelta import relativedelta
from dateutil.rrule import rrule, MONTHLY
from django.http import JsonResponse
from django.utils.decorators import method_decorator
from django.views.generic.base import View
from corehq.apps.domain.decorators import login_and_domain_required
from corehq.apps.fixtures.models import FixtureDataItem
from corehq.apps.groups.models import Group
from corehq.apps.locations.models import SQLLocation
from custom.champ.sqldata import TargetsDataSource, UICFromEPMDataSource, UICFromCCDataSource, \
HivStatusDataSource, FormCompletionDataSource, FirstArtDataSource, LastVLTestDataSource, \
ChampFilter
from custom.champ.utils import PREVENTION_XMLNS, POST_TEST_XMLNS, ACCOMPAGNEMENT_XMLNS, \
SUIVI_MEDICAL_XMLNS, ENHANCED_PEER_MOBILIZATION, CHAMP_CAMEROON, TARGET_XMLNS
def get_user_ids_for_group(groups):
users = []
for group_id in groups:
group = Group.get(group_id)
users.extend(group.get_user_ids())
return users
def get_age_ranges(ages):
ranges = []
for age in ages:
if age != '50+ yrs' and age != '':
start_end = age.split(" ")[0].split("-")
ranges.append({'start': start_end[0], 'end': start_end[1]})
elif age == '50+ yrs':
ranges.append({'start': 50, 'end': 200})
return ranges
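# For example, get_age_ranges(['10-14 yrs', '50+ yrs']) yields
# [{'start': '10', 'end': '14'}, {'start': 50, 'end': 200}]; the bounds stay
# strings for dash-separated ranges and are ints for the open-ended one.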
def update_date_property(config, post_data, property, filter_key):
value = post_data.get(property, '')
if value:
start_key = '%s_start' % filter_key
end_key = '%s_end' % filter_key
start, end = value.split(' - ')
config.update({
start_key: start,
end_key: end
})
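# For example, with a (hypothetical) payload of
# {'kp_prev_visit_date': '2018-01-01 - 2018-03-31'} and filter_key
# 'visit_date', the config gains
# {'visit_date_start': '2018-01-01', 'visit_date_end': '2018-03-31'}.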
class ChampView(View):
@property
def post_data(self):
return json.loads(self.request.body.decode('utf-8'))
def get_list_property(self, property):
value = self.post_data.get(property, [])
return [] if '' in value else value
@method_decorator([login_and_domain_required], name='dispatch')
class PrevisionVsAchievementsView(ChampView):
def get_target_data(self, domain):
config = {
'domain': domain,
'district': self.get_list_property('target_district'),
'cbo': self.get_list_property('target_cbo'),
'userpl': self.get_list_property('target_userpl'),
'fiscal_year': self.post_data.get('target_fiscal_year', None)
}
clienttype = self.get_list_property('target_clienttype')
for idx, type in enumerate(clienttype):
if type == 'client_fsw':
type = 'cfsw'
clienttype[idx] = type.lower()
config.update({'clienttype': clienttype})
target_data = list(TargetsDataSource(config=config).data.values())[0]
return target_data
def get_kp_prev_achievement(self, domain):
config = {
'domain': domain,
'age': get_age_ranges(self.get_list_property('kp_prev_age')),
'district': self.get_list_property('kp_prev_district'),
'activity_type': self.post_data.get('kp_prev_activity_type', None),
'type_visit': self.post_data.get('kp_prev_visit_type', None),
'client_type': self.get_list_property('kp_prev_client_type'),
'user_id': get_user_ids_for_group(self.get_list_property('kp_prev_user_group')),
'want_hiv_test': self.post_data.get('kp_prev_want_hiv_test', None),
}
update_date_property(config, self.post_data, 'kp_prev_visit_date', 'visit_date')
achievement = UICFromEPMDataSource(config=config).data
return achievement.get(PREVENTION_XMLNS, {}).get('uic', 0)
def get_htc_tst_achievement(self, domain):
config = {
'domain': domain,
'age_range': self.get_list_property('htc_tst_age_range'),
'district': self.get_list_property('htc_tst_district'),
'client_type': self.get_list_property('htc_tst_client_type'),
'user_id': get_user_ids_for_group(self.get_list_property('htc_tst_user_group')),
}
update_date_property(config, self.post_data, 'htc_tst_post_date', 'posttest_date')
update_date_property(config, self.post_data, 'htc_tst_hiv_test_date', 'hiv_test_date')
achievement = UICFromCCDataSource(config=config).data
return achievement.get(POST_TEST_XMLNS, {}).get('uic', 0)
def get_htc_pos_achievement(self, domain):
config = {
'domain': domain,
'age_range': self.get_list_property('htc_pos_age_range'),
'district': self.get_list_property('htc_pos_district'),
'client_type': self.get_list_property('htc_pos_client_type'),
'user_id': get_user_ids_for_group(self.get_list_property('htc_pos_user_group')),
}
update_date_property(config, self.post_data, 'htc_pos_post_date', 'posttest_date')
update_date_property(config, self.post_data, 'htc_pos_hiv_test_date', 'hiv_test_date')
achievement = HivStatusDataSource(config=config).data
return achievement.get(POST_TEST_XMLNS, {}).get('uic', 0)
def get_care_new_achivement(self, domain):
config = {
'domain': domain,
'hiv_status': self.get_list_property('care_new_hiv_status'),
'client_type': self.get_list_property('care_new_client_type'),
'age_range': self.get_list_property('care_new_age_range'),
'district': self.get_list_property('care_new_district'),
'user_id': get_user_ids_for_group(self.get_list_property('care_new_user_group')),
}
update_date_property(config, self.post_data, 'care_new_date_handshake', 'date_handshake')
achievement = FormCompletionDataSource(config=config).data
return achievement.get(ACCOMPAGNEMENT_XMLNS, {}).get('uic', 0)
def get_tx_new_achivement(self, domain):
config = {
'domain': domain,
'hiv_status': self.get_list_property('tx_new_hiv_status'),
'client_type': self.get_list_property('tx_new_client_type'),
'age_range': self.get_list_property('tx_new_age_range'),
'district': self.get_list_property('tx_new_district'),
'user_id': get_user_ids_for_group(self.get_list_property('tx_new_user_group')),
}
update_date_property(config, self.post_data, 'tx_new_first_art_date', 'first_art_date')
achievement = FirstArtDataSource(config=config).data
return achievement.get(SUIVI_MEDICAL_XMLNS, {}).get('uic', 0)
def get_tx_undetect_achivement(self, domain):
config = {
'domain': domain,
'hiv_status': self.get_list_property('tx_undetect_hiv_status'),
'client_type': self.get_list_property('tx_undetect_client_type'),
'age_range': self.get_list_property('tx_undetect_age_range'),
'district': self.get_list_property('tx_undetect_district'),
'undetect_vl': self.post_data.get('tx_undetect_undetect_vl', None),
'user_id': get_user_ids_for_group(self.get_list_property('tx_undetect_user_group')),
}
update_date_property(config, self.post_data, 'tx_undetect_date_last_vl_test', 'date_last_vl_test')
achievement = LastVLTestDataSource(config=config).data
return achievement.get(SUIVI_MEDICAL_XMLNS, {}).get('uic', 0)
def generate_data(self, domain):
targets = self.get_target_data(domain)
return {
'chart': [
{
'key': 'Target',
'color': 'blue',
'values': [
{'x': 'KP_PREV', 'y': (targets.get('target_kp_prev', 0) or 0)},
{'x': 'HTC_TST', 'y': (targets.get('target_htc_tst', 0) or 0)},
{'x': 'HTC_POS', 'y': (targets.get('target_htc_pos', 0) or 0)},
{'x': 'CARE_NEW', 'y': (targets.get('target_care_new', 0) or 0)},
{'x': 'TX_NEW', 'y': (targets.get('target_tx_new', 0) or 0)},
{'x': 'TX_UNDETECT', 'y': (targets.get('target_tx_undetect', 0) or 0)}
]
},
{
'key': 'Achievements',
'color': 'orange',
'values': [
{'x': 'KP_PREV', 'y': self.get_kp_prev_achievement(domain)},
{'x': 'HTC_TST', 'y': self.get_htc_tst_achievement(domain)},
{'x': 'HTC_POS', 'y': self.get_htc_pos_achievement(domain)},
{'x': 'CARE_NEW', 'y': self.get_care_new_achivement(domain)},
{'x': 'TX_NEW', 'y': self.get_tx_new_achivement(domain)},
{'x': 'TX_UNDETECT', 'y': self.get_tx_undetect_achivement(domain)}
]
}
]
}
def post(self, request, *args, **kwargs):
domain = self.kwargs['domain']
return JsonResponse(data=self.generate_data(domain))
@method_decorator([login_and_domain_required], name='dispatch')
class PrevisionVsAchievementsTableView(ChampView):
def generate_data(self, domain):
config = {
'domain': domain,
'district': self.get_list_property('district'),
'cbo': self.get_list_property('cbo'),
'type_visit': self.post_data.get('visit_type', None),
'activity_type': self.post_data.get('activity_type', None),
'client_type': self.get_list_property('client_type'),
'user_id': get_user_ids_for_group(self.get_list_property('organization')),
'fiscal_year': self.post_data.get('fiscal_year', None),
}
update_date_property(config, self.post_data, 'visit_date', 'visit_date')
update_date_property(config, self.post_data, 'posttest_date', 'posttest_date')
update_date_property(config, self.post_data, 'first_art_date', 'first_art_date')
update_date_property(config, self.post_data, 'date_handshake', 'date_handshake')
update_date_property(config, self.post_data, 'date_last_vl_test', 'date_last_vl_test')
target_client_types = []
for client_type in config['client_type']:
if client_type == 'client_fsw':
client_type = 'cfsw'
target_client_types.append(client_type.lower())
config.update({'clienttype': target_client_types})
targets = list(TargetsDataSource(config=config.copy()).data.values())[0]
kp_prev = UICFromEPMDataSource(config=config.copy()).data
htc_tst = UICFromCCDataSource(config=config.copy()).data
htc_pos = HivStatusDataSource(config=config.copy()).data
care_new = FormCompletionDataSource(config=config.copy()).data
tx_new = FirstArtDataSource(config=config.copy()).data
        tx_undetect = LastVLTestDataSource(config=config).data
return {
'target_kp_prev': (targets.get('target_kp_prev', 0) or 0),
'target_htc_tst': (targets.get('target_htc_tst', 0) or 0),
'target_htc_pos': (targets.get('target_htc_pos', 0) or 0),
'target_care_new': (targets.get('target_care_new', 0) or 0),
'target_tx_new': (targets.get('target_tx_new', 0) or 0),
'target_tx_undetect': (targets.get('target_tx_undetect', 0) or 0),
'kp_prev': (kp_prev.get(PREVENTION_XMLNS, {}).get('uic', 0) or 0),
'htc_tst': (htc_tst.get(POST_TEST_XMLNS, {}).get('uic', 0) or 0),
'htc_pos': (htc_pos.get(POST_TEST_XMLNS, {}).get('uic', 0) or 0),
'care_new': (care_new.get(ACCOMPAGNEMENT_XMLNS, {}).get('uic', 0) or 0),
'tx_new': (tx_new.get(SUIVI_MEDICAL_XMLNS, {}).get('uic', 0) or 0),
            'tx_undetect': (tx_undetect.get(SUIVI_MEDICAL_XMLNS, {}).get('uic', 0) or 0),
}
def post(self, request, *args, **kwargs):
domain = self.kwargs['domain']
return JsonResponse(data=self.generate_data(domain))
@method_decorator([login_and_domain_required], name='dispatch')
class ServiceUptakeView(ChampView):
def generate_data(self, domain):
month_start = self.post_data.get('month_start', 1)
year_start = self.post_data.get('year_start', datetime.now().year)
month_end = self.post_data.get('month_end', datetime.now().month)
year_end = self.post_data.get('year_end', datetime.now().year)
start_date = datetime(year_start, month_start, 1)
end_date = (datetime(year_end, month_end, 1) + relativedelta(months=1)) - relativedelta(days=1)
config = {
'domain': domain,
'district': self.get_list_property('district'),
'type_visit': self.post_data.get('visit_type', None),
'activity_type': self.post_data.get('activity_type', None),
'client_type': self.get_list_property('client_type'),
'user_id': get_user_ids_for_group(self.get_list_property('organization')),
'visit_date_start': start_date,
'visit_date_end': end_date,
'posttest_date_start': start_date,
'posttest_date_end': end_date,
'date_handshake_start': start_date,
'date_handshake_end': end_date,
}
kp_prev = UICFromEPMDataSource(config=config.copy(), replace_group_by='kp_prev_month').data
htc_tst = UICFromCCDataSource(config=config.copy(), replace_group_by='htc_month').data
htc_pos = HivStatusDataSource(config=config.copy(), replace_group_by='htc_month').data
care_new = FormCompletionDataSource(config=config, replace_group_by='care_new_month').data
htc_uptake_chart_data = OrderedDict()
htc_yield_chart_data = OrderedDict()
link_chart_data = OrderedDict()
rrule_dates = [
rrule_date for rrule_date in rrule(
MONTHLY,
dtstart=start_date,
until=end_date
)
]
tickValues = []
for rrule_dt in rrule_dates:
date_in_milliseconds = int(rrule_dt.date().strftime("%s")) * 1000
tickValues.append(date_in_milliseconds)
htc_uptake_chart_data.update({date_in_milliseconds: 0})
htc_yield_chart_data.update({date_in_milliseconds: 0})
link_chart_data.update({date_in_milliseconds: 0})
for row in htc_tst.values():
date = row['htc_month']
date_in_milliseconds = int(date.strftime("%s")) * 1000
nom = (row['uic'] or 0)
denom = (kp_prev[date]['uic'] or 1) if date in kp_prev else 1
htc_uptake_chart_data[date_in_milliseconds] = nom / denom
for row in htc_pos.values():
date = row['htc_month']
date_in_milliseconds = int(date.strftime("%s")) * 1000
nom = (row['uic'] or 0)
denom = (htc_tst[date]['uic'] or 1) if date in htc_tst else 1
htc_yield_chart_data[date_in_milliseconds] = nom / denom
for row in care_new.values():
date = row['care_new_month']
date_in_milliseconds = int(date.strftime("%s")) * 1000
nom = (row['uic'] or 0)
denom = (htc_pos[date]['uic'] or 1) if date in htc_pos else 1
link_chart_data[date_in_milliseconds] = nom / denom
return {
'chart': [
{
"values": [
{'x': key, 'y': value} for key, value in htc_uptake_chart_data.items()
],
"key": "HTC_uptake",
"strokeWidth": 2,
"classed": "dashed",
"color": "blue"
},
{
"values": [
{'x': key, 'y': value} for key, value in htc_yield_chart_data.items()
],
"key": "HTC_yield",
"strokeWidth": 2,
"classed": "dashed",
"color": "orange"
},
{
"values": [
{'x': key, 'y': value} for key, value in link_chart_data.items()
],
"key": "Link to care",
"strokeWidth": 2,
"classed": "dashed",
"color": "gray"
}
],
'tickValues': tickValues
}
def post(self, request, *args, **kwargs):
domain = self.kwargs['domain']
return JsonResponse(data=self.generate_data(domain))
@method_decorator([login_and_domain_required], name='dispatch')
class ChampFilterView(View):
xmlns = None
table_name = None
column_name = None
def get(self, request, *args, **kwargs):
domain = self.kwargs['domain']
return JsonResponse(data={
'options': ChampFilter(domain, self.xmlns, self.table_name, self.column_name).data
})
class PreventionPropertiesFilter(ChampFilterView):
xmlns = PREVENTION_XMLNS
table_name = ENHANCED_PEER_MOBILIZATION
class PostTestFilter(ChampFilterView):
xmlns = POST_TEST_XMLNS
table_name = CHAMP_CAMEROON
class TargetFilter(ChampFilterView):
xmlns = TARGET_XMLNS
table_name = ENHANCED_PEER_MOBILIZATION
class DistrictFilterPrevView(PreventionPropertiesFilter):
column_name = 'district'
class CBOFilterView(TargetFilter):
column_name = 'cbo'
class UserPLFilterView(TargetFilter):
column_name = 'userpl'
class UserGroupsFilter(View):
def get(self, request, *args, **kwargs):
domain = self.kwargs['domain']
groups = Group.by_domain(domain)
options = [{'id': '', 'text': 'All'}]
return JsonResponse(data={
'options': options + [{'id': group.get_id, 'text': group.name} for group in groups]
})
class OrganizationsFilter(View):
def get(self, request, *args, **kwargs):
domain = self.kwargs['domain']
locations = SQLLocation.objects.filter(domain=domain).exclude(location_type__code='dic')
options = [{'id': '', 'value': 'All'}]
return JsonResponse(data={
'options': options + [{'id': loc.location_id, 'value': loc.name} for loc in locations]
})
class HierarchyFilter(View):
def get(self, request, *args, **kwargs):
domain = self.kwargs['domain']
districts = FixtureDataItem.get_item_list(domain, 'district')
cbos = FixtureDataItem.get_item_list(domain, 'cbo')
clienttypes = FixtureDataItem.get_item_list(domain, 'clienttype')
userpls = FixtureDataItem.get_item_list(domain, 'userpl')
def to_filter_format(data, parent_key=None):
locations = [dict(
id='',
text='All'
)]
for row in data:
loc_id = row.fields['id'].field_list[0].field_value
loc = dict(
id=loc_id,
text=loc_id
)
if parent_key:
parent_id = row.fields[parent_key].field_list[0].field_value
loc.update({'parent_id': parent_id})
locations.append(loc)
return locations
hierarchy = {
'districts': to_filter_format(districts),
'cbos': to_filter_format(cbos, 'district_id'),
'clienttypes': to_filter_format(clienttypes, 'cbo_id'),
'userpls': to_filter_format(userpls, 'clienttype_id')
}
return JsonResponse(data=hierarchy)
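# The hierarchy response pairs each option with the id of its parent level,
# e.g. (hypothetical fixture values) a cbo row with id 'CBO1' and district_id
# 'Centre' becomes {'id': 'CBO1', 'text': 'CBO1', 'parent_id': 'Centre'} under
# 'cbos', while every level starts with the {'id': '', 'text': 'All'} option.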
|
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import time
import traceback
import inspect
import shutil
import os
import sqlalchemy as sa
import tempfile
from buildbot.process import metrics
from twisted.internet import reactor, threads
from twisted.python import threadpool, log
# set this to True for *very* verbose query debugging output; this can
# be monkey-patched from master.cfg, too:
# from buildbot.db import pool
# pool.debug = True
debug = False
_debug_id = 1
def timed_do_fn(f):
"""Decorate a do function to log before, after, and elapsed time,
with the name of the calling function. This is not speedy!"""
def wrap(callable, *args, **kwargs):
global _debug_id
# get a description of the function that called us
st = traceback.extract_stack(limit=2)
file, line, name, _ = st[0]
# and its locals
frame = inspect.currentframe(1)
locals = frame.f_locals
# invent a unique ID for the description
id, _debug_id = _debug_id, _debug_id+1
descr = "%s-%08x" % (name, id)
start_time = time.time()
log.msg("%s - before ('%s' line %d)" % (descr, file, line))
for name in locals:
if name in ('self', 'thd'):
continue
log.msg("%s - %s = %r" % (descr, name, locals[name]))
# wrap the callable to log the begin and end of the actual thread
# function
def callable_wrap(*args, **kargs):
log.msg("%s - thd start" % (descr,))
try:
return callable(*args, **kwargs)
finally:
log.msg("%s - thd end" % (descr,))
d = f(callable_wrap, *args, **kwargs)
def after(x):
end_time = time.time()
elapsed = (end_time - start_time) * 1000
log.msg("%s - after (%0.2f ms elapsed)" % (descr, elapsed))
return x
d.addBoth(after)
return d
wrap.__name__ = f.__name__
wrap.__doc__ = f.__doc__
return wrap
class DBThreadPool(threadpool.ThreadPool):
running = False
# Some versions of SQLite incorrectly cache metadata about which tables are
# and are not present on a per-connection basis. This cache can be flushed
# by querying the sqlite_master table. We currently assume all versions of
# SQLite have this bug, although it has only been observed in 3.4.2. A
# dynamic check for this bug would be more appropriate. This is documented
# in bug #1810.
__broken_sqlite = None
def __init__(self, engine, verbose=False):
# verbose is used by upgrade scripts, and if it is set we should print
# messages about versions and other warnings
log_msg = log.msg
if verbose:
def log_msg(m):
print m
pool_size = 5
# If the engine has an C{optimal_thread_pool_size} attribute, then the
# maxthreads of the thread pool will be set to that value. This is
# most useful for SQLite in-memory connections, where exactly one
# connection (and thus thread) should be used.
if hasattr(engine, 'optimal_thread_pool_size'):
pool_size = engine.optimal_thread_pool_size
threadpool.ThreadPool.__init__(self,
minthreads=1,
maxthreads=pool_size,
name='DBThreadPool')
self.engine = engine
if engine.dialect.name == 'sqlite':
vers = self.get_sqlite_version()
if vers < (3,7):
log_msg("Using SQLite Version %s" % (vers,))
log_msg("NOTE: this old version of SQLite does not support "
"WAL journal mode; a busy master may encounter "
"'Database is locked' errors. Consider upgrading.")
if vers < (3,4):
log_msg("NOTE: this old version of SQLite is not "
"supported.")
raise RuntimeError("unsupported SQLite version")
if self.__broken_sqlite is None:
self.__class__.__broken_sqlite = self.detect_bug1810()
brkn = self.__broken_sqlite
if brkn:
log_msg("Applying SQLite workaround from Buildbot bug #1810")
self._start_evt = reactor.callWhenRunning(self._start)
# patch the do methods to do verbose logging if necessary
if debug:
self.do = timed_do_fn(self.do)
self.do_with_engine = timed_do_fn(self.do_with_engine)
def _start(self):
self._start_evt = None
if not self.running:
self.start()
self._stop_evt = reactor.addSystemEventTrigger(
'during', 'shutdown', self._stop)
self.running = True
def _stop(self):
self._stop_evt = None
self.stop()
self.engine.dispose()
self.running = False
def shutdown(self):
"""Manually stop the pool. This is only necessary from tests, as the
pool will stop itself when the reactor stops under normal
circumstances."""
if not self._stop_evt:
return # pool is already stopped
reactor.removeSystemEventTrigger(self._stop_evt)
self._stop()
# Try about 170 times over the space of a day, with the last few tries
# being about an hour apart. This is designed to span a reasonable amount
# of time for repairing a broken database server, while still failing
# actual problematic queries eventually
BACKOFF_START = 1.0
BACKOFF_MULT = 1.05
MAX_OPERATIONALERROR_TIME = 3600*24 # one day
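    # Sanity check on the figures above: the k-th sleep lasts
    # BACKOFF_START * BACKOFF_MULT**(k-1) seconds, so n sleeps span roughly
    # (1.05**n - 1) / 0.05 seconds. Setting that equal to one day (86400 s)
    # gives 1.05**n ~= 4321, i.e. n ~= 172 attempts, with the final sleeps
    # around 4000 s -- about an hour apart, as described.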
def __thd(self, with_engine, callable, args, kwargs):
# try to call callable(arg, *args, **kwargs) repeatedly until no
# OperationalErrors occur, where arg is either the engine (with_engine)
# or a connection (not with_engine)
backoff = self.BACKOFF_START
start = time.time()
while True:
if with_engine:
arg = self.engine
else:
arg = self.engine.contextual_connect()
if self.__broken_sqlite: # see bug #1810
arg.execute("select * from sqlite_master")
try:
try:
rv = callable(arg, *args, **kwargs)
assert not isinstance(rv, sa.engine.ResultProxy), \
"do not return ResultProxy objects!"
except sa.exc.OperationalError, e:
text = e.orig.args[0]
if not isinstance(text, basestring):
raise
if "Lost connection" in text \
or "database is locked" in text:
# see if we've retried too much
elapsed = time.time() - start
if elapsed > self.MAX_OPERATIONALERROR_TIME:
raise
metrics.MetricCountEvent.log(
"DBThreadPool.retry-on-OperationalError")
log.msg("automatically retrying query after "
"OperationalError (%ss sleep)" % backoff)
# sleep (remember, we're in a thread..)
time.sleep(backoff)
backoff *= self.BACKOFF_MULT
# and re-try
continue
else:
raise
finally:
if not with_engine:
arg.close()
break
return rv
def do(self, callable, *args, **kwargs):
return threads.deferToThreadPool(reactor, self,
self.__thd, False, callable, args, kwargs)
def do_with_engine(self, callable, *args, **kwargs):
return threads.deferToThreadPool(reactor, self,
self.__thd, True, callable, args, kwargs)
def detect_bug1810(self):
# detect buggy SQLite implementations; call only for a known-sqlite
# dialect
try:
import pysqlite2.dbapi2 as sqlite
sqlite = sqlite
except ImportError:
import sqlite3 as sqlite
tmpdir = tempfile.mkdtemp()
dbfile = os.path.join(tmpdir, "detect_bug1810.db")
def test(select_from_sqlite_master=False):
conn1 = None
conn2 = None
try:
conn1 = sqlite.connect(dbfile)
curs1 = conn1.cursor()
curs1.execute("PRAGMA table_info('foo')")
conn2 = sqlite.connect(dbfile)
curs2 = conn2.cursor()
curs2.execute("CREATE TABLE foo ( a integer )")
if select_from_sqlite_master:
curs1.execute("SELECT * from sqlite_master")
curs1.execute("SELECT * from foo")
finally:
if conn1:
conn1.close()
if conn2:
conn2.close()
os.unlink(dbfile)
try:
test()
except sqlite.OperationalError:
# this is the expected error indicating it's broken
shutil.rmtree(tmpdir)
return True
# but this version should not fail..
test(select_from_sqlite_master=True)
shutil.rmtree(tmpdir)
return False # not broken - no workaround required
def get_sqlite_version(self):
engine = sa.create_engine('sqlite://')
conn = engine.contextual_connect()
try:
r = conn.execute("SELECT sqlite_version()")
vers_row = r.fetchone()
r.close()
except:
return (0,)
if vers_row:
try:
return tuple(map(int, vers_row[0].split('.')))
except (TypeError, ValueError):
return (0,)
else:
return (0,)
|
|
"""Support for Minut Point."""
import asyncio
import logging
from httpx import ConnectTimeout
from pypoint import PointSession
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import webhook
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
CONF_TOKEN,
CONF_WEBHOOK_ID,
Platform,
)
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import config_validation as cv, device_registry
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import DeviceInfo, Entity
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.typing import ConfigType
from homeassistant.util.dt import as_local, parse_datetime, utc_from_timestamp
from . import config_flow
from .const import (
CONF_WEBHOOK_URL,
DOMAIN,
EVENT_RECEIVED,
POINT_DISCOVERY_NEW,
SCAN_INTERVAL,
SIGNAL_UPDATE_ENTITY,
SIGNAL_WEBHOOK,
)
_LOGGER = logging.getLogger(__name__)
DATA_CONFIG_ENTRY_LOCK = "point_config_entry_lock"
CONFIG_ENTRY_IS_SETUP = "point_config_entry_is_setup"
PLATFORMS = [Platform.BINARY_SENSOR, Platform.SENSOR]
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_CLIENT_ID): cv.string,
vol.Required(CONF_CLIENT_SECRET): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the Minut Point component."""
if DOMAIN not in config:
return True
conf = config[DOMAIN]
config_flow.register_flow_implementation(
hass, DOMAIN, conf[CONF_CLIENT_ID], conf[CONF_CLIENT_SECRET]
)
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Point from a config entry."""
async def token_saver(token, **kwargs):
_LOGGER.debug("Saving updated token %s", token)
hass.config_entries.async_update_entry(
entry, data={**entry.data, CONF_TOKEN: token}
)
session = PointSession(
async_get_clientsession(hass),
entry.data["refresh_args"][CONF_CLIENT_ID],
entry.data["refresh_args"][CONF_CLIENT_SECRET],
token=entry.data[CONF_TOKEN],
token_saver=token_saver,
)
try:
await session.ensure_active_token()
except ConnectTimeout as err:
_LOGGER.debug("Connection Timeout")
raise ConfigEntryNotReady from err
except Exception: # pylint: disable=broad-except
_LOGGER.error("Authentication Error")
return False
hass.data[DATA_CONFIG_ENTRY_LOCK] = asyncio.Lock()
hass.data[CONFIG_ENTRY_IS_SETUP] = set()
await async_setup_webhook(hass, entry, session)
client = MinutPointClient(hass, entry, session)
hass.data.setdefault(DOMAIN, {}).update({entry.entry_id: client})
hass.async_create_task(client.update())
return True
async def async_setup_webhook(hass: HomeAssistant, entry: ConfigEntry, session):
"""Set up a webhook to handle binary sensor events."""
if CONF_WEBHOOK_ID not in entry.data:
webhook_id = webhook.async_generate_id()
webhook_url = webhook.async_generate_url(hass, webhook_id)
_LOGGER.info("Registering new webhook at: %s", webhook_url)
hass.config_entries.async_update_entry(
entry,
data={
**entry.data,
CONF_WEBHOOK_ID: webhook_id,
CONF_WEBHOOK_URL: webhook_url,
},
)
await session.update_webhook(
entry.data[CONF_WEBHOOK_URL],
entry.data[CONF_WEBHOOK_ID],
["*"],
)
webhook.async_register(
hass, DOMAIN, "Point", entry.data[CONF_WEBHOOK_ID], handle_webhook
)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
webhook.async_unregister(hass, entry.data[CONF_WEBHOOK_ID])
session = hass.data[DOMAIN].pop(entry.entry_id)
await session.remove_webhook()
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if not hass.data[DOMAIN]:
hass.data.pop(DOMAIN)
return unload_ok
async def handle_webhook(hass, webhook_id, request):
"""Handle webhook callback."""
try:
data = await request.json()
_LOGGER.debug("Webhook %s: %s", webhook_id, data)
except ValueError:
return None
if isinstance(data, dict):
data["webhook_id"] = webhook_id
async_dispatcher_send(hass, SIGNAL_WEBHOOK, data, data.get("hook_id"))
hass.bus.async_fire(EVENT_RECEIVED, data)
class MinutPointClient:
"""Get the latest data and update the states."""
def __init__(
self, hass: HomeAssistant, config_entry: ConfigEntry, session: PointSession
) -> None:
"""Initialize the Minut data object."""
self._known_devices: set[str] = set()
self._known_homes: set[str] = set()
self._hass = hass
self._config_entry = config_entry
self._is_available = True
self._client = session
async_track_time_interval(self._hass, self.update, SCAN_INTERVAL)
async def update(self, *args):
"""Periodically poll the cloud for current state."""
await self._sync()
async def _sync(self):
"""Update local list of devices."""
if not await self._client.update():
self._is_available = False
_LOGGER.warning("Device is unavailable")
async_dispatcher_send(self._hass, SIGNAL_UPDATE_ENTITY)
return
async def new_device(device_id, platform):
"""Load new device."""
config_entries_key = f"{platform}.{DOMAIN}"
async with self._hass.data[DATA_CONFIG_ENTRY_LOCK]:
if config_entries_key not in self._hass.data[CONFIG_ENTRY_IS_SETUP]:
await self._hass.config_entries.async_forward_entry_setup(
self._config_entry, platform
)
self._hass.data[CONFIG_ENTRY_IS_SETUP].add(config_entries_key)
async_dispatcher_send(
self._hass, POINT_DISCOVERY_NEW.format(platform, DOMAIN), device_id
)
self._is_available = True
for home_id in self._client.homes:
if home_id not in self._known_homes:
await new_device(home_id, "alarm_control_panel")
self._known_homes.add(home_id)
for device in self._client.devices:
if device.device_id not in self._known_devices:
for platform in PLATFORMS:
await new_device(device.device_id, platform)
self._known_devices.add(device.device_id)
async_dispatcher_send(self._hass, SIGNAL_UPDATE_ENTITY)
def device(self, device_id):
"""Return device representation."""
return self._client.device(device_id)
def is_available(self, device_id):
"""Return device availability."""
if not self._is_available:
return False
return device_id in self._client.device_ids
async def remove_webhook(self):
"""Remove the session webhook."""
return await self._client.remove_webhook()
@property
def homes(self):
"""Return known homes."""
return self._client.homes
async def async_alarm_disarm(self, home_id):
"""Send alarm disarm command."""
return await self._client.alarm_disarm(home_id)
async def async_alarm_arm(self, home_id):
"""Send alarm arm command."""
return await self._client.alarm_arm(home_id)
class MinutPointEntity(Entity):
"""Base Entity used by the sensors."""
def __init__(self, point_client, device_id, device_class):
"""Initialize the entity."""
self._async_unsub_dispatcher_connect = None
self._client = point_client
self._id = device_id
self._name = self.device.name
self._device_class = device_class
self._updated = utc_from_timestamp(0)
self._value = None
def __str__(self):
"""Return string representation of device."""
return f"MinutPoint {self.name}"
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
_LOGGER.debug("Created device %s", self)
self._async_unsub_dispatcher_connect = async_dispatcher_connect(
self.hass, SIGNAL_UPDATE_ENTITY, self._update_callback
)
await self._update_callback()
async def async_will_remove_from_hass(self):
"""Disconnect dispatcher listener when removed."""
if self._async_unsub_dispatcher_connect:
self._async_unsub_dispatcher_connect()
async def _update_callback(self):
"""Update the value of the sensor."""
@property
def available(self):
"""Return true if device is not offline."""
return self._client.is_available(self.device_id)
@property
def device(self):
"""Return the representation of the device."""
return self._client.device(self.device_id)
@property
def device_class(self):
"""Return the device class."""
return self._device_class
@property
def device_id(self):
"""Return the id of the device."""
return self._id
@property
def extra_state_attributes(self):
"""Return status of device."""
attrs = self.device.device_status
attrs["last_heard_from"] = as_local(self.last_update).strftime(
"%Y-%m-%d %H:%M:%S"
)
return attrs
@property
def device_info(self) -> DeviceInfo:
"""Return a device description for device registry."""
device = self.device.device
return DeviceInfo(
connections={
(device_registry.CONNECTION_NETWORK_MAC, device["device_mac"])
},
identifiers={(DOMAIN, device["device_id"])},
manufacturer="Minut",
model=f"Point v{device['hardware_version']}",
name=device["description"],
sw_version=device["firmware"]["installed"],
via_device=(DOMAIN, device["home"]),
)
@property
def name(self):
"""Return the display name of this device."""
return f"{self._name} {self.device_class.capitalize()}"
@property
def is_updated(self):
"""Return true if sensor have been updated."""
return self.last_update > self._updated
@property
def last_update(self):
"""Return the last_update time for the device."""
last_update = parse_datetime(self.device.last_update)
return last_update
@property
def should_poll(self):
"""No polling needed for point."""
return False
@property
def unique_id(self):
"""Return the unique id of the sensor."""
return f"point.{self._id}-{self.device_class}"
@property
def value(self):
"""Return the sensor value."""
return self._value
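# Illustrative sketch (not part of this module): how a platform such as
# `sensor` could consume the POINT_DISCOVERY_NEW signal dispatched by
# MinutPointClient._sync above. The function and callback names are
# hypothetical; a real platform would add entities instead of logging.
async def _example_platform_setup(hass: HomeAssistant, config_entry: ConfigEntry) -> None:
    client = hass.data[DOMAIN][config_entry.entry_id]

    async def _async_discover_device(device_id):
        # A real platform would create and add entities for the new device here.
        _LOGGER.debug("Discovered Point device %s (%s)", device_id, client.device(device_id))

    async_dispatcher_connect(
        hass, POINT_DISCOVERY_NEW.format("sensor", DOMAIN), _async_discover_device
    )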
|
|
from __future__ import print_function, division
from .str import StrPrinter
from sympy.utilities import default_sort_key
class LambdaPrinter(StrPrinter):
"""
This printer converts expressions into strings that can be used by
lambdify.
"""
def _print_MatrixBase(self, expr):
return "%s(%s)" % (expr.__class__.__name__,
self._print((expr.tolist())))
_print_SparseMatrix = \
_print_MutableSparseMatrix = \
_print_ImmutableSparseMatrix = \
_print_Matrix = \
_print_DenseMatrix = \
_print_MutableDenseMatrix = \
_print_ImmutableMatrix = \
_print_ImmutableDenseMatrix = \
_print_MatrixBase
def _print_Piecewise(self, expr):
result = []
i = 0
for arg in expr.args:
e = arg.expr
c = arg.cond
result.append('((')
result.append(self._print(e))
result.append(') if (')
result.append(self._print(c))
result.append(') else (')
i += 1
result = result[:-1]
result.append(') else None)')
result.append(')'*(2*i - 2))
return ''.join(result)
def _print_Sum(self, expr):
loops = (
'for {i} in range({a}, {b}+1)'.format(
i=self._print(i),
a=self._print(a),
b=self._print(b))
for i, a, b in expr.limits)
return '(builtins.sum({function} {loops}))'.format(
function=self._print(expr.function),
loops=' '.join(loops))
def _print_And(self, expr):
result = ['(']
for arg in sorted(expr.args, key=default_sort_key):
result.extend(['(', self._print(arg), ')'])
result.append(' and ')
result = result[:-1]
result.append(')')
return ''.join(result)
def _print_Or(self, expr):
result = ['(']
for arg in sorted(expr.args, key=default_sort_key):
result.extend(['(', self._print(arg), ')'])
result.append(' or ')
result = result[:-1]
result.append(')')
return ''.join(result)
def _print_Not(self, expr):
result = ['(', 'not (', self._print(expr.args[0]), '))']
return ''.join(result)
def _print_BooleanTrue(self, expr):
return "True"
def _print_BooleanFalse(self, expr):
return "False"
def _print_ITE(self, expr):
result = [
'((', self._print(expr.args[1]),
') if (', self._print(expr.args[0]),
') else (', self._print(expr.args[2]), '))'
]
return ''.join(result)
class TensorflowPrinter(LambdaPrinter):
"""
Tensorflow printer which handles vectorized piecewise functions,
logical operators, etc.
"""
def _print_And(self, expr):
"Logical And printer"
# We have to override LambdaPrinter because it uses Python 'and' keyword.
# If LambdaPrinter didn't define it, we could use StrPrinter's
# version of the function and add 'logical_and' to NUMPY_TRANSLATIONS.
return '{0}({1})'.format('logical_and', ','.join(self._print(i) for i in expr.args))
def _print_Or(self, expr):
"Logical Or printer"
# We have to override LambdaPrinter because it uses Python 'or' keyword.
# If LambdaPrinter didn't define it, we could use StrPrinter's
# version of the function and add 'logical_or' to NUMPY_TRANSLATIONS.
return '{0}({1})'.format('logical_or', ','.join(self._print(i) for i in expr.args))
def _print_Not(self, expr):
"Logical Not printer"
# We have to override LambdaPrinter because it uses Python 'not' keyword.
# If LambdaPrinter didn't define it, we would still have to define our
# own because StrPrinter doesn't define it.
return '{0}({1})'.format('logical_not', ','.join(self._print(i) for i in expr.args))
def _print_Piecewise(self, expr, **kwargs):
from sympy import Piecewise
e, cond = expr.args[0].args
if len(expr.args) == 1:
return 'select({0}, {1}, {2})'.format(
self._print(cond, **kwargs),
self._print(e, **kwargs),
0)
return 'select({0}, {1}, {2})'.format(
self._print(cond, **kwargs),
self._print(e, **kwargs),
self._print(Piecewise(*expr.args[1:]), **kwargs))
def _print_Relational(self, expr):
"Relational printer for Equality and Unequality"
op = {
'==' :'equal',
'!=' :'not_equal',
'<' :'less',
'<=' :'less_equal',
'>' :'greater',
'>=' :'greater_equal',
}
if expr.rel_op in op:
lhs = self._print(expr.lhs)
rhs = self._print(expr.rhs)
return '{op}({lhs}, {rhs})'.format(op=op[expr.rel_op],
lhs=lhs,
rhs=rhs)
return super(TensorflowPrinter, self)._print_Relational(expr)
class NumPyPrinter(LambdaPrinter):
"""
Numpy printer which handles vectorized piecewise functions,
logical operators, etc.
"""
_default_settings = {
"order": "none",
"full_prec": "auto",
}
def _print_seq(self, seq, delimiter=', '):
"General sequence printer: converts to tuple"
# Print tuples here instead of lists because numba supports
# tuples in nopython mode.
return '({},)'.format(delimiter.join(self._print(item) for item in seq))
def _print_MatMul(self, expr):
"Matrix multiplication printer"
return '({0})'.format(').dot('.join(self._print(i) for i in expr.args))
def _print_DotProduct(self, expr):
# DotProduct allows any shape order, but numpy.dot does matrix
# multiplication, so we have to make sure it gets 1 x n by n x 1.
arg1, arg2 = expr.args
if arg1.shape[0] != 1:
arg1 = arg1.T
if arg2.shape[1] != 1:
arg2 = arg2.T
return "dot(%s, %s)" % (self._print(arg1), self._print(arg2))
def _print_Piecewise(self, expr):
"Piecewise function printer"
exprs = '[{0}]'.format(','.join(self._print(arg.expr) for arg in expr.args))
conds = '[{0}]'.format(','.join(self._print(arg.cond) for arg in expr.args))
# If [default_value, True] is a (expr, cond) sequence in a Piecewise object
# it will behave the same as passing the 'default' kwarg to select()
# *as long as* it is the last element in expr.args.
# If this is not the case, it may be triggered prematurely.
return 'select({0}, {1}, default=nan)'.format(conds, exprs)
def _print_Relational(self, expr):
"Relational printer for Equality and Unequality"
op = {
'==' :'equal',
'!=' :'not_equal',
'<' :'less',
'<=' :'less_equal',
'>' :'greater',
'>=' :'greater_equal',
}
if expr.rel_op in op:
lhs = self._print(expr.lhs)
rhs = self._print(expr.rhs)
return '{op}({lhs}, {rhs})'.format(op=op[expr.rel_op],
lhs=lhs,
rhs=rhs)
return super(NumPyPrinter, self)._print_Relational(expr)
def _print_And(self, expr):
"Logical And printer"
# We have to override LambdaPrinter because it uses Python 'and' keyword.
# If LambdaPrinter didn't define it, we could use StrPrinter's
# version of the function and add 'logical_and' to NUMPY_TRANSLATIONS.
return '{0}({1})'.format('logical_and', ','.join(self._print(i) for i in expr.args))
def _print_Or(self, expr):
"Logical Or printer"
# We have to override LambdaPrinter because it uses Python 'or' keyword.
# If LambdaPrinter didn't define it, we could use StrPrinter's
# version of the function and add 'logical_or' to NUMPY_TRANSLATIONS.
return '{0}({1})'.format('logical_or', ','.join(self._print(i) for i in expr.args))
def _print_Not(self, expr):
"Logical Not printer"
# We have to override LambdaPrinter because it uses Python 'not' keyword.
# If LambdaPrinter didn't define it, we would still have to define our
# own because StrPrinter doesn't define it.
return '{0}({1})'.format('logical_not', ','.join(self._print(i) for i in expr.args))
def _print_Min(self, expr):
return '{0}(({1}))'.format('amin', ','.join(self._print(i) for i in expr.args))
def _print_Max(self, expr):
return '{0}(({1}))'.format('amax', ','.join(self._print(i) for i in expr.args))
# numexpr works by altering the string passed to numexpr.evaluate
# rather than by populating a namespace. Thus a special printer...
class NumExprPrinter(LambdaPrinter):
# key, value pairs correspond to sympy name and numexpr name
# functions not appearing in this dict will raise a TypeError
_numexpr_functions = {
'sin' : 'sin',
'cos' : 'cos',
'tan' : 'tan',
'asin': 'arcsin',
'acos': 'arccos',
'atan': 'arctan',
'atan2' : 'arctan2',
'sinh' : 'sinh',
'cosh' : 'cosh',
'tanh' : 'tanh',
'asinh': 'arcsinh',
'acosh': 'arccosh',
'atanh': 'arctanh',
'ln' : 'log',
'log': 'log',
'exp': 'exp',
'sqrt' : 'sqrt',
'Abs' : 'abs',
'conjugate' : 'conj',
'im' : 'imag',
're' : 'real',
'where' : 'where',
'complex' : 'complex',
'contains' : 'contains',
}
def _print_ImaginaryUnit(self, expr):
return '1j'
def _print_seq(self, seq, delimiter=', '):
# simplified _print_seq taken from pretty.py
s = [self._print(item) for item in seq]
if s:
return delimiter.join(s)
else:
return ""
def _print_Function(self, e):
func_name = e.func.__name__
nstr = self._numexpr_functions.get(func_name, None)
if nstr is None:
# check for implemented_function
if hasattr(e, '_imp_'):
return "(%s)" % self._print(e._imp_(*e.args))
else:
raise TypeError("numexpr does not support function '%s'" %
func_name)
return "%s(%s)" % (nstr, self._print_seq(e.args))
def blacklisted(self, expr):
raise TypeError("numexpr cannot be used with %s" %
expr.__class__.__name__)
# blacklist all Matrix printing
_print_SparseMatrix = \
_print_MutableSparseMatrix = \
_print_ImmutableSparseMatrix = \
_print_Matrix = \
_print_DenseMatrix = \
_print_MutableDenseMatrix = \
_print_ImmutableMatrix = \
_print_ImmutableDenseMatrix = \
blacklisted
# blacklist some python expressions
_print_list = \
_print_tuple = \
_print_Tuple = \
_print_dict = \
_print_Dict = \
blacklisted
def doprint(self, expr):
lstr = super(NumExprPrinter, self).doprint(expr)
return "evaluate('%s', truediv=True)" % lstr
def lambdarepr(expr, **settings):
"""
Returns a string usable for lambdifying.
"""
return LambdaPrinter(settings).doprint(expr)
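def _lambdarepr_examples():  # pragma: no cover - illustrative only
    """Hedged usage sketch (not part of the original module).

    Shows roughly what the printers above produce; the exact argument order
    inside Boolean expressions may differ.
    """
    from sympy.abc import x, y
    print(lambdarepr(x & y))                # e.g. '((x) and (y))'
    print(NumPyPrinter().doprint(x & y))    # e.g. 'logical_and(x,y)'
    print(NumExprPrinter().doprint(x + y))  # "evaluate('x + y', truediv=True)"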
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ResNet V1."""
import collections
import functools
from absl import logging
from flax.deprecated import nn
import jax.numpy as jnp
import ml_collections
from gift.data import dataset_utils
from gift.models import base_model
from gift.nn import dann_utils
from gift.utils import tensor_util
class ResidualBlock(nn.Module):
"""ResNet block."""
def apply(self,
x,
filters,
strides=(1, 1),
dropout_rate=0.0,
epsilon=1e-5,
momentum=0.9,
norm_layer='batch_norm',
train=True,
dtype=jnp.float32):
# TODO(samirabnar): Make 4 a parameter.
needs_projection = x.shape[-1] != filters * 4 or strides != (1, 1)
norm_layer_name = ''
if norm_layer == 'batch_norm':
norm_layer = nn.BatchNorm.partial(
use_running_average=not train,
momentum=momentum,
epsilon=epsilon,
dtype=dtype)
norm_layer_name = 'bn'
elif norm_layer == 'group_norm':
norm_layer = nn.GroupNorm.partial(num_groups=16, dtype=dtype)
norm_layer_name = 'gn'
conv = nn.Conv.partial(bias=False, dtype=dtype)
residual = x
if needs_projection:
residual = conv(residual, filters * 4, (1, 1), strides, name='proj_conv')
residual = norm_layer(residual, name=f'proj_{norm_layer_name}')
y = conv(x, filters, (1, 1), name='conv1')
y = norm_layer(y, name=f'{norm_layer_name}1')
y = nn.relu(y)
y = conv(y, filters, (3, 3), strides, name='conv2')
y = norm_layer(y, name=f'{norm_layer_name}2')
y = nn.relu(y)
if dropout_rate > 0.0:
y = nn.dropout(y, dropout_rate, deterministic=not train)
y = conv(y, filters * 4, (1, 1), name='conv3')
y = norm_layer(
y, name=f'{norm_layer_name}3', scale_init=nn.initializers.zeros)
y = nn.relu(residual + y)
return y
class ResNet(base_model.BaseModel):
"""ResNetV1."""
# A dictionary mapping the number of layers in a resnet to the number of
# blocks in each stage of the model.
_block_size_options = {
1: [1],
18: [2, 2, 2, 2],
34: [3, 4, 6, 3],
50: [3, 4, 6, 3],
101: [3, 4, 23, 3],
152: [3, 8, 36, 3],
200: [3, 24, 36, 3]
}
def apply(self,
inputs,
num_outputs,
num_filters=64,
num_layers=50,
dropout_rate=0.0,
input_dropout_rate=0.0,
train=True,
dtype=jnp.float32,
head_bias_init=jnp.zeros,
return_activations=False,
input_layer_key='input',
has_discriminator=False,
discriminator=False):
"""Apply a ResNet network on the input.
Args:
inputs: jnp array; Inputs.
num_outputs: int; Number of output units.
num_filters: int; Determines base number of filters. Number of filters in
block i is num_filters * 2 ** i.
num_layers: int; Number of layers (should be one of the predefined ones.)
dropout_rate: float; Rate of dropping out the output of different hidden
layers.
input_dropout_rate: float; Rate of dropping out the input units.
train: bool; Is train?
dtype: jnp type; Type of the outputs.
head_bias_init: fn(rng_key, shape)--> jnp array; Initializer for head bias
parameters.
return_activations: bool; If True, hidden activations are also returned.
input_layer_key: str; Determines where to plugin the input (this is to
enable providing inputs to slices of the model). If `input_layer_key` is
`layer_i` we assume the inputs are the activations of `layer_i` and pass
them to `layer_{i+1}`.
has_discriminator: bool; Whether the model should have discriminator
layer.
discriminator: bool; Whether we should return discriminator logits.
Returns:
Unnormalized Logits with shape `[bs, num_outputs]`,
if return_activations:
Logits, dict of hidden activations and the key to the representation(s)
which will be used as ``The Representation'', e.g., for computing
losses.
"""
if num_layers not in ResNet._block_size_options:
raise ValueError('Please provide a valid number of layers')
block_sizes = ResNet._block_size_options[num_layers]
layer_activations = collections.OrderedDict()
input_is_set = False
current_rep_key = 'input'
if input_layer_key == current_rep_key:
x = inputs
input_is_set = True
if input_is_set:
# Input dropout
x = nn.dropout(x, input_dropout_rate, deterministic=not train)
layer_activations[current_rep_key] = x
rep_key = current_rep_key
current_rep_key = 'init_conv'
if input_layer_key == current_rep_key:
x = inputs
input_is_set = True
layer_activations[current_rep_key] = x
rep_key = current_rep_key
elif input_is_set:
# First block
x = nn.Conv(
x,
num_filters, (7, 7), (2, 2),
padding=[(3, 3), (3, 3)],
bias=False,
dtype=dtype,
name='init_conv')
x = nn.BatchNorm(
x,
use_running_average=not train,
momentum=0.9,
epsilon=1e-5,
dtype=dtype,
name='init_bn')
x = nn.relu(x)
x = nn.max_pool(x, (3, 3), strides=(2, 2), padding='SAME')
layer_activations[current_rep_key] = x
rep_key = current_rep_key
# Residual blocks
for i, block_size in enumerate(block_sizes):
# Stage i (each stage contains blocks of the same size).
for j in range(block_size):
strides = (2, 2) if i > 0 and j == 0 else (1, 1)
current_rep_key = f'block_{i + 1}+{j}'
if input_layer_key == current_rep_key:
x = inputs
input_is_set = True
layer_activations[current_rep_key] = x
rep_key = current_rep_key
elif input_is_set:
x = ResidualBlock(
x,
num_filters * 2**i,
strides=strides,
dropout_rate=dropout_rate,
train=train,
dtype=dtype,
name=f'block_{i + 1}_{j}')
layer_activations[current_rep_key] = x
rep_key = current_rep_key
current_rep_key = 'avg_pool'
if input_layer_key == current_rep_key:
x = inputs
input_is_set = True
layer_activations[current_rep_key] = x
rep_key = current_rep_key
elif input_is_set:
# Global Average Pool
x = jnp.mean(x, axis=(1, 2))
layer_activations[current_rep_key] = x
rep_key = current_rep_key
# DANN module
if has_discriminator:
z = dann_utils.flip_grad_identity(x)
z = nn.Dense(z, 2, name='disc_l1', bias=True)
z = nn.relu(z)
z = nn.Dense(z, 2, name='disc_l2', bias=True)
current_rep_key = 'head'
if input_layer_key == current_rep_key:
x = inputs
layer_activations[current_rep_key] = x
rep_key = current_rep_key
logging.warn('Input was never used')
elif input_is_set:
x = nn.Dense(
x, num_outputs, dtype=dtype, bias_init=head_bias_init, name='head')
# Make sure that the output is float32, even if our previous computations
# are in float16, or other types.
x = jnp.asarray(x, jnp.float32)
outputs = x
if return_activations:
outputs = (x, layer_activations, rep_key)
if discriminator and has_discriminator:
outputs = outputs + (z,)
else:
del layer_activations
if discriminator and has_discriminator:
outputs = (x, z)
if discriminator and (not has_discriminator):
raise ValueError(
'Inconsistent values passed for discriminator and has_discriminator')
return outputs
@classmethod
def build_flax_module(cls, hparams=None, dataset_metadata=None):
"""Build flax module (partially build by passing the hparams).
API use to initialize a flax Model:
```
model_def = model_cls.build_flax_module(hparams)
_, initial_params = model_def.init_by_shape(
rng, [((device_batch_size,)+dataset.meta_data['input_shape'][1:],
jnp.float32)])
model = nn.Model(model_def, initial_params)
```
Args:
hparams: ConfigDict; contains the hyperparams of the model architecture.
dataset_metadata: dict; if hparams is None, dataset_meta data should be
passed to provide the output_dim for the default hparam set.
Returns:
partially built class and hparams.
"""
hparams = super(ResNet, cls).build_flax_module(hparams, dataset_metadata)
model_dtype = dataset_utils.DATA_TYPE[hparams.get('model_dtype_str',
'float32')].jax_dtype
return cls.partial(
num_outputs=hparams.output_dim,
num_filters=hparams.num_filters,
num_layers=hparams.num_layers,
dropout_rate=hparams.dropout_rate,
input_dropout_rate=hparams.input_dropout_rate,
head_bias_init=functools.partial(
tensor_util.constant_initializer,
fill_value=jnp.float32(hparams.get('head_bias_init', 0.0))),
dtype=model_dtype,
has_discriminator=hparams.get('has_discriminator', False)), hparams
@classmethod
def default_flax_module_hparams(cls, dataset_metadata):
"""Default hparams for the flax module that is built in `build_flax_module`.
This function in particular serves the testing functions and is supposed to
provide hparams that are passed to the flax_module when it's built in the
`build_flax_module` function, e.g., `model_dtype_str`.
Args:
dataset_metadata: dict; Passed to provide output dim.
Returns:
default hparams.
"""
return ml_collections.ConfigDict(
dict(
output_dim=dataset_metadata['num_classes'],
num_filters=32,
num_layers=1,
dropout_rate=0.1,
input_dropout_rate=0.0,
data_dtype_str='float32',
has_discriminator=False,
))
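# Illustrative sketch (not part of the original module): a quick check of how
# the `_block_size_options` table relates to the conventional layer count for
# ResNet-50 with the bottleneck blocks defined above (3 convolutions per
# block, plus the initial convolution and the dense head).
def _count_resnet50_layers_example():
    block_sizes = ResNet._block_size_options[50]  # [3, 4, 6, 3]
    conv_layers = 3 * sum(block_sizes)            # 48 convs in residual blocks
    return conv_layers + 1 + 1                    # + init_conv + head == 50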
|
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base GCP client which uses the discovery API.
"""
# modifications (c7n)
# - flight recorder support
# - env creds sourcing
# - various minor bug fixes
# todo:
# - consider forking googleapiclient to get rid of httplib2
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import threading
import os
import socket
import ssl
from googleapiclient import discovery, errors # NOQA
from googleapiclient.http import set_user_agent
from google.auth.credentials import with_scopes_if_required
import google.oauth2.credentials
import google_auth_httplib2
import httplib2
from ratelimiter import RateLimiter
from retrying import retry
from six.moves import http_client
from six.moves.urllib.error import URLError
HTTPLIB_CA_BUNDLE = os.environ.get('HTTPLIB_CA_BUNDLE')
CLOUD_SCOPES = frozenset(['https://www.googleapis.com/auth/cloud-platform'])
# Per request max wait timeout.
HTTP_REQUEST_TIMEOUT = 30.0
# Per thread storage.
LOCAL_THREAD = threading.local()
log = logging.getLogger('c7n_gcp.client')
# Default value num_retries within HttpRequest execute method
NUM_HTTP_RETRIES = 5
RETRYABLE_EXCEPTIONS = (
http_client.ResponseNotReady,
http_client.IncompleteRead,
httplib2.ServerNotFoundError,
socket.error,
ssl.SSLError,
URLError, # include "no network connection"
)
class PaginationNotSupported(Exception):
"""Pagination not supported on this api."""
def is_retryable_exception(e):
"""Whether exception should be retried.
Args:
e (Exception): Exception object.
Returns:
bool: True for exceptions to retry. False otherwise.
"""
return isinstance(e, RETRYABLE_EXCEPTIONS)
@retry(retry_on_exception=is_retryable_exception,
wait_exponential_multiplier=1000,
wait_exponential_max=10000,
stop_max_attempt_number=5)
def _create_service_api(credentials, service_name, version, developer_key=None,
cache_discovery=False, http=None):
"""Builds and returns a cloud API service object.
Args:
credentials (OAuth2Credentials): Credentials that will be used to
authenticate the API calls.
service_name (str): The name of the API.
version (str): The version of the API to use.
developer_key (str): The api key to use to determine the project
associated with the API call, most API services do not require
this to be set.
cache_discovery (bool): Whether or not to cache the discovery doc.
Returns:
object: A Resource object with methods for interacting with the service.
"""
# The default logging of the discovery obj is very noisy in recent versions.
# Lower the default logging level of just this module to WARNING unless
# debug is enabled.
if log.getEffectiveLevel() > logging.DEBUG:
logging.getLogger(discovery.__name__).setLevel(logging.WARNING)
discovery_kwargs = {
'serviceName': service_name,
'version': version,
'developerKey': developer_key,
'cache_discovery': cache_discovery,
}
if http:
discovery_kwargs['http'] = http
else:
discovery_kwargs['credentials'] = credentials
return discovery.build(**discovery_kwargs)
def _build_http(http=None):
"""Construct an http client suitable for googleapiclient usage w/ user agent.
"""
if not http:
http = httplib2.Http(
timeout=HTTP_REQUEST_TIMEOUT, ca_certs=HTTPLIB_CA_BUNDLE)
user_agent = 'Python-httplib2/{} (gzip), {}/{}'.format(
httplib2.__version__,
'custodian-gcp',
'0.1')
return set_user_agent(http, user_agent)
class Session(object):
"""Base class for API repository for a specified Cloud API."""
def __init__(self,
credentials=None,
quota_max_calls=None,
quota_period=None,
use_rate_limiter=False,
http=None,
project_id=None,
**kwargs):
"""Constructor.
Args:
credentials (object): GoogleCredentials; if omitted, application
default credentials are used.
quota_max_calls (int): Allowed requests per <quota_period> for the
API.
quota_period (float): The time period to track requests over.
use_rate_limiter (bool): Set to True to use a rate limiter for
this service.
http (object): Optional http object to use for requests (e.g. a
flight-recorder http for replay).
project_id (str): Optional default GCP project id.
**kwargs (dict): Additional args such as version.
"""
self._use_cached_http = False
if not credentials:
# Only share the http object when using the default credentials.
self._use_cached_http = True
credentials, _ = google.auth.default()
self._credentials = with_scopes_if_required(credentials, list(CLOUD_SCOPES))
if use_rate_limiter:
self._rate_limiter = RateLimiter(max_calls=quota_max_calls,
period=quota_period)
else:
self._rate_limiter = None
self._http = http
self.project_id = project_id
def __repr__(self):
"""The object representation.
Returns:
str: The object representation.
"""
return '<gcp-session: http=%s>' % (self._http,)
def get_default_project(self):
if self.project_id:
return self.project_id
for k in ('GOOGLE_PROJECT', 'GCLOUD_PROJECT',
'GOOGLE_CLOUD_PROJECT', 'CLOUDSDK_CORE_PROJECT'):
if k in os.environ:
return os.environ[k]
raise ValueError("No GCP Project ID set - set CLOUDSDK_CORE_PROJECT")
def get_default_region(self):
for k in ('GOOGLE_REGION', 'GCLOUD_REGION', 'CLOUDSDK_COMPUTE_REGION'):
if k in os.environ:
return os.environ[k]
def get_default_zone(self):
for k in ('GOOGLE_ZONE', 'GCLOUD_ZONE', 'CLOUDSDK_COMPUTE_ZONE'):
if k in os.environ:
return os.environ[k]
def client(self, service_name, version, component, **kw):
"""Safely initialize a repository class to a property.
Args:
repository_class (class): The class to initialize.
version (str): The gcp service version for the repository.
Returns:
object: An instance of repository_class.
"""
service = _create_service_api(
self._credentials,
service_name,
version,
kw.get('developer_key'),
kw.get('cache_discovery', False),
self._http)
return ServiceClient(
gcp_service=service,
component=component,
credentials=self._credentials,
rate_limiter=self._rate_limiter,
use_cached_http=self._use_cached_http,
http=self._http)
# pylint: disable=too-many-instance-attributes, too-many-arguments
class ServiceClient(object):
"""Base class for GCP APIs."""
def __init__(self, gcp_service, credentials, component=None,
num_retries=NUM_HTTP_RETRIES, key_field='project',
entity_field=None, list_key_field=None, get_key_field=None,
max_results_field='maxResults', search_query_field='query',
rate_limiter=None, use_cached_http=True, http=None):
"""Constructor.
Args:
gcp_service (object): A Resource object with methods for interacting
with the service.
credentials (OAuth2Credentials): A Credentials object
component (str): The subcomponent of the gcp service for this
repository instance. E.g. 'instances' for compute.instances().*
APIs
num_retries (int): The number of http retriable errors to retry on
before hard failing.
key_field (str): The field name representing the project to
query in the API.
entity_field (str): The API entity returned generally by the .get()
api. E.g. 'instance' for compute.instances().get()
list_key_field (str): Optional override of key field for calls to
list methods.
get_key_field (str): Optional override of key field for calls to
get methods.
max_results_field (str): The field name that represents the maximum
number of results to return in one page.
search_query_field (str): The field name used to filter search
results.
rate_limiter (object): A RateLimiter object to manage API quota.
use_cached_http (bool): If set to true, calls to the API will use
a thread local shared http object. When false a new http object
is used for each request.
"""
self.gcp_service = gcp_service
self._credentials = credentials
self._component = None
if component:
component_api = gcp_service
for c in component.split('.'):
component_api = getattr(component_api, c)()
self._component = component_api
self._entity_field = entity_field
self._num_retries = num_retries
if list_key_field:
self._list_key_field = list_key_field
else:
self._list_key_field = key_field
if get_key_field:
self._get_key_field = get_key_field
else:
self._get_key_field = key_field
self._max_results_field = max_results_field
self._search_query_field = search_query_field
self._rate_limiter = rate_limiter
self._use_cached_http = use_cached_http
self._local = LOCAL_THREAD
self._http_replay = http
@property
def http(self):
"""A thread local instance of httplib2.Http.
Returns:
httplib2.Http: An Http instance authorized by the credentials.
"""
if self._use_cached_http and hasattr(self._local, 'http'):
return self._local.http
if self._http_replay is not None:
# httplib2 instance is not thread safe
http = self._http_replay
else:
http = _build_http()
authorized_http = google_auth_httplib2.AuthorizedHttp(
self._credentials, http=http)
if self._use_cached_http:
self._local.http = authorized_http
return authorized_http
def get_http(self):
"""Return an http instance sans credentials"""
if self._http_replay:
return self._http_replay
return _build_http()
def _build_request(self, verb, verb_arguments):
"""Builds HttpRequest object.
Args:
verb (str): Request verb (ex. insert, update, delete).
verb_arguments (dict): Arguments to be passed with the request.
Returns:
httplib2.HttpRequest: HttpRequest to be sent to the API.
"""
method = getattr(self._component, verb)
# Python insists that keys in **kwargs be strings (not variables).
# Since we initially build our kwargs as a dictionary where one of the
# keys is a variable (target), we need to convert keys to strings,
# even though the variable in question is of type str.
method_args = {str(k): v for k, v in verb_arguments.items()}
return method(**method_args)
def _build_next_request(self, verb, prior_request, prior_response):
"""Builds pagination-aware request object.
More details:
https://developers.google.com/api-client-library/python/guide/pagination
Args:
verb (str): Request verb (ex. insert, update, delete).
prior_request (httplib2.HttpRequest): Request that may trigger
paging.
prior_response (dict): Potentially partial response.
Returns:
httplib2.HttpRequest: HttpRequest or None. None is returned when
there is nothing more to fetch - request completed.
"""
method = getattr(self._component, verb + '_next')
return method(prior_request, prior_response)
def supports_pagination(self, verb):
"""Determines if the API action supports pagination.
Args:
verb (str): Request verb (ex. insert, update, delete).
Returns:
bool: True when API supports pagination, False otherwise.
"""
return getattr(self._component, verb + '_next', None)
def execute_command(self, verb, verb_arguments):
"""Executes command (ex. add) via a dedicated http object.
Async APIs may take minutes to complete. Therefore, callers are
encouraged to leverage concurrent.futures (or similar) to place long
running commands on a separate threads.
Args:
verb (str): Method to execute on the component (ex. get, list).
verb_arguments (dict): key-value pairs to be passed to _build_request.
Returns:
dict: An async operation Service Response.
"""
request = self._build_request(verb, verb_arguments)
return self._execute(request)
def execute_paged_query(self, verb, verb_arguments):
"""Executes query (ex. list) via a dedicated http object.
Args:
verb (str): Method to execute on the component (ex. get, list).
verb_arguments (dict): key-value pairs to be passed to _build_request.
Yields:
dict: Service Response.
Raises:
PaginationNotSupported: When an API does not support paging.
"""
if not self.supports_pagination(verb=verb):
raise PaginationNotSupported('{} does not support pagination'.format(verb))
request = self._build_request(verb, verb_arguments)
number_of_pages_processed = 0
while request is not None:
response = self._execute(request)
number_of_pages_processed += 1
log.debug('Executing paged request #%s', number_of_pages_processed)
request = self._build_next_request(verb, request, response)
yield response
def execute_search_query(self, verb, verb_arguments):
"""Executes query (ex. search) via a dedicated http object.
Args:
verb (str): Method to execute on the component (ex. search).
verb_arguments (dict): key-value pairs to be passed to _build_request.
Yields:
dict: Service Response.
"""
# Implementation of search does not follow the standard API pattern.
# Fields need to be in the body rather than sent separately.
next_page_token = None
number_of_pages_processed = 0
while True:
req_body = verb_arguments.get('body', dict())
if next_page_token:
req_body['pageToken'] = next_page_token
request = self._build_request(verb, verb_arguments)
response = self._execute(request)
number_of_pages_processed += 1
log.debug('Executing paged request #%s', number_of_pages_processed)
next_page_token = response.get('nextPageToken')
yield response
if not next_page_token:
break
def execute_query(self, verb, verb_arguments):
"""Executes query (ex. get) via a dedicated http object.
Args:
verb (str): Method to execute on the component (ex. get, list).
verb_arguments (dict): key-value pairs to be passed to _build_request.
Returns:
dict: Service Response.
"""
request = self._build_request(verb, verb_arguments)
return self._execute(request)
@retry(retry_on_exception=is_retryable_exception,
wait_exponential_multiplier=1000,
wait_exponential_max=10000,
stop_max_attempt_number=5)
def _execute(self, request):
"""Run execute with retries and rate limiting.
Args:
request (object): The HttpRequest object to execute.
Returns:
dict: The response from the API.
"""
if self._rate_limiter:
# Since the ratelimiter library only exposes a context manager
# interface the code has to be duplicated to handle the case where
# no rate limiter is defined.
with self._rate_limiter:
return request.execute(http=self.http,
num_retries=self._num_retries)
return request.execute(http=self.http,
num_retries=self._num_retries)
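# Illustrative usage sketch (not part of the original module). Resource and
# parameter names ('compute', 'v1', 'instances', 'zone') follow the public GCP
# Compute API; adjust to the service you actually need.
def _example_list_instances(project_id, zone):
    session = Session(project_id=project_id)
    instances = session.client('compute', 'v1', 'instances')
    found = []
    # execute_paged_query transparently follows nextPageToken pages.
    for page in instances.execute_paged_query(
            'list', {'project': project_id, 'zone': zone}):
        found.extend(page.get('items', []))
    return found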
|
|
#
# Client connect to an Electrum server.
#
# Runtime check for optional modules
from importlib import util as importutil
import json, warnings, asyncio, ssl
from .protocol import StratumProtocol
# Check if aiosocks is present, and load it if it is.
if importutil.find_spec("aiosocks") is not None:
import aiosocks
have_aiosocks = True
else:
have_aiosocks = False
from collections import defaultdict
from .exc import ElectrumErrorResponse
import logging
logger = logging.getLogger(__name__)
class StratumClient:
def __init__(self, loop=None):
'''
Setup state needed to handle req/resp from a single Stratum server.
Requires a transport (TransportABC) object to do the communication.
'''
self.protocol = None
self.next_id = 1
self.inflight = {}
self.subscriptions = defaultdict(list)
self.actual_connection = {}
self.ka_task = None
self.loop = loop or asyncio.get_event_loop()
self.reconnect = None # call connect() first
# next step: call connect()
def connection_lost(self, protocol):
# Ignore connection_lost for old connections
if protocol is not self.protocol:
return
self.protocol = None
logger.warn("Electrum server connection lost")
# cleanup keep alive task
if self.ka_task:
self.ka_task.cancel()
self.ka_task = None
def close(self):
if self.protocol:
self.protocol.close()
self.protocol = None
if self.ka_task:
self.ka_task.cancel()
self.ka_task = None
async def connect(self, server_info, proto_code=None, *,
use_tor=False, disable_cert_verify=False,
proxy=None, short_term=False):
'''
Start connection process.
Destination must be specified in a ServerInfo() record (first arg).
'''
self.server_info = server_info
if not proto_code:
proto_code,*_ = server_info.protocols
self.proto_code = proto_code
logger.debug("Connecting to: %r" % server_info)
if proto_code == 'g': # websocket
# to do this, we'll need a websockets implementation that
# operates more like a asyncio.Transport
# maybe: `asyncws` or `aiohttp`
raise NotImplementedError('sorry no WebSocket transport yet')
hostname, port, use_ssl = server_info.get_port(proto_code)
if use_tor:
if have_aiosocks:
# Connect via Tor proxy proxy, assumed to be on localhost:9050
# unless a tuple is given with another host/port combo.
try:
socks_host, socks_port = use_tor
except TypeError:
socks_host, socks_port = 'localhost', 9050
# basically no-one has .onion SSL certificates, and
# pointless anyway.
disable_cert_verify = True
assert not proxy, "Sorry not yet supporting proxy->tor->dest"
logger.debug(" .. using TOR")
proxy = aiosocks.Socks5Addr(socks_host, int(socks_port))
else:
logger.debug("Error: want to use tor, but no aiosocks module.")
if use_ssl == True and disable_cert_verify:
# Create a more liberal SSL context that won't
# object to self-signed certicates. This is
# very bad on public Internet, but probably ok
# over Tor
use_ssl = ssl.create_default_context()
use_ssl.check_hostname = False
use_ssl.verify_mode = ssl.CERT_NONE
logger.debug(" .. SSL cert check disabled")
async def _reconnect():
if self.protocol: return # race/duplicate work
if proxy:
if have_aiosocks:
transport, protocol = await aiosocks.create_connection(
StratumProtocol, proxy=proxy,
proxy_auth=None,
remote_resolve=True, ssl=use_ssl,
dst=(hostname, port))
else:
logger.debug("Error: want to use proxy, but no aiosocks module.")
else:
transport, protocol = await self.loop.create_connection(
StratumProtocol, host=hostname,
port=port, ssl=use_ssl)
self.protocol = protocol
protocol.client = self
# capture actual values used
self.actual_connection = dict(hostname=hostname, port=int(port),
ssl=bool(use_ssl), tor=bool(proxy))
self.actual_connection['ip_addr'] = transport.get_extra_info('peername',
default=['unknown'])[0]
if not short_term:
self.ka_task = self.loop.create_task(self._keepalive())
logger.debug("Connected to: %r" % server_info)
# close whatever we had
if self.protocol:
logger.warn("connect called when already connected, closing previous connection")
self.protocol.close()
self.protocol = None
self.reconnect = _reconnect
await self.reconnect()
async def _keepalive(self):
'''
Keep our connect to server alive forever, with some
pointless traffic.
'''
while self.protocol:
vers = await self.RPC('server.version')
logger.debug("Server version: " + repr(vers))
# Five minutes isn't really enough anymore; looks like
# servers are killing 2-minute old idle connections now.
# But decreasing this interval seems rude.
await asyncio.sleep(600)
def _send_request(self, method, params=[], is_subscribe = False):
'''
Send a new request to the server. Serializes the JSON and
tracks id numbers and optional callbacks.
'''
# pick a new ID
self.next_id += 1
req_id = self.next_id
# serialize as JSON
msg = {'id': req_id, 'method': method, 'params': params}
# subscriptions are a Q, normal requests are a future
if is_subscribe:
waitQ = asyncio.Queue()
self.subscriptions[method].append(waitQ)
fut = asyncio.Future(loop=self.loop)
self.inflight[req_id] = (msg, fut)
# send it via the transport, which serializes it
if not self.protocol:
logger.debug("Need to reconnect to server")
async def connect_first():
await self.reconnect()
self.protocol.send_data(msg)
self.loop.create_task(connect_first())
else:
# typical case, send request immediately, response is a future
self.protocol.send_data(msg)
return fut if not is_subscribe else (fut, waitQ)
def _got_response(self, msg):
'''
Decode and dispatch responses from the server.
Has already been unframed and deserialized into an object.
'''
#logger.debug("MSG: %r" % msg)
resp_id = msg.get('id', None)
if resp_id is None:
# subscription traffic comes with method set, but no req id.
method = msg.get('method', None)
if not method:
logger.error("Incoming server message had no ID nor method in it", msg)
return
# not obvious, but result is on params, not result, for subscriptions
result = msg.get('params', None)
logger.debug("Traffic on subscription: %s" % method)
subs = self.subscriptions.get(method)
for q in subs:
self.loop.create_task(q.put(result))
return
assert 'method' not in msg
result = msg.get('result')
# fetch and forget about the request
inf = self.inflight.pop(resp_id, None)
if not inf:
logger.error("Incoming server message had unknown ID in it: %s" % resp_id)
return
# it's a future which is done now
req, rv = inf
if 'error' in msg:
err = msg['error']
logger.info("Error response: '%s'" % err)
rv.set_exception(ElectrumErrorResponse(err, req))
else:
rv.set_result(result)
def RPC(self, method, *params):
'''
Perform a remote command.
Expects a method name, which look like:
blockchain.address.get_balance
.. and sometimes take arguments, all of which are positional.
Returns a future which you should await for
the result from the server. Failures are returned as exceptions.
'''
assert '.' in method
#assert not method.endswith('subscribe')
return self._send_request(method, params)
def subscribe(self, method, *params):
'''
Perform a remote command which will stream events/data to us.
Expects a method name, which look like:
server.peers.subscribe
.. and sometimes take arguments, all of which are positional.
Returns a tuple: (Future, asyncio.Queue).
The future will have the result of the initial
call, and the queue will receive additional
responses as they happen.
'''
assert '.' in method
assert method.endswith('subscribe')
return self._send_request(method, params, is_subscribe=True)
if __name__ == '__main__':
from transport import SocketTransport
from svr_info import KnownServers, ServerInfo
logging.basicConfig(format="%(asctime)-11s %(message)s", datefmt="[%d/%m/%Y-%H:%M:%S]")
loop = asyncio.get_event_loop()
loop.set_debug(True)
proto_code = 's'
if 0:
ks = KnownServers()
ks.from_json('servers.json')
which = ks.select(proto_code, is_onion=True, min_prune=1000)[0]
else:
which = ServerInfo({
"seen_at": 1465686119.022801,
"ports": "t s",
"nickname": "dunp",
"pruning_limit": 10000,
"version": "1.0",
"hostname": "erbium1.sytes.net" })
c = StratumClient(loop=loop)
loop.run_until_complete(c.connect(which, proto_code, disable_cert_verify=True, use_tor=True))
rv = loop.run_until_complete(c.RPC('server.peers.subscribe'))
print("DONE!: this server has %d peers" % len(rv))
loop.close()
#c.blockchain.address.get_balance(23)
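# Illustrative sketch (not part of the original module): the subscribe()
# pattern above returns a (future, queue) pair -- the future resolves with the
# server's initial reply and the queue receives later notifications. The
# coroutine name is hypothetical; 'blockchain.headers.subscribe' is a standard
# Electrum subscription method.
async def _example_watch_headers(client):
    fut, updates = client.subscribe('blockchain.headers.subscribe')
    logger.debug("current tip: %r", await fut)               # initial reply
    while True:
        logger.debug("new header: %r", await updates.get())  # later notifications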
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ServiceEndpointPoliciesOperations(object):
"""ServiceEndpointPoliciesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified service endpoint policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
service_endpoint_policy_name=service_endpoint_policy_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.ServiceEndpointPolicy"
"""Gets the specified service Endpoint Policies in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServiceEndpointPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_08_01.models.ServiceEndpointPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
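    # Illustrative usage sketch (same assumed client attribute as above; the
    # expand value shown is only an example):
    #
    #   policy = client.service_endpoint_policies.get(
    #       "my-rg", "my-policy", expand="serviceEndpointPolicyDefinitions")
    #   print(policy.id)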
def _create_or_update_initial(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
parameters, # type: "_models.ServiceEndpointPolicy"
**kwargs # type: Any
):
# type: (...) -> "_models.ServiceEndpointPolicy"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ServiceEndpointPolicy')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
parameters, # type: "_models.ServiceEndpointPolicy"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ServiceEndpointPolicy"]
"""Creates or updates a service Endpoint Policies.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:param parameters: Parameters supplied to the create or update service endpoint policy
operation.
:type parameters: ~azure.mgmt.network.v2019_08_01.models.ServiceEndpointPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ServiceEndpointPolicy or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_08_01.models.ServiceEndpointPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
service_endpoint_policy_name=service_endpoint_policy_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
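    # Illustrative usage sketch (hypothetical names; assumes the same client
    # attribute as above and the models shipped with this package version):
    #
    #   from azure.mgmt.network.v2019_08_01.models import ServiceEndpointPolicy
    #
    #   policy = client.service_endpoint_policies.begin_create_or_update(
    #       resource_group_name="my-rg",
    #       service_endpoint_policy_name="my-policy",
    #       parameters=ServiceEndpointPolicy(location="westus"),
    #   ).result()          # .result() waits for the LRO and returns the model
    #   print(policy.provisioning_state)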
def _update_initial(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.ServiceEndpointPolicy"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
def begin_update(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ServiceEndpointPolicy"]
"""Updates service Endpoint Policies.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:param parameters: Parameters supplied to update service endpoint policy tags.
:type parameters: ~azure.mgmt.network.v2019_08_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ServiceEndpointPolicy or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_08_01.models.ServiceEndpointPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
service_endpoint_policy_name=service_endpoint_policy_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ServiceEndpointPolicyListResult"]
"""Gets all the service endpoint policies in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ServiceEndpointPolicyListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_08_01.models.ServiceEndpointPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ServiceEndpointPolicies'} # type: ignore
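    # Illustrative usage sketch: the returned ItemPaged fetches pages lazily,
    # so a plain for-loop walks every policy in the subscription (client
    # attribute name assumed as above):
    #
    #   for policy in client.service_endpoint_policies.list():
    #       print(policy.name, policy.location)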
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ServiceEndpointPolicyListResult"]
"""Gets all service endpoint Policies in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ServiceEndpointPolicyListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_08_01.models.ServiceEndpointPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies'} # type: ignore
|
|
#
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
"""
This file contains the base implementation shared by both spine and leaf roles
"""
from db import *
from dm_utils import DMUtils
from ansible_conf import AnsibleConf
from ansible_conf import JunosInterface
from abstract_device_api.abstract_device_xsd import *
import abc
import copy
class AnsibleRoleCommon(AnsibleConf):
@classmethod
def is_role_supported(cls, role):
if role and role.lower().startswith('e2-'):
return False
for _role in cls._roles or []:
if role.lower().startswith(_role.lower()):
return True
return False
# end is_role_supported
def __init__(self, logger, params={}):
super(AnsibleRoleCommon, self).__init__(logger, params)
# end __init__
def is_gateway(self):
if self.physical_router.routing_bridging_roles:
gateway_roles = [r for r in self.physical_router.routing_bridging_roles if 'Gateway' in r]
if gateway_roles:
return True
return False
    # end is_gateway
def underlay_config(self, is_delete=False):
self._logger.info("underlay config start: %s(%s)\n" %
(self.physical_router.name,
self.physical_router.uuid))
if not is_delete:
self.build_underlay_bgp()
self.send_conf(is_delete=is_delete, retry=False)
self._logger.info("underlay config end: %s(%s)\n" %
(self.physical_router.name,
self.physical_router.uuid))
# end underlay_config
def initialize(self):
super(AnsibleRoleCommon, self).initialize()
self.irb_interfaces = []
self.internal_vn_ris = []
# end initialize
def attach_irb(self, ri_conf, ri):
if not self.is_gateway():
return
is_l2 = ri_conf.get("is_l2", False)
is_l2_l3 = ri_conf.get("is_l2_l3", False)
vni = ri_conf.get("vni", None)
network_id = ri_conf.get("network_id", None)
if (is_l2 and vni is not None and
self.is_family_configured(self.bgp_params, "e-vpn")):
if is_l2_l3:
self.irb_interfaces.append("irb." + str(network_id))
# end attach_irb
def set_internal_vn_irb_config(self):
if self.internal_vn_ris and self.irb_interfaces:
for int_ri in self.internal_vn_ris:
lr_uuid = DMUtils.extract_lr_uuid_from_internal_vn_name(int_ri.name)
lr = LogicalRouterDM.get(lr_uuid)
if not lr:
continue
vn_list = lr.get_connected_networks(include_internal=False)
for vn in vn_list:
vn_obj = VirtualNetworkDM.get(vn)
irb_name = "irb." + str(vn_obj.vn_network_id)
if irb_name in self.irb_interfaces:
int_ri.add_routing_interfaces(LogicalInterface(name=irb_name))
# end set_internal_vn_irb_config
def add_irb_config(self, ri_conf):
vn = ri_conf.get("vn")
is_l2_l3 = ri_conf.get("is_l2_l3", False)
gateways = ri_conf.get("gateways", [])
network_id = ri_conf.get("network_id", None)
self.interfaces_config = self.interfaces_config or []
irb_intf = PhysicalInterface(name='irb', interface_type='irb')
self.interfaces_config.append(irb_intf)
self._logger.info("Vn=" + vn.name + ", IRB: " + str(gateways) + ", pr="
+ self.physical_router.name)
if gateways is not None:
intf_unit = LogicalInterface(
name='irb.' + str(network_id), unit=network_id,
comment=DMUtils.vn_irb_comment(vn, False, is_l2_l3))
irb_intf.add_logical_interfaces(intf_unit)
for (irb_ip, gateway) in gateways:
intf_unit.add_ip_list(irb_ip)
if len(gateway) and gateway != '0.0.0.0':
intf_unit.set_gateway(gateway)
# end add_irb_config
# lo0 interface in RI for route lookup to happen for Inter VN traffic
# qfx10k pfe limitation
def add_bogus_lo0(self, ri, network_id, vn):
self.interfaces_config = self.interfaces_config or []
ifl_num = 1000 + int(network_id)
lo_intf = PhysicalInterface(name="lo0", interface_type='loopback')
self.interfaces_config.append(lo_intf)
intf_unit = LogicalInterface(
name="lo0." + str(ifl_num), unit=ifl_num,
comment=DMUtils.l3_bogus_lo_intf_comment(vn))
intf_unit.add_ip_list("127.0.0.1")
lo_intf.add_logical_interfaces(intf_unit)
ri.add_loopback_interfaces(LogicalInterface(name="lo0." + str(ifl_num)))
# end add_bogus_lo0
def add_inet_public_vrf_filter(self, firewall_config, inet_type):
firewall_config.set_family(inet_type)
f = FirewallFilter(name=DMUtils.make_public_vrf_filter_name(inet_type))
f.set_comment(DMUtils.public_vrf_filter_comment())
firewall_config.add_firewall_filters(f)
term = Term(name="default-term", then=Then(accept_or_reject=True))
f.add_terms(term)
return f
# end add_inet_public_vrf_filter
def add_inet_filter_term(self, ri_name, prefixes, inet_type):
if inet_type == 'inet6':
prefixes = DMUtils.get_ipv6_prefixes(prefixes)
else:
prefixes = DMUtils.get_ipv4_prefixes(prefixes)
from_ = From()
for prefix in prefixes:
from_.add_destination_address(self.get_subnet_for_cidr(prefix))
then_ = Then()
then_.add_routing_instance(ri_name)
return Term(name=DMUtils.make_vrf_term_name(ri_name),
fromxx=from_, then=then_)
# end add_inet_filter_term
def add_routing_instance(self, ri_conf):
ri_name = ri_conf.get("ri_name")
vn = ri_conf.get("vn")
is_l2 = ri_conf.get("is_l2", False)
is_l2_l3 = ri_conf.get("is_l2_l3", False)
import_targets = ri_conf.get("import_targets", set())
export_targets = ri_conf.get("export_targets", set())
prefixes = ri_conf.get("prefixes", [])
gateways = ri_conf.get("gateways", [])
router_external = ri_conf.get("router_external", False)
interfaces = ri_conf.get("interfaces", [])
vni = ri_conf.get("vni", None)
fip_map = ri_conf.get("fip_map", None)
network_id = ri_conf.get("network_id", None)
is_internal_vn = True if '_contrail_lr_internal_vn_' in vn.name else False
encapsulation_priorities = \
ri_conf.get("encapsulation_priorities") or ["MPLSoGRE"]
self.ri_config = self.ri_config or []
ri = RoutingInstance(name=ri_name)
if vn:
is_nat = True if fip_map else False
ri.set_comment(DMUtils.vn_ri_comment(vn, is_l2, is_l2_l3, is_nat,
router_external))
self.ri_config.append(ri)
ri.set_virtual_network_id(str(network_id))
ri.set_vxlan_id(str(vni))
ri.set_virtual_network_is_internal(is_internal_vn)
ri.set_is_public_network(router_external)
if is_l2_l3:
ri.set_virtual_network_mode('l2-l3')
elif is_l2:
ri.set_virtual_network_mode('l2')
else:
ri.set_virtual_network_mode('l3')
has_ipv6_prefixes = DMUtils.has_ipv6_prefixes(prefixes)
has_ipv4_prefixes = DMUtils.has_ipv4_prefixes(prefixes)
if not is_l2:
ri.set_routing_instance_type("vrf")
if fip_map is None:
for interface in interfaces:
ri.add_interfaces(LogicalInterface(name=interface.name))
if prefixes:
for prefix in prefixes:
ri.add_static_routes(self.get_route_for_cidr(prefix))
ri.add_prefixes(self.get_subnet_for_cidr(prefix))
else:
if encapsulation_priorities[0] == "VXLAN":
ri.set_routing_instance_type("virtual-switch")
elif (any(x in encapsulation_priorities for x in ["MPLSoGRE", "MPLSoUDP"])):
ri.set_routing_instance_type("evpn")
if is_internal_vn:
self.internal_vn_ris.append(ri)
if is_internal_vn or router_external:
self.add_bogus_lo0(ri, network_id, vn)
if self.is_gateway() and is_l2_l3:
self.add_irb_config(ri_conf)
self.attach_irb(ri_conf, ri)
if fip_map is not None:
ri.add_interfaces(LogicalInterface(name=interfaces[0].name))
public_vrf_ips = {}
for pip in fip_map.values():
if pip["vrf_name"] not in public_vrf_ips:
public_vrf_ips[pip["vrf_name"]] = set()
public_vrf_ips[pip["vrf_name"]].add(pip["floating_ip"])
for public_vrf, fips in public_vrf_ips.items():
ri.add_interfaces(LogicalInterface(name=interfaces[1].name))
floating_ips = []
for fip in fips:
ri.add_static_routes(
Route(prefix=fip,
prefix_len=32,
next_hop=interfaces[1].name,
comment=DMUtils.fip_egress_comment()))
floating_ips.append(FloatingIpMap(floating_ip=fip + "/32"))
ri.add_floating_ip_list(FloatingIpList(
public_routing_instance=public_vrf,
floating_ips=floating_ips))
# add firewall config for public VRF
if router_external and is_l2 is False:
self.firewall_config = self.firewall_config or Firewall(
comment=DMUtils.firewall_comment())
if has_ipv4_prefixes and not self.inet4_forwarding_filter:
# create single instance inet4 filter
self.inet4_forwarding_filter = self.add_inet_public_vrf_filter(
self.firewall_config, "inet")
if has_ipv6_prefixes and not self.inet6_forwarding_filter:
# create single instance inet6 filter
self.inet6_forwarding_filter = self.add_inet_public_vrf_filter(
self.firewall_config, "inet6")
if has_ipv4_prefixes:
# add terms to inet4 filter
term = self.add_inet_filter_term(ri_name, prefixes, "inet4")
# insert before the last term
terms = self.inet4_forwarding_filter.get_terms()
terms = [term] + (terms or [])
self.inet4_forwarding_filter.set_terms(terms)
if has_ipv6_prefixes:
# add terms to inet6 filter
term = self.add_inet_filter_term(ri_name, prefixes, "inet6")
# insert before the last term
terms = self.inet6_forwarding_filter.get_terms()
terms = [term] + (terms or [])
self.inet6_forwarding_filter.set_terms(terms)
if fip_map is not None:
self.firewall_config = self.firewall_config or Firewall(
comment=DMUtils.firewall_comment())
f = FirewallFilter(
name=DMUtils.make_private_vrf_filter_name(ri_name))
f.set_comment(DMUtils.vn_firewall_comment(vn, "private"))
self.firewall_config.add_firewall_filters(f)
term = Term(name=DMUtils.make_vrf_term_name(ri_name))
from_ = From()
for fip_user_ip in fip_map.keys():
from_.add_source_address(self.get_subnet_for_cidr(fip_user_ip))
term.set_from(from_)
term.set_then(Then(routing_instance=[ri_name]))
f.add_terms(term)
irb_intf = PhysicalInterface(name='irb', interface_type='irb')
self.interfaces_config.append(irb_intf)
intf_unit = LogicalInterface(
name="irb." + str(network_id), unit=network_id,
comment=DMUtils.vn_irb_fip_inet_comment(vn))
irb_intf.add_logical_interfaces(intf_unit)
intf_unit.set_family("inet")
intf_unit.add_firewall_filters(
DMUtils.make_private_vrf_filter_name(ri_name))
ri.add_routing_interfaces(intf_unit)
if gateways is not None:
for (ip, gateway) in gateways:
ri.add_gateways(GatewayRoute(
ip_address=self.get_subnet_for_cidr(ip),
gateway=self.get_subnet_for_cidr(gateway)))
# add L2 EVPN and BD config
if (is_l2 and vni is not None and
self.is_family_configured(self.bgp_params, "e-vpn")):
vlan = None
if encapsulation_priorities[0] == "VXLAN":
self.vlans_config = self.vlans_config or []
vlan = Vlan(name=DMUtils.make_bridge_name(vni), vxlan_id=vni)
vlan.set_comment(DMUtils.vn_bd_comment(vn, "VXLAN"))
self.vlans_config.append(vlan)
for interface in interfaces:
vlan.add_interfaces(LogicalInterface(name=interface.name))
if is_l2_l3:
# network_id is unique, hence irb
irb_intf = "irb." + str(network_id)
vlan.add_interfaces(LogicalInterface(name=irb_intf))
elif (any(x in encapsulation_priorities for x in ["MPLSoGRE", "MPLSoUDP"])):
self.init_evpn_config(encapsulation_priorities[1])
self.evpn.set_comment(
DMUtils.vn_evpn_comment(vn, encapsulation_priorities[1]))
for interface in interfaces:
self.evpn.add_interfaces(LogicalInterface(name=interface.name))
self.interfaces_config = self.interfaces_config or []
self.build_l2_evpn_interface_config(self.interfaces_config,
interfaces, vn, vlan)
if (not is_l2 and vni is not None and
self.is_family_configured(self.bgp_params, "e-vpn")):
self.init_evpn_config()
if not is_internal_vn:
# add vlans
self.add_ri_vlan_config(ri_name, vni)
if (not is_l2 and not is_l2_l3 and gateways):
self.interfaces_config = self.interfaces_config or []
ifl_num = 1000 + int(network_id)
lo_intf = PhysicalInterface(name="lo0", interface_type='loopback')
self.interfaces_config.append(lo_intf)
intf_unit = LogicalInterface(name="lo0." + str(ifl_num),
unit=ifl_num,
comment=DMUtils.l3_lo_intf_comment(vn))
lo_intf.add_logical_interfaces(intf_unit)
for (lo_ip, _) in gateways:
subnet = lo_ip
(ip, _) = lo_ip.split('/')
if ':' in lo_ip:
lo_ip = ip + '/' + '128'
else:
lo_ip = ip + '/' + '32'
intf_unit.add_ip_list(lo_ip)
ri.add_loopback_interfaces(LogicalInterface(
name="lo0." + str(ifl_num),
comment=DMUtils.lo0_ri_intf_comment(vn)))
# fip services config
if fip_map is not None:
nat_rules = NatRules(allow_overlapping_nat_pools=True,
name=DMUtils.make_services_set_name(ri_name),
comment=DMUtils.service_set_comment(vn))
ri.set_nat_rules(nat_rules)
snat_rule = NatRule(
name=DMUtils.make_snat_rule_name(ri_name),
comment=DMUtils.service_set_nat_rule_comment(vn, "SNAT"),
direction="input", translation_type="basic-nat44")
snat_rule.set_comment(DMUtils.snat_rule_comment())
nat_rules.add_rules(snat_rule)
dnat_rule = NatRule(
name=DMUtils.make_dnat_rule_name(ri_name),
comment=DMUtils.service_set_nat_rule_comment(vn, "DNAT"),
direction="output", translation_type="dnat-44")
dnat_rule.set_comment(DMUtils.dnat_rule_comment())
nat_rules.add_rules(dnat_rule)
nat_rules.set_inside_interface(interfaces[0].name)
nat_rules.set_outside_interface(interfaces[1].name)
for pip, fip_vn in fip_map.items():
fip = fip_vn["floating_ip"]
# private ip
snat_rule.add_source_addresses(self.get_subnet_for_cidr(pip))
# public ip
snat_rule.add_source_prefixes(self.get_subnet_for_cidr(fip))
# public ip
dnat_rule.add_destination_addresses(
self.get_subnet_for_cidr(fip))
# private ip
dnat_rule.add_destination_prefixes(
self.get_subnet_for_cidr(pip))
intf_unit = LogicalInterface(
name=interfaces[0].name,
unit=interfaces[0].unit,
comment=DMUtils.service_intf_comment("Ingress"))
intf_unit.set_family("inet")
ri.add_service_interfaces(intf_unit)
intf_unit = LogicalInterface(
name=interfaces[1].name,
unit=interfaces[1].unit,
comment=DMUtils.service_intf_comment("Egress"))
intf_unit.set_family("inet")
ri.add_service_interfaces(intf_unit)
for target in import_targets:
ri.add_import_targets(target)
for target in export_targets:
ri.add_export_targets(target)
# end add_routing_instance
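    # Illustrative note, not from the original source: add_routing_instance()
    # is driven entirely by the ri_conf dict assembled in build_ri_config().
    # A minimal L2 entry, with invented values, would look roughly like:
    #
    #   ri_conf = {
    #       'ri_name': 'vn1-l2-5',          # hypothetical name from DMUtils.make_vrf_name
    #       'vn': vn_obj,                   # VirtualNetworkDM instance
    #       'is_l2': True, 'is_l2_l3': True,
    #       'import_targets': {'target:64512:5'},
    #       'export_targets': {'target:64512:5'},
    #       'interfaces': [JunosInterface('xe-0/0/1.100', 'l2', 100)],
    #       'vni': 5005, 'network_id': 5,
    #       'encapsulation_priorities': ['VXLAN'],
    #   }
    #   self.add_routing_instance(ri_conf)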
def attach_acls(self, interface, unit):
        pi_list = []
        li_name = None
esi_map = self.get_ae_alloc_esi_map()
for esi, ae_id in self.physical_router.ae_id_map.items():
ae_name = "ae" + str(ae_id)
if_name, if_unit = interface.name.split('.')
if ae_name == if_name:
pi_list = esi_map.get(esi)
if pi_list:
self._logger.info("attach acls on AE intf:%s, link_member:%s, unit:%s" %
(ae_name, pi_list[0].name, if_unit))
li_name = pi_list[0].name + '.' + if_unit
break
if not pi_list and not interface.li_uuid:
return
interface = LogicalInterfaceDM.find_by_name_or_uuid(interface.li_uuid)
        if not interface and li_name:
            # fall back to the ae member LI name computed above
            interface = LogicalInterfaceDM.find_by_name_or_uuid(li_name)
if not interface:
return
sg_list = interface.get_attached_sgs()
filter_list = []
for sg in sg_list:
flist = self.get_configured_filters(sg)
filter_list += flist
if filter_list:
for fname in filter_list:
unit.add_firewall_filters(fname)
# end attach_acls
def build_l2_evpn_interface_config(self, interfaces_config, interfaces, vn,
vlan_conf):
ifd_map = {}
for interface in interfaces:
ifd_map.setdefault(interface.ifd_name, []).append(interface)
for ifd_name, interface_list in ifd_map.items():
intf = PhysicalInterface(name=ifd_name)
interfaces_config.append(intf)
if interface_list[0].is_untagged():
if len(interface_list) > 1:
self._logger.error(
"invalid logical interfaces config for ifd %s" % (
ifd_name))
continue
unit_name = ifd_name + "." + str(interface_list[0].unit)
unit = LogicalInterface(
name=unit_name,
unit=interface_list[0].unit,
comment=DMUtils.l2_evpn_intf_unit_comment(vn, False),
is_tagged=False,
vlan_tag="4094")
# attach acls
self.attach_acls(interface_list[0], unit)
intf.add_logical_interfaces(unit)
if vlan_conf:
vlan_conf.add_interfaces(LogicalInterface(name=unit_name))
else:
for interface in interface_list:
unit_name = ifd_name + "." + str(interface.unit)
unit = LogicalInterface(
name=unit_name,
unit=interface.unit,
comment=DMUtils.l2_evpn_intf_unit_comment(
vn, True, interface.vlan_tag),
is_tagged=True,
vlan_tag=str(interface.vlan_tag))
# attach acls
self.attach_acls(interface, unit)
intf.add_logical_interfaces(unit)
if vlan_conf:
vlan_conf.add_interfaces(LogicalInterface(
name=unit_name))
# end build_l2_evpn_interface_config
def init_evpn_config(self, encapsulation='vxlan'):
if not self.ri_config:
# no vn config then no need to configure evpn
return
if self.evpn:
# evpn init done
return
self.evpn = Evpn(encapsulation=encapsulation)
# end init_evpn_config
def add_vlan_config(self, vrf_name, vni, is_l2_l3=False, irb_intf=None):
self.vlans_config = self.vlans_config or []
vlan = Vlan(name=vrf_name[1:], vxlan_id=vni)
if is_l2_l3:
if not irb_intf:
self._logger.error("Missing irb interface config l3 vlan: %s" % vrf_name)
else:
vlan.set_vlan_id(vni)
vlan.add_interfaces(LogicalInterface(name=irb_intf))
self.vlans_config.append(vlan)
return vlan
# end add_vlan_config
def add_ri_vlan_config(self, vrf_name, vni):
self.vlans_config = self.vlans_config or []
self.vlans_config.append(Vlan(name=vrf_name[1:], vlan_id=vni, vxlan_id=vni))
# end add_ri_vlan_config
def build_esi_config(self):
pr = self.physical_router
if not pr:
return
self.interfaces_config = self.interfaces_config or []
for pi_uuid in pr.physical_interfaces:
pi = PhysicalInterfaceDM.get(pi_uuid)
if not pi or not pi.esi or pi.esi == "0" or pi.get_parent_ae_id():
continue
intf = PhysicalInterface(name=pi.name,
ethernet_segment_identifier=pi.esi)
self.interfaces_config.append(intf)
# end build_esi_config
def build_lag_config(self):
pr = self.physical_router
if not pr:
return
self.interfaces_config = self.interfaces_config or []
for lag_uuid in pr.link_aggregation_groups or []:
link_members = []
lag_obj = LinkAggregationGroupDM.get(lag_uuid)
if not lag_obj:
continue
for pi_uuid in lag_obj.physical_interfaces or []:
pi = PhysicalInterfaceDM.get(pi_uuid)
if not pi:
continue
if pi.interface_type != 'lag':
link_members.append(pi.name)
else:
ae_intf_name = pi.name
self._logger.info("LAG obj_uuid: %s, link_members: %s, name: %s" %
(lag_uuid, link_members, ae_intf_name))
lag = LinkAggrGroup(lacp_enabled=lag_obj.lacp_enabled,
link_members=link_members)
intf = PhysicalInterface(name=ae_intf_name,
interface_type='lag',
link_aggregation_group=lag)
self.interfaces_config.append(intf)
# end build_lag_config
def get_vn_li_map(self):
pr = self.physical_router
vn_list = []
# get all logical router connected networks
for lr_id in pr.logical_routers or []:
lr = LogicalRouterDM.get(lr_id)
if not lr:
continue
vn_list += lr.get_connected_networks(include_internal=True)
vn_dict = {}
for vn_id in vn_list:
vn_dict[vn_id] = []
for vn_id in pr.virtual_networks:
vn_dict[vn_id] = []
li_set = pr.logical_interfaces
for pi_uuid in pr.physical_interfaces:
pi = PhysicalInterfaceDM.get(pi_uuid)
if pi is None:
continue
li_set |= pi.logical_interfaces
for li_uuid in li_set:
li = LogicalInterfaceDM.get(li_uuid)
if li is None:
continue
vmi_id = li.virtual_machine_interface
vmi = VirtualMachineInterfaceDM.get(vmi_id)
if vmi is None:
continue
vn_id = vmi.virtual_network
if li.physical_interface:
pi = PhysicalInterfaceDM.get(li.physical_interface)
ae_id = pi.get_parent_ae_id()
if ae_id and li.physical_interface:
                    _, unit = li.name.split('.')
ae_name = "ae" + str(ae_id) + "." + unit
vn_dict.setdefault(vn_id, []).append(
JunosInterface(ae_name, li.li_type, li.vlan_tag))
continue
vn_dict.setdefault(vn_id, []).append(
JunosInterface(li.name, li.li_type, li.vlan_tag, li_uuid=li.uuid))
return vn_dict
    # end get_vn_li_map
def get_vn_associated_physical_interfaces(self):
pr = self.physical_router
li_set = set()
pi_list = []
for pi_uuid in pr.physical_interfaces:
pi = PhysicalInterfaceDM.get(pi_uuid)
if pi is None or not pi.esi or pi.esi == "0":
continue
if self.has_vmi(pi.logical_interfaces):
pi_list.append(pi)
return pi_list
# end get_vn_associated_physical_interfaces
def has_vmi(self, li_set):
if not li_set:
return False
for li_uuid in li_set:
li = LogicalInterfaceDM.get(li_uuid)
if not li or not li.virtual_machine_interface \
or not VirtualMachineInterfaceDM.get(li.virtual_machine_interface):
continue
return True
return False
# end has_vmi
def get_ae_alloc_esi_map(self):
pi_list = self.get_vn_associated_physical_interfaces()
esi_map = {}
for pi in pi_list:
if not pi.name.startswith("ae") and pi.esi:
esi_map.setdefault(pi.esi, []).append(pi)
return esi_map
# end get_ae_alloc_esi_map
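    # Illustrative note: the returned map is keyed by ESI and lists the member
    # physical interfaces that should be bundled into a single ae interface,
    # e.g. (values invented):
    #
    #   {'00:11:22:33:44:55:66:77:88:99':
    #        [<PhysicalInterfaceDM xe-0/0/1>, <PhysicalInterfaceDM xe-0/0/2>]}
    #
    # build_ae_config() below turns each entry into an aeN PhysicalInterface
    # whose LinkAggrGroup members are those interfaces.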
def build_ae_config(self, esi_map):
self.interfaces_config = self.interfaces_config or []
# self.ae_id_map should have all esi => ae_id mapping
# esi_map should have esi => interface memberships
for esi, ae_id in self.physical_router.ae_id_map.items():
# config ae interface
ae_name = "ae" + str(ae_id)
# associate 'ae' membership
pi_list = esi_map.get(esi)
link_members = []
for pi in pi_list or []:
link_members.append(pi.name)
lag = LinkAggrGroup(link_members=link_members)
intf = PhysicalInterface(name=ae_name,
ethernet_segment_identifier=esi,
link_aggregation_group=lag)
self.interfaces_config.append(intf)
# end build_ae_config
def add_addr_term(self, term, addr_match, is_src):
if not addr_match:
return None
subnet = addr_match.get_subnet()
if not subnet:
return None
subnet_ip = subnet.get_ip_prefix()
subnet_len = subnet.get_ip_prefix_len()
if not subnet_ip or not subnet_len:
return None
from_ = term.get_from() or From()
term.set_from(from_)
if is_src:
from_.add_source_address(Subnet(prefix=subnet_ip,
prefix_len=subnet_len))
else:
from_.add_destination_address(Subnet(prefix=subnet_ip,
prefix_len=subnet_len))
# end add_addr_term
def add_port_term(self, term, port_match, is_src):
if not port_match:
return None
start_port = port_match.get_start_port()
end_port = port_match.get_end_port()
if not start_port or not end_port:
return None
port_str = str(start_port) + "-" + str(end_port)
from_ = term.get_from() or From()
term.set_from(from_)
if is_src:
from_.add_source_ports(port_str)
else:
from_.add_destination_ports(port_str)
# end add_port_term
def add_protocol_term(self, term, protocol_match):
if not protocol_match or protocol_match == 'any':
return None
from_ = term.get_from() or From()
term.set_from(from_)
from_.set_ip_protocol(protocol_match)
# end add_protocol_term
def add_filter_term(self, ff, name):
term = Term()
term.set_name(name)
ff.add_terms(term)
term.set_then(Then(accept_or_reject=True))
return term
def add_dns_dhcp_terms(self, ff):
port_list = [67, 68, 53]
term = Term()
term.set_name("allow-dns-dhcp")
from_ = From()
from_.set_ip_protocol("udp")
term.set_from(from_)
for port in port_list:
from_.add_source_ports(str(port))
term.set_then(Then(accept_or_reject=True))
ff.add_terms(term)
# end add_dns_dhcp_terms
def add_ether_type_term(self, ff, ether_type_match):
if not ether_type_match:
return None
term = Term()
from_ = From()
term.set_from(from_)
term.set_name("ether-type")
from_.set_ether_type(ether_type_match.lower())
term.set_then(Then(accept_or_reject=True))
ff.add_terms(term)
# end add_ether_type_term
def build_firewall_filters(self, sg, acl, is_egress=False):
acl_rule_present = False
if not sg or not acl or not acl.vnc_obj:
return
acl = acl.vnc_obj
entries = acl.get_access_control_list_entries()
if not entries:
return
rules = entries.get_acl_rule() or []
if not rules:
return
self.firewall_config = self.firewall_config or\
Firewall(DMUtils.firewall_comment())
for rule in rules:
if not self.has_terms(rule):
continue
match = rule.get_match_condition()
if not match:
continue
acl_rule_present = True
break
if acl_rule_present:
filter_name = DMUtils.make_sg_firewall_name(sg.name, acl.uuid)
f = FirewallFilter(name=filter_name)
f.set_comment(DMUtils.make_sg_firewall_comment(sg.name, acl.uuid))
# allow arp ether type always
self.add_ether_type_term(f, 'arp')
# allow dhcp/dns always
self.add_dns_dhcp_terms(f)
for rule in rules:
if not self.has_terms(rule):
continue
match = rule.get_match_condition()
if not match:
continue
rule_uuid = rule.get_rule_uuid()
dst_addr_match = match.get_dst_address()
dst_port_match = match.get_dst_port()
ether_type_match = match.get_ethertype()
protocol_match = match.get_protocol()
src_addr_match = match.get_src_address()
src_port_match = match.get_src_port()
term = self.add_filter_term(f, rule_uuid)
self.add_addr_term(term, dst_addr_match, False)
self.add_addr_term(term, src_addr_match, True)
self.add_port_term(term, dst_port_match, False)
# source port match is not needed for now (BMS source port)
#self.add_port_term(term, src_port_match, True)
self.add_protocol_term(term, protocol_match)
self.firewall_config.add_firewall_filters(f)
# end build_firewall_filters
def build_firewall_config(self):
sg_list = LogicalInterfaceDM.get_sg_list()
for sg in sg_list or []:
acls = sg.access_control_lists
for acl in acls or []:
acl = AccessControlListDM.get(acl)
if acl and not acl.is_ingress:
self.build_firewall_filters(sg, acl)
# end build_firewall_config
def is_default_sg(self, match):
if (not match.get_dst_address()) or \
(not match.get_dst_port()) or \
(not match.get_ethertype()) or \
(not match.get_src_address()) or \
(not match.get_src_port()) or \
(not match.get_protocol()):
return False
if not match.get_dst_address().get_subnet():
return False
if ((str(match.get_dst_address().get_subnet().get_ip_prefix()) == "0.0.0.0") or \
(str(match.get_dst_address().get_subnet().get_ip_prefix()) == "::")) and \
(str(match.get_dst_address().get_subnet().get_ip_prefix_len()) == "0") and \
(str(match.get_dst_port().get_start_port()) == "0") and \
(str(match.get_dst_port().get_end_port()) == "65535") and \
((str(match.get_ethertype()) == "IPv4") or \
(str(match.get_ethertype()) == "IPv6")) and \
(not match.get_src_address().get_subnet()) and \
(not match.get_src_address().get_subnet_list()) and \
(str(match.get_src_port().get_start_port()) == "0") and \
(str(match.get_src_port().get_end_port()) == "65535") and \
(str(match.get_protocol()) == "any"):
return True
return False
# end is_default_sg
def has_terms(self, rule):
match = rule.get_match_condition()
if not match:
return False
# return False if it is default SG, no filter is applied
if self.is_default_sg(match):
return False
return match.get_dst_address() or match.get_dst_port() or \
match.get_ethertype() or match.get_src_address() or match.get_src_port() or \
(match.get_protocol() and match.get_protocol() != 'any')
def get_firewall_filters(self, sg, acl, is_egress=False):
acl_rule_present = False
if not sg or not acl or not acl.vnc_obj:
return []
acl = acl.vnc_obj
entries = acl.get_access_control_list_entries()
if not entries:
return []
rules = entries.get_acl_rule() or []
if not rules:
return []
filter_names = []
for rule in rules:
if not self.has_terms(rule):
continue
match = rule.get_match_condition()
if not match:
continue
rule_uuid = rule.get_rule_uuid()
ether_type_match = match.get_ethertype()
if not ether_type_match:
continue
if 'ipv6' in ether_type_match.lower():
continue
acl_rule_present = True
break
if acl_rule_present:
filter_name = DMUtils.make_sg_firewall_name(sg.name, acl.uuid)
filter_names.append(filter_name)
return filter_names
# end get_firewall_filters
def get_configured_filters(self, sg):
if not sg:
return []
filter_names = []
acls = sg.access_control_lists
for acl in acls or []:
acl = AccessControlListDM.get(acl)
if acl and not acl.is_ingress:
fnames = self.get_firewall_filters(sg, acl)
filter_names += fnames
return filter_names
# end get_configured_filters
def build_ri_config(self):
esi_map = self.get_ae_alloc_esi_map()
self.physical_router.evaluate_ae_id_map(esi_map)
self.build_ae_config(esi_map)
vn_dict = self.get_vn_li_map()
vn_irb_ip_map = None
if self.is_gateway():
self.physical_router.evaluate_vn_irb_ip_map(set(vn_dict.keys()), 'l2_l3', 'irb', False)
self.physical_router.evaluate_vn_irb_ip_map(set(vn_dict.keys()), 'l3', 'lo0', True)
vn_irb_ip_map = self.physical_router.get_vn_irb_ip_map()
for vn_id, interfaces in vn_dict.items():
vn_obj = VirtualNetworkDM.get(vn_id)
if (vn_obj is None or
vn_obj.get_vxlan_vni() is None or
vn_obj.vn_network_id is None):
continue
export_set = None
import_set = None
for ri_id in vn_obj.routing_instances:
# Find the primary RI by matching the name
ri_obj = RoutingInstanceDM.get(ri_id)
if ri_obj is None:
continue
if ri_obj.fq_name[-1] == vn_obj.fq_name[-1]:
vrf_name_l2 = DMUtils.make_vrf_name(vn_obj.fq_name[-1],
vn_obj.vn_network_id, 'l2')
vrf_name_l3 = DMUtils.make_vrf_name(vn_obj.fq_name[-1],
vn_obj.vn_network_id, 'l3')
if vn_obj.route_targets:
export_set = vn_obj.route_targets & ri_obj.export_targets
import_set = vn_obj.route_targets & ri_obj.import_targets
else:
export_set = copy.copy(ri_obj.export_targets)
import_set = copy.copy(ri_obj.import_targets)
for ri2_id in ri_obj.routing_instances:
ri2 = RoutingInstanceDM.get(ri2_id)
if ri2 is None:
continue
import_set |= ri2.export_targets
if vn_obj.get_forwarding_mode() in ['l2', 'l2_l3']:
irb_ips = None
if vn_obj.get_forwarding_mode() == 'l2_l3' and self.is_gateway():
irb_ips = vn_irb_ip_map['irb'].get(vn_id, [])
ri_conf = {'ri_name': vrf_name_l2, 'vn': vn_obj,
'is_l2': True, 'is_l2_l3': (
vn_obj.get_forwarding_mode() == 'l2_l3'),
'import_targets': import_set,
'export_targets': export_set,
'prefixes': vn_obj.get_prefixes(),
'gateways': irb_ips,
'router_external': vn_obj.router_external,
'interfaces': interfaces,
'vni': vn_obj.get_vxlan_vni(),
'network_id': vn_obj.vn_network_id,
'encapsulation_priorities':
GlobalVRouterConfigDM.
global_encapsulation_priorities}
self.add_routing_instance(ri_conf)
if vn_obj.get_forwarding_mode() in ['l3', 'l2_l3']:
interfaces = []
lo0_ips = None
if vn_obj.get_forwarding_mode() == 'l2_l3':
interfaces = [
JunosInterface(
'irb.' + str(vn_obj.vn_network_id),
'l3', 0)]
elif self.is_gateway():
lo0_ips = vn_irb_ip_map['lo0'].get(vn_id, [])
is_internal_vn = True if '_contrail_lr_internal_vn_' in vn_obj.name else False
ri_conf = {'ri_name': vrf_name_l3, 'vn': vn_obj,
'is_l2': False,
'is_l2_l3': vn_obj.get_forwarding_mode() ==
'l2_l3',
'import_targets': import_set,
'export_targets': export_set,
'prefixes': vn_obj.get_prefixes(),
'router_external': vn_obj.router_external,
'interfaces': interfaces,
'gateways': lo0_ips,
'network_id': vn_obj.vn_network_id}
if is_internal_vn:
ri_conf['vni'] = vn_obj.get_vxlan_vni(is_internal_vn = is_internal_vn)
lr_uuid = DMUtils.extract_lr_uuid_from_internal_vn_name(vrf_name_l3)
lr = LogicalRouterDM.get(lr_uuid)
if lr:
ri_conf['router_external'] = lr.logical_router_gateway_external
self.add_routing_instance(ri_conf)
break
if export_set and\
self.physical_router.is_junos_service_ports_enabled() and\
len(vn_obj.instance_ip_map) > 0:
service_port_ids = DMUtils.get_service_ports(
vn_obj.vn_network_id)
if not self.physical_router \
.is_service_port_id_valid(service_port_ids[0]):
self._logger.error("DM can't allocate service interfaces"
" for (vn, vn-id)=(%s,%s)" %
(vn_obj.fq_name,
vn_obj.vn_network_id))
else:
vrf_name = DMUtils.make_vrf_name(vn_obj.fq_name[-1],
vn_obj.vn_network_id,
'l3', True)
interfaces = []
service_ports = self.physical_router.junos_service_ports.\
get('service_port')
interfaces.append(
JunosInterface(
service_ports[0] + "." + str(service_port_ids[0]),
'l3', 0))
interfaces.append(
JunosInterface(
service_ports[0] + "." + str(service_port_ids[1]),
'l3', 0))
ri_conf = {'ri_name': vrf_name, 'vn': vn_obj,
'import_targets': import_set,
'interfaces': interfaces,
'fip_map': vn_obj.instance_ip_map,
'network_id': vn_obj.vn_network_id,
'restrict_proxy_arp': vn_obj.router_external}
self.add_routing_instance(ri_conf)
return
# end build_ri_config
def set_common_config(self):
self.build_underlay_bgp()
if not self.ensure_bgp_config():
return
self.build_bgp_config()
self.build_ri_config()
self.set_internal_vn_irb_config()
self.init_evpn_config()
self.build_firewall_config()
self.build_esi_config()
self.build_lag_config()
# end set_common_config
@staticmethod
def get_subnet_for_cidr(cidr):
cidr_parts = cidr.split('/', 1)
return Subnet(prefix=cidr_parts[0],
prefix_len=int(cidr_parts[1]) if len(cidr_parts) > 1
else 32)
# end get_subnet_for_cidr
@staticmethod
def get_route_for_cidr(cidr):
cidr_parts = cidr.split('/', 1)
return Route(prefix=cidr_parts[0],
prefix_len=int(cidr_parts[1]) if len(cidr_parts) > 1
else 32)
# end get_route_for_cidr
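    # For example, both helpers treat a bare address as a host prefix:
    #   get_subnet_for_cidr('10.0.0.0/24') -> Subnet(prefix='10.0.0.0', prefix_len=24)
    #   get_subnet_for_cidr('10.0.0.1')    -> Subnet(prefix='10.0.0.1', prefix_len=32)
    # get_route_for_cidr() behaves the same way but builds a Route object.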
# end AnsibleRoleCommon
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import atexit
import functools
import os
import time
import fixtures
import nose.plugins.attrib
import testresources
import testtools
from tempest import clients
from tempest.common import isolated_creds
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# All the successful HTTP status codes from RFC 2616
HTTP_SUCCESS = (200, 201, 202, 203, 204, 205, 206)
def attr(*args, **kwargs):
"""A decorator which applies the nose and testtools attr decorator
This decorator applies the nose attr decorator as well as the
the testtools.testcase.attr if it is in the list of attributes
to testtools we want to apply.
"""
def decorator(f):
if 'type' in kwargs and isinstance(kwargs['type'], str):
f = testtools.testcase.attr(kwargs['type'])(f)
if kwargs['type'] == 'smoke':
f = testtools.testcase.attr('gate')(f)
elif 'type' in kwargs and isinstance(kwargs['type'], list):
for attr in kwargs['type']:
f = testtools.testcase.attr(attr)(f)
if attr == 'smoke':
f = testtools.testcase.attr('gate')(f)
return nose.plugins.attrib.attr(*args, **kwargs)(f)
return decorator
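# Illustrative usage (test name invented): tagging a test as a smoke test also
# tags it for the gate, per the logic above.
#
#   @attr(type='smoke')
#   def test_list_flavors(self):
#       ...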
def services(*args, **kwargs):
"""A decorator used to set an attr for each service used in a test case
This decorator applies a testtools attr for each service that gets
exercised by a test case.
"""
valid_service_list = ['compute', 'image', 'volume', 'orchestration',
'network', 'identity', 'object', 'dashboard']
def decorator(f):
for service in args:
if service not in valid_service_list:
raise exceptions.InvalidServiceTag('%s is not a valid service'
% service)
attr(type=list(args))(f)
return f
return decorator
def stresstest(*args, **kwargs):
"""Add stress test decorator
    For all functions with this decorator, an attr 'stress' will be
    set automatically.
@param class_setup_per: allowed values are application, process, action
``application``: once in the stress job lifetime
``process``: once in the worker process lifetime
``action``: on each action
@param allow_inheritance: allows inheritance of this attribute
"""
def decorator(f):
if 'class_setup_per' in kwargs:
setattr(f, "st_class_setup_per", kwargs['class_setup_per'])
else:
setattr(f, "st_class_setup_per", 'process')
if 'allow_inheritance' in kwargs:
setattr(f, "st_allow_inheritance", kwargs['allow_inheritance'])
else:
setattr(f, "st_allow_inheritance", False)
attr(type='stress')(f)
return f
return decorator
def skip_because(*args, **kwargs):
"""A decorator useful to skip tests hitting known bugs
@param bug: bug number causing the test to skip
    @param condition: optional condition that must be True for the skip to take place
"""
def decorator(f):
@functools.wraps(f)
def wrapper(*func_args, **func_kwargs):
if "bug" in kwargs:
if "condition" not in kwargs or kwargs["condition"] is True:
msg = "Skipped until Bug: %s is resolved." % kwargs["bug"]
raise testtools.TestCase.skipException(msg)
return f(*func_args, **func_kwargs)
return wrapper
return decorator
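# Illustrative usage (bug number invented): the test below is skipped
# unconditionally with a "Skipped until Bug: 1234567 is resolved." message;
# pass condition=False to disable the skip.
#
#   @skip_because(bug="1234567")
#   def test_known_broken_path(self):
#       ...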
def requires_ext(*args, **kwargs):
"""A decorator to skip tests if an extension is not enabled
@param extension
@param service
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*func_args, **func_kwargs):
if not is_extension_enabled(kwargs['extension'],
kwargs['service']):
msg = "Skipped because %s extension: %s is not enabled" % (
kwargs['service'], kwargs['extension'])
raise testtools.TestCase.skipException(msg)
return func(*func_args, **func_kwargs)
return wrapper
return decorator
def is_extension_enabled(extension_name, service):
"""A function that will check the list of enabled extensions from config
"""
configs = config.TempestConfig()
config_dict = {
'compute': configs.compute_feature_enabled.api_extensions,
'compute_v3': configs.compute_feature_enabled.api_v3_extensions,
'volume': configs.volume_feature_enabled.api_extensions,
'network': configs.network_feature_enabled.api_extensions,
}
if config_dict[service][0] == 'all':
return True
if extension_name in config_dict[service]:
return True
return False
# there is a mis-match between nose and testtools for older pythons.
# testtools will set skipException to be either
# unittest.case.SkipTest, unittest2.case.SkipTest or an internal skip
# exception, depending on what it can find. Python <2.7 doesn't have
# unittest.case.SkipTest; so if unittest2 is not installed it falls
# back to the internal class.
#
# The current nose skip plugin will decide to raise either
# unittest.case.SkipTest or its own internal exception; it does not
# look for unittest2 or the internal unittest exception. Thus we must
# monkey-patch testtools.TestCase.skipException to be the exception
# the nose skip plugin expects.
#
# However, with the switch to testr nose may not be available, so we
# require you to opt-in to this fix with an environment variable.
#
# This is temporary until upstream nose starts looking for unittest2
# as testtools does; we can then remove this and ensure unittest2 is
# available for older pythons; then nose and testtools will agree
# unittest2.case.SkipTest is the one-true skip test exception.
#
# https://review.openstack.org/#/c/33056
# https://github.com/nose-devs/nose/pull/699
if 'TEMPEST_PY26_NOSE_COMPAT' in os.environ:
try:
import unittest.case.SkipTest
# convince pep8 we're using the import...
if unittest.case.SkipTest:
pass
raise RuntimeError("You have unittest.case.SkipTest; "
"no need to override")
except ImportError:
LOG.info("Overriding skipException to nose SkipTest")
testtools.TestCase.skipException = nose.plugins.skip.SkipTest
at_exit_set = set()
def validate_tearDownClass():
if at_exit_set:
raise RuntimeError("tearDownClass does not calls the super's "
"tearDownClass in these classes: "
+ str(at_exit_set) + "\n"
"If you see the exception, with another "
"exception please do not report this one!"
"If you are changing tempest code, make sure you",
"are calling the super class's tearDownClass!")
atexit.register(validate_tearDownClass)
class BaseTestCase(testtools.TestCase,
testtools.testcase.WithAttributes,
testresources.ResourcedTestCase):
config = config.TempestConfig()
setUpClassCalled = False
@classmethod
def setUpClass(cls):
if hasattr(super(BaseTestCase, cls), 'setUpClass'):
super(BaseTestCase, cls).setUpClass()
cls.setUpClassCalled = True
@classmethod
def tearDownClass(cls):
at_exit_set.discard(cls)
if hasattr(super(BaseTestCase, cls), 'tearDownClass'):
super(BaseTestCase, cls).tearDownClass()
def setUp(self):
super(BaseTestCase, self).setUp()
if not self.setUpClassCalled:
raise RuntimeError("setUpClass does not calls the super's"
"setUpClass in the "
+ self.__class__.__name__)
at_exit_set.add(self.__class__)
test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
try:
test_timeout = int(test_timeout)
except ValueError:
test_timeout = 0
if test_timeout > 0:
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
os.environ.get('OS_STDOUT_CAPTURE') == '1'):
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
os.environ.get('OS_STDERR_CAPTURE') == '1'):
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
if (os.environ.get('OS_LOG_CAPTURE') != 'False' and
os.environ.get('OS_LOG_CAPTURE') != '0'):
log_format = '%(asctime)-15s %(message)s'
self.useFixture(fixtures.LoggerFixture(nuke_handlers=False,
format=log_format,
level=None))
@classmethod
def get_client_manager(cls):
"""
        Returns an OpenStack client manager.
"""
cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__)
force_tenant_isolation = getattr(cls, 'force_tenant_isolation', None)
if (cls.config.compute.allow_tenant_isolation or
force_tenant_isolation):
creds = cls.isolated_creds.get_primary_creds()
username, tenant_name, password = creds
os = clients.Manager(username=username,
password=password,
tenant_name=tenant_name,
interface=cls._interface)
else:
os = clients.Manager(interface=cls._interface)
return os
@classmethod
def clear_isolated_creds(cls):
"""
Clears isolated creds if set
"""
        if getattr(cls, 'isolated_creds', None):
cls.isolated_creds.clear_isolated_creds()
@classmethod
def _get_identity_admin_client(cls):
"""
Returns an instance of the Identity Admin API client
"""
os = clients.AdminManager(interface=cls._interface)
admin_client = os.identity_client
return admin_client
@classmethod
def _get_client_args(cls):
return (
cls.config,
cls.config.identity.admin_username,
cls.config.identity.admin_password,
cls.config.identity.uri
)
def call_until_true(func, duration, sleep_for):
"""
Call the given function until it returns True (and return True) or
until the specified duration (in seconds) elapses (and return
False).
:param func: A zero argument callable that returns True on success.
:param duration: The number of seconds for which to attempt a
successful call of the function.
:param sleep_for: The number of seconds to sleep after an unsuccessful
invocation of the function.
"""
now = time.time()
timeout = now + duration
while now < timeout:
if func():
return True
LOG.debug("Sleeping for %d seconds", sleep_for)
time.sleep(sleep_for)
now = time.time()
return False
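# Illustrative usage sketch (not part of the original module): poll a
# zero-argument predicate for up to 10 seconds, sleeping 1 second between
# unsuccessful attempts; the predicate below is a stand-in for a real check.
def _example_call_until_true():
    deadline = time.time() + 3

    def _server_is_ready():
        return time.time() >= deadline

    return call_until_true(_server_is_ready, duration=10, sleep_for=1)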
|
|
import sys
from logging import getLogger
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import (QMainWindow, QHBoxLayout, QVBoxLayout,
QSizePolicy, QWidget, QPushButton,
QLabel, QListWidget, QSplitter, QFrame,
QComboBox, QScrollArea, QListWidgetItem,
QDialogButtonBox, QApplication)
from PyQt5.QtCore import Qt
from .plotgui import PlotGUI
from .._doc import ds
logger = getLogger(__name__)
class PlotGUI_QT5(PlotGUI):
    '''QT5 version of the plot window GUI.

    The figure is opened as a widget inside the Qt5 window.
Attributes
----------
figure : ``matplotlib.figure.Figure``
app : QT5 App created
app_window : Windows belonging to the QT5 App
databases : list of str
The databases to interact with
'''
@ds.with_indent(8)
def __init__(self, **kwargs):
'''Init the GUI using the Qt5 framework.
Keyword Arguments
-----------------
%(PlotGUI.parameters)s
'''
super().__init__(**kwargs)
# create qt app
app = QtCore.QCoreApplication.instance()
if app is None:
app = QApplication(sys.argv)
logger.debug('Qt app created')
self.app = app
if not hasattr(app, 'references'):
# store references to all the windows
logger.debug('creating references set')
app.references = set()
# create app window
self.app_window = ApplicationWindow(self)
# add the window reference to the app references list.
# this is in order to prevent garbage collection that will lead
# to closing of the window.
# the reference is removed from the list in the window closeEvent handler
# (called when the window is closed)
app.references.add(self.app_window)
logger.debug('app window references: %r' % app.references)
self.app_window.setWindowTitle("Calour")
self._set_figure(self.app_window.plotfigure, kwargs['tree_size'])
def __call__(self):
logger.debug('opening Qt5 window')
super().__call__()
self.app_window.show()
# move the window to the front
self.app_window.activateWindow()
self.app_window.raise_()
# run the event loop
self.app.exec_()
def show_info(self):
sid, fid, abd, annt = self.get_info()
self._update_info_labels(sid, fid, abd)
self._display_annotation_in_qlistwidget(annt)
def _update_info_labels(self, sid, fid, abd):
self.app_window.w_abund.setText('{:.01f}'.format(abd))
self.app_window.w_fid.setText(str(fid))
self.app_window.w_sid.setText(str(sid))
sample_field = str(self.app_window.w_sfield.currentText())
self.app_window.w_sfield_val.setText(
str(self.exp.sample_metadata[sample_field].iloc[self.current_select[0]]))
feature_field = str(self.app_window.w_ffield.currentText())
self.app_window.w_ffield_val.setText(
str(self.exp.feature_metadata[feature_field].iloc[self.current_select[1]]))
def _display_annotation_in_qlistwidget(self, annt):
        '''Display the annotations for the current selection in the list widget.

        The previous entries are cleared and one line is added per annotation.
Parameters
----------
annt : list of (dict, str)
dict : contains the key 'annotationtype' and determines the annotation color
also contains all other annotation data needed for right click menu/double click
str : The string to add to the list
'''
# clear the previous annotation box
self.app_window.w_dblist.clear()
for cannt in annt:
details = cannt[0]
newitem = QListWidgetItem(cannt[1])
newitem.setData(QtCore.Qt.UserRole, details)
if details['annotationtype'] == 'diffexp':
ccolor = QtGui.QColor(0, 0, 200)
elif details['annotationtype'] == 'contamination':
ccolor = QtGui.QColor(200, 0, 0)
elif details['annotationtype'] == 'common':
ccolor = QtGui.QColor(0, 200, 0)
elif details['annotationtype'] == 'highfreq':
ccolor = QtGui.QColor(0, 200, 0)
else:
ccolor = QtGui.QColor(0, 0, 0)
newitem.setForeground(ccolor)
self.app_window.w_dblist.addItem(newitem)
class MplCanvas(FigureCanvas):
"""Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.).
Parameters
----------
    parent : QWidget, optional
        parent widget of the canvas
width, height : Numeric
size of the canvas
dpi : int
"""
def __init__(self, parent=None, width=5, height=4, dpi=100):
fig = Figure(figsize=(width, height), dpi=dpi)
# comment out because it draws frame on the whole plotting area
# self.axes = fig.add_subplot(111)
FigureCanvas.__init__(self, fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
class ApplicationWindow(QMainWindow):
def __init__(self, gui):
QMainWindow.__init__(self)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.setWindowTitle("application main window")
self.main_widget = QWidget(self)
scroll_box_width = 800
# the height of the feature/label boxes (too small makes the horizontal scroll bar
# cover the text)
label_box_height = 35
# set the GUI widgets
# the user side on the right
userside = QVBoxLayout()
# sample field to display
lbox = QHBoxLayout()
self.w_sfield = QComboBox()
self.w_sfield_val = QLabel(text='NA')
self.w_sfield_val.setTextInteractionFlags(Qt.TextSelectableByMouse)
scroll = QScrollArea()
scroll.setFixedHeight(label_box_height)
scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.w_sfield_val.setMinimumWidth(scroll_box_width)
scroll.setWidget(self.w_sfield_val)
lbox.addWidget(self.w_sfield)
lbox.addWidget(scroll)
userside.addLayout(lbox)
# add the sample field combobox values
for i in gui.exp.sample_metadata.columns:
self.w_sfield.addItem(str(i))
# feature field to display
lbox = QHBoxLayout()
self.w_ffield = QComboBox()
self.w_ffield_val = QLabel(text='NA')
self.w_ffield_val.setTextInteractionFlags(Qt.TextSelectableByMouse)
scroll = QScrollArea()
scroll.setFixedHeight(label_box_height)
self.w_ffield_val.setMinimumWidth(scroll_box_width)
scroll.setWidget(self.w_ffield_val)
lbox.addWidget(self.w_ffield)
lbox.addWidget(scroll)
userside.addLayout(lbox)
for i in gui.exp.feature_metadata.columns:
self.w_ffield.addItem(str(i))
# sample id
lbox = QHBoxLayout()
label = QLabel(text='Sample ID:')
scroll = QScrollArea()
scroll.setFixedHeight(label_box_height)
self.w_sid = QLabel(text='?')
self.w_sid.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.w_sid.setMinimumWidth(scroll_box_width)
scroll.setWidget(self.w_sid)
lbox.addWidget(label)
lbox.addWidget(scroll)
userside.addLayout(lbox)
# feature id
lbox = QHBoxLayout()
label = QLabel(text='Feature ID:')
scroll = QScrollArea()
scroll.setFixedHeight(label_box_height)
self.w_fid = QLabel(text='?')
self.w_fid.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.w_fid.setMinimumWidth(scroll_box_width)
scroll.setWidget(self.w_fid)
lbox.addWidget(label)
lbox.addWidget(scroll)
userside.addLayout(lbox)
# abundance value
lbox = QHBoxLayout()
label = QLabel(text='Abundance:')
self.w_abund = QLabel(text='?')
lbox.addWidget(label)
lbox.addWidget(self.w_abund)
userside.addLayout(lbox)
# buttons
lbox_buttons = QHBoxLayout()
self.w_sequence = QPushButton(text='Copy Seq')
lbox_buttons.addWidget(self.w_sequence)
self.w_info = QPushButton(text='Info')
lbox_buttons.addWidget(self.w_info)
self.w_annotate = QPushButton(text='Annotate')
lbox_buttons.addWidget(self.w_annotate)
userside.addLayout(lbox_buttons)
# db annotations list
self.w_dblist = QListWidget()
self.w_dblist.itemDoubleClicked.connect(self.double_click_annotation)
userside.addWidget(self.w_dblist)
# the annotation list right mouse menu
self.w_dblist.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.w_dblist.customContextMenuRequested.connect(self.annotation_list_right_clicked)
# buttons at bottom
lbox_buttons_bottom = QHBoxLayout()
self.w_save_fasta = QPushButton(text='Save Seqs')
lbox_buttons_bottom.addWidget(self.w_save_fasta)
self.w_enrichment = QPushButton(text='Enrichment')
lbox_buttons_bottom.addWidget(self.w_enrichment)
self.w_save_fig = QPushButton(text='Save Fig')
lbox_buttons_bottom.addWidget(self.w_save_fig)
userside.addLayout(lbox_buttons_bottom)
# the heatmap on the left side
heatmap = MplCanvas(self.main_widget, width=5, height=4, dpi=100)
heatmap.setFocusPolicy(QtCore.Qt.ClickFocus)
heatmap.setFocus()
layout = QHBoxLayout(self.main_widget)
frame = QFrame()
splitter = QSplitter(QtCore.Qt.Horizontal, self.main_widget)
splitter.addWidget(heatmap)
frame.setLayout(userside)
splitter.addWidget(frame)
layout.addWidget(splitter)
self.plotfigure = heatmap.figure
self.gui = gui
# link events to gui
self.w_annotate.clicked.connect(self.annotate)
self.w_sequence.clicked.connect(self.copy_sequence)
self.w_save_fasta.clicked.connect(self.save_fasta)
self.w_enrichment.clicked.connect(self.enrichment)
self.w_save_fig.clicked.connect(self.save_fig)
self.w_sfield.currentIndexChanged.connect(self.info_field_changed)
self.w_ffield.currentIndexChanged.connect(self.info_field_changed)
self.main_widget.setFocus()
self.setCentralWidget(self.main_widget)
def fileQuit(self):
# remove the window from the app list - memory can be cleared.
app = QtCore.QCoreApplication.instance()
if app is not None:
if self in app.references:
logger.debug('removing window from app window list')
app.references.remove(self)
else:
logger.warning('window not in app window list. Not removed')
else:
logger.warning('App not found - not removing window from list')
self.close()
def closeEvent(self, ce):
# called when the window is closed.
# in that case, we need to remove the reference to the window from the app
# window list, so it will be garbage collected now.
# happens in fileQuit() method.
self.fileQuit()
def annotation_list_right_clicked(self, QPos):
self.listMenu = QtWidgets.QMenu()
parent_position = self.w_dblist.mapToGlobal(QtCore.QPoint(0, 0))
item = self.w_dblist.itemAt(QPos)
data = item.data(QtCore.Qt.UserRole)
db = data.get('_db_interface', None)
if db is None:
logger.debug('No database for selected item')
return
menu_details = self.listMenu.addAction("Details")
menu_details.triggered.connect(lambda: self.right_menu_details(item))
if db.annotatable:
menu_details = self.listMenu.addAction("Update annotation")
menu_details.triggered.connect(lambda: self.right_menu_update(item))
menu_delete = self.listMenu.addAction("Delete annotation")
menu_delete.triggered.connect(lambda: self.right_menu_delete(item))
menu_remove = self.listMenu.addAction("Remove seq. from annotation")
menu_remove.triggered.connect(lambda: self.right_menu_remove_feature(item))
self.listMenu.move(parent_position + QPos)
self.listMenu.show()
def right_menu_details(self, item):
self.double_click_annotation(item)
def right_menu_delete(self, item):
if QtWidgets.QMessageBox.warning(self, "Delete annotation?", "Are you sure you want to delete the annotation:\n%s\n"
"and all associated features?" % item.text(),
QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No) == QtWidgets.QMessageBox.No:
return
data = item.data(QtCore.Qt.UserRole)
db = data.get('_db_interface', None)
logger.debug('Deleting annotation %s' % item.text())
err = db.delete_annotation(data)
if err:
logger.error('Annotation not deleted. Error: %s' % err)
self.gui.show_info()
def right_menu_update(self, item):
        logger.debug('update annotation %s' % item.text())
data = item.data(QtCore.Qt.UserRole)
db = data.get('_db_interface', None)
db.upadte_annotation(data, self.gui.exp)
def right_menu_remove_feature(self, item):
features = self.gui.get_selected_seqs()
if QtWidgets.QMessageBox.warning(self, "Remove feature from annotation?", "Are you sure you want to remove the %d selected features\n"
"from the annotation:\n%s?" % (len(features), item.text()), QtWidgets.QMessageBox.Yes,
QtWidgets.QMessageBox.No) == QtWidgets.QMessageBox.No:
return
data = item.data(QtCore.Qt.UserRole)
db = data.get('_db_interface', None)
        logger.debug('Removing %d features from annotation %s' % (len(features), item.text()))
err = db.remove_features_from_annotation(features, data)
if err:
logger.error('Features not removed from annotation. Error: %s' % err)
def info_field_changed(self):
sid, fid, abd = self.gui.get_selection_info()
self.gui._update_info_labels(sid, fid, abd)
def copy_sequence(self):
'''Copy the sequence to the clipboard
'''
cseq = self.gui.exp.feature_metadata.index[self.gui.current_select[1]]
clipboard = QApplication.clipboard()
clipboard.setText(cseq)
def save_fasta(self):
seqs = self.gui.get_selected_seqs()
filename, _ = QtWidgets.QFileDialog.getSaveFileName(self, caption='Save selected seqs to fasta')
self.gui.exp.save_fasta(str(filename), seqs)
def save_fig(self):
'''Save the figure to a pdf/svg/png.
Called from the Save Fig button in the gui.
'''
cfig = self.plotfigure
filename, _ = QtWidgets.QFileDialog.getSaveFileName(self, caption='Save figure', filter='PDF (*.pdf);;SVG (*.svg);; PNG (*.png)', initialFilter='PDF (*.pdf)')
cfig.savefig(str(filename))
def enrichment(self):
'''Get and display the list of enriched database terms for the selected features.
Iterate over all databases that support enrichment analysis. For each such database,
get the list of enriched terms in the selected set of features (compared to the other features
in the experiment). Then display the list of these terms in a new qt5 window with blue terms
for ones enriched in the selected group, red terms for ones enriched in the unselected set of features
'''
exp = self.gui.exp
group1_seqs = self.gui.get_selected_seqs()
allseqs = exp.feature_metadata.index.values
group2_seqs = set(allseqs) - set(group1_seqs)
for cdb in self.gui.databases:
if not cdb.can_do_enrichment:
continue
cdb.show_enrichment_qt5(group1_seqs, group2=group2_seqs, exp=exp, max_id=None, group1_name='selected', group2_name='unselected', term_type='term', ignore_exp=True)
def double_click_annotation(self, item):
'''Show database information about the double clicked item in the list.
Call the appropriate database for displaying the info
'''
data = item.data(QtCore.Qt.UserRole)
db = data.get('_db_interface', None)
if db is None:
return
db.show_annotation_info(data)
def annotate(self):
'''Add database annotation to selected features
'''
# get the database used to add annotation
if self.gui._annotation_db is None:
logger.warning('No database with add annotation capability selected (use plot(...,databases=[dbname])')
return
# get the sequences of the selection
seqs = self.gui.get_selected_seqs()
# annotate
err = self.gui._annotation_db.add_annotation(seqs, self.gui.exp)
if err:
            logger.error('Error encountered when adding annotation: %s' % err)
return
logger.info('Annotation added')
class SListWindow(QtWidgets.QDialog):
def __init__(self, listdata=[], listname=None):
'''Create a list window with items in the list and the listname as specified
Parameters
----------
listdata: list of str, optional
the data to show in the list
listname: str, optional
name to display above the list
'''
super().__init__()
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
if listname is not None:
self.setWindowTitle(listname)
self.layout = QVBoxLayout(self)
self.w_list = QListWidget()
self.layout.addWidget(self.w_list)
buttonBox = QDialogButtonBox(QDialogButtonBox.Ok)
buttonBox.accepted.connect(self.accept)
self.layout.addWidget(buttonBox)
for citem in listdata:
self.w_list.addItem(citem)
self.w_list.itemDoubleClicked.connect(self.list_double_click)
self.show()
self.adjustSize()
def add_item(self, text, color='black', dblclick_data=None):
'''Add an item to the list
Parameters
----------
text : str
the string to add
        color : str, optional
            the color of the text: one of 'black' (default), 'red', 'blue' or 'green'
        dblclick_data : object or None, optional
            the data to attach to the item; it is passed to the double-click
            handler when the item is double clicked (or None to ignore)
'''
item = QtWidgets.QListWidgetItem()
item.setText(text)
        if color == 'red':
            ccolor = QtGui.QColor(155, 0, 0)
        elif color == 'blue':
            ccolor = QtGui.QColor(0, 0, 155)
        elif color == 'green':
            ccolor = QtGui.QColor(0, 155, 0)
        else:
            # default to black (also covers unrecognized color names)
            ccolor = QtGui.QColor(0, 0, 0)
item.setForeground(ccolor)
item.setData(QtCore.Qt.UserRole, dblclick_data)
self.w_list.addItem(item)
def list_double_click(self, item):
data = item.data(QtCore.Qt.UserRole)
if data is not None:
data['database'].show_term_details(data['term'], data['exp'], data['features1'], data['features2'], gui='qt5')
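# Illustrative usage sketch (not part of the original module): open a standalone
# SListWindow with a couple of colored items; it requires a running QApplication,
# so it is wrapped in a function rather than executed at import time.
def _example_slist_window():
    app = QtCore.QCoreApplication.instance() or QApplication(sys.argv)
    win = SListWindow(listname='example terms')
    win.add_item('term enriched in selected group', color='blue')
    win.add_item('term enriched in unselected group', color='red')
    win.exec_()
    return app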
|
|
from typing import Tuple
import numpy as np
import numpy.ma as ma
from sdcit.cython_impl.cy_sdcit import cy_sdcit, cy_split_permutation, cy_dense_permutation
from sklearn.linear_model import LinearRegression
from sdcit.utils import K2D, p_value_of, random_seeds, cythonize
def permuted(D, dense=True):
out = np.zeros((len(D),), 'int32')
if dense:
cy_dense_permutation(D, out)
else:
cy_split_permutation(D, out)
return out
def mask_and_perm(Dz: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
n = len(Dz)
full_idx = np.arange(0, n)
perm = permuted(Dz)
# 1 for masked (=excluded)
mask = np.zeros((n, n))
mask[full_idx, full_idx] = 1 # i==j
mask[full_idx, perm] = 1 # pi_i = j
mask[perm, full_idx] = 1 # i = pi_j
return mask, perm
def penalized_distance(Dz: np.ndarray, mask: np.ndarray) -> np.ndarray:
# add some big value to "masked" values except diagonal.
return Dz + (mask - np.diag(np.diag(mask))) * Dz.max() # soft penalty
def MMSD(Ky: np.ndarray, Kz: np.ndarray, Kxz: np.ndarray, Dz: np.ndarray) -> Tuple[float, float, np.ndarray, np.ndarray]:
"""Maximum Mean Self-Discrepancy"""
n = len(Kxz)
full_idx = np.arange(0, n)
mask, perm = mask_and_perm(Dz)
Ky_fp = Ky[np.ix_(full_idx, perm)]
Ky_pp = Ky[np.ix_(perm, perm)]
kk = (Ky + Ky_pp - 2 * Ky_fp)
statistic = ma.array(Kxz * kk, mask=mask).mean()
error_statistic = ma.array(Kz * kk, mask=mask).mean()
return statistic, error_statistic, mask, perm
def emp_MMSD(Kxz: np.ndarray, Ky: np.ndarray, Kz: np.ndarray, Dz: np.ndarray, num_samples: int) -> Tuple[np.ndarray, np.ndarray]:
"""Empirical distribution of MMSD"""
n = len(Kxz)
mmsd_distr = np.zeros((num_samples,))
error_distr = np.zeros((num_samples,))
for i_th in range(num_samples):
selected = np.random.choice(n, n // 2, replace=False)
grid = np.ix_(selected, selected)
mmsd_distr[i_th], error_distr[i_th], *_ = MMSD(Ky[grid], Kz[grid], Kxz[grid], Dz[grid])
return (0.5 * (mmsd_distr - mmsd_distr.mean()) + mmsd_distr.mean(),
0.5 * (error_distr - error_distr.mean()) + error_distr.mean())
def adjust_errors(null_errors, null, error=None, test_statistic=None):
if error is not None:
assert test_statistic is not None
model = LinearRegression().fit(null_errors[:, None], null[:, None])
beta = max(0, model.coef_[0, 0])
if error is not None:
return null - null_errors * beta, test_statistic - error * beta
else:
return null - null_errors * beta
def SDCIT(Kx: np.ndarray, Ky: np.ndarray, Kz: np.ndarray, Dz=None, size_of_null_sample=1000, with_null=False, seed=None, adjust=True, to_shuffle=True):
"""SDCIT (Lee and Honavar, 2017)
Parameters
----------
Kx : np.ndarray
N by N kernel matrix of X
Ky : np.ndarray
N by N kernel matrix of Y
Kz : np.ndarray
N by N kernel matrix of Z
Dz : np.ndarray
N by N pairwise distance matrix of Z
size_of_null_sample : int
The number of samples in a null distribution
with_null : bool
If true, resulting null distribution is also returned
seed : int
Random seed
adjust : bool
whether to adjust null distribution and test statistics based on 'permutation error' information
to_shuffle : bool
        whether to shuffle the order of the given data at the beginning, which minimizes the chance of getting a bad permutation
References
----------
Lee, S., Honavar, V. (2017). Self-Discrepancy Conditional Independence Test.
In Proceedings of the Thirty-third Conference on Uncertainty in Artificial Intelligence. Corvallis, Oregon: AUAI Press.
"""
if seed is not None:
np.random.seed(seed)
if Dz is None:
Dz = K2D(Kz)
if to_shuffle:
Kx, Ky, Kz, Dz = shuffling(seed, Kx, Ky, Kz, Dz) # categorical Z may yield an ordered 'block' matrix and it may harm permutation.
Kxz = Kx * Kz
test_statistic, error_statistic, mask, _ = MMSD(Ky, Kz, Kxz, Dz)
mask, Pidx = mask_and_perm(penalized_distance(Dz, mask))
# avoid permutation between already permuted pairs.
mmsd_distr_under_null, error_distr_under_null = emp_MMSD(Kxz, Ky[np.ix_(Pidx, Pidx)], Kz, penalized_distance(Dz, mask), size_of_null_sample)
if adjust:
fix_null, fix_test_statistic = adjust_errors(error_distr_under_null, mmsd_distr_under_null, error_statistic, test_statistic)
fix_null = fix_null - fix_null.mean()
else:
fix_null = mmsd_distr_under_null - mmsd_distr_under_null.mean()
fix_test_statistic = test_statistic
if with_null:
return fix_test_statistic, p_value_of(fix_test_statistic, fix_null), fix_null
else:
return fix_test_statistic, p_value_of(fix_test_statistic, fix_null)
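# Illustrative usage sketch (not part of the original module): run SDCIT on
# synthetic data where X and Y are conditionally independent given Z. The plain
# numpy RBF kernels and the fixed bandwidth below are assumptions made for this
# example, not the package's recommended kernel choice.
def _example_sdcit_usage(n=200, seed=0):
    rng = np.random.RandomState(seed)
    z = rng.randn(n, 1)
    x = z + 0.1 * rng.randn(n, 1)  # X depends on Z only
    y = z + 0.1 * rng.randn(n, 1)  # Y depends on Z only, so X _||_ Y | Z holds

    def rbf(data, sigma=1.0):
        d2 = np.sum((data[:, None, :] - data[None, :, :]) ** 2, axis=-1)
        return np.exp(-d2 / (2.0 * sigma ** 2))

    test_statistic, p_value = SDCIT(rbf(x), rbf(y), rbf(z),
                                    size_of_null_sample=500, seed=seed)
    return test_statistic, p_value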
def c_SDCIT(Kx, Ky, Kz, Dz=None, size_of_null_sample=1000, with_null=False, seed=None, n_jobs=1, adjust=True, to_shuffle=True):
"""C-based SDCIT (Lee and Honavar, 2017)
Parameters
----------
Kx : np.ndarray
N by N kernel matrix of X
Ky : np.ndarray
N by N kernel matrix of Y
Kz : np.ndarray
N by N kernel matrix of Z
Dz : np.ndarray
N by N pairwise distance matrix of Z
size_of_null_sample : int
The number of samples in a null distribution
with_null : bool
If true, a resulting null distribution is also returned
seed : int
Random seed
n_jobs: int
number of threads to be used
adjust : bool
whether to adjust null distribution and test statistics based on 'permutation error' information
to_shuffle : bool
        whether to shuffle the order of the given data at the beginning, which minimizes the chance of getting a bad permutation
References
----------
Lee, S., Honavar, V. (2017). Self-Discrepancy Conditional Independence Test.
In Proceedings of the Thirty-third Conference on Uncertainty in Artificial Intelligence. Corvallis, Oregon: AUAI Press.
"""
if seed is not None:
np.random.seed(seed)
if Dz is None:
Dz = K2D(Kz)
if to_shuffle:
Kx, Ky, Kz, Dz = shuffling(seed, Kx, Ky, Kz, Dz) # categorical Z may yield an ordered 'block' matrix and it may harm permutation.
Kxz = Kx * Kz
# prepare parameters & output variables
Kxz, Ky, Kz, Dz = cythonize(Kxz, Ky, Kz, Dz)
raw_null = np.zeros((size_of_null_sample,), dtype='float64')
error_raw_null = np.zeros((size_of_null_sample,), dtype='float64')
mmsd = np.zeros((1,), dtype='float64')
error_mmsd = np.zeros((1,), dtype='float64')
# run SDCIT
cy_sdcit(Kxz, Ky, Kz, Dz, size_of_null_sample, random_seeds(), n_jobs, mmsd, error_mmsd, raw_null, error_raw_null)
# post-process outputs
test_statistic = mmsd[0]
error_statistic = error_mmsd[0]
raw_null = 0.5 * (raw_null - raw_null.mean()) + raw_null.mean()
error_raw_null = 0.5 * (error_raw_null - error_raw_null.mean()) + error_raw_null.mean()
if adjust:
fix_null, fix_test_statistic = adjust_errors(error_raw_null, raw_null, error_statistic, test_statistic)
fix_null = fix_null - fix_null.mean()
else:
fix_null = raw_null - raw_null.mean()
fix_test_statistic = test_statistic
if with_null:
return fix_test_statistic, p_value_of(fix_test_statistic, fix_null), fix_null
else:
return fix_test_statistic, p_value_of(fix_test_statistic, fix_null)
def shuffling(seed, *matrices):
if seed is not None:
np.random.seed(seed)
n = -1
for matrix in matrices:
if n < 0:
n = len(matrix)
idxs = np.arange(n)
np.random.shuffle(idxs)
yield matrix[np.ix_(idxs, idxs)]
|
|
""" test parquet compat """
import datetime
from io import BytesIO
import os
import pathlib
from warnings import (
catch_warnings,
filterwarnings,
)
import numpy as np
import pytest
from pandas._config import get_option
from pandas.compat import is_platform_windows
from pandas.compat.pyarrow import (
pa_version_under1p0,
pa_version_under2p0,
pa_version_under5p0,
)
import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
from pandas.util.version import Version
from pandas.io.parquet import (
FastParquetImpl,
PyArrowImpl,
get_engine,
read_parquet,
to_parquet,
)
try:
import pyarrow
_HAVE_PYARROW = True
except ImportError:
_HAVE_PYARROW = False
try:
with catch_warnings():
# `np.bool` is a deprecated alias...
filterwarnings("ignore", "`np.bool`", category=DeprecationWarning)
import fastparquet
_HAVE_FASTPARQUET = True
except ImportError:
_HAVE_FASTPARQUET = False
pytestmark = pytest.mark.filterwarnings(
"ignore:RangeIndex.* is deprecated:DeprecationWarning"
)
# TODO(ArrayManager) fastparquet relies on BlockManager internals
# setup engines & skips
@pytest.fixture(
params=[
pytest.param(
"fastparquet",
marks=pytest.mark.skipif(
not _HAVE_FASTPARQUET or get_option("mode.data_manager") == "array",
reason="fastparquet is not installed or ArrayManager is used",
),
),
pytest.param(
"pyarrow",
marks=pytest.mark.skipif(
not _HAVE_PYARROW, reason="pyarrow is not installed"
),
),
]
)
def engine(request):
return request.param
@pytest.fixture
def pa():
if not _HAVE_PYARROW:
pytest.skip("pyarrow is not installed")
return "pyarrow"
@pytest.fixture
def fp():
if not _HAVE_FASTPARQUET:
pytest.skip("fastparquet is not installed")
elif get_option("mode.data_manager") == "array":
pytest.skip("ArrayManager is not supported with fastparquet")
return "fastparquet"
@pytest.fixture
def df_compat():
return pd.DataFrame({"A": [1, 2, 3], "B": "foo"})
@pytest.fixture
def df_cross_compat():
df = pd.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
# 'c': np.arange(3, 6).astype('u1'),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("20130101", periods=3),
# 'g': pd.date_range('20130101', periods=3,
# tz='US/Eastern'),
# 'h': pd.date_range('20130101', periods=3, freq='ns')
}
)
return df
@pytest.fixture
def df_full():
return pd.DataFrame(
{
"string": list("abc"),
"string_with_nan": ["a", np.nan, "c"],
"string_with_none": ["a", None, "c"],
"bytes": [b"foo", b"bar", b"baz"],
"unicode": ["foo", "bar", "baz"],
"int": list(range(1, 4)),
"uint": np.arange(3, 6).astype("u1"),
"float": np.arange(4.0, 7.0, dtype="float64"),
"float_with_nan": [2.0, np.nan, 3.0],
"bool": [True, False, True],
"datetime": pd.date_range("20130101", periods=3),
"datetime_with_nat": [
pd.Timestamp("20130101"),
pd.NaT,
pd.Timestamp("20130103"),
],
}
)
@pytest.fixture(
params=[
datetime.datetime.now(datetime.timezone.utc),
datetime.datetime.now(datetime.timezone.min),
datetime.datetime.now(datetime.timezone.max),
datetime.datetime.strptime("2019-01-04T16:41:24+0200", "%Y-%m-%dT%H:%M:%S%z"),
datetime.datetime.strptime("2019-01-04T16:41:24+0215", "%Y-%m-%dT%H:%M:%S%z"),
datetime.datetime.strptime("2019-01-04T16:41:24-0200", "%Y-%m-%dT%H:%M:%S%z"),
datetime.datetime.strptime("2019-01-04T16:41:24-0215", "%Y-%m-%dT%H:%M:%S%z"),
]
)
def timezone_aware_date_list(request):
return request.param
def check_round_trip(
df,
engine=None,
path=None,
write_kwargs=None,
read_kwargs=None,
expected=None,
check_names=True,
check_like=False,
check_dtype=True,
repeat=2,
):
"""Verify parquet serializer and deserializer produce the same results.
Performs a pandas to disk and disk to pandas round trip,
then compares the 2 resulting DataFrames to verify equality.
Parameters
----------
df: Dataframe
engine: str, optional
'pyarrow' or 'fastparquet'
path: str, optional
write_kwargs: dict of str:str, optional
read_kwargs: dict of str:str, optional
expected: DataFrame, optional
Expected deserialization result, otherwise will be equal to `df`
    check_names: bool, optional
        Whether to check the Index names attribute when comparing
check_like: bool, optional
If True, ignore the order of index & columns.
repeat: int, optional
How many times to repeat the test
"""
write_kwargs = write_kwargs or {"compression": None}
read_kwargs = read_kwargs or {}
if expected is None:
expected = df
if engine:
write_kwargs["engine"] = engine
read_kwargs["engine"] = engine
def compare(repeat):
for _ in range(repeat):
df.to_parquet(path, **write_kwargs)
with catch_warnings(record=True):
actual = read_parquet(path, **read_kwargs)
tm.assert_frame_equal(
expected,
actual,
check_names=check_names,
check_like=check_like,
check_dtype=check_dtype,
)
if path is None:
with tm.ensure_clean() as path:
compare(repeat)
else:
compare(repeat)
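# Illustrative usage sketch (not part of the original test module): round-trip a
# small frame with an explicitly named engine; in the real tests the engine comes
# from the `engine`/`pa`/`fp` fixtures above, so "pyarrow" here is an assumption.
def _example_check_round_trip():
    df = pd.DataFrame({"a": [1, 2, 3], "b": list("xyz")})
    check_round_trip(df, engine="pyarrow", write_kwargs={"compression": None})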
def check_partition_names(path, expected):
"""Check partitions of a parquet file are as expected.
Parameters
----------
path: str
Path of the dataset.
expected: iterable of str
Expected partition names.
"""
if pa_version_under5p0:
import pyarrow.parquet as pq
dataset = pq.ParquetDataset(path, validate_schema=False)
assert len(dataset.partitions.partition_names) == len(expected)
assert dataset.partitions.partition_names == set(expected)
else:
import pyarrow.dataset as ds
dataset = ds.dataset(path, partitioning="hive")
assert dataset.partitioning.schema.names == expected
def test_invalid_engine(df_compat):
msg = "engine must be one of 'pyarrow', 'fastparquet'"
with pytest.raises(ValueError, match=msg):
check_round_trip(df_compat, "foo", "bar")
def test_options_py(df_compat, pa):
# use the set option
with pd.option_context("io.parquet.engine", "pyarrow"):
check_round_trip(df_compat)
def test_options_fp(df_compat, fp):
# use the set option
with pd.option_context("io.parquet.engine", "fastparquet"):
check_round_trip(df_compat)
def test_options_auto(df_compat, fp, pa):
# use the set option
with pd.option_context("io.parquet.engine", "auto"):
check_round_trip(df_compat)
def test_options_get_engine(fp, pa):
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
with pd.option_context("io.parquet.engine", "pyarrow"):
assert isinstance(get_engine("auto"), PyArrowImpl)
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
with pd.option_context("io.parquet.engine", "fastparquet"):
assert isinstance(get_engine("auto"), FastParquetImpl)
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
with pd.option_context("io.parquet.engine", "auto"):
assert isinstance(get_engine("auto"), PyArrowImpl)
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
def test_get_engine_auto_error_message():
# Expect different error messages from get_engine(engine="auto")
# if engines aren't installed vs. are installed but bad version
from pandas.compat._optional import VERSIONS
# Do we have engines installed, but a bad version of them?
pa_min_ver = VERSIONS.get("pyarrow")
fp_min_ver = VERSIONS.get("fastparquet")
have_pa_bad_version = (
False
if not _HAVE_PYARROW
else Version(pyarrow.__version__) < Version(pa_min_ver)
)
have_fp_bad_version = (
False
if not _HAVE_FASTPARQUET
else Version(fastparquet.__version__) < Version(fp_min_ver)
)
# Do we have usable engines installed?
have_usable_pa = _HAVE_PYARROW and not have_pa_bad_version
have_usable_fp = _HAVE_FASTPARQUET and not have_fp_bad_version
if not have_usable_pa and not have_usable_fp:
# No usable engines found.
if have_pa_bad_version:
match = f"Pandas requires version .{pa_min_ver}. or newer of .pyarrow."
with pytest.raises(ImportError, match=match):
get_engine("auto")
else:
match = "Missing optional dependency .pyarrow."
with pytest.raises(ImportError, match=match):
get_engine("auto")
if have_fp_bad_version:
match = f"Pandas requires version .{fp_min_ver}. or newer of .fastparquet."
with pytest.raises(ImportError, match=match):
get_engine("auto")
else:
match = "Missing optional dependency .fastparquet."
with pytest.raises(ImportError, match=match):
get_engine("auto")
def test_cross_engine_pa_fp(df_cross_compat, pa, fp):
# cross-compat with differing reading/writing engines
df = df_cross_compat
with tm.ensure_clean() as path:
df.to_parquet(path, engine=pa, compression=None)
result = read_parquet(path, engine=fp)
tm.assert_frame_equal(result, df)
result = read_parquet(path, engine=fp, columns=["a", "d"])
tm.assert_frame_equal(result, df[["a", "d"]])
def test_cross_engine_fp_pa(request, df_cross_compat, pa, fp):
# cross-compat with differing reading/writing engines
df = df_cross_compat
with tm.ensure_clean() as path:
df.to_parquet(path, engine=fp, compression=None)
with catch_warnings(record=True):
result = read_parquet(path, engine=pa)
tm.assert_frame_equal(result, df)
result = read_parquet(path, engine=pa, columns=["a", "d"])
tm.assert_frame_equal(result, df[["a", "d"]])
class Base:
def check_error_on_write(self, df, engine, exc, err_msg):
# check that we are raising the exception on writing
with tm.ensure_clean() as path:
with pytest.raises(exc, match=err_msg):
to_parquet(df, path, engine, compression=None)
def check_external_error_on_write(self, df, engine, exc):
# check that an external library is raising the exception on writing
with tm.ensure_clean() as path:
with tm.external_error_raised(exc):
to_parquet(df, path, engine, compression=None)
@tm.network
def test_parquet_read_from_url(self, df_compat, engine):
if engine != "auto":
pytest.importorskip(engine)
url = (
"https://raw.githubusercontent.com/pandas-dev/pandas/"
"master/pandas/tests/io/data/parquet/simple.parquet"
)
df = read_parquet(url)
tm.assert_frame_equal(df, df_compat)
class TestBasic(Base):
def test_error(self, engine):
for obj in [
pd.Series([1, 2, 3]),
1,
"foo",
pd.Timestamp("20130101"),
np.array([1, 2, 3]),
]:
msg = "to_parquet only supports IO with DataFrames"
self.check_error_on_write(obj, engine, ValueError, msg)
def test_columns_dtypes(self, engine):
df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})
# unicode
df.columns = ["foo", "bar"]
check_round_trip(df, engine)
def test_columns_dtypes_invalid(self, engine):
df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})
msg = "parquet must have string column names"
# numeric
df.columns = [0, 1]
self.check_error_on_write(df, engine, ValueError, msg)
# bytes
df.columns = [b"foo", b"bar"]
self.check_error_on_write(df, engine, ValueError, msg)
# python object
df.columns = [
datetime.datetime(2011, 1, 1, 0, 0),
datetime.datetime(2011, 1, 1, 1, 1),
]
self.check_error_on_write(df, engine, ValueError, msg)
@pytest.mark.parametrize("compression", [None, "gzip", "snappy", "brotli"])
def test_compression(self, engine, compression):
if compression == "snappy":
pytest.importorskip("snappy")
elif compression == "brotli":
pytest.importorskip("brotli")
df = pd.DataFrame({"A": [1, 2, 3]})
check_round_trip(df, engine, write_kwargs={"compression": compression})
def test_read_columns(self, engine):
# GH18154
df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})
expected = pd.DataFrame({"string": list("abc")})
check_round_trip(
df, engine, expected=expected, read_kwargs={"columns": ["string"]}
)
def test_write_index(self, engine):
check_names = engine != "fastparquet"
df = pd.DataFrame({"A": [1, 2, 3]})
check_round_trip(df, engine)
indexes = [
[2, 3, 4],
pd.date_range("20130101", periods=3),
list("abc"),
[1, 3, 4],
]
# non-default index
for index in indexes:
df.index = index
if isinstance(index, pd.DatetimeIndex):
df.index = df.index._with_freq(None) # freq doesn't round-trip
check_round_trip(df, engine, check_names=check_names)
# index with meta-data
df.index = [0, 1, 2]
df.index.name = "foo"
check_round_trip(df, engine)
def test_write_multiindex(self, pa):
# Not supported in fastparquet as of 0.1.3 or older pyarrow version
engine = pa
df = pd.DataFrame({"A": [1, 2, 3]})
index = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)])
df.index = index
check_round_trip(df, engine)
def test_multiindex_with_columns(self, pa):
engine = pa
dates = pd.date_range("01-Jan-2018", "01-Dec-2018", freq="MS")
df = pd.DataFrame(np.random.randn(2 * len(dates), 3), columns=list("ABC"))
index1 = pd.MultiIndex.from_product(
[["Level1", "Level2"], dates], names=["level", "date"]
)
index2 = index1.copy(names=None)
for index in [index1, index2]:
df.index = index
check_round_trip(df, engine)
check_round_trip(
df, engine, read_kwargs={"columns": ["A", "B"]}, expected=df[["A", "B"]]
)
def test_write_ignoring_index(self, engine):
# ENH 20768
# Ensure index=False omits the index from the written Parquet file.
df = pd.DataFrame({"a": [1, 2, 3], "b": ["q", "r", "s"]})
write_kwargs = {"compression": None, "index": False}
# Because we're dropping the index, we expect the loaded dataframe to
# have the default integer index.
expected = df.reset_index(drop=True)
check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected)
# Ignore custom index
df = pd.DataFrame(
{"a": [1, 2, 3], "b": ["q", "r", "s"]}, index=["zyx", "wvu", "tsr"]
)
check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected)
# Ignore multi-indexes as well.
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
df = pd.DataFrame(
{"one": list(range(8)), "two": [-i for i in range(8)]}, index=arrays
)
expected = df.reset_index(drop=True)
check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected)
def test_write_column_multiindex(self, engine):
# Not able to write column multi-indexes with non-string column names.
mi_columns = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)])
df = pd.DataFrame(np.random.randn(4, 3), columns=mi_columns)
msg = (
r"\s*parquet must have string column names for all values in\s*"
"each level of the MultiIndex"
)
self.check_error_on_write(df, engine, ValueError, msg)
def test_write_column_multiindex_nonstring(self, pa):
# GH #34777
# Not supported in fastparquet as of 0.1.3
engine = pa
# Not able to write column multi-indexes with non-string column names
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
[1, 2, 1, 2, 1, 2, 1, 2],
]
df = pd.DataFrame(np.random.randn(8, 8), columns=arrays)
df.columns.names = ["Level1", "Level2"]
msg = (
r"\s*parquet must have string column names for all values in\s*"
"each level of the MultiIndex"
)
self.check_error_on_write(df, engine, ValueError, msg)
def test_write_column_multiindex_string(self, pa):
# GH #34777
# Not supported in fastparquet as of 0.1.3
engine = pa
# Write column multi-indexes with string column names
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
df = pd.DataFrame(np.random.randn(8, 8), columns=arrays)
df.columns.names = ["ColLevel1", "ColLevel2"]
check_round_trip(df, engine)
def test_write_column_index_string(self, pa):
# GH #34777
# Not supported in fastparquet as of 0.1.3
engine = pa
# Write column indexes with string column names
arrays = ["bar", "baz", "foo", "qux"]
df = pd.DataFrame(np.random.randn(8, 4), columns=arrays)
df.columns.name = "StringCol"
check_round_trip(df, engine)
def test_write_column_index_nonstring(self, pa):
# GH #34777
# Not supported in fastparquet as of 0.1.3
engine = pa
# Write column indexes with string column names
arrays = [1, 2, 3, 4]
df = pd.DataFrame(np.random.randn(8, 4), columns=arrays)
df.columns.name = "NonStringCol"
msg = r"parquet must have string column names"
self.check_error_on_write(df, engine, ValueError, msg)
def test_use_nullable_dtypes(self, engine):
import pyarrow.parquet as pq
if engine == "fastparquet":
# We are manually disabling fastparquet's
# nullable dtype support pending discussion
pytest.skip("Fastparquet nullable dtype support is disabled")
table = pyarrow.table(
{
"a": pyarrow.array([1, 2, 3, None], "int64"),
"b": pyarrow.array([1, 2, 3, None], "uint8"),
"c": pyarrow.array(["a", "b", "c", None]),
"d": pyarrow.array([True, False, True, None]),
# Test that nullable dtypes used even in absence of nulls
"e": pyarrow.array([1, 2, 3, 4], "int64"),
}
)
with tm.ensure_clean() as path:
# write manually with pyarrow to write integers
pq.write_table(table, path)
result1 = read_parquet(path, engine=engine)
result2 = read_parquet(path, engine=engine, use_nullable_dtypes=True)
assert result1["a"].dtype == np.dtype("float64")
expected = pd.DataFrame(
{
"a": pd.array([1, 2, 3, None], dtype="Int64"),
"b": pd.array([1, 2, 3, None], dtype="UInt8"),
"c": pd.array(["a", "b", "c", None], dtype="string"),
"d": pd.array([True, False, True, None], dtype="boolean"),
"e": pd.array([1, 2, 3, 4], dtype="Int64"),
}
)
if engine == "fastparquet":
# Fastparquet doesn't support string columns yet
# Only int and boolean
result2 = result2.drop("c", axis=1)
expected = expected.drop("c", axis=1)
tm.assert_frame_equal(result2, expected)
@pytest.mark.parametrize(
"dtype",
[
"Int64",
"UInt8",
"boolean",
"object",
"datetime64[ns, UTC]",
"float",
"period[D]",
"Float64",
"string",
],
)
def test_read_empty_array(self, pa, dtype):
# GH #41241
df = pd.DataFrame(
{
"value": pd.array([], dtype=dtype),
}
)
check_round_trip(df, pa, read_kwargs={"use_nullable_dtypes": True})
@pytest.mark.filterwarnings("ignore:CategoricalBlock is deprecated:DeprecationWarning")
class TestParquetPyArrow(Base):
def test_basic(self, pa, df_full):
df = df_full
# additional supported types for pyarrow
dti = pd.date_range("20130101", periods=3, tz="Europe/Brussels")
dti = dti._with_freq(None) # freq doesn't round-trip
df["datetime_tz"] = dti
df["bool_with_none"] = [True, None, True]
check_round_trip(df, pa)
def test_basic_subset_columns(self, pa, df_full):
# GH18628
df = df_full
# additional supported types for pyarrow
df["datetime_tz"] = pd.date_range("20130101", periods=3, tz="Europe/Brussels")
check_round_trip(
df,
pa,
expected=df[["string", "int"]],
read_kwargs={"columns": ["string", "int"]},
)
def test_to_bytes_without_path_or_buf_provided(self, pa, df_full):
# GH 37105
buf_bytes = df_full.to_parquet(engine=pa)
assert isinstance(buf_bytes, bytes)
buf_stream = BytesIO(buf_bytes)
res = read_parquet(buf_stream)
tm.assert_frame_equal(df_full, res)
def test_duplicate_columns(self, pa):
# not currently able to handle duplicate columns
df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).copy()
self.check_error_on_write(df, pa, ValueError, "Duplicate column names found")
def test_unsupported(self, pa):
# timedelta
df = pd.DataFrame({"a": pd.timedelta_range("1 day", periods=3)})
self.check_external_error_on_write(df, pa, NotImplementedError)
# mixed python objects
df = pd.DataFrame({"a": ["a", 1, 2.0]})
# pyarrow 0.11 raises ArrowTypeError
# older pyarrows raise ArrowInvalid
self.check_external_error_on_write(df, pa, pyarrow.ArrowException)
def test_categorical(self, pa):
# supported in >= 0.7.0
df = pd.DataFrame()
df["a"] = pd.Categorical(list("abcdef"))
# test for null, out-of-order values, and unobserved category
df["b"] = pd.Categorical(
["bar", "foo", "foo", "bar", None, "bar"],
dtype=pd.CategoricalDtype(["foo", "bar", "baz"]),
)
# test for ordered flag
df["c"] = pd.Categorical(
["a", "b", "c", "a", "c", "b"], categories=["b", "c", "d"], ordered=True
)
check_round_trip(df, pa)
@pytest.mark.xfail(
is_platform_windows(),
reason="localhost connection rejected",
strict=False,
)
def test_s3_roundtrip_explicit_fs(self, df_compat, s3_resource, pa, s3so):
s3fs = pytest.importorskip("s3fs")
s3 = s3fs.S3FileSystem(**s3so)
kw = {"filesystem": s3}
check_round_trip(
df_compat,
pa,
path="pandas-test/pyarrow.parquet",
read_kwargs=kw,
write_kwargs=kw,
)
def test_s3_roundtrip(self, df_compat, s3_resource, pa, s3so):
# GH #19134
s3so = {"storage_options": s3so}
check_round_trip(
df_compat,
pa,
path="s3://pandas-test/pyarrow.parquet",
read_kwargs=s3so,
write_kwargs=s3so,
)
@td.skip_if_no("s3fs") # also requires flask
@pytest.mark.parametrize(
"partition_col",
[
["A"],
[],
],
)
def test_s3_roundtrip_for_dir(
self, df_compat, s3_resource, pa, partition_col, s3so
):
# GH #26388
expected_df = df_compat.copy()
# GH #35791
# read_table uses the new Arrow Datasets API since pyarrow 1.0.0
# Previous behaviour was pyarrow partitioned columns become 'category' dtypes
# These are added to back of dataframe on read. In new API category dtype is
# only used if partition field is string, but this changed again to use
# category dtype for all types (not only strings) in pyarrow 2.0.0
if partition_col:
partition_col_type = (
"int32"
if (not pa_version_under1p0) and pa_version_under2p0
else "category"
)
expected_df[partition_col] = expected_df[partition_col].astype(
partition_col_type
)
check_round_trip(
df_compat,
pa,
expected=expected_df,
path="s3://pandas-test/parquet_dir",
read_kwargs={"storage_options": s3so},
write_kwargs={
"partition_cols": partition_col,
"compression": None,
"storage_options": s3so,
},
check_like=True,
repeat=1,
)
@td.skip_if_no("pyarrow")
def test_read_file_like_obj_support(self, df_compat):
buffer = BytesIO()
df_compat.to_parquet(buffer)
df_from_buf = read_parquet(buffer)
tm.assert_frame_equal(df_compat, df_from_buf)
@td.skip_if_no("pyarrow")
def test_expand_user(self, df_compat, monkeypatch):
monkeypatch.setenv("HOME", "TestingUser")
monkeypatch.setenv("USERPROFILE", "TestingUser")
with pytest.raises(OSError, match=r".*TestingUser.*"):
read_parquet("~/file.parquet")
with pytest.raises(OSError, match=r".*TestingUser.*"):
df_compat.to_parquet("~/file.parquet")
def test_partition_cols_supported(self, pa, df_full):
# GH #23283
partition_cols = ["bool", "int"]
df = df_full
with tm.ensure_clean_dir() as path:
df.to_parquet(path, partition_cols=partition_cols, compression=None)
check_partition_names(path, partition_cols)
assert read_parquet(path).shape == df.shape
def test_partition_cols_string(self, pa, df_full):
# GH #27117
partition_cols = "bool"
partition_cols_list = [partition_cols]
df = df_full
with tm.ensure_clean_dir() as path:
df.to_parquet(path, partition_cols=partition_cols, compression=None)
check_partition_names(path, partition_cols_list)
assert read_parquet(path).shape == df.shape
@pytest.mark.parametrize("path_type", [str, pathlib.Path])
def test_partition_cols_pathlib(self, pa, df_compat, path_type):
# GH 35902
partition_cols = "B"
partition_cols_list = [partition_cols]
df = df_compat
with tm.ensure_clean_dir() as path_str:
path = path_type(path_str)
df.to_parquet(path, partition_cols=partition_cols_list)
assert read_parquet(path).shape == df.shape
def test_empty_dataframe(self, pa):
# GH #27339
df = pd.DataFrame()
check_round_trip(df, pa)
def test_write_with_schema(self, pa):
import pyarrow
df = pd.DataFrame({"x": [0, 1]})
schema = pyarrow.schema([pyarrow.field("x", type=pyarrow.bool_())])
out_df = df.astype(bool)
check_round_trip(df, pa, write_kwargs={"schema": schema}, expected=out_df)
@td.skip_if_no("pyarrow")
def test_additional_extension_arrays(self, pa):
# test additional ExtensionArrays that are supported through the
# __arrow_array__ protocol
df = pd.DataFrame(
{
"a": pd.Series([1, 2, 3], dtype="Int64"),
"b": pd.Series([1, 2, 3], dtype="UInt32"),
"c": pd.Series(["a", None, "c"], dtype="string"),
}
)
check_round_trip(df, pa)
df = pd.DataFrame({"a": pd.Series([1, 2, 3, None], dtype="Int64")})
check_round_trip(df, pa)
@td.skip_if_no("pyarrow", min_version="1.0.0")
def test_pyarrow_backed_string_array(self, pa, string_storage):
# test ArrowStringArray supported through the __arrow_array__ protocol
df = pd.DataFrame({"a": pd.Series(["a", None, "c"], dtype="string[pyarrow]")})
with pd.option_context("string_storage", string_storage):
check_round_trip(df, pa, expected=df.astype(f"string[{string_storage}]"))
@td.skip_if_no("pyarrow")
def test_additional_extension_types(self, pa):
# test additional ExtensionArrays that are supported through the
# __arrow_array__ protocol + by defining a custom ExtensionType
df = pd.DataFrame(
{
# Arrow does not yet support struct in writing to Parquet (ARROW-1644)
# "c": pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2), (3, 4)]),
"d": pd.period_range("2012-01-01", periods=3, freq="D"),
}
)
check_round_trip(df, pa)
def test_timestamp_nanoseconds(self, pa):
# with version 2.0, pyarrow defaults to writing the nanoseconds, so
# this should work without error
df = pd.DataFrame({"a": pd.date_range("2017-01-01", freq="1n", periods=10)})
check_round_trip(df, pa, write_kwargs={"version": "2.0"})
def test_timezone_aware_index(self, pa, timezone_aware_date_list):
if not pa_version_under2p0:
            # temporarily skip this test until it is properly resolved
# https://github.com/pandas-dev/pandas/issues/37286
pytest.skip()
idx = 5 * [timezone_aware_date_list]
df = pd.DataFrame(index=idx, data={"index_as_col": idx})
# see gh-36004
# compare time(zone) values only, skip their class:
# pyarrow always creates fixed offset timezones using pytz.FixedOffset()
# even if it was datetime.timezone() originally
#
# technically they are the same:
# they both implement datetime.tzinfo
# they both wrap datetime.timedelta()
# this use-case sets the resolution to 1 minute
check_round_trip(df, pa, check_dtype=False)
@td.skip_if_no("pyarrow", min_version="1.0.0")
def test_filter_row_groups(self, pa):
# https://github.com/pandas-dev/pandas/issues/26551
df = pd.DataFrame({"a": list(range(0, 3))})
with tm.ensure_clean() as path:
df.to_parquet(path, pa)
result = read_parquet(
path, pa, filters=[("a", "==", 0)], use_legacy_dataset=False
)
assert len(result) == 1
def test_read_parquet_manager(self, pa, using_array_manager):
# ensure that read_parquet honors the pandas.options.mode.data_manager option
df = pd.DataFrame(np.random.randn(10, 3), columns=["A", "B", "C"])
with tm.ensure_clean() as path:
df.to_parquet(path, pa)
result = read_parquet(path, pa)
if using_array_manager:
assert isinstance(result._mgr, pd.core.internals.ArrayManager)
else:
assert isinstance(result._mgr, pd.core.internals.BlockManager)
class TestParquetFastParquet(Base):
def test_basic(self, fp, df_full):
df = df_full
dti = pd.date_range("20130101", periods=3, tz="US/Eastern")
dti = dti._with_freq(None) # freq doesn't round-trip
df["datetime_tz"] = dti
df["timedelta"] = pd.timedelta_range("1 day", periods=3)
check_round_trip(df, fp)
@pytest.mark.skip(reason="not supported")
def test_duplicate_columns(self, fp):
# not currently able to handle duplicate columns
df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).copy()
msg = "Cannot create parquet dataset with duplicate column names"
self.check_error_on_write(df, fp, ValueError, msg)
def test_bool_with_none(self, fp):
df = pd.DataFrame({"a": [True, None, False]})
expected = pd.DataFrame({"a": [1.0, np.nan, 0.0]}, dtype="float16")
# Fastparquet bug in 0.7.1 makes it so that this dtype becomes
# float64
check_round_trip(df, fp, expected=expected, check_dtype=False)
def test_unsupported(self, fp):
# period
df = pd.DataFrame({"a": pd.period_range("2013", freq="M", periods=3)})
# error from fastparquet -> don't check exact error message
self.check_error_on_write(df, fp, ValueError, None)
# mixed
df = pd.DataFrame({"a": ["a", 1, 2.0]})
msg = "Can't infer object conversion type"
self.check_error_on_write(df, fp, ValueError, msg)
def test_categorical(self, fp):
df = pd.DataFrame({"a": pd.Categorical(list("abc"))})
check_round_trip(df, fp)
def test_filter_row_groups(self, fp):
d = {"a": list(range(0, 3))}
df = pd.DataFrame(d)
with tm.ensure_clean() as path:
df.to_parquet(path, fp, compression=None, row_group_offsets=1)
result = read_parquet(path, fp, filters=[("a", "==", 0)])
assert len(result) == 1
def test_s3_roundtrip(self, df_compat, s3_resource, fp, s3so):
# GH #19134
check_round_trip(
df_compat,
fp,
path="s3://pandas-test/fastparquet.parquet",
read_kwargs={"storage_options": s3so},
write_kwargs={"compression": None, "storage_options": s3so},
)
def test_partition_cols_supported(self, fp, df_full):
# GH #23283
partition_cols = ["bool", "int"]
df = df_full
with tm.ensure_clean_dir() as path:
df.to_parquet(
path,
engine="fastparquet",
partition_cols=partition_cols,
compression=None,
)
assert os.path.exists(path)
import fastparquet
actual_partition_cols = fastparquet.ParquetFile(path, False).cats
assert len(actual_partition_cols) == 2
def test_partition_cols_string(self, fp, df_full):
# GH #27117
partition_cols = "bool"
df = df_full
with tm.ensure_clean_dir() as path:
df.to_parquet(
path,
engine="fastparquet",
partition_cols=partition_cols,
compression=None,
)
assert os.path.exists(path)
import fastparquet
actual_partition_cols = fastparquet.ParquetFile(path, False).cats
assert len(actual_partition_cols) == 1
def test_partition_on_supported(self, fp, df_full):
# GH #23283
partition_cols = ["bool", "int"]
df = df_full
with tm.ensure_clean_dir() as path:
df.to_parquet(
path,
engine="fastparquet",
compression=None,
partition_on=partition_cols,
)
assert os.path.exists(path)
import fastparquet
actual_partition_cols = fastparquet.ParquetFile(path, False).cats
assert len(actual_partition_cols) == 2
def test_error_on_using_partition_cols_and_partition_on(self, fp, df_full):
# GH #23283
partition_cols = ["bool", "int"]
df = df_full
msg = (
"Cannot use both partition_on and partition_cols. Use partition_cols for "
"partitioning data"
)
with pytest.raises(ValueError, match=msg):
with tm.ensure_clean_dir() as path:
df.to_parquet(
path,
engine="fastparquet",
compression=None,
partition_on=partition_cols,
partition_cols=partition_cols,
)
def test_empty_dataframe(self, fp):
# GH #27339
df = pd.DataFrame()
expected = df.copy()
expected.index.name = "index"
check_round_trip(df, fp, expected=expected)
def test_timezone_aware_index(self, fp, timezone_aware_date_list):
idx = 5 * [timezone_aware_date_list]
df = pd.DataFrame(index=idx, data={"index_as_col": idx})
expected = df.copy()
expected.index.name = "index"
check_round_trip(df, fp, expected=expected)
def test_use_nullable_dtypes_not_supported(self, monkeypatch, fp):
df = pd.DataFrame({"a": [1, 2]})
with tm.ensure_clean() as path:
df.to_parquet(path)
with pytest.raises(ValueError, match="not supported for the fastparquet"):
read_parquet(path, engine="fastparquet", use_nullable_dtypes=True)
|
|
# -*- coding: utf-8 -*-
"""
This code uses a loop along with our set of coupled differential equations and
matrix math to create arrays of 4-vector quaternions.
The old plotting functions need to be updated and incorporated into the end of
this code, or a better visualization solution needs to be found.
"""
#------------------------------------------------------------------------------
# Importing modules and copying functions
# AKA "Setting stuff up"
#------------------------------------------------------------------------------
import numpy as np
# the identity and the i, j, k basis quaternions as 2x2 complex matrices, used to build general quaternion matrices
rm = np.identity(2)
im = np.array([[-1j,0],[0,1j]])
jm = np.array([[0,1],[-1,0]])
km = np.array([[0,-1j],[-1j,0]])
def vec_mat(v):
'''
Converts a quaternion vector into the 2x2 imaginary matrix representation
'''
return v[0]*rm + v[1]*im + v[2]*jm + v[3]*km
def mat_vec(M):
'''
Converts a 2x2 imaginary matrix quaternion into its vector representation
'''
return np.array([ M[1,1].real , M[1,1].imag , M[0,1].real , -M[0,1].imag ])
def qvecmult(vec1,vec2):
'''
Multiplies two 4-vector quaternions via matrix math
'''
return mat_vec(np.dot(vec_mat(vec1),vec_mat(vec2)))
def qmatcon(M):
'''
conjugates a 2x2 imaginary matrix quaternion
'''
return vec_mat(mat_vec(M)*np.array([1,-1,-1,-1]))
def qveccon(vec):
'''
conjugates 4-vector quaternion
'''
return vec*np.array([1,-1,-1,-1])
def qvecnorm(vec):
'''
normalizes a 4-vector quaternion
'''
return vec/np.sqrt(qvecmult(qveccon(vec),vec)[0])
def qmatnorm(M):
'''
piggy-backs off the previous function to normalize 2x2 imaginary matrices
'''
return vec_mat(qvecnorm(mat_vec(M)))
def qvecmagsqr(vec):
'''
returns the magnitude squared of a 4-vector quaternion
'''
return qvecmult(qveccon(vec),vec)[0]
def qmatmagsqr(M):
'''
piggy-backs off the previous function to give the magnitude squared of 2x2 imaginary matrix
quaternions
'''
return qvecmagsqr(mat_vec(M))
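#------------------------------------------------------------------------------
# Sanity check of the quaternion helpers
# AKA "trust but verify" (illustrative addition, not part of the original code)
#------------------------------------------------------------------------------
# The 2x2 matrix representation should reproduce the Hamilton product
# (e.g. i*j = k) and normalization should yield unit magnitude.
assert np.allclose(qvecmult([0, 1, 0, 0], [0, 0, 1, 0]), [0, 0, 0, 1])  # i*j = k
assert np.isclose(qvecmagsqr(qvecnorm(np.array([1., 2., 3., 4.]))), 1.0)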
#------------------------------------------------------------------------------
# Defining the differential equations
# AKA "Bringing (first) order to the universe"
#------------------------------------------------------------------------------
def q1_dot(q1,q2,p1,p2,a):
'''
takes the current value of things that we know and calculates derivatives
Function assumes 2x2 complex matrices as inputs for q1,q2,p1,p2
a is the coupling constant
'''
return (p1 - a*np.dot(q1,np.dot(qmatcon(q2),p2))) \
/(1. - qmatmagsqr(q1)*qmatmagsqr(q2)*a**2)
def p1_dot(q1,q2,q1dot,q2dot,a,w):
'''
takes the current values of things we know and the hopefully recently
calculated derivatives of q1,q2 and uses them to find other derivatives
'''
return a*np.dot(q1dot,np.dot(qmatcon(q2dot),q2)) - q1*w**2
#------------------------------------------------------------------------------
# Defining necessary constants and initial conditions
# AKA "on the first day..."
#------------------------------------------------------------------------------
w = 1. # \omega_0 in our notation
a = 0.001 # coupling constant. \alpha in our notation
print('alpha =', a)
seed = 8675309
np.random.seed(seed)
print('seed =', seed)
q1 = vec_mat(np.random.rand(4))
q2 = vec_mat(np.random.rand(4))
p1 = np.random.rand(4)
p2 = np.random.rand(4)
# this definition of p1 and p2 ensures the condition that the real part of
# p_dagger*q is zero at t=0
p1[0] = -qvecmult(qveccon(p1),mat_vec(q1))[0]
p2[0] = -qvecmult(qveccon(p2),mat_vec(q2))[0]
p1 = vec_mat(p1)
p2 = vec_mat(p2)
q1 = qmatnorm(q1)
q2 = qmatnorm(q2)
p1 = qmatnorm(p1)
p2 = qmatnorm(p2)
#------------------------------------------------------------------------------
# Defining loop parameters
# AKA "Configuring the space-time continuum"
#------------------------------------------------------------------------------
dt = 0.001 #time step
t = 0
print('dt =', dt)
q1a = []
q2a = []
p1a = []
p2a = []
s1a = []
s2a = []
time = []
L1 = [0,0]
L2 = [0,0]
#------------------------------------------------------------------------------
# Checking conserved quantity
# AKA "might as well..."
#------------------------------------------------------------------------------
con = [] #checking to see if our conserved quantity is actually conserved
def conserved(q1,q2,p1,p2):
return np.dot(qmatcon(p1),q1) + np.dot(qmatcon(p2),q2)
#------------------------------------------------------------------------------
# Creating the time loop
# AKA "Let 'er rip"
#------------------------------------------------------------------------------
while t<2000:
q1a.append(mat_vec(q1))
q2a.append(mat_vec(q2))
p1a.append(mat_vec(p1))
p2a.append(mat_vec(p2))
s1a.append(mat_vec(np.dot(qmatcon(p1),q1)))
s2a.append(mat_vec(np.dot(qmatcon(p2),q2)))
time.append(t)
con.append(mat_vec(conserved(q1,q2,p1,p2)))
q1d = q1_dot(q1,q2,p1,p2,a)
q2d = q1_dot(q2,q1,p2,p1,a)
p1d = p1_dot(q1,q2,q1d,q2d,a,w)
p2d = p1_dot(q2,q1,q2d,q1d,a,w)
q1 += q1d*dt
q2 += q2d*dt
p1 += p1d*dt
p2 += p2d*dt
L1[0] = L1[1]
L1[1] = np.sqrt(qmatmagsqr(p1)) - np.sqrt(qmatmagsqr(q1))
L2[0] = L2[1]
L2[1] = np.sqrt(qmatmagsqr(p2)) - np.sqrt(qmatmagsqr(q2))
if L1[0]*L1[1]<0:
q1 = qmatnorm(q1)
p1 = qmatnorm(p1)
if L2[0]*L2[1]<0:
q2 = qmatnorm(q2)
p2 = qmatnorm(p2)
t += dt
q1a = np.array(q1a)
q2a = np.array(q2a)
p1a = np.array(p1a)
p2a = np.array(p2a)
s1a = np.array(s1a)
s2a = np.array(s2a)
time = np.array(time)
con = np.array(con)
#------------------------------------------------------------------------------
# Plotting things
# AKA "Can we see it now?"
#------------------------------------------------------------------------------
import matplotlib.pyplot as plt
def vecplot(thing,time,name):
plt.clf()
plt.title(name)
plt.plot(time,thing[:,0],label='Real', color = 'black')
plt.plot(time,thing[:,1],label='i', color = 'red')
plt.plot(time,thing[:,2],label='j', color = 'green')
plt.plot(time,thing[:,3],label='k', color = 'blue')
plt.legend(loc='best')
plt.xlim([time[0], time[-1]])
plt.grid()
plt.show()
def scalarplot(thing,time,name):
plt.clf()
plt.title(name)
plt.plot(time,thing,color = 'black')
plt.grid()
plt.show()
vecplot(con,time,r'$p_1^{\dagger}q_1 + p_2^{\dagger}q_2$')
vecplot(q1a,time,'$q_1$')
vecplot(q2a,time,'$q_2$')
vecplot(p1a,time,'$p_1$')
vecplot(p2a,time,'$p_2$')
vecplot(s1a,time,r'$p_1^{\dagger}q_1$')
vecplot(s2a,time,r'$p_2^{\dagger}q_2$')
|
|
#!/usr/bin/env python
import inspect
import os
import sys
import time
import traceback
import cv2
import numpy as np
import utilscv
dir_file = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(dir_file, "../../.."))
import gabriel
import gabriel.proxy
LOG = gabriel.logging.getLogger(__name__)
MIN_MATCH_COUNT = 20
MIN_INLIER = 5
# "ALL" for comparing all image
# "ODD" for odd image
# "EVEN" for even image
CONFIG_COMP = "ALL"
# Helper class that stores, for each image in the "models" database, its file
# name, shape, binary data, keypoints, descriptors and the mutual matches
# computed against the current webcam frame.
class ImageFeature(object):
def __init__(self, nameFile, shape, imageBinary, kp, desc):
        # File name
self.nameFile = nameFile
        # Shape of the image
self.shape = shape
        # Binary data of the image
self.imageBinary = imageBinary
# Keypoints of the image once applied the feature detection algorithm
self.kp = kp
        # Descriptors of the detected features
self.desc = desc
# Matching of the image of the database with the image of the webcam
self.matchingWebcam = []
# Matching the webcam with the current image of the database
self.matchingDatabase = []
    # Clears the previously computed matches so that a new webcam frame can be processed
def clearMatchingMutuos(self):
self.matchingWebcam = []
self.matchingDatabase = []
def nothing(*arg):
pass
def raw2cv_image(raw_data):
img_array = np.asarray(bytearray(raw_data), dtype=np.int8)
cv_image = cv2.imdecode(img_array, -1)
return cv_image
def print_frame():
callerframerecord = inspect.stack()[1] # 0 represents this line # 1 represents line at caller
frame = callerframerecord[0]
info = inspect.getframeinfo(frame)
# print info.filename # __FILE__ -> Test.py
# print info.function # __FUNCTION__ -> Main
# print info.lineno # __LINE__ -> 13
return info.lineno
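# Illustrative sketch (not part of the original pipeline): mutual
# nearest-neighbour matching between two descriptor sets using NumPy
# broadcasting -- the same idea used by find_matching_mutuos_optimum below.
# desc_a and desc_b are assumed to be 2-D float arrays (n_features x dim).
def mutual_nearest_neighbours(desc_a, desc_b):
    # all pairwise distances at once: shape (len(desc_a), len(desc_b))
    dists = np.linalg.norm(desc_a[:, None, :] - desc_b[None, :, :], axis=-1)
    best_b = dists.argmin(axis=1)  # best match in desc_b for each row of desc_a
    best_a = dists.argmin(axis=0)  # best match in desc_a for each row of desc_b
    # keep only the pairs that agree in both directions
    return [(i, j) for i, j in enumerate(best_b) if best_a[j] == i]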
class ObjectRecognition:
log_images_counter = 0
def __init__(self, method=4):
self.log_video_writer_created = False
self.log_video_writer = None
LOG.info("ObjectRecognition Class initializing...")
# Creating window and associated sliders, and mouse callback:
cv2.namedWindow('Features')
cv2.namedWindow('ImageDetector')
# Selection of the method to compute the features
cv2.createTrackbar('method', 'Features', 0, 4, nothing)
        # Reprojection error threshold used when computing inliers with RANSAC
cv2.createTrackbar('projer', 'Features', 5, 10, nothing)
# Minimum number of inliers to indicate that an object has been recognized
cv2.createTrackbar('inliers', 'Features', 20, 50, nothing)
# Trackbar to indicate whether features are painted or not
cv2.createTrackbar('drawKP', 'Features', 0, 1, nothing)
# Creation of the feature detector, according to method (only at the beginning):
# self.method = cv2.getTrackbarPos('method', 'Features')
self.method = method
self.method_str = ''
if self.method == 0:
if self.method_str != 'SIFT':
# no sift error kill
self.method_str = 'SIFT'
                # The number of features is limited to 250 so that the algorithm runs smoothly.
self.detector = cv2.xfeatures2d.SIFT_create(nfeatures=250)
elif self.method == 1:
if self.method_str != 'AKAZE':
self.method_str = 'AKAZE'
self.detector = cv2.AKAZE_create()
elif self.method == 2:
if self.method_str != 'SURF':
self.method_str = 'SURF'
self.detector = cv2.xfeatures2d.SURF_create(800)
elif self.method == 3:
if self.method_str != 'ORB':
self.method_str = 'ORB'
self.detector = self.orb = cv2.ORB_create(400)
elif self.method == 4:
if self.method_str != 'BRISK':
self.method_str = 'BRISK'
self.detector = cv2.BRISK_create()
self.dataBase = dict([('SIFT', []), ('AKAZE', []), ('SURF', []),
('ORB', []), ('BRISK', [])])
self.bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
self.load_models_from_directory()
    # Function responsible for computing the features of every image in the
    # "models" directory and loading them into the database.
def load_models_from_directory(self):
# The method returns a dictionary. The key is the features algorithm
# while the value is a list of objects of type ImageFeature
# where all the data of the features of the images of the Database are stored
config_index = 0
for imageFile in os.listdir(os.path.join(dir_file, "models")):
if CONFIG_COMP == "ODD" and config_index % 2:
config_index += 1
continue
elif CONFIG_COMP == "EVEN" and not config_index % 2:
config_index += 1
continue
config_index += 1
            # The image is loaded with OpenCV
colorImage = cv2.imread(os.path.join(dir_file, "models/" + str(imageFile)))
            # Convert the image to grayscale
currentImage = cv2.cvtColor(colorImage, cv2.COLOR_BGR2GRAY)
            # Keypoints and descriptors are computed for the grayscale database image
kp, desc = self.detector.detectAndCompute(currentImage, None)
self.dataBase[self.method_str].append(ImageFeature(imageFile, currentImage.shape, colorImage, kp, desc))
return self.dataBase
    # Function responsible for computing mutual matching using nested loops.
    # It is a very slow solution because it does not take advantage of NumPy,
    # so it is kept only for reference and no slider is provided to select it.
def find_matching_mutuos(self, method_str, desc, kp):
for key, item in enumerate(self.dataBase[method_str]):
self.dataBase[method_str][key].clearMatchingMutuos()
for i in range(len(desc)):
primerMatching = None
canditatoDataBase = None
matchingSegundo = None
candidateWebCam = None
for j in range(len(self.dataBase[method_str][key].desc)):
valorMatching = np.linalg.norm(desc[i] - self.dataBase[method_str][key].desc[j])
if (primerMatching is None or valorMatching < primerMatching):
primerMatching = valorMatching
canditatoDataBase = j
for k in range(len(desc)):
valorMatching = np.linalg.norm(self.dataBase[method_str][key].desc[canditatoDataBase] - desc[k])
if (matchingSegundo is None or valorMatching < matchingSegundo):
matchingSegundo = valorMatching
candidateWebCam = k
if not candidateWebCam is None and i == candidateWebCam:
self.dataBase[method_str][key].matchingWebcam.append(kp[i].pt)
self.dataBase[method_str][key].matchingDatabase.append(
self.dataBase[method_str][key].kp[canditatoDataBase].pt)
return self.dataBase[method_str]
    # Function responsible for computing the mutual matching of a webcam image
    # against all the images in the database. It receives as input the feature
    # method in use and the descriptors/keypoints computed on the webcam image.
def find_matching_mutuos_optimum(self, method_str, desc, kp):
# The algorithm is repeated for each image in the database.
for key, item in enumerate(self.dataBase[method_str]):
self.dataBase[method_str][key].clearMatchingMutuos()
for i in range(len(desc)):
                # The norm of the difference between the current descriptor and
                # every descriptor of the database image is computed. Using NumPy
                # broadcasting, all distances between the current descriptor and
                # all descriptors of the current database image are obtained without loops.
distanceListFromWebCam = np.linalg.norm(desc[i] - self.dataBase[method_str][key].desc, axis=-1)
# You get the candidate who is the shortest distance from the current descriptor
candidatoDataBase = distanceListFromWebCam.argmin()
                # Check whether the matching is mutual, i.e. whether it also holds
                # in the other direction: the database candidate must have the
                # current webcam descriptor as its best match.
distanceListFromDataBase = np.linalg.norm(self.dataBase[method_str][key].desc[candidatoDataBase] - desc,
axis=-1)
candidatoWebCam = distanceListFromDataBase.argmin()
                # If the match is (approximately) mutual -- the indices agree to
                # within MIN_MATCH_COUNT -- it is stored for later processing
if abs(i - candidatoWebCam) < MIN_MATCH_COUNT:
self.dataBase[method_str][key].matchingWebcam.append(kp[i].pt)
self.dataBase[method_str][key].matchingDatabase.append(
self.dataBase[method_str][key].kp[candidatoDataBase].pt)
            # For convenience they are converted to NumPy arrays
self.dataBase[method_str][key].matchingWebcam = np.array(self.dataBase[method_str][key].matchingWebcam)
self.dataBase[method_str][key].matchingDatabase = np.array(self.dataBase[method_str][key].matchingDatabase)
return self.dataBase[method_str]
    # This function selects the best database image based on the number of
    # inliers each database image has with the image obtained from the webcam.
def calculate_best_image_by_num_inliers(self, method_str, projer, minInliers):
if minInliers < MIN_INLIER:
minInliers = MIN_INLIER
bestIndex = None
bestMask = None
numInliers = 0
# For each of the images
for index, imgWithMatching in enumerate(self.dataBase[method_str]):
# The RANSAC algorithm is computed to calculate the number of inliers
try:
_, mask = cv2.findHomography(imgWithMatching.matchingDatabase,
imgWithMatching.matchingWebcam, cv2.RANSAC, projer)
except Exception as e:
LOG.info("Line: %s :: Error: %s", print_frame(), str(e))
mask = None
if not mask is None:
                # The number of inliers is obtained from the mask. If it exceeds
                # the required minimum and is larger than that of any previous
                # image, this image is considered the best match for the object
                # stored in the database.
countNonZero = np.count_nonzero(mask)
if countNonZero >= minInliers and countNonZero > numInliers:
numInliers = countNonZero
bestIndex = index
bestMask = (mask >= 1).reshape(-1)
        # If a best image has been found (and therefore it has at least the minimum
        # number of inliers), the keypoints that are inliers are extracted from the
        # mask returned by findHomography and returned together with the best image.
LOG.info("Line: %s :: bestIndex: %s, bestMask: %s", print_frame(), bestIndex, bestMask)
if not bestIndex is None:
bestImage = self.dataBase[method_str][bestIndex]
inliersWebCam = bestImage.matchingWebcam[bestMask]
inliersDataBase = bestImage.matchingDatabase[bestMask]
return bestImage, inliersWebCam, inliersDataBase
return None, None, None
    # This function computes the affine matrix A, draws a rectangle around the
    # detected object and shows, in a separate window, the database image
    # corresponding to the recognized object.
def calculateAffinityMatrixAndDraw(self, bestImage, inliersDataBase, inliersWebCam, imgout):
# The affinity matrix A
A = cv2.estimateRigidTransform(inliersDataBase, inliersWebCam, fullAffine=True)
A = np.vstack((A, [0, 0, 1]))
# Calculate the points of the rectangle occupied by the recognized object
a = np.array([0, 0, 1], np.float)
b = np.array([bestImage.shape[1], 0, 1], np.float)
c = np.array([bestImage.shape[1], bestImage.shape[0], 1], np.float)
d = np.array([0, bestImage.shape[0], 1], np.float)
centro = np.array([float(bestImage.shape[0]) / 2,
float(bestImage.shape[1]) / 2, 1], np.float)
        # Multiply the model-space points by A to convert them into real image points
a = np.dot(A, a)
b = np.dot(A, b)
c = np.dot(A, c)
d = np.dot(A, d)
centro = np.dot(A, centro)
# The points are dehomogenized
        areal = (int(a[0] / a[2]), int(a[1] / a[2]))
breal = (int(b[0] / b[2]), int(b[1] / b[2]))
creal = (int(c[0] / c[2]), int(c[1] / c[2]))
dreal = (int(d[0] / d[2]), int(d[1] / d[2]))
centroreal = (int(centro[0] / centro[2]), int(centro[1] / centro[2]))
# The polygon and the file name of the image are painted in the center of the polygon
points = np.array([areal, breal, creal, dreal], np.int32)
cv2.polylines(imgout, np.int32([points]), 1, (255, 255, 255), thickness=2)
utilscv.draw_str(imgout, centroreal, bestImage.nameFile.upper())
# The detected object is displayed in a separate window
# self.display_image('ImageDetector', bestImage.imageBinary, resize_max=640, wait_time=1)
def debug_save_image(self, image_data):
if gabriel.Debug.SAVE_IMAGES:
self.log_images_counter += 1
with open(os.path.join(gabriel.Const.LOG_IMAGES_PATH,
"frame-" + gabriel.util.add_preceding_zeros(self.log_images_counter) + ".jpeg"),
"w") as f:
f.write(image_data)
if gabriel.Debug.SAVE_VIDEO:
import cv2
img_array = np.asarray(bytearray(image_data), dtype=np.int8)
cv_image = cv2.imdecode(img_array, -1)
if not self.log_video_writer_created:
self.log_video_writer_created = True
self.log_video_writer = cv2.VideoWriter(gabriel.Const.LOG_VIDEO_PATH,
cv2.VideoWriter_fourcc(*'XVID'), 10,
(cv_image.shape[1], cv_image.shape[0]))
self.log_video_writer.write(cv_image)
def display_image(self, display_name, img, wait_time=-1, is_resize=True, resize_method="max", resize_max=-1,
resize_scale=1, save_image=False):
'''
Display image at appropriate size. There are two ways to specify the size:
1. If resize_max is greater than zero, the longer edge (either width or height) of the image is set to this value
2. If resize_scale is greater than zero, the image is scaled by this factor
'''
if is_resize:
img_shape = img.shape
            height = img_shape[0]
width = img_shape[1]
if resize_max > 0:
if height > width:
img_display = cv2.resize(img, (resize_max * width / height, resize_max),
interpolation=cv2.INTER_NEAREST)
else:
img_display = cv2.resize(img, (resize_max, resize_max * height / width),
interpolation=cv2.INTER_NEAREST)
elif resize_scale > 0:
img_display = cv2.resize(img, (width * resize_scale, height * resize_scale),
interpolation=cv2.INTER_NEAREST)
else:
LOG.info("Unexpected parameter in image display. About to exit...")
else:
img_display = img
if save_image:
self.debug_save_image(img_display)
cv2.imshow(display_name, img_display)
cv2.waitKey(wait_time)
def recognize_image(self, image_data):
"""
Ref can be found in
1. https://stackoverflow.com/a/41122349/2049763
2. https://stackoverflow.com/q/37716120/2049763
:param image_data:
:return: computed image
"""
kp, desc = self.orb.detectAndCompute(image_data, None)
try:
for imageFeature in self.dataBase["ORB"]:
# Match descriptors.
matches = self.bf.match(imageFeature.desc, desc)
# Sort them in the order of their distance.
matches = sorted(matches, key=lambda x: x.distance) # compute the descriptors with ORB
if len(matches) > MIN_MATCH_COUNT:
# Draw first 10 matches.
image_data = cv2.drawMatches(imageFeature.imageBinary, imageFeature.kp, image_data, kp,
matches[:MIN_MATCH_COUNT], None, flags=2)
except Exception as e:
LOG.info("%s\n" % str(e))
# self.display_image('input', image_data, resize_max=640, wait_time=1)
return image_data
def detect_image(self, image_data):
"""
Ref can be found in
1. https://stackoverflow.com/a/41122349/2049763
2. https://stackoverflow.com/q/37716120/2049763
:param image_data:
:return: computed image
"""
kp, desc = self.orb.detectAndCompute(image_data, None)
MAX_MATCH_COUNT = MIN_MATCH_COUNT
MAX_MATCH = None
imageFeature = None
matchesMask = False
try:
for imageFeatures in self.dataBase["ORB"]:
matches = self.bf.match(imageFeatures.desc, desc)
matches = sorted(matches, key=lambda x: x.distance) # compute the descriptors with ORB
if len(matches) > MAX_MATCH_COUNT:
MAX_MATCH_COUNT = len(matches)
MAX_MATCH = matches
imageFeature = imageFeatures
if not matchesMask:
matchesMask = True
if matchesMask:
LOG.info("Match found ! !! !!!")
image_data = cv2.drawMatches(imageFeature.imageBinary, imageFeature.kp, image_data, kp,
MAX_MATCH[:MIN_MATCH_COUNT],
None, flags=2)
else:
LOG.info("No match found")
except Exception as e:
LOG.info("%s\n" % str(e))
# self.display_image('input', image, resize_max=640, wait_time=1)
return image_data
def p4_object_recog(self, frame):
try:
t1 = time.time()
image_in = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
image_out = frame.copy()
kp, desc = self.detector.detectAndCompute(image_in, None)
selectedDataBase = self.dataBase[self.method_str]
if len(selectedDataBase) > 0:
# We perform the mutual matching
imgsMatchingMutuos = self.find_matching_mutuos_optimum(self.method_str, desc, kp)
minInliers = int(cv2.getTrackbarPos('inliers', 'Features'))
projer = float(cv2.getTrackbarPos('projer', 'Features'))
LOG.info("Line: %s :: minInliers: %s, projer: %s", print_frame(), minInliers, projer)
                # The best image is the one with the largest number of inliers,
                # provided that it exceeds the minimum set by the 'inliers' trackbar.
bestImage, inliersWebCam, inliersDataBase = self.calculate_best_image_by_num_inliers(self.method_str,
projer, minInliers)
if not bestImage is None:
# If we find a good image, we calculate the affinity matrix
# and paint the recognized object on the screen.
self.calculateAffinityMatrixAndDraw(bestImage, inliersDataBase, inliersWebCam, image_out)
# Get descriptor dimension of each feature:
if desc is not None:
if len(desc) > 0:
dim = len(desc[0])
else:
dim = -1
            # Draw the features and write informative text on the image.
            # The features are only drawn if the 'drawKP' slider indicates it.
if (int(cv2.getTrackbarPos('drawKP', 'Features')) > 0):
cv2.drawKeypoints(image_out, kp, image_out,
flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
t1 = 1000 * (time.time() - t1)
utilscv.draw_str(image_out, (20, 20),
"Method {0}, {1} features found, desc. dim. = {2} ".
format(self.method_str, len(kp), dim))
utilscv.draw_str(image_out, (20, 40), "Time (ms): {0}".format(str(t1)))
# Show results and check keys:
            # self.display_image('Features', image_out, resize_max=640, wait_time=1)
return image_out
except Exception as e:
LOG.info("* ** p4_object_recog Error: %s\n" % str(e))
traceback.print_exc()
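# Illustrative sketch (not part of the original module): how the inlier count
# used by calculate_best_image_by_num_inliers can be obtained from
# cv2.findHomography, here on synthetic point correspondences.
def _ransac_inlier_count_demo(reproj_err=5.0):
    rng = np.random.RandomState(0)
    src = (rng.rand(30, 2) * 100).astype(np.float32)      # "database" points
    dst = src + np.float32([10, 20])                      # translated "webcam" points
    dst[:5] += (rng.rand(5, 2) * 50).astype(np.float32)   # corrupt a few -> outliers
    _, mask = cv2.findHomography(src, dst, cv2.RANSAC, reproj_err)
    return int(np.count_nonzero(mask))                    # number of RANSAC inliers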
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'OrganizationDocument.created'
db.add_column(u'organizations_organizationdocument', 'created',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True),
keep_default=False)
# Adding field 'OrganizationDocument.updated'
db.add_column(u'organizations_organizationdocument', 'updated',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True),
keep_default=False)
# Adding field 'OrganizationDocument.deleted'
db.add_column(u'organizations_organizationdocument', 'deleted',
self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True),
keep_default=False)
# Adding field 'OrganizationMember.created'
db.add_column(u'organizations_organizationmember', 'created',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True),
keep_default=False)
# Adding field 'OrganizationMember.updated'
db.add_column(u'organizations_organizationmember', 'updated',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True),
keep_default=False)
# Adding field 'OrganizationMember.deleted'
db.add_column(u'organizations_organizationmember', 'deleted',
self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'OrganizationDocument.created'
db.delete_column(u'organizations_organizationdocument', 'created')
# Deleting field 'OrganizationDocument.updated'
db.delete_column(u'organizations_organizationdocument', 'updated')
# Deleting field 'OrganizationDocument.deleted'
db.delete_column(u'organizations_organizationdocument', 'deleted')
# Deleting field 'OrganizationMember.created'
db.delete_column(u'organizations_organizationmember', 'created')
# Deleting field 'OrganizationMember.updated'
db.delete_column(u'organizations_organizationmember', 'updated')
# Deleting field 'OrganizationMember.deleted'
db.delete_column(u'organizations_organizationmember', 'deleted')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'geo.country': {
'Meta': {'ordering': "['name']", 'object_name': 'Country'},
'alpha2_code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'alpha3_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'numeric_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'oda_recipient': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subregion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['geo.SubRegion']"})
},
u'geo.region': {
'Meta': {'ordering': "['name']", 'object_name': 'Region'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'numeric_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'geo.subregion': {
'Meta': {'ordering': "['name']", 'object_name': 'SubRegion'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'numeric_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['geo.Region']"})
},
u'members.member': {
'Meta': {'object_name': 'Member'},
'about': ('django.db.models.fields.TextField', [], {'max_length': '265', 'blank': 'True'}),
'available_time': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'birthdate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'disable_token': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'}),
'facebook': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'newsletter': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'picture': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'primary_language': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'share_money': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'share_time_knowledge': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'skypename': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'user_type': ('django.db.models.fields.CharField', [], {'default': "'person'", 'max_length': '25'}),
'username': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'why': ('django.db.models.fields.TextField', [], {'max_length': '265', 'blank': 'True'})
},
u'organizations.organization': {
'Meta': {'ordering': "['name']", 'object_name': 'Organization'},
'account_bank_address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_bank_city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_bank_country': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'account_bank_country'", 'null': 'True', 'to': u"orm['geo.Country']"}),
'account_bank_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_bank_postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'account_bic': ('django_iban.fields.SWIFTBICField', [], {'max_length': '11', 'blank': 'True'}),
'account_holder_address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_holder_city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_holder_country': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'account_holder_country'", 'null': 'True', 'to': u"orm['geo.Country']"}),
'account_holder_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_holder_postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'account_iban': ('django_iban.fields.IBANField', [], {'max_length': '34', 'blank': 'True'}),
'account_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'account_other': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'address_line1': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'address_line2': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'country'", 'null': 'True', 'to': u"orm['geo.Country']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'facebook': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'partner_organizations': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'registration': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'skype': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
u'organizations.organizationdocument': {
'Meta': {'object_name': 'OrganizationDocument'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['members.Member']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'documents'", 'to': u"orm['organizations.Organization']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
u'organizations.organizationmember': {
'Meta': {'object_name': 'OrganizationMember'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'function': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'to': u"orm['organizations.Organization']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['members.Member']"})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
}
}
complete_apps = ['organizations']
|
|
"""The tests for the Template light platform."""
import logging
import pytest
from homeassistant import setup
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
ATTR_WHITE_VALUE,
)
from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE
from homeassistant.core import callback
from tests.common import assert_setup_component, get_test_home_assistant
from tests.components.light import common
_LOGGER = logging.getLogger(__name__)
# Represents the light's availability
_STATE_AVAILABILITY_BOOLEAN = "availability_boolean.state"
class TestTemplateLight:
"""Test the Template light."""
hass = None
calls = None
# pylint: disable=invalid-name
def setup_method(self, method):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.calls = []
@callback
def record_call(service):
"""Track function calls."""
self.calls.append(service)
self.hass.services.register("test", "automation", record_call)
def teardown_method(self, method):
"""Stop everything that was started."""
self.hass.stop()
def test_template_state_invalid(self):
"""Test template state with render error."""
with assert_setup_component(1, "light"):
assert setup.setup_component(
self.hass,
"light",
{
"light": {
"platform": "template",
"lights": {
"test_template_light": {
"value_template": "{{states.test['big.fat...']}}",
"turn_on": {
"service": "light.turn_on",
"entity_id": "light.test_state",
},
"turn_off": {
"service": "light.turn_off",
"entity_id": "light.test_state",
},
"set_level": {
"service": "light.turn_on",
"data_template": {
"entity_id": "light.test_state",
"brightness": "{{brightness}}",
},
},
}
},
}
},
)
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get("light.test_template_light")
assert state.state == STATE_OFF
def test_template_state_text(self):
"""Test the state text of a template."""
with assert_setup_component(1, "light"):
assert setup.setup_component(
self.hass,
"light",
{
"light": {
"platform": "template",
"lights": {
"test_template_light": {
"value_template": "{{ states.light.test_state.state }}",
"turn_on": {
"service": "light.turn_on",
"entity_id": "light.test_state",
},
"turn_off": {
"service": "light.turn_off",
"entity_id": "light.test_state",
},
"set_level": {
"service": "light.turn_on",
"data_template": {
"entity_id": "light.test_state",
"brightness": "{{brightness}}",
},
},
}
},
}
},
)
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.set("light.test_state", STATE_ON)
self.hass.block_till_done()
state = self.hass.states.get("light.test_template_light")
assert state.state == STATE_ON
state = self.hass.states.set("light.test_state", STATE_OFF)
self.hass.block_till_done()
state = self.hass.states.get("light.test_template_light")
assert state.state == STATE_OFF
@pytest.mark.parametrize(
"expected_state,template",
[(STATE_ON, "{{ 1 == 1 }}"), (STATE_OFF, "{{ 1 == 2 }}")],
)
def test_template_state_boolean(self, expected_state, template):
"""Test the setting of the state with boolean on."""
with assert_setup_component(1, "light"):
assert setup.setup_component(
self.hass,
"light",
{
"light": {
"platform": "template",
"lights": {
"test_template_light": {
"value_template": template,
"turn_on": {
"service": "light.turn_on",
"entity_id": "light.test_state",
},
"turn_off": {
"service": "light.turn_off",
"entity_id": "light.test_state",
},
"set_level": {
"service": "light.turn_on",
"data_template": {
"entity_id": "light.test_state",
"brightness": "{{brightness}}",
},
},
}
},
}
},
)
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get("light.test_template_light")
assert state.state == expected_state
def test_template_syntax_error(self):
"""Test templating syntax error."""
with assert_setup_component(0, "light"):
assert setup.setup_component(
self.hass,
"light",
{
"light": {
"platform": "template",
"lights": {
"test_template_light": {
"value_template": "{%- if false -%}",
"turn_on": {
"service": "light.turn_on",
"entity_id": "light.test_state",
},
"turn_off": {
"service": "light.turn_off",
"entity_id": "light.test_state",
},
"set_level": {
"service": "light.turn_on",
"data_template": {
"entity_id": "light.test_state",
"brightness": "{{brightness}}",
},
},
}
},
}
},
)
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_invalid_name_does_not_create(self):
"""Test invalid name."""
with assert_setup_component(0, "light"):
assert setup.setup_component(
self.hass,
"light",
{
"light": {
"platform": "template",
"lights": {
"bad name here": {
"value_template": "{{ 1== 1}}",
"turn_on": {
"service": "light.turn_on",
"entity_id": "light.test_state",
},
"turn_off": {
"service": "light.turn_off",
"entity_id": "light.test_state",
},
"set_level": {
"service": "light.turn_on",
"data_template": {
"entity_id": "light.test_state",
"brightness": "{{brightness}}",
},
},
}
},
}
},
)
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_invalid_light_does_not_create(self):
"""Test invalid light."""
with assert_setup_component(0, "light"):
assert setup.setup_component(
self.hass,
"light",
{
"light": {
"platform": "template",
"switches": {"test_template_light": "Invalid"},
}
},
)
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_no_lights_does_not_create(self):
"""Test if there are no lights no creation."""
with assert_setup_component(0, "light"):
assert setup.setup_component(
self.hass, "light", {"light": {"platform": "template"}}
)
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
@pytest.mark.parametrize(
"missing_key, count", [("value_template", 1), ("turn_on", 0), ("turn_off", 0)]
)
def test_missing_key(self, missing_key, count):
"""Test missing template."""
light = {
"light": {
"platform": "template",
"lights": {
"light_one": {
"value_template": "{{ 1== 1}}",
"turn_on": {
"service": "light.turn_on",
"entity_id": "light.test_state",
},
"turn_off": {
"service": "light.turn_off",
"entity_id": "light.test_state",
},
"set_level": {
"service": "light.turn_on",
"data_template": {
"entity_id": "light.test_state",
"brightness": "{{brightness}}",
},
},
}
},
}
}
del light["light"]["lights"]["light_one"][missing_key]
with assert_setup_component(count, "light"):
assert setup.setup_component(self.hass, "light", light)
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
if count:
assert self.hass.states.all() != []
else:
assert self.hass.states.all() == []
def test_on_action(self):
"""Test on action."""
assert setup.setup_component(
self.hass,
"light",
{
"light": {
"platform": "template",
"lights": {
"test_template_light": {
"value_template": "{{states.light.test_state.state}}",
"turn_on": {"service": "test.automation"},
"turn_off": {
"service": "light.turn_off",
"entity_id": "light.test_state",
},
"set_level": {
"service": "light.turn_on",
"data_template": {
"entity_id": "light.test_state",
"brightness": "{{brightness}}",
},
},
}
},
}
},
)
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
self.hass.states.set("light.test_state", STATE_OFF)
self.hass.block_till_done()
state = self.hass.states.get("light.test_template_light")
assert state.state == STATE_OFF
common.turn_on(self.hass, "light.test_template_light")
self.hass.block_till_done()
assert len(self.calls) == 1
def test_on_action_optimistic(self):
"""Test on action with optimistic state."""
assert setup.setup_component(
self.hass,
"light",
{
"light": {
"platform": "template",
"lights": {
"test_template_light": {
"turn_on": {"service": "test.automation"},
"turn_off": {
"service": "light.turn_off",
"entity_id": "light.test_state",
},
"set_level": {
"service": "light.turn_on",
"data_template": {
"entity_id": "light.test_state",
"brightness": "{{brightness}}",
},
},
}
},
}
},
)
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
self.hass.states.set("light.test_state", STATE_OFF)
self.hass.block_till_done()
state = self.hass.states.get("light.test_template_light")
assert state.state == STATE_OFF
common.turn_on(self.hass, "light.test_template_light")
self.hass.block_till_done()
state = self.hass.states.get("light.test_template_light")
assert len(self.calls) == 1
assert state.state == STATE_ON
def test_off_action(self):
"""Test off action."""
assert setup.setup_component(
self.hass,
"light",
{
"light": {
"platform": "template",
"lights": {
"test_template_light": {
"value_template": "{{states.light.test_state.state}}",
"turn_on": {
"service": "light.turn_on",
"entity_id": "light.test_state",
},
"turn_off": {"service": "test.automation"},
"set_level": {
"service": "light.turn_on",
"data_template": {
"entity_id": "light.test_state",
"brightness": "{{brightness}}",
},
},
}
},
}
},
)
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
self.hass.states.set("light.test_state", STATE_ON)
self.hass.block_till_done()
state = self.hass.states.get("light.test_template_light")
assert state.state == STATE_ON
common.turn_off(self.hass, "light.test_template_light")
self.hass.block_till_done()
assert len(self.calls) == 1
def test_off_action_optimistic(self):
"""Test off action with optimistic state."""
assert setup.setup_component(
self.hass,
"light",
{
"light": {
"platform": "template",
"lights": {
"test_template_light": {
"turn_on": {
"service": "light.turn_on",
"entity_id": "light.test_state",
},
"turn_off": {"service": "test.automation"},
"set_level": {
"service": "light.turn_on",
"data_template": {
"entity_id": "light.test_state",
"brightness": "{{brightness}}",
},
},
}
},
}
},
)
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get("light.test_template_light")
assert state.state == STATE_OFF
common.turn_off(self.hass, "light.test_template_light")
self.hass.block_till_done()
assert len(self.calls) == 1
state = self.hass.states.get("light.test_template_light")
assert state.state == STATE_OFF
def test_white_value_action_no_template(self):
"""Test setting white value with optimistic template."""
assert setup.setup_component(
self.hass,
"light",
{
"light": {
"platform": "template",
"lights": {
"test_template_light": {
"value_template": "{{1 == 1}}",
"turn_on": {
"service": "light.turn_on",
"entity_id": "light.test_state",
},
"turn_off": {
"service": "light.turn_off",
"entity_id": "light.test_state",
},
"set_white_value": {
"service": "test.automation",
"data_template": {
"entity_id": "test.test_state",
"white_value": "{{white_value}}",
},
},
}
},
}
},
)
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get("light.test_template_light")
assert state.attributes.get("white_value") is None
common.turn_on(
self.hass, "light.test_template_light", **{ATTR_WHITE_VALUE: 124}
)
self.hass.block_till_done()
assert len(self.calls) == 1
assert self.calls[0].data["white_value"] == 124
state = self.hass.states.get("light.test_template_light")
assert state is not None
assert state.attributes.get("white_value") == 124
@pytest.mark.parametrize(
"expected_white_value,template",
[
(255, "{{255}}"),
(None, "{{256}}"),
(None, "{{x - 12}}"),
(None, "{{ none }}"),
(None, ""),
],
)
def test_white_value_template(self, expected_white_value, template):
"""Test the template for the white value."""
with assert_setup_component(1, "light"):
assert setup.setup_component(
self.hass,
"light",
{
"light": {
"platform": "template",
"lights": {
"test_template_light": {
"value_template": "{{ 1 == 1 }}",
"turn_on": {
"service": "light.turn_on",
"entity_id": "light.test_state",
},
"turn_off": {
"service": "light.turn_off",
"entity_id": "light.test_state",
},
"set_white_value": {
"service": "light.turn_on",
"data_template": {
"entity_id": "light.test_state",
"white_value": "{{white_value}}",
},
},
"white_value_template": template,
}
},
}
},
)
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get("light.test_template_light")
assert state is not None
assert state.attributes.get("white_value") == expected_white_value
def test_level_action_no_template(self):
"""Test setting brightness with optimistic template."""
assert setup.setup_component(
self.hass,
"light",
{
"light": {
"platform": "template",
"lights": {
"test_template_light": {
"value_template": "{{1 == 1}}",
"turn_on": {
"service": "light.turn_on",
"entity_id": "light.test_state",
},
"turn_off": {
"service": "light.turn_off",
"entity_id": "light.test_state",
},
"set_level": {
"service": "test.automation",
"data_template": {
"entity_id": "test.test_state",
"brightness": "{{brightness}}",
},
},
}
},
}
},
)
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get("light.test_template_light")
assert state.attributes.get("brightness") is None
common.turn_on(self.hass, "light.test_template_light", **{ATTR_BRIGHTNESS: 124})
self.hass.block_till_done()
assert len(self.calls) == 1
assert self.calls[0].data["brightness"] == 124
state = self.hass.states.get("light.test_template_light")
_LOGGER.info(str(state.attributes))
assert state is not None
assert state.attributes.get("brightness") == 124
@pytest.mark.parametrize(
"expected_level,template",
[
(255, "{{255}}"),
(None, "{{256}}"),
(None, "{{x - 12}}"),
(None, "{{ none }}"),
(None, ""),
],
)
def test_level_template(self, expected_level, template):
"""Test the template for the level."""
with assert_setup_component(1, "light"):
assert setup.setup_component(
self.hass,
"light",
{
"light": {
"platform": "template",
"lights": {
"test_template_light": {
"value_template": "{{ 1 == 1 }}",
"turn_on": {
"service": "light.turn_on",
"entity_id": "light.test_state",
},
"turn_off": {
"service": "light.turn_off",
"entity_id": "light.test_state",
},
"set_level": {
"service": "light.turn_on",
"data_template": {
"entity_id": "light.test_state",
"brightness": "{{brightness}}",
},
},
"level_template": template,
}
},
}
},
)
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get("light.test_template_light")
assert state is not None
assert state.attributes.get("brightness") == expected_level
@pytest.mark.parametrize(
"expected_temp,template",
[
(500, "{{500}}"),
(None, "{{501}}"),
(None, "{{x - 12}}"),
(None, "None"),
(None, "{{ none }}"),
(None, ""),
],
)
def test_temperature_template(self, expected_temp, template):
"""Test the template for the temperature."""
with assert_setup_component(1, "light"):
assert setup.setup_component(
self.hass,
"light",
{
"light": {
"platform": "template",
"lights": {
"test_template_light": {
"value_template": "{{ 1 == 1 }}",
"turn_on": {
"service": "light.turn_on",
"entity_id": "light.test_state",
},
"turn_off": {
"service": "light.turn_off",
"entity_id": "light.test_state",
},
"set_temperature": {
"service": "light.turn_on",
"data_template": {
"entity_id": "light.test_state",
"color_temp": "{{color_temp}}",
},
},
"temperature_template": template,
}
},
}
},
)
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get("light.test_template_light")
assert state is not None
assert state.attributes.get("color_temp") == expected_temp
def test_temperature_action_no_template(self):
"""Test setting temperature with optimistic template."""
assert setup.setup_component(
self.hass,
"light",
{
"light": {
"platform": "template",
"lights": {
"test_template_light": {
"value_template": "{{1 == 1}}",
"turn_on": {
"service": "light.turn_on",
"entity_id": "light.test_state",
},
"turn_off": {
"service": "light.turn_off",
"entity_id": "light.test_state",
},
"set_temperature": {
"service": "test.automation",
"data_template": {
"entity_id": "test.test_state",
"color_temp": "{{color_temp}}",
},
},
}
},
}
},
)
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get("light.test_template_light")
assert state.attributes.get("color_template") is None
common.turn_on(self.hass, "light.test_template_light", **{ATTR_COLOR_TEMP: 345})
self.hass.block_till_done()
assert len(self.calls) == 1
assert self.calls[0].data["color_temp"] == 345
state = self.hass.states.get("light.test_template_light")
_LOGGER.info(str(state.attributes))
assert state is not None
assert state.attributes.get("color_temp") == 345
def test_friendly_name(self):
"""Test the accessibility of the friendly_name attribute."""
with assert_setup_component(1, "light"):
assert setup.setup_component(
self.hass,
"light",
{
"light": {
"platform": "template",
"lights": {
"test_template_light": {
"friendly_name": "Template light",
"value_template": "{{ 1 == 1 }}",
"turn_on": {
"service": "light.turn_on",
"entity_id": "light.test_state",
},
"turn_off": {
"service": "light.turn_off",
"entity_id": "light.test_state",
},
"set_level": {
"service": "light.turn_on",
"data_template": {
"entity_id": "light.test_state",
"brightness": "{{brightness}}",
},
},
}
},
}
},
)
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get("light.test_template_light")
assert state is not None
assert state.attributes.get("friendly_name") == "Template light"
def test_icon_template(self):
"""Test icon template."""
with assert_setup_component(1, "light"):
assert setup.setup_component(
self.hass,
"light",
{
"light": {
"platform": "template",
"lights": {
"test_template_light": {
"friendly_name": "Template light",
"value_template": "{{ 1 == 1 }}",
"turn_on": {
"service": "light.turn_on",
"entity_id": "light.test_state",
},
"turn_off": {
"service": "light.turn_off",
"entity_id": "light.test_state",
},
"set_level": {
"service": "light.turn_on",
"data_template": {
"entity_id": "light.test_state",
"brightness": "{{brightness}}",
},
},
"icon_template": "{% if states.light.test_state.state %}"
"mdi:check"
"{% endif %}",
}
},
}
},
)
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get("light.test_template_light")
assert state.attributes.get("icon") == ""
state = self.hass.states.set("light.test_state", STATE_ON)
self.hass.block_till_done()
state = self.hass.states.get("light.test_template_light")
assert state.attributes["icon"] == "mdi:check"
def test_entity_picture_template(self):
"""Test entity_picture template."""
with assert_setup_component(1, "light"):
assert setup.setup_component(
self.hass,
"light",
{
"light": {
"platform": "template",
"lights": {
"test_template_light": {
"friendly_name": "Template light",
"value_template": "{{ 1 == 1 }}",
"turn_on": {
"service": "light.turn_on",
"entity_id": "light.test_state",
},
"turn_off": {
"service": "light.turn_off",
"entity_id": "light.test_state",
},
"set_level": {
"service": "light.turn_on",
"data_template": {
"entity_id": "light.test_state",
"brightness": "{{brightness}}",
},
},
"entity_picture_template": "{% if states.light.test_state.state %}"
"/local/light.png"
"{% endif %}",
}
},
}
},
)
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get("light.test_template_light")
assert state.attributes.get("entity_picture") == ""
state = self.hass.states.set("light.test_state", STATE_ON)
self.hass.block_till_done()
state = self.hass.states.get("light.test_template_light")
assert state.attributes["entity_picture"] == "/local/light.png"
def test_color_action_no_template(self):
"""Test setting color with optimistic template."""
assert setup.setup_component(
self.hass,
"light",
{
"light": {
"platform": "template",
"lights": {
"test_template_light": {
"value_template": "{{1 == 1}}",
"turn_on": {
"service": "light.turn_on",
"entity_id": "light.test_state",
},
"turn_off": {
"service": "light.turn_off",
"entity_id": "light.test_state",
},
"set_color": [
{
"service": "test.automation",
"data_template": {
"entity_id": "test.test_state",
"h": "{{h}}",
"s": "{{s}}",
},
},
{
"service": "test.automation",
"data_template": {
"entity_id": "test.test_state",
"s": "{{s}}",
"h": "{{h}}",
},
},
],
}
},
}
},
)
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get("light.test_template_light")
assert state.attributes.get("hs_color") is None
common.turn_on(
self.hass, "light.test_template_light", **{ATTR_HS_COLOR: (40, 50)}
)
self.hass.block_till_done()
assert len(self.calls) == 2
assert self.calls[0].data["h"] == 40
assert self.calls[0].data["s"] == 50
assert self.calls[1].data["h"] == 40
assert self.calls[1].data["s"] == 50
state = self.hass.states.get("light.test_template_light")
assert state is not None
_LOGGER.info(str(state.attributes))
assert self.calls[0].data["h"] == 40
assert self.calls[0].data["s"] == 50
assert self.calls[1].data["h"] == 40
assert self.calls[1].data["s"] == 50
@pytest.mark.parametrize(
"expected_hs,template",
[
((360, 100), "{{(360, 100)}}"),
((359.9, 99.9), "{{(359.9, 99.9)}}"),
(None, "{{(361, 100)}}"),
(None, "{{(360, 101)}}"),
(None, "{{x - 12}}"),
(None, ""),
(None, "{{ none }}"),
],
)
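# Hue must stay within [0, 360] and saturation within [0, 100]; out-of-range,
# non-numeric, empty, or none-returning templates should leave hs_color unset.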
def test_color_template(self, expected_hs, template):
"""Test the template for the color."""
with assert_setup_component(1, "light"):
assert setup.setup_component(
self.hass,
"light",
{
"light": {
"platform": "template",
"lights": {
"test_template_light": {
"value_template": "{{ 1 == 1 }}",
"turn_on": {
"service": "light.turn_on",
"entity_id": "light.test_state",
},
"turn_off": {
"service": "light.turn_off",
"entity_id": "light.test_state",
},
"set_color": [
{
"service": "input_number.set_value",
"data_template": {
"entity_id": "input_number.h",
"color_temp": "{{h}}",
},
}
],
"color_template": template,
}
},
}
},
)
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get("light.test_template_light")
assert state is not None
assert state.attributes.get("hs_color") == expected_hs
async def test_available_template_with_entities(hass):
"""Test availability templates with values from other entities."""
await setup.async_setup_component(
hass,
"light",
{
"light": {
"platform": "template",
"lights": {
"test_template_light": {
"availability_template": "{{ is_state('availability_boolean.state', 'on') }}",
"turn_on": {
"service": "light.turn_on",
"entity_id": "light.test_state",
},
"turn_off": {
"service": "light.turn_off",
"entity_id": "light.test_state",
},
"set_level": {
"service": "light.turn_on",
"data_template": {
"entity_id": "light.test_state",
"brightness": "{{brightness}}",
},
},
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
# When the availability template returns true...
hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_ON)
await hass.async_block_till_done()
# Device state should not be unavailable
assert hass.states.get("light.test_template_light").state != STATE_UNAVAILABLE
# When Availability template returns false
hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_OFF)
await hass.async_block_till_done()
# device state should be unavailable
assert hass.states.get("light.test_template_light").state == STATE_UNAVAILABLE
async def test_invalid_availability_template_keeps_component_available(hass, caplog):
"""Test that an invalid availability keeps the device available."""
await setup.async_setup_component(
hass,
"light",
{
"light": {
"platform": "template",
"lights": {
"test_template_light": {
"availability_template": "{{ x - 12 }}",
"turn_on": {
"service": "light.turn_on",
"entity_id": "light.test_state",
},
"turn_off": {
"service": "light.turn_off",
"entity_id": "light.test_state",
},
"set_level": {
"service": "light.turn_on",
"data_template": {
"entity_id": "light.test_state",
"brightness": "{{brightness}}",
},
},
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
assert hass.states.get("light.test_template_light").state != STATE_UNAVAILABLE
assert ("UndefinedError: 'x' is undefined") in caplog.text
async def test_unique_id(hass):
"""Test unique_id option only creates one light per id."""
await setup.async_setup_component(
hass,
"light",
{
"light": {
"platform": "template",
"lights": {
"test_template_light_01": {
"unique_id": "not-so-unique-anymore",
"turn_on": {
"service": "light.turn_on",
"entity_id": "light.test_state",
},
"turn_off": {
"service": "light.turn_off",
"entity_id": "light.test_state",
},
},
"test_template_light_02": {
"unique_id": "not-so-unique-anymore",
"turn_on": {
"service": "light.turn_on",
"entity_id": "light.test_state",
},
"turn_off": {
"service": "light.turn_off",
"entity_id": "light.test_state",
},
},
},
},
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
|
|
# -*- coding: utf-8 -*-
# File: conv2d.py
from ..compat import tfv1 as tf  # model code should avoid importing this directly
from ..tfutils.common import get_tf_version_tuple
from ..utils.argtools import get_data_format, shape2d, shape4d, log_once
from .common import VariableHolder, layer_register
from .tflayer import convert_to_tflayer_args, rename_get_variable
__all__ = ['Conv2D', 'Deconv2D', 'Conv2DTranspose']
@layer_register(log_shape=True)
@convert_to_tflayer_args(
args_names=['filters', 'kernel_size'],
name_mapping={
'out_channel': 'filters',
'kernel_shape': 'kernel_size',
'stride': 'strides',
})
def Conv2D(
inputs,
filters,
kernel_size,
strides=(1, 1),
padding='same',
data_format='channels_last',
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
split=1):
"""
Similar to `tf.layers.Conv2D`, but with some differences:
1. Default kernel initializer is variance_scaling_initializer(2.0).
2. Default padding is 'same'.
3. Support 'split' argument to do group convolution.
Variable Names:
* ``W``: weights
* ``b``: bias
"""
if kernel_initializer is None:
if get_tf_version_tuple() <= (1, 12):
kernel_initializer = tf.contrib.layers.variance_scaling_initializer(2.0) # deprecated
else:
kernel_initializer = tf.keras.initializers.VarianceScaling(2.0, distribution='untruncated_normal')
dilation_rate = shape2d(dilation_rate)
if split == 1 and dilation_rate == [1, 1]:
# tf.layers.Conv2D has bugs with dilations (https://github.com/tensorflow/tensorflow/issues/26797)
with rename_get_variable({'kernel': 'W', 'bias': 'b'}):
layer = tf.layers.Conv2D(
filters,
kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
_reuse=tf.get_variable_scope().reuse)
ret = layer.apply(inputs, scope=tf.get_variable_scope())
ret = tf.identity(ret, name='output')
ret.variables = VariableHolder(W=layer.kernel)
if use_bias:
ret.variables.b = layer.bias
else:
# group conv implementation
data_format = get_data_format(data_format, keras_mode=False)
in_shape = inputs.get_shape().as_list()
channel_axis = 3 if data_format == 'NHWC' else 1
in_channel = in_shape[channel_axis]
assert in_channel is not None, "[Conv2D] Input cannot have unknown channel!"
assert in_channel % split == 0, in_channel
assert kernel_regularizer is None and bias_regularizer is None and activity_regularizer is None, \
"Not supported by group conv or dilated conv!"
out_channel = filters
assert out_channel % split == 0, out_channel
assert dilation_rate == [1, 1] or get_tf_version_tuple() >= (1, 5), 'TF>=1.5 required for dilated conv.'
kernel_shape = shape2d(kernel_size)
filter_shape = kernel_shape + [in_channel // split, out_channel]
stride = shape4d(strides, data_format=data_format)
kwargs = {"data_format": data_format}
if get_tf_version_tuple() >= (1, 5):
kwargs['dilations'] = shape4d(dilation_rate, data_format=data_format)
# match the input dtype (e.g. tf.float16), since the default variable dtype is tf.float32
inputs_dtype = inputs.dtype
W = tf.get_variable(
'W', filter_shape, dtype=inputs_dtype, initializer=kernel_initializer)
if use_bias:
b = tf.get_variable('b', [out_channel], dtype=inputs_dtype, initializer=bias_initializer)
if split == 1:
conv = tf.nn.conv2d(inputs, W, stride, padding.upper(), **kwargs)
else:
conv = None
if get_tf_version_tuple() >= (1, 13):
try:
conv = tf.nn.conv2d(inputs, W, stride, padding.upper(), **kwargs)
except ValueError:
log_once("CUDNN group convolution support is only available with "
"https://github.com/tensorflow/tensorflow/pull/25818 . "
"Will fall back to a loop-based slow implementation instead!", 'warn')
if conv is None:
inputs = tf.split(inputs, split, channel_axis)
kernels = tf.split(W, split, 3)
outputs = [tf.nn.conv2d(i, k, stride, padding.upper(), **kwargs)
for i, k in zip(inputs, kernels)]
conv = tf.concat(outputs, channel_axis)
ret = tf.nn.bias_add(conv, b, data_format=data_format) if use_bias else conv
if activation is not None:
ret = activation(ret)
ret = tf.identity(ret, name='output')
ret.variables = VariableHolder(W=W)
if use_bias:
ret.variables.b = b
return ret
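# --- Illustrative sketch (not part of the original tensorpack source). ---
# A minimal, hedged example of the group-convolution path enabled by the
# `split` argument above; the input shape and layer name are assumptions and
# the function is never called, it only documents the intended call pattern.
def _example_group_conv():
    """Hedged usage sketch: a 2-group convolution on a dummy NHWC input."""
    # 16 input channels split into 2 groups of 8, producing 32 output channels.
    x = tf.placeholder(tf.float32, [None, 32, 32, 16], name='example_input')
    return Conv2D('example_conv', x, 32, 3, split=2)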
@layer_register(log_shape=True)
@convert_to_tflayer_args(
args_names=['filters', 'kernel_size', 'strides'],
name_mapping={
'out_channel': 'filters',
'kernel_shape': 'kernel_size',
'stride': 'strides',
})
def Conv2DTranspose(
inputs,
filters,
kernel_size,
strides=(1, 1),
padding='same',
data_format='channels_last',
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None):
"""
A wrapper around `tf.layers.Conv2DTranspose`.
Some differences to maintain backward-compatibility:
1. Default kernel initializer is variance_scaling_initializer(2.0).
2. Default padding is 'same'
Variable Names:
* ``W``: weights
* ``b``: bias
"""
if kernel_initializer is None:
if get_tf_version_tuple() <= (1, 12):
kernel_initializer = tf.contrib.layers.variance_scaling_initializer(2.0) # deprecated
else:
kernel_initializer = tf.keras.initializers.VarianceScaling(2.0, distribution='untruncated_normal')
if get_tf_version_tuple() <= (1, 12):
with rename_get_variable({'kernel': 'W', 'bias': 'b'}):
layer = tf.layers.Conv2DTranspose(
filters,
kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
_reuse=tf.get_variable_scope().reuse)
ret = layer.apply(inputs, scope=tf.get_variable_scope())
ret = tf.identity(ret, name='output')
ret.variables = VariableHolder(W=layer.kernel)
if use_bias:
ret.variables.b = layer.bias
else:
# Our own implementation, to avoid Keras bugs. https://github.com/tensorflow/tensorflow/issues/25946
assert kernel_regularizer is None and bias_regularizer is None and activity_regularizer is None, \
"Unsupported arguments due to Keras bug in TensorFlow 1.13"
data_format = get_data_format(data_format, keras_mode=False)
shape_dyn = tf.shape(inputs)
shape_sta = inputs.shape.as_list()
strides2d = shape2d(strides)
kernel_shape = shape2d(kernel_size)
assert padding.lower() in ['valid', 'same'], "Padding {} is not supported!".format(padding)
if padding.lower() == 'valid':
shape_res2d = [max(kernel_shape[0] - strides2d[0], 0),
max(kernel_shape[1] - strides2d[1], 0)]
else:
shape_res2d = shape2d(0)
if data_format == 'NCHW':
channels_in = shape_sta[1]
out_shape_dyn = tf.stack(
[shape_dyn[0], filters,
shape_dyn[2] * strides2d[0] + shape_res2d[0],
shape_dyn[3] * strides2d[1] + shape_res2d[1]])
out_shape3_sta = [filters,
None if shape_sta[2] is None else shape_sta[2] * strides2d[0] + shape_res2d[0],
None if shape_sta[3] is None else shape_sta[3] * strides2d[1] + shape_res2d[1]]
else:
channels_in = shape_sta[-1]
out_shape_dyn = tf.stack(
[shape_dyn[0],
shape_dyn[1] * strides2d[0] + shape_res2d[0],
shape_dyn[2] * strides2d[1] + shape_res2d[1],
filters])
out_shape3_sta = [None if shape_sta[1] is None else shape_sta[1] * strides2d[0] + shape_res2d[0],
None if shape_sta[2] is None else shape_sta[2] * strides2d[1] + shape_res2d[1],
filters]
inputs_dtype = inputs.dtype
W = tf.get_variable('W', kernel_shape + [filters, channels_in],
dtype=inputs_dtype, initializer=kernel_initializer)
if use_bias:
b = tf.get_variable('b', [filters], dtype=inputs_dtype, initializer=bias_initializer)
conv = tf.nn.conv2d_transpose(
inputs, W, out_shape_dyn,
shape4d(strides, data_format=data_format),
padding=padding.upper(),
data_format=data_format)
conv.set_shape(tf.TensorShape([shape_sta[0]] + out_shape3_sta))
ret = tf.nn.bias_add(conv, b, data_format=data_format) if use_bias else conv
if activation is not None:
ret = activation(ret)
ret = tf.identity(ret, name='output')
ret.variables = VariableHolder(W=W)
if use_bias:
ret.variables.b = b
return ret
Deconv2D = Conv2DTranspose
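# --- Illustrative sketch (not part of the original tensorpack source). ---
# Minimal, hedged usage of Conv2DTranspose via its Deconv2D alias; the input
# shape and layer name are assumptions, shown only to mirror the docstring.
def _example_deconv():
    """Hedged usage sketch: roughly 2x spatial upsampling with a transposed conv."""
    x = tf.placeholder(tf.float32, [None, 16, 16, 32], name='example_feature')
    return Deconv2D('example_deconv', x, 16, 4, strides=2)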
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Xcode supports build variable substitutions and CPP; sadly, that doesn't work
# because:
#
# 1. Xcode wants to do the Info.plist work before it runs any build phases,
# this means if we were to generate a .h file for INFOPLIST_PREFIX_HEADER
# we'd have to put it in another target so it runs in time.
# 2. Xcode also doesn't check to see if the header being used as a prefix for
# the Info.plist has changed. So even if we updated it, it's only looking
# at the modtime of the info.plist to see if that's changed.
#
# So, we work around all of this by making a script build phase that will run
# during the app build, and simply update the info.plist in place. This way
# by the time the app target is done, the info.plist is correct.
#
import optparse
import os
from os import environ as env
import plistlib
import re
import subprocess
import sys
import tempfile
TOP = os.path.join(env['SRCROOT'], '..')
def _GetOutput(args):
"""Runs a subprocess and waits for termination. Returns (stdout, returncode)
of the process. stderr is attached to the parent."""
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
return (stdout, proc.returncode)
def _GetOutputNoError(args):
"""Similar to _GetOutput() but ignores stderr. If there's an error launching
the child (like file not found), the exception will be caught and (None, 1)
will be returned to mimic quiet failure."""
try:
proc = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
return (None, 1)
(stdout, stderr) = proc.communicate()
return (stdout, proc.returncode)
def _RemoveKeys(plist, *keys):
"""Removes a varargs of keys from the plist."""
for key in keys:
try:
del plist[key]
except KeyError:
pass
def _AddVersionKeys(plist, version=None):
"""Adds the product version number into the plist. Returns True on success and
False on error. The error will be printed to stderr."""
if version:
match = re.match('\d+\.\d+\.(\d+\.\d+)$', version)
if not match:
print >>sys.stderr, 'Invalid version string specified: "%s"' % version
return False
full_version = match.group(0)
bundle_version = match.group(1)
else:
# Pull in the Chrome version number.
VERSION_TOOL = os.path.join(TOP, 'chrome/tools/build/version.py')
VERSION_FILE = os.path.join(TOP, 'chrome/VERSION')
(stdout, retval1) = _GetOutput([VERSION_TOOL, '-f', VERSION_FILE, '-t',
'@MAJOR@.@MINOR@.@BUILD@.@PATCH@'])
full_version = stdout.rstrip()
(stdout, retval2) = _GetOutput([VERSION_TOOL, '-f', VERSION_FILE, '-t',
'@BUILD@.@PATCH@'])
bundle_version = stdout.rstrip()
# If either of the two version commands finished with non-zero returncode,
# report the error up.
if retval1 or retval2:
return False
# Add public version info so "Get Info" works.
plist['CFBundleShortVersionString'] = full_version
# Honor the 429496.72.95 limit. The maximum comes from splitting 2^32 - 1
# into 6, 2, 2 digits. The limitation was present in Tiger and may have been
# fixed in a later OS release, but that hasn't been tested (it's easy enough
# to find out with "lsregister -dump").
# http://lists.apple.com/archives/carbon-dev/2006/Jun/msg00139.html
# BUILD will always be an increasing value, so BUILD.PATCH gives us something
# unique that meets what LS wants.
plist['CFBundleVersion'] = bundle_version
# Return with no error.
return True
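# Example of the keys produced above: a --version value of "45.0.2414.0"
# yields CFBundleShortVersionString = "45.0.2414.0" and CFBundleVersion =
# "2414.0" (the BUILD.PATCH portion captured by the regex).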
def _DoSCMKeys(plist, add_keys):
"""Adds the SCM information, visible in about:version, to property list. If
|add_keys| is True, it will insert the keys, otherwise it will remove them."""
scm_revision = None
if add_keys:
# Pull in the Chrome revision number.
VERSION_TOOL = os.path.join(TOP, 'chrome/tools/build/version.py')
LASTCHANGE_FILE = os.path.join(TOP, 'build/util/LASTCHANGE')
(stdout, retval) = _GetOutput([VERSION_TOOL, '-f', LASTCHANGE_FILE, '-t',
'@LASTCHANGE@'])
if retval:
return False
scm_revision = stdout.rstrip()
# Remove any existing key, then set it again if a revision was found.
_RemoveKeys(plist, 'SCMRevision')
if scm_revision != None:
plist['SCMRevision'] = scm_revision
elif add_keys:
print >>sys.stderr, 'Could not determine SCM revision. This may be OK.'
return True
def _DoPDFKeys(plist, add_keys):
"""Adds PDF support to the document types list. If add_keys is True, it will
add the type information dictionary. If it is False, it will remove it if
present."""
PDF_FILE_EXTENSION = 'pdf'
def __AddPDFKeys(sub_plist):
"""Writes the keys into a sub-dictionary of the plist."""
sub_plist['CFBundleTypeExtensions'] = [PDF_FILE_EXTENSION]
sub_plist['CFBundleTypeIconFile'] = 'document.icns'
sub_plist['CFBundleTypeMIMETypes'] = 'application/pdf'
sub_plist['CFBundleTypeName'] = 'PDF Document'
sub_plist['CFBundleTypeRole'] = 'Viewer'
DOCUMENT_TYPES_KEY = 'CFBundleDocumentTypes'
# First get the list of document types, creating it if necessary.
try:
extensions = plist[DOCUMENT_TYPES_KEY]
except KeyError:
# If this plist doesn't have a document types list, create one when adding
# keys; otherwise there is nothing to remove, so bail.
if not add_keys:
return
extensions = plist[DOCUMENT_TYPES_KEY] = []
# Loop over each entry in the list, looking for one that handles PDF types.
for i, ext in enumerate(extensions):
# Skip entries without extensions; otherwise check whether this one handles .pdf files.
if 'CFBundleTypeExtensions' not in ext:
continue
if PDF_FILE_EXTENSION in ext['CFBundleTypeExtensions']:
if add_keys:
# Overwrite the existing keys with new ones.
__AddPDFKeys(ext)
else:
# Otherwise, delete the entry entirely.
del extensions[i]
return
# No PDF entry exists. If one needs to be added, do so now.
if add_keys:
pdf_entry = {}
__AddPDFKeys(pdf_entry)
extensions.append(pdf_entry)
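# The entry written by __AddPDFKeys ends up in CFBundleDocumentTypes as
# {'CFBundleTypeExtensions': ['pdf'], 'CFBundleTypeIconFile': 'document.icns',
#  'CFBundleTypeMIMETypes': 'application/pdf', 'CFBundleTypeName': 'PDF Document',
#  'CFBundleTypeRole': 'Viewer'}.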
def _AddBreakpadKeys(plist, branding):
"""Adds the Breakpad keys. This must be called AFTER _AddVersionKeys() and
also requires the |branding| argument."""
plist['BreakpadReportInterval'] = '3600' # Deliberately a string.
plist['BreakpadProduct'] = '%s_Mac' % branding
plist['BreakpadProductDisplay'] = branding
plist['BreakpadVersion'] = plist['CFBundleShortVersionString']
# These are both deliberately strings and not boolean.
plist['BreakpadSendAndExit'] = 'YES'
plist['BreakpadSkipConfirm'] = 'YES'
def _RemoveBreakpadKeys(plist):
"""Removes any set Breakpad keys."""
_RemoveKeys(plist,
'BreakpadURL',
'BreakpadReportInterval',
'BreakpadProduct',
'BreakpadProductDisplay',
'BreakpadVersion',
'BreakpadSendAndExit',
'BreakpadSkipConfirm')
def _TagSuffixes():
# Keep this list sorted in the order that tag suffix components are to
# appear in a tag value. That is to say, it should be sorted per ASCII.
components = ('32bit', 'full')
assert tuple(sorted(components)) == components
components_len = len(components)
combinations = 1 << components_len
tag_suffixes = []
for combination in xrange(0, combinations):
tag_suffix = ''
for component_index in xrange(0, components_len):
if combination & (1 << component_index):
tag_suffix += '-' + components[component_index]
tag_suffixes.append(tag_suffix)
return tag_suffixes
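# With the components above ('32bit', 'full') this returns the four suffixes
# '', '-32bit', '-full' and '-32bit-full', in that order.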
def _AddKeystoneKeys(plist, bundle_identifier):
"""Adds the Keystone keys. This must be called AFTER _AddVersionKeys() and
also requires the |bundle_identifier| argument (com.example.product)."""
plist['KSVersion'] = plist['CFBundleShortVersionString']
plist['KSProductID'] = bundle_identifier
plist['KSUpdateURL'] = 'https://tools.google.com/service/update2'
_RemoveKeys(plist, 'KSChannelID')
for tag_suffix in _TagSuffixes():
if tag_suffix:
plist['KSChannelID' + tag_suffix] = tag_suffix
def _RemoveKeystoneKeys(plist):
"""Removes any set Keystone keys."""
_RemoveKeys(plist,
'KSVersion',
'KSProductID',
'KSUpdateURL')
tag_keys = []
for tag_suffix in _TagSuffixes():
tag_keys.append('KSChannelID' + tag_suffix)
_RemoveKeys(plist, *tag_keys)
def Main(argv):
parser = optparse.OptionParser('%prog [options]')
parser.add_option('--breakpad', dest='use_breakpad', action='store',
type='int', default=False, help='Enable Breakpad [1 or 0]')
parser.add_option('--breakpad_uploads', dest='breakpad_uploads',
action='store', type='int', default=False,
help='Enable Breakpad\'s uploading of crash dumps [1 or 0]')
parser.add_option('--keystone', dest='use_keystone', action='store',
type='int', default=False, help='Enable Keystone [1 or 0]')
parser.add_option('--scm', dest='add_scm_info', action='store', type='int',
default=True, help='Add SCM metadata [1 or 0]')
parser.add_option('--pdf', dest='add_pdf_support', action='store', type='int',
default=False, help='Add PDF file handler support [1 or 0]')
parser.add_option('--branding', dest='branding', action='store',
type='string', default=None, help='The branding of the binary')
parser.add_option('--bundle_id', dest='bundle_identifier',
action='store', type='string', default=None,
help='The bundle id of the binary')
parser.add_option('--version', dest='version', action='store', type='string',
default=None, help='The version string [major.minor.build.patch]')
(options, args) = parser.parse_args(argv)
if len(args) > 0:
print >>sys.stderr, parser.get_usage()
return 1
# Read the plist into its parsed format.
DEST_INFO_PLIST = os.path.join(env['TARGET_BUILD_DIR'], env['INFOPLIST_PATH'])
plist = plistlib.readPlist(DEST_INFO_PLIST)
# Insert the product version.
if not _AddVersionKeys(plist, version=options.version):
return 2
# Add Breakpad if configured to do so.
if options.use_breakpad:
if options.branding is None:
print >>sys.stderr, 'Use of Breakpad requires branding.'
return 1
_AddBreakpadKeys(plist, options.branding)
if options.breakpad_uploads:
plist['BreakpadURL'] = 'https://clients2.google.com/cr/report'
else:
# This allows crash dumping to a file without uploading the
# dump, for testing purposes. Breakpad does not recognise
# "none" as a special value, but this does stop crash dump
# uploading from happening. We need to specify something
# because if "BreakpadURL" is not present, Breakpad will not
# register its crash handler and no crash dumping will occur.
plist['BreakpadURL'] = 'none'
else:
_RemoveBreakpadKeys(plist)
# Only add Keystone in Release builds.
if options.use_keystone and env['CONFIGURATION'] == 'Release':
if options.bundle_identifier is None:
print >>sys.stderr, 'Use of Keystone requires the bundle id.'
return 1
_AddKeystoneKeys(plist, options.bundle_identifier)
else:
_RemoveKeystoneKeys(plist)
# Adds or removes any SCM keys.
if not _DoSCMKeys(plist, options.add_scm_info):
return 3
# Adds or removes the PDF file handler entry.
_DoPDFKeys(plist, options.add_pdf_support)
# Now that all keys have been mutated, rewrite the file.
temp_info_plist = tempfile.NamedTemporaryFile()
plistlib.writePlist(plist, temp_info_plist.name)
# Info.plist will work perfectly well in any plist format, but traditionally
# applications use xml1 for this, so convert it to ensure that it's valid.
proc = subprocess.Popen(['plutil', '-convert', 'xml1', '-o', DEST_INFO_PLIST,
temp_info_plist.name])
proc.wait()
return proc.returncode
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
|
|
"""The tests for the MQTT statestream component."""
import homeassistant.components.mqtt_statestream as statestream
from homeassistant.core import State
from homeassistant.setup import async_setup_component
from tests.async_mock import ANY, call
from tests.common import mock_state_change_event
async def add_statestream(
hass,
base_topic=None,
publish_attributes=None,
publish_timestamps=None,
publish_include=None,
publish_exclude=None,
):
"""Add a mqtt_statestream component."""
config = {}
if base_topic:
config["base_topic"] = base_topic
if publish_attributes:
config["publish_attributes"] = publish_attributes
if publish_timestamps:
config["publish_timestamps"] = publish_timestamps
if publish_include:
config["include"] = publish_include
if publish_exclude:
config["exclude"] = publish_exclude
return await async_setup_component(
hass, statestream.DOMAIN, {statestream.DOMAIN: config}
)
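# For reference: add_statestream(hass, base_topic="pub", publish_attributes=True)
# passes {"base_topic": "pub", "publish_attributes": True} to the statestream
# component, which then publishes the state of e.g. "fake.entity" under
# "pub/fake/entity/state", as the tests below assert.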
async def test_fails_with_no_base(hass, mqtt_mock):
"""Setup should fail if no base_topic is set."""
assert await add_statestream(hass) is False
async def test_setup_succeeds_without_attributes(hass, mqtt_mock):
"""Test the success of the setup with a valid base_topic."""
assert await add_statestream(hass, base_topic="pub")
async def test_setup_succeeds_with_attributes(hass, mqtt_mock):
"""Test setup with a valid base_topic and publish_attributes."""
assert await add_statestream(hass, base_topic="pub", publish_attributes=True)
async def test_state_changed_event_sends_message(hass, mqtt_mock):
"""Test the sending of a new message if event changed."""
e_id = "fake.entity"
base_topic = "pub"
# Add the statestream component for publishing state updates
assert await add_statestream(hass, base_topic=base_topic)
await hass.async_block_till_done()
# Reset the mock because it will have already gotten calls for the
# mqtt_statestream state change on initialization, etc.
mqtt_mock.async_publish.reset_mock()
# Set a state of an entity
mock_state_change_event(hass, State(e_id, "on"))
await hass.async_block_till_done()
await hass.async_block_till_done()
# Make sure 'on' was published to pub/fake/entity/state
mqtt_mock.async_publish.assert_called_with("pub/fake/entity/state", "on", 1, True)
assert mqtt_mock.async_publish.called
async def test_state_changed_event_sends_message_and_timestamp(hass, mqtt_mock):
"""Test the sending of a message and timestamps if event changed."""
e_id = "another.entity"
base_topic = "pub"
# Add the statestream component for publishing state updates
assert await add_statestream(
hass, base_topic=base_topic, publish_attributes=None, publish_timestamps=True
)
await hass.async_block_till_done()
# Reset the mock because it will have already gotten calls for the
# mqtt_statestream state change on initialization, etc.
mqtt_mock.async_publish.reset_mock()
# Set a state of an entity
mock_state_change_event(hass, State(e_id, "on"))
await hass.async_block_till_done()
await hass.async_block_till_done()
# Make sure 'on' was published to pub/another/entity/state
calls = [
call.async_publish("pub/another/entity/state", "on", 1, True),
call.async_publish("pub/another/entity/last_changed", ANY, 1, True),
call.async_publish("pub/another/entity/last_updated", ANY, 1, True),
]
mqtt_mock.async_publish.assert_has_calls(calls, any_order=True)
assert mqtt_mock.async_publish.called
async def test_state_changed_attr_sends_message(hass, mqtt_mock):
"""Test the sending of a new message if attribute changed."""
e_id = "fake.entity"
base_topic = "pub"
# Add the statestream component for publishing state updates
assert await add_statestream(hass, base_topic=base_topic, publish_attributes=True)
await hass.async_block_till_done()
# Reset the mock because it will have already gotten calls for the
# mqtt_statestream state change on initialization, etc.
mqtt_mock.async_publish.reset_mock()
test_attributes = {"testing": "YES", "list": ["a", "b", "c"], "bool": False}
# Set a state of an entity
mock_state_change_event(hass, State(e_id, "off", attributes=test_attributes))
await hass.async_block_till_done()
await hass.async_block_till_done()
# Make sure 'off' and the attributes were published to pub/fake/entity/*
calls = [
call.async_publish("pub/fake/entity/state", "off", 1, True),
call.async_publish("pub/fake/entity/testing", '"YES"', 1, True),
call.async_publish("pub/fake/entity/list", '["a", "b", "c"]', 1, True),
call.async_publish("pub/fake/entity/bool", "false", 1, True),
]
mqtt_mock.async_publish.assert_has_calls(calls, any_order=True)
assert mqtt_mock.async_publish.called
async def test_state_changed_event_include_domain(hass, mqtt_mock):
"""Test that filtering on included domain works as expected."""
base_topic = "pub"
incl = {"domains": ["fake"]}
excl = {}
# Add the statestream component for publishing state updates
# Set the filter to allow fake.* items
assert await add_statestream(
hass, base_topic=base_topic, publish_include=incl, publish_exclude=excl
)
await hass.async_block_till_done()
# Reset the mock because it will have already gotten calls for the
# mqtt_statestream state change on initialization, etc.
mqtt_mock.async_publish.reset_mock()
# Set a state of an entity
mock_state_change_event(hass, State("fake.entity", "on"))
await hass.async_block_till_done()
await hass.async_block_till_done()
# Make sure 'on' was published to pub/fake/entity/state
mqtt_mock.async_publish.assert_called_with("pub/fake/entity/state", "on", 1, True)
assert mqtt_mock.async_publish.called
mqtt_mock.async_publish.reset_mock()
# Set a state of an entity that shouldn't be included
mock_state_change_event(hass, State("fake2.entity", "on"))
await hass.async_block_till_done()
await hass.async_block_till_done()
assert not mqtt_mock.async_publish.called
async def test_state_changed_event_include_entity(hass, mqtt_mock):
"""Test that filtering on included entity works as expected."""
base_topic = "pub"
incl = {"entities": ["fake.entity"]}
excl = {}
# Add the statestream component for publishing state updates
# Set the filter to allow only fake.entity
assert await add_statestream(
hass, base_topic=base_topic, publish_include=incl, publish_exclude=excl
)
await hass.async_block_till_done()
# Reset the mock because it will have already gotten calls for the
# mqtt_statestream state change on initialization, etc.
mqtt_mock.async_publish.reset_mock()
# Set a state of an entity
mock_state_change_event(hass, State("fake.entity", "on"))
await hass.async_block_till_done()
await hass.async_block_till_done()
# Make sure 'on' was published to pub/fake/entity/state
mqtt_mock.async_publish.assert_called_with("pub/fake/entity/state", "on", 1, True)
assert mqtt_mock.async_publish.called
mqtt_mock.async_publish.reset_mock()
# Set a state of an entity that shouldn't be included
mock_state_change_event(hass, State("fake.entity2", "on"))
await hass.async_block_till_done()
await hass.async_block_till_done()
assert not mqtt_mock.async_publish.called
async def test_state_changed_event_exclude_domain(hass, mqtt_mock):
"""Test that filtering on excluded domain works as expected."""
base_topic = "pub"
incl = {}
excl = {"domains": ["fake2"]}
# Add the statestream component for publishing state updates
# Set the filter to exclude fake2.* items
assert await add_statestream(
hass, base_topic=base_topic, publish_include=incl, publish_exclude=excl
)
await hass.async_block_till_done()
# Reset the mock because it will have already gotten calls for the
# mqtt_statestream state change on initialization, etc.
mqtt_mock.async_publish.reset_mock()
# Set a state of an entity
mock_state_change_event(hass, State("fake.entity", "on"))
await hass.async_block_till_done()
await hass.async_block_till_done()
# Make sure 'on' was published to pub/fake/entity/state
mqtt_mock.async_publish.assert_called_with("pub/fake/entity/state", "on", 1, True)
assert mqtt_mock.async_publish.called
mqtt_mock.async_publish.reset_mock()
# Set a state of an entity that shouldn't be included
mock_state_change_event(hass, State("fake2.entity", "on"))
await hass.async_block_till_done()
await hass.async_block_till_done()
assert not mqtt_mock.async_publish.called
async def test_state_changed_event_exclude_entity(hass, mqtt_mock):
"""Test that filtering on excluded entity works as expected."""
base_topic = "pub"
incl = {}
excl = {"entities": ["fake.entity2"]}
# Add the statestream component for publishing state updates
# Set the filter to exclude fake.entity2
assert await add_statestream(
hass, base_topic=base_topic, publish_include=incl, publish_exclude=excl
)
await hass.async_block_till_done()
# Reset the mock because it will have already gotten calls for the
# mqtt_statestream state change on initialization, etc.
mqtt_mock.async_publish.reset_mock()
# Set a state of an entity
mock_state_change_event(hass, State("fake.entity", "on"))
await hass.async_block_till_done()
await hass.async_block_till_done()
# Make sure 'on' was published to pub/fake/entity/state
mqtt_mock.async_publish.assert_called_with("pub/fake/entity/state", "on", 1, True)
assert mqtt_mock.async_publish.called
mqtt_mock.async_publish.reset_mock()
# Set a state of an entity that shouldn't be included
mock_state_change_event(hass, State("fake.entity2", "on"))
await hass.async_block_till_done()
await hass.async_block_till_done()
assert not mqtt_mock.async_publish.called
async def test_state_changed_event_exclude_domain_include_entity(hass, mqtt_mock):
"""Test filtering with excluded domain and included entity."""
base_topic = "pub"
incl = {"entities": ["fake.entity"]}
excl = {"domains": ["fake"]}
# Add the statestream component for publishing state updates
# Exclude the fake domain but explicitly include fake.entity
assert await add_statestream(
hass, base_topic=base_topic, publish_include=incl, publish_exclude=excl
)
await hass.async_block_till_done()
# Reset the mock because it will have already gotten calls for the
# mqtt_statestream state change on initialization, etc.
mqtt_mock.async_publish.reset_mock()
# Set a state of an entity
mock_state_change_event(hass, State("fake.entity", "on"))
await hass.async_block_till_done()
await hass.async_block_till_done()
# Make sure 'on' was published to pub/fake/entity/state
mqtt_mock.async_publish.assert_called_with("pub/fake/entity/state", "on", 1, True)
assert mqtt_mock.async_publish.called
mqtt_mock.async_publish.reset_mock()
# Set a state of an entity that shouldn't be included
mock_state_change_event(hass, State("fake.entity2", "on"))
await hass.async_block_till_done()
await hass.async_block_till_done()
assert not mqtt_mock.async_publish.called
async def test_state_changed_event_include_domain_exclude_entity(hass, mqtt_mock):
"""Test filtering with included domain and excluded entity."""
base_topic = "pub"
incl = {"domains": ["fake"]}
excl = {"entities": ["fake.entity2"]}
# Add the statestream component for publishing state updates
# Include the fake domain but exclude fake.entity2
assert await add_statestream(
hass, base_topic=base_topic, publish_include=incl, publish_exclude=excl
)
await hass.async_block_till_done()
# Reset the mock because it will have already gotten calls for the
# mqtt_statestream state change on initialization, etc.
mqtt_mock.async_publish.reset_mock()
# Set a state of an entity
mock_state_change_event(hass, State("fake.entity", "on"))
await hass.async_block_till_done()
await hass.async_block_till_done()
# Make sure 'on' was published to pub/fake/entity/state
mqtt_mock.async_publish.assert_called_with("pub/fake/entity/state", "on", 1, True)
assert mqtt_mock.async_publish.called
mqtt_mock.async_publish.reset_mock()
# Set a state of an entity that shouldn't be included
mock_state_change_event(hass, State("fake.entity2", "on"))
await hass.async_block_till_done()
await hass.async_block_till_done()
assert not mqtt_mock.async_publish.called
async def test_state_changed_event_include_globs(hass, mqtt_mock):
"""Test that filtering on included glob works as expected."""
base_topic = "pub"
incl = {"entity_globs": ["*.included_*"]}
excl = {}
# Add the statestream component for publishing state updates
# Set the filter to allow *.included_* items
assert await add_statestream(
hass, base_topic=base_topic, publish_include=incl, publish_exclude=excl
)
await hass.async_block_till_done()
# Reset the mock because it will have already gotten calls for the
# mqtt_statestream state change on initialization, etc.
mqtt_mock.async_publish.reset_mock()
# Set a state of an entity with included glob
mock_state_change_event(hass, State("fake2.included_entity", "on"))
await hass.async_block_till_done()
await hass.async_block_till_done()
# Make sure 'on' was published to pub/fake2/included_entity/state
mqtt_mock.async_publish.assert_called_with(
"pub/fake2/included_entity/state", "on", 1, True
)
assert mqtt_mock.async_publish.called
mqtt_mock.async_publish.reset_mock()
# Set a state of an entity that shouldn't be included
mock_state_change_event(hass, State("fake2.entity", "on"))
await hass.async_block_till_done()
await hass.async_block_till_done()
assert not mqtt_mock.async_publish.called
async def test_state_changed_event_exclude_globs(hass, mqtt_mock):
"""Test that filtering on excluded globs works as expected."""
base_topic = "pub"
incl = {}
excl = {"entity_globs": ["*.excluded_*"]}
# Add the statestream component for publishing state updates
# Set the filter to exclude *.excluded_* items
assert await add_statestream(
hass, base_topic=base_topic, publish_include=incl, publish_exclude=excl
)
await hass.async_block_till_done()
# Reset the mock because it will have already gotten calls for the
# mqtt_statestream state change on initialization, etc.
mqtt_mock.async_publish.reset_mock()
# Set a state of an entity
mock_state_change_event(hass, State("fake.entity", "on"))
await hass.async_block_till_done()
await hass.async_block_till_done()
# Make sure 'on' was published to pub/fake/entity/state
mqtt_mock.async_publish.assert_called_with("pub/fake/entity/state", "on", 1, True)
assert mqtt_mock.async_publish.called
mqtt_mock.async_publish.reset_mock()
# Set a state of an entity that shouldn't be included by glob
mock_state_change_event(hass, State("fake.excluded_entity", "on"))
await hass.async_block_till_done()
await hass.async_block_till_done()
assert not mqtt_mock.async_publish.called
async def test_state_changed_event_exclude_domain_globs_include_entity(hass, mqtt_mock):
"""Test filtering with excluded domain and glob and included entity."""
base_topic = "pub"
incl = {"entities": ["fake.entity"]}
excl = {"domains": ["fake"], "entity_globs": ["*.excluded_*"]}
# Add the statestream component for publishing state updates
# Exclude the fake domain and *.excluded_* globs, but include fake.entity
assert await add_statestream(
hass, base_topic=base_topic, publish_include=incl, publish_exclude=excl
)
await hass.async_block_till_done()
# Reset the mock because it will have already gotten calls for the
# mqtt_statestream state change on initialization, etc.
mqtt_mock.async_publish.reset_mock()
# Set a state of an entity
mock_state_change_event(hass, State("fake.entity", "on"))
await hass.async_block_till_done()
await hass.async_block_till_done()
# Make sure 'on' was published to pub/fake/entity/state
mqtt_mock.async_publish.assert_called_with("pub/fake/entity/state", "on", 1, True)
assert mqtt_mock.async_publish.called
mqtt_mock.async_publish.reset_mock()
# Set a state of an entity that doesn't match any filters
mock_state_change_event(hass, State("fake2.included_entity", "on"))
await hass.async_block_till_done()
await hass.async_block_till_done()
# Make sure 'on' was published to pub/fake2/included_entity/state
mqtt_mock.async_publish.assert_called_with(
"pub/fake2/included_entity/state", "on", 1, True
)
assert mqtt_mock.async_publish.called
mqtt_mock.async_publish.reset_mock()
# Set a state of an entity that shouldn't be included by domain
mock_state_change_event(hass, State("fake.entity2", "on"))
await hass.async_block_till_done()
await hass.async_block_till_done()
assert not mqtt_mock.async_publish.called
mqtt_mock.async_publish.reset_mock()
# Set a state of an entity that shouldn't be included by glob
mock_state_change_event(hass, State("fake.excluded_entity", "on"))
await hass.async_block_till_done()
await hass.async_block_till_done()
assert not mqtt_mock.async_publish.called
async def test_state_changed_event_include_domain_globs_exclude_entity(hass, mqtt_mock):
"""Test filtering with included domain and glob and excluded entity."""
base_topic = "pub"
incl = {"domains": ["fake"], "entity_globs": ["*.included_*"]}
excl = {"entities": ["fake.entity2"]}
# Add the statestream component for publishing state updates
# Include the fake domain and *.included_* globs, but exclude fake.entity2
assert await add_statestream(
hass, base_topic=base_topic, publish_include=incl, publish_exclude=excl
)
await hass.async_block_till_done()
# Reset the mock because it will have already gotten calls for the
# mqtt_statestream state change on initialization, etc.
mqtt_mock.async_publish.reset_mock()
# Set a state of an entity included by domain
mock_state_change_event(hass, State("fake.entity", "on"))
await hass.async_block_till_done()
await hass.async_block_till_done()
# Make sure 'on' was published to pub/fake/entity/state
mqtt_mock.async_publish.assert_called_with("pub/fake/entity/state", "on", 1, True)
assert mqtt_mock.async_publish.called
mqtt_mock.async_publish.reset_mock()
# Set a state of an entity included by glob
mock_state_change_event(hass, State("fake.included_entity", "on"))
await hass.async_block_till_done()
await hass.async_block_till_done()
# Make sure 'on' was published to pub/fake/included_entity/state
mqtt_mock.async_publish.assert_called_with(
"pub/fake/included_entity/state", "on", 1, True
)
assert mqtt_mock.async_publish.called
mqtt_mock.async_publish.reset_mock()
# Set a state of an entity that shouldn't be included
mock_state_change_event(hass, State("fake.entity2", "on"))
await hass.async_block_till_done()
await hass.async_block_till_done()
assert not mqtt_mock.async_publish.called
mqtt_mock.async_publish.reset_mock()
# Set a state of an entity that doesn't match any filters
mock_state_change_event(hass, State("fake2.entity", "on"))
await hass.async_block_till_done()
await hass.async_block_till_done()
assert not mqtt_mock.async_publish.called
|
|
from proximal.tests.base_test import BaseTest
import proximal as px
from proximal.lin_ops.vstack import vstack
from proximal.algorithms import admm, pc, hqs, ladmm, absorb_offset
from proximal.utils.cuda_codegen import PyCudaAdapter
import cvxpy as cvx
import numpy as np
class TestAlgs(BaseTest):
def test_admm(self):
"""Test ADMM algorithm.
"""
X = px.Variable((10, 5))
B = np.reshape(np.arange(50), (10, 5)) * 1.
prox_fns = [px.sum_squares(X, b=B)]
sltn = admm.solve(prox_fns, [], 1.0, eps_abs=1e-4, eps_rel=1e-4)
self.assertItemsAlmostEqual(X.value, B, places=2)
self.assertAlmostEqual(sltn, 0)
prox_fns = [px.norm1(X, b=B, beta=2)]
sltn = admm.solve(prox_fns, [], 1.0)
self.assertItemsAlmostEqual(X.value, B / 2., places=2)
self.assertAlmostEqual(sltn, 0)
prox_fns = [px.norm1(X), px.sum_squares(X, b=B)]
sltn = admm.solve(prox_fns, [], 1.0, eps_rel=1e-5, eps_abs=1e-5)
cvx_X = cvx.Variable(10, 5)
cost = cvx.sum_squares(cvx_X - B) + cvx.norm(cvx_X, 1)
prob = cvx.Problem(cvx.Minimize(cost))
prob.solve()
self.assertItemsAlmostEqual(X.value, cvx_X.value, places=2)
self.assertAlmostEqual(sltn, prob.value)
psi_fns, omega_fns = admm.partition(prox_fns)
sltn = admm.solve(psi_fns, omega_fns, 1.0, eps_rel=1e-5, eps_abs=1e-5)
self.assertItemsAlmostEqual(X.value, cvx_X.value, places=2)
self.assertAlmostEqual(sltn, prob.value)
prox_fns = [px.norm1(X)]
quad_funcs = [px.sum_squares(X, b=B)]
sltn = admm.solve(prox_fns, quad_funcs, 1.0, eps_rel=1e-5, eps_abs=1e-5)
self.assertItemsAlmostEqual(X.value, cvx_X.value, places=2)
self.assertAlmostEqual(sltn, prob.value)
# With parameters for px.sum_squares
prox_fns = [px.norm1(X)]
quad_funcs = [px.sum_squares(X, b=B, alpha=0.1, beta=2., gamma=1, c=B)]
sltn = admm.solve(prox_fns, quad_funcs, 1.0, eps_rel=1e-5, eps_abs=1e-5)
cvx_X = cvx.Variable(10, 5)
cost = 0.1 * cvx.sum_squares(2 * cvx_X - B) + cvx.sum_squares(cvx_X) + \
cvx.norm(cvx_X, 1) + cvx.trace(B.T * cvx_X)
prob = cvx.Problem(cvx.Minimize(cost))
prob.solve()
self.assertItemsAlmostEqual(X.value, cvx_X.value, places=2)
self.assertAlmostEqual(sltn, prob.value, places=3)
prox_fns = [px.norm1(X)]
quad_funcs = [px.sum_squares(X - B, alpha=0.1, beta=2., gamma=1, c=B)]
quad_funcs[0] = absorb_offset(quad_funcs[0])
sltn = admm.solve(prox_fns, quad_funcs, 1.0, eps_rel=1e-5, eps_abs=1e-5)
self.assertItemsAlmostEqual(X.value, cvx_X.value, places=2)
self.assertAlmostEqual(sltn, prob.value, places=3)
prox_fns = [px.norm1(X)]
cvx_X = cvx.Variable(10, 5)
# With linear operators.
kernel = np.array([1, 2, 3])
x = px.Variable(3)
b = np.array([-41, 413, 2])
prox_fns = [px.nonneg(x), px.sum_squares(px.conv(kernel, x), b=b)]
sltn = admm.solve(prox_fns, [], 1.0, eps_abs=1e-5, eps_rel=1e-5)
kernel_mat = np.matrix("2 1 3; 3 2 1; 1 3 2")
cvx_X = cvx.Variable(3)
cost = cvx.norm(kernel_mat * cvx_X - b)
prob = cvx.Problem(cvx.Minimize(cost), [cvx_X >= 0])
prob.solve()
self.assertItemsAlmostEqual(x.value, cvx_X.value, places=2)
self.assertAlmostEqual(np.sqrt(sltn), prob.value, places=2)
prox_fns = [px.nonneg(x)]
quad_funcs = [px.sum_squares(px.conv(kernel, x), b=b)]
sltn = admm.solve(prox_fns, quad_funcs, 1.0, eps_abs=1e-5, eps_rel=1e-5)
self.assertItemsAlmostEqual(x.value, cvx_X.value, places=2)
self.assertAlmostEqual(np.sqrt(sltn), prob.value, places=2)
def test_pock_chambolle(self):
self._test_pock_chambolle('numpy')
def test_pock_chambolle_cuda(self):
self._test_pock_chambolle('pycuda')
def _test_pock_chambolle(self, impl):
"""Test pock chambolle algorithm.
"""
#print()
#print("----------------------",impl,"-------------------------")
if impl == 'pycuda':
kw = {'adapter': PyCudaAdapter()}
else:
kw = {}
X = px.Variable((10, 5))
B = np.reshape(np.arange(50), (10, 5))
prox_fns = [px.sum_squares(X, b=B)]
sltn = pc.solve(prox_fns, [], 1.0, 1.0, 1.0, eps_rel=1e-5, eps_abs=1e-5, **kw)
self.assertItemsAlmostEqual(X.value, B, places=2)
self.assertAlmostEqual(sltn, 0)
prox_fns = [px.norm1(X, b=B, beta=2)]
sltn = pc.solve(prox_fns, [], 1.0, 1.0, 1.0, eps_rel=1e-5, eps_abs=1e-5, **kw)
self.assertItemsAlmostEqual(X.value, B / 2., places=2)
self.assertAlmostEqual(sltn, 0, places=2)
prox_fns = [px.norm1(X), px.sum_squares(X, b=B)]
#print("----------------------------------------------------")
#print("----------------------------------------------------")
sltn = pc.solve(prox_fns, [], 0.5, 1.0, 1.0, eps_rel=1e-5, eps_abs=1e-5, conv_check=1, **kw)
cvx_X = cvx.Variable(10, 5)
cost = cvx.sum_squares(cvx_X - B) + cvx.norm(cvx_X, 1)
prob = cvx.Problem(cvx.Minimize(cost))
prob.solve()
self.assertItemsAlmostEqual(X.value, cvx_X.value, places=2)
self.assertAlmostEqual(sltn, prob.value)
psi_fns, omega_fns = pc.partition(prox_fns)
sltn = pc.solve(psi_fns, omega_fns, 0.5, 1.0, 1.0,
eps_abs=1e-5, eps_rel=1e-5, **kw)
self.assertItemsAlmostEqual(X.value, cvx_X.value, places=2)
self.assertAlmostEqual(sltn, prob.value)
# With linear operators.
kernel = np.array([1, 2, 3])
kernel_mat = np.matrix("2 1 3; 3 2 1; 1 3 2")
x = px.Variable(3)
b = np.array([-41, 413, 2])
prox_fns = [px.nonneg(x), px.sum_squares(px.conv(kernel, x), b=b)]
sltn = pc.solve(prox_fns, [], 0.1, 0.1, 1.0, max_iters=3000,
eps_abs=1e-5, eps_rel=1e-5, **kw)
cvx_X = cvx.Variable(3)
cost = cvx.norm(kernel_mat * cvx_X - b)
prob = cvx.Problem(cvx.Minimize(cost), [cvx_X >= 0])
prob.solve()
self.assertItemsAlmostEqual(x.value, cvx_X.value, places=2)
psi_fns, omega_fns = pc.partition(prox_fns)
sltn = pc.solve(psi_fns, omega_fns, 0.1, 0.1, 1.0, max_iters=3000,
eps_abs=1e-5, eps_rel=1e-5, **kw)
self.assertItemsAlmostEqual(x.value, cvx_X.value, places=2)
# TODO
# Multiple variables.
x = px.Variable(1)
y = px.Variable(1)
prox_fns = [px.nonneg(x), px.sum_squares(vstack([x,y]), b=np.arange(2))]
sltn = pc.solve(prox_fns, [prox_fns[-1]], 0.1, 0.1, 1.0,
max_iters=3000, eps_abs=1e-5, eps_rel=1e-5, try_diagonalize=False)
self.assertItemsAlmostEqual(x.value, [0])
self.assertItemsAlmostEqual(y.value, [1])
sltn = pc.solve(prox_fns, [prox_fns[-1]], 0.1, 0.1, 1.0,
max_iters=3000, eps_abs=1e-5, eps_rel=1e-5, try_diagonalize=True)
self.assertItemsAlmostEqual(x.value, [0])
self.assertItemsAlmostEqual(y.value, [1])
def test_half_quadratic_splitting(self):
"""Test half quadratic splitting.
"""
X = px.Variable((10, 5))
B = np.reshape(np.arange(50), (10, 5))
prox_fns = [px.sum_squares(X, b=B)]
sltn = hqs.solve(prox_fns, [], eps_rel=1e-4, max_iters=100, max_inner_iters=50)
self.assertItemsAlmostEqual(X.value, B, places=2)
self.assertAlmostEqual(sltn, 0)
prox_fns = [px.norm1(X, b=B, beta=2)]
sltn = hqs.solve(prox_fns, [], eps_rel=1e-4, max_iters=100, max_inner_iters=50)
self.assertItemsAlmostEqual(X.value, B / 2., places=2)
self.assertAlmostEqual(sltn, 0)
prox_fns = [px.norm1(X), px.sum_squares(X, b=B)]
sltn = hqs.solve(prox_fns, [], eps_rel=1e-7,
rho_0=1.0, rho_scale=np.sqrt(2.0) * 2.0, rho_max=2**16,
max_iters=20, max_inner_iters=500)
cvx_X = cvx.Variable(10, 5)
cost = cvx.sum_squares(cvx_X - B) + cvx.norm(cvx_X, 1)
prob = cvx.Problem(cvx.Minimize(cost))
prob.solve()
self.assertItemsAlmostEqual(X.value, cvx_X.value, places=2)
self.assertAlmostEqual(sltn, prob.value, places=3)
psi_fns, omega_fns = hqs.partition(prox_fns)
sltn = hqs.solve(psi_fns, omega_fns, eps_rel=1e-7,
rho_0=1.0, rho_scale=np.sqrt(2.0) * 2.0, rho_max=2**16,
max_iters=20, max_inner_iters=500)
self.assertItemsAlmostEqual(X.value, cvx_X.value, places=2)
self.assertAlmostEqual(sltn, prob.value, places=3)
# With linear operators.
kernel = np.array([1, 2, 3])
kernel_mat = np.matrix("2 1 3; 3 2 1; 1 3 2")
x = px.Variable(3)
b = np.array([-41, 413, 2])
prox_fns = [px.nonneg(x), px.sum_squares(px.conv(kernel, x), b=b)]
hqs.solve(prox_fns, [], eps_rel=1e-9, rho_0=4, rho_scale=np.sqrt(2.0) * 1.0,
rho_max=2**16, max_iters=30, max_inner_iters=500)
cvx_X = cvx.Variable(3)
cost = cvx.norm(kernel_mat * cvx_X - b)
prob = cvx.Problem(cvx.Minimize(cost), [cvx_X >= 0])
prob.solve()
self.assertItemsAlmostEqual(x.value, cvx_X.value, places=0)
psi_fns, omega_fns = hqs.partition(prox_fns)
hqs.solve(psi_fns, omega_fns, eps_rel=1e-9, rho_0=4, rho_scale=np.sqrt(2.0) * 1.0,
rho_max=2**16, max_iters=30, max_inner_iters=500)
self.assertItemsAlmostEqual(x.value, cvx_X.value, places=0)
def test_lin_admm(self):
"""Test linearized admm. algorithm.
"""
X = px.Variable((10, 5))
B = np.reshape(np.arange(50), (10, 5))
prox_fns = [px.sum_squares(X, b=B)]
sltn = ladmm.solve(prox_fns, [], 0.1, max_iters=500, eps_rel=1e-5, eps_abs=1e-5)
self.assertItemsAlmostEqual(X.value, B, places=2)
self.assertAlmostEqual(sltn, 0)
prox_fns = [px.norm1(X, b=B, beta=2)]
sltn = ladmm.solve(prox_fns, [], 0.1, max_iters=500, eps_rel=1e-5, eps_abs=1e-5)
self.assertItemsAlmostEqual(X.value, B / 2., places=2)
self.assertAlmostEqual(sltn, 0, places=2)
prox_fns = [px.norm1(X), px.sum_squares(X, b=B)]
sltn = ladmm.solve(prox_fns, [], 0.1, max_iters=500, eps_rel=1e-5, eps_abs=1e-5)
cvx_X = cvx.Variable(10, 5)
cost = cvx.sum_squares(cvx_X - B) + cvx.norm(cvx_X, 1)
prob = cvx.Problem(cvx.Minimize(cost))
prob.solve()
self.assertItemsAlmostEqual(X.value, cvx_X.value, places=2)
self.assertAlmostEqual(sltn, prob.value)
psi_fns, omega_fns = ladmm.partition(prox_fns)
sltn = ladmm.solve(psi_fns, omega_fns, 0.1, max_iters=500, eps_rel=1e-5, eps_abs=1e-5)
self.assertItemsAlmostEqual(X.value, cvx_X.value, places=2)
self.assertAlmostEqual(sltn, prob.value)
# With linear operators.
kernel = np.array([1, 2, 3])
kernel_mat = np.matrix("2 1 3; 3 2 1; 1 3 2")
x = px.Variable(3)
b = np.array([-41, 413, 2])
prox_fns = [px.nonneg(x), px.sum_squares(px.conv(kernel, x), b=b)]
sltn = ladmm.solve(prox_fns, [], 0.1, max_iters=3000, eps_abs=1e-5,
eps_rel=1e-5)
cvx_X = cvx.Variable(3)
cost = cvx.norm(kernel_mat * cvx_X - b)
prob = cvx.Problem(cvx.Minimize(cost), [cvx_X >= 0])
prob.solve()
self.assertItemsAlmostEqual(x.value, cvx_X.value, places=2)
psi_fns, omega_fns = ladmm.partition(prox_fns)
sltn = ladmm.solve(psi_fns, omega_fns, 0.1, max_iters=3000, eps_abs=1e-5,
eps_rel=1e-5)
self.assertItemsAlmostEqual(x.value, cvx_X.value, places=2)
def test_equil(self):
"""Test equilibration.
"""
from proximal.algorithms.equil import newton_equil
np.random.seed(1)
kernel = np.array([1, 1, 1]) / np.sqrt(3)
kernel_mat = np.ones((3, 3)) / np.sqrt(3)
x = px.Variable(3)
wr = np.array([10, 5, 7])
K = px.mul_elemwise(wr, x)
K = px.conv(kernel, K)
wl = np.array([100, 50, 3])
K = px.mul_elemwise(wl, K)
K = px.CompGraph(K)
# Equilibrate
gamma = 1e-1
d, e = px.equil(K, 1000, gamma=gamma, M=5)
tmp = d * wl * kernel_mat * wr * e
u, v = np.log(d), np.log(e)
obj_val = np.square(tmp).sum() / 2 - u.sum() - v.sum() + \
gamma * (np.linalg.norm(v)**2 + np.linalg.norm(u)**2)
d, e = newton_equil(wl * kernel_mat * wr, gamma, 100)
tmp = d * wl * kernel_mat * wr * e
u, v = np.log(d), np.log(e)
sltn_val = np.square(tmp).sum() / 2 - u.sum() - v.sum() + \
gamma * (np.linalg.norm(v)**2 + np.linalg.norm(u)**2)
self.assertAlmostEqual((obj_val - sltn_val) / sltn_val, 0, places=3)
if __name__ == "__main__":
import sys
if 'cuda' in sys.argv:
TestAlgs().test_pock_chambolle_cuda()
else:
TestAlgs().test_pock_chambolle()
|
|
import sqlalchemy as sa
from sqlalchemy import Computed
from sqlalchemy import event
from sqlalchemy import Identity
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.assertsql import assert_engine
from sqlalchemy.testing.assertsql import CompiledSQL
from sqlalchemy.testing.assertsql import Conditional
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
class TriggerDefaultsTest(fixtures.MappedTest):
__requires__ = ("row_triggers",)
@classmethod
def define_tables(cls, metadata):
dt = Table(
"dt",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("col1", String(20)),
Column(
"col2", String(20), server_default=sa.schema.FetchedValue()
),
Column(
"col3", String(20), sa.schema.FetchedValue(for_update=True)
),
Column(
"col4",
String(20),
sa.schema.FetchedValue(),
sa.schema.FetchedValue(for_update=True),
),
)
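# col1 has no server default, col2 is filled in by the INSERT trigger,
# col3 by the UPDATE trigger, and col4 by both (see the DDL below).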
dialect_name = testing.db.dialect.name
for ins in (
sa.DDL(
"CREATE TRIGGER dt_ins AFTER INSERT ON dt "
"FOR EACH ROW BEGIN "
"UPDATE dt SET col2='ins', col4='ins' "
"WHERE dt.id = NEW.id; END"
).execute_if(dialect="sqlite"),
sa.DDL(
"CREATE TRIGGER dt_ins ON dt AFTER INSERT AS "
"UPDATE dt SET col2='ins', col4='ins' "
"WHERE dt.id IN (SELECT id FROM inserted);"
).execute_if(dialect="mssql"),
sa.DDL(
"CREATE TRIGGER dt_ins BEFORE INSERT "
"ON dt "
"FOR EACH ROW "
"BEGIN "
":NEW.col2 := 'ins'; :NEW.col4 := 'ins'; END;"
).execute_if(dialect="oracle"),
sa.DDL(
"CREATE TRIGGER dt_ins BEFORE INSERT "
"ON dt "
"FOR EACH ROW "
"EXECUTE PROCEDURE my_func_ins();"
).execute_if(dialect="postgresql"),
sa.DDL(
"CREATE TRIGGER dt_ins BEFORE INSERT ON dt "
"FOR EACH ROW BEGIN "
"SET NEW.col2='ins'; SET NEW.col4='ins'; END"
).execute_if(
callable_=lambda ddl, target, bind, **kw: bind.engine.name
not in ("oracle", "mssql", "sqlite", "postgresql")
),
):
my_func_ins = sa.DDL(
"CREATE OR REPLACE FUNCTION my_func_ins() "
"RETURNS TRIGGER AS $$ "
"BEGIN "
"NEW.col2 := 'ins'; NEW.col4 := 'ins'; "
"RETURN NEW; "
"END; $$ LANGUAGE PLPGSQL"
).execute_if(dialect="postgresql")
event.listen(dt, "after_create", my_func_ins)
event.listen(dt, "after_create", ins)
if dialect_name == "postgresql":
event.listen(
dt, "before_drop", sa.DDL("DROP TRIGGER dt_ins ON dt")
)
else:
event.listen(dt, "before_drop", sa.DDL("DROP TRIGGER dt_ins"))
for up in (
sa.DDL(
"CREATE TRIGGER dt_up AFTER UPDATE ON dt "
"FOR EACH ROW BEGIN "
"UPDATE dt SET col3='up', col4='up' "
"WHERE dt.id = OLD.id; END"
).execute_if(dialect="sqlite"),
sa.DDL(
"CREATE TRIGGER dt_up ON dt AFTER UPDATE AS "
"UPDATE dt SET col3='up', col4='up' "
"WHERE dt.id IN (SELECT id FROM deleted);"
).execute_if(dialect="mssql"),
sa.DDL(
"CREATE TRIGGER dt_up BEFORE UPDATE ON dt "
"FOR EACH ROW BEGIN "
":NEW.col3 := 'up'; :NEW.col4 := 'up'; END;"
).execute_if(dialect="oracle"),
sa.DDL(
"CREATE TRIGGER dt_up BEFORE UPDATE ON dt "
"FOR EACH ROW "
"EXECUTE PROCEDURE my_func_up();"
).execute_if(dialect="postgresql"),
sa.DDL(
"CREATE TRIGGER dt_up BEFORE UPDATE ON dt "
"FOR EACH ROW BEGIN "
"SET NEW.col3='up'; SET NEW.col4='up'; END"
).execute_if(
callable_=lambda ddl, target, bind, **kw: bind.engine.name
not in ("oracle", "mssql", "sqlite", "postgresql")
),
):
my_func_up = sa.DDL(
"CREATE OR REPLACE FUNCTION my_func_up() "
"RETURNS TRIGGER AS $$ "
"BEGIN "
"NEW.col3 := 'up'; NEW.col4 := 'up'; "
"RETURN NEW; "
"END; $$ LANGUAGE PLPGSQL"
).execute_if(dialect="postgresql")
event.listen(dt, "after_create", my_func_up)
event.listen(dt, "after_create", up)
if dialect_name == "postgresql":
event.listen(dt, "before_drop", sa.DDL("DROP TRIGGER dt_up ON dt"))
else:
event.listen(dt, "before_drop", sa.DDL("DROP TRIGGER dt_up"))
@classmethod
def setup_classes(cls):
class Default(cls.Comparable):
pass
@classmethod
def setup_mappers(cls):
Default, dt = cls.classes.Default, cls.tables.dt
cls.mapper_registry.map_imperatively(Default, dt)
def test_insert(self):
Default = self.classes.Default
d1 = Default(id=1)
eq_(d1.col1, None)
eq_(d1.col2, None)
eq_(d1.col3, None)
eq_(d1.col4, None)
session = fixture_session()
session.add(d1)
session.flush()
eq_(d1.col1, None)
eq_(d1.col2, "ins")
eq_(d1.col3, None)
# don't care which trigger fired
assert d1.col4 in ("ins", "up")
def test_update(self):
Default = self.classes.Default
d1 = Default(id=1)
session = fixture_session()
session.add(d1)
session.flush()
d1.col1 = "set"
session.flush()
eq_(d1.col1, "set")
eq_(d1.col2, "ins")
eq_(d1.col3, "up")
eq_(d1.col4, "up")
class ExcludedDefaultsTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"dt",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("col1", String(20), default="hello"),
)
def test_exclude(self):
dt = self.tables.dt
class Foo(fixtures.BasicEntity):
pass
self.mapper_registry.map_imperatively(
Foo, dt, exclude_properties=("col1",)
)
f1 = Foo()
sess = fixture_session()
sess.add(f1)
sess.flush()
eq_(sess.connection().execute(dt.select()).fetchall(), [(1, "hello")])
class ComputedDefaultsOnUpdateTest(fixtures.MappedTest):
"""test that computed columns are recognized as server
oninsert/onupdate defaults."""
__backend__ = True
__requires__ = ("computed_columns",)
@classmethod
def define_tables(cls, metadata):
Table(
"test",
metadata,
Column("id", Integer, primary_key=True),
Column("foo", Integer),
Column("bar", Integer, Computed("foo + 42")),
)
@classmethod
def setup_classes(cls):
class Thing(cls.Basic):
pass
class ThingNoEager(cls.Basic):
pass
@classmethod
def setup_mappers(cls):
Thing = cls.classes.Thing
cls.mapper_registry.map_imperatively(
Thing, cls.tables.test, eager_defaults=True
)
ThingNoEager = cls.classes.ThingNoEager
cls.mapper_registry.map_imperatively(
ThingNoEager, cls.tables.test, eager_defaults=False
)
@testing.combinations(("eager", True), ("noneager", False), id_="ia")
def test_insert_computed(self, eager):
if eager:
Thing = self.classes.Thing
else:
Thing = self.classes.ThingNoEager
s = fixture_session()
t1, t2 = (Thing(id=1, foo=5), Thing(id=2, foo=10))
s.add_all([t1, t2])
with assert_engine(testing.db) as asserter:
s.flush()
eq_(t1.bar, 5 + 42)
eq_(t2.bar, 10 + 42)
asserter.assert_(
Conditional(
eager and testing.db.dialect.implicit_returning,
[
Conditional(
testing.db.dialect.insert_executemany_returning,
[
CompiledSQL(
"INSERT INTO test (id, foo) "
"VALUES (%(id)s, %(foo)s) "
"RETURNING test.bar",
[{"foo": 5, "id": 1}, {"foo": 10, "id": 2}],
dialect="postgresql",
),
],
[
CompiledSQL(
"INSERT INTO test (id, foo) "
"VALUES (%(id)s, %(foo)s) "
"RETURNING test.bar",
[{"foo": 5, "id": 1}],
dialect="postgresql",
),
CompiledSQL(
"INSERT INTO test (id, foo) "
"VALUES (%(id)s, %(foo)s) "
"RETURNING test.bar",
[{"foo": 10, "id": 2}],
dialect="postgresql",
),
],
)
],
[
CompiledSQL(
"INSERT INTO test (id, foo) VALUES (:id, :foo)",
[{"foo": 5, "id": 1}, {"foo": 10, "id": 2}],
),
CompiledSQL(
"SELECT test.bar AS test_bar FROM test "
"WHERE test.id = :pk_1",
[{"pk_1": 1}],
),
CompiledSQL(
"SELECT test.bar AS test_bar FROM test "
"WHERE test.id = :pk_1",
[{"pk_1": 2}],
),
],
)
)
@testing.combinations(
(
"eagerload",
True,
testing.requires.computed_columns_on_update_returning,
),
(
"noneagerload",
False,
),
id_="ia",
)
def test_update_computed(self, eager):
if eager:
Thing = self.classes.Thing
else:
Thing = self.classes.ThingNoEager
s = fixture_session()
t1, t2 = (Thing(id=1, foo=1), Thing(id=2, foo=2))
s.add_all([t1, t2])
s.flush()
t1.foo = 5
t2.foo = 6
with assert_engine(testing.db) as asserter:
s.flush()
eq_(t1.bar, 5 + 42)
eq_(t2.bar, 6 + 42)
if eager and testing.db.dialect.implicit_returning:
asserter.assert_(
CompiledSQL(
"UPDATE test SET foo=%(foo)s "
"WHERE test.id = %(test_id)s "
"RETURNING test.bar",
[{"foo": 5, "test_id": 1}],
dialect="postgresql",
),
CompiledSQL(
"UPDATE test SET foo=%(foo)s "
"WHERE test.id = %(test_id)s "
"RETURNING test.bar",
[{"foo": 6, "test_id": 2}],
dialect="postgresql",
),
)
elif eager:
asserter.assert_(
CompiledSQL(
"UPDATE test SET foo=:foo WHERE test.id = :test_id",
[{"foo": 5, "test_id": 1}],
),
CompiledSQL(
"UPDATE test SET foo=:foo WHERE test.id = :test_id",
[{"foo": 6, "test_id": 2}],
),
CompiledSQL(
"SELECT test.bar AS test_bar FROM test "
"WHERE test.id = :pk_1",
[{"pk_1": 1}],
),
CompiledSQL(
"SELECT test.bar AS test_bar FROM test "
"WHERE test.id = :pk_1",
[{"pk_1": 2}],
),
)
else:
asserter.assert_(
CompiledSQL(
"UPDATE test SET foo=:foo WHERE test.id = :test_id",
[{"foo": 5, "test_id": 1}, {"foo": 6, "test_id": 2}],
),
CompiledSQL(
"SELECT test.bar AS test_bar FROM test "
"WHERE test.id = :pk_1",
[{"pk_1": 1}],
),
CompiledSQL(
"SELECT test.bar AS test_bar FROM test "
"WHERE test.id = :pk_1",
[{"pk_1": 2}],
),
)
class IdentityDefaultsOnUpdateTest(fixtures.MappedTest):
"""test that computed columns are recognized as server
oninsert/onupdate defaults."""
__backend__ = True
__requires__ = ("identity_columns",)
run_create_tables = "each"
@classmethod
def define_tables(cls, metadata):
Table(
"test",
metadata,
Column("id", Integer, Identity(), primary_key=True),
Column("foo", Integer),
)
@classmethod
def setup_classes(cls):
class Thing(cls.Basic):
pass
@classmethod
def setup_mappers(cls):
Thing = cls.classes.Thing
cls.mapper_registry.map_imperatively(Thing, cls.tables.test)
def test_insert_identity(self):
Thing = self.classes.Thing
s = fixture_session()
t1, t2 = (Thing(foo=5), Thing(foo=10))
s.add_all([t1, t2])
with assert_engine(testing.db) as asserter:
s.flush()
eq_(t1.id, 1)
eq_(t2.id, 2)
asserter.assert_(
Conditional(
testing.db.dialect.implicit_returning,
[
Conditional(
testing.db.dialect.insert_executemany_returning,
[
CompiledSQL(
"INSERT INTO test (foo) VALUES (%(foo)s) "
"RETURNING test.id",
[{"foo": 5}, {"foo": 10}],
dialect="postgresql",
),
],
[
CompiledSQL(
"INSERT INTO test (foo) VALUES (%(foo)s) "
"RETURNING test.id",
[{"foo": 5}],
dialect="postgresql",
),
CompiledSQL(
"INSERT INTO test (foo) VALUES (%(foo)s) "
"RETURNING test.id",
[{"foo": 10}],
dialect="postgresql",
),
],
)
],
[
CompiledSQL(
"INSERT INTO test (foo) VALUES (:foo)",
[{"foo": 5}],
),
CompiledSQL(
"INSERT INTO test (foo) VALUES (:foo)",
[{"foo": 10}],
),
],
)
)
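# A minimal, self-contained sketch of the pattern the trigger tests above
# exercise (not part of the original test suite; the Doc model, trigger DDL and
# in-memory SQLite engine below are illustrative, and SQLAlchemy 1.4+ is
# assumed): FetchedValue marks a column whose value is produced on the server,
# so the ORM re-fetches it after flush() instead of leaving it unset.
if __name__ == "__main__":
    from sqlalchemy import Column as SAColumn, create_engine
    from sqlalchemy.orm import Session, declarative_base

    Base = declarative_base()

    class Doc(Base):
        __tablename__ = "doc"
        id = SAColumn(Integer, primary_key=True)
        col1 = SAColumn(String(20))
        # filled in by the INSERT trigger below, never by the application
        col2 = SAColumn(String(20), sa.schema.FetchedValue())

    event.listen(
        Doc.__table__,
        "after_create",
        sa.DDL(
            "CREATE TRIGGER doc_ins AFTER INSERT ON doc FOR EACH ROW BEGIN "
            "UPDATE doc SET col2='ins' WHERE doc.id = NEW.id; END"
        ),
    )

    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)
    with Session(engine) as session:
        d = Doc(id=1, col1="x")
        session.add(d)
        session.flush()
        # col2 was expired by the flush and is reloaded from the database
        assert d.col2 == "ins"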
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
PySpark supports custom serializers for transferring data; this can improve
performance.
By default, PySpark uses L{PickleSerializer} to serialize objects using Python's
C{cPickle} serializer, which can serialize nearly any Python object.
Other serializers, like L{MarshalSerializer}, support fewer datatypes but can be
faster.
The serializer is chosen when creating L{SparkContext}:
>>> from pyspark.context import SparkContext
>>> from pyspark.serializers import MarshalSerializer
>>> sc = SparkContext('local', 'test', serializer=MarshalSerializer())
>>> sc.parallelize(list(range(1000))).map(lambda x: 2 * x).take(10)
[0, 2, 4, 6, 8, 10, 12, 14, 16, 18]
>>> sc.stop()
By default, PySpark serializes objects in batches; the batch size can be
controlled through SparkContext's C{batchSize} parameter
(the default size is 1024 objects):
>>> sc = SparkContext('local', 'test', batchSize=2)
>>> rdd = sc.parallelize(range(16), 4).map(lambda x: x)
Behind the scenes, this creates a JavaRDD with four partitions, each of
which contains two batches of two objects:
>>> rdd.glom().collect()
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]
>>> rdd._jrdd.count()
8L
>>> sc.stop()
A batch size of -1 uses an unlimited batch size, and a size of 1 disables
batching:
>>> sc = SparkContext('local', 'test', batchSize=1)
>>> rdd = sc.parallelize(range(16), 4).map(lambda x: x)
>>> rdd.glom().collect()
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]
>>> rdd._jrdd.count()
16L
"""
import cPickle
from itertools import chain, izip, product
import marshal
import struct
import sys
from pyspark import cloudpickle
__all__ = ["PickleSerializer", "MarshalSerializer"]
class SpecialLengths(object):
END_OF_DATA_SECTION = -1
PYTHON_EXCEPTION_THROWN = -2
TIMING_DATA = -3
class Serializer(object):
def dump_stream(self, iterator, stream):
"""
Serialize an iterator of objects to the output stream.
"""
raise NotImplementedError
def load_stream(self, stream):
"""
Return an iterator of deserialized objects from the input stream.
"""
raise NotImplementedError
def _load_stream_without_unbatching(self, stream):
return self.load_stream(stream)
# Note: our notion of "equality" is that output generated by
# equal serializers can be deserialized using the same serializer.
# This default implementation handles the simple cases;
# subclasses should override __eq__ as appropriate.
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not self.__eq__(other)
class FramedSerializer(Serializer):
"""
Serializer that writes objects as a stream of (length, data) pairs,
where C{length} is a 32-bit integer and data is C{length} bytes.
"""
def __init__(self):
# On Python 2.6, we can't write bytearrays to streams, so we need to convert them
# to strings first. Check if the version number is that old.
self._only_write_strings = sys.version_info[0:2] <= (2, 6)
def dump_stream(self, iterator, stream):
for obj in iterator:
self._write_with_length(obj, stream)
def load_stream(self, stream):
while True:
try:
yield self._read_with_length(stream)
except EOFError:
return
def _write_with_length(self, obj, stream):
serialized = self.dumps(obj)
write_int(len(serialized), stream)
if self._only_write_strings:
stream.write(str(serialized))
else:
stream.write(serialized)
def _read_with_length(self, stream):
length = read_int(stream)
obj = stream.read(length)
if obj == "":
raise EOFError
return self.loads(obj)
def dumps(self, obj):
"""
Serialize an object into a byte array.
When batching is used, this will be called with an array of objects.
"""
raise NotImplementedError
def loads(self, obj):
"""
Deserialize an object from a byte array.
"""
raise NotImplementedError
class BatchedSerializer(Serializer):
"""
Serializes a stream of objects in batches by calling its wrapped
Serializer with streams of objects.
"""
UNLIMITED_BATCH_SIZE = -1
def __init__(self, serializer, batchSize=UNLIMITED_BATCH_SIZE):
self.serializer = serializer
self.batchSize = batchSize
def _batched(self, iterator):
if self.batchSize == self.UNLIMITED_BATCH_SIZE:
yield list(iterator)
else:
items = []
count = 0
for item in iterator:
items.append(item)
count += 1
if count == self.batchSize:
yield items
items = []
count = 0
if items:
yield items
def dump_stream(self, iterator, stream):
self.serializer.dump_stream(self._batched(iterator), stream)
def load_stream(self, stream):
return chain.from_iterable(self._load_stream_without_unbatching(stream))
def _load_stream_without_unbatching(self, stream):
return self.serializer.load_stream(stream)
def __eq__(self, other):
return isinstance(other, BatchedSerializer) and \
other.serializer == self.serializer
def __str__(self):
return "BatchedSerializer<%s>" % str(self.serializer)
class CartesianDeserializer(FramedSerializer):
"""
Deserializes the JavaRDD cartesian() of two PythonRDDs.
"""
def __init__(self, key_ser, val_ser):
self.key_ser = key_ser
self.val_ser = val_ser
def prepare_keys_values(self, stream):
key_stream = self.key_ser._load_stream_without_unbatching(stream)
val_stream = self.val_ser._load_stream_without_unbatching(stream)
key_is_batched = isinstance(self.key_ser, BatchedSerializer)
val_is_batched = isinstance(self.val_ser, BatchedSerializer)
for (keys, vals) in izip(key_stream, val_stream):
keys = keys if key_is_batched else [keys]
vals = vals if val_is_batched else [vals]
yield (keys, vals)
def load_stream(self, stream):
for (keys, vals) in self.prepare_keys_values(stream):
for pair in product(keys, vals):
yield pair
def __eq__(self, other):
return isinstance(other, CartesianDeserializer) and \
self.key_ser == other.key_ser and self.val_ser == other.val_ser
def __str__(self):
return "CartesianDeserializer<%s, %s>" % \
(str(self.key_ser), str(self.val_ser))
class PairDeserializer(CartesianDeserializer):
"""
Deserializes the JavaRDD zip() of two PythonRDDs.
"""
def __init__(self, key_ser, val_ser):
self.key_ser = key_ser
self.val_ser = val_ser
def load_stream(self, stream):
for (keys, vals) in self.prepare_keys_values(stream):
for pair in izip(keys, vals):
yield pair
def __eq__(self, other):
return isinstance(other, PairDeserializer) and \
self.key_ser == other.key_ser and self.val_ser == other.val_ser
def __str__(self):
return "PairDeserializer<%s, %s>" % \
(str(self.key_ser), str(self.val_ser))
class NoOpSerializer(FramedSerializer):
def loads(self, obj): return obj
def dumps(self, obj): return obj
class PickleSerializer(FramedSerializer):
"""
Serializes objects using Python's cPickle serializer:
http://docs.python.org/2/library/pickle.html
This serializer supports nearly any Python object, but may
not be as fast as more specialized serializers.
"""
def dumps(self, obj): return cPickle.dumps(obj, 2)
loads = cPickle.loads
class CloudPickleSerializer(PickleSerializer):
def dumps(self, obj): return cloudpickle.dumps(obj, 2)
class MarshalSerializer(FramedSerializer):
"""
Serializes objects using Python's Marshal serializer:
http://docs.python.org/2/library/marshal.html
This serializer is faster than PickleSerializer but supports fewer datatypes.
"""
dumps = marshal.dumps
loads = marshal.loads
class UTF8Deserializer(Serializer):
"""
Deserializes streams written by String.getBytes.
"""
def loads(self, stream):
length = read_int(stream)
return stream.read(length).decode('utf8')
def load_stream(self, stream):
while True:
try:
yield self.loads(stream)
except struct.error:
return
except EOFError:
return
def read_long(stream):
length = stream.read(8)
if length == "":
raise EOFError
return struct.unpack("!q", length)[0]
def write_long(value, stream):
stream.write(struct.pack("!q", value))
def pack_long(value):
return struct.pack("!q", value)
def read_int(stream):
length = stream.read(4)
if length == "":
raise EOFError
return struct.unpack("!i", length)[0]
def write_int(value, stream):
stream.write(struct.pack("!i", value))
def write_with_length(obj, stream):
write_int(len(obj), stream)
stream.write(obj)
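# A small self-contained sketch (not part of the original module; the buffer and
# sample values are illustrative) of the framed wire format described on
# FramedSerializer: each object is written as a 4-byte big-endian length
# followed by that many serialized bytes, and read back until EOF.
if __name__ == "__main__":
    from io import BytesIO

    buf = BytesIO()
    ser = MarshalSerializer()
    ser.dump_stream([1, "two", [3, 4]], buf)   # three framed records
    buf.seek(0)
    assert list(ser.load_stream(buf)) == [1, "two", [3, 4]]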
|
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Option.value'
db.alter_column('sentry_option', 'value', self.gf('picklefield.fields.PickledObjectField')())
# Changing field 'ProjectOption.value'
db.alter_column('sentry_projectoptions', 'value', self.gf('picklefield.fields.PickledObjectField')())
def backwards(self, orm):
# Changing field 'Option.value'
db.alter_column('sentry_option', 'value', self.gf('django.db.models.fields.TextField')())
# Changing field 'ProjectOption.value'
db.alter_column('sentry_projectoptions', 'value', self.gf('django.db.models.fields.TextField')())
models = {
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.View']", 'symmetrical': 'False', 'blank': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.projectdomain': {
'Meta': {'unique_together': "(('project', 'domain'),)", 'object_name': 'ProjectDomain'},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'domain_set'", 'to': "orm['sentry.Project']"})
},
'sentry.projectmember': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'ProjectMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_project_set'", 'to': "orm['sentry.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.view': {
'Meta': {'object_name': 'View'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'verbose_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'verbose_name_plural': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'})
}
}
complete_apps = ['sentry']
|
|
import datetime
import os
from django import forms
from django.contrib.auth.models import User
from django.core.files import *
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.db.models.signals import post_save
from settings import MEDIA_ROOT
# Create the list of years from 2011 to the current year + 2.
start_date = '2011'
date = datetime.datetime.now()
end_date = date.year + 2
YEARS_CHOICES = () # Start with an empty tuple and concatenate onto it.
for year in range(int(start_date), int(end_date)):
YEARS_CHOICES += (
('%d - %d' % (year, year + 1), '%d - %d' % (year, year + 1)),
)
# List of status.
STATUS_CHOICES = (
('admin', 'Admin'),
('teacher', 'Teacher'),
('student', 'Student'),
)
# Assignment work-management modes
# (none = unlimited file uploads; user_defined = a planned list of required uploads)
REQUIREMENT_CHOICES = (
('none', 'None'),
('user_defined', 'User defined'),
)
# List of possibilities for RequiredFile's file type
FILE_TYPE_CHOICES = (
('none', 'none'),
('pdf', 'pdf'),
('tar.gz', 'tar.gz'),
)
# Custom storage behaviour for file management:
# removes an existing file with the same name so the new upload overwrites it.
class OverwriteStorage(FileSystemStorage):
def get_available_name(self, name):
# If the filename already exists, removes it
if self.exists(name):
os.remove(os.path.join(MEDIA_ROOT, name))
return name
# Extra profile field for a user.
class UserProfile(models.Model):
user = models.OneToOneField(User, unique=True)
status = models.CharField(max_length=10,
choices=STATUS_CHOICES,
default='student')
# In Admin panel : object = username.
def __unicode__(self):
return self.user.username
# Autocreate a UserProfile when a user is created.
def create_user_profile(sender, instance, created, **kwargs):
if created:
UserProfile.objects.create(user=instance)
post_save.connect(create_user_profile, sender=User)
# The course model.
class Course(models.Model):
name = models.CharField(max_length=40)
description = models.CharField(max_length=100, blank=True)
owner = \
models.ForeignKey(User,
related_name='course',
limit_choices_to={'userprofile__status': 'teacher'})
editdate = models.DateTimeField(auto_now=True)
years = \
models.CharField(max_length=11,
choices=YEARS_CHOICES,
default='%d - %d' % (date.year, date.year + 1))
subscribed = \
models.ManyToManyField(User,
related_name='course_list',
blank=True,
null=True,
limit_choices_to={'userprofile__status': 'student'})
# In Admin panel : object = name.
def __unicode__(self):
return self.name
# Definition of the model Assignment
class Assignment (models.Model):
name = models.CharField(max_length=40)
course = models.ForeignKey(Course, related_name='assignment')
description = models.CharField(max_length=130)
firm_deadline = models.DateTimeField(blank=True, null=True)
official_deadline = models.DateTimeField(blank=True, null=True)
admins = \
models.ManyToManyField(User,
blank=True,
null=True,
limit_choices_to={'userprofile__status': 'teacher'})
editdate = models.DateTimeField(auto_now=True)
visible = models.BooleanField(blank=True)
requirement = models.CharField(max_length=14,
choices=REQUIREMENT_CHOICES,
default='none')
    # In Admin panel : object = name.
def __unicode__(self):
return self.name
    # Returns True if the official deadline is in the past.
def official_deadline_past(self):
# Verify official deadline exists and has been passed.
if (self.official_deadline) and \
(datetime.datetime.now() >= self.official_deadline):
return True
return False
    # Returns True if the firm deadline is in the past.
def firm_deadline_past(self):
# Verify firm deadline exists and has been passed.
if (self.firm_deadline) and \
(datetime.datetime.now() >= self.firm_deadline):
return True
return False
# The definition of the model Group
class Group(models.Model):
name = models.CharField(max_length=30)
assignment = models.ForeignKey(Assignment)
members = models.ManyToManyField(User,
related_name='group_list',
null=True,
blank=True)
# In Admin panel : object = name
def __unicode__(self):
return self.name
    # name_id builds a display name of the form 'Group <id>'.
def name_id(self):
ID = u'%s' % self.id
return 'Group ' + ID
# The definition of the model File
class File(models.Model):
    # Returns the file's base name, without its path.
def filename(self):
return os.path.basename(self.file.name)
    # The path for a file : MEDIA_ROOT/Work/CourseName_CourseID/
    # AssignmentName_AssignmentID/Group GroupID/filename
def path(instance, filename):
return '/'.join(['Work',
instance.group.assignment.course.name + '_' +
unicode(instance.group.assignment.course.id),
instance.group.assignment.name + '_' +
unicode(instance.group.assignment.id),
'Group ' + unicode(instance.group.id), filename])
file = models.FileField(storage=OverwriteStorage(), upload_to=path)
requiredfile = models.ForeignKey('RequiredFile', blank=True, null=True)
group = models.ForeignKey(Group, related_name='file_list')
uploader = models.ForeignKey(User)
editdate = models.DateTimeField(auto_now=True)
# In Admin panel : object = file.name
def __unicode__(self):
return self.file.name
    # Override delete() to remove the stored file before deleting the model instance.
def delete(self):
try:
self.file.delete()
except:
pass
super(File, self).delete()
# The definition of the model RequiredFile
class RequiredFile (models.Model):
assignment = models.ForeignKey(Assignment)
name = models.CharField(max_length=40)
description = models.CharField(max_length=100, blank=True, null=True)
type = models.CharField(max_length=6,
choices=FILE_TYPE_CHOICES,
default='none')
# In Admin panel : object = name
def __unicode__(self):
return self.name
# The definition of a form to add a course.
class CourseForm(forms.ModelForm):
class Meta:
model = Course
fields = ('name', 'description', 'years')
# The definition of a form to add students to a course.
class AddSubscribedForm(forms.ModelForm):
class Meta:
model = Course
fields = ('subscribed',)
# The definition of the form to change course's owner.
class ChangeCourseOwnerForm(forms.ModelForm):
class Meta:
model = Course
fields = ('owner',)
# Definition of the form to edit Assignments
class EditAssignmentForm (forms.ModelForm):
class Meta:
model = Assignment
fields = ('name', 'description', 'official_deadline',
'firm_deadline', 'admins', 'visible', 'requirement')
# Definition of the form to add Assignments
class AddAssignmentForm (forms.ModelForm):
class Meta:
model = Assignment
fields = ('name', 'description', 'official_deadline', 'firm_deadline',
'admins', 'visible', 'requirement')
# The definition of the form to send files
class UploadFileForm(forms.ModelForm):
class Meta:
model = File
fields = ('file',)
|
|
"""
tests.test_invalid_forms.py
---------------------------
Flask-User automated tests:
Tests all forms with as many invalid field values as possible
:copyright: (c) 2013 by Ling Thio
:author: Ling Thio (ling.thio@gmail.com)
:license: Simplified BSD License, see LICENSE.txt for more details.
"""
from __future__ import print_function
from datetime import datetime
import time
from flask import current_app, url_for
from flask_user.tests.tst_utils import response_has_string
# **********************
# ** Global Variables **
# **********************
# Using global variables for speed
user1 = None
user2 = None
user3 = None
user4 = None
# *************
# ** Defines **
# *************
SHORT_USERNAME = 'Aa'
INVALID_EMAIL = 'user1.example.com'
invalid_usernames = (
'with space',
'with&symbol',
"with'symbol",
)
invalid_passwords = (
'Abcd1', # too short
'ABCabc', # no digits
'ABC123', # no lower case letters
'abc123', # no upper case letters
)
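# A standalone sketch of the password rule that the entries above violate and
# that the error-message assertions below expect: at least 6 characters, with
# one lowercase letter, one uppercase letter and one digit. The helper name is
# ours for illustration; it is not Flask-User's own validator.
import re

def _password_meets_policy(password):
    return (len(password) >= 6
            and re.search(r'[a-z]', password) is not None
            and re.search(r'[A-Z]', password) is not None
            and re.search(r'[0-9]', password) is not None)

assert not any(_password_meets_policy(p) for p in invalid_passwords)
assert _password_meets_policy('Password1')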
# *********************
# ** Automated tests **
# *********************
# Function names must start with 'test'
# The 'client' parameter is set up in conftest.py
def test_init(db):
"""
    Set up four test users
"""
global user1, user2, user3, user4
# Enable all features
um = current_app.user_manager
um.enable_register = True
um.enable_change_username = True
um.enable_change_password = True
um.enable_confirm_email = True
um.enable_reset_password = True
um.enable_email = True
um.enable_retype_password = True
# Tests have not been written with auto_login in mind
um.auto_login = False
hashed_password = um.hash_password('Password1')
User = um.db_adapter.UserClass
# Create user1 with username and email
user1 = User(username='user1', email='user1@example.com', password=hashed_password, active=True)
assert user1
db.session.add(user1)
    # Create user2 with email only
user2 = User(email='user2@example.com', password=hashed_password, active=True)
assert user2
db.session.add(user2)
# Create user3 with username and email
user3 = User(username='user3', email='user3@example.com', password=hashed_password, active=True)
assert user3
db.session.add(user3)
# Create user4 with email only
user4 = User(email='user4@example.com', password=hashed_password, active=True)
assert user4
db.session.add(user4)
db.session.commit()
def test_invalid_register_with_username_form(client):
print("test_invalid_register_with_username_form")
# Choose config
um = current_app.user_manager
um.enable_username = True
User = um.db_adapter.UserClass
# Set default values
url = url_for('user.register')
username = 'user3'
email = 'user3@example.com'
password = 'Password1'
# Test empty username
client.post_invalid_form(url, 'Username is required',
username='', email=email, password=password, retype_password=password)
# Test short username
client.post_invalid_form(url, 'Username must be at least 3 characters long',
username=SHORT_USERNAME, email=email, password=password, retype_password=password)
# Test invalid usernames
for invalid_username in invalid_usernames:
client.post_invalid_form(url, 'Username may only contain letters, numbers, ',
username=invalid_username, email=email, password=password, retype_password=password)
# Test existing username (case INsensitive!)
client.post_invalid_form(url, 'This Username is already in use. Please try another one.',
username='UsEr1', email=email, password=password, retype_password=password)
# Test empty password
client.post_invalid_form(url, 'Password is required',
username=username, email=email, password='', retype_password='')
# Test invalid passwords
for invalid_password in invalid_passwords:
client.post_invalid_form(url, 'Password must have at least 6 characters with one lowercase letter, one uppercase letter and one number',
username=username, email=email, password=invalid_password, retype_password=invalid_password)
# Test non-matching passwords
client.post_invalid_form(url, 'Password and Retype Password did not match',
username=username, email=email, password='Password1', retype_password='Password9')
def test_invalid_register_with_email_form(client):
print("test_invalid_register_with_email_form")
# Choose config
um = current_app.user_manager
um.enable_username = False
User = um.db_adapter.UserClass
# Set default values
url = url_for('user.register')
email = 'user3@example.com'
password = 'Password1'
# Test empty email
client.post_invalid_form(url, 'Email is required',
email='', password=password, retype_password=password)
# Test invalid email
client.post_invalid_form(url, 'Invalid Email',
email=INVALID_EMAIL, password=password, retype_password=password)
# Test existing email (case INsensitive!)
# TODO: Debug
#client.post_invalid_form(url, 'This Email is already in use. Please try another one.',
# email='UsEr1@ExAmPlE.CoM', password=password, retype_password=password)
# Test empty password
client.post_invalid_form(url, 'Password is required',
email=email, password='', retype_password='')
# Test invalid passwords
for invalid_password in invalid_passwords:
client.post_invalid_form(url, 'Password must have at least 6 characters with one lowercase letter, one uppercase letter and one number',
email=email, password=invalid_password, retype_password=invalid_password)
# Test non-matching passwords
client.post_invalid_form(url, 'Password and Retype Password did not match',
email=email, password='Password1', retype_password='Password9')
def test_invalid_confirm_email_page(client):
print("test_invalid_confirm_email_page")
# Test Invalid token
url = url_for('user.confirm_email', token='InvalidToken')
client.get_invalid_page(url, 'Invalid confirmation token')
# Generate valid token
um = current_app.user_manager
token = um.generate_token(user1.id)
url = url_for('user.confirm_email', token=token)
# Test Expired token
um.confirm_email_expiration = 1 # set 1 second expiration
time.sleep(2) # wait for 2 seconds
client.get_invalid_page(url, 'Your confirmation token has expired')
def test_invalid_login_with_username_form(client):
print("test_invalid_login_with_username_form")
# Choose config
um = current_app.user_manager
um.enable_email = True
um.enable_username = True
# Set default values
url = url_for('user.login')
username = 'user1'
password = 'Password1'
# Test empty username
client.post_invalid_form(url, 'Username is required',
username='', password=password)
# Test incorrect username
um.show_username_email_does_not_exist = False
client.post_invalid_form(url, 'Incorrect Username/Email and/or Password',
username='Xuser1', password=password)
um.show_username_email_does_not_exist = True
client.post_invalid_form(url, 'Username/Email does not exist',
username='Xuser1', password=password)
um.show_username_email_does_not_exist = False
# Test empty password
client.post_invalid_form(url, 'Password is required',
username=username, password='')
# Test incorrect password
um.show_username_email_does_not_exist = False
client.post_invalid_form(url, 'Incorrect Username/Email and/or Password',
username=username, password='XPassword1')
um.show_username_email_does_not_exist = True
client.post_invalid_form(url, 'Incorrect Password',
username=username, password='XPassword1')
um.show_username_email_does_not_exist = False
def test_invalid_login_with_email_form(client):
print("test_invalid_login_with_email_form")
# Choose config
um = current_app.user_manager
um.enable_email = True
um.enable_username = False
# Set default values
url = url_for('user.login')
email = 'user2@example.com'
password = 'Password1'
# Test empty email
client.post_invalid_form(url, 'Email is required',
email='', password=password)
# Test incorrect email
um.show_username_email_does_not_exist = False
client.post_invalid_form(url, 'Incorrect Email and/or Password',
email='Xuser2@example.com', password=password)
um.show_username_email_does_not_exist = True
client.post_invalid_form(url, 'Email does not exist',
email='Xuser2@example.com', password=password)
um.show_username_email_does_not_exist = False
# Test empty password
client.post_invalid_form(url, 'Password is required',
email=email, password='')
# Test incorrect password
um.show_username_email_does_not_exist = False
client.post_invalid_form(url, 'Incorrect Email and/or Password',
email=email, password='XPassword1')
um.show_username_email_does_not_exist = True
client.post_invalid_form(url, 'Incorrect Password',
email=email, password='XPassword1')
um.show_username_email_does_not_exist = False
def test_invalid_change_username_form(client):
print("test_invalid_change_username_form")
# Set user manager config
um = current_app.user_manager
um.enable_username = True
um.enable_email = False
# Set default values
username = 'user1'
password = 'Password1'
new_username = 'user4'
url = url_for('user.change_username')
# Log in as 'user1'
client.login(username=username, password=password)
# Test empty username
client.post_invalid_form(url, 'Username is required',
new_username='', old_password=password)
# Test short username
client.post_invalid_form(url, 'Username must be at least 3 characters long',
new_username=SHORT_USERNAME, old_password=password)
# Test existing username
client.post_invalid_form(url, 'This Username is already in use. Please try another one.',
new_username='user3', old_password=password)
# Test empty password
client.post_invalid_form(url, 'Old Password is required',
new_username=username, old_password='')
# Test incorrect password
client.post_invalid_form(url, 'Old Password is incorrect',
new_username=username, old_password='XPassword1')
client.logout()
def test_invalid_change_password_form(client):
print("test_invalid_change_password_form")
# Set user manager config
um = current_app.user_manager
um.enable_username = False
# Set default values
email = 'user2@example.com'
old_password = 'Password1'
new_password = 'Password5'
url = url_for('user.change_password')
    # Log in as 'user2'
client.login(email=email, password=old_password)
# Test empty old password
client.post_invalid_form(url, 'Old Password is required',
old_password='', new_password=new_password, retype_password=new_password)
# Test incorrect old password
client.post_invalid_form(url, 'Old Password is incorrect',
old_password='XPassword1', new_password=new_password, retype_password=new_password)
# Test empty password
client.post_invalid_form(url, 'New Password is required',
old_password=old_password, new_password='', retype_password=new_password)
# Test invalid passwords
for invalid_password in invalid_passwords:
client.post_invalid_form(url, 'Password must have at least 6 characters with one lowercase letter, one uppercase letter and one number',
old_password=old_password, new_password=invalid_password, retype_password=new_password)
# Test non-matching passwords
client.post_invalid_form(url, 'New Password and Retype Password did not match',
old_password=old_password, new_password=new_password, retype_password='Xpassword5')
client.logout()
def test_invalid_forgot_password_form(client):
print("test_invalid_forgot_password_form")
url = url_for('user.forgot_password')
# Test invalid email
client.post_invalid_form(url, 'Invalid Email',
email=INVALID_EMAIL)
def test_invalid_reset_password(client):
print("test_invalid_reset_password")
# Set user manager config
um = current_app.user_manager
# Set default values
new_password = 'Password5'
# Simulate a valid forgot password form
token = um.generate_token(user1.id)
# Test invalid token
url = url_for('user.reset_password', token='InvalidToken')
client.post_invalid_form(url, 'Your reset password token is invalid',
new_password=new_password, retype_password=new_password)
# Expired Token
url = url_for('user.reset_password', token=token)
um.reset_password_expiration = 1 # set 1 second expiration
time.sleep(2) # wait for 2 seconds
client.post_invalid_form(url, 'Your reset password token has expired',
new_password=new_password, retype_password=new_password)
um.reset_password_expiration = 2*24*3600 # 2 days
# Invalid retype password
client.post_invalid_form(url, 'New Password and Retype Password did not match',
new_password = new_password, retype_password='XPassword5')
def test_valid_roles(client):
um = current_app.user_manager
User = um.db_adapter.UserClass
# Perform only for roles_required_app
user007 = User.query.filter(User.username=='user007').first()
if not user007: return
print("test_valid_roles")
um.enable_username = True
client.login(username='user007', password='Password1')
url = url_for('special_page')
response = client.get_valid_page(url)
assert not response_has_string(response, 'You must be signed in to access')
client.logout()
def test_invalid_roles(client):
um = current_app.user_manager
User = um.db_adapter.UserClass
# Perform only for roles_required_app
user007 = User.query.filter(User.username=='user007').first()
if not user007: return
print("test_invalid_roles")
um.enable_username = True
client.login(username='user1', password='Password1')
url = url_for('special_page')
response = client.get_invalid_page(url, 'You do not have permission to access')
client.logout()
def test_login_without_confirm_email(client):
print("test_login_without_confirm_email")
um = current_app.user_manager
um.enable_username = False
um.enable_email = True
um.enable_confirm_email = True
um.enable_retype_password = False
email = 'notconfirmed@example.com'
password = 'Password1'
# register user
client.post_valid_form(url_for('user.register'),
email=email,
password=password)
# Try logging in without confirming email
client.post_invalid_form(url_for('user.login'),
'Your email address has not yet been confirmed',
email=email,
password=password)
# Confirm email manually, but disable account
User = um.db_adapter.UserClass
user = User.query.filter(User.email==email).first()
assert(user)
user.active = False
user.confirmed_at = datetime.utcnow()
    # Try logging in to the disabled account
client.post_invalid_form(url_for('user.login'),
'Your account has not been enabled',
email=email,
password=password)
def test_cleanup(db):
"""
    Delete user1 through user4
"""
global user1, user2, user3, user4
db.session.delete(user1)
db.session.delete(user2)
db.session.delete(user3)
db.session.delete(user4)
db.session.commit()
user1 = None
user2 = None
user3 = None
user4 = None
# TODO:
# Register without confirming email and try to log in
# 'Your email address has not yet been confirmed. Check your email Inbox and Spam folders for the confirmation email and follow the instructions to activate your account.'
#
# Disable account and try to login
# 'Your account has been disabled.'
#
# Logout with user_manager.logout_next set
#
# Reset password with custom user_manager.password_validator
#
# Change password with custom user_manager.password_validator:
#
# Custom db_adapter.EmailClass
|
|
"""
search.py
"""
from flask import Flask, request, redirect, abort, make_response
from flask import render_template, flash
import bibserver.dao
from bibserver import auth
import json, httplib
from bibserver.config import config
import bibserver.util as util
import logging
from logging.handlers import RotatingFileHandler
LOG_FILENAME="./app.log"
log = logging.getLogger('werkzeug')
log.setLevel(logging.DEBUG)
formatter = logging.Formatter(
"[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s")
handler = RotatingFileHandler(LOG_FILENAME, maxBytes=10000000, backupCount=5)
handler.setFormatter(formatter)
log.addHandler(handler)
class Search(object):
def __init__(self,path,current_user):
self.path = path.replace(".json","")
self.current_user = current_user
# facets -> convert to aggs
self.search_options = {
'search_url': '/query?',
'search_index': 'elasticsearch',
'paging': { 'from': 0, 'size': 10 },
#'predefined_filters': {},
#'facets': config['search_facet_fields'],
'result_display': config['search_result_display'],
'search_sortby': [{'display':'year', 'field':'year.exact'},
{'display':'author','field':'author.name'},
{'display':'journal','field':'journal.name'}],
'searchbox_fieldselect': [
{'display':'author','field':'author.name'},
{'display':'journal','field':'journal.name'}]#,
#'addremovefacets': config['add_remove_facets'] # (full list could also be pulled from DAO)
}
self.parts = self.path.strip('/').split('/')
def find(self):
log.debug(self.parts[0])
log.debug(self.parts)
log.debug(len(self.parts))
if bibserver.dao.Account.get(self.parts[0]):
if len(self.parts) == 1:
return self.account() # user account
elif len(self.parts) == 2:
if self.parts[1] == "collections":
return self.collections()
else:
return self.collection() # get a collection
elif len(self.parts) == 3:
return self.record() # get a record in collection
elif self.parts[0] == 'collections':
return self.collections() # get search list of all collections
elif len(self.parts) == 1:
if self.parts[0] != 'search':
self.search_options['q'] = self.parts[0]
return self.default() # get search result of implicit search term
elif len(self.parts) == 2:
return self.implicit_facet() # get search result of implicit facet filter
else:
abort(404)
def default(self):
# default search page
if util.request_wants_json():
res = bibserver.dao.Record.query()
resp = make_response(
json.dumps([i['_source'] for i in res._hits],
sort_keys=True, indent=4) )
resp.mimetype = "application/json"
return resp
else:
return render_template('search/index.html',
current_user=self.current_user,
search_options=json.dumps(self.search_options),
collection=None
)
# TODO: convert facet => aggs
def implicit_facet(self):
self.search_options['predefined_filters'][self.parts[0]+config['facet_field']] = self.parts[1]
# remove the implicit facet from facets
for count,facet in enumerate(self.search_options['facets']):
if facet['field'] == self.parts[0]+config['facet_field']:
del self.search_options['facets'][count]
if util.request_wants_json():
res = bibserver.dao.Record.query(terms=self.search_options['predefined_filters'])
resp = make_response( json.dumps([i['_source'] for i in res._hits], sort_keys=True, indent=4) )
resp.mimetype = "application/json"
return resp
else:
return render_template('search/index.html',
current_user=self.current_user,
search_options=json.dumps(self.search_options),
collection=None,
implicit=self.parts[0]+': ' + self.parts[1]
)
def collections(self):
if len(self.parts) == 1:
if util.request_wants_json():
res = bibserver.dao.Collection.query(size=1000000)
colls = [i['_source'] for i in res._hits]
resp = make_response( json.dumps(colls, sort_keys=True, indent=4) )
resp.mimetype = "application/json"
return resp
else:
# search collection records
self.search_options['search_url'] = '/query/collection?'
self.search_options['facets'] = [{'field':'owner','size':100},{'field':'_created','size':100}]
self.search_options['result_display'] = [[{'pre':'<h3>','field':'label','post':'</h3>'}],[{'field':'description'}],[{'pre':'created by ','field':'owner'}]]
self.search_options['result_display'] = config['collections_result_display']
return render_template('collection/index.html', current_user=self.current_user, search_options=json.dumps(self.search_options), collection=None)
elif len(self.parts) == 2:
if self.parts[0] == "collections":
acc = bibserver.dao.Account.get(self.parts[1])
else:
acc = bibserver.dao.Account.get(self.parts[0])
if acc:
resp = make_response( json.dumps([coll.data for coll in acc.collections], sort_keys=True, indent=4) )
resp.mimetype = "application/json"
return resp
else:
abort(404)
elif len(self.parts) == 3:
coll = bibserver.dao.Collection.get_by_owner_coll(self.parts[1],self.parts[2])
if coll:
coll.data['records'] = len(coll)
resp = make_response( json.dumps(coll.data, sort_keys=True, indent=4) )
resp.mimetype = "application/json"
return resp
else:
abort(404)
else:
abort(404)
def record(self):
found = None
res = bibserver.dao.Record.query(terms = {
'owner'+config['facet_field']:self.parts[0],
'collection'+config['facet_field']:self.parts[1],
'id'+config['facet_field']:self.parts[2]
})
if res.total == 0:
rec = bibserver.dao.Record.get(self.parts[2])
if rec: found = 1
elif res.total == 1:
rec = bibserver.dao.Record.get(res._hits[0]['_id'])
found = 1
else:
found = 2
if not found:
abort(404)
elif found == 1:
collection = bibserver.dao.Collection.get_by_owner_coll(rec.data['owner'],rec.data['collection'])
if request.method == 'DELETE':
if rec:
if not auth.collection.update(self.current_user, collection):
abort(401)
rec.delete()
abort(404)
else:
abort(404)
elif request.method == 'POST':
if rec:
if not auth.collection.update(self.current_user, collection):
abort(401)
rec.data = request.json
rec.save()
resp = make_response( json.dumps(rec.data, sort_keys=True, indent=4) )
resp.mimetype = "application/json"
return resp
else:
if util.request_wants_json():
resp = make_response( json.dumps(rec.data, sort_keys=True, indent=4) )
resp.mimetype = "application/json"
return resp
else:
admin = True if auth.collection.update(self.current_user, collection) else False
# make a list of all the values in the record, for autocomplete on the search field
searchvals = []
def valloop(obj):
if isinstance(obj,dict):
for item in obj:
valloop(obj[item])
elif isinstance(obj,list):
for thing in obj:
valloop(thing)
else:
searchvals.append(obj)
valloop(rec.data)
# get fuzzy like this
host = str(config['ELASTIC_SEARCH_HOST']).rstrip('/')
db_path = config['ELASTIC_SEARCH_DB']
fullpath = '/' + db_path + '/record/' + rec.id + '/_mlt?mlt_fields=title&min_term_freq=1&percent_terms_to_match=1&min_word_len=3'
c = httplib.HTTPConnection(host)
c.request('GET', fullpath)
resp = c.getresponse()
res = json.loads(resp.read())
mlt = [i['_source'] for i in res['hits']['hits']]
# get any notes
notes = bibserver.dao.Note.about(rec.id)
# check service core for more data about the record
# TODO: should maybe move this into the record dao or something
# TODO: also, add in any other calls to external APIs
servicecore = ""
apis = config['external_apis']
if apis['servicecore']['key']:
try:
servicecore = "not found in any UK repository"
addr = apis['servicecore']['url'] + rec.data['title'].replace(' ','%20') + "?format=json&api_key=" + apis['servicecore']['key']
import urllib2
response = urllib2.urlopen( addr )
data = json.loads(response.read())
if 'ListRecords' in data and len(data['ListRecords']) != 0:
record = data['ListRecords'][0]['record']['metadata']['oai_dc:dc']
servicecore = "<h3>Availability</h3><p>This article is openly available in an institutional repository:</p>"
servicecore += '<p><a target="_blank" href="' + record["dc:source"] + '">' + record["dc:title"] + '</a><br />'
if "dc:description" in record:
servicecore += record["dc:description"] + '<br /><br />'
servicecore += '</p>'
except:
pass
# render the record with all extras
return render_template('record.html',
record=json.dumps(rec.data),
prettyrecord=self.prettify(rec.data),
objectrecord = rec.data,
searchvals=json.dumps(searchvals),
admin=admin,
notes=notes,
servicecore=servicecore,
mlt=mlt,
searchables=json.dumps(config["searchables"], sort_keys=True)
)
else:
if util.request_wants_json():
resp = make_response( json.dumps([i['_source'] for i in res._hits], sort_keys=True, indent=4) )
resp.mimetype = "application/json"
return resp
else:
return render_template('record.html', multiple=[i['_source'] for i in res._hits])
def account(self):
self.search_options['predefined_filters']['owner'+config['facet_field']] = self.parts[0]
acc = bibserver.dao.Account.get(self.parts[0])
if request.method == 'DELETE':
if not auth.user.update(self.current_user,acc):
abort(401)
if acc: acc.delete()
return ''
elif request.method == 'POST':
if not auth.user.update(self.current_user,acc):
abort(401)
info = request.json
if info.get('_id',False):
if info['_id'] != self.parts[0]:
acc = bibserver.dao.Account.get(info['_id'])
else:
info['api_key'] = acc.data['api_key']
info['_created'] = acc.data['_created']
info['collection'] = acc.data['collection']
info['owner'] = acc.data['collection']
acc.data = info
if 'password' in info and not info['password'].startswith('sha1'):
acc.set_password(info['password'])
acc.save()
resp = make_response( json.dumps(acc.data, sort_keys=True, indent=4) )
resp.mimetype = "application/json"
return resp
else:
if util.request_wants_json():
if not auth.user.update(self.current_user,acc):
abort(401)
resp = make_response( json.dumps(acc.data, sort_keys=True, indent=4) )
resp.mimetype = "application/json"
return resp
else:
admin = True if auth.user.update(self.current_user,acc) else False
recordcount = bibserver.dao.Record.query(terms={'owner':acc.id}).total
collcount = bibserver.dao.Collection.query(terms={'owner':acc.id}).total
return render_template('account/view.html',
current_user=self.current_user,
search_options=json.dumps(self.search_options),
record=json.dumps(acc.data),
recordcount=recordcount,
collcount=collcount,
admin=admin,
account=acc,
superuser=auth.user.is_super(self.current_user)
)
def collection(self):
# show the collection that matches parts[1]
self.search_options['predefined_filters']['owner'] = self.parts[0]
self.search_options['predefined_filters']['collection'] = self.parts[1]
# remove the collection facet
for count,facet in enumerate(self.search_options['facets']):
if facet['field'] == 'collection'+config['facet_field']:
del self.search_options['facets'][count]
# look for collection metadata
metadata = bibserver.dao.Collection.get_by_owner_coll(self.parts[0],self.parts[1])
if request.method == 'DELETE':
if metadata != None:
if not auth.collection.update(self.current_user, metadata):
abort(401)
else: metadata.delete()
return ''
else:
if not auth.collection.create(self.current_user, None):
abort(401)
else:
size = bibserver.dao.Record.query(terms={'owner':self.parts[0],'collection':self.parts[1]}).total
for rid in bibserver.dao.Record.query(terms={'owner':self.parts[0],'collection':self.parts[1]},size=size)._hits:
record = bibserver.dao.Record.get(rid['_id'])
if record: record.delete()
return ''
elif request.method == 'POST':
if metadata != None:
metadata.data = request.json
metadata.save()
return ''
else: abort(404)
else:
if util.request_wants_json():
out = {"metadata":metadata.data,"records":[]}
out['metadata']['records'] = len(metadata)
out['metadata']['query'] = request.url
for rec in metadata.records:
out['records'].append(rec.data)
resp = make_response( json.dumps(out, sort_keys=True, indent=4) )
resp.mimetype = "application/json"
return resp
else:
admin = True if metadata != None and auth.collection.update(self.current_user, metadata) else False
if metadata and '_display_settings' in metadata:
self.search_options.update(metadata['_display_settings'])
users = bibserver.dao.Account.query(size=1000000) # pass the userlist for autocomplete admin addition (could be ajax'd)
userlist = [i['_source']['_id'] for i in users['hits']['hits']]
return render_template('search/index.html',
current_user=self.current_user,
search_options=json.dumps(self.search_options),
collection=metadata.data,
record = json.dumps(metadata.data),
userlist=json.dumps(userlist),
request=request,
admin=admin
)
def prettify(self,record):
result = '<p>'
# given a result record, build how it should look on the page
img = False
if img:
result += '<img class="thumbnail" style="float:left; width:100px; margin:0 5px 10px 0; max-height:150px;" src="' + img[0] + '" />'
# add the record based on display template if available
display = config['search_result_display']
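        # Illustrative note (the field name 'author.name' is hypothetical): a
        # display entry such as [{'pre': '<b>', 'field': 'author.name', 'post': '</b>'}]
        # looks up record['author'] and then 'name' (on the dict itself, or on
        # each entry if it is a list), wrapping the value(s) in 'pre'/'post'.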
lines = ''
for lineitem in display:
line = ''
for obj in lineitem:
thekey = obj['field']
parts = thekey.split('.')
if len(parts) == 1:
res = record
elif len(parts) == 2:
res = record.get(parts[0],'')
elif len(parts) == 3:
res = record[parts[0]][parts[1]]
counter = len(parts) - 1
if res and isinstance(res, dict):
thevalue = res.get(parts[counter],'') # this is a dict
else:
thevalue = []
for row in res:
thevalue.append(row[parts[counter]])
if thevalue and len(thevalue):
line += obj.get('pre','')
if isinstance(thevalue, list):
for index,val in enumerate(thevalue):
                            if index != 0: line += ', '  # comma-separate all but the first value
line += val
else:
line += thevalue
line += obj.get('post','')
if line:
lines += line + "<br />"
if lines:
result += lines
else:
result += json.dumps(record,sort_keys=True,indent=4)
result += '</p>'
return result
|
|
# -*- coding: utf-8 -*-
import random
from cadnano.views.pathview import pathstyles
from cadnano.proxies.cnproxy import UndoCommand
from cadnano.strand import Strand
class SplitCommand(UndoCommand):
""" The SplitCommand takes as input a strand and "splits" the strand in
two, such that one new strand 3' end is at base_idx, and the other
new strand 5' end is at base_idx +/- 1 (depending on the direction
of the strands).
Under the hood:
    On redo, this command actually creates two new copies of the
original strand, resizes each and modifies their connections.
On undo, the new copies are removed and the original is restored.
"""
def __init__(self, strand: Strand, base_idx: int, update_sequence: bool = True):
super(SplitCommand, self).__init__("split strand")
# Store inputs
self._old_strand = strand
# TODO possibly implement selection preserving
# doc = strand.document()
# self.was_selected = was_selected = doc.isModelStrandSelected(strand)
# if was_selected:
# self.select_values = doc.getSelectedStrandValue(strand)
# else:
# self.select_values = (None, None)
old_sequence = strand._sequence
is5to3 = strand.isForward()
self._s_set = strand.strandSet()
self._old_oligo = oligo = strand.oligo()
# Create copies
self.strand_low = strand_low = strand.shallowCopy()
self.strand_high = strand_high = strand.shallowCopy()
if oligo.isCircular():
self._l_oligo = self._h_oligo = l_oligo = h_oligo = oligo.shallowCopy()
else:
self._l_oligo = l_oligo = oligo.shallowCopy()
self._h_oligo = h_oligo = oligo.shallowCopy()
color_list = pathstyles.STAP_COLORS
# Determine oligo retention based on strand priority
if is5to3: # strand_low has priority
i_new_low = base_idx
color_low = oligo.getColor()
color_high = random.choice(color_list)
olg5p, olg3p = l_oligo, h_oligo
std5p, std3p = strand_low, strand_high
else: # strand_high has priority
i_new_low = base_idx - 1
color_low = random.choice(color_list)
color_high = oligo.getColor()
olg5p, olg3p = h_oligo, l_oligo
std5p, std3p = strand_high, strand_low
# this is for updating a connected xover view object
# there is only ever one xover a strand is in charge of
self._strand3p = std3p
self._strand5p = std5p
# Update strand connectivity
strand_low.setConnectionHigh(None)
strand_high.setConnectionLow(None)
# Resize strands and update decorators
strand_low.setIdxs((strand.lowIdx(), i_new_low))
strand_high.setIdxs((i_new_low + 1, strand.highIdx()))
# Update the oligo for things like its 5prime end and isCircular
olg5p._strandSplitUpdate(std5p, std3p, olg3p, strand)
if not oligo.isCircular():
# Update the oligo color if necessary
l_oligo._setColor(color_low)
h_oligo._setColor(color_high)
# settle the oligo length
length = 0
for strand in std3p.generator3pStrand():
length += strand.totalLength()
# end for
olg5p._setLength(olg5p.length() - length, emit_signals=True)
olg3p._setLength(length, emit_signals=True)
# end if
if update_sequence and old_sequence:
if is5to3: # strand_low has priority
tL = strand_low.totalLength()
strand_low._sequence = old_sequence[0:tL]
strand_high._sequence = old_sequence[tL:]
else:
tH = strand_high.totalLength()
strand_high._sequence = old_sequence[0:tH]
strand_low._sequence = old_sequence[tH:]
# end def
def redo(self):
ss = self._s_set
s_low = self.strand_low
s_high = self.strand_high
o_strand = self._old_strand
olg = self._old_oligo
l_olg = self._l_oligo
h_olg = self._h_oligo
was_not_loop = l_olg != h_olg
# Remove old Strand from the s_set
ss._removeFromStrandList(o_strand, update_segments=False)
# Add new strands to the s_set (reusing idx, so order matters)
ss._addToStrandList(s_high, update_segments=False)
ss._addToStrandList(s_low)
# update connectivity of strands
sLcL = s_low.connectionLow()
if sLcL:
if ((o_strand.isForward() and sLcL.isForward()) or
(not o_strand.isForward() and not sLcL.isForward())):
sLcL.setConnectionHigh(s_low)
else:
sLcL.setConnectionLow(s_low)
sHcH = s_high.connectionHigh()
if sHcH:
if ((o_strand.isForward() and sHcH.isForward()) or
(not o_strand.isForward() and not sHcH.isForward())):
sHcH.setConnectionLow(s_high)
else:
sHcH.setConnectionHigh(s_high)
# Traverse the strands via 3'conns to assign the new oligos
fSetOligo = Strand.setOligo
for strand in l_olg.strand5p().generator3pStrand():
fSetOligo(strand, l_olg, emit_signals=True) # emits strandHasNewOligoSignal
if was_not_loop: # do the second oligo which is different
for i, strand in enumerate(h_olg.strand5p().generator3pStrand()):
# emits strandHasNewOligoSignal
fSetOligo(strand, h_olg, emit_signals=True)
# Add new oligo and remove old oligos from the part
olg.removeFromPart(emit_signals=True)
l_olg.addToPart(s_low.part(), emit_signals=True)
if was_not_loop:
h_olg.addToPart(s_high.part(), emit_signals=True)
# Emit Signals related to destruction and addition
o_strand.strandRemovedSignal.emit(o_strand)
ss.strandsetStrandAddedSignal.emit(ss, s_high)
ss.strandsetStrandAddedSignal.emit(ss, s_low)
# if self.was_selected:
# print("WAS SELECTED")
# doc = ss.document()
# select_values = self.select_values
# doc.addStrandToSelection(s_low, select_values)
# doc.addStrandToSelection(s_high, select_values)
# doc.updateStrandSelection()
# end def
def undo(self):
ss = self._s_set
s_low = self.strand_low
s_high = self.strand_high
o_strand = self._old_strand
olg = self._old_oligo
l_olg = self._l_oligo
h_olg = self._h_oligo
was_not_loop = l_olg != h_olg
# Remove new strands from the s_set (reusing idx, so order matters)
ss._removeFromStrandList(s_low, update_segments=False)
ss._removeFromStrandList(s_high, update_segments=False)
# Add the old strand to the s_set
ss._addToStrandList(o_strand)
# update connectivity of strands
oScL = o_strand.connectionLow()
if oScL:
if ((o_strand.isForward() and oScL.isForward()) or
(not o_strand.isForward() and not oScL.isForward())):
oScL.setConnectionHigh(o_strand)
else:
oScL.setConnectionLow(o_strand)
oScH = o_strand.connectionHigh()
if oScH:
if ((o_strand.isForward() and oScH.isForward()) or
(not o_strand.isForward() and not oScH.isForward())):
oScH.setConnectionLow(o_strand)
else:
oScH.setConnectionHigh(o_strand)
# Traverse the strands via 3'conns to assign the old oligo
fSetOligo = Strand.setOligo
for strand in olg.strand5p().generator3pStrand():
fSetOligo(strand, olg, emit_signals=True)
# Add old oligo and remove new oligos from the part
olg.addToPart(ss.part(), emit_signals=True)
l_olg.removeFromPart(emit_signals=True)
if was_not_loop:
h_olg.removeFromPart(emit_signals=True)
# Emit Signals related to destruction and addition
s_low.strandRemovedSignal.emit(s_low)
s_high.strandRemovedSignal.emit(s_high)
ss.strandsetStrandAddedSignal.emit(ss, o_strand)
ss.part().reemitActiveVirtualHelix() # emit last to ensure colors of ticks are correct
# if self.was_selected:
# doc = ss.document()
# doc.addStrandToSelection(o_strand, self.select_values)
# doc.updateStrandSelection()
# end def
# end class
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# copyright 2014 Hamilton Kibbe <ham@hamiltonkib.be>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Excellon Statements
====================
**Excellon file statement classes**
"""
import re
import uuid
import itertools
from .utils import (parse_gerber_value, write_gerber_value, decimal_string,
inch, metric)
__all__ = ['ExcellonTool', 'ToolSelectionStmt', 'CoordinateStmt',
'CommentStmt', 'HeaderBeginStmt', 'HeaderEndStmt',
'RewindStopStmt', 'EndOfProgramStmt', 'UnitStmt',
'IncrementalModeStmt', 'VersionStmt', 'FormatStmt', 'LinkToolStmt',
'MeasuringModeStmt', 'RouteModeStmt', 'LinearModeStmt', 'DrillModeStmt',
'AbsoluteModeStmt', 'RepeatHoleStmt', 'UnknownStmt',
'ExcellonStatement', 'ZAxisRoutPositionStmt',
'RetractWithClampingStmt', 'RetractWithoutClampingStmt',
'CutterCompensationOffStmt', 'CutterCompensationLeftStmt',
'CutterCompensationRightStmt', 'ZAxisInfeedRateStmt',
'NextToolSelectionStmt', 'SlotStmt']
class ExcellonStatement(object):
""" Excellon Statement abstract base class
"""
@classmethod
def from_excellon(cls, line):
raise NotImplementedError('from_excellon must be implemented in a '
'subclass')
def __init__(self, unit='inch', id=None):
self.units = unit
self.id = uuid.uuid4().int if id is None else id
def to_excellon(self, settings=None):
raise NotImplementedError('to_excellon must be implemented in a '
'subclass')
def to_inch(self):
self.units = 'inch'
def to_metric(self):
self.units = 'metric'
def offset(self, x_offset=0, y_offset=0):
pass
def __eq__(self, other):
return self.__dict__ == other.__dict__
class ExcellonTool(ExcellonStatement):
""" Excellon Tool class
Parameters
----------
settings : FileSettings (dict-like)
File-wide settings.
kwargs : dict-like
Tool settings from the excellon statement. Valid keys are:
- `diameter` : Tool diameter [expressed in file units]
- `rpm` : Tool RPM
- `feed_rate` : Z-axis tool feed rate
- `retract_rate` : Z-axis tool retraction rate
- `max_hit_count` : Number of hits allowed before a tool change
- `depth_offset` : Offset of tool depth from tip of tool.
Attributes
----------
number : integer
Tool number from the excellon file
diameter : float
Tool diameter in file units
rpm : float
Tool RPM
feed_rate : float
Tool Z-axis feed rate.
retract_rate : float
Tool Z-axis retract rate
depth_offset : float
Offset of depth measurement from tip of tool
max_hit_count : integer
Maximum number of tool hits allowed before a tool change
hit_count : integer
Number of tool hits in excellon file.
"""
PLATED_UNKNOWN = None
PLATED_YES = 'plated'
PLATED_NO = 'nonplated'
PLATED_OPTIONAL = 'optional'
@classmethod
def from_tool(cls, tool):
args = {}
args['depth_offset'] = tool.depth_offset
args['diameter'] = tool.diameter
args['feed_rate'] = tool.feed_rate
args['max_hit_count'] = tool.max_hit_count
args['number'] = tool.number
args['plated'] = tool.plated
args['retract_rate'] = tool.retract_rate
args['rpm'] = tool.rpm
return cls(None, **args)
@classmethod
def from_excellon(cls, line, settings, id=None, plated=None):
""" Create a Tool from an excellon file tool definition line.
Parameters
----------
line : string
Tool definition line from an excellon file.
settings : FileSettings (dict-like)
Excellon file-wide settings
Returns
-------
tool : Tool
An ExcellonTool representing the tool defined in `line`
"""
commands = pairwise(re.split('([BCFHSTZ])', line)[1:])
args = {}
args['id'] = id
nformat = settings.format
zero_suppression = settings.zero_suppression
for cmd, val in commands:
if cmd == 'B':
args['retract_rate'] = parse_gerber_value(val, nformat, zero_suppression)
elif cmd == 'C':
args['diameter'] = parse_gerber_value(val, nformat, zero_suppression)
elif cmd == 'F':
args['feed_rate'] = parse_gerber_value(val, nformat, zero_suppression)
elif cmd == 'H':
args['max_hit_count'] = parse_gerber_value(val, nformat, zero_suppression)
elif cmd == 'S':
args['rpm'] = 1000 * parse_gerber_value(val, nformat, zero_suppression)
elif cmd == 'T':
args['number'] = int(val)
elif cmd == 'Z':
args['depth_offset'] = parse_gerber_value(val, nformat, zero_suppression)
if plated != ExcellonTool.PLATED_UNKNOWN:
            # Sometimes we can parse the plating status
args['plated'] = plated
return cls(settings, **args)
@classmethod
def from_dict(cls, settings, tool_dict):
""" Create an ExcellonTool from a dict.
Parameters
----------
settings : FileSettings (dict-like)
Excellon File-wide settings
tool_dict : dict
Excellon tool parameters as a dict
Returns
-------
tool : ExcellonTool
An ExcellonTool initialized with the parameters in tool_dict.
"""
return cls(settings, **tool_dict)
def __init__(self, settings, **kwargs):
if kwargs.get('id') is not None:
super(ExcellonTool, self).__init__(id=kwargs.get('id'))
self.settings = settings
self.number = kwargs.get('number')
self.feed_rate = kwargs.get('feed_rate')
self.retract_rate = kwargs.get('retract_rate')
self.rpm = kwargs.get('rpm')
self.diameter = kwargs.get('diameter')
self.max_hit_count = kwargs.get('max_hit_count')
self.depth_offset = kwargs.get('depth_offset')
self.plated = kwargs.get('plated')
self.hit_count = 0
def to_excellon(self, settings=None):
if self.settings and not settings:
settings = self.settings
fmt = settings.format
zs = settings.zero_suppression
stmt = 'T%02d' % self.number
if self.retract_rate is not None:
stmt += 'B%s' % write_gerber_value(self.retract_rate, fmt, zs)
if self.feed_rate is not None:
stmt += 'F%s' % write_gerber_value(self.feed_rate, fmt, zs)
if self.max_hit_count is not None:
stmt += 'H%s' % write_gerber_value(self.max_hit_count, fmt, zs)
if self.rpm is not None:
if self.rpm < 100000.:
stmt += 'S%s' % write_gerber_value(self.rpm / 1000., fmt, zs)
else:
stmt += 'S%g' % (self.rpm / 1000.)
if self.diameter is not None:
stmt += 'C%s' % decimal_string(self.diameter, fmt[1], True)
if self.depth_offset is not None:
stmt += 'Z%s' % write_gerber_value(self.depth_offset, fmt, zs)
return stmt
def to_inch(self):
if self.settings.units != 'inch':
self.settings.units = 'inch'
if self.diameter is not None:
self.diameter = inch(self.diameter)
def to_metric(self):
if self.settings.units != 'metric':
self.settings.units = 'metric'
if self.diameter is not None:
self.diameter = metric(self.diameter)
def _hit(self):
self.hit_count += 1
def equivalent(self, other):
"""
        Check whether the other tool is equivalent to this one, ignoring the
        tool number and other file-specific properties.
"""
if type(self) != type(other):
return False
return (self.diameter == other.diameter
and self.feed_rate == other.feed_rate
and self.retract_rate == other.retract_rate
and self.rpm == other.rpm
and self.depth_offset == other.depth_offset
and self.max_hit_count == other.max_hit_count
and self.plated == other.plated
and self.settings.units == other.settings.units)
def __repr__(self):
unit = 'in.' if self.settings.units == 'inch' else 'mm'
fmtstr = '<ExcellonTool %%02d: %%%d.%dg%%s dia.>' % self.settings.format
return fmtstr % (self.number, self.diameter, unit)
class ToolSelectionStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, **kwargs):
""" Create a ToolSelectionStmt from an excellon file line.
Parameters
----------
line : string
Line from an Excellon file
Returns
-------
tool_statement : ToolSelectionStmt
ToolSelectionStmt representation of `line.`
"""
line = line[1:]
compensation_index = None
        # up to 3 characters for tool number (Fritzing uses that)
if len(line) <= 3:
tool = int(line)
else:
tool = int(line[:2])
compensation_index = int(line[2:])
return cls(tool, compensation_index, **kwargs)
def __init__(self, tool, compensation_index=None, **kwargs):
super(ToolSelectionStmt, self).__init__(**kwargs)
tool = int(tool)
compensation_index = (int(compensation_index) if compensation_index
is not None else None)
self.tool = tool
self.compensation_index = compensation_index
def to_excellon(self, settings=None):
stmt = 'T%02d' % self.tool
if self.compensation_index is not None:
stmt += '%02d' % self.compensation_index
return stmt
class NextToolSelectionStmt(ExcellonStatement):
# TODO the statement exists outside of the context of the file,
    # so it is impossible to know that it is really the next tool
def __init__(self, cur_tool, next_tool, **kwargs):
"""
Select the next tool in the wheel.
Parameters
----------
cur_tool : the tool that is currently selected
        next_tool : the tool that is now selected
"""
super(NextToolSelectionStmt, self).__init__(**kwargs)
self.cur_tool = cur_tool
self.next_tool = next_tool
def to_excellon(self, settings=None):
stmt = 'M00'
return stmt
class ZAxisInfeedRateStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, **kwargs):
""" Create a ZAxisInfeedRate from an excellon file line.
Parameters
----------
line : string
Line from an Excellon file
Returns
-------
        z_axis_infeed_rate : ZAxisInfeedRateStmt
            ZAxisInfeedRateStmt representation of `line`.
"""
rate = int(line[1:])
return cls(rate, **kwargs)
def __init__(self, rate, **kwargs):
super(ZAxisInfeedRateStmt, self).__init__(**kwargs)
self.rate = rate
def to_excellon(self, settings=None):
return 'F%02d' % self.rate
class CoordinateStmt(ExcellonStatement):
@classmethod
def from_point(cls, point, mode=None):
stmt = cls(point[0], point[1])
if mode:
stmt.mode = mode
return stmt
@classmethod
def from_excellon(cls, line, settings, **kwargs):
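        # Illustrative example: "X0.500Y1.250" yields x=0.5, y=1.25 (values
        # with an explicit decimal point parse directly; others honour the
        # file's format and zero-suppression settings).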
x_coord = None
y_coord = None
if line[0] == 'X':
splitline = line.strip('X').split('Y')
x_coord = parse_gerber_value(splitline[0], settings.format,
settings.zero_suppression)
if len(splitline) == 2:
y_coord = parse_gerber_value(splitline[1], settings.format,
settings.zero_suppression)
else:
y_coord = parse_gerber_value(line.strip(' Y'), settings.format,
settings.zero_suppression)
c = cls(x_coord, y_coord, **kwargs)
c.units = settings.units
return c
def __init__(self, x=None, y=None, **kwargs):
super(CoordinateStmt, self).__init__(**kwargs)
self.x = x
self.y = y
self.mode = None
def to_excellon(self, settings):
stmt = ''
if self.mode == "ROUT":
stmt += "G00"
if self.mode == "LINEAR":
stmt += "G01"
if self.x is not None:
stmt += 'X%s' % write_gerber_value(self.x, settings.format,
settings.zero_suppression)
if self.y is not None:
stmt += 'Y%s' % write_gerber_value(self.y, settings.format,
settings.zero_suppression)
return stmt
def to_inch(self):
if self.units == 'metric':
self.units = 'inch'
if self.x is not None:
self.x = inch(self.x)
if self.y is not None:
self.y = inch(self.y)
def to_metric(self):
if self.units == 'inch':
self.units = 'metric'
if self.x is not None:
self.x = metric(self.x)
if self.y is not None:
self.y = metric(self.y)
def offset(self, x_offset=0, y_offset=0):
if self.x is not None:
self.x += x_offset
if self.y is not None:
self.y += y_offset
def __str__(self):
coord_str = ''
if self.x is not None:
coord_str += 'X: %g ' % self.x
if self.y is not None:
coord_str += 'Y: %g ' % self.y
return '<Coordinate Statement: %s>' % coord_str
class RepeatHoleStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, settings, **kwargs):
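        # Illustrative example: "R4X0.5Y0.5" repeats the previous hit 4 times,
        # stepping +0.5 in X and Y on each repeat (decimal values parse directly).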
        match = re.compile(r'R(?P<rcount>[0-9]*)X?(?P<xdelta>[+\-]?\d*\.?\d*)?Y?'
                           r'(?P<ydelta>[+\-]?\d*\.?\d*)?').match(line)
        stmt = match.groupdict()
        count = int(stmt['rcount'])
        xdelta = (parse_gerber_value(stmt['xdelta'], settings.format,
                                     settings.zero_suppression)
                  if stmt['xdelta'] != '' else None)
        ydelta = (parse_gerber_value(stmt['ydelta'], settings.format,
                                     settings.zero_suppression)
                  if stmt['ydelta'] != '' else None)
c = cls(count, xdelta, ydelta, **kwargs)
c.units = settings.units
return c
def __init__(self, count, xdelta=0.0, ydelta=0.0, **kwargs):
super(RepeatHoleStmt, self).__init__(**kwargs)
self.count = count
self.xdelta = xdelta
self.ydelta = ydelta
def to_excellon(self, settings):
stmt = 'R%d' % self.count
if self.xdelta is not None and self.xdelta != 0.0:
stmt += 'X%s' % write_gerber_value(self.xdelta, settings.format,
settings.zero_suppression)
if self.ydelta is not None and self.ydelta != 0.0:
stmt += 'Y%s' % write_gerber_value(self.ydelta, settings.format,
settings.zero_suppression)
return stmt
def to_inch(self):
if self.units == 'metric':
self.units = 'inch'
if self.xdelta is not None:
self.xdelta = inch(self.xdelta)
if self.ydelta is not None:
self.ydelta = inch(self.ydelta)
def to_metric(self):
if self.units == 'inch':
self.units = 'metric'
if self.xdelta is not None:
self.xdelta = metric(self.xdelta)
if self.ydelta is not None:
self.ydelta = metric(self.ydelta)
def __str__(self):
return '<Repeat Hole: %d times, offset X: %g Y: %g>' % (
self.count,
self.xdelta if self.xdelta is not None else 0,
self.ydelta if self.ydelta is not None else 0)
class CommentStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, **kwargs):
        return cls(line.lstrip(';'), **kwargs)
def __init__(self, comment, **kwargs):
super(CommentStmt, self).__init__(**kwargs)
self.comment = comment
def to_excellon(self, settings=None):
return ';%s' % self.comment
class HeaderBeginStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(HeaderBeginStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'M48'
class HeaderEndStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(HeaderEndStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'M95'
class RewindStopStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(RewindStopStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return '%'
class ZAxisRoutPositionStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(ZAxisRoutPositionStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'M15'
class RetractWithClampingStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(RetractWithClampingStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'M16'
class RetractWithoutClampingStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(RetractWithoutClampingStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'M17'
class CutterCompensationOffStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(CutterCompensationOffStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'G40'
class CutterCompensationLeftStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(CutterCompensationLeftStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'G41'
class CutterCompensationRightStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(CutterCompensationRightStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'G42'
class EndOfProgramStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, settings, **kwargs):
        match = re.compile(r'M30X?(?P<x>\d*\.?\d*)?Y?'
                           r'(?P<y>\d*\.?\d*)?').match(line)
        stmt = match.groupdict()
        x = (parse_gerber_value(stmt['x'], settings.format,
                                settings.zero_suppression)
             if stmt['x'] != '' else None)
        y = (parse_gerber_value(stmt['y'], settings.format,
                                settings.zero_suppression)
             if stmt['y'] != '' else None)
c = cls(x, y, **kwargs)
c.units = settings.units
return c
def __init__(self, x=None, y=None, **kwargs):
super(EndOfProgramStmt, self).__init__(**kwargs)
self.x = x
self.y = y
def to_excellon(self, settings=None):
stmt = 'M30'
if self.x is not None:
stmt += 'X%s' % write_gerber_value(self.x)
if self.y is not None:
stmt += 'Y%s' % write_gerber_value(self.y)
return stmt
def to_inch(self):
if self.units == 'metric':
self.units = 'inch'
if self.x is not None:
self.x = inch(self.x)
if self.y is not None:
self.y = inch(self.y)
def to_metric(self):
if self.units == 'inch':
self.units = 'metric'
if self.x is not None:
self.x = metric(self.x)
if self.y is not None:
self.y = metric(self.y)
def offset(self, x_offset=0, y_offset=0):
if self.x is not None:
self.x += x_offset
if self.y is not None:
self.y += y_offset
class UnitStmt(ExcellonStatement):
@classmethod
def from_settings(cls, settings):
"""Create the unit statement from the FileSettings"""
return cls(settings.units, settings.zeros)
@classmethod
def from_excellon(cls, line, **kwargs):
units = 'inch' if 'INCH' in line else 'metric'
zeros = 'leading' if 'LZ' in line else 'trailing'
if '0000.00' in line:
format = (4, 2)
elif '000.000' in line:
format = (3, 3)
elif '00.0000' in line:
format = (2, 4)
else:
format = None
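        # Illustrative example: a header line like "INCH,LZ,0000.00" gives
        # units='inch', zeros='leading' and format=(4, 2).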
return cls(units, zeros, format, **kwargs)
def __init__(self, units='inch', zeros='leading', format=None, **kwargs):
super(UnitStmt, self).__init__(**kwargs)
self.units = units.lower()
self.zeros = zeros
self.format = format
def to_excellon(self, settings=None):
# TODO This won't export the invalid format statement if it exists
stmt = '%s,%s' % ('INCH' if self.units == 'inch' else 'METRIC',
'LZ' if self.zeros == 'leading'
else 'TZ')
return stmt
def to_inch(self):
self.units = 'inch'
def to_metric(self):
self.units = 'metric'
class IncrementalModeStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, **kwargs):
return cls('off', **kwargs) if 'OFF' in line else cls('on', **kwargs)
def __init__(self, mode='off', **kwargs):
super(IncrementalModeStmt, self).__init__(**kwargs)
if mode.lower() not in ['on', 'off']:
raise ValueError('Mode may be "on" or "off"')
self.mode = mode
def to_excellon(self, settings=None):
return 'ICI,%s' % ('OFF' if self.mode == 'off' else 'ON')
class VersionStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, **kwargs):
version = int(line.split(',')[1])
return cls(version, **kwargs)
def __init__(self, version=1, **kwargs):
super(VersionStmt, self).__init__(**kwargs)
version = int(version)
if version not in [1, 2]:
raise ValueError('Valid versions are 1 or 2')
self.version = version
def to_excellon(self, settings=None):
return 'VER,%d' % self.version
class FormatStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, **kwargs):
fmt = int(line.split(',')[1])
return cls(fmt, **kwargs)
def __init__(self, format=1, **kwargs):
super(FormatStmt, self).__init__(**kwargs)
format = int(format)
if format not in [1, 2]:
raise ValueError('Valid formats are 1 or 2')
self.format = format
def to_excellon(self, settings=None):
return 'FMAT,%d' % self.format
@property
def format_tuple(self):
return (self.format, 6 - self.format)
class LinkToolStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, **kwargs):
linked = [int(tool) for tool in line.split('/')]
return cls(linked, **kwargs)
def __init__(self, linked_tools, **kwargs):
super(LinkToolStmt, self).__init__(**kwargs)
self.linked_tools = [int(x) for x in linked_tools]
def to_excellon(self, settings=None):
return '/'.join([str(x) for x in self.linked_tools])
class MeasuringModeStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, **kwargs):
if not ('M71' in line or 'M72' in line):
raise ValueError('Not a measuring mode statement')
return cls('inch', **kwargs) if 'M72' in line else cls('metric', **kwargs)
def __init__(self, units='inch', **kwargs):
super(MeasuringModeStmt, self).__init__(**kwargs)
units = units.lower()
if units not in ['inch', 'metric']:
raise ValueError('units must be "inch" or "metric"')
self.units = units
def to_excellon(self, settings=None):
return 'M72' if self.units == 'inch' else 'M71'
def to_inch(self):
self.units = 'inch'
def to_metric(self):
self.units = 'metric'
class RouteModeStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(RouteModeStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'G00'
class LinearModeStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(LinearModeStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'G01'
class DrillModeStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(DrillModeStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'G05'
class AbsoluteModeStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(AbsoluteModeStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'G90'
class UnknownStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, **kwargs):
return cls(line, **kwargs)
def __init__(self, stmt, **kwargs):
super(UnknownStmt, self).__init__(**kwargs)
self.stmt = stmt
def to_excellon(self, settings=None):
return self.stmt
def __str__(self):
return "<Unknown Statement: %s>" % self.stmt
class SlotStmt(ExcellonStatement):
"""
G85 statement. Defines a slot created by multiple drills between two specified points.
    Format is two coordinates split by G85 in the middle, for example XnYnG85XnYn
"""
@classmethod
def from_points(cls, start, end):
return cls(start[0], start[1], end[0], end[1])
@classmethod
def from_excellon(cls, line, settings, **kwargs):
# Split the line based on the G85 separator
sub_coords = line.split('G85')
(x_start_coord, y_start_coord) = SlotStmt.parse_sub_coords(sub_coords[0], settings)
(x_end_coord, y_end_coord) = SlotStmt.parse_sub_coords(sub_coords[1], settings)
# Some files seem to specify only one of the coordinates
        if x_end_coord is None:
            x_end_coord = x_start_coord
        if y_end_coord is None:
            y_end_coord = y_start_coord
c = cls(x_start_coord, y_start_coord, x_end_coord, y_end_coord, **kwargs)
c.units = settings.units
return c
@staticmethod
def parse_sub_coords(line, settings):
x_coord = None
y_coord = None
if line[0] == 'X':
splitline = line.strip('X').split('Y')
x_coord = parse_gerber_value(splitline[0], settings.format,
settings.zero_suppression)
if len(splitline) == 2:
y_coord = parse_gerber_value(splitline[1], settings.format,
settings.zero_suppression)
else:
y_coord = parse_gerber_value(line.strip(' Y'), settings.format,
settings.zero_suppression)
return (x_coord, y_coord)
def __init__(self, x_start=None, y_start=None, x_end=None, y_end=None, **kwargs):
super(SlotStmt, self).__init__(**kwargs)
self.x_start = x_start
self.y_start = y_start
self.x_end = x_end
self.y_end = y_end
self.mode = None
def to_excellon(self, settings):
stmt = ''
if self.x_start is not None:
stmt += 'X%s' % write_gerber_value(self.x_start, settings.format,
settings.zero_suppression)
if self.y_start is not None:
stmt += 'Y%s' % write_gerber_value(self.y_start, settings.format,
settings.zero_suppression)
stmt += 'G85'
if self.x_end is not None:
stmt += 'X%s' % write_gerber_value(self.x_end, settings.format,
settings.zero_suppression)
if self.y_end is not None:
stmt += 'Y%s' % write_gerber_value(self.y_end, settings.format,
settings.zero_suppression)
return stmt
def to_inch(self):
if self.units == 'metric':
self.units = 'inch'
if self.x_start is not None:
self.x_start = inch(self.x_start)
if self.y_start is not None:
self.y_start = inch(self.y_start)
if self.x_end is not None:
self.x_end = inch(self.x_end)
if self.y_end is not None:
self.y_end = inch(self.y_end)
def to_metric(self):
if self.units == 'inch':
self.units = 'metric'
if self.x_start is not None:
self.x_start = metric(self.x_start)
if self.y_start is not None:
self.y_start = metric(self.y_start)
if self.x_end is not None:
self.x_end = metric(self.x_end)
if self.y_end is not None:
self.y_end = metric(self.y_end)
def offset(self, x_offset=0, y_offset=0):
if self.x_start is not None:
self.x_start += x_offset
if self.y_start is not None:
self.y_start += y_offset
if self.x_end is not None:
self.x_end += x_offset
if self.y_end is not None:
self.y_end += y_offset
def __str__(self):
start_str = ''
if self.x_start is not None:
start_str += 'X: %g ' % self.x_start
if self.y_start is not None:
start_str += 'Y: %g ' % self.y_start
end_str = ''
if self.x_end is not None:
end_str += 'X: %g ' % self.x_end
if self.y_end is not None:
end_str += 'Y: %g ' % self.y_end
return '<Slot Statement: %s to %s>' % (start_str, end_str)
def pairwise(iterator):
""" Iterate over list taking two elements at a time.
e.g. [1, 2, 3, 4, 5, 6] ==> [(1, 2), (3, 4), (5, 6)]
"""
a, b = itertools.tee(iterator)
itr = zip(itertools.islice(a, 0, None, 2), itertools.islice(b, 1, None, 2))
for elem in itr:
yield elem
|
|
import tables as T
import numpy as N
import subprocess
import threading
import SimpleXMLRPCServer
import SocketServer
import time,xmlrpclib,httplib
import cPickle as pickle
import ConfigParser
import sys, os
import imp
# xmlrpc client with a connection timeout
def Server(url, *args, **kwargs):
t = TimeoutTransport()
t.timeout = kwargs.get('timeout', 60)
if 'timeout' in kwargs:
del kwargs['timeout']
kwargs['transport'] = t
server = xmlrpclib.Server(url, *args, **kwargs)
return server
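# Usage sketch (URL, port and timeout are illustrative):
#     proxy = Server('http://localhost:8002', timeout=5)
# behaves like a plain xmlrpclib.Server proxy, except that the underlying
# HTTP connection gives up after 5 seconds instead of blocking indefinitely.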
class TimeoutTransport(xmlrpclib.Transport):
def make_connection(self, host):
conn = TimeoutHTTP(host)
conn.set_timeout(self.timeout)
return conn
class TimeoutHTTPConnection(httplib.HTTPConnection):
def connect(self):
httplib.HTTPConnection.connect(self)
self.sock.settimeout(self.timeout)
class TimeoutHTTP(httplib.HTTP):
_connection_class = TimeoutHTTPConnection
def set_timeout(self, timeout):
self._conn.timeout = timeout
# Create server
# SocketServer.ThreadingMixIn, SimpleXMLRPCServer.SimpleXMLRPCServer
#class ThreadingXMLRPCServer(SimpleXMLRPCServer.SimpleXMLRPCServer): pass
class ThreadingXMLRPCServer(SocketServer.ThreadingMixIn, SimpleXMLRPCServer.SimpleXMLRPCServer): pass
#class ThreadingXMLRPCServer(SocketServer.ThreadingTCPServer, SimpleXMLRPCServer.SimpleXMLRPCServer): pass
#class ThreadingXMLRPCServer(SocketServer.TCPServer, SimpleXMLRPCServer.SimpleXMLRPCDispatcher):
# def __init__(self, addr, requestHandler = SimpleXMLRPCServer.SimpleXMLRPCRequestHandler, logRequests = 0):
# self.logRequests = logRequests
# if sys.version_info[:2] < (2, 5):
# SimpleXMLRPCServer.SimpleXMLRPCDispatcher.__init__(self)
# else:
# SimpleXMLRPCServer.SimpleXMLRPCDispatcher.__init__(self, allow_none = False, encoding = None)
# SocketServer.ThreadingTCPServer.__init__(self, addr, requestHandler)
# pass
# The Damaris Logger Daemon
class DamarisLogger:
def __init__(self, data_file, max_puts_before_flush,port):
#print os.getcwd()
self.hdf = T.openFile(data_file,'a')
self.run_flag = True
self.server = ThreadingXMLRPCServer(("localhost", port), logRequests=0)
self.server.allow_reuse_address = True
self.server.allow_none = True
self.server.allow_introspection = True
        self.server.register_introspection_functions()
self.server.register_function(self.put_data)
self.server.register_function(self.get_data)
self.server.register_function(self.put_external_data)
self.server.register_function(self.unblock_client)
self.server.register_function(self.block_client)
self.server.register_function(self.occupied)
self.server.register_function(self.quit)
self.server.register_function(self.quit_client)
self.server.register_function(self.status)
self.server.register_function(self.register_client)
self.server.register_function(self.unregister_client)
self.server.register_function(self.quit_device)
self.server.register_function(self.server_run)
self.puts = 0
self.total_puts = 0
self.block_list = set()
self.quit_list = set()
self.quit_client_flag = False
self.client_list = set()
self.max_puts_before_flush = max_puts_before_flush
self.lock = threading.Lock()
def __del__(self):
try:
print "closing files ..."
self.hdf.flush()
self.hdf.close()
except:
print "Could not close files, sorry!"
def register_client(self,device):
self.client_list.add(device)
return None
def unregister_client(self,device):
self.client_list.remove(device)
return None
def flushing(self):
self.lock.acquire()
if self.puts >= self.max_puts_before_flush:
self.hdf.flush()
self.puts = 0
self.lock.release()
return None
def server_run(self):
return self.run_flag
def quit(self):
#self.run_flag = False
self.quit_client_flag = True
i = 0
while (i < 10) and not len(self.client_list) == 0:
i += 1
print "Clients still running ...", self.client_list
time.sleep(0.5)
self.__del__()
self.run_flag = False
return None
def status(self):
self.flushing()
return self.total_puts, list(self.client_list), list(self.block_list)
def quit_device(self,device):
"""
Tell client 'device' to quit
"""
self.quit_list.add(device)
return None
######## client controls ###############
def quit_client(self, device):
"""
Should the client 'device' quit?
"""
if device in self.quit_list:
self.quit_list.remove(device)
return True
if self.quit_client_flag:
return self.quit_client_flag
def occupied(self,device):
if device in self.block_list:
return True
else:
return False
def block_client(self,device):
self.block_list.add(device)
return None
def unblock_client(self,device):
self.block_list.remove(device)
return None
########################################
def run(self):
"""
        Run the XML-RPC server loop, handling requests until run_flag is cleared.
"""
print "Server up and running ..."
while self.run_flag:
self.server.handle_request()
def put_external_data(self, device, command):
"""
Reads data from an external (shell) command
"""
record_time = time.time()
external = subprocess.Popen(command, shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
in_data = external.stdout.readline()
# result = map(float, in_data.strip().split(' '))
# Much faster!
result = N.fromstring("".join(in_data), sep=' ')
self.put_data(device, record_time, result)
return 1
def put_data(self, device, record_time, data):
"""
        Put in data via an external program. The program should connect to the
        DAMARISd daemon and issue this command.
        record_time is a list of timestamps, e.g. from time.time().
        data is a cPickle.dumps(numpy.array); each row corresponds to a
        time stamp.
"""
def make_EArray(device,cols):
try:
self.hdf.root._g_checkHasChild(device) # throws NoSuchNodeError if not existing
device_array = self.hdf.getNode('/',device)
except T.exceptions.NoSuchNodeError:
device_array = self.hdf.createEArray('/',device,
atom=T.Float64Atom(),
shape=(0,cols+1))
return device_array
# Check the type of record_time, we need one dimensional data
if type(record_time) != list:
# if a numpy array
if type(record_time) == N.ndarray:
                if record_time.ndim > 1:
# we need one dimensional time data, thus flatten
record_time = record_time.flatten()
# not a numpy array, make a list out of it
#else:
# record_time = list(record_time)
# Data comes from outside client via xmlrpclib
if type(data) == str:
data = pickle.loads(data)
# rows, cols for creating the pytables array
rows_cols = data.shape
# is one dimension
if len(rows_cols) == 1:
rows = 1
cols = rows_cols[0]
        # is two dimensional
        elif len(rows_cols) == 2:
            rows,cols = rows_cols
        else:
            # anything else is unexpected; todo error handling
            pass
device_array = make_EArray(device,cols)
rows = N.empty((rows,cols+1))
rows[:,0]=record_time
rows[:,1:]=data
try:
device_array.append(rows)
except:
print "ERROR! Quitting client: %s"%device
self.quit_device(device)
# update puts, flush if necessary
self.lock.acquire()
self.total_puts += 1
self.puts += 1
self.lock.release()
self.flushing()
return None
def search(self,anarray,value,start=None,stop=None):
"""
Binary search, needs ~ 23 iterations for 12e6 records
"""
Found = False
        if start is None:
start = 0
        if stop is None:
stop = anarray.shape[0]
bisect = (stop+start)/2
current_point = anarray.read(start=bisect)[:,0]
while not Found:
if value < current_point:
stop = bisect-1
elif value > current_point:
start = bisect+1
# can't go higher
if start >= anarray.shape[0]:
start=bisect
bisect = (stop+start)/2
if bisect >= anarray.shape[0]:
bisect = anarray.shape[0]
if bisect < 0:
bisect = 0
current_point = anarray.read(start=bisect)[:,0]
if start >= stop:
Found = True
return bisect
def get_data(self, device, start_time, stop_time):
self.hdf.flush()
device_array = self.hdf.getNode('/',device)
# select the values in timeframe
# This is very inefficient
#tmp = [ x[:] for x in device_array.iterrows()
# if (start_time <= x[0] <= stop_time) ]
#values_to_return = N.empty((len(tmp), len(tmp[0])))
#for i,row in enumerate(tmp):
# values_to_return[i,:]=row
# using binary search
start_point = self.search(device_array,start_time)
# don't search the whole thing again, start at start_point
end_point = self.search(device_array,stop_time, start = start_point-1)
#print start_point, end_point,device
if start_point == end_point:
values_to_return = device_array.read(start_point)
else:
values_to_return = device_array.read(start_point, end_point)
return_object = pickle.dumps(values_to_return, protocol=0)
return return_object
def damarisd_daemon():
s = "%s Starting server"%(time.ctime())
print
print len(s)*"#"
print s
print len(s)*"#"
############ GENERAL CONFIGURATION PART ###################
config = ConfigParser.ConfigParser()
config.read('damarisd.config')
devices = [sec for sec in config.sections()]# if sec!="general"]
data_file = config.defaults()['data_file']
max_puts_before_flush = int(config.defaults()['max_puts_before_flush'])
port = int(config.defaults()['port'])
# log the config
for sec in config.sections():
print "Device: %s"%sec
for pair in sorted(config.items(sec)):
keys,val = pair
print "\t%s = %s"%pair
############## SERVER PART ######################
damarisd_server = DamarisLogger(data_file,max_puts_before_flush,port)
daemon = threading.Thread(target = damarisd_server.run)
daemon.setDaemon(True)
daemon.start()
# move this to background daemon.run()
time.sleep(0.1)
server = Server('http://localhost:%i'%port)
######### CLIENT PART ############
def shelldevice_thread(device, command,rate):
Quit = False
#server = xmlrpclib.Server('http://localhost:%i'%port)
server.register_client(device)
while not Quit:
Quit = bool(server.quit_client(device))
if server.occupied(device) == 0:
server.put_external_data(device, command)
time.sleep(rate)
server.unregister_client(device)
def pydevice_thread(device, module, arg_list):
"""
Python module interface.
All the logic has to be in the client module.
On start:
a) server.register_client(device)
b) server.put_data
c) check server.occupied
d) check server.quit_client flag
On quitting:
a) last transmission
b) server.unregister_client(device)
"""
fm = imp.find_module(module)
mod = imp.load_module(module, fm[0],fm[1],fm[2])
mod.doit(server, device, arg_list)
# start the device logger
# distinguish between shell commands and python scripts
################### CLIENT CONFIGURATION ###########################
shelldevices = [dev for dev in devices if config.has_option(dev,'command')]
pydevices = [dev for dev in devices if config.has_option(dev,'module')]
for device in shelldevices:
command = config.get(device,'command')
rate = config.getfloat(device,'rate')
cmd = threading.Thread(target = shelldevice_thread, args = (device,command,rate))
cmd.setName("Thread_%s"%device)
cmd.setDaemon(True)
cmd.start()
for device in pydevices:
module = config.get(device,'module')
argument_list = eval(config.get(device,'args'))
#print argument_list
cmd = threading.Thread(target = pydevice_thread, args = (device,module,argument_list))
cmd.setName("Thread_%s"%device)
cmd.setDaemon(True)
cmd.start()
# endless loop
run = True
# server = xmlrpclib.Server('http://localhost:8002')
while run:
time.sleep(2)
try:
run = server.server_run()
except:
run = False
pass
# DAEMONIZE from chris python page
#!/usr/bin/env python
###########################################################################
# configure UID and GID of server
UID = 501
GID = 501
# configure these paths:
LOGFILE = '/Users/markusro/Projects/DAMARISd/damarisd.log'
PIDFILE = '/Users/markusro/Projects/DAMARISd/damarisd.pid'
# and let USERPROG be the main function of your project
USERPROG = damarisd_daemon
###########################################################################
#based on Juergen Hermanns http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
class Log:
"""file like for writes with auto flush after each write
to ensure that everything is logged, even during an
unexpected exit."""
def __init__(self, f):
self.f = f
def write(self, s):
self.f.write(s)
self.f.flush()
def main():
#change to data directory if needed
os.chdir("/Users/markusro/Projects/DAMARISd")
#redirect outputs to a logfile
sys.stdout = sys.stderr = Log(open(LOGFILE, 'a+'))
    #ensure that the daemon runs as a normal user
#os.setegid(GID) #set group first "pydaemon"
#os.seteuid(UID) #set user "pydaemon"
#start the user program here:
USERPROG()
if __name__ == "__main__":
# do the UNIX double-fork magic, see Stevens' "Advanced
# Programming in the UNIX Environment" for details (ISBN 0201563177)
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError, e:
print >>sys.stderr, "fork #1 failed: %d (%s)" % (e.errno, e.strerror)
sys.exit(1)
# decouple from parent environment
os.chdir("/") #don't prevent unmounting....
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
            # exit from the second parent; print the daemon PID before exiting
print "Daemon PID %d" % pid
open(PIDFILE,'w').write("%d"%pid)
sys.exit(0)
print "Daemon PID %d" % pid
except OSError, e:
print >>sys.stderr, "fork #2 failed: %d (%s)" % (e.errno, e.strerror)
sys.exit(1)
# start the daemon main loop
main()
|
|
#!/usr/bin/env python
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# Original version written by John Hoffman, heavily modified for different
# multitorrent architecture by Uoti Urpala (over 40% shorter than original)
import os
from cStringIO import StringIO
from traceback import print_exc
from BitTorrent import configfile
from BitTorrent.parsedir import parsedir
from BitTorrent.download import Multitorrent, Feedback
from BitTorrent.ConvertedMetainfo import ConvertedMetainfo
from BitTorrent import BTFailure
from threading import Event
from time import time
class LaunchMany(Feedback):
def __init__(self, config, output, configfile_key):
try:
self.config = config
self.output = output
self.configfile_key = configfile_key
self.torrent_dir = config['torrent_dir']
self.torrent_cache = {}
self.file_cache = {}
self.blocked_files = {}
self.torrent_list = []
self.downloads = {}
self.doneflag = Event()
self.hashcheck_queue = []
self.hashcheck_store = {}
self.hashcheck_current = None
self.multitorrent = Multitorrent(config, self.doneflag,
self.global_error)
self.rawserver = self.multitorrent.rawserver
self.rawserver.add_task(self.scan, 0)
self.rawserver.add_task(self.stats, 0)
try:
import signal
def handler(signum, frame):
self.rawserver.external_add_task(self.read_config, 0)
signal.signal(signal.SIGHUP, handler)
self.rawserver.install_sigint_handler()
except Exception, e:
self.output.message(_("Could not set signal handler: ") + str(e))
self.rawserver.listen_forever()
self.output.message(_("shutting down"))
for infohash in self.torrent_list:
self.output.message(_('dropped "%s"') %
self.torrent_cache[infohash]['path'])
torrent = self.downloads[infohash]
if torrent is not None:
torrent.shutdown()
except:
data = StringIO()
print_exc(file = data)
output.exception(data.getvalue())
def scan(self):
self.rawserver.add_task(self.scan, self.config['parse_dir_interval'])
r = parsedir(self.torrent_dir, self.torrent_cache,
self.file_cache, self.blocked_files,
self.output.message)
( self.torrent_cache, self.file_cache, self.blocked_files,
added, removed ) = r
for infohash, data in removed.items():
self.output.message(_('dropped "%s"') % data['path'])
self.remove(infohash)
for infohash, data in added.items():
self.output.message(_('added "%s"' ) % data['path'])
self.add(infohash, data)
def stats(self):
self.rawserver.add_task(self.stats, self.config['display_interval'])
data = []
for infohash in self.torrent_list:
cache = self.torrent_cache[infohash]
if self.config['display_path']:
name = cache['path']
else:
name = cache['name']
size = cache['length']
d = self.downloads[infohash]
progress = '0.0%'
peers = 0
seeds = 0
seedsmsg = "S"
dist = 0.0
uprate = 0.0
dnrate = 0.0
upamt = 0
dnamt = 0
t = 0
msg = ''
if d is None:
status = _("waiting for hash check")
else:
stats = d.get_status()
status = stats['activity']
progress = '%.1f%%' % (int(stats['fractionDone']*1000)/10.0)
if d.started and not d.closed:
s = stats
dist = s['numCopies']
if d.is_seed:
seeds = 0 # s['numOldSeeds']
seedsmsg = "s"
else:
if s['numSeeds'] + s['numPeers']:
t = stats['timeEst']
if t is None:
t = -1
if t == 0: # unlikely
t = 0.01
status = _("downloading")
else:
t = -1
status = _("connecting to peers")
seeds = s['numSeeds']
dnrate = stats['downRate']
peers = s['numPeers']
uprate = stats['upRate']
upamt = s['upTotal']
dnamt = s['downTotal']
if d.errors and (d.closed or d.errors[-1][0] + 300 > time()):
msg = d.errors[-1][2]
data.append(( name, status, progress, peers, seeds, seedsmsg, dist,
uprate, dnrate, upamt, dnamt, size, t, msg ))
stop = self.output.display(data)
if stop:
self.doneflag.set()
def remove(self, infohash):
self.torrent_list.remove(infohash)
if self.downloads[infohash] is not None:
self.downloads[infohash].shutdown()
self.was_stopped(infohash)
del self.downloads[infohash]
def add(self, infohash, data):
self.torrent_list.append(infohash)
self.downloads[infohash] = None
self.hashcheck_queue.append(infohash)
self.hashcheck_store[infohash] = data['metainfo']
self.check_hashcheck_queue()
def check_hashcheck_queue(self):
if self.hashcheck_current is not None or not self.hashcheck_queue:
return
self.hashcheck_current = self.hashcheck_queue.pop(0)
metainfo = self.hashcheck_store[self.hashcheck_current]
del self.hashcheck_store[self.hashcheck_current]
filename = self.determine_filename(self.hashcheck_current)
self.downloads[self.hashcheck_current] = self.multitorrent. \
start_torrent(ConvertedMetainfo(metainfo),
self.config, self, filename)
def determine_filename(self, infohash):
x = self.torrent_cache[infohash]
name = x['name']
savein = self.config['save_in']
isdir = not x['metainfo']['info'].has_key('length')
style = self.config['saveas_style']
if style == 4:
torrentname = os.path.split(x['path'][:-8])[1]
suggestedname = name
if torrentname == suggestedname:
style = 1
else:
style = 3
if style == 1 or style == 3:
if savein:
saveas = os.path.join(savein,x['file'][:-8]) # strip '.torrent'
else:
saveas = x['path'][:-8] # strip '.torrent'
if style == 3 and not isdir:
saveas = os.path.join(saveas, name)
else:
if savein:
saveas = os.path.join(savein, name)
else:
saveas = os.path.join(os.path.split(x['path'])[0], name)
return saveas
def was_stopped(self, infohash):
try:
self.hashcheck_queue.remove(infohash)
        except ValueError:
pass
else:
del self.hashcheck_store[infohash]
if self.hashcheck_current == infohash:
self.hashcheck_current = None
self.check_hashcheck_queue()
def global_error(self, level, text):
self.output.message(text)
def exchandler(self, s):
self.output.exception(s)
def read_config(self):
try:
newvalues = configfile.get_config(self.config, self.configfile_key)
except Exception, e:
self.output.message(_("Error reading config: ") + str(e))
return
self.output.message(_("Rereading config file"))
self.config.update(newvalues)
# The set_option call can potentially trigger something that kills
# the torrent (when writing this the only possibility is a change in
# max_files_open causing an IOError while closing files), and so
# the self.failed() callback can run during this loop.
for option, value in newvalues.iteritems():
self.multitorrent.set_option(option, value)
for torrent in self.downloads.values():
if torrent is not None:
for option, value in newvalues.iteritems():
torrent.set_option(option, value)
# rest are callbacks from torrent instances
def started(self, torrent):
self.hashcheck_current = None
self.check_hashcheck_queue()
def failed(self, torrent, is_external):
infohash = torrent.infohash
self.was_stopped(infohash)
if self.torrent_cache.has_key(infohash):
self.output.message('DIED: "'+self.torrent_cache[infohash]['path']+'"')
def exception(self, torrent, text):
self.exchandler(text)
|
|
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from rally.common import costilius
from rally.task.processing import charts
from tests.unit import test
CHARTS = "rally.task.processing.charts."
class ChartTestCase(test.TestCase):
class Chart(charts.Chart):
def _map_iteration_values(self, iteration):
return [("foo_" + k, iteration[k]) for k in ["a", "b"]]
@property
def bench_info(self):
return {"iterations_count": 42, "atomic": {"a": {}, "b": {}, "c": {}}}
def test___init__(self):
self.assertRaises(TypeError, charts.Chart, self.bench_info)
chart = self.Chart(self.bench_info)
self.assertEqual({}, chart._data)
self.assertEqual(42, chart.base_size)
self.assertEqual(1000, chart.zipped_size)
chart = self.Chart(self.bench_info, zipped_size=24)
self.assertEqual({}, chart._data)
self.assertEqual(42, chart.base_size)
self.assertEqual(24, chart.zipped_size)
@mock.patch(CHARTS + "utils.GraphZipper")
def test_add_iteration_and_render(self, mock_graph_zipper):
gzipper_a = mock.Mock(get_zipped_graph=lambda: "a_points")
gzipper_b = mock.Mock(get_zipped_graph=lambda: "b_points")
mock_graph_zipper.side_effect = [gzipper_a, gzipper_b]
chart = self.Chart(self.bench_info, 24)
self.assertEqual([], chart.render())
[chart.add_iteration(itr) for itr in [{"a": 1, "b": 2},
{"a": 3, "b": 4}]]
self.assertEqual([mock.call(42, 24), mock.call(42, 24)],
mock_graph_zipper.mock_calls)
self.assertEqual(2, len(chart._data))
self.assertEqual([mock.call(1), mock.call(3)],
chart._data["foo_a"].add_point.mock_calls)
self.assertEqual([mock.call(2), mock.call(4)],
chart._data["foo_b"].add_point.mock_calls)
self.assertEqual([("foo_a", "a_points"), ("foo_b", "b_points")],
chart.render())
def test__fix_atomic_actions(self):
chart = self.Chart(self.bench_info)
self.assertEqual(
{"atomic_actions": {"a": 5, "b": 6, "c": 0}},
chart._fix_atomic_actions({"atomic_actions": {"a": 5, "b": 6}}))
class MainStackedAreaChartTestCase(test.TestCase):
def test_add_iteration_and_render(self):
chart = charts.MainStackedAreaChart({"iterations_count": 3,
"iterations_failed": 0}, 10)
self.assertIsInstance(chart, charts.Chart)
[chart.add_iteration(itr) for itr in (
{"duration": 1.1, "idle_duration": 2.2, "error": []},
{"error": [], "duration": 1.1, "idle_duration": 0.5},
{"duration": 1.3, "idle_duration": 3.4, "error": []})]
expected = [("duration", [[1, 1.1], [2, 1.1], [3, 1.3]]),
("idle_duration", [[1, 2.2], [2, 0.5], [3, 3.4]])]
self.assertEqual(expected, chart.render())
def test_add_iteration_and_render_with_failed_iterations(self):
chart = charts.MainStackedAreaChart({"iterations_count": 3,
"iterations_failed": 2}, 10)
self.assertIsInstance(chart, charts.Chart)
[chart.add_iteration(itr) for itr in (
{"duration": 1.1, "idle_duration": 2.2, "error": []},
{"error": ["foo_err"], "duration": 1.1, "idle_duration": 0.5},
{"duration": 1.3, "idle_duration": 3.4, "error": ["foo_err"]})]
expected = [("duration", [[1, 1.1], [2, 0], [3, 0]]),
("idle_duration", [[1, 2.2], [2, 0], [3, 0]]),
("failed_duration", [[1, 0], [2, 1.6], [3, 4.7]])]
self.assertEqual(expected, chart.render())
class AtomicStackedAreaChartTestCase(test.TestCase):
def test_add_iteration_and_render(self):
iterations = (
{"atomic_actions": {"foo": 1.1}, "error": []},
{"atomic_actions": {"foo": 1.1, "bar": 1.2},
"error": [], "duration": 40, "idle_duration": 2},
{"atomic_actions": {"bar": 1.2},
"error": [], "duration": 5.5, "idle_duration": 2.5})
expected = [("bar", [[1, 0], [2, 1.2], [3, 1.2]]),
("foo", [[1, 1.1], [2, 1.1], [3, 0]])]
chart = charts.AtomicStackedAreaChart(
{"iterations_count": 3, "iterations_failed": 0,
"atomic": {"foo": {}, "bar": {}}}, 10)
self.assertIsInstance(chart, charts.Chart)
[chart.add_iteration(iteration) for iteration in iterations]
self.assertEqual(expected, sorted(chart.render()))
def test_add_iteration_and_render_with_failed_iterations(self):
iterations = (
{"atomic_actions": {"foo": 1.1}, "error": []},
{"atomic_actions": {"foo": 1.1, "bar": 1.2},
"error": ["foo_err"], "duration": 40, "idle_duration": 2},
{"atomic_actions": {"bar": 1.2},
"error": ["foo_err"], "duration": 5.5, "idle_duration": 2.5})
expected = [("bar", [[1, 0], [2, 1.2], [3, 1.2]]),
("failed_duration", [[1, 0], [2, 39.7], [3, 6.8]]),
("foo", [[1, 1.1], [2, 1.1], [3, 0]])]
chart = charts.AtomicStackedAreaChart(
{"iterations_count": 3, "iterations_failed": 2,
"atomic": {"foo": {}, "bar": {}}}, 10)
self.assertIsInstance(chart, charts.Chart)
[chart.add_iteration(iteration) for iteration in iterations]
self.assertEqual(expected, sorted(chart.render()))
class OutputStackedAreaChartTestCase(test.TestCase):
def test_add_iteration_and_render(self):
chart = charts.OutputStackedAreaChart(
{"iterations_count": 3, "output_names": ["foo", "bar"]}, 10)
self.assertIsInstance(chart, charts.Chart)
[chart.add_iteration({"scenario_output": {"data": x}})
for x in ({"foo": 1.1, "bar": 1.2}, {"foo": 1.3}, {"bar": 1.4})]
expected = [("bar", [[1, 1.2], [2, 0], [3, 1.4]]),
("foo", [[1, 1.1], [2, 1.3], [3, 0]])]
self.assertEqual(expected, sorted(chart.render()))
class AvgChartTestCase(test.TestCase):
class AvgChart(charts.AvgChart):
def _map_iteration_values(self, iteration):
return iteration["foo"].items()
def test_add_iteration_and_render(self):
self.assertRaises(TypeError, charts.AvgChart, {"iterations_count": 3})
chart = self.AvgChart({"iterations_count": 3})
self.assertIsInstance(chart, charts.AvgChart)
[chart.add_iteration({"foo": x}) for x in ({"a": 1.3, "b": 4.3},
{"a": 2.4, "b": 5.4},
{"a": 3.5, "b": 7.7})]
self.assertEqual([("a", 2.4), ("b", 5.8)], sorted(chart.render()))
class AtomicAvgChartTestCase(test.TestCase):
def test_add_iteration_and_render(self):
chart = charts.AtomicAvgChart({"iterations_count": 3,
"atomic": {"foo": {}, "bar": {}}})
self.assertIsInstance(chart, charts.AvgChart)
[chart.add_iteration({"atomic_actions": costilius.OrderedDict(a)})
for a in ([("foo", 2), ("bar", 5)], [("foo", 4)], [("bar", 7)])]
self.assertEqual([("bar", 4.0), ("foo", 2.0)], sorted(chart.render()))
@ddt.ddt
class LoadProfileChartTestCase(test.TestCase):
@ddt.data({"count": 5, "load_duration": 63, "tstamp_start": 12345,
"kwargs": {"scale": 10}, "data": [
(12345, 4.2, False), (12347, 42, False), (12349, 10, True),
(12351, 5.5, False), (12353, 0.42, False)],
"expected": [("parallel iterations", [
[6.0, 3], [12.0, 3], [18.0, 1], [24.0, 1], [30.0, 1],
[36.0, 1], [42.0, 1], [48.0, 1], [54.0, 0], [63, 0]])]},
{"count": 5, "load_duration": 63, "tstamp_start": 12345,
"kwargs": {"scale": 8, "name": "Custom text"}, "data": [
(12345, 4.2, False), (12347, 42, False), (12349, 10, True),
(12351, 5.5, False), (12353, 0.42, False)],
"expected": [("Custom text", [
[8.0, 4], [16.0, 3], [24.0, 1], [32.0, 1], [40.0, 1],
[48.0, 1], [56.0, 0], [63, 0]])]},
{"count": 0, "load_duration": 0, "tstamp_start": 12345,
"kwargs": {"scale": 8}, "data": [],
"expected": [("parallel iterations", [[0, 0]])]})
@ddt.unpack
def test_add_iteration_and_render(self, count, load_duration,
tstamp_start, kwargs, data, expected):
chart = charts.LoadProfileChart(
{"iterations_count": count,
"load_duration": load_duration, "tstamp_start": tstamp_start},
**kwargs)
self.assertIsInstance(chart, charts.Chart)
[chart.add_iteration({"timestamp": t, "duration": d, "error": e})
for t, d, e in data]
self.assertEqual(expected, chart.render())
@ddt.ddt
class HistogramChartTestCase(test.TestCase):
class HistogramChart(charts.HistogramChart):
def __init__(self, benchmark_info):
super(HistogramChartTestCase.HistogramChart,
self).__init__(benchmark_info)
self._data["bar"] = {"views": self._init_views(1.2, 4.2),
"disabled": None}
def _map_iteration_values(self, iteration):
return iteration["foo"].items()
def test_add_iteration_and_render(self):
self.assertRaises(TypeError, charts.HistogramChart,
{"iterations_count": 3})
chart = self.HistogramChart({"iterations_count": 3})
self.assertIsInstance(chart, charts.HistogramChart)
[chart.add_iteration({"foo": x}) for x in ({"bar": 1.2}, {"bar": 2.4},
{"bar": 4.2})]
expected = [[{"disabled": None, "key": "bar",
"values": [{"x": 2.7, "y": 2}, {"x": 4.2, "y": 1}],
"view": "Square Root Choice"},
{"disabled": None, "key": "bar",
"values": [{"x": 2.2, "y": 1}, {"x": 3.2, "y": 1},
{"x": 4.2, "y": 1}],
"view": "Sturges Formula"},
{"disabled": None,
"key": "bar",
"values": [{"x": 2.2, "y": 1}, {"x": 3.2, "y": 1},
{"x": 4.2, "y": 1}],
"view": "Rice Rule"}]]
self.assertEqual(expected, chart.render())
@ddt.data(
{"base_size": 2, "min_value": 1, "max_value": 4,
"expected": [{"bins": 2, "view": "Square Root Choice",
"x": [2.5, 4.0], "y": [0, 0]},
{"bins": 2, "view": "Sturges Formula",
"x": [2.5, 4.0], "y": [0, 0]},
{"bins": 3, "view": "Rice Rule",
"x": [2.0, 3.0, 4.0], "y": [0, 0, 0]}]},
{"base_size": 100, "min_value": 27, "max_value": 42,
"expected": [
{"bins": 10, "view": "Square Root Choice",
"x": [28.5, 30.0, 31.5, 33.0, 34.5, 36.0, 37.5, 39.0, 40.5,
42.0], "y": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]},
{"bins": 8, "view": "Sturges Formula",
"x": [28.875, 30.75, 32.625, 34.5, 36.375, 38.25, 40.125,
42.0], "y": [0, 0, 0, 0, 0, 0, 0, 0]},
{"bins": 10, "view": "Rice Rule",
"x": [28.5, 30.0, 31.5, 33.0, 34.5, 36.0, 37.5, 39.0, 40.5,
42.0], "y": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}]})
@ddt.unpack
def test_views(self, base_size=None, min_value=None, max_value=None,
expected=None):
chart = self.HistogramChart({"iterations_count": base_size})
self.assertEqual(expected, chart._init_views(min_value, max_value))
class MainHistogramChartTestCase(test.TestCase):
def test_add_iteration_and_render(self):
chart = charts.MainHistogramChart(
{"iterations_count": 3, "min_duration": 2, "max_duration": 7})
self.assertIsInstance(chart, charts.HistogramChart)
[chart.add_iteration(itr) for itr in (
{"duration": 1.1, "idle_duration": 2.2, "error": None},
{"error": True},
{"duration": 1.3, "idle_duration": 3.4, "error": None})]
expected = [
{"disabled": None, "key": "task", "view": "Square Root Choice",
"values": [{"x": 4.5, "y": 3}, {"x": 7.0, "y": 0}]},
{"disabled": None, "key": "task", "view": "Sturges Formula",
"values": [{"x": 3.666666666666667, "y": 3},
{"x": 5.333333333333334, "y": 0},
{"x": 7.0, "y": 0}]},
{"disabled": None, "key": "task", "view": "Rice Rule",
"values": [{"x": 3.666666666666667, "y": 3},
{"x": 5.333333333333334, "y": 0},
{"x": 7.0, "y": 0}]}]
self.assertEqual([expected], chart.render())
class AtomicHistogramChartTestCase(test.TestCase):
def test_add_iteration_and_render(self):
chart = charts.AtomicHistogramChart(
{"iterations_count": 3,
"atomic": costilius.OrderedDict(
[("foo", {"min_duration": 1.6, "max_duration": 2.8}),
("bar", {"min_duration": 3.1, "max_duration": 5.5})])})
self.assertIsInstance(chart, charts.HistogramChart)
[chart.add_iteration({"atomic_actions": a})
for a in ({"foo": 1.6, "bar": 3.1}, {"foo": 2.8}, {"bar": 5.5})]
expected = [
[{"disabled": 0, "key": "foo", "view": "Square Root Choice",
"values": [{"x": 2.2, "y": 2}, {"x": 2.8, "y": 1}]},
{"disabled": 0, "key": "foo", "view": "Sturges Formula",
"values": [{"x": 2.0, "y": 2}, {"x": 2.4, "y": 0},
{"x": 2.8, "y": 1}]},
{"disabled": 0, "key": "foo", "view": "Rice Rule",
"values": [{"x": 2.0, "y": 2}, {"x": 2.4, "y": 0},
{"x": 2.8, "y": 1}]}],
[{"disabled": 1, "key": "bar", "view": "Square Root Choice",
"values": [{"x": 4.3, "y": 2}, {"x": 5.5, "y": 1}]},
{"disabled": 1, "key": "bar", "view": "Sturges Formula",
"values": [{"x": 3.9, "y": 2}, {"x": 4.7, "y": 0},
{"x": 5.5, "y": 1}]},
{"disabled": 1, "key": "bar", "view": "Rice Rule",
"values": [{"x": 3.9, "y": 2}, {"x": 4.7, "y": 0},
{"x": 5.5, "y": 1}]}]]
self.assertEqual(expected, chart.render())
MAIN_STATS_TABLE_COLUMNS = ["Action", "Min (sec)", "Median (sec)",
"90%ile (sec)", "95%ile (sec)", "Max (sec)",
"Avg (sec)", "Success", "Count"]
def generate_iteration(duration, error, *args):
return {
"atomic_actions": costilius.OrderedDict(args),
"duration": duration,
"error": error
}
@ddt.ddt
class MainStatsTableTestCase(test.TestCase):
@ddt.data(
{
"info": {
"iterations_count": 1,
"atomic": costilius.OrderedDict([("foo", {}), ("bar", {})])
},
"data": [
generate_iteration(10.0, False, ("foo", 1.0), ("bar", 2.0))
],
"expected": {
"cols": MAIN_STATS_TABLE_COLUMNS,
"rows": [
["foo", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, "100.0%", 1],
["bar", 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, "100.0%", 1],
["total", 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, "100.0%", 1],
]
}
},
{
"info": {"iterations_count": 2, "atomic": {"foo": {}}},
"data": [
generate_iteration(10.0, True, ("foo", 1.0)),
generate_iteration(10.0, True, ("foo", 2.0))
],
"expected": {
"cols": MAIN_STATS_TABLE_COLUMNS,
"rows": [
["foo", "n/a", "n/a", "n/a", "n/a", "n/a", "n/a", "n/a",
2],
["total", "n/a", "n/a", "n/a", "n/a", "n/a", "n/a", "n/a",
2],
]
}
},
{
"info": {"iterations_count": 2, "atomic": {"foo": {}}},
"data": [
generate_iteration(10.0, False, ("foo", 1.0)),
generate_iteration(20.0, True, ("foo", 2.0))
],
"expected": {
"cols": MAIN_STATS_TABLE_COLUMNS,
"rows": [
["foo", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, "50.0%", 2],
["total", 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, "50.0%", 2]
]
}
},
{
"info": {
"iterations_count": 4,
"atomic": costilius.OrderedDict([("foo", {}), ("bar", {})])
},
"data": [
generate_iteration(10.0, False, ("foo", 1.0), ("bar", 4.0)),
generate_iteration(20.0, False, ("foo", 2.0), ("bar", 4.0)),
generate_iteration(30.0, False, ("foo", 3.0), ("bar", 4.0)),
generate_iteration(40.0, True, ("foo", 4.0), ("bar", 4.0))
],
"expected": {
"cols": MAIN_STATS_TABLE_COLUMNS,
"rows": [
["foo", 1.0, 2.0, 2.8, 2.9, 3.0, 2.0, "75.0%", 4],
["bar", 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, "75.0%", 4],
["total", 10.0, 20.0, 28.0, 29.0, 30.0, 20.0, "75.0%", 4]
]
}
},
{
"info": {
"iterations_count": 0,
"atomic": costilius.OrderedDict()
},
"data": [],
"expected": {
"cols": MAIN_STATS_TABLE_COLUMNS,
"rows": [
["total", "n/a", "n/a", "n/a", "n/a", "n/a", "n/a", "n/a",
0]
]
}
}
)
@ddt.unpack
def test_add_iteration_and_render(self, info, data, expected):
table = charts.MainStatsTable(info)
for el in data:
table.add_iteration(el)
self.assertEqual(expected, table.render())
|
|
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
# Author: Mauro Soria
import os
from queue import Queue
import time
import sys
import gc
from threading import Lock
from lib.connection import Requester, RequestException
from lib.core import Dictionary, Fuzzer, ReportManager
from lib.reports import JSONReport, PlainTextReport, SimpleReport
from lib.utils import FileUtils
class SkipTargetInterrupt(Exception):
pass
MAYOR_VERSION = 0
MINOR_VERSION = 3
REVISION = 8
VERSION = {
"MAYOR_VERSION": MAYOR_VERSION,
"MINOR_VERSION": MINOR_VERSION,
"REVISION": REVISION
}
class Controller(object):
def __init__(self, script_path, arguments, output):
global VERSION
PROGRAM_BANNER = open(FileUtils.buildPath(script_path, "lib", "controller", "banner.txt")).read().format(
**VERSION)
self.script_path = script_path
self.exit = False
self.arguments = arguments
self.output = output
self.savePath = self.script_path
if self.arguments.saveHome:
savePath = self.getSavePath()
if not FileUtils.exists(savePath):
FileUtils.createDirectory(savePath)
if FileUtils.exists(savePath) and not FileUtils.isDir(savePath):
                    self.output.error('Cannot use {} because it is a file; it should be a directory'.format(savePath))
exit(1)
if not FileUtils.canWrite(savePath):
self.output.error('Directory {} is not writable'.format(savePath))
exit(1)
logs = FileUtils.buildPath(savePath, "logs")
if not FileUtils.exists(logs):
FileUtils.createDirectory(logs)
reports = FileUtils.buildPath(savePath, "reports")
if not FileUtils.exists(reports):
FileUtils.createDirectory(reports)
self.savePath = savePath
self.reportsPath = FileUtils.buildPath(self.savePath, "logs")
self.blacklists = self.getBlacklists()
self.fuzzer = None
self.excludeStatusCodes = self.arguments.excludeStatusCodes
self.recursive = self.arguments.recursive
self.suppressEmpty = self.arguments.suppressEmpty
self.directories = Queue()
self.excludeSubdirs = (arguments.excludeSubdirs if arguments.excludeSubdirs is not None else [])
self.output.header(PROGRAM_BANNER)
self.dictionary = Dictionary(self.arguments.wordlist, self.arguments.extensions,
self.arguments.lowercase, self.arguments.forceExtensions)
self.printConfig()
self.errorLog = None
self.errorLogPath = None
self.errorLogLock = Lock()
self.batch = False
self.batchSession = None
self.setupErrorLogs()
self.output.newLine("\nError Log: {0}".format(self.errorLogPath))
if self.arguments.autoSave and len(self.arguments.urlList) > 1:
self.setupBatchReports()
self.output.newLine("\nAutoSave path: {0}".format(self.batchDirectoryPath))
if self.arguments.useRandomAgents:
self.randomAgents = FileUtils.getLines(FileUtils.buildPath(script_path, "db", "user-agents.txt"))
try:
for url in self.arguments.urlList:
try:
gc.collect()
self.reportManager = ReportManager()
self.currentUrl = url
self.output.target(self.currentUrl)
try:
self.requester = Requester(url, cookie=self.arguments.cookie,
useragent=self.arguments.useragent, maxPool=self.arguments.threadsCount,
maxRetries=self.arguments.maxRetries, delay=self.arguments.delay, timeout=self.arguments.timeout,
ip=self.arguments.ip, proxy=self.arguments.proxy,
redirect=self.arguments.redirect,
requestByHostname=self.arguments.requestByHostname)
self.requester.request("/")
except RequestException as e:
self.output.error(e.args[0]['message'])
raise SkipTargetInterrupt
if self.arguments.useRandomAgents:
self.requester.setRandomAgents(self.randomAgents)
for key, value in arguments.headers.items():
self.requester.setHeader(key, value)
# Initialize directories Queue with start Path
self.basePath = self.requester.basePath
if self.arguments.scanSubdirs is not None:
for subdir in self.arguments.scanSubdirs:
self.directories.put(subdir)
else:
self.directories.put('')
self.setupReports(self.requester)
matchCallbacks = [self.matchCallback]
notFoundCallbacks = [self.notFoundCallback]
errorCallbacks = [self.errorCallback, self.appendErrorLog]
self.fuzzer = Fuzzer(self.requester, self.dictionary, testFailPath=self.arguments.testFailPath,
threads=self.arguments.threadsCount, matchCallbacks=matchCallbacks,
notFoundCallbacks=notFoundCallbacks, errorCallbacks=errorCallbacks)
self.wait()
except SkipTargetInterrupt:
continue
finally:
self.reportManager.save()
except KeyboardInterrupt:
self.output.error('\nCanceled by the user')
exit(0)
finally:
if not self.errorLog.closed:
self.errorLog.close()
self.reportManager.close()
self.output.warning('\nTask Completed')
def printConfig(self):
self.output.config(', '.join(self.arguments.extensions), str(self.arguments.threadsCount),
str(len(self.dictionary)))
def getSavePath(self):
basePath = None
dirPath = None
basePath = os.path.expanduser('~')
if os.name == 'nt':
dirPath = "dirsearch"
else:
dirPath = ".dirsearch"
return FileUtils.buildPath(basePath, dirPath)
def getBlacklists(self):
blacklists = {}
for status in [400, 403, 500]:
blacklistFileName = FileUtils.buildPath(self.script_path, 'db')
blacklistFileName = FileUtils.buildPath(blacklistFileName, '{}_blacklist.txt'.format(status))
if not FileUtils.canRead(blacklistFileName):
# Skip if cannot read file
continue
blacklists[status] = []
for line in FileUtils.getLines(blacklistFileName):
# Skip comments
if line.lstrip().startswith('#'):
continue
blacklists[status].append(line)
return blacklists
def setupErrorLogs(self):
fileName = "errors-{0}.log".format(time.strftime('%y-%m-%d_%H-%M-%S'))
        self.errorLogPath = FileUtils.buildPath(self.savePath, "logs", fileName)
self.errorLog = open(self.errorLogPath, "w")
def setupBatchReports(self):
self.batch = True
self.batchSession = "BATCH-{0}".format(time.strftime('%y-%m-%d_%H-%M-%S'))
self.batchDirectoryPath = FileUtils.buildPath(self.savePath, "reports", self.batchSession)
if not FileUtils.exists(self.batchDirectoryPath):
FileUtils.createDirectory(self.batchDirectoryPath)
if not FileUtils.exists(self.batchDirectoryPath):
self.output.error("Couldn't create batch folder {}".format(self.batchDirectoryPath))
sys.exit(1)
if FileUtils.canWrite(self.batchDirectoryPath):
FileUtils.createDirectory(self.batchDirectoryPath)
targetsFile = FileUtils.buildPath(self.batchDirectoryPath, "TARGETS.txt")
FileUtils.writeLines(targetsFile, self.arguments.urlList)
else:
self.output.error("Couldn't create batch folder {}.".format(self.batchDirectoryPath))
sys.exit(1)
def setupReports(self, requester):
if self.arguments.autoSave:
            basePath = ('/' if requester.basePath == '' else requester.basePath)
basePath = basePath.replace(os.path.sep, '.')[1:-1]
fileName = None
directoryPath = None
if self.batch:
fileName = requester.host
directoryPath = self.batchDirectoryPath
else:
                fileName = ('{}_'.format(basePath) if basePath != '' else '')
fileName += time.strftime('%y-%m-%d_%H-%M-%S')
directoryPath = FileUtils.buildPath(self.savePath,'reports', requester.host)
outputFile = FileUtils.buildPath(directoryPath, fileName)
if FileUtils.exists(outputFile):
i = 2
while FileUtils.exists(outputFile + "_" + str(i)):
i += 1
outputFile += "_" + str(i)
if not FileUtils.exists(directoryPath):
FileUtils.createDirectory(directoryPath)
if not FileUtils.exists(directoryPath):
self.output.error("Couldn't create reports folder {}".format(directoryPath))
sys.exit(1)
if FileUtils.canWrite(directoryPath):
report = None
if self.arguments.autoSaveFormat == 'simple':
report = SimpleReport(requester.host, requester.port, requester.protocol, requester.basePath,
outputFile)
                elif self.arguments.autoSaveFormat == 'json':
report = JSONReport(requester.host, requester.port, requester.protocol, requester.basePath,
outputFile)
else:
report = PlainTextReport(requester.host, requester.port, requester.protocol, requester.basePath,
outputFile)
self.reportManager.addOutput(report)
else:
self.output.error("Can't write reports to {}".format(directoryPath))
sys.exit(1)
if self.arguments.simpleOutputFile is not None:
self.reportManager.addOutput(SimpleReport(requester.host, requester.port, requester.protocol,
requester.basePath, self.arguments.simpleOutputFile))
if self.arguments.plainTextOutputFile is not None:
self.reportManager.addOutput(PlainTextReport(requester.host, requester.port, requester.protocol,
requester.basePath, self.arguments.plainTextOutputFile))
if self.arguments.jsonOutputFile is not None:
self.reportManager.addOutput(JSONReport(requester.host, requester.port, requester.protocol,
requester.basePath, self.arguments.jsonOutputFile))
def matchCallback(self, path):
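        # A path is only reported when its status code is not excluded, it is
        # not listed in the blacklist for that status code, and (when empty
        # responses are suppressed) its response body is not empty.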
self.index += 1
if path.status is not None:
if path.status not in self.excludeStatusCodes and (
self.blacklists.get(path.status) is None or path.path not in self.blacklists.get(
path.status)) and not (
self.suppressEmpty and (len(path.response.body) == 0)):
self.output.statusReport(path.path, path.response)
self.addDirectory(path.path)
self.reportManager.addPath(self.currentDirectory + path.path, path.status, path.response)
self.reportManager.save()
del path
def notFoundCallback(self, path):
self.index += 1
self.output.lastPath(path, self.index, len(self.dictionary))
del path
def errorCallback(self, path, errorMsg):
self.output.addConnectionError()
del path
def appendErrorLog(self, path, errorMsg):
with self.errorLogLock:
line = time.strftime('[%y-%m-%d %H:%M:%S] - ')
line += self.currentUrl + " - " + path + " - " + errorMsg
self.errorLog.write(os.linesep + line)
self.errorLog.flush()
def handleInterrupt(self):
self.output.warning('CTRL+C detected: Pausing threads, please wait...')
self.fuzzer.pause()
try:
while True:
msg = "[e]xit / [c]ontinue"
if not self.directories.empty():
msg += " / [n]ext"
if len(self.arguments.urlList) > 1:
msg += " / [s]kip target"
self.output.inLine(msg + ': ')
option = input()
if option.lower() == 'e':
self.exit = True
self.fuzzer.stop()
raise KeyboardInterrupt
elif option.lower() == 'c':
self.fuzzer.play()
return
elif not self.directories.empty() and option.lower() == 'n':
self.fuzzer.stop()
return
elif len(self.arguments.urlList) > 1 and option.lower() == 's':
raise SkipTargetInterrupt
else:
continue
        except KeyboardInterrupt:
self.exit = True
raise KeyboardInterrupt
def processPaths(self):
while True:
try:
while not self.fuzzer.wait(0.3):
continue
break
except (KeyboardInterrupt, SystemExit) as e:
self.handleInterrupt()
def wait(self):
while not self.directories.empty():
self.index = 0
self.currentDirectory = self.directories.get()
self.output.warning('[{1}] Starting: {0}'.format(self.currentDirectory, time.strftime('%H:%M:%S')))
self.fuzzer.requester.basePath = self.basePath + self.currentDirectory
self.output.basePath = self.basePath + self.currentDirectory
self.fuzzer.start()
self.processPaths()
return
def addDirectory(self, path):
if not self.recursive:
return False
if path.endswith('/'):
if path in [directory + '/' for directory in self.excludeSubdirs]:
return False
self.directories.put(self.currentDirectory + path)
return True
else:
return False
|
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from copy import copy
import json
import requests
from requests import HTTPError, RequestException
import os
import time
from mycroft.configuration import Configuration
from mycroft.configuration.config import DEFAULT_CONFIG, SYSTEM_CONFIG, \
USER_CONFIG
from mycroft.identity import IdentityManager, identity_lock
from mycroft.version import VersionManager
from mycroft.util import get_arch, connected, LOG
_paired_cache = False
class BackendDown(RequestException):
pass
class InternetDown(RequestException):
pass
class Api:
""" Generic class to wrap web APIs """
params_to_etag = {}
etag_to_response = {}
def __init__(self, path):
self.path = path
# Load the config, skipping the REMOTE_CONFIG since we are
# getting the info needed to get to it!
config = Configuration.get([DEFAULT_CONFIG,
SYSTEM_CONFIG,
USER_CONFIG],
cache=False)
config_server = config.get("server")
self.url = config_server.get("url")
self.version = config_server.get("version")
self.identity = IdentityManager.get()
def request(self, params):
self.check_token()
self.build_path(params)
self.old_params = copy(params)
return self.send(params)
def check_token(self):
# If the identity hasn't been loaded, load it
if not self.identity.has_refresh():
self.identity = IdentityManager.load()
# If refresh is needed perform a refresh
if self.identity.refresh and self.identity.is_expired():
self.identity = IdentityManager.load()
# if no one else has updated the token refresh it
if self.identity.is_expired():
self.refresh_token()
def refresh_token(self):
LOG.debug('Refreshing token')
if identity_lock.acquire(blocking=False):
try:
data = self.send({
"path": "auth/token",
"headers": {
"Authorization": "Bearer " + self.identity.refresh
}
})
IdentityManager.save(data, lock=False)
LOG.debug('Saved credentials')
except HTTPError as e:
if e.response.status_code == 401:
LOG.error('Could not refresh token, invalid refresh code.')
else:
raise
finally:
identity_lock.release()
        else:
            # Someone else is updating the identity; wait for the lock release
with identity_lock:
LOG.debug('Refresh is already in progress, waiting until done')
time.sleep(1.2)
os.sync()
self.identity = IdentityManager.load(lock=False)
LOG.debug('new credentials loaded')
def send(self, params, no_refresh=False):
""" Send request to mycroft backend.
The method handles Etags and will return a cached response value
if nothing has changed on the remote.
Arguments:
params (dict): request parameters
            no_refresh (bool): optional parameter to disable token refreshes
Returns:
Requests response object.
"""
query_data = frozenset(params.get('query', {}).items())
params_key = (params.get('path'), query_data)
etag = self.params_to_etag.get(params_key)
method = params.get("method", "GET")
headers = self.build_headers(params)
data = self.build_data(params)
json_body = self.build_json(params)
query = self.build_query(params)
url = self.build_url(params)
# For an introduction to the Etag feature check out:
# https://en.wikipedia.org/wiki/HTTP_ETag
if etag:
headers['If-None-Match'] = etag
response = requests.request(
method, url, headers=headers, params=query,
data=data, json=json_body, timeout=(3.05, 15)
)
if response.status_code == 304:
# Etag matched, use response previously cached
response = self.etag_to_response[etag]
elif 'ETag' in response.headers:
etag = response.headers['ETag'].strip('"')
# Cache response for future lookup when we receive a 304
self.params_to_etag[params_key] = etag
self.etag_to_response[etag] = response
return self.get_response(response, no_refresh)
def get_response(self, response, no_refresh=False):
""" Parse response and extract data from response.
Will try to refresh the access token if it's expired.
Arguments:
response (requests Response object): Response to parse
no_refresh (bool): Disable refreshing of the token
Returns:
data fetched from server
"""
data = self.get_data(response)
if 200 <= response.status_code < 300:
return data
elif (not no_refresh and response.status_code == 401 and not
response.url.endswith("auth/token") and
self.identity.is_expired()):
self.refresh_token()
return self.send(self.old_params, no_refresh=True)
raise HTTPError(data, response=response)
def get_data(self, response):
try:
return response.json()
        except ValueError:
return response.text
def build_headers(self, params):
headers = params.get("headers", {})
self.add_content_type(headers)
self.add_authorization(headers)
params["headers"] = headers
return headers
def add_content_type(self, headers):
if not headers.__contains__("Content-Type"):
headers["Content-Type"] = "application/json"
def add_authorization(self, headers):
if not headers.__contains__("Authorization"):
headers["Authorization"] = "Bearer " + self.identity.access
def build_data(self, params):
return params.get("data")
def build_json(self, params):
json = params.get("json")
if json and params["headers"]["Content-Type"] == "application/json":
for k, v in json.items():
if v == "":
json[k] = None
params["json"] = json
return json
def build_query(self, params):
return params.get("query")
def build_path(self, params):
path = params.get("path", "")
params["path"] = self.path + path
return params["path"]
def build_url(self, params):
path = params.get("path", "")
version = params.get("version", self.version)
return self.url + "/" + version + "/" + path
class DeviceApi(Api):
""" Web API wrapper for obtaining device-level information """
def __init__(self):
super(DeviceApi, self).__init__("device")
def get_code(self, state):
IdentityManager.update()
return self.request({
"path": "/code?state=" + state
})
def activate(self, state, token):
version = VersionManager.get()
platform = "unknown"
platform_build = ""
# load just the local configs to get platform info
config = Configuration.get([SYSTEM_CONFIG,
USER_CONFIG],
cache=False)
if "enclosure" in config:
platform = config.get("enclosure").get("platform", "unknown")
platform_build = config.get("enclosure").get("platform_build", "")
return self.request({
"method": "POST",
"path": "/activate",
"json": {"state": state,
"token": token,
"coreVersion": version.get("coreVersion"),
"platform": platform,
"platform_build": platform_build,
"enclosureVersion": version.get("enclosureVersion")}
})
def update_version(self):
version = VersionManager.get()
platform = "unknown"
platform_build = ""
# load just the local configs to get platform info
config = Configuration.get([SYSTEM_CONFIG,
USER_CONFIG],
cache=False)
if "enclosure" in config:
platform = config.get("enclosure").get("platform", "unknown")
platform_build = config.get("enclosure").get("platform_build", "")
return self.request({
"method": "PATCH",
"path": "/" + self.identity.uuid,
"json": {"coreVersion": version.get("coreVersion"),
"platform": platform,
"platform_build": platform_build,
"enclosureVersion": version.get("enclosureVersion")}
})
def send_email(self, title, body, sender):
return self.request({
"method": "PUT",
"path": "/" + self.identity.uuid + "/message",
"json": {"title": title, "body": body, "sender": sender}
})
def report_metric(self, name, data):
return self.request({
"method": "POST",
"path": "/" + self.identity.uuid + "/metric/" + name,
"json": data
})
def get(self):
""" Retrieve all device information from the web backend """
return self.request({
"path": "/" + self.identity.uuid
})
def get_settings(self):
""" Retrieve device settings information from the web backend
Returns:
str: JSON string with user configuration information.
"""
return self.request({
"path": "/" + self.identity.uuid + "/setting"
})
def get_location(self):
""" Retrieve device location information from the web backend
Returns:
str: JSON string with user location.
"""
return self.request({
"path": "/" + self.identity.uuid + "/location"
})
def get_subscription(self):
"""
        Get information about the type of subscription this unit is
        connected to.
Returns: dictionary with subscription information
"""
return self.request({
'path': '/' + self.identity.uuid + '/subscription'})
@property
def is_subscriber(self):
"""
        Status of the subscription. True if the device is connected to a
        paying subscriber.
"""
try:
return self.get_subscription().get('@type') != 'free'
        except Exception:
# If can't retrieve, assume not paired and not a subscriber yet
return False
def get_subscriber_voice_url(self, voice=None):
self.check_token()
archs = {'x86_64': 'x86_64', 'armv7l': 'arm', 'aarch64': 'arm'}
arch = archs.get(get_arch())
if arch:
path = '/' + self.identity.uuid + '/voice?arch=' + arch
return self.request({'path': path})['link']
def get_oauth_token(self, dev_cred):
"""
Get Oauth token for dev_credential dev_cred.
Argument:
dev_cred: development credentials identifier
Returns:
json string containing token and additional information
"""
return self.request({
"method": "GET",
"path": "/" + self.identity.uuid + "/token/" + str(dev_cred)
})
def upload_skills_data(self, data):
""" Upload skills.json file.
Arguments:
data: dictionary with skills data from msm
"""
self.request({
"method": "PUT",
"path": "/" + self.identity.uuid + "/skillJson",
"json": data
})
class STTApi(Api):
""" Web API wrapper for performing Speech to Text (STT) """
def __init__(self, path):
super(STTApi, self).__init__(path)
def stt(self, audio, language, limit):
""" Web API wrapper for performing Speech to Text (STT)
Args:
audio (bytes): The recorded audio, as in a FLAC file
language (str): A BCP-47 language code, e.g. "en-US"
limit (int): Maximum minutes to transcribe(?)
Returns:
str: JSON structure with transcription results
"""
return self.request({
"method": "POST",
"headers": {"Content-Type": "audio/x-flac"},
"query": {"lang": language, "limit": limit},
"data": audio
})
def has_been_paired():
""" Determine if this device has ever been paired with a web backend
Returns:
bool: True if ever paired with backend (not factory reset)
"""
# This forces a load from the identity file in case the pairing state
# has recently changed
id = IdentityManager.load()
return id.uuid is not None and id.uuid != ""
def is_paired(ignore_errors=True):
""" Determine if this device is actively paired with a web backend
Determines if the installation of Mycroft has been paired by the user
with the backend system, and if that pairing is still active.
Returns:
bool: True if paired with backend
"""
global _paired_cache
if _paired_cache:
# NOTE: This assumes once paired, the unit remains paired. So
# un-pairing must restart the system (or clear this value).
# The Mark 1 does perform a restart on RESET.
return True
try:
api = DeviceApi()
device = api.get()
_paired_cache = api.identity.uuid is not None and \
api.identity.uuid != ""
return _paired_cache
except HTTPError as e:
if e.response.status_code == 401:
return False
except Exception as e:
        LOG.warning('Could not get device info: ' + repr(e))
if ignore_errors:
return False
if connected():
raise BackendDown
raise InternetDown
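# Illustrative sketch, not part of the original module: one way the wrappers
# above might be exercised on a configured device. It assumes a valid identity
# file and a reachable backend, so it is guarded to keep imports side-effect
# free.
if __name__ == "__main__":
    if is_paired(ignore_errors=True):
        device = DeviceApi()
        print("Device settings: " + repr(device.get_settings()))
    else:
        print("Device is not paired with the backend")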
|
|
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import constants
from oslo_utils import uuidutils
from neutron.common import constants as n_const
from neutron.common import utils
from neutron import context
from neutron.db.models import l3 as l3_models
from neutron.db.models import l3_attrs
from neutron.db.models import l3ha as l3ha_model
from neutron.db import models_v2
from neutron.extensions import portbindings
from neutron.plugins.ml2.drivers.l2pop import db as l2pop_db
from neutron.plugins.ml2 import models
from neutron.tests.common import helpers
from neutron.tests import tools
from neutron.tests.unit import testlib_api
HOST = helpers.HOST
HOST_2 = 'HOST_2'
HOST_3 = 'HOST_3'
HOST_2_TUNNELING_IP = '20.0.0.2'
HOST_3_TUNNELING_IP = '20.0.0.3'
TEST_ROUTER_ID = 'router_id'
TEST_NETWORK_ID = 'network_id'
TEST_HA_NETWORK_ID = 'ha_network_id'
class TestL2PopulationDBTestCase(testlib_api.SqlTestCase):
def setUp(self):
super(TestL2PopulationDBTestCase, self).setUp()
self.ctx = context.get_admin_context()
self._create_network()
def _create_network(self, network_id=TEST_NETWORK_ID):
with self.ctx.session.begin(subtransactions=True):
self.ctx.session.add(models_v2.Network(id=network_id))
def _create_router(self, distributed=True, ha=False):
with self.ctx.session.begin(subtransactions=True):
self.ctx.session.add(l3_models.Router(id=TEST_ROUTER_ID))
self.ctx.session.add(l3_attrs.RouterExtraAttributes(
router_id=TEST_ROUTER_ID, distributed=distributed, ha=ha))
def _create_ha_router(self, distributed=False):
helpers.register_l3_agent(HOST_2)
helpers.register_ovs_agent(HOST_2, tunneling_ip=HOST_2_TUNNELING_IP)
        # Register an l3 agent on host3, which doesn't host any HA router.
        # Tests rely on host3 not being an HA agent host.
helpers.register_l3_agent(HOST_3)
helpers.register_ovs_agent(HOST_3, tunneling_ip=HOST_3_TUNNELING_IP)
with self.ctx.session.begin(subtransactions=True):
self.ctx.session.add(models_v2.Network(id=TEST_HA_NETWORK_ID))
self._create_router(distributed=distributed, ha=True)
for state, host in [(n_const.HA_ROUTER_STATE_ACTIVE, HOST),
(n_const.HA_ROUTER_STATE_STANDBY, HOST_2)]:
self._setup_port_binding(
network_id=TEST_HA_NETWORK_ID,
device_owner=constants.DEVICE_OWNER_ROUTER_HA_INTF,
device_id=TEST_ROUTER_ID,
host_state=state,
host=host)
def get_l3_agent_by_host(self, agent_host):
plugin = helpers.FakePlugin()
return plugin._get_agent_by_type_and_host(
self.ctx, constants.AGENT_TYPE_L3, agent_host)
def test_get_agent_by_host(self):
helpers.register_l3_agent()
helpers.register_dhcp_agent()
helpers.register_ovs_agent()
agent = l2pop_db.get_agent_by_host(
self.ctx.session, helpers.HOST)
self.assertEqual(constants.AGENT_TYPE_OVS, agent.agent_type)
def test_get_agent_by_host_no_candidate(self):
helpers.register_l3_agent()
helpers.register_dhcp_agent()
agent = l2pop_db.get_agent_by_host(
self.ctx.session, helpers.HOST)
self.assertIsNone(agent)
def _setup_port_binding(self, **kwargs):
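        # Creates a port on the given network together with its binding; DVR
        # interface ports get a DistributedPortBinding, and ports on the HA
        # network additionally get an L3HARouterAgentPortBinding with the
        # requested HA state.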
with self.ctx.session.begin(subtransactions=True):
mac = utils.get_random_mac('fa:16:3e:00:00:00'.split(':'))
port_id = uuidutils.generate_uuid()
network_id = kwargs.get('network_id', TEST_NETWORK_ID)
device_owner = kwargs.get('device_owner', '')
device_id = kwargs.get('device_id', '')
host = kwargs.get('host', helpers.HOST)
self.ctx.session.add(models_v2.Port(
id=port_id, network_id=network_id, mac_address=mac,
admin_state_up=True, status=constants.PORT_STATUS_ACTIVE,
device_id=device_id, device_owner=device_owner))
port_binding_cls = models.PortBinding
binding_kwarg = {'port_id': port_id,
'host': host,
'vif_type': portbindings.VIF_TYPE_UNBOUND,
'vnic_type': portbindings.VNIC_NORMAL}
if device_owner == constants.DEVICE_OWNER_DVR_INTERFACE:
port_binding_cls = models.DistributedPortBinding
binding_kwarg['router_id'] = TEST_ROUTER_ID
binding_kwarg['status'] = constants.PORT_STATUS_DOWN
self.ctx.session.add(port_binding_cls(**binding_kwarg))
if network_id == TEST_HA_NETWORK_ID:
agent = self.get_l3_agent_by_host(host)
haport_bindings_cls = l3ha_model.L3HARouterAgentPortBinding
habinding_kwarg = {'port_id': port_id,
'router_id': device_id,
'l3_agent_id': agent['id'],
'state': kwargs.get('host_state',
n_const.HA_ROUTER_STATE_ACTIVE)}
self.ctx.session.add(haport_bindings_cls(**habinding_kwarg))
def test_get_distributed_active_network_ports(self):
self._setup_port_binding(
device_owner=constants.DEVICE_OWNER_DVR_INTERFACE)
# Register a L2 agent + A bunch of other agents on the same host
helpers.register_l3_agent()
helpers.register_dhcp_agent()
helpers.register_ovs_agent()
tunnel_network_ports = l2pop_db.get_distributed_active_network_ports(
self.ctx.session, TEST_NETWORK_ID)
self.assertEqual(1, len(tunnel_network_ports))
_, agent = tunnel_network_ports[0]
self.assertEqual(constants.AGENT_TYPE_OVS, agent.agent_type)
def test_get_distributed_active_network_ports_no_candidate(self):
self._setup_port_binding(
device_owner=constants.DEVICE_OWNER_DVR_INTERFACE)
# Register a bunch of non-L2 agents on the same host
helpers.register_l3_agent()
helpers.register_dhcp_agent()
tunnel_network_ports = l2pop_db.get_distributed_active_network_ports(
self.ctx.session, TEST_NETWORK_ID)
self.assertEqual(0, len(tunnel_network_ports))
def test_get_nondistributed_active_network_ports(self):
self._setup_port_binding(dvr=False)
# Register a L2 agent + A bunch of other agents on the same host
helpers.register_l3_agent()
helpers.register_dhcp_agent()
helpers.register_ovs_agent()
fdb_network_ports = l2pop_db.get_nondistributed_active_network_ports(
self.ctx.session, TEST_NETWORK_ID)
self.assertEqual(1, len(fdb_network_ports))
_, agent = fdb_network_ports[0]
self.assertEqual(constants.AGENT_TYPE_OVS, agent.agent_type)
def test_get_nondistributed_active_network_ports_no_candidate(self):
self._setup_port_binding(dvr=False)
# Register a bunch of non-L2 agents on the same host
helpers.register_l3_agent()
helpers.register_dhcp_agent()
fdb_network_ports = l2pop_db.get_nondistributed_active_network_ports(
self.ctx.session, TEST_NETWORK_ID)
self.assertEqual(0, len(fdb_network_ports))
def test__get_ha_router_interface_ids_with_ha_dvr_snat_port(self):
helpers.register_dhcp_agent()
helpers.register_l3_agent()
helpers.register_ovs_agent()
self._create_ha_router()
self._setup_port_binding(
device_owner=constants.DEVICE_OWNER_ROUTER_SNAT,
device_id=TEST_ROUTER_ID)
ha_iface_ids = l2pop_db._get_ha_router_interface_ids(
self.ctx.session, TEST_NETWORK_ID)
self.assertEqual(1, len(list(ha_iface_ids)))
def test__get_ha_router_interface_ids_with_ha_replicated_port(self):
helpers.register_dhcp_agent()
helpers.register_l3_agent()
helpers.register_ovs_agent()
self._create_ha_router()
self._setup_port_binding(
device_owner=constants.DEVICE_OWNER_HA_REPLICATED_INT,
device_id=TEST_ROUTER_ID)
ha_iface_ids = l2pop_db._get_ha_router_interface_ids(
self.ctx.session, TEST_NETWORK_ID)
self.assertEqual(1, len(list(ha_iface_ids)))
def test__get_ha_router_interface_ids_with_no_ha_port(self):
self._create_router()
self._setup_port_binding(
device_owner=constants.DEVICE_OWNER_ROUTER_SNAT,
device_id=TEST_ROUTER_ID)
ha_iface_ids = l2pop_db._get_ha_router_interface_ids(
self.ctx.session, TEST_NETWORK_ID)
self.assertEqual(0, len(list(ha_iface_ids)))
def test_active_network_ports_with_dvr_snat_port(self):
# Test to get agent hosting dvr snat port
helpers.register_l3_agent()
helpers.register_dhcp_agent()
helpers.register_ovs_agent()
# create DVR router
self._create_router()
# setup DVR snat port
self._setup_port_binding(
device_owner=constants.DEVICE_OWNER_ROUTER_SNAT,
device_id=TEST_ROUTER_ID)
helpers.register_dhcp_agent()
fdb_network_ports = l2pop_db.get_nondistributed_active_network_ports(
self.ctx.session, TEST_NETWORK_ID)
self.assertEqual(1, len(fdb_network_ports))
def test_active_network_ports_with_ha_dvr_snat_port(self):
# test to get HA agents hosting HA+DVR snat port
helpers.register_dhcp_agent()
helpers.register_l3_agent()
helpers.register_ovs_agent()
# create HA+DVR router
self._create_ha_router()
# setup HA snat port
self._setup_port_binding(
device_owner=constants.DEVICE_OWNER_ROUTER_SNAT,
device_id=TEST_ROUTER_ID)
fdb_network_ports = l2pop_db.get_nondistributed_active_network_ports(
self.ctx.session, TEST_NETWORK_ID)
self.assertEqual(0, len(fdb_network_ports))
ha_ports = l2pop_db.get_ha_active_network_ports(
self.ctx.session, TEST_NETWORK_ID)
self.assertEqual(2, len(ha_ports))
def test_active_port_count_with_dvr_snat_port(self):
helpers.register_l3_agent()
helpers.register_dhcp_agent()
helpers.register_ovs_agent()
self._create_router()
self._setup_port_binding(
device_owner=constants.DEVICE_OWNER_ROUTER_SNAT,
device_id=TEST_ROUTER_ID)
helpers.register_dhcp_agent()
port_count = l2pop_db.get_agent_network_active_port_count(
self.ctx.session, HOST, TEST_NETWORK_ID)
self.assertEqual(1, port_count)
port_count = l2pop_db.get_agent_network_active_port_count(
self.ctx.session, HOST_2, TEST_NETWORK_ID)
self.assertEqual(0, port_count)
def test_active_port_count_with_ha_dvr_snat_port(self):
helpers.register_dhcp_agent()
helpers.register_l3_agent()
helpers.register_ovs_agent()
self._create_ha_router()
self._setup_port_binding(
device_owner=constants.DEVICE_OWNER_ROUTER_SNAT,
device_id=TEST_ROUTER_ID)
port_count = l2pop_db.get_agent_network_active_port_count(
self.ctx.session, HOST, TEST_NETWORK_ID)
self.assertEqual(1, port_count)
port_count = l2pop_db.get_agent_network_active_port_count(
self.ctx.session, HOST_2, TEST_NETWORK_ID)
self.assertEqual(1, port_count)
def test_get_ha_agents_by_router_id(self):
helpers.register_dhcp_agent()
helpers.register_l3_agent()
helpers.register_ovs_agent()
self._create_ha_router()
self._setup_port_binding(
device_owner=constants.DEVICE_OWNER_ROUTER_SNAT,
device_id=TEST_ROUTER_ID)
agents = l2pop_db.get_ha_agents_by_router_id(
self.ctx.session, TEST_ROUTER_ID)
ha_agents = [agent.host for agent in agents]
self.assertEqual(tools.UnorderedList([HOST, HOST_2]), ha_agents)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Ftrl operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
from tensorflow.python.training import ftrl
from tensorflow.python.training import gradient_descent
class FtrlOptimizerTest(test.TestCase):
def doTestFtrlwithoutRegularization(self, use_resource=False):
for dtype in [dtypes.half, dtypes.float32]:
with self.test_session() as sess:
if use_resource:
var0 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
else:
var0 = variables.Variable([0.0, 0.0], dtype=dtype)
var1 = variables.Variable([0.0, 0.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllClose([0.0, 0.0], v0_val)
self.assertAllClose([0.0, 0.0], v1_val)
# Run 3 steps FTRL
for _ in range(3):
update.run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-2.60260963, -4.29698515]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.28432083, -0.56694895]), v1_val)
def testFtrlWithoutRegularization(self):
self.doTestFtrlwithoutRegularization(use_resource=False)
def testResourceFtrlWithoutRegularization(self):
self.doTestFtrlwithoutRegularization(use_resource=True)
def testFtrlwithoutRegularization2(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.test_session() as sess:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
# Run 3 steps FTRL
for _ in range(3):
update.run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-2.55607247, -3.98729396]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.28232238, -0.56096673]), v1_val)
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session():
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
loss = pred * pred
sgd_op = ftrl.FtrlOptimizer(1.0).minimize(loss)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType(
[[0, 1]], var0.eval(), atol=0.01)
def testFtrlWithL1(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.test_session() as sess:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
# Run 10 steps FTRL
for _ in range(10):
update.run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-7.66718769, -10.91273689]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.93460727, -1.86147261]), v1_val)
def testFtrlWithL1_L2(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.test_session() as sess:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
# Run 10 steps FTRL
for _ in range(10):
update.run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-0.24059935, -0.46829352]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.02406147, -0.04830509]), v1_val)
def applyOptimizer(self, opt, dtype, steps=5, is_sparse=False):
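    # Runs `steps` updates of the given optimizer on two small variables
    # (with dense or sparse gradients) and returns their final values, so two
    # optimizers can be compared for equivalence.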
if is_sparse:
var0 = variables.Variable([[0.0], [0.0]], dtype=dtype)
var1 = variables.Variable([[0.0], [0.0]], dtype=dtype)
grads0 = ops.IndexedSlices(
constant_op.constant(
[0.1], shape=[1, 1], dtype=dtype),
constant_op.constant([0]),
constant_op.constant([2, 1]))
grads1 = ops.IndexedSlices(
constant_op.constant(
[0.02], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
else:
var0 = variables.Variable([0.0, 0.0], dtype=dtype)
var1 = variables.Variable([0.0, 0.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
sess = ops.get_default_session()
v0_val, v1_val = sess.run([var0, var1])
if is_sparse:
self.assertAllCloseAccordingToType([[0.0], [0.0]], v0_val)
self.assertAllCloseAccordingToType([[0.0], [0.0]], v1_val)
else:
self.assertAllCloseAccordingToType([0.0, 0.0], v0_val)
self.assertAllCloseAccordingToType([0.0, 0.0], v1_val)
# Run Ftrl for a few steps
for _ in range(steps):
update.run()
v0_val, v1_val = sess.run([var0, var1])
return v0_val, v1_val
  # When variables are initialized with zero, FTRL-Proximal has two properties:
  # 1. Without L1/L2 regularization but with a fixed learning rate,
  #    FTRL-Proximal is identical to GradientDescent.
  # 2. Without L1/L2 regularization but with an adaptive learning rate,
  #    FTRL-Proximal is identical to Adagrad.
  # Based on these two properties, we test whether our implementation of
  # FTRL-Proximal performs the same updates as Adagrad or GradientDescent.
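  # A brief sketch of why this holds (added for reference; standard FTRL
  # algebra rather than part of the original comment): with
  # l1_regularization_strength = l2_regularization_strength = 0, the
  # per-coordinate update reduces to w_{t+1} = w_t - eta_t * g_t, where
  #   eta_t = learning_rate * n_t ** learning_rate_power
  # and n_t is the accumulated sum of squared gradients. A power of 0 gives a
  # constant step size (plain gradient descent), while a power of -0.5 gives
  # eta_t = learning_rate / sqrt(n_t), i.e. the Adagrad step size for the same
  # initial_accumulator_value.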
def testEquivAdagradwithoutRegularization(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.test_session():
val0, val1 = self.applyOptimizer(
ftrl.FtrlOptimizer(
3.0,
# Adagrad learning rate
learning_rate_power=-0.5,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
dtype)
with self.test_session():
val2, val3 = self.applyOptimizer(
adagrad.AdagradOptimizer(
3.0, initial_accumulator_value=0.1), dtype)
self.assertAllCloseAccordingToType(val0, val2)
self.assertAllCloseAccordingToType(val1, val3)
def testEquivSparseAdagradwithoutRegularization(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.test_session():
val0, val1 = self.applyOptimizer(
ftrl.FtrlOptimizer(
3.0,
# Adagrad learning rate
learning_rate_power=-0.5,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
dtype,
is_sparse=True)
with self.test_session():
val2, val3 = self.applyOptimizer(
adagrad.AdagradOptimizer(
3.0, initial_accumulator_value=0.1),
dtype,
is_sparse=True)
self.assertAllCloseAccordingToType(val0, val2)
self.assertAllCloseAccordingToType(val1, val3)
def testEquivSparseGradientDescentwithoutRegularization(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.test_session():
val0, val1 = self.applyOptimizer(
ftrl.FtrlOptimizer(
3.0,
# Fixed learning rate
learning_rate_power=-0.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
dtype,
is_sparse=True)
with self.test_session():
val2, val3 = self.applyOptimizer(
gradient_descent.GradientDescentOptimizer(3.0),
dtype,
is_sparse=True)
self.assertAllCloseAccordingToType(val0, val2)
self.assertAllCloseAccordingToType(val1, val3)
def testEquivGradientDescentwithoutRegularization(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.test_session():
val0, val1 = self.applyOptimizer(
ftrl.FtrlOptimizer(
3.0,
# Fixed learning rate
learning_rate_power=-0.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
dtype)
with self.test_session():
val2, val3 = self.applyOptimizer(
gradient_descent.GradientDescentOptimizer(3.0), dtype)
self.assertAllCloseAccordingToType(val0, val2)
self.assertAllCloseAccordingToType(val1, val3)
if __name__ == "__main__":
test.main()
|
|
__author__ = 'stuart'
import os, sys, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
from datastreams import DataSet, DataStream, Datum, DictSet, DictStream
if sys.version_info[0] == 2 and sys.version_info[1] < 7:
import unittest2 as unittest
else:
import unittest
try:
reduce
except NameError:
from functools import reduce
class JoinTests(unittest.TestCase):
def test_inner_join(self):
left = DataSet.from_csv("test_set_1.csv")
right = DataSet.from_csv("test_set_2.csv")
joined = left.join('inner', 'name', right)
self.assertIn('stuart', joined.map(lambda entity: entity.name))
self.assertEqual(2, sum(joined.map(lambda entity: entity.name == 'gatsby')))
self.assertNotIn('max', joined.map(lambda entity: entity.name))
self.assertNotIn('john', joined.map(lambda entity: entity.name))
def test_outer_join(self):
left = DataSet.from_csv("test_set_1.csv")
right = DataSet.from_csv("test_set_2.csv")
joined = left.join('outer', 'name', right)
self.assertIn('max', joined.map(lambda entity: entity.name))
self.assertEqual(2, sum(joined.map(lambda entity: entity.name == 'gatsby')))
self.assertEqual(1, sum(joined.map(lambda entity: entity.name == 'max')))
self.assertIn('max', joined.map(lambda entity: entity.name))
self.assertIn('john', joined.map(lambda entity: entity.name))
def test_left_join(self):
left = DataSet.from_csv("test_set_1.csv")
right = DataSet.from_csv("test_set_2.csv")
joined = left.join('left', 'name', right)
self.assertEqual(2, sum(joined.map(lambda entity: entity.name == 'gatsby')))
self.assertEqual(0, sum(joined.map(lambda entity: entity.name == 'max')))
self.assertNotIn('max', joined.map(lambda entity: entity.name))
self.assertIn('john', joined.map(lambda entity: entity.name))
def test_right_join(self):
left = DataSet.from_csv("test_set_1.csv")
right = DataSet.from_csv("test_set_2.csv")
joined = left.join('right', 'name', right)
self.assertEqual(2, sum(joined.map(lambda entity: entity.name == 'gatsby')))
self.assertEqual(1, sum(joined.map(lambda entity: entity.name == 'max')))
self.assertIn('max', joined.map(lambda entity: entity.name))
self.assertNotIn('john', joined.map(lambda entity: entity.name))
def test_group_by(self):
stream = DataStream.from_csv("test_set_2.csv")
grouped = stream.group_by('name')
groupdict = dict(grouped)
self.assertEqual(2, len(groupdict['gatsby']))
self.assertEqual(1, len(groupdict['carina']))
class StreamTests(unittest.TestCase):
def test_map(self):
stream = DataStream(range(10))
inced = stream.map(lambda num: num + 1)
self.assertEqual(1, next(inced))
self.assertEqual(2, next(inced))
self.assertEqual(3, next(inced))
self.assertEqual(4, next(inced))
self.assertEqual(5, next(inced))
def test_map_builtin(self):
stream = DataStream(range(10))
updated = list(map(lambda num: num + 1, stream))
self.assertEqual(len(updated), 10)
self.assertEqual(updated[0], 1)
self.assertEqual(updated[1], 2)
self.assertEqual(updated[2], 3)
def test_filter(self):
stream = DataStream(range(14))
odds = stream.filter(lambda num: num % 2)
self.assertEqual(next(odds), 1)
self.assertEqual(next(odds), 3)
self.assertEqual(next(odds), 5)
posset = DataStream(range(10)).collect()
negs = set(posset.filter(lambda num: num < 0))
self.assertEqual(len(negs), 0)
doubled = list(posset.map(lambda num: num * 2))
self.assertEqual(len(doubled), len(posset))
def test_filters(self):
stream = DataStream(range(14))
odd_filter = lambda num: num % 2 != 0
gt5filter = lambda num: num > 5
filtered = list(stream.filters([odd_filter, gt5filter]))
self.assertListEqual(filtered, [7, 9, 11, 13])
def test_filter_builtin(self):
stream = DataStream(range(14))
odds = list(filter(lambda num: num % 2, stream))
self.assertEqual(odds[0], 1)
self.assertEqual(odds[1], 3)
self.assertEqual(odds[2], 5)
def test_reduce(self):
stream = DataStream(range(5))
factorials = stream.map(lambda num: num + 1)\
.reduce(lambda facts, num: facts * num)
self.assertEqual(factorials, 5*4*3*2*1)
def test_reduce_to_dataset(self):
stream = DataStream(range(5))
def filtreducer(agg, row):
if row % 2 == 0:
return agg + [row]
return agg
ghetto_filtered = stream.reduce(filtreducer, [])
self.assertEqual(ghetto_filtered[0], 0)
self.assertEqual(ghetto_filtered[1], 2)
self.assertEqual(ghetto_filtered[2], 4)
def test_reduce_builtin(self):
stream = DataStream(range(5))
summed = reduce(lambda a, b: a + b, stream, 0)
self.assertEqual(summed, sum(range(5)))
def test_read_file(self):
stream = DataStream.from_file("test_set_1.csv")
self.assertEqual('name,age,height', next(stream).strip())
self.assertEqual('carina,27,60', next(stream).strip())
self.assertEqual('stuart,27,72', next(stream).strip())
self.assertEqual('gatsby,7,24', next(stream).strip())
self.assertEqual('john,31,76', next(stream).strip())
def test_read_csv(self):
stream = DataStream.from_csv("test_set_1.csv")
self.assertEqual('carina', next(stream).name)
self.assertEqual('stuart', next(stream).name)
self.assertEqual('gatsby', next(stream).name)
self.assertEqual('john', next(stream).name)
def test_batch(self):
batched = DataStream(range(9)).batch(2)
self.assertSequenceEqual(next(batched), [0, 1])
self.assertSequenceEqual(next(batched), [2, 3])
self.assertSequenceEqual(next(batched), [4, 5])
self.assertSequenceEqual(next(batched), [6, 7])
self.assertSequenceEqual(next(batched), [8])
batched = DataStream(range(4)).batch(2).to_list()
self.assertEqual(len(batched), 2)
def test_window(self):
stream = DataStream(range(10))
windowed = stream.window(3, 2)
self.assertSequenceEqual(next(windowed).to_list(), [0, 1, 2])
self.assertSequenceEqual(next(windowed).to_list(), [2, 3, 4])
self.assertSequenceEqual(next(windowed).to_list(), [4, 5, 6])
self.assertSequenceEqual(next(windowed).to_list(), [6, 7, 8])
self.assertSequenceEqual(next(windowed).to_list(), [8, 9])
def test_concat(self):
stream = DataStream([[], [1], [2, 3]])
flattened = stream.concat()
self.assertEqual(next(flattened), 1)
self.assertEqual(next(flattened), 2)
self.assertEqual(next(flattened), 3)
def test_concat_map(self):
stream = DataStream(range(20))
batched = stream.batch(4)
concat_mapped = batched.concat_map(
lambda nums: map(lambda num: num + 1, nums))
result = list(concat_mapped)
self.assertSequenceEqual(result, list(map(lambda num: num + 1, range(20))))
def test_for_each(self):
stream = DataStream(range(20))
not_changed = stream.for_each(lambda num: num + 1)
self.assertEqual(next(not_changed), 0)
self.assertEqual(next(not_changed), 1)
self.assertEqual(next(not_changed), 2)
def test_take_now(self):
stream = DataStream(range(13))
not_iter = stream.take_now(5)
self.assertEqual(len(not_iter), 5)
self.assertEqual(not_iter[0], 0)
def test_drop_take(self):
stream = DataStream(range(10))
second_half = stream.drop(5).take(5)
self.assertEqual(next(second_half), 5)
self.assertEqual(next(second_half), 6)
self.assertEqual(next(second_half), 7)
self.assertEqual(next(second_half), 8)
def test_count(self):
n = 20
self.assertEqual(DataStream(range(n)).count(), n)
def test_count_frequency(self):
stream = DataStream("Hello, world!")
counts = stream.count_frequency()
self.assertEqual(dict(counts)['l'], 3)
self.assertEqual(dict(counts)['e'], 1)
self.assertEqual(dict(counts)['!'], 1)
def test_to(self):
stream = DataStream(['{"key": "value"}'])\
.to(list)
self.assertTrue(isinstance(stream, list))
def test_to_dict(self):
stream = DataStream("Hello, world!")
counts = stream.count_frequency().to_dict()
self.assertEqual(counts['l'], 3)
self.assertEqual(counts['e'], 1)
self.assertEqual(counts['!'], 1)
def test_to_list(self):
stream = DataStream(range(20))
streamlist = stream.to_list()
self.assertListEqual(streamlist, list(range(20)))
def test_to_set(self):
stream = DataStream(range(19))
range10 = stream\
.map(lambda num: abs(num - 9))\
.to_set()
self.assertSetEqual(range10, set(range(10)))
def test_set(self):
class Brad(object):
def __init__(self, name, height, age):
self.name = name
self.height = height
self.age = age
stream = DataStream([Brad('b-rad', 72, 21)]) \
.set('name', lambda row: 'brad')\
.set('height', lambda row: 70) \
.set('age', lambda row: 30)
brad = next(stream)
self.assertEqual(brad.height, 70)
self.assertEqual(brad.name, 'brad')
self.assertEqual(brad.age, 30)
def test_map_method(self):
class Brad(object):
def __init__(self, name, height, age):
self.name = name
self.height = height
self.age = age
def get_name(self, upper):
return self.name.upper() if upper else self.name
stream = DataStream([Brad('b-rad', 72, 21)]) \
.set('name', lambda row: 'brad') \
.set('height', lambda row: 70) \
.set('age', lambda row: 30)
BRAD_NAME = next(stream.map_method('get_name', True))
self.assertEqual(BRAD_NAME, 'BRAD')
def test_call(self):
global _test_call_passed
_test_call_passed = False
def make_test_pass(dataset):
global _test_call_passed
_test_call_passed = isinstance(dataset, DataSet)
DataSet(range(10)).call(make_test_pass)
self.assertTrue(_test_call_passed)
del _test_call_passed
def test_execute(self):
global _test_execute_count
_test_execute_count = 0
def inc_execute_count(num):
global _test_execute_count
_test_execute_count += 1
self.assertEqual(_test_execute_count, 0)
DataStream(range(20))\
.for_each(inc_execute_count)\
.execute()
self.assertEqual(_test_execute_count, 20)
del _test_execute_count
def test_pick_attrs(self):
def test_attrs(obj):
self.assertIn('b', dir(obj))
self.assertNotIn('a', dir(obj))
DataStream([Datum({'a': 1, 'b': 2}), Datum({'bbb': 0, 'b': 5})])\
.pick_attrs(['b'])\
.for_each(test_attrs)\
.execute()
def test_dedupe(self):
stream = DataStream([[0, 1], [0, 2], [1, 1]])
deduped = stream.dedupe(lambda row: row[0])
self.assertSequenceEqual([[0, 1], [1, 1]], deduped.to_list())
def test_sample(self):
stream = DataStream(range(100))
sampled = stream.sample(0.5, 5).to_list()
self.assertEqual(len(sampled), 5)
def test_slots_set(self):
class Thing(object):
__slots__ = ['name', 'age']
def __init__(self, name, age):
self.name = name
self.age = age
class Other(object):
__slots__ = ['name', 'weight']
def __init__(self, name, weight):
self.name = name
self.weight = weight
things = DataStream([Thing('brad', 24), Thing('alice', 54)])
others = DataStream([Other('brad', 170), Other('angela', 115)])
other_things = things.join('inner', 'name', others).set('age', value=20)
self.assertEqual(next(other_things).age, 20)
def test_from_files(self):
stream = DataStream.from_files(['test_set_1.csv', 'test_set_2.csv'])
self.assertEqual(stream.count(), 11)
class DataSetTests(unittest.TestCase):
def test_map(self):
stream2 = DataSet(range(10)) \
.take_now(5) \
.map(lambda num: num + 1)
self.assertEqual(1, next(stream2))
self.assertEqual(2, next(stream2))
self.assertEqual(3, next(stream2))
self.assertEqual(4, next(stream2))
self.assertEqual(5, next(stream2))
class FilterRadixTests(unittest.TestCase):
def test_radix_eq(self):
stream = DataStream(range(10))
just_three = stream.where('real').eq(3)
self.assertListEqual(list(just_three), [3])
def test_radix_is_in(self):
stream = DataStream(range(20))
some_primes = [1, 2, 3, 5, 7]
those_primes = list(stream.where('real').is_in(some_primes))
self.assertListEqual(those_primes, some_primes)
def test_lengths(self):
stream = DataSet(['hello', 'hello world', 'hello even more worlds'])
self.assertEqual(stream.where().shorter_than(10).count(), 1)
self.assertEqual(stream.where().longer_than(10).count(), 2)
self.assertEqual(stream.where().has_length(5).count(), 1)
class DictStreamTests(unittest.TestCase):
def test_dicstream_set(self):
stream = DictStream([{'name': 'brad', 'age': 25}])\
.set('height', lambda row: 70)\
.set('age', lambda row: 30)
brad = next(stream)
self.assertEqual(brad['height'], 70)
self.assertEqual(brad['name'], 'brad')
self.assertEqual(brad['age'], 30)
def test_dictstream_where(self):
stream = DictSet([{'name': 'brad', 'age': 25}])
filtered_empty = stream.where('age').gt(40).collect()
filtered_something = stream.where('age').lt(40).collect()
self.assertEqual(len(filtered_empty), 0)
self.assertEqual(len(filtered_something), 1)
def test_group_by(self):
stream = DictStream([{'name': 'brad', 'age': 25}, {'name': 'bradley', 'age': 22}])
grouped = stream.group_by('name')
self.assertEqual(len(grouped), 2)
def test_join_basic(self):
streama = DictStream([{'name': 'brad', 'age': 25}, {'name': 'bradley', 'age': 22}])
streamb = DictStream([{'name': 'brad', 'num': 21}, {'name': 'cooper', 'num': 22}])
joined = streama.join('inner', 'name', streamb)
self.assertEqual(len(joined), 1)
self.assertEqual(joined[0]['name'], 'brad')
self.assertEqual(joined[0]['age'], 25)
self.assertEqual(joined[0]['num'], 21)
if __name__ == '__main__':
unittest.main()
|
|
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn
import numpy as np
def test_parameter():
p = gluon.Parameter('weight', shape=(10, 10))
p.initialize(init='xavier', ctx=[mx.cpu(0), mx.cpu(1)])
assert len(p.list_data()) == 2
assert len(p.list_grad()) == 2
assert p.data(mx.cpu(1)).context == mx.cpu(1)
assert p.data(mx.cpu(0)).shape == (10, 10)
assert p.var().name == 'weight'
def test_paramdict():
params = gluon.ParameterDict('net_')
params.get('weight', shape=(10, 10))
assert list(params.keys()) == ['net_weight']
params.initialize(ctx=mx.cpu())
params.save('test.params')
params.load('test.params', mx.cpu())
def test_parameter_sharing():
class Net(gluon.Block):
def __init__(self, **kwargs):
super(Net, self).__init__(**kwargs)
with self.name_scope():
self.dense0 = nn.Dense(5, in_units=5)
self.dense1 = nn.Dense(5, in_units=5)
def forward(self, x):
return self.dense1(self.dense0(x))
net1 = Net(prefix='net1_')
net2 = Net(prefix='net2_', params=net1.collect_params())
net1.collect_params().initialize()
net2(mx.nd.zeros((3, 5)))
net1.save_params('net1.params')
net3 = Net(prefix='net3_')
net3.load_params('net1.params', mx.cpu())
def test_basic():
model = nn.Sequential()
model.add(nn.Dense(128, activation='tanh', in_units=10))
model.add(nn.Dropout(0.5))
model.add(nn.Dense(64, activation='tanh', in_units=128))
model.add(nn.Dense(32, in_units=64))
model.add(nn.Activation('relu'))
# symbol
x = mx.sym.var('data')
y = model(x)
assert len(y.list_arguments()) == 7
# ndarray
model.collect_params().initialize(mx.init.Xavier(magnitude=2.24))
x = model(mx.nd.zeros((32, 10)))
assert x.shape == (32, 32)
x.wait_to_read()
def check_layer_forward(layer, dshape):
layer.collect_params().initialize()
with mx.autograd.record():
out = layer(mx.nd.ones(shape=dshape))
out.backward()
layer.hybridize()
with mx.autograd.record():
out = layer(mx.nd.ones(shape=dshape))
out.backward()
def test_conv():
layers1d = [
nn.Conv1D(16, 3, in_channels=4),
nn.Conv1D(16, 3, groups=2, in_channels=4),
nn.Conv1D(16, 3, strides=3, groups=2, in_channels=4),
]
for layer in layers1d:
check_layer_forward(layer, (1, 4, 10))
layers2d = [
nn.Conv2D(16, (3, 4), in_channels=4),
nn.Conv2D(16, (5, 4), in_channels=4),
nn.Conv2D(16, (3, 4), groups=2, in_channels=4),
nn.Conv2D(16, (3, 4), strides=4, in_channels=4),
nn.Conv2D(16, (3, 4), dilation=4, in_channels=4),
nn.Conv2D(16, (3, 4), padding=4, in_channels=4),
]
for layer in layers2d:
check_layer_forward(layer, (1, 4, 20, 20))
layers3d = [
nn.Conv3D(16, (1, 8, 4), in_channels=4, activation='relu'),
nn.Conv3D(16, (5, 4, 3), in_channels=4),
nn.Conv3D(16, (3, 3, 3), groups=2, in_channels=4),
nn.Conv3D(16, 4, strides=4, in_channels=4),
nn.Conv3D(16, (3, 3, 3), padding=4, in_channels=4),
]
for layer in layers3d:
check_layer_forward(layer, (1, 4, 10, 10, 10))
layer = nn.Conv2D(16, (3, 3), layout='NHWC', in_channels=4)
# check_layer_forward(layer, (1, 10, 10, 4))
layer = nn.Conv3D(16, (3, 3, 3), layout='NDHWC', in_channels=4)
# check_layer_forward(layer, (1, 10, 10, 10, 4))
def test_deconv():
# layers1d = [
# nn.Conv1DTranspose(16, 3, in_channels=4),
# nn.Conv1DTranspose(16, 3, groups=2, in_channels=4),
# nn.Conv1DTranspose(16, 3, strides=3, groups=2, in_channels=4),
# ]
# for layer in layers1d:
# check_layer_forward(layer, (1, 4, 10))
layers2d = [
nn.Conv2DTranspose(16, (3, 4), in_channels=4),
nn.Conv2DTranspose(16, (5, 4), in_channels=4),
nn.Conv2DTranspose(16, (3, 4), groups=2, in_channels=4),
nn.Conv2DTranspose(16, (3, 4), strides=4, in_channels=4),
nn.Conv2DTranspose(16, (3, 4), dilation=4, in_channels=4),
nn.Conv2DTranspose(16, (3, 4), padding=4, in_channels=4),
nn.Conv2DTranspose(16, (3, 4), strides=4, output_padding=3, in_channels=4),
]
for layer in layers2d:
check_layer_forward(layer, (1, 4, 20, 20))
# layers3d = [
# nn.Conv3DTranspose(16, (1, 8, 4), in_channels=4),
# nn.Conv3DTranspose(16, (5, 4, 3), in_channels=4),
# nn.Conv3DTranspose(16, (3, 3, 3), groups=2, in_channels=4),
# nn.Conv3DTranspose(16, 4, strides=4, in_channels=4),
# nn.Conv3DTranspose(16, (3, 3, 3), padding=4, in_channels=4),
# ]
# for layer in layers3d:
# check_layer_forward(layer, (1, 4, 10, 10, 10))
#
#
# layer = nn.Conv2DTranspose(16, (3, 3), layout='NHWC', in_channels=4)
# # check_layer_forward(layer, (1, 10, 10, 4))
#
# layer = nn.Conv3DTranspose(16, (3, 3, 3), layout='NDHWC', in_channels=4)
# # check_layer_forward(layer, (1, 10, 10, 10, 4))
def test_pool():
layers1d = [
nn.MaxPool1D(),
nn.MaxPool1D(3),
nn.MaxPool1D(3, 2),
nn.AvgPool1D(),
nn.GlobalAvgPool1D(),
]
for layer in layers1d:
check_layer_forward(layer, (1, 2, 10))
layers2d = [
nn.MaxPool2D(),
nn.MaxPool2D((3, 3)),
nn.MaxPool2D(3, 2),
nn.AvgPool2D(),
nn.GlobalAvgPool2D(),
]
for layer in layers2d:
check_layer_forward(layer, (1, 2, 10, 10))
layers3d = [
nn.MaxPool3D(),
nn.MaxPool3D((3, 3, 3)),
nn.MaxPool3D(3, 2),
nn.AvgPool3D(),
nn.GlobalAvgPool3D(),
]
for layer in layers3d:
check_layer_forward(layer, (1, 2, 10, 10, 10))
# test ceil_mode
x = mx.nd.zeros((2, 2, 10, 10))
layer = nn.MaxPool2D(3, ceil_mode=False)
layer.collect_params().initialize()
assert (layer(x).shape==(2, 2, 3, 3))
layer = nn.MaxPool2D(3, ceil_mode=True)
layer.collect_params().initialize()
assert (layer(x).shape==(2, 2, 4, 4))
def test_batchnorm():
layer = nn.BatchNorm(in_channels=10)
check_layer_forward(layer, (2, 10, 10, 10))
def test_reshape():
x = mx.nd.ones((2, 4, 10, 10))
layer = nn.Conv2D(10, 2, in_channels=4)
layer.collect_params().initialize()
with mx.autograd.record():
x = layer(x)
x = x.reshape((-1,))
x = x + 10
x.backward()
def test_slice():
x = mx.nd.ones((5, 4, 10, 10))
layer = nn.Conv2D(10, 2, in_channels=4)
layer.collect_params().initialize()
with mx.autograd.record():
x = layer(x)
x = x[1:3]
x = x + 10
x.backward()
def test_at():
x = mx.nd.ones((5, 4, 10, 10))
layer = nn.Conv2D(10, 2, in_channels=4)
layer.collect_params().initialize()
with mx.autograd.record():
x = layer(x)
x = x[1]
x = x + 10
x.backward()
def test_deferred_init():
x = mx.nd.ones((5, 4, 10, 10))
layer = nn.Conv2D(10, 2)
layer.collect_params().initialize()
layer(x)
def check_split_data(x, num_slice, batch_axis, **kwargs):
res = gluon.utils.split_data(x, num_slice, batch_axis, **kwargs)
assert len(res) == num_slice
mx.test_utils.assert_almost_equal(mx.nd.concat(*res, dim=batch_axis).asnumpy(),
x.asnumpy())
def test_split_data():
x = mx.nd.random_uniform(shape=(128, 33, 64))
check_split_data(x, 8, 0)
check_split_data(x, 3, 1)
check_split_data(x, 4, 1, even_split=False)
check_split_data(x, 15, 1, even_split=False)
try:
check_split_data(x, 4, 1)
except ValueError:
return
assert False, "Should have failed"
def test_flatten():
flatten = nn.Flatten()
x = mx.nd.zeros((3,4,5,6))
assert flatten(x).shape == (3, 4*5*6)
x = mx.nd.zeros((3,6))
assert flatten(x).shape == (3, 6)
x = mx.nd.zeros((3,))
assert flatten(x).shape == (3, 1)
if __name__ == '__main__':
import nose
nose.runmodule()
|
|
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
from abc import ABCMeta, abstractmethod
import numpy as np
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import Hinge
from .sgd_fast import Huber
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredEpsilonInsensitive
from .sgd_fast import SquaredHinge
from .sgd_fast import SquaredLoss
from .sgd_fast import plain_sgd, average_sgd
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..externals import six
from ..externals.joblib import Parallel, delayed
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils import compute_class_weight
from ..utils.extmath import safe_sparse_dot
from ..utils.fixes import astype
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
if self.learning_rate == "optimal" and self.alpha == 0:
raise ValueError("alpha must be > 0 since "
"learning_rate is 'optimal'. alpha is used "
"to compute the optimal learning rate.")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon,)
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match "
"dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes,):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1, )
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
    # if average is not true, average_coef and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log,),
"modified_huber": (ModifiedHuber,),
"squared_loss": (SquaredLoss,),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(self.intercept_)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
            Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced', 'auto']:
            raise ValueError("class_weight '{0}' is not supported for "
                             "partial_fit. In order to use 'balanced' weights,"
                             " use compute_class_weight('{0}', classes, y). "
                             "In place of y you can use a large enough sample "
                             "of the full training set target to properly "
                             "estimate the class frequency distributions. "
                             "Pass the resulting weights as the class_weight "
                             "parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
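    # A minimal out-of-core sketch (illustrative only, not part of the original
    # source): partial_fit performs a single epoch (n_iter=1) over the chunk it
    # is given, so a dataset that does not fit in memory can be streamed in
    # batches, passing the full label set on the first call, e.g.
    #
    #     clf = SGDClassifier(loss="log")
    #     for X_chunk, y_chunk in batches:  # ``batches`` is a hypothetical iterator
    #         clf.partial_fit(X_chunk, y_chunk, classes=all_classes)
    #
    # where ``all_classes`` would be e.g. ``np.unique(y_all)`` over the full
    # target vector.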
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
    one sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (alpha * (t + t0)) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss,),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ = \
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(alpha * t)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
        average. So ``average=10`` will begin averaging after seeing 10
samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
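# A minimal usage sketch, not part of the original module: it shows how the
# ``average`` and ``partial_fit`` behaviour documented above can be combined for
# mini-batch / streaming training. The data, coefficients and batch sizes below
# are made up purely for illustration.
def _example_incremental_sgd():
    rng = np.random.RandomState(0)
    true_w = np.array([1.0, -2.0, 0.5])
    reg = SGDRegressor(average=10)      # start averaging once 10 samples are seen
    for _ in range(5):                  # five mini-batches of 20 samples each
        X = rng.randn(20, 3)
        y = np.dot(X, true_w) + 0.1 * rng.randn(20)
        reg.partial_fit(X, y)           # one pass over this batch; t_ keeps growing
    return reg.coef_, reg.intercept_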
|
|
import logging
import servercommands
# Globals
invalidNames = ['NONE']
def configLog(filepath):
logging.basicConfig(level=logging.DEBUG,
filename=filepath,
format='%(asctime)s | %(message)s')
def logMsg(msg):
"""Logs the specified message to std out
and the file set my the logging config."""
print msg
logging.info(msg)
def gamesList(client):
"""Get list of all games on the server."""
return [{'name':k,
'player_count':len(v['players']),
'playerlimit':v['playerlimit']}
for k, v in client.factory.games.items()]
def addStatusInfo(client, resp):
"""Add needed data based on client's status."""
resp['status'] = client.status
if client.status == 'In lobby':
resp['games'] = gamesList(client)
elif client.status == 'In game':
resp['gamedata'] = client.factory.games[client.gamekey]
def sendToLobby(client, resp):
"""Send response to specified clients"""
clientlist = [k for k, v
in client.factory.named_clients.items()
if v.gamekey == None]
resp['status'] = 'In lobby'
resp['games'] = gamesList(client)
client.factory.sendToClients(clientlist,
client.factory.json_encoder.encode(resp))
# Log
logMsg('to: '+str(clientlist)+' - '+str(resp))
def sendToGame(client, gamename, resp):
"""Send response to specified clients"""
clientlist = client.factory.games[gamename]['players'].keys()
resp['status'] = 'In game'
resp['gamedata'] = client.factory.games[gamename]
client.factory.sendToClients(clientlist,
client.factory.json_encoder.encode(resp))
# Log
logMsg('to: '+str(clientlist)+' - '+str(resp))
def sendResp(client, resp):
"""Send response to client"""
addStatusInfo(client, resp)
client.sendMessage(client.factory.json_encoder.encode(resp))
# Log
logMsg('to: '+str(client.name)+' - '+str(resp))
def sendError(client, error):
"""Send client an error"""
logMsg('Client error: '+error)
sendResp(client, {'err':error})
# Command functions:
def createUser(client, cmd, sendMsg=True):
"""Create a new user and save new player data"""
if len(cmd) < 4:
if sendMsg:
sendError(client, 'Both username and password are required')
elif len(cmd) > 4 or cmd[2] in invalidNames:
if sendMsg:
sendError(client, 'Invalid username or password')
elif servercommands.createUser(client, cmd[2], cmd[3]):
if sendMsg:
sendResp(client, {'message':'User successfully created'})
else:
if sendMsg:
sendError(client, 'Failed to create user')
def login(client, cmd, sendMsg=True):
"""Attempt to log client in.
    Response will contain an error message (err) if failed.
    Response will contain lobby data (games) if successful.
    Status changes to 'In lobby' on success."""
if len(cmd) < 3:
if sendMsg:
sendError(client, 'Both username and password required')
return
elif len(cmd) > 3:
if sendMsg:
sendError(client, 'Invalid username or password')
return
else:
username, password = cmd[1], cmd[2]
if client.status != 'Logging in':
if sendMsg:
sendError(client, 'You are already logged in')
elif client.name in client.factory.named_clients.keys():
if sendMsg:
sendError(client, 'You are already logged in')
elif username in client.factory.named_clients.keys():
if sendMsg:
sendError(client, 'That username is already in use')
elif username in invalidNames:
if sendMsg:
sendError(client, 'That username is not valid, try again')
else:
if servercommands.login(client, username, password):
# Log
logMsg('Login successful: "'+username+'" logged in')
if sendMsg:
sendResp(client, {'message':'Logged in as '+username})
else:
if sendMsg:
sendError(client, 'Invalid username or password')
def createGame(client, cmd, sendMsg=True):
"""Attempt to create a game.
Response will contain an error message (err) if failed.
Status does not change on success."""
error = None
if len(cmd) != 3:
if sendMsg:
sendError(client, 'No game name provided')
return
else:
gamename = cmd[2]
if client.name == None:
if sendMsg:
sendError(client, 'You must be logged in to create a game')
elif client.status != 'In lobby':
if sendMsg:
sendError(client, 'Must be in lobby to create a game')
elif gamename in client.factory.games.keys():
if sendMsg:
sendError(client, 'That game already exists')
elif (len(client.factory.games)
== int(client.factory.properties['game_limit'])):
if sendMsg:
sendError(client, """Game limit reached,
try again later or join another game""")
else:
servercommands.createGame(client, gamename, {})
# Log
logMsg('Create game successful: "'+gamename+'" created')
if sendMsg:
sendToLobby(client, {'message':'Game '+gamename+' created'})
def joinGame(client, cmd, sendMsg=True):
"""Attempt to add the client to the specified game.
Response will contain error message (err) if failed.
Response will contain game data (gamedata) if successful.
    Status changes to 'In game' on success."""
if len(cmd) != 3:
sendError(client, 'Invalid game name')
return
else:
gamename = cmd[2]
if client.name == None:
if sendMsg:
sendError(client, 'You must be logged in to join a game')
elif client.gamekey != None:
if sendMsg:
sendError(client, 'You are already in a game')
elif gamename not in client.factory.games.keys():
if sendMsg:
sendError(client, 'There is currently no game with that name')
elif (len(client.factory.games[gamename]['players'])
== int(client.factory.games[gamename]['playerlimit'])):
if sendMsg:
sendError(client, 'That game is full')
else:
servercommands.joinGame(client, gamename)
# Log
        logMsg('Join game successful: "'+client.name
               +'" added to "'+gamename+'"')
if sendMsg:
sendToGame(client, gamename,
{'message':client.name+' joined the game'})
sendToLobby(client, {})
def quitGame(client, sendMsg=True):
"""Attempt to quit game.
Response will contain error message (err) if failed.
Response will contain lobby data (games) if successful.
    Status changes to 'In lobby' on success."""
if client.gamekey == None:
if sendMsg:
sendError(client, 'You are not in a game yet')
else:
# Capture game name
gamename = client.gamekey
servercommands.quitGame(client)
# Log
logMsg('Quit game successful: "'+client.name
+'" removed from "'+gamename+'"')
if sendMsg:
sendToGame(client, gamename,
{'message':client.name+' left the game'})
sendToLobby(client, {})
def levelup(client, cmd, sendMsg=True):
"""Attempt to level up player.
Response will contain error message (err) if failed.
    Response will contain a confirmation message if successful.
    Status does not change on success."""
if len(cmd) != 2:
sendError(client, 'Invalid stat name')
return
else:
statname = cmd[1]
if client.playerdata == None:
if sendMsg:
sendError(client, 'You must be logged in to level up')
elif statname not in client.playerdata['stats'].keys():
if sendMsg:
sendError(client, "Couldn't find that stat, try again")
elif client.playerdata['exp'] <= 0:
if sendMsg:
sendError(client, "You don't have enough XP, win some games first")
else:
servercommands.levelUp(client, statname)
# Log
logMsg('Level up successful: "'+statname
+'" increased by 1 for "'+client.name+'"')
if sendMsg:
sendResp(client, {'message':statname+' increased by 1'})
def logout(client, sendMsg=True):
"""Attempt to log client out.
Response will contain error message (err) if failed.
    Response will contain a status of 'Logging in' if successful.
    Status changes to 'Logging in' on success."""
# Clear game data
quitGame(client, False)
if client.name == None:
if sendMsg:
sendError(client, "You haven't logged in yet")
else:
# Capture username
username = client.name
servercommands.logout(client)
# Log
logMsg('Logout successful: "'+username+'" logged out')
if sendMsg:
sendResp(client, {'status':'Logging in',
'message':'Logged out'})
def closeConn(client):
"""Remove client data and close connection with client"""
logMsg('Closing client connection')
# Clear client data
logout(client, False)
client.factory.clients.remove(client)
# Disconnect
client.abortConnection()
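# A minimal dispatch sketch, not part of the original module. It assumes commands
# arrive as already-tokenised lists whose first element names the command; each
# handler above validates its own argument layout, so the dispatcher only routes.
# The command keywords used here are illustrative, not the real wire protocol.
COMMAND_HANDLERS = {
    'create_user': createUser,
    'login': login,
    'create_game': createGame,
    'join_game': joinGame,
    'level_up': levelup,
}
def dispatchCommand(client, cmd):
    """Route a tokenised command list to its handler, or report an error."""
    if not cmd:
        sendError(client, 'Empty command')
        return
    handler = COMMAND_HANDLERS.get(cmd[0])
    if handler is None:
        sendError(client, 'Unknown command: ' + str(cmd[0]))
        return
    handler(client, cmd)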
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras core layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import textwrap
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.mixed_precision.experimental import policy
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes
class DropoutLayersTest(keras_parameterized.TestCase):
def test_dropout(self):
testing_utils.layer_test(
keras.layers.Dropout, kwargs={'rate': 0.5}, input_shape=(3, 2))
testing_utils.layer_test(
keras.layers.Dropout,
kwargs={'rate': 0.5,
'noise_shape': [3, 1]},
input_shape=(3, 2))
def test_dropout_supports_masking(self):
dropout = keras.layers.Dropout(0.5)
self.assertEqual(True, dropout.supports_masking)
def test_spatial_dropout_1d(self):
testing_utils.layer_test(
keras.layers.SpatialDropout1D,
kwargs={'rate': 0.5},
input_shape=(2, 3, 4))
def test_spatial_dropout_2d(self):
testing_utils.layer_test(
keras.layers.SpatialDropout2D,
kwargs={'rate': 0.5},
input_shape=(2, 3, 4, 5))
testing_utils.layer_test(
keras.layers.SpatialDropout2D,
kwargs={'rate': 0.5, 'data_format': 'channels_first'},
input_shape=(2, 3, 4, 5))
def test_spatial_dropout_3d(self):
testing_utils.layer_test(
keras.layers.SpatialDropout3D,
kwargs={'rate': 0.5},
input_shape=(2, 3, 4, 4, 5))
testing_utils.layer_test(
keras.layers.SpatialDropout3D,
kwargs={'rate': 0.5, 'data_format': 'channels_first'},
input_shape=(2, 3, 4, 4, 5))
def test_dropout_partial_noise_shape(self):
inputs = keras.Input(shape=(5, 10))
layer = keras.layers.Dropout(0.5, noise_shape=(None, 1, None))
outputs = layer(inputs)
model = keras.Model(inputs, outputs)
out = model(np.ones((20, 5, 10)), training=True)
out_np = keras.backend.get_value(out)
# Test that dropout mask is shared across second dim.
self.assertAllClose(out_np[:, 0, :], out_np[:, 1, :])
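# A small sketch, not part of the original tests: with noise_shape=(None, 1, None)
# a single Bernoulli mask is broadcast across the second axis, which is exactly
# what test_dropout_partial_noise_shape asserts above. The shapes here are arbitrary.
def _example_shared_dropout_mask():
    layer = keras.layers.Dropout(0.5, noise_shape=(None, 1, None))
    out = layer(np.ones((4, 5, 10)), training=True)
    # Every position along axis 1 shares the same mask, so those rows are identical.
    return keras.backend.get_value(out)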
@keras_parameterized.run_all_keras_modes
class LambdaLayerTest(keras_parameterized.TestCase):
def test_lambda(self):
testing_utils.layer_test(
keras.layers.Lambda,
kwargs={'function': lambda x: x + 1},
input_shape=(3, 2))
testing_utils.layer_test(
keras.layers.Lambda,
kwargs={
'function': lambda x, a, b: x * a + b,
'arguments': {
'a': 0.6,
'b': 0.4
}
},
input_shape=(3, 2))
# test serialization with function
def f(x):
return x + 1
ld = keras.layers.Lambda(f)
config = ld.get_config()
ld = keras.layers.deserialize({
'class_name': 'Lambda',
'config': config
})
self.assertEqual(ld.function(3), 4)
# test with lambda
ld = keras.layers.Lambda(
lambda x: keras.backend.concatenate([math_ops.square(x), x]))
config = ld.get_config()
ld = keras.layers.Lambda.from_config(config)
self.assertAllEqual(self.evaluate(ld.function([3])), [9, 3])
def test_lambda_multiple_inputs(self):
ld = keras.layers.Lambda(lambda x: x[0], output_shape=lambda x: x[0])
x1 = np.ones([3, 2], np.float32)
x2 = np.ones([3, 5], np.float32)
out = ld([x1, x2])
self.assertAllEqual(out.shape, [3, 2])
def test_lambda_output_shape(self):
l = keras.layers.Lambda(lambda x: x + 1, output_shape=(1, 1))
l(keras.backend.variable(np.ones((1, 1))))
self.assertEqual((1, 1), l.get_config()['output_shape'])
def test_lambda_output_shape_function(self):
def get_output_shape(input_shape):
return 1 * input_shape
l = keras.layers.Lambda(lambda x: x + 1, output_shape=get_output_shape)
l(keras.backend.variable(np.ones((1, 1))))
self.assertEqual('lambda', l.get_config()['output_shape_type'])
def test_lambda_output_shape_autocalculate_multiple_inputs(self):
def lambda_fn(x):
return math_ops.matmul(x[0], x[1])
l = keras.layers.Lambda(lambda_fn, dtype=dtypes.float64)
output_shape = l.compute_output_shape([(10, 10), (10, 20)])
self.assertAllEqual((10, 20), output_shape)
output_signature = l.compute_output_signature([
tensor_spec.TensorSpec(dtype=dtypes.float64, shape=(10, 10)),
tensor_spec.TensorSpec(dtype=dtypes.float64, shape=(10, 20))])
self.assertAllEqual((10, 20), output_signature.shape)
self.assertAllEqual(dtypes.float64, output_signature.dtype)
def test_lambda_output_shape_list_multiple_outputs(self):
def lambda_fn(x):
return x
l = keras.layers.Lambda(lambda_fn, output_shape=[(10,), (20,)])
output_shape = l.compute_output_shape([(10, 10), (10, 20)])
self.assertAllEqual([(10, 10), (10, 20)], output_shape)
def test_lambda_output_shape_tuple_with_none(self):
def lambda_fn(x):
return x
l = keras.layers.Lambda(lambda_fn, output_shape=(None, 10))
output_shape = l.compute_output_shape((5, 10, 20))
self.assertAllEqual([5, None, 10], output_shape.as_list())
def test_lambda_output_shape_function_multiple_outputs(self):
def lambda_fn(x):
return x
def output_shape_fn(input_shape):
return input_shape
l = keras.layers.Lambda(lambda_fn, output_shape=output_shape_fn)
output_shape = l.compute_output_shape([(10, 10), (10, 20)])
self.assertAllEqual([(10, 10), (10, 20)], output_shape)
def test_lambda_output_shape_nested(self):
def lambda_fn(inputs):
return (inputs[1]['a'], {'b': inputs[0]})
l = keras.layers.Lambda(lambda_fn)
output_shape = l.compute_output_shape(((10, 20), {'a': (10, 5)}))
self.assertAllEqual(((10, 5), {'b': (10, 20)}), output_shape)
def test_lambda_config_serialization(self):
# Test serialization with output_shape and output_shape_type
layer = keras.layers.Lambda(
lambda x: x + 1,
output_shape=(1, 1),
mask=lambda i, m: m)
layer(keras.backend.variable(np.ones((1, 1))))
config = layer.get_config()
layer = keras.layers.deserialize({
'class_name': 'Lambda',
'config': config
})
self.assertAllEqual(layer.function(1), 2)
self.assertAllEqual(layer._output_shape, (1, 1))
self.assertAllEqual(layer.mask(1, True), True)
layer = keras.layers.Lambda.from_config(config)
self.assertAllEqual(layer.function(1), 2)
self.assertAllEqual(layer._output_shape, (1, 1))
self.assertAllEqual(layer.mask(1, True), True)
def test_lambda_with_training_arg(self):
def fn(x, training=True):
return keras.backend.in_train_phase(x, 2 * x, training=training)
layer = keras.layers.Lambda(fn)
x = keras.backend.ones(())
train_out = layer(x, training=True)
eval_out = layer(x, training=False)
self.assertEqual(keras.backend.get_value(train_out), 1.)
self.assertEqual(keras.backend.get_value(eval_out), 2.)
def test_lambda_with_mask(self):
def add_one(inputs):
return inputs + 1.0
def mask(unused_inputs, previous_mask):
return previous_mask
layer = keras.layers.Lambda(add_one, mask=mask)
x = np.ones([5, 4, 3])
x[:, -1, :] = 0
masking = keras.layers.Masking()
out = layer(masking(x))
expected_out = np.full([5, 4, 3], 2.0)
expected_out[:, -1, :] = 1.0
expected_mask = np.ones([5, 4])
expected_mask[:, -1] = 0.0
self.assertAllClose(self.evaluate(out), expected_out)
self.assertIsNotNone(out._keras_mask)
self.assertAllClose(self.evaluate(out._keras_mask), expected_mask)
def test_lambda_with_ragged_input(self):
def add_one(inputs):
return inputs + 1.0
layer = keras.layers.Lambda(add_one)
ragged_input = ragged_factory_ops.constant([[1.0], [2.0, 3.0]])
out = layer(ragged_input)
expected_out = ragged_factory_ops.constant([[2.0], [3.0, 4.0]])
self.assertAllClose(out, expected_out)
def test_lambda_deserialization_does_not_pollute_core(self):
layer = keras.layers.Lambda(lambda x: x + 1)
config = layer.get_config()
keras.layers.Lambda.from_config(config)
self.assertNotIn(self.__class__.__name__, dir(core))
class TestStatefulLambda(keras_parameterized.TestCase):
@keras_parameterized.run_all_keras_modes
@keras_parameterized.run_with_all_model_types
def test_lambda_with_variable_in_model(self):
v = variables.Variable(1., trainable=True)
def lambda_fn(x, v):
return x * v
# While it is generally not advised to mix Variables with Lambda layers, if
# the variables are explicitly set as attributes then they are still
# tracked. This is consistent with the base Layer behavior.
layer = keras.layers.Lambda(lambda_fn, arguments={'v': v})
self.assertLen(layer.trainable_weights, 0)
layer.v = v
self.assertLen(layer.trainable_weights, 1)
model = testing_utils.get_model_from_layers([layer], input_shape=(10,))
model.compile(
keras.optimizer_v2.gradient_descent.SGD(0.1),
'mae',
run_eagerly=testing_utils.should_run_eagerly())
x, y = np.ones((10, 10), 'float32'), 2 * np.ones((10, 10), 'float32')
model.fit(x, y, batch_size=2, epochs=2, validation_data=(x, y))
self.assertLen(model.trainable_weights, 1)
self.assertAllClose(keras.backend.get_value(model.trainable_weights[0]), 2.)
@keras_parameterized.run_all_keras_modes
@keras_parameterized.run_with_all_model_types
def test_creation_inside_lambda(self):
def lambda_fn(x):
scale = variables.Variable(1., trainable=True, name='scale')
shift = variables.Variable(1., trainable=True, name='shift')
return x * scale + shift
expected_error = textwrap.dedent(r'''
( )?The following Variables were created within a Lambda layer \(shift_and_scale\)
( )?but are not tracked by said layer:
( )? <tf.Variable \'.*shift_and_scale/scale:0\'.+
( )? <tf.Variable \'.*shift_and_scale/shift:0\'.+
( )?The layer cannot safely ensure proper Variable reuse.+''')
with self.assertRaisesRegex(ValueError, expected_error):
layer = keras.layers.Lambda(lambda_fn, name='shift_and_scale')
model = testing_utils.get_model_from_layers([layer], input_shape=(1,))
model(array_ops.ones((4, 1)))
@keras_parameterized.run_all_keras_modes
@keras_parameterized.run_with_all_model_types
def test_transitive_variable_creation(self):
dense = keras.layers.Dense(1, use_bias=False, kernel_initializer='ones')
def bad_lambda_fn(x):
return dense(x + 1) # Dense layer is built on first call
expected_error = textwrap.dedent(r'''
( )?The following Variables were created within a Lambda layer \(bias_dense\)
( )?but are not tracked by said layer:
( )? <tf.Variable \'.*bias_dense/dense/kernel:0\'.+
( )?The layer cannot safely ensure proper Variable reuse.+''')
with self.assertRaisesRegex(ValueError, expected_error):
layer = keras.layers.Lambda(bad_lambda_fn, name='bias_dense')
model = testing_utils.get_model_from_layers([layer], input_shape=(1,))
model(array_ops.ones((4, 1)))
@keras_parameterized.run_all_keras_modes
@keras_parameterized.run_with_all_model_types
def test_warns_on_variable_capture(self):
v = variables.Variable(1., trainable=True)
def lambda_fn(x):
return x * v
expected_warning = textwrap.dedent(r'''
( )?The following Variables were used a Lambda layer\'s call \(lambda\), but
( )?are not present in its tracked objects:
( )? <tf.Variable \'.*Variable:0\'.+
( )?It is possible that this is intended behavior.+''')
layer = keras.layers.Lambda(lambda_fn)
def patched_warn(msg):
raise ValueError(msg)
layer._warn = patched_warn
with self.assertRaisesRegex(ValueError, expected_warning):
model = testing_utils.get_model_from_layers([layer], input_shape=(1,))
model(array_ops.ones((4, 1)))
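# A small sketch, not part of the original tests: the supported pattern for a
# stateful Lambda (as exercised in test_lambda_with_variable_in_model above) is to
# pass the variable through `arguments` and also attach it as an attribute so the
# layer tracks it among its trainable weights.
def _example_tracked_lambda_variable():
    v = variables.Variable(2., trainable=True)
    layer = keras.layers.Lambda(lambda x, v: x * v, arguments={'v': v})
    layer.v = v  # attribute assignment is what makes `v` show up in trainable_weights
    return layer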
@keras_parameterized.run_all_keras_modes
class CoreLayersTest(keras_parameterized.TestCase):
def test_masking(self):
testing_utils.layer_test(
keras.layers.Masking, kwargs={}, input_shape=(3, 2, 3))
def test_keras_mask(self):
x = np.ones((10, 10))
y = keras.layers.Masking(1.)(x)
self.assertTrue(hasattr(y, '_keras_mask'))
self.assertTrue(y._keras_mask is not None)
self.assertAllClose(self.evaluate(y._keras_mask), np.zeros((10,)))
def test_compute_mask_with_positional_mask_arg(self):
class MyLayer(keras.layers.Layer):
def call(self, inputs, mask=None):
return inputs
def compute_mask(self, inputs, mask=None):
if mask is not None:
return array_ops.ones(())
else:
return array_ops.zeros(())
x, mask = array_ops.ones((1, 1)), array_ops.ones((1, 1))
layer = MyLayer()
y = layer(x, mask)
# Check that `mask` was correctly sent to `compute_mask`.
self.assertEqual(keras.backend.get_value(y._keras_mask), 1)
def test_activation(self):
# with string argument
testing_utils.layer_test(
keras.layers.Activation,
kwargs={'activation': 'relu'},
input_shape=(3, 2))
# with function argument
testing_utils.layer_test(
keras.layers.Activation,
kwargs={'activation': keras.backend.relu},
input_shape=(3, 2))
def test_reshape(self):
testing_utils.layer_test(
keras.layers.Reshape,
kwargs={'target_shape': (8, 1)},
input_shape=(3, 2, 4))
testing_utils.layer_test(
keras.layers.Reshape,
kwargs={'target_shape': (-1, 1)},
input_shape=(3, 2, 4))
testing_utils.layer_test(
keras.layers.Reshape,
kwargs={'target_shape': (1, -1)},
input_shape=(3, 2, 4))
testing_utils.layer_test(
keras.layers.Reshape,
kwargs={'target_shape': (-1, 1)},
input_shape=(None, None, 2))
def test_reshape_set_static_shape(self):
input_layer = keras.Input(batch_shape=(1, None))
reshaped = keras.layers.Reshape((1, 100))(input_layer)
# Make sure the batch dim is not lost after array_ops.reshape.
self.assertEqual(reshaped.shape, [1, 1, 100])
def test_permute(self):
testing_utils.layer_test(
keras.layers.Permute, kwargs={'dims': (2, 1)}, input_shape=(3, 2, 4))
def test_permute_errors_on_invalid_starting_dims_index(self):
with self.assertRaisesRegex(ValueError, r'Invalid permutation .*dims.*'):
testing_utils.layer_test(
keras.layers.Permute,
kwargs={'dims': (0, 1, 2)}, input_shape=(3, 2, 4))
def test_permute_errors_on_invalid_set_of_dims_indices(self):
with self.assertRaisesRegex(ValueError, r'Invalid permutation .*dims.*'):
testing_utils.layer_test(
keras.layers.Permute,
kwargs={'dims': (1, 4, 2)}, input_shape=(3, 2, 4))
def test_flatten(self):
testing_utils.layer_test(
keras.layers.Flatten, kwargs={}, input_shape=(3, 2, 4))
# Test channels_first
inputs = np.random.random((10, 3, 5, 5)).astype('float32')
outputs = testing_utils.layer_test(
keras.layers.Flatten,
kwargs={'data_format': 'channels_first'},
input_data=inputs)
target_outputs = np.reshape(
np.transpose(inputs, (0, 2, 3, 1)), (-1, 5 * 5 * 3))
self.assertAllClose(outputs, target_outputs)
def test_flatten_scalar_channels(self):
testing_utils.layer_test(
keras.layers.Flatten, kwargs={}, input_shape=(3,))
# Test channels_first
inputs = np.random.random((10,)).astype('float32')
outputs = testing_utils.layer_test(
keras.layers.Flatten,
kwargs={'data_format': 'channels_first'},
input_data=inputs)
target_outputs = np.expand_dims(inputs, -1)
self.assertAllClose(outputs, target_outputs)
def test_repeat_vector(self):
testing_utils.layer_test(
keras.layers.RepeatVector, kwargs={'n': 3}, input_shape=(3, 2))
def test_dense(self):
testing_utils.layer_test(
keras.layers.Dense, kwargs={'units': 3}, input_shape=(3, 2))
testing_utils.layer_test(
keras.layers.Dense, kwargs={'units': 3}, input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.Dense, kwargs={'units': 3}, input_shape=(None, None, 2))
testing_utils.layer_test(
keras.layers.Dense, kwargs={'units': 3}, input_shape=(3, 4, 5, 2))
def test_dense_dtype(self):
inputs = ops.convert_to_tensor_v2_with_dispatch(
np.random.randint(low=0, high=7, size=(2, 2)))
layer = keras.layers.Dense(5, dtype='float32')
outputs = layer(inputs)
self.assertEqual(outputs.dtype, 'float32')
def test_dense_with_policy(self):
inputs = ops.convert_to_tensor_v2_with_dispatch(
np.random.randint(low=0, high=7, size=(2, 2)))
layer = keras.layers.Dense(5, dtype=policy.Policy('mixed_float16'))
outputs = layer(inputs)
output_signature = layer.compute_output_signature(
tensor_spec.TensorSpec(dtype='float16', shape=(2, 2)))
self.assertEqual(output_signature.dtype, dtypes.float16)
self.assertEqual(output_signature.shape, (2, 5))
self.assertEqual(outputs.dtype, 'float16')
self.assertEqual(layer.kernel.dtype, 'float32')
def test_dense_regularization(self):
layer = keras.layers.Dense(
3,
kernel_regularizer=keras.regularizers.l1(0.01),
bias_regularizer='l1',
activity_regularizer='l2',
name='dense_reg')
layer(keras.backend.variable(np.ones((2, 4))))
self.assertEqual(3, len(layer.losses))
def test_dense_constraints(self):
k_constraint = keras.constraints.max_norm(0.01)
b_constraint = keras.constraints.max_norm(0.01)
layer = keras.layers.Dense(
3, kernel_constraint=k_constraint, bias_constraint=b_constraint)
layer(keras.backend.variable(np.ones((2, 4))))
self.assertEqual(layer.kernel.constraint, k_constraint)
self.assertEqual(layer.bias.constraint, b_constraint)
def test_activity_regularization(self):
layer = keras.layers.ActivityRegularization(l1=0.1)
layer(keras.backend.variable(np.ones((2, 4))))
self.assertEqual(1, len(layer.losses))
config = layer.get_config()
self.assertEqual(config.pop('l1'), 0.1)
def test_numpy_inputs(self):
if context.executing_eagerly():
layer = keras.layers.RepeatVector(2)
x = np.ones((10, 10))
self.assertAllEqual(np.ones((10, 2, 10)), layer(x))
layer = keras.layers.Concatenate()
x, y = np.ones((10, 10)), np.ones((10, 10))
self.assertAllEqual(np.ones((10, 20)), layer([x, y]))
if __name__ == '__main__':
test.main()
|
|
#!/usr/bin/env python
#Copyright (C) 2011 by Benedict Paten (benedictpaten@gmail.com)
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
import os
import re
import subprocess
import time
import sys
from Queue import Queue, Empty
from threading import Thread
from sonLib.bioio import logger
from sonLib.bioio import system
from jobTree.batchSystems.abstractBatchSystem import AbstractBatchSystem
from jobTree.src.master import getParasolResultsFileName
from jobTree.batchSystems.multijob import MultiTarget
class MemoryString:
def __init__(self, string):
if string[-1] == 'K' or string[-1] == 'M' or string[-1] == 'G':
self.unit = string[-1]
self.val = float(string[:-1])
else:
self.unit = 'B'
self.val = float(string)
self.bytes = self.byteVal()
    def __str__(self):
        if self.unit != 'B':
            return str(self.val) + self.unit
        else:
            return str(self.val)
def byteVal(self):
if self.unit == 'B':
return self.val
elif self.unit == 'K':
return self.val * 1024
elif self.unit == 'M':
return self.val * 1048576
elif self.unit == 'G':
return self.val * 1073741824
def __cmp__(self, other):
return cmp(self.bytes, other.bytes)
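# Usage sketch, not part of the original module: MemoryString parses SGE-style
# size strings and compares them by byte value, so "2G" sorts above "512M".
def _exampleMemoryString():
    small, big = MemoryString("512M"), MemoryString("2G")
    assert small.bytes == 512 * 1048576
    assert small < big          # __cmp__ orders by the underlying byte count
    return str(big)             # "2.0G", with the unit suffix preserved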
def prepareQsub(cpu, mem):
qsubline = ["qsub","-b","y","-terse","-j" ,"y", "-cwd", "-o", "/dev/null", "-e", "/dev/null", "-v",
"LD_LIBRARY_PATH=%s" % os.environ["LD_LIBRARY_PATH"]]
reqline = list()
if cpu is not None:
reqline.append("p="+str(cpu))
if mem is not None:
reqline.append("vf="+str(mem/ 1024)+"K")
reqline.append("h_vmem="+str(mem/ 1024)+"K")
if len(reqline) > 0:
qsubline.extend(["-hard","-l", ",".join(reqline)])
return qsubline
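# Worked illustration, not part of the original module: the command line that
# prepareQsub assembles for a 2-CPU, 2 GiB (given in bytes) request. LD_LIBRARY_PATH
# is given a fallback here only so the sketch can run outside a configured SGE host.
def _examplePrepareQsub():
    os.environ.setdefault("LD_LIBRARY_PATH", "")
    line = prepareQsub(cpu=2, mem=2 * 1024 * 1024 * 1024)
    # line ends with: ['-hard', '-l', 'p=2,vf=2097152K,h_vmem=2097152K']
    return " ".join(line)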
def qsub(qsubline):
logger.debug("**"+" ".join(qsubline))
process = subprocess.Popen(qsubline, stdout=subprocess.PIPE)
result = int(process.stdout.readline().strip().split('.')[0])
logger.debug("Got the job id: %s" % (str(result)))
return result
def getjobexitcode(sgeJobID):
job, task = sgeJobID
args = ["qacct", "-j", str(job)]
if task is not None:
args.extend(["-t", str(task)])
process = subprocess.Popen(args, stdout = subprocess.PIPE,stderr = subprocess.STDOUT)
for line in process.stdout:
if line.startswith("failed") and int(line.split()[1]) == 1:
return 1
elif line.startswith("exit_status"):
return int(line.split()[1])
return None
class Worker(Thread):
def __init__(self, newJobsQueue, updatedJobsQueue, killQueue, killedJobsQueue, boss):
Thread.__init__(self)
self.newJobsQueue = newJobsQueue
self.updatedJobsQueue = updatedJobsQueue
self.killQueue = killQueue
self.killedJobsQueue = killedJobsQueue
self.waitingJobs = list()
self.runningJobs = set()
self.boss = boss
self.allocatedCpus = dict()
self.sgeJobIDs = dict()
def getRunningJobIDs(self):
times = {}
currentjobs = dict((self.sgeJobIDs[x], x) for x in self.runningJobs)
process = subprocess.Popen(["qstat"], stdout = subprocess.PIPE)
stdout, stderr = process.communicate()
        for currline in stdout.split('\n'):
            items = currline.strip().split()
            if not items:
                continue
            if ((len(items) > 9 and (items[0], items[9]) in currentjobs) or (items[0], None) in currentjobs) and items[4] == 'r':
                jobstart = " ".join(items[5:7])
                jobstart = time.mktime(time.strptime(jobstart, "%m/%d/%Y %H:%M:%S"))
                times[currentjobs[(items[0], items[9])]] = time.time() - jobstart
return times
def getSgeID(self, jobID):
        if jobID not in self.sgeJobIDs:
            raise RuntimeError("Unknown jobID, could not be converted")
(job,task) = self.sgeJobIDs[jobID]
if task is None:
return str(job)
else:
return str(job) + "." + str(task)
def forgetJob(self, jobID):
self.runningJobs.remove(jobID)
del self.allocatedCpus[jobID]
del self.sgeJobIDs[jobID]
def killJobs(self):
# Load hit list:
killList = list()
while not self.killQueue.empty():
killList.append(self.killQueue.get())
# Do the dirty job
for jobID in list(killList):
if jobID in self.runningJobs:
process = subprocess.Popen(["qdel", self.getSgeID(jobID)])
else:
if jobID in self.waitingJobs:
self.waitingJobs.remove(jobID)
self.killedJobsQueue.put(jobID)
killList.remove(jobID)
# Wait to confirm the kill
while len(killList) > 0:
for jobID in list(killList):
if getjobexitcode(self.sgeJobIDs[jobID]) is not None:
self.killedJobsQueue.put(jobID)
killList.remove(jobID)
self.forgetJob(jobID)
if len(killList) > 0:
logger.critical("Tried to kill some jobs, but something happened and they are still going, so I'll try again")
time.sleep(5)
def createJobs(self):
# Load new job ids:
while not self.newJobsQueue.empty():
self.waitingJobs.append(self.newJobsQueue.get())
# Launch jobs as necessary:
while len(self.waitingJobs) > 0 and sum(self.allocatedCpus.values()) < int(self.boss.maxCpus):
jobID, cpu, memory, command = self.waitingJobs.pop(0)
qsubline = prepareQsub(cpu, memory) + [command]
sgeJobID = qsub(qsubline)
self.sgeJobIDs[jobID] = (sgeJobID, None)
self.runningJobs.add(jobID)
self.allocatedCpus[jobID] = cpu
def checkOnJobs(self):
for jobID in list(self.runningJobs):
exit = getjobexitcode(self.sgeJobIDs[jobID])
if exit is not None:
self.updatedJobsQueue.put((jobID, exit))
self.forgetJob(jobID)
def run(self):
while True:
self.killJobs()
self.createJobs()
self.checkOnJobs()
time.sleep(10)
class GridengineBatchSystem(AbstractBatchSystem):
"""The interface for gridengine.
"""
@classmethod
def getDisplayNames(cls):
"""
Names used to select this batch system.
"""
return ["gridengine","gridEngine"]
def __init__(self, config, maxCpus, maxMemory):
AbstractBatchSystem.__init__(self, config, maxCpus, maxMemory) #Call the parent constructor
self.gridengineResultsFile = getParasolResultsFileName(config.attrib["job_tree"])
#Reset the job queue and results (initially, we do this again once we've killed the jobs)
self.gridengineResultsFileHandle = open(self.gridengineResultsFile, 'w')
self.gridengineResultsFileHandle.close() #We lose any previous state in this file, and ensure the files existence
self.currentjobs = set()
self.obtainSystemConstants()
self.nextJobID = 0
self.newJobsQueue = Queue()
self.updatedJobsQueue = Queue()
self.killQueue = Queue()
self.killedJobsQueue = Queue()
self.worker = Worker(self.newJobsQueue, self.updatedJobsQueue, self.killQueue, self.killedJobsQueue, self)
self.worker.setDaemon(True)
self.worker.start()
    def __del__(self):
        #Closes the file handle associated with the results file.
        self.gridengineResultsFileHandle.close() #Close the results file now that we are done.
def issueJob(self, command, memory, cpu):
self.checkResourceRequest(memory, cpu)
jobID = self.nextJobID
self.nextJobID += 1
self.currentjobs.add(jobID)
self.newJobsQueue.put((jobID, cpu, memory, command))
logger.debug("Issued the job command: %s with job id: %s " % (command, str(jobID)))
return jobID
def killJobs(self, jobIDs):
"""Kills the given jobs, represented as Job ids, then checks they are dead by checking
they are not in the list of issued jobs.
"""
for jobID in jobIDs:
self.killQueue.put(jobID)
killList = set(jobIDs)
        while len(killList) > 0:
            while True:
                #Poll briefly for confirmation from the worker that each job is dead.
                i = self.getFromQueueSafely(self.killedJobsQueue, 10)
                if i is not None:
                    killList.remove(i)
                    self.currentjobs.remove(i)
                else:
                    break
            if len(killList) > 0:
                time.sleep(5)
def getIssuedJobIDs(self):
"""Gets the list of jobs issued to SGE.
"""
return list(self.currentjobs)
def getRunningJobIDs(self):
return self.worker.getRunningJobIDs()
def getUpdatedJob(self, maxWait):
i = self.getFromQueueSafely(self.updatedJobsQueue, maxWait)
if i == None:
return None
jobID, retcode = i
self.updatedJobsQueue.task_done()
self.currentjobs.remove(jobID)
return i
def getWaitDuration(self):
"""We give parasol a second to catch its breath (in seconds)
"""
return 0.0
def getRescueJobFrequency(self):
"""Parasol leaks jobs, but rescuing jobs involves calls to parasol list jobs and pstat2,
making it expensive. We allow this every 10 minutes..
"""
return 1800 #Half an hour
def obtainSystemConstants(self):
p = subprocess.Popen(["qhost"], stdout = subprocess.PIPE,stderr = subprocess.STDOUT)
line = p.stdout.readline()
items = line.strip().split()
num_columns = len(items)
cpu_index = None
mem_index = None
for i in range(num_columns):
if items[i] == 'NCPU':
cpu_index = i
elif items[i] == 'MEMTOT':
mem_index = i
        if cpu_index is None or mem_index is None:
            raise RuntimeError("qhost command does not return NCPU or MEMTOT columns")
        p.stdout.readline()
        self.maxCPU = 0
        self.maxMEM = MemoryString("0")
        for line in p.stdout:
            items = line.strip().split()
            if len(items) < num_columns:
                raise RuntimeError("qhost output has a varying number of columns")
            if items[cpu_index] != '-' and int(items[cpu_index]) > self.maxCPU:
                self.maxCPU = int(items[cpu_index])
            if items[mem_index] != '-' and MemoryString(items[mem_index]) > self.maxMEM:
                self.maxMEM = MemoryString(items[mem_index])
        if self.maxCPU == 0 or self.maxMEM.bytes == 0:
            raise RuntimeError("qhost returns null NCPU or MEMTOT info")
def main():
pass
def _test():
import doctest
return doctest.testmod()
if __name__ == '__main__':
_test()
main()
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import constants
import json
class Topology:
"""
Represents the topology provided by the Cloud Controller
"""
def __init__(self):
self.service_map = {}
""" :type : dict[str, Service] """
self.initialized = False
""" :type : bool """
self.json_str = None
""" :type : str """
def get_services(self):
"""
Provides the list of services on the topology
:return: The list of Service objects
:rtype: list[Service]
"""
return self.service_map.values()
def get_service(self, service_name):
"""
Provides the service information for the given service name
:param str service_name: service name to be retrieved
:return: Service object of the service, None if the provided service name is invalid
:rtype: Service
"""
if service_name in self.service_map:
return self.service_map[service_name]
return None
def add_service(self, service):
"""
Adds a service to the list of services on the topology
:param Service service:
:return: void
"""
self.service_map[service.service_name] = service
def add_services(self, services):
"""
:param list[Service] services:
:return: void
"""
for service in services:
self.add_service(service)
def remove_service(self, service_name):
"""
Removes the service of the provided service name
:param str service_name:
:return: void
"""
if service_name in self.service_map:
self.service_map.pop(service_name)
def service_exists(self, service_name):
"""
Checks if the service of the provided service name exists
:param str service_name:
:return: True if the service exists, False if otherwise
:rtype: bool
"""
return service_name in self.service_map
def clear(self):
"""
Clears the service information list
:return: void
"""
self.service_map = {}
def __str__(self):
"""
to string override
:return:
"""
return "Topology [serviceMap= %r , initialized= %r ]" % (self.service_map, self.initialized)
class Service:
"""
Represents a service on the topology
"""
def __init__(self, service_name, service_type):
self.service_name = service_name
""" :type : str """
self.service_type = service_type
""" :type : str """
self.cluster_id_cluster_map = {}
""" :type : dict[str, Cluster] """
self.port_map = {}
""" :type : dict[str, Port] """
self.properties = {}
""" :type : dict[str, str] """
def get_clusters(self):
"""
Provides the list of clusters in the particular service
:return: The list of Cluster objects
:rtype: list[Cluster]
"""
return self.cluster_id_cluster_map.values()
def add_cluster(self, cluster):
"""
Adds a cluster to the service
:param Cluster cluster: the cluster to be added
:return: void
"""
self.cluster_id_cluster_map[cluster.cluster_id] = cluster
def remove_cluster(self, cluster_id):
if cluster_id in self.cluster_id_cluster_map:
self.cluster_id_cluster_map.pop(cluster_id)
def cluster_exists(self, cluster_id):
"""
        Checks if the cluster with the given cluster id exists for the service
:param str cluster_id:
:return: True if the cluster for the given cluster id exists, False if otherwise
:rtype: bool
"""
return cluster_id in self.cluster_id_cluster_map
def get_cluster(self, cluster_id):
"""
Provides the Cluster information for the provided cluster id
:param str cluster_id: the cluster id to search for
:return: Cluster object for the given cluster id, None if the cluster id is invalid
:rtype: Cluster
"""
if cluster_id in self.cluster_id_cluster_map:
return self.cluster_id_cluster_map[cluster_id]
return None
def get_ports(self):
"""
Returns the list of ports in the particular service
:return: The list of Port object
:rtype: list[Port]
"""
return self.port_map.values()
def get_port(self, proxy_port):
"""
Provides the port information for the provided proxy port
:param str proxy_port:
:return: Port object for the provided port, None if port is invalid
:rtype: Port
"""
if proxy_port in self.port_map:
return self.port_map[proxy_port]
return None
def add_port(self, port):
self.port_map[port.proxy] = port
def add_ports(self, ports):
for port in ports:
self.add_port(port)
class Cluster:
"""
Represents a cluster for a service
"""
def __init__(self, service_name="", cluster_id="", deployment_policy_name="", autoscale_policy_name=""):
self.service_name = service_name
""" :type : str """
self.cluster_id = cluster_id
""" :type : str """
self.deployment_policy_name = deployment_policy_name
""" :type : str """
self.autoscale_policy_name = autoscale_policy_name
""" :type : str """
self.hostnames = []
""" :type : list[str] """
self.member_map = {}
""" :type : dict[str, Member] """
self.tenant_range = None
""" :type : str """
self.is_lb_cluster = False
""" :type : bool """
self.is_kubernetes_cluster = False
""" :type : bool """
# self.status = None
# """ :type : str """
self.load_balancer_algorithm_name = None
""" :type : str """
self.properties = {}
""" :type : dict[str, str] """
self.member_list_json = None
""" :type : str """
self.app_id = ""
""" :type : str """
self.kubernetesService_map = {}
""" :type : dict[str, KubernetesService] """
# Not relevant to cartridge agent
# self.instance_id_instance_context_map = {}
# """ :type : dict[str, ClusterInstance] """
def add_hostname(self, hostname):
self.hostnames.append(hostname)
def set_tenant_range(self, tenant_range):
Cluster.validate_tenant_range(tenant_range)
self.tenant_range = tenant_range
def get_members(self):
"""
Provides the list of member information in the cluster
:return: The list of Member object
:rtype: list[Member]
"""
return self.member_map.values()
def get_kubernetesServices(self):
"""
Provides the list of kubernetes Services in the cluster
:return: The list of KubernetesService object
:rtype: list[KubernetesService]
"""
return self.kubernetesService_map.values()
def add_kubernetesService(self, kubernetesService):
self.kubernetesService_map[kubernetesService.id] = kubernetesService
def add_member(self, member):
self.member_map[member.member_id] = member
def remove_member(self, member_id):
if self.member_exists(member_id):
self.member_map.pop(member_id)
def get_member(self, member_id):
"""
Provides the member information for the provided member id
:param str member_id:
:return: Member object for the provided member id, None if member id is invalid
:rtype: Member
"""
if self.member_exists(member_id):
return self.member_map[member_id]
return None
def member_exists(self, member_id):
"""
Checks if the member for the provided member id exists in this cluster
:param str member_id: member id to be searched
:return: True if the member exists, False if otherwise
:rtype: bool
"""
return member_id in self.member_map
def __str__(self):
return "Cluster [serviceName=" + self.service_name + ", clusterId=" + self.cluster_id \
+ ", autoscalePolicyName=" + self.autoscale_policy_name + ", deploymentPolicyName=" \
+ self.deployment_policy_name + ", hostNames=" + self.hostnames + ", tenantRange=" + self.tenant_range \
+ ", isLbCluster=" + self.is_lb_cluster + ", properties=" + self.properties + "]"
def tenant_id_in_range(self, tenant_id):
"""
Check whether a given tenant id is in tenant range of the cluster.
        :param int tenant_id: tenant id to be checked
:return: True if the tenant id is in tenant id range, False if otherwise
:rtype: bool
"""
if self.tenant_range is None:
return False
if self.tenant_range == "*":
return True
else:
arr = self.tenant_range.split(constants.TENANT_RANGE_DELIMITER)
tenant_start = int(arr[0])
if tenant_start <= tenant_id:
tenant_end = arr[1]
if tenant_end == "*":
return True
else:
if tenant_id <= int(tenant_end):
return True
return False
@staticmethod
def validate_tenant_range(tenant_range):
"""
        Validates the tenant range to be either '*' or a delimited range of numbers
:param str tenant_range: The tenant range string to be validated
:return: void if the provided tenant range is valid, RuntimeError if otherwise
:exception: RuntimeError if the tenant range is invalid
"""
valid = False
if tenant_range == "*":
valid = True
else:
arr = tenant_range.split(constants.TENANT_RANGE_DELIMITER)
if len(arr) == 2:
if arr[0].isdigit() and arr[1].isdigit():
valid = True
elif arr[0].isdigit() and arr[1] == "*":
valid = True
if not valid:
raise RuntimeError("Tenant range %r is not valid" % tenant_range)
class Member:
"""
Represents a member on a particular cluster
"""
def __init__(self, service_name="", cluster_id="", network_partition_id="", partition_id="", member_id="",
cluster_instance_id=""):
self.service_name = service_name
""" :type : str """
self.cluster_id = cluster_id
""" :type : str """
self.network_partition_id = network_partition_id
""" :type : str """
self.cluster_instance_id = cluster_instance_id
""" :type : str """
self.partition_id = partition_id
""" :type : str """
self.member_id = member_id
""" :type : str """
self.port_map = {}
""" :type : dict[str, Port] """
self.init_time = None
""" :type : int """
self.member_public_ips = None
""" :type : str """
self.member_default_public_ip = None
""" :type : str """
self.status = None
""" :type : str """
self.member_private_ips = None
""" :type : str """
self.member_default_private_ip = None
""" :type : str """
self.properties = {}
""" :type : dict[str, str] """
self.lb_cluster_id = None
""" :type : str """
self.json_str = None
""" :type : str """
def is_active(self):
"""
Checks if the member is in active state
:return: True if active, False if otherwise
:rtype: bool
"""
return self.status == MemberStatus.Active
def get_ports(self):
"""
Provides the list of the ports in the member
:return: List of Port objects
:rtype: list[Port]
"""
return self.port_map.values()
def get_port(self, proxy):
"""
Provides the port information for the given port id
:param str proxy: The port id
:return: Port object of the provided port id, None if otherwise
:rtype: Port
"""
if proxy in self.port_map:
return self.port_map[proxy]
return None
def add_port(self, port):
self.port_map[port.proxy] = port
def add_ports(self, ports):
for port in ports:
self.add_port(port)
    def to_json(self):
        return json.dumps({"memberId": self.member_id, "status": self.status})
class KubernetesService:
"""
Represents a kubernetes service on a particular cluster
"""
def __init__(self, id, portalIP, protocol, port, containerPort, serviceType, portName):
self.id = id
""" :type : str """
self.portalIP = portalIP
""" :type : str """
self.protocol = protocol
""" :type : str """
self.port = port
""" :type : str """
self.containerPort = containerPort
""" :type : str """
self.serviceType = serviceType
""" :type : str """
self.portName = portName
""" :type : str """
self.publicIPs = []
""" :type : list[str] """
def add_public_ips(self, public_ip):
self.publicIPs.append(public_ip)
class Port:
"""
Represents a port on a particular member
"""
def __init__(self, protocol, value, proxy):
self.protocol = protocol
""" :type : str """
self.value = value
""" :type : str """
self.proxy = proxy
""" :type : str """
def __str__(self):
return "Port [protocol=%r, value=%r proxy=%r]" % (self.protocol, self.value, self.proxy)
class ServiceType:
"""
ServiceType enum
"""
SingleTenant = 1
MultiTenant = 2
class ClusterStatus:
"""
ClusterStatus enum
"""
Created = 1
In_Maintenance = 2
Removed = 3
class MemberStatus:
"""
MemberStatus enum
"""
Created = "Created"
Initialized = "Initialized"
Starting = "Starting"
Active = "Active"
In_Maintenance = "In_Maintenance"
ReadyToShutDown = "ReadyToShutDown"
Suspended = "Suspended"
Terminated = "Terminated"
class TopologyContext:
"""
Handles and maintains a model of the topology provided by the Cloud Controller
"""
topology = Topology()
initialized = False
@staticmethod
def get_topology():
if TopologyContext.topology is None:
TopologyContext.topology = Topology()
return TopologyContext.topology
@staticmethod
def update(topology):
TopologyContext.topology = topology
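# Sketch, not part of the original module: how a consumer might walk the shared
# TopologyContext down to a single member. The ids passed in are caller-supplied;
# None is returned whenever a level of the hierarchy is missing.
def example_lookup_member(service_name, cluster_id, member_id):
    topology = TopologyContext.get_topology()
    service = topology.get_service(service_name)
    if service is None:
        return None
    cluster = service.get_cluster(cluster_id)
    if cluster is None:
        return None
    return cluster.get_member(member_id)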
class Tenant:
"""
Object type representing the tenant details of a single tenant
"""
def __init__(self, tenant_id, tenant_domain):
self.tenant_id = tenant_id
""" :type : int """
self.tenant_domain = tenant_domain
""" :type : str """
self.service_name_subscription_map = {}
""" :type : dict[str, Subscription] """
def get_subscription(self, service_name):
"""
Returns the Subscription object related to the provided service name
:param str service_name: service name to be retrieved
:return: Subscription of the service or None if the service name doesn't exist
:rtype: Subscription
"""
if service_name in self.service_name_subscription_map:
return self.service_name_subscription_map[service_name]
return None
def is_subscribed(self, service_name):
"""
Checks if the given service name has a subscription from this tenant
:param str service_name: name of the service to check
:return: True if the tenant is subscribed to the given service name, False if not
:rtype: bool
"""
return service_name in self.service_name_subscription_map
def add_subscription(self, subscription):
"""
Adds a subscription information entry on the subscription list for this tenant
:param Subscription subscription: Subscription information to be added
:return: void
:rtype: void
"""
self.service_name_subscription_map[subscription.service_name] = subscription
def remove_subscription(self, service_name):
"""
Removes the specified subscription details from the subscription list
:param str service_name: The service name of the subscription to be removed
:return: void
:rtype: void
"""
if service_name in self.service_name_subscription_map:
self.service_name_subscription_map.pop(service_name)
class Subscription:
"""
Subscription information of a particular subscription to a service
"""
def __init__(self, service_name, cluster_ids):
self.service_name = service_name
""" :type : str """
self.cluster_ids = cluster_ids
""" :type : list[str] """
self.subscription_domain_map = {}
""" :type : dict[str, SubscriptionDomain] """
def add_subscription_domain(self, domain_name, application_context):
"""
Adds a subscription domain
:param str domain_name:
:param str application_context:
:return: void
:rtype: void
"""
self.subscription_domain_map[domain_name] = SubscriptionDomain(domain_name, application_context)
def remove_subscription_domain(self, domain_name):
"""
Removes the subscription domain of the specified domain name
:param str domain_name:
:return: void
:rtype: void
"""
if domain_name in self.subscription_domain_map:
self.subscription_domain_map.pop(domain_name)
def subscription_domain_exists(self, domain_name):
"""
Returns the SubscriptionDomain information of the specified domain name
:param str domain_name:
:return: SubscriptionDomain
:rtype: SubscriptionDomain
"""
return domain_name in self.subscription_domain_map
def get_subscription_domains(self):
"""
Returns the list of subscription domains of this subscription
:return: List of SubscriptionDomain objects
:rtype: list[SubscriptionDomain]
"""
return self.subscription_domain_map.values()
class SubscriptionDomain:
"""
Represents a Subscription Domain
"""
def __init__(self, domain_name, application_context):
self.domain_name = domain_name
""" :type : str """
self.application_context = application_context
""" :type : str """
class TenantContext:
"""
Handles and maintains a model of all the information related to tenants within this instance
"""
tenants = {}
initialized = False
tenant_domains = {"carbon.super": Tenant(-1234, "carbon.super")}
@staticmethod
def add_tenant(tenant):
TenantContext.tenants[tenant.tenant_id] = tenant
TenantContext.tenant_domains[tenant.tenant_domain] = tenant
@staticmethod
def remove_tenant(tenant_id):
if tenant_id in TenantContext.tenants:
tenant = TenantContext.get_tenant(tenant_id)
TenantContext.tenants.pop(tenant.tenant_id)
TenantContext.tenant_domains.pop(tenant.tenant_domain)
@staticmethod
def update(tenants):
for tenant in tenants:
TenantContext.add_tenant(tenant)
@staticmethod
def get_tenant(tenant_id):
"""
Gets the Tenant object of the provided tenant ID
:param int tenant_id:
:return: Tenant object of the provided tenant ID
:rtype: Tenant
"""
if tenant_id in TenantContext.tenants:
return TenantContext.tenants[tenant_id]
return None
@staticmethod
def get_tenant_by_domain(tenant_domain):
"""
Gets the Tenant object of the provided tenant domain
:param str tenant_domain:
:return: Tenant object of the provided tenant domain
        :rtype: Tenant
"""
if tenant_domain in TenantContext.tenant_domains:
return TenantContext.tenant_domains[tenant_domain]
return None
|
|
"""
Various complex queries that have been problematic in the past.
"""
import datetime
from django.db import models
from django.db.models.query import Q
class Tag(models.Model):
name = models.CharField(max_length=10)
parent = models.ForeignKey('self', blank=True, null=True)
def __unicode__(self):
return self.name
class Note(models.Model):
note = models.CharField(max_length=100)
misc = models.CharField(max_length=10)
class Meta:
ordering = ['note']
def __unicode__(self):
return self.note
class ExtraInfo(models.Model):
info = models.CharField(max_length=100)
note = models.ForeignKey(Note)
class Meta:
ordering = ['info']
def __unicode__(self):
return self.info
class Author(models.Model):
name = models.CharField(max_length=10)
num = models.IntegerField(unique=True)
extra = models.ForeignKey(ExtraInfo)
def __unicode__(self):
return self.name
class Item(models.Model):
name = models.CharField(max_length=10)
created = models.DateTimeField()
tags = models.ManyToManyField(Tag, blank=True, null=True)
creator = models.ForeignKey(Author)
note = models.ForeignKey(Note)
class Meta:
ordering = ['-note', 'name']
def __unicode__(self):
return self.name
class Report(models.Model):
name = models.CharField(max_length=10)
creator = models.ForeignKey(Author, to_field='num')
def __unicode__(self):
return self.name
class Ranking(models.Model):
rank = models.IntegerField()
author = models.ForeignKey(Author)
class Meta:
# A complex ordering specification. Should stress the system a bit.
ordering = ('author__extra__note', 'author__name', 'rank')
def __unicode__(self):
return '%d: %s' % (self.rank, self.author.name)
class Cover(models.Model):
title = models.CharField(max_length=50)
item = models.ForeignKey(Item)
class Meta:
ordering = ['item']
def __unicode__(self):
return self.title
class Number(models.Model):
num = models.IntegerField()
def __unicode__(self):
return unicode(self.num)
# Some funky cross-linked models for testing a couple of infinite recursion
# cases.
class X(models.Model):
y = models.ForeignKey('Y')
class Y(models.Model):
x1 = models.ForeignKey(X, related_name='y1')
# Some models with a cycle in the default ordering. This would be bad if we
# didn't catch the infinite loop.
class LoopX(models.Model):
y = models.ForeignKey('LoopY')
class Meta:
ordering = ['y']
class LoopY(models.Model):
x = models.ForeignKey(LoopX)
class Meta:
ordering = ['x']
class LoopZ(models.Model):
z = models.ForeignKey('self')
class Meta:
ordering = ['z']
# A model and custom default manager combination.
class CustomManager(models.Manager):
def get_query_set(self):
return super(CustomManager, self).get_query_set().filter(public=True,
tag__name='t1')
class ManagedModel(models.Model):
data = models.CharField(max_length=10)
tag = models.ForeignKey(Tag)
public = models.BooleanField(default=True)
objects = CustomManager()
normal_manager = models.Manager()
def __unicode__(self):
return self.data
__test__ = {'API_TESTS':"""
>>> t1 = Tag(name='t1')
>>> t1.save()
>>> t2 = Tag(name='t2', parent=t1)
>>> t2.save()
>>> t3 = Tag(name='t3', parent=t1)
>>> t3.save()
>>> t4 = Tag(name='t4', parent=t3)
>>> t4.save()
>>> t5 = Tag(name='t5', parent=t3)
>>> t5.save()
>>> n1 = Note(note='n1', misc='foo')
>>> n1.save()
>>> n2 = Note(note='n2', misc='bar')
>>> n2.save()
>>> n3 = Note(note='n3', misc='foo')
>>> n3.save()
Create these out of order so that sorting by 'id' will be different to sorting
by 'info'. Helps detect some problems later.
>>> e2 = ExtraInfo(info='e2', note=n2)
>>> e2.save()
>>> e1 = ExtraInfo(info='e1', note=n1)
>>> e1.save()
>>> a1 = Author(name='a1', num=1001, extra=e1)
>>> a1.save()
>>> a2 = Author(name='a2', num=2002, extra=e1)
>>> a2.save()
>>> a3 = Author(name='a3', num=3003, extra=e2)
>>> a3.save()
>>> a4 = Author(name='a4', num=4004, extra=e2)
>>> a4.save()
>>> time1 = datetime.datetime(2007, 12, 19, 22, 25, 0)
>>> time2 = datetime.datetime(2007, 12, 19, 21, 0, 0)
>>> time3 = datetime.datetime(2007, 12, 20, 22, 25, 0)
>>> time4 = datetime.datetime(2007, 12, 20, 21, 0, 0)
>>> i1 = Item(name='one', created=time1, creator=a1, note=n3)
>>> i1.save()
>>> i1.tags = [t1, t2]
>>> i2 = Item(name='two', created=time2, creator=a2, note=n2)
>>> i2.save()
>>> i2.tags = [t1, t3]
>>> i3 = Item(name='three', created=time3, creator=a2, note=n3)
>>> i3.save()
>>> i4 = Item(name='four', created=time4, creator=a4, note=n3)
>>> i4.save()
>>> i4.tags = [t4]
>>> r1 = Report(name='r1', creator=a1)
>>> r1.save()
>>> r2 = Report(name='r2', creator=a3)
>>> r2.save()
Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the Meta.ordering
will be rank3, rank2, rank1.
>>> rank1 = Ranking(rank=2, author=a2)
>>> rank1.save()
>>> rank2 = Ranking(rank=1, author=a3)
>>> rank2.save()
>>> rank3 = Ranking(rank=3, author=a1)
>>> rank3.save()
>>> c1 = Cover(title="first", item=i4)
>>> c1.save()
>>> c2 = Cover(title="second", item=i2)
>>> c2.save()
>>> n1 = Number(num=4)
>>> n1.save()
>>> n2 = Number(num=8)
>>> n2.save()
>>> n3 = Number(num=12)
>>> n3.save()
Bug #1050
>>> Item.objects.filter(tags__isnull=True)
[<Item: three>]
>>> Item.objects.filter(tags__id__isnull=True)
[<Item: three>]
Bug #1801
>>> Author.objects.filter(item=i2)
[<Author: a2>]
>>> Author.objects.filter(item=i3)
[<Author: a2>]
>>> Author.objects.filter(item=i2) & Author.objects.filter(item=i3)
[<Author: a2>]
Bug #2306
Checking that no join types are "left outer" joins.
>>> query = Item.objects.filter(tags=t2).query
>>> query.LOUTER not in [x[2] for x in query.alias_map.values()]
True
>>> Item.objects.filter(Q(tags=t1)).order_by('name')
[<Item: one>, <Item: two>]
>>> Item.objects.filter(Q(tags=t1)).filter(Q(tags=t2))
[<Item: one>]
>>> Item.objects.filter(Q(tags=t1)).filter(Q(creator__name='fred')|Q(tags=t2))
[<Item: one>]
Each filter call is processed "at once" against a single table, so this is
different from the previous example as it tries to find tags that are two
things at once (rather than two tags).
>>> Item.objects.filter(Q(tags=t1) & Q(tags=t2))
[]
>>> Item.objects.filter(Q(tags=t1), Q(creator__name='fred')|Q(tags=t2))
[]
>>> qs = Author.objects.filter(ranking__rank=2, ranking__id=rank1.id)
>>> list(qs)
[<Author: a2>]
>>> qs.query.count_active_tables()
2
>>> qs = Author.objects.filter(ranking__rank=2).filter(ranking__id=rank1.id)
>>> qs.query.count_active_tables()
3
Bug #4464
>>> Item.objects.filter(tags=t1).filter(tags=t2)
[<Item: one>]
>>> Item.objects.filter(tags__in=[t1, t2]).distinct().order_by('name')
[<Item: one>, <Item: two>]
>>> Item.objects.filter(tags__in=[t1, t2]).filter(tags=t3)
[<Item: two>]
Bug #2080, #3592
>>> Author.objects.filter(item__name='one') | Author.objects.filter(name='a3')
[<Author: a1>, <Author: a3>]
>>> Author.objects.filter(Q(item__name='one') | Q(name='a3'))
[<Author: a1>, <Author: a3>]
>>> Author.objects.filter(Q(name='a3') | Q(item__name='one'))
[<Author: a1>, <Author: a3>]
>>> Author.objects.filter(Q(item__name='three') | Q(report__name='r3'))
[<Author: a2>]
Bug #4289
A slight variation on the above theme: restricting the choices by the lookup
constraints.
>>> Number.objects.filter(num__lt=4)
[]
>>> Number.objects.filter(num__gt=8, num__lt=12)
[]
>>> Number.objects.filter(num__gt=8, num__lt=13)
[<Number: 12>]
>>> Number.objects.filter(Q(num__lt=4) | Q(num__gt=8, num__lt=12))
[]
>>> Number.objects.filter(Q(num__gt=8, num__lt=12) | Q(num__lt=4))
[]
>>> Number.objects.filter(Q(num__gt=8) & Q(num__lt=12) | Q(num__lt=4))
[]
>>> Number.objects.filter(Q(num__gt=7) & Q(num__lt=12) | Q(num__lt=4))
[<Number: 8>]
Bug #6074
Merging two empty result sets shouldn't leave a queryset with no constraints
(which would match everything).
>>> Author.objects.filter(Q(id__in=[]))
[]
>>> Author.objects.filter(Q(id__in=[])|Q(id__in=[]))
[]
Bug #1878, #2939
>>> Item.objects.values('creator').distinct().count()
3
# Create something with a duplicate 'name' so that we can test multi-column
# cases (which require some tricky SQL transformations under the covers).
>>> xx = Item(name='four', created=time1, creator=a2, note=n1)
>>> xx.save()
>>> Item.objects.exclude(name='two').values('creator', 'name').distinct().count()
4
>>> Item.objects.exclude(name='two').extra(select={'foo': '%s'}, select_params=(1,)).values('creator', 'name', 'foo').distinct().count()
4
>>> Item.objects.exclude(name='two').extra(select={'foo': '%s'}, select_params=(1,)).values('creator', 'name').distinct().count()
4
>>> xx.delete()
Bug #2253
>>> q1 = Item.objects.order_by('name')
>>> q2 = Item.objects.filter(id=i1.id)
>>> q1
[<Item: four>, <Item: one>, <Item: three>, <Item: two>]
>>> q2
[<Item: one>]
>>> (q1 | q2).order_by('name')
[<Item: four>, <Item: one>, <Item: three>, <Item: two>]
>>> (q1 & q2).order_by('name')
[<Item: one>]
# FIXME: This is difficult to fix and very much an edge case, so punt for now.
# # This is related to the order_by() tests, below, but the old bug exhibited
# # itself here (q2 was pulling too many tables into the combined query with the
# # new ordering, but only because we have evaluated q2 already).
# >>> len((q1 & q2).order_by('name').query.tables)
# 1
>>> q1 = Item.objects.filter(tags=t1)
>>> q2 = Item.objects.filter(note=n3, tags=t2)
>>> q3 = Item.objects.filter(creator=a4)
>>> ((q1 & q2) | q3).order_by('name')
[<Item: four>, <Item: one>]
Bugs #4088, #4306
>>> Report.objects.filter(creator=1001)
[<Report: r1>]
>>> Report.objects.filter(creator__num=1001)
[<Report: r1>]
>>> Report.objects.filter(creator__id=1001)
[]
>>> Report.objects.filter(creator__id=a1.id)
[<Report: r1>]
>>> Report.objects.filter(creator__name='a1')
[<Report: r1>]
Bug #4510
>>> Author.objects.filter(report__name='r1')
[<Author: a1>]
Bug #5324, #6704
>>> Item.objects.filter(tags__name='t4')
[<Item: four>]
>>> Item.objects.exclude(tags__name='t4').order_by('name').distinct()
[<Item: one>, <Item: three>, <Item: two>]
>>> Item.objects.exclude(tags__name='t4').order_by('name').distinct().reverse()
[<Item: two>, <Item: three>, <Item: one>]
>>> Author.objects.exclude(item__name='one').distinct().order_by('name')
[<Author: a2>, <Author: a3>, <Author: a4>]
# Excluding across a m2m relation when there is more than one related object
# associated was problematic.
>>> Item.objects.exclude(tags__name='t1').order_by('name')
[<Item: four>, <Item: three>]
>>> Item.objects.exclude(tags__name='t1').exclude(tags__name='t4')
[<Item: three>]
# Excluding from a relation that cannot be NULL should not use outer joins.
>>> query = Item.objects.exclude(creator__in=[a1, a2]).query
>>> query.LOUTER not in [x[2] for x in query.alias_map.values()]
True
Similarly, when one of the joins cannot possibly, ever, involve NULL values (Author -> ExtraInfo, in the following), it should never be promoted to a left outer join. So the following query should only involve one "left outer" join (Author -> Item is 0-to-many).
>>> qs = Author.objects.filter(id=a1.id).filter(Q(extra__note=n1)|Q(item__note=n3))
>>> len([x[2] for x in qs.query.alias_map.values() if x[2] == query.LOUTER])
1
The previous changes shouldn't affect nullable foreign key joins.
>>> Tag.objects.filter(parent__isnull=True).order_by('name')
[<Tag: t1>]
>>> Tag.objects.exclude(parent__isnull=True).order_by('name')
[<Tag: t2>, <Tag: t3>, <Tag: t4>, <Tag: t5>]
>>> Tag.objects.exclude(Q(parent__name='t1') | Q(parent__isnull=True)).order_by('name')
[<Tag: t4>, <Tag: t5>]
>>> Tag.objects.exclude(Q(parent__isnull=True) | Q(parent__name='t1')).order_by('name')
[<Tag: t4>, <Tag: t5>]
>>> Tag.objects.exclude(Q(parent__parent__isnull=True)).order_by('name')
[<Tag: t4>, <Tag: t5>]
>>> Tag.objects.filter(~Q(parent__parent__isnull=True)).order_by('name')
[<Tag: t4>, <Tag: t5>]
Bug #2091
>>> t = Tag.objects.get(name='t4')
>>> Item.objects.filter(tags__in=[t])
[<Item: four>]
Combining querysets built on different models should behave in a well-defined
fashion. We raise an error.
>>> Author.objects.all() & Tag.objects.all()
Traceback (most recent call last):
...
AssertionError: Cannot combine queries on two different base models.
>>> Author.objects.all() | Tag.objects.all()
Traceback (most recent call last):
...
AssertionError: Cannot combine queries on two different base models.
Bug #3141
>>> Author.objects.extra(select={'foo': '1'}).count()
4
>>> Author.objects.extra(select={'foo': '%s'}, select_params=(1,)).count()
4
Bug #2400
>>> Author.objects.filter(item__isnull=True)
[<Author: a3>]
>>> Tag.objects.filter(item__isnull=True)
[<Tag: t5>]
Bug #2496
>>> Item.objects.extra(tables=['queries_author']).select_related().order_by('name')[:1]
[<Item: four>]
Bug #2076
# Ordering on related tables should be possible, even if the table is not
# otherwise involved.
>>> Item.objects.order_by('note__note', 'name')
[<Item: two>, <Item: four>, <Item: one>, <Item: three>]
# Ordering on a related field should use the remote model's default ordering as
# a final step.
>>> Author.objects.order_by('extra', '-name')
[<Author: a2>, <Author: a1>, <Author: a4>, <Author: a3>]
# Using remote model default ordering can span multiple models (in this case,
# Cover is ordered by Item's default, which uses Note's default).
>>> Cover.objects.all()
[<Cover: first>, <Cover: second>]
# If you're not careful, it's possible to introduce infinite loops via default
# ordering on foreign keys in a cycle. We detect that.
>>> LoopX.objects.all()
Traceback (most recent call last):
...
FieldError: Infinite loop caused by ordering.
>>> LoopZ.objects.all()
Traceback (most recent call last):
...
FieldError: Infinite loop caused by ordering.
# ... but you can still order in a non-recursive fashion amongst linked fields
# (the previous test failed because the default ordering was recursive).
>>> LoopX.objects.all().order_by('y__x__y__x__id')
[]
# If the remote model does not have a default ordering, we order by its 'id'
# field.
>>> Item.objects.order_by('creator', 'name')
[<Item: one>, <Item: three>, <Item: two>, <Item: four>]
# Cross model ordering is possible in Meta, too.
>>> Ranking.objects.all()
[<Ranking: 3: a1>, <Ranking: 2: a2>, <Ranking: 1: a3>]
>>> Ranking.objects.all().order_by('rank')
[<Ranking: 1: a3>, <Ranking: 2: a2>, <Ranking: 3: a1>]
# Ordering by a many-valued attribute (e.g. a many-to-many or reverse
# ForeignKey) is legal, but the results might not make sense. That isn't
# Django's problem. Garbage in, garbage out.
>>> Item.objects.all().order_by('tags', 'id')
[<Item: one>, <Item: two>, <Item: one>, <Item: two>, <Item: four>]
# If we replace the default ordering, Django adjusts the required tables
# automatically. Item normally requires a join with Note to do the default
# ordering, but that isn't needed here.
>>> qs = Item.objects.order_by('name')
>>> qs
[<Item: four>, <Item: one>, <Item: three>, <Item: two>]
>>> len(qs.query.tables)
1
# Ordering of extra() pieces is possible, too and you can mix extra fields and
# model fields in the ordering.
>>> Ranking.objects.extra(tables=['django_site'], order_by=['-django_site.id', 'rank'])
[<Ranking: 1: a3>, <Ranking: 2: a2>, <Ranking: 3: a1>]
>>> qs = Ranking.objects.extra(select={'good': 'case when rank > 2 then 1 else 0 end'})
>>> [o.good for o in qs.extra(order_by=('-good',))] == [True, False, False]
True
>>> qs.extra(order_by=('-good', 'id'))
[<Ranking: 3: a1>, <Ranking: 2: a2>, <Ranking: 1: a3>]
# Despite having some extra aliases in the query, we can still omit them in a
# values() query.
>>> qs.values('id', 'rank').order_by('id')
[{'id': 1, 'rank': 2}, {'id': 2, 'rank': 1}, {'id': 3, 'rank': 3}]
Bugs #2874, #3002
>>> qs = Item.objects.select_related().order_by('note__note', 'name')
>>> list(qs)
[<Item: two>, <Item: four>, <Item: one>, <Item: three>]
# This is also a good select_related() test because there are multiple Note
# entries in the SQL. The two Note items should be different.
>>> qs[0].note, qs[0].creator.extra.note
(<Note: n2>, <Note: n1>)
Bug #3037
>>> Item.objects.filter(Q(creator__name='a3', name='two')|Q(creator__name='a4', name='four'))
[<Item: four>]
Bug #5321, #7070
Ordering columns must be included in the output columns. Note that this means
results that might otherwise be distinct are not (if there are multiple values
in the ordering cols), as in this example. This isn't a bug; it's a warning to
be careful with the selection of ordering columns.
>>> Note.objects.values('misc').distinct().order_by('note', '-misc')
[{'misc': u'foo'}, {'misc': u'bar'}, {'misc': u'foo'}]
Bug #4358
If you don't pass any fields to values(), relation fields are returned as
"foo_id" keys, not "foo". For consistency, you should be able to pass "foo_id"
in the fields list and have it work, too. We actually allow both "foo" and
"foo_id".
# The *_id version is returned by default.
>>> 'note_id' in ExtraInfo.objects.values()[0]
True
# You can also pass it in explicitly.
>>> ExtraInfo.objects.values('note_id')
[{'note_id': 1}, {'note_id': 2}]
# ...or use the field name.
>>> ExtraInfo.objects.values('note')
[{'note': 1}, {'note': 2}]
Bug #5261
>>> Note.objects.exclude(Q())
[<Note: n1>, <Note: n2>, <Note: n3>]
Bug #3045, #3288
Once upon a time, select_related() with circular relations would loop
infinitely if you forgot to specify "depth". Now we set an arbitrary default
upper bound.
>>> X.objects.all()
[]
>>> X.objects.select_related()
[]
Bug #3739
The all() method on querysets returns a copy of the queryset.
>>> q1 = Item.objects.order_by('name')
>>> id(q1) == id(q1.all())
False
Bug #2902
Parameters can be given to extra_select, *if* you use a SortedDict.
(First we need to know which order the keys fall in "naturally" on your system,
so we can put things in the wrong way around from normal. A normal dict would
thus fail.)
>>> from django.utils.datastructures import SortedDict
>>> s = [('a', '%s'), ('b', '%s')]
>>> params = ['one', 'two']
>>> if {'a': 1, 'b': 2}.keys() == ['a', 'b']:
... s.reverse()
... params.reverse()
# This slightly odd comparison works around the fact that PostgreSQL will
# return 'one' and 'two' as strings, not Unicode objects. It's a side-effect of
# using constants here and not a real concern.
>>> d = Item.objects.extra(select=SortedDict(s), select_params=params).values('a', 'b')[0]
>>> d == {'a': u'one', 'b': u'two'}
True
# Order by the number of tags attached to an item.
>>> l = Item.objects.extra(select={'count': 'select count(*) from queries_item_tags where queries_item_tags.item_id = queries_item.id'}).order_by('-count')
>>> [o.count for o in l]
[2, 2, 1, 0]
Bug #6154
Multiple filter statements are joined using "AND" all the time.
>>> Author.objects.filter(id=a1.id).filter(Q(extra__note=n1)|Q(item__note=n3))
[<Author: a1>]
>>> Author.objects.filter(Q(extra__note=n1)|Q(item__note=n3)).filter(id=a1.id)
[<Author: a1>]
Bug #6981
>>> Tag.objects.select_related('parent').order_by('name')
[<Tag: t1>, <Tag: t2>, <Tag: t3>, <Tag: t4>, <Tag: t5>]
Bug #6180, #6203 -- dates with limits and/or counts
>>> Item.objects.count()
4
>>> Item.objects.dates('created', 'month').count()
1
>>> Item.objects.dates('created', 'day').count()
2
>>> len(Item.objects.dates('created', 'day'))
2
>>> Item.objects.dates('created', 'day')[0]
datetime.datetime(2007, 12, 19, 0, 0)
Bug #7087 -- dates with extra select columns
>>> Item.objects.dates('created', 'day').extra(select={'a': 1})
[datetime.datetime(2007, 12, 19, 0, 0), datetime.datetime(2007, 12, 20, 0, 0)]
Test that parallel iterators work.
>>> qs = Tag.objects.all()
>>> i1, i2 = iter(qs), iter(qs)
>>> i1.next(), i1.next()
(<Tag: t1>, <Tag: t2>)
>>> i2.next(), i2.next(), i2.next()
(<Tag: t1>, <Tag: t2>, <Tag: t3>)
>>> i1.next()
<Tag: t3>
>>> qs = X.objects.all()
>>> bool(qs)
False
>>> bool(qs)
False
We can do slicing beyond what is currently in the result cache, too.
## FIXME!! This next test causes really weird PostgreSQL behaviour, but it's
## only apparent much later when the full test suite runs. I don't understand
## what's going on here yet.
##
## # We need to mess with the implementation internals a bit here to decrease the
## # cache fill size so that we don't read all the results at once.
## >>> from django.db.models import query
## >>> query.ITER_CHUNK_SIZE = 2
## >>> qs = Tag.objects.all()
##
## # Fill the cache with the first chunk.
## >>> bool(qs)
## True
## >>> len(qs._result_cache)
## 2
##
## # Query beyond the end of the cache and check that it is filled out as required.
## >>> qs[4]
## <Tag: t5>
## >>> len(qs._result_cache)
## 5
##
## # But querying beyond the end of the result set will fail.
## >>> qs[100]
## Traceback (most recent call last):
## ...
## IndexError: ...
Bug #7045 -- extra tables used to crash SQL construction on the second use.
>>> qs = Ranking.objects.extra(tables=['django_site'])
>>> s = qs.query.as_sql()
>>> s = qs.query.as_sql() # test passes if this doesn't raise an exception.
Bug #7098 -- Make sure semi-deprecated ordering by related models syntax still
works.
>>> Item.objects.values('note__note').order_by('queries_note.note', 'id')
[{'note__note': u'n2'}, {'note__note': u'n3'}, {'note__note': u'n3'}, {'note__note': u'n3'}]
Bug #7096 -- Make sure exclude() with multiple conditions continues to work.
>>> Tag.objects.filter(parent=t1, name='t3').order_by('name')
[<Tag: t3>]
>>> Tag.objects.exclude(parent=t1, name='t3').order_by('name')
[<Tag: t1>, <Tag: t2>, <Tag: t4>, <Tag: t5>]
>>> Item.objects.exclude(tags__name='t1', name='one').order_by('name').distinct()
[<Item: four>, <Item: three>, <Item: two>]
>>> Item.objects.filter(name__in=['three', 'four']).exclude(tags__name='t1').order_by('name')
[<Item: four>, <Item: three>]
More twisted cases, involving nested negations.
>>> Item.objects.exclude(~Q(tags__name='t1', name='one'))
[<Item: one>]
>>> Item.objects.filter(~Q(tags__name='t1', name='one'), name='two')
[<Item: two>]
>>> Item.objects.exclude(~Q(tags__name='t1', name='one'), name='two')
[<Item: four>, <Item: one>, <Item: three>]
Bug #7095
Updates that are filtered on the model being updated are somewhat tricky to get
in MySQL. This exercises that case.
>>> mm = ManagedModel.objects.create(data='mm1', tag=t1, public=True)
>>> ManagedModel.objects.update(data='mm')
"""}
|
|
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A batched replay buffer of nests of Tensors which can be sampled uniformly.
- Each add assumes tensors have batch_size as first dimension, and will store
each element of the batch in an offset segment, so that each batch dimension has
its own contiguous memory. Within batch segments, behaves as a circular buffer.
The get_next function returns 'ids' in addition to the data. This is not really
needed for the batched replay buffer, but is returned to be consistent with
the API for a priority replay buffer, which needs the ids to update priorities.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gin
import numpy as np
import tensorflow as tf
from tf_agents.replay_buffers import replay_buffer
from tf_agents.replay_buffers import table
from tf_agents.specs import tensor_spec
from tf_agents.utils import common
from tf_agents.utils import nest_utils
BufferInfo = collections.namedtuple('BufferInfo',
['ids', 'probabilities'])
@gin.configurable
class TFUniformReplayBuffer(replay_buffer.ReplayBuffer):
"""A TFUniformReplayBuffer with batched adds and uniform sampling."""
def __init__(self,
data_spec,
batch_size,
max_length=1000,
scope='TFUniformReplayBuffer',
device='cpu:*',
table_fn=table.Table,
dataset_drop_remainder=False,
dataset_window_shift=None,
stateful_dataset=False):
"""Creates a TFUniformReplayBuffer.
The TFUniformReplayBuffer stores episodes in `B == batch_size` blocks of
size `L == max_length`, with total frame capacity
`C == L * B`. Storage looks like:
```
block1 ep1 frame1
frame2
...
ep2 frame1
frame2
...
<L frames total>
block2 ep1 frame1
frame2
...
ep2 frame1
frame2
...
<L frames total>
...
blockB ep1 frame1
frame2
...
ep2 frame1
frame2
...
<L frames total>
```
Multiple episodes may be stored within a given block, up to `max_length`
frames total. In practice, new episodes will overwrite old ones as the
block rolls over its `max_length`.
Args:
data_spec: A TensorSpec or a list/tuple/nest of TensorSpecs describing a
single item that can be stored in this buffer.
batch_size: Batch dimension of tensors when adding to buffer.
max_length: The maximum number of items that can be stored in a single
batch segment of the buffer.
scope: Scope prefix for variables and ops created by this class.
device: A TensorFlow device to place the Variables and ops.
table_fn: Function to create tables `table_fn(data_spec, capacity)` that
can read/write nested tensors.
dataset_drop_remainder: If `True`, then when calling
`as_dataset` with arguments `single_deterministic_pass=True` and
`sample_batch_size is not None`, the final batch will be dropped if it
does not contain exactly `sample_batch_size` items. This is helpful for
static shape inference as the resulting tensors will always have
leading dimension `sample_batch_size` instead of `None`.
dataset_window_shift: Window shift used when calling `as_dataset` with
arguments `single_deterministic_pass=True` and `num_steps is not None`.
This determines how the resulting frames are windowed. If `None`, then
there is no overlap created between frames and each frame is seen
exactly once. For example, if `max_length=5`, `num_steps=2`,
`sample_batch_size=None`, and `dataset_window_shift=None`, then the
datasets returned will have frames `{[0, 1], [2, 3], [4]}`.
If `dataset_window_shift is not None`, then windows are created with a
window shift of `dataset_window_shift` and you will see each frame up
to `num_steps` times. For example, if `max_length=5`, `num_steps=2`,
`sample_batch_size=None`, and `dataset_window_shift=1`, then the
datasets returned will have windows of shifted repeated frames:
`{[0, 1], [1, 2], [2, 3], [3, 4], [4]}`.
For more details, see the documentation of `tf.data.Dataset.window`,
specifically for the `shift` argument.
The default behavior is to not overlap frames
(`dataset_window_shift=None`) but users often want to see all
combinations of frame sequences, in which case `dataset_window_shift=1`
is the appropriate value.
stateful_dataset: whether the dataset contains stateful ops or not.
"""
self._batch_size = batch_size
self._max_length = max_length
capacity = self._batch_size * self._max_length
super(TFUniformReplayBuffer, self).__init__(
data_spec, capacity, stateful_dataset)
self._id_spec = tensor_spec.TensorSpec([], dtype=tf.int64, name='id')
self._capacity_value = np.int64(self._capacity)
self._batch_offsets = (
tf.range(self._batch_size, dtype=tf.int64) * self._max_length)
self._scope = scope
self._device = device
self._table_fn = table_fn
self._dataset_drop_remainder = dataset_drop_remainder
self._dataset_window_shift = dataset_window_shift
with tf.device(self._device), tf.compat.v1.variable_scope(self._scope):
self._capacity = tf.constant(capacity, dtype=tf.int64)
self._data_table = table_fn(self._data_spec, self._capacity_value)
self._id_table = table_fn(self._id_spec, self._capacity_value)
self._last_id = common.create_variable('last_id', -1)
self._last_id_cs = tf.CriticalSection(name='last_id')
def variables(self):
return (self._data_table.variables() +
self._id_table.variables() +
[self._last_id])
@property
def device(self):
return self._device
@property
def table_fn(self):
return self._table_fn
@property
def scope(self):
return self._scope
# Methods defined in ReplayBuffer base class
def _num_frames(self):
num_items_single_batch_segment = self._get_last_id() + 1
total_frames = num_items_single_batch_segment * self._batch_size
return tf.minimum(total_frames, self._capacity)
def _add_batch(self, items):
"""Adds a batch of items to the replay buffer.
Args:
items: A tensor or list/tuple/nest of tensors representing a batch of
items to be added to the replay buffer. Each element of `items` must match
the corresponding spec in `data_spec`, with an extra leading batch
dimension, i.e. shape `[batch_size] + spec.shape`.
Returns:
An op that adds `items` to the replay buffer.
Raises:
ValueError: If called more than once.
"""
nest_utils.assert_same_structure(items, self._data_spec)
# Calling get_outer_rank here will validate that all items have the same
# outer rank. This was not usually an issue, but now that it's easier to
# call this from an eager context it's easy to make the mistake.
nest_utils.get_outer_rank(
tf.nest.map_structure(tf.convert_to_tensor, items),
self._data_spec)
with tf.device(self._device), tf.name_scope(self._scope):
id_ = self._increment_last_id()
write_rows = self._get_rows_for_id(id_)
write_id_op = self._id_table.write(write_rows, id_)
write_data_op = self._data_table.write(write_rows, items)
return tf.group(write_id_op, write_data_op)
def _get_next(self,
sample_batch_size=None,
num_steps=None,
time_stacked=True):
"""Returns an item or batch of items sampled uniformly from the buffer.
Samples transitions uniformly from the replay buffer. When sub-episodes are
desired, specify num_steps; note that for the returned items to truly be
sub-episodes, experience collection must also be single-threaded.
Args:
sample_batch_size: (Optional.) An optional batch_size to specify the
number of items to return. See get_next() documentation.
num_steps: (Optional.) Optional way to specify that sub-episodes are
desired. See get_next() documentation.
time_stacked: Bool, when True and num_steps > 1, get_next on the buffer
returns the items stacked on the time dimension. The outputs would be
[B, T, ..] if sample_batch_size is given or [T, ..] otherwise.
Returns:
A 2 tuple, containing:
- An item, sequence of items, or batch thereof sampled uniformly
from the buffer.
- BufferInfo NamedTuple, containing:
- The items' ids.
- The sampling probability of each item.
Raises:
ValueError: if num_steps is bigger than the capacity.
"""
with tf.device(self._device), tf.name_scope(self._scope):
with tf.name_scope('get_next'):
min_val, max_val = _valid_range_ids(
self._get_last_id(), self._max_length, num_steps)
rows_shape = () if sample_batch_size is None else (sample_batch_size,)
assert_nonempty = tf.compat.v1.assert_greater(
max_val,
min_val,
message='TFUniformReplayBuffer is empty. Make sure to add items '
'before sampling the buffer.')
with tf.control_dependencies([assert_nonempty]):
num_ids = max_val - min_val
probability = tf.cond(
pred=tf.equal(num_ids, 0),
true_fn=lambda: 0.,
false_fn=lambda: 1. / tf.cast(num_ids * self._batch_size, # pylint: disable=g-long-lambda
tf.float32))
ids = tf.random.uniform(
rows_shape, minval=min_val, maxval=max_val, dtype=tf.int64)
# Move each id sample to a random batch.
batch_offsets = tf.random.uniform(
rows_shape, minval=0, maxval=self._batch_size, dtype=tf.int64)
batch_offsets *= self._max_length
if num_steps is None:
rows_to_get = tf.math.mod(ids, self._max_length) + batch_offsets
data = self._data_table.read(rows_to_get)
data_ids = self._id_table.read(rows_to_get)
else:
if time_stacked:
step_range = tf.range(num_steps, dtype=tf.int64)
if sample_batch_size:
step_range = tf.reshape(step_range, [1, num_steps])
step_range = tf.tile(step_range, [sample_batch_size, 1])
ids = tf.tile(tf.expand_dims(ids, -1), [1, num_steps])
batch_offsets = batch_offsets[:, None]
else:
step_range = tf.reshape(step_range, [num_steps])
rows_to_get = tf.math.mod(step_range + ids,
self._max_length) + batch_offsets
data = self._data_table.read(rows_to_get)
data_ids = self._id_table.read(rows_to_get)
else:
data = []
data_ids = []
for step in range(num_steps):
steps_to_get = tf.math.mod(ids + step,
self._max_length) + batch_offsets
items = self._data_table.read(steps_to_get)
data.append(items)
data_ids.append(self._id_table.read(steps_to_get))
data = tuple(data)
data_ids = tuple(data_ids)
probabilities = tf.fill(rows_shape, probability)
buffer_info = BufferInfo(ids=data_ids,
probabilities=probabilities)
return data, buffer_info
@gin.configurable(
'tf_agents.tf_uniform_replay_buffer.TFUniformReplayBuffer.as_dataset')
def as_dataset(self,
sample_batch_size=None,
num_steps=None,
num_parallel_calls=None,
single_deterministic_pass=False):
return super(TFUniformReplayBuffer, self).as_dataset(
sample_batch_size, num_steps, num_parallel_calls,
single_deterministic_pass=single_deterministic_pass)
def _as_dataset(self,
sample_batch_size=None,
num_steps=None,
sequence_preprocess_fn=None,
num_parallel_calls=None):
"""Creates a dataset that returns entries from the buffer in shuffled order.
Args:
sample_batch_size: (Optional.) An optional batch_size to specify the
number of items to return. See as_dataset() documentation.
num_steps: (Optional.) Optional way to specify that sub-episodes are
desired. See as_dataset() documentation.
sequence_preprocess_fn: (Optional.) Preprocessing function for sequences
before they are sharded into subsequences of length `num_steps` and
batched.
num_parallel_calls: (Optional.) Number of elements to process in parallel.
See as_dataset() documentation.
Returns:
A dataset of type tf.data.Dataset, elements of which are 2-tuples of:
- An item or sequence of items or batch thereof
- Auxiliary info for the items (i.e. ids, probs).
Raises:
NotImplementedError: If `sequence_preprocess_fn != None` is passed in.
"""
if sequence_preprocess_fn is not None:
raise NotImplementedError('sequence_preprocess_fn is not supported.')
def get_next(_):
return self.get_next(sample_batch_size, num_steps, time_stacked=True)
dataset = tf.data.experimental.Counter().map(
get_next, num_parallel_calls=num_parallel_calls)
return dataset
def _single_deterministic_pass_dataset(self,
sample_batch_size=None,
num_steps=None,
sequence_preprocess_fn=None,
num_parallel_calls=None):
"""Creates a dataset that returns entries from the buffer in fixed order.
Args:
sample_batch_size: (Optional.) An optional batch_size to specify the
number of items to return. See as_dataset() documentation.
num_steps: (Optional.) Optional way to specify that sub-episodes are
desired. See as_dataset() documentation.
sequence_preprocess_fn: (Optional.) Preprocessing function for sequences
before they are sharded into subsequences of length `num_steps` and
batched.
num_parallel_calls: (Optional.) Number of elements to process in parallel.
See as_dataset() documentation.
Returns:
A dataset of type tf.data.Dataset, elements of which are 2-tuples of:
- An item or sequence of items or batch thereof
- Auxiliary info for the items (i.e. ids, probs).
Raises:
ValueError: If `dataset_drop_remainder` is set, and
`sample_batch_size > self.batch_size`. In this case all data will
be dropped.
NotImplementedError: If `sequence_preprocess_fn != None` is passed in.
"""
if sequence_preprocess_fn is not None:
raise NotImplementedError('sequence_preprocess_fn is not supported.')
static_size = tf.get_static_value(sample_batch_size)
static_num_steps = tf.get_static_value(num_steps)
static_self_batch_size = tf.get_static_value(self._batch_size)
static_self_max_length = tf.get_static_value(self._max_length)
if (self._dataset_drop_remainder
and static_size is not None
and static_self_batch_size is not None
and static_size > static_self_batch_size):
raise ValueError(
'sample_batch_size ({}) > self.batch_size ({}) and '
'dataset_drop_remainder is True. In '
'this case, ALL data will be dropped by the deterministic dataset.'
.format(static_size, static_self_batch_size))
if (self._dataset_drop_remainder
and static_num_steps is not None
and static_self_max_length is not None
and static_num_steps > static_self_max_length):
raise ValueError(
'num_steps ({}) > self.max_length ({}) and '
'dataset_drop_remainder is True. In '
'this case, ALL data will be dropped by the deterministic dataset.'
.format(static_num_steps, static_self_max_length))
def get_row_ids(_):
"""Passed to Dataset.range(self._batch_size).flat_map(.), gets row ids."""
with tf.device(self._device), tf.name_scope(self._scope):
with tf.name_scope('single_deterministic_pass_dataset'):
# Here we pass num_steps=None because _valid_range_ids uses
# num_steps to determine a hard stop when sampling num_steps starting
# from the returned indices. But in our case, we want all the indices
# and we'll use TF dataset's window() mechanism to get
# num_steps-length blocks. The window mechanism handles this stuff
# for us.
min_frame_offset, max_frame_offset = _valid_range_ids(
self._get_last_id(), self._max_length, num_steps=None)
tf.compat.v1.assert_less(
min_frame_offset,
max_frame_offset,
message='TFUniformReplayBuffer is empty. Make sure to add items '
'before asking the buffer for data.')
min_max_frame_range = tf.range(min_frame_offset, max_frame_offset)
window_shift = self._dataset_window_shift
def group_windows(ds_, drop_remainder=self._dataset_drop_remainder):
return ds_.batch(num_steps, drop_remainder=drop_remainder)
if sample_batch_size is None:
def row_ids(b):
# Create a vector of shape [num_frames] and slice it along each
# frame.
ids = tf.data.Dataset.from_tensor_slices(
b * self._max_length + min_max_frame_range)
if num_steps is not None:
ids = (ids.window(num_steps, shift=window_shift)
.flat_map(group_windows))
return ids
return tf.data.Dataset.range(self._batch_size).flat_map(row_ids)
else:
def batched_row_ids(batch):
# Create a matrix of indices shaped [num_frames, batch_size]
# and slice it along each frame row to get groups of batches
# for frame 0, frame 1, ...
return tf.data.Dataset.from_tensor_slices(
(min_max_frame_range[:, tf.newaxis]
+ batch * self._max_length))
indices_ds = (
tf.data.Dataset.range(self._batch_size)
.batch(sample_batch_size,
drop_remainder=self._dataset_drop_remainder)
.flat_map(batched_row_ids))
if num_steps is not None:
# We have sequences of num_frames rows shaped [sample_batch_size].
# Window and group these to rows of shape
# [num_steps, sample_batch_size], then
# transpose them to get index tensors of shape
# [sample_batch_size, num_steps].
def group_windows_drop_remainder(d):
return group_windows(d, drop_remainder=True)
indices_ds = (indices_ds.window(num_steps, shift=window_shift)
.flat_map(group_windows_drop_remainder)
.map(tf.transpose))
return indices_ds
# Get our indices as a dataset; each time we reinitialize the iterator we
# update our min/max id bounds from the state of the replay buffer.
ds = tf.data.Dataset.range(1).flat_map(get_row_ids)
def get_data(id_):
with tf.device(self._device), tf.name_scope(self._scope):
with tf.name_scope('single_deterministic_pass_dataset'):
data = self._data_table.read(id_ % self._capacity)
buffer_info = BufferInfo(ids=id_, probabilities=())
return (data, buffer_info)
# Deterministic even though num_parallel_calls > 1. Operations are
# run in parallel but then the results are returned in original stream
# order.
ds = ds.map(get_data, num_parallel_calls=num_parallel_calls)
return ds
def _gather_all(self):
"""Returns all the items in buffer, shape [batch_size, timestep, ...].
Returns:
All the items currently in the buffer.
"""
with tf.device(self._device), tf.name_scope(self._scope):
with tf.name_scope('gather_all'):
# Make ids, repeated over batch_size. Shape [batch_size, num_ids, ...].
min_val, max_val = _valid_range_ids(
self._get_last_id(), self._max_length)
ids = tf.range(min_val, max_val)
ids = tf.stack([ids] * self._batch_size)
rows = tf.math.mod(ids, self._max_length)
# Make batch_offsets, shape [batch_size, 1], then add to rows.
batch_offsets = tf.expand_dims(
tf.range(self._batch_size, dtype=tf.int64) * self._max_length,
1)
rows += batch_offsets
# Expected shape is [batch_size, max_length, ...].
data = self._data_table.read(rows)
return data
def _clear(self, clear_all_variables=False):
"""Return op that resets the contents of replay buffer.
Args:
clear_all_variables: boolean indicating if all variables should be
cleared. By default, table contents will be unlinked from
replay buffer, but values are unmodified for efficiency. Set
`clear_all_variables=True` to reset all variables including Table
contents.
Returns:
op that clears or unlinks the replay buffer contents.
"""
table_vars = self._data_table.variables() + self._id_table.variables()
def _init_vars():
assignments = [self._last_id.assign(-1)]
if clear_all_variables:
assignments += [v.assign(tf.zeros_like(v)) for v in table_vars]
return tf.group(*assignments, name='clear')
return self._last_id_cs.execute(_init_vars)
# Helper functions.
def _increment_last_id(self, increment=1):
"""Increments the last_id in a thread safe manner.
Args:
increment: amount to increment last_id by.
Returns:
An op that increments the last_id.
"""
def _assign_add():
return self._last_id.assign_add(increment).value()
return self._last_id_cs.execute(_assign_add)
def _get_last_id(self):
def last_id():
return self._last_id.value()
return self._last_id_cs.execute(last_id)
def _get_rows_for_id(self, id_):
"""Make a batch_size length list of tensors, with row ids for write."""
id_mod = tf.math.mod(id_, self._max_length)
rows = self._batch_offsets + id_mod
return rows
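# A hedged usage sketch (not part of the original module): exercises the public
# add_batch/get_next API on a tiny buffer. The spec, batch_size and max_length
# values below are illustrative assumptions.
def _example_buffer_usage():
  """Builds a small buffer, adds one batch of frames and samples from it."""
  spec = tensor_spec.TensorSpec([], tf.float32, name='observation')
  rb = TFUniformReplayBuffer(spec, batch_size=2, max_length=100)
  # One frame per batch segment; the leading dimension must equal batch_size.
  rb.add_batch(tf.constant([1.0, 2.0]))
  # Sample 4 transitions uniformly; the info tuple carries ids/probabilities.
  sample, info = rb.get_next(sample_batch_size=4)
  return sample, info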
def _valid_range_ids(last_id, max_length, num_steps=None):
"""Returns the [min_val, max_val) range of ids.
When num_steps is provided, [min_val, max_val+num_steps) are also valid ids.
Args:
last_id: The last id added to the buffer.
max_length: The max length of each batch segment in the buffer.
num_steps: Optional way to specify how many consecutive ids need to be valid.
Returns:
A tuple (min_id, max_id) for the range [min_id, max_id) of valid ids.
"""
if num_steps is None:
num_steps = tf.constant(1, tf.int64)
min_id_not_full = tf.constant(0, dtype=tf.int64)
max_id_not_full = tf.maximum(last_id + 1 - num_steps + 1, 0)
min_id_full = last_id + 1 - max_length
max_id_full = last_id + 1 - num_steps + 1
return (tf.where(last_id < max_length, min_id_not_full, min_id_full),
tf.where(last_id < max_length, max_id_not_full, max_id_full))
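# A hedged worked example (not part of the original module) of the id
# arithmetic above: with a full segment (last_id=7 >= max_length=5) and
# num_steps=2, the valid start ids are [3, 7), i.e. 3..6, because a 2-step
# slice starting at id 6 ends exactly at the last id 7.
def _example_valid_range_ids():
  """Returns (3, 7) for last_id=7, max_length=5, num_steps=2."""
  return _valid_range_ids(
      tf.constant(7, dtype=tf.int64),
      tf.constant(5, dtype=tf.int64),
      num_steps=tf.constant(2, dtype=tf.int64))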
|
|
# coding: utf-8
"""
This module defines the Node class that is inherited by Task, Work and Flow objects.
"""
from __future__ import division, print_function, unicode_literals
import sys
import os
import time
import collections
import abc
import six
import numpy as np
from pprint import pprint
from atomicfile import AtomicFile
from pydispatch import dispatcher
from monty.termcolor import colored
from monty.serialization import loadfn
from monty.string import is_string
from monty.io import FileLock
from monty.collections import AttrDict, Namespace
from monty.functools import lazy_property
from monty.json import MSONable
from pymatgen.serializers.json_coders import json_pretty_dump, pmg_serialize
from .utils import File, Directory, irdvars_for_ext, abi_extensions
import logging
logger = logging.getLogger(__name__)
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
def _2attrs(item):
    return item if item is None or isinstance(item, (list, tuple)) else (item,)
class Status(int):
"""This object is an integer representing the status of the `Node`."""
# Possible status of the node. See monty.termcolor for the meaning of color, on_color and attrs.
_STATUS_INFO = [
#(value, name, color, on_color, attrs)
(1, "Initialized", None , None, None), # Node has been initialized
(2, "Locked", None , None, None), # Task is locked an must be explicitly unlocked by an external subject (Work).
(3, "Ready", None , None, None), # Node is ready i.e. all the depencies of the node have status S_OK
(4, "Submitted", "blue" , None, None), # Node has been submitted (The `Task` is running or we have started to finalize the Work)
(5, "Running", "magenta", None, None), # Node is running.
(6, "Done", None , None, None), # Node done, This does not imply that results are ok or that the calculation completed successfully
(7, "AbiCritical", "red" , None, None), # Node raised an Error by ABINIT.
(8, "QCritical", "red" , "on_white", None), # Node raised an Error by submitting submission script, or by executing it
(9, "Unconverged", "red" , "on_yellow", None), # This usually means that an iterative algorithm didn't converge.
(10, "Error", "red" , None, None), # Node raised an unrecoverable error, usually raised when an attempt to fix one of other types failed.
(11, "Completed", "green" , None, None), # Execution completed successfully.
]
_STATUS2STR = collections.OrderedDict([(t[0], t[1]) for t in _STATUS_INFO])
_STATUS2COLOR_OPTS = collections.OrderedDict([(t[0], {"color": t[2], "on_color": t[3], "attrs": _2attrs(t[4])}) for t in _STATUS_INFO])
def __repr__(self):
return "<%s: %s, at %s>" % (self.__class__.__name__, str(self), id(self))
def __str__(self):
"""String representation."""
return self._STATUS2STR[self]
@classmethod
def as_status(cls, obj):
"""Convert obj into Status."""
if obj is None: return None
return obj if isinstance(obj, cls) else cls.from_string(obj)
@classmethod
def from_string(cls, s):
"""Return a `Status` instance from its string representation."""
for num, text in cls._STATUS2STR.items():
if text == s:
return cls(num)
else:
raise ValueError("Wrong string %s" % s)
@classmethod
def all_status_strings(cls):
"""List of strings with all possible values status."""
return [info[1] for info in cls._STATUS_INFO]
@property
def is_critical(self):
"""True if status is critical."""
return str(self) in ("AbiCritical", "QCritical", "Uncoverged", "Error")
@property
def color_opts(self):
return self._STATUS2COLOR_OPTS[self]
@property
def colored(self):
"""Return colorized text used to print the status if the stream supports it."""
return colored(str(self), **self.color_opts)
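# A hedged usage sketch (not part of the original module): round-trips a Status
# through its string form and queries criticality and colorization. The chosen
# status name is just an illustrative value.
def _status_usage_example():
    """Return the colorized form of the "QCritical" status."""
    status = Status.from_string("QCritical")
    assert int(status) == 8 and str(status) == "QCritical"
    assert status.is_critical
    return status.colored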
class Dependency(object):
"""
This object describes the dependencies among the nodes of a calculation.
A `Dependency` consists of a `Node` that produces a list of products (files)
that are used by the other nodes (`Task` or `Work`) to start the calculation.
One usually creates the object by calling work.register
Example:
# Register the SCF task in work.
scf_task = work.register(scf_strategy)
# Register the NSCF calculation and its dependency on the SCF run via deps.
nscf_task = work.register(nscf_strategy, deps={scf_task: "DEN"})
"""
def __init__(self, node, exts=None):
"""
Args:
node: The task or the workflow associated to the dependency, or a string with a filepath.
exts: Extensions of the output files that are needed for running the other tasks.
"""
self._node = Node.as_node(node)
if exts and is_string(exts): exts = exts.split()
# Extract extensions.
self.exts = [e for e in exts if not e.startswith("@")]
# Save getters
self.getters = [e for e in exts if e.startswith("@")]
#if self.getters: print(self.getters)
def __hash__(self):
return hash(self._node)
def __repr__(self):
return "node %s will produce: %s " % (repr(self.node), repr(self.exts))
def __str__(self):
return "node %s will produce: %s " % (str(self.node), str(self.exts))
@property
def info(self):
return str(self.node)
@property
def node(self):
"""The :class:`Node` associated to the dependency."""
return self._node
@property
def status(self):
"""The status of the dependency, i.e. the status of the :class:`Node`."""
return self.node.status
@lazy_property
def products(self):
"""List of output files produces by self."""
_products = []
for ext in self.exts:
prod = Product(ext, self.node.opath_from_ext(ext))
_products.append(prod)
return _products
def apply_getters(self, task):
"""
This function is called when we specify the task dependencies with the syntax:
deps={node: "@property"}
In this case the task has to get the `property` from `node` before starting the calculation.
At present, the following properties are supported:
- @structure
"""
if not self.getters: return
for getter in self.getters:
if getter == "@structure":
task.history.info("Getting structure from %s" % self.node)
new_structure = self.node.get_final_structure()
task._change_structure(new_structure)
else:
raise ValueError("Wrong getter %s" % getter)
def connecting_vars(self):
"""
Returns a dictionary with the variables that must be added to the
input file in order to connect this :class:`Node` to its dependencies.
"""
vars = {}
for prod in self.products:
vars.update(prod.connecting_vars())
return vars
def get_filepaths_and_exts(self):
"""Returns the paths of the output files produced by self and its extensions"""
filepaths = [prod.filepath for prod in self.products]
exts = [prod.ext for prod in self.products]
return filepaths, exts
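# A hedged usage sketch (not part of the original module): wraps an existing
# task in a Dependency and shows how the extension string is split into plain
# file extensions and "@" getters. `scf_task` is assumed to be a Task instance.
def _dependency_usage_example(scf_task):
    """Build a Dependency on `scf_task` requiring its DEN file and structure."""
    dep = Dependency(scf_task, exts="DEN @structure")
    assert dep.exts == ["DEN"]
    assert dep.getters == ["@structure"]
    return dep.status  # mirrors the status of the wrapped node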
class Product(object):
"""
A product represents an output file produced by an ABINIT run.
This file is needed to start another `Task` or another `Work`.
"""
def __init__(self, ext, path):
"""
Args:
ext: ABINIT file extension
path: (absolute) filepath
"""
if ext not in abi_extensions():
raise ValueError("Extension %s has not been registered in the internal database" % str(ext))
self.ext = ext
self.file = File(path)
@classmethod
def from_file(cls, filepath):
"""Build a :class:`Product` instance from a filepath."""
# Find the abinit extension.
for i in range(len(filepath)):
if filepath[i:] in abi_extensions():
ext = filepath[i:]
break
else:
raise ValueError("Cannot detect abinit extension in %s" % filepath)
return cls(ext, filepath)
def __str__(self):
return "File=%s, Extension=%s, " % (self.file.path, self.ext)
@property
def filepath(self):
"""Absolute path of the file."""
return self.file.path
def connecting_vars(self):
"""
Returns a dictionary with the ABINIT variables that
must be used to make the code use this file.
"""
return irdvars_for_ext(self.ext)
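# A hedged usage sketch (not part of the original module): builds a Product from
# a filepath and inspects the ABINIT variables needed to read it back. It assumes
# that "DEN" is one of the extensions registered in abi_extensions().
def _product_usage_example(filepath="/path/to/out_DEN"):
    """Return the extension and the ird* connecting variables for a density file."""
    prod = Product.from_file(filepath)
    return prod.ext, prod.connecting_vars()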
class GridFsFile(AttrDict):
"""Information on a file that will stored in the MongoDb gridfs collection."""
def __init__(self, path, fs_id=None, mode="b"):
super(GridFsFile, self).__init__(path=path, fs_id=fs_id, mode=mode)
class NodeResults(dict, MSONable):
"""Dictionary used to store the most important results produced by a :class:`Node`."""
JSON_SCHEMA = {
"type": "object",
"properties": {
"node_id": {"type": "integer", "required": True},
"node_finalized": {"type": "boolean", "required": True},
"node_history": {"type": "array", "required": True},
"node_class": {"type": "string", "required": True},
"node_name": {"type": "string", "required": True},
"node_status": {"type": "string", "required": True},
"in": {"type": "object", "required": True, "description": "dictionary with input parameters"},
"out": {"type": "object", "required": True, "description": "dictionary with the output results"},
"exceptions": {"type": "array", "required": True},
"files": {"type": "object", "required": True},
},
}
@classmethod
def from_node(cls, node):
"""Initialize an instance of `NodeResults` from a `Node` subclass."""
kwargs = dict(
node_id=node.node_id,
node_finalized=node.finalized,
node_history=list(node.history),
node_name=node.name,
node_class=node.__class__.__name__,
node_status=str(node.status),
)
return node.Results(node, **kwargs)
def __init__(self, node, **kwargs):
super(NodeResults, self).__init__(**kwargs)
self.node = node
if "in" not in self: self["in"] = Namespace()
if "out" not in self: self["out"] = Namespace()
if "exceptions" not in self: self["exceptions"] = []
if "files" not in self: self["files"] = Namespace()
@property
def exceptions(self):
return self["exceptions"]
@property
def gridfs_files(self):
"""List with the absolute paths of the files to be put in GridFs."""
return self["files"]
def register_gridfs_files(self, **kwargs):
"""
This function registers the files that will be saved in GridFS.
kwargs is a dictionary mapping the key associated to the file (usually the extension)
to the absolute path. By default, files are assumed to be in binary form; for formatted
(text) files one should pass a tuple ("filepath", "t").
Example::
results.register_gridfs_files(GSR="path/to/GSR.nc", text_file=("/path/to/txt_file", "t"))
The GSR file is a binary file, whereas text_file is a text file.
"""
d = {}
for k, v in kwargs.items():
mode = "b"
if isinstance(v, (list, tuple)): v, mode = v
d[k] = GridFsFile(path=v, mode=mode)
self["files"].update(d)
return self
def push_exceptions(self, *exceptions):
for exc in exceptions:
newstr = str(exc)
if newstr not in self.exceptions:
self["exceptions"] += [newstr,]
@pmg_serialize
def as_dict(self):
return self.copy()
@classmethod
def from_dict(cls, d):
return cls({k: v for k, v in d.items() if k not in ("@module", "@class")})
def json_dump(self, filename):
json_pretty_dump(self.as_dict(), filename)
@classmethod
def json_load(cls, filename):
return cls.from_dict(loadfn(filename))
def validate_json_schema(self):
import validictory
d = self.as_dict()
try:
validictory.validate(d, self.JSON_SCHEMA)
return True
except ValueError as exc:
pprint(d)
print(exc)
return False
def update_collection(self, collection):
"""
Update a mongodb collection.
"""
node = self.node
flow = node if node.is_flow else node.flow
# Build the key used to store the entry in the document.
key = node.name
if node.is_task:
key = "w" + str(node.pos[0]) + "_t" + str(node.pos[1])
elif node.is_work:
key = "w" + str(node.pos)
db = collection.database
# Save files with GridFs first in order to get the ID.
if self.gridfs_files:
import gridfs
fs = gridfs.GridFS(db)
for ext, gridfile in self.gridfs_files.items():
logger.info("gridfs: about to put file:", str(gridfile))
# Here we set gridfile.fs_id that will be stored in the mongodb document
try:
with open(gridfile.path, "r" + gridfile.mode) as f:
gridfile.fs_id = fs.put(f, filename=gridfile.path)
except IOError as exc:
logger.critical(str(exc))
if flow.mongo_id is None:
# Flow does not have a mongo_id, allocate doc for the flow and save its id.
flow.mongo_id = collection.insert({})
print("Creating flow.mongo_id", flow.mongo_id, type(flow.mongo_id))
# Get the document from flow.mongo_id and update it.
doc = collection.find_one({"_id": flow.mongo_id})
if key in doc:
raise ValueError("%s is already in doc!" % key)
doc[key] = self.as_dict()
collection.save(doc)
#collection.update({'_id':mongo_id}, {"$set": doc}, upsert=False)
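# A hedged usage sketch (not part of the original module): collects the results
# of a node and pushes them into a MongoDB collection. `node` and `collection`
# are assumed to be a live Node and a pymongo collection; validate_json_schema
# additionally assumes that validictory is installed.
def _node_results_usage_example(node, collection):
    results = NodeResults.from_node(node)
    results.push_exceptions(RuntimeError("illustrative exception"))
    if results.validate_json_schema():
        results.update_collection(collection)
    return results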
def check_spectator(node_method):
"""
Decorator for :class:`Node` methods. Meant to raise `SpectatorNodeError` when a method
with side effects is called while the node is in spectator mode (the check is currently a no-op).
"""
from functools import wraps
@wraps(node_method)
def wrapper(*args, **kwargs):
node = args[0]
if node.in_spectator_mode:
#raise node.SpectatorError("You should not call this method when the node in spectator_mode")
#warnings.warn("You should not call %s when the node in spectator_mode" % node_method)
import warnings
return node_method(*args, **kwargs)
return wrapper
class NodeError(Exception):
"""Base Exception raised by :class:`Node` subclasses"""
class SpectatorNodeError(NodeError):
"""
Exception raised by :class:`Node` methods when the node is in spectator mode
and we are calling a method with side effects.
"""
class Node(six.with_metaclass(abc.ABCMeta, object)):
"""
Abstract base class defining the interface that must be
implemented by the nodes of the calculation.
Nodes are hashable and can be tested for equality
"""
Results = NodeResults
Error = NodeError
SpectatorError = SpectatorNodeError
# Possible status of the node.
S_INIT = Status.from_string("Initialized")
S_LOCKED = Status.from_string("Locked")
S_READY = Status.from_string("Ready")
S_SUB = Status.from_string("Submitted")
S_RUN = Status.from_string("Running")
S_DONE = Status.from_string("Done")
S_ABICRITICAL = Status.from_string("AbiCritical")
S_QCRITICAL = Status.from_string("QCritical")
S_UNCONVERGED = Status.from_string("Unconverged")
S_ERROR = Status.from_string("Error")
S_OK = Status.from_string("Completed")
ALL_STATUS = [
S_INIT,
S_LOCKED,
S_READY,
S_SUB,
S_RUN,
S_DONE,
S_ABICRITICAL,
S_QCRITICAL,
S_UNCONVERGED,
S_ERROR,
S_OK,
]
# Color used to plot the network in networkx
color_rgb = (0, 0, 0)
def __init__(self):
self._in_spectator_mode = False
# Node identifier.
self._node_id = get_newnode_id()
# List of dependencies
self._deps = []
# List of files (products) needed by this node.
self._required_files = []
# Used to push additional info during the execution.
self.history = NodeHistory(maxlen=80)
# Actions performed to fix abicritical events.
self._corrections = NodeCorrections()
# Set to true if the node has been finalized.
self._finalized = False
self._status = self.S_INIT
def __eq__(self, other):
if not isinstance(other, Node): return False
return self.node_id == other.node_id
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.node_id)
def __repr__(self):
try:
return "<%s, node_id=%s, workdir=%s>" % (
self.__class__.__name__, self.node_id, self.relworkdir)
except AttributeError:
# this usually happens when workdir has not been initialized
return "<%s, node_id=%s, workdir=None>" % (self.__class__.__name__, self.node_id)
#def __setattr__(self, name, value):
# if self.in_spectator_mode:
# raise RuntimeError("You should not call __setattr__ in spectator_mode")
# return super(Node, self).__setattr__(name,value)
@classmethod
def as_node(cls, obj):
"""
Convert obj into a Node instance.
Return:
obj if obj is a Node instance,
cast obj to :class:`FileNode` instance if obj is a string.
None if obj is None
"""
if isinstance(obj, cls):
return obj
elif is_string(obj):
# Assume filepath.
return FileNode(obj)
elif obj is None:
return obj
else:
raise TypeError("Don't know how to convert %s to Node instance." % obj)
@property
def name(self):
"""
The name of the node
(only used for facilitating its identification in the user interface).
"""
try:
return self._name
except AttributeError:
if self.is_task:
try:
return self.pos_str
except:
return os.path.basename(self.workdir)
else:
return os.path.basename(self.workdir)
@property
def relworkdir(self):
"""Return a relative version of the workdir"""
try:
return os.path.relpath(self.workdir)
except OSError:
# current working directory may not be defined!
return self.workdir
def set_name(self, name):
"""Set the name of the Node."""
self._name = name
@property
def node_id(self):
"""Node identifier."""
return self._node_id
@check_spectator
def set_node_id(self, node_id):
"""Set the node identifier. Use it carefully!"""
self._node_id = node_id
@property
def finalized(self):
"""True if the `Node` has been finalized."""
return self._finalized
@finalized.setter
def finalized(self, boolean):
self._finalized = boolean
self.history.info("Status set to finalized")
@property
def in_spectator_mode(self):
return self._in_spectator_mode
@in_spectator_mode.setter
def in_spectator_mode(self, mode):
self._in_spectator_mode = bool(mode)
#self.history.info("in_spectator_mode set to %s" % mode)
@property
def corrections(self):
"""
        List of dictionaries with information on the actions performed to solve `AbiCritical` Events.
Each dictionary contains the `AbinitEvent` who triggered the correction and
a human-readable message with the description of the operation performed.
"""
return self._corrections
@property
def num_corrections(self):
return len(self.corrections)
def log_correction(self, event, action):
"""
This method should be called once we have fixed the problem associated to this event.
It adds a new entry in the correction history of the node.
Args:
event: :class:`AbinitEvent` that triggered the correction.
            action (str): Human-readable string with info on the action performed to solve the problem.
"""
# TODO: Create CorrectionObject
action = str(action)
self.history.info(action)
self._corrections.append(dict(
event=event.as_dict(),
action=action,
))
@property
def is_file(self):
"""True if this node is a file"""
return isinstance(self, FileNode)
@property
def is_task(self):
"""True if this node is a Task"""
from .tasks import Task
return isinstance(self, Task)
@property
def is_work(self):
"""True if this node is a Work"""
from .works import Work
return isinstance(self, Work)
@property
def is_flow(self):
"""True if this node is a Flow"""
from .flows import Flow
return isinstance(self, Flow)
@property
def deps(self):
"""
List of :class:`Dependency` objects defining the dependencies
of this `Node`. Empty list if this :class:`Node` does not have dependencies.
"""
return self._deps
@check_spectator
def add_deps(self, deps):
"""
Add a list of dependencies to the :class:`Node`.
Args:
            deps: List of :class:`Dependency` objects specifying the dependencies of the node,
                or a dictionary mapping nodes to file extensions, e.g. {task: "DEN"}
"""
if isinstance(deps, collections.Mapping):
# Convert dictionary into list of dependencies.
deps = [Dependency(node, exts) for node, exts in deps.items()]
# We want a list
if not isinstance(deps, (list, tuple)):
deps = [deps]
assert all(isinstance(d, Dependency) for d in deps)
# Add the dependencies to the node
self._deps.extend(deps)
if self.is_work:
# The task in the work should inherit the same dependency.
for task in self:
task.add_deps(deps)
@check_spectator
def remove_deps(self, deps):
"""
Remove a list of dependencies from the :class:`Node`.
Args:
deps: List of :class:`Dependency` objects specifying the dependencies of the node.
"""
if not isinstance(deps, (list, tuple)):
deps = [deps]
assert all(isinstance(d, Dependency) for d in deps)
self._deps = [d for d in self._deps if d not in deps]
if self.is_work:
# remove the same list of dependencies from the task in the work
for task in self:
task.remove_deps(deps)
@property
def deps_status(self):
"""Returns a list with the status of the dependencies."""
if not self.deps:
return [self.S_OK]
return [d.status for d in self.deps]
def depends_on(self, other):
"""True if this node depends on the other node."""
return other in [d.node for d in self.deps]
def get_parents(self):
"""Return the list of nodes in the :class:`Flow` required by this :class:`Node`"""
parents = []
for work in self.flow:
if self.depends_on(work): parents.append(work)
for task in work:
if self.depends_on(task): parents.append(task)
return parents
def get_children(self):
"""Return the list of nodes in the :class:`Flow` that depends on this :class:`Node`"""
children = []
for work in self.flow:
if work.depends_on(self): children.append(work)
for task in work:
if task.depends_on(self): children.append(task)
return children
def str_deps(self):
"""Return the string representation of the dependencies of the node."""
lines = []
app = lines.append
app("Dependencies of node %s:" % str(self))
for i, dep in enumerate(self.deps):
app("%d) %s, status=%s" % (i, dep.info, str(dep.status)))
return "\n".join(lines)
def set_gc(self, gc):
"""
Set the garbage collector.
"""
assert isinstance(gc, GarbageCollector)
self._gc = gc
@property
def gc(self):
"""
Garbage collector. None if garbage collection is deactivated.
Use flow.set_garbage_collector to initialize the object.
"""
try:
return self._gc
except AttributeError:
#if not self.is_flow and self.flow.gc: return self.flow.gc
return None
@property
def event_handlers(self):
"""
The list of handlers registered for this node.
        If the node is not a `Flow` and does not have its own list of
        `handlers`, the handlers registered at the level of the flow are returned.
        This trick allows one to register different handlers at the level of the Task
for testing purposes. By default, we have a common list of handlers for all the nodes in the flow.
This choice facilitates the automatic installation of the handlers when we use callbacks to generate
new Works and Tasks!
"""
if self.is_flow:
return self._event_handlers
try:
return self._event_handlers
except AttributeError:
return self.flow._event_handlers
@check_spectator
def install_event_handlers(self, categories=None, handlers=None):
"""
        Install the `EventHandlers` for this `Node`. If no argument is provided
the default list of handlers is installed.
Args:
categories: List of categories to install e.g. base + can_change_physics
handlers: explicit list of :class:`EventHandler` instances.
This is the most flexible way to install handlers.
.. note::
categories and handlers are mutually exclusive.
"""
if categories is not None and handlers is not None:
raise ValueError("categories and handlers are mutually exclusive!")
from .events import get_event_handler_classes
if categories:
raise NotImplementedError()
handlers = [cls() for cls in get_event_handler_classes(categories=categories)]
else:
handlers = handlers or [cls() for cls in get_event_handler_classes()]
self._event_handlers = handlers
def show_event_handlers(self, stream=sys.stdout, verbose=0):
"""Print to `stream` the event handlers installed for this flow."""
lines = ["List of event handlers installed:"]
for handler in self.event_handlers:
if verbose: lines.extend(handler.__class__.cls2str().split("\n"))
lines.extend(str(handler).split("\n"))
stream.write("\n".join(lines))
stream.write("\n")
def send_signal(self, signal):
"""
Send signal from this node to all connected receivers unless the node is in spectator mode.
signal -- (hashable) signal value, see `dispatcher` connect for details
Return a list of tuple pairs [(receiver, response), ... ]
or None if the node is in spectator mode.
        If any receiver raises an error, the error propagates back through send,
        terminating the dispatch loop. It is therefore quite possible that not all
        receivers will be called if one of them raises an error.
"""
if self.in_spectator_mode: return None
logger.debug("Node %s broadcasts signal %s" % (self, signal))
        return dispatcher.send(signal=signal, sender=self)
##########################
### Abstract protocol ####
##########################
@abc.abstractproperty
def status(self):
"""The status of the `Node`."""
@abc.abstractmethod
def check_status(self):
"""Check the status of the `Node`."""
class FileNode(Node):
"""
    A Node that consists of a file. The file may not exist yet.
Mainly used to connect :class:`Task` objects to external files produced in previous runs.
"""
color_rgb = np.array((102, 51, 255)) / 255
def __init__(self, filename):
super(FileNode, self).__init__()
self.filepath = os.path.abspath(filename)
# Directories with input|output|temporary data.
self.workdir = os.path.dirname(self.filepath)
self.indir = Directory(self.workdir)
self.outdir = Directory(self.workdir)
self.tmpdir = Directory(self.workdir)
@property
def products(self):
return [Product.from_file(self.filepath)]
def opath_from_ext(self, ext):
return self.filepath
@property
def status(self):
return self.S_OK if os.path.exists(self.filepath) else self.S_ERROR
def check_status(self):
return self.status
def get_results(self, **kwargs):
results = super(FileNode, self).get_results(**kwargs)
#results.register_gridfs_files(filepath=self.filepath)
return results
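# Illustrative usage sketch (not part of the original module): a FileNode reports
# S_OK while its file exists and S_ERROR otherwise, which is how external files
# from previous runs are plugged into a flow as dependencies.
def _example_filenode_status():
    import tempfile
    with tempfile.NamedTemporaryFile(suffix="_DEN") as tmp:
        node = FileNode(tmp.name)
        assert node.status == node.S_OK
    # The temporary file is deleted on exit from the with block.
    assert node.check_status() == node.S_ERROR
    return node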
class HistoryRecord(object):
"""
A `HistoryRecord` instance represents an entry in the :class:`NodeHistory`.
`HistoryRecord` instances are created every time something is logged.
They contain all the information pertinent to the event being logged.
The main information passed in is in msg and args, which are combined
using str(msg) % args to create the message field of the record.
    The record also includes information such as when the record was created and
    the source line where the logging call was made.
.. attribute:: levelno
Numeric logging level for the message (DEBUG, INFO, WARNING, ERROR, CRITICAL)
.. attribute:: levelname
Text logging level for the message ("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL")
.. attribute:: pathname
Full pathname of the source file where the logging call was issued (if available)
.. attribute:: filename
Filename portion of pathname
.. attribute:: module
Module (name portion of filename)
.. attribute:: lineno
Source line number where the logging call was issued (if available)
.. attribute:: func_name
Function name
.. attribute:: created
Time when the HistoryRecord was created (time.time() return value)
.. attribute:: asctime
Textual time when the HistoryRecord was created
.. attribute:: message
The result of record.getMessage(), computed just as the record is emitted
"""
def __init__(self, level, pathname, lineno, msg, args, exc_info, func=None):
"""
Initialize a logging record with interesting information.
"""
#
# The following statement allows passing of a dictionary as a sole
# argument, so that you can do something like
# logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
# Suggested by Stefan Behnel.
# Note that without the test for args[0], we get a problem because
# during formatting, we test to see if the arg is present using
# 'if self.args:'. If the event being logged is e.g. 'Value is %d'
# and if the passed arg fails 'if self.args:' then no formatting
# is done. For example, logger.warn('Value is %d', 0) would log
# 'Value is %d' instead of 'Value is 0'.
# For the use case of passing a dictionary, this should not be a problem.
if args and len(args) == 1 and isinstance(args[0], dict) and args[0]:
args = args[0]
self.args = args
self.levelno = level
self.pathname = pathname
self.msg = msg
self.levelname = "FOOBAR" #getLevelName(level)
try:
self.filename = os.path.basename(pathname)
self.module = os.path.splitext(self.filename)[0]
except (TypeError, ValueError, AttributeError):
self.filename = pathname
self.module = "Unknown module"
self.exc_info = exc_info
self.exc_text = None # used to cache the traceback text
self.lineno = lineno
self.func_name = func
self.created = time.time()
self.asctime = time.asctime()
# Remove milliseconds
i = self.asctime.find(".")
if i != -1: self.asctime = self.asctime[:i]
def __repr__(self):
return '<%s, %s, %s, %s,\n"%s">' % (self.__class__.__name__, self.levelno, self.pathname, self.lineno, self.msg)
def __str__(self):
return self.get_message(metadata=False)
def get_message(self, metadata=False, asctime=True):
"""
Return the message after merging any user-supplied arguments with the message.
Args:
metadata: True if function and module name should be added.
asctime: True if time string should be added.
"""
msg = self.msg if is_string(self.msg) else str(self.msg)
if self.args:
try:
msg = msg % self.args
except:
msg += str(self.args)
if asctime: msg = "[" + self.asctime + "] " + msg
# Add metadata
if metadata:
msg += "\nCalled by %s at %s:%s\n" % (self.func_name, self.pathname, self.lineno)
return msg
@pmg_serialize
def as_dict(self):
return {'level': self.levelno, 'pathname': self.pathname, 'lineno': self.lineno, 'msg': self.msg,
'args': self.args, 'exc_info': self.exc_info, 'func': self.func_name}
@classmethod
def from_dict(cls, d):
return cls(level=d['level'], pathname=d['pathname'], lineno=int(d['lineno']), msg=d['msg'], args=d['args'],
exc_info=d['exc_info'], func=d['func'])
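# Illustrative usage sketch (not part of the original module): how HistoryRecord
# merges msg and args and what the optional asctime/metadata decorations add.
# The level name, pathname and message are placeholders.
def _example_history_record():
    rec = HistoryRecord("INFO", "unknown filename", 0,
                        "Restarting task %s", ("scf_task",), exc_info=None)
    assert rec.get_message(metadata=False, asctime=False) == "Restarting task scf_task"
    # metadata=True appends the caller, pathname and line number as well.
    return rec.get_message(metadata=True)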
class NodeHistory(collections.deque):
"""Logger-like object"""
def __str__(self):
return self.to_string()
def to_string(self, metadata=False):
"""Returns a string with the history. Set metadata to True to have info on function and module."""
return "\n".join(rec.get_message(metadata=metadata) for rec in self)
def info(self, msg, *args, **kwargs):
"""Log 'msg % args' with the info severity level"""
self._log("INFO", msg, args, kwargs)
def warning(self, msg, *args, **kwargs):
"""Log 'msg % args' with the warning severity level"""
self._log("WARNING", msg, args, kwargs)
def critical(self, msg, *args, **kwargs):
"""Log 'msg % args' with the critical severity level"""
self._log("CRITICAL", msg, args, kwargs)
def _log(self, level, msg, args, exc_info=None, extra=None):
"""Low-level logging routine which creates a :class:`HistoryRecord`."""
if exc_info and not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
self.append(HistoryRecord(level, "unknown filename", 0, msg, args, exc_info, func="unknown func"))
#from monty.inspect import find_caller, caller_name
# FIXME: Rewrite this! It does not work if find_caller is not in the module.
#c = find_caller()
#print(caller_name(skip=3))
#self.append(HistoryRecord(level, c.filename, c.lineno, msg, args, exc_info, func=c.name))
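# Illustrative usage sketch (not part of the original module): NodeHistory is a
# bounded deque of HistoryRecords, so only the most recent maxlen entries are kept.
def _example_node_history():
    history = NodeHistory(maxlen=2)
    history.info("step %d", 1)
    history.warning("step %d", 2)
    history.critical("step %d", 3)   # evicts the oldest record
    assert len(history) == 2
    return history.to_string(metadata=False)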
class NodeCorrections(list):
"""Iterable storing the correctios performed by the :class:`EventHandler`"""
#TODO
# Correction should have a human-readable message
    # and a list of operations in JSON format (Modder?) so that
# we can read them and re-apply the corrections to another task if needed.
#def count_event_class(self, event_class):
# """
# Return the number of times the event class has been already fixed.
# """
# #return len([c for c in self if c["event"]["@class"] == str(event_class)])
#def _find(self, event_class)
class GarbageCollector(object):
"""This object stores information on the """
def __init__(self, exts, policy):
self.exts, self.policy = set(exts), policy
# The code below initializes a counter from a file when the module is imported
# and save the counter's updated value automatically when the program terminates
# without relying on the application making an explicit call into this module at termination.
_COUNTER = None
_COUNTER_FILE = os.path.join(os.getenv("HOME"), ".abinit", "abipy", "nodecounter")
def init_counter():
global _COUNTER
# Make dir and file if not present.
if not os.path.exists(os.path.dirname(_COUNTER_FILE)):
os.makedirs(os.path.dirname(_COUNTER_FILE))
if not os.path.exists(_COUNTER_FILE):
with open(_COUNTER_FILE, "wt") as fh:
fh.write("%d\n" % -1)
if _COUNTER is None:
with open(_COUNTER_FILE, "r") as fh:
_COUNTER = int(fh.read())
def get_newnode_id():
"""
Returns a new node identifier used for :class:`Task`, :class:`Work` and :class:`Flow` objects.
    .. warning::
The id is unique inside the same python process so be careful when
Works and Tasks are constructed at run-time or when threads are used.
"""
init_counter()
global _COUNTER
_COUNTER += 1
return _COUNTER
def save_lastnode_id():
"""Save the id of the last node created."""
init_counter()
with FileLock(_COUNTER_FILE):
with AtomicFile(_COUNTER_FILE, mode="w") as fh:
fh.write("%d\n" % _COUNTER)
# Register function atexit
import atexit
atexit.register(save_lastnode_id)
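# Illustrative usage sketch (not part of the original module): node ids increase
# monotonically within a process, and the last value is persisted by the atexit
# hook registered above so that a new process continues from the saved counter.
# Calling save_lastnode_id() explicitly, as done here, is normally not needed.
def _example_node_ids():
    first = get_newnode_id()
    second = get_newnode_id()
    assert second == first + 1
    save_lastnode_id()
    return first, second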
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Australian Government, Department of the Environment
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
Metadata driver for DIMAP imagery
B{Format specification}:
- U{http://www.spotimage.fr/dimap/spec/documentation/refdoc.htm}
@todo: Implement pleiades (dimap v2.0) once gdal fixes dimap driver, http://trac.osgeo.org/gdal/ticket/5018 and http://trac.osgeo.org/gdal/ticket/4826
'''
#format_regex=[r'metadata\.dim$'] #DIMAP
#format_regex=[r'\.dim$'] #DIMAP any *.dim
format_regex=[r'(?<!vol_list)\.dim$', #DIMAP - any *.dim (excluding vol_list.dim)
r'dim_phr.*\.xml$'] #Pleiades image metadata not yet implemented, see
#http://trac.osgeo.org/gdal/ticket/5018 and
#http://trac.osgeo.org/gdal/ticket/4826
'''Regular expression list of file formats'''
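# Illustrative sketch (not part of the driver): which filenames the patterns in
# format_regex are intended to pick up. Case-insensitive matching is assumed here,
# mirroring how metageta crawlers typically apply these patterns.
def _example_format_regex():
    import re
    samples=['metadata.dim','vol_list.dim','DIM_PHR1A_PMS_201202250025599.XML']
    #vol_list.dim is excluded by the negative lookbehind in the first pattern.
    return [s for s in samples if any(re.search(rx,s,re.IGNORECASE) for rx in format_regex)]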
#import base dataset modules
#import __dataset__
import __default__
# import other modules
import sys, os, re, glob, time, math, string
from metageta import utilities, geometry
from lxml import etree
try:
from osgeo import gdal
from osgeo import gdalconst
from osgeo import osr
from osgeo import ogr
except ImportError:
import gdal
import gdalconst
import osr
import ogr
gdal.AllRegister()
class Dataset(__default__.Dataset):
'''Subclass of __default__.Dataset class so we get a load of metadata populated automatically'''
def __init__(self,f):
if f[:4]=='/vsi':
import warnings
warnings.warn('DIMAP files in zip/tar archives are not supported')
raise NotImplementedError
'''Open the dataset'''
if not f:f=self.fileinfo['filepath']
self.filelist=[r for r in glob.glob('%s/*'%os.path.dirname(f))]
        #dom=etree.parse(f) #Takes too long to parse the whole file, so just read as far as we need...
strxml=''
for line in open(f, 'r'):
if line.upper().strip()=='<DATA_STRIP>':break
else: strxml+=line
if not '</Dimap_Document>' in strxml:strxml+='</Dimap_Document>'
self._dom=etree.fromstring(strxml)
self.dimap_version=map(int, self._dom.xpath('string(//*/METADATA_FORMAT/@version)').split('.'))
if self.dimap_version[0]>2:
import warnings
warnings.warn('DIMAP V%s is not supported'%self.dimap_version[0])
raise NotImplementedError
def __getmetadata__(self,f=None):
'''Read Metadata for a DIMAP image as GDAL doesn't quite get it all...'''
if not f:f=self.fileinfo['filepath']
self.metadata['filetype']='DIMAP / %s'%'.'.join(map(str,self.dimap_version))
if self.dimap_version[0]==1:
self.v1(f)
elif self.dimap_version[0]==2:
self.v2(f)
def v1(self,f=None):
dom = self._dom
self.metadata['sceneid'] = dom.xpath('string(/Dimap_Document/Dataset_Id/DATASET_NAME)')
bands=dom.xpath('/Dimap_Document/Spectral_Band_Info/BAND_DESCRIPTION')
self.metadata['bands']=','.join([band.xpath('string(.)') for band in bands])
try:__default__.Dataset.__getmetadata__(self, f) #autopopulate basic metadata
except geometry.GDALError,err: #Work around reading images with lowercase filenames when
#the DATA_FILE_PATH is uppercase
# - eg samba filesystems which get converted to lowercase
dfp=dom.xpath('/Dimap_Document/Data_Access/Data_File/DATA_FILE_PATH')[0]
fn=utilities.encode(dfp.xpath('string(@href)')) #XML is unicode, gdal.Open doesn't like unicode
if not os.path.dirname(fn):fn=os.path.join(os.path.dirname(f),fn)
exists,img=utilities.exists(fn,True)
if exists and not os.path.exists(fn):
import tempfile
tmpfd,tmpfn=tempfile.mkstemp(suffix='.dim',prefix='metadata')
dfp.set('href',img)
tmpfo=os.fdopen(tmpfd,'w')
tmpfo.write(etree.tostring(dom))
tmpfo.flush()
tmpfo.close()
cwd=os.path.abspath(os.curdir)
tmp=os.path.split(tmpfn)
os.chdir(tmp[0]) #CD to the tmp dir so __default__.Dataset.__getmetadata__ doesn't
__default__.Dataset.__getmetadata__(self, tmp[1])
gdalmd=self._gdaldataset.GetMetadata()
self._gdaldataset=geometry.OpenDataset(img)
self._gdaldataset.SetMetadata(gdalmd)
os.unlink(tmpfn)
os.chdir(cwd)
else:raise
dates={}
for src in dom.xpath('//Scene_Source'):
datetime='%sT%s'%(src.xpath('string(IMAGING_DATE)'),src.xpath('string(IMAGING_TIME)'))
dts=time.mktime(time.strptime(datetime,utilities.datetimeformat))#ISO 8601
dates[dts]=datetime
self.metadata['imgdate']='%s/%s'%(dates[min(dates.keys())],dates[max(dates.keys())])
gdalmd=self._gdaldataset.GetMetadata()
self.metadata['satellite']='%s %s' % (gdalmd['MISSION'],gdalmd['MISSION_INDEX'])
try:self.metadata['sensor']='%s %s' % (gdalmd['INSTRUMENT'],gdalmd['INSTRUMENT_INDEX'])
except:self.metadata['sensor']='%s' % gdalmd['INSTRUMENT']
try:self.metadata['sunelevation'] = float(gdalmd['SUN_ELEVATION'])
except:pass
try:self.metadata['sunazimuth'] = float(gdalmd['SUN_AZIMUTH'])
except:pass
try:self.metadata['level'] = gdalmd['PROCESSING_LEVEL']
except:self.metadata['level'] = gdalmd['PRODUCT_TYPE']
self.metadata['viewangle'] = gdalmd.get('VIEWING_ANGLE',gdalmd.get('INCIDENCE_ANGLE',''))
#Processing, store in lineage field
lineage=[]
for step in dom.find('Data_Processing').getchildren():
lineage.append('%s: %s' % (step.tag.replace('_',' '), step.text.replace('_',' ')))
self.metadata['lineage']='\n'.join(lineage)
def v2(self,f=None):
if not f:f=self.fileinfo['filepath']
dom = self._dom
self.metadata['sceneid'] = dom.xpath('string(/Dimap_Document/Dataset_Identification/DATASET_NAME)')
try:
self._gdaldataset=geometry.OpenDataset(f)
__default__.Dataset.__getmetadata__(self) #autopopulate basic metadata
except:
ncols=dom.xpath('number(//*/NCOLS)')
nrows=dom.xpath('number(//*/NROWS)')
nbands=dom.xpath('number(//*/NBANDS)')
nbits=dom.xpath('number(//*/NBITS)')
if nbits==16:datatype='UInt16'
else:datatype='Byte'
if nbands==1:
bands=[1]
else:
bands=[int(b[1:]) for b in [dom.xpath('string(//*/RED_CHANNEL)'),
dom.xpath('string(//*/GREEN_CHANNEL)'),
dom.xpath('string(//*/BLUE_CHANNEL)')]]
self.metadata['bands']=','.join(map(str,bands))
if dom.xpath('string(//*/DATA_FILE_TILES)')=='true':
import math
ntiles=dom.xpath('number(//*/NTILES)')
if dom.xpath('boolean(//*/NTILES_COUNT/@ntiles_x)'):
ntiles_x=dom.xpath('number(//*/NTILES_COUNT/@ntiles_x)')
ntiles_y=dom.xpath('number(//*/NTILES_COUNT/@ntiles_y)')
elif dom.xpath('boolean(//*/NTILES_COUNT/@ntiles_C)'):
ntiles_x=dom.xpath('number(//*/NTILES_COUNT/@ntiles_C)')
ntiles_y=dom.xpath('number(//*/NTILES_COUNT/@ntiles_R)')
tile_cols=math.ceil(ncols/ntiles_x)
last_tile_cols=tile_cols-(ntiles_x*tile_cols-ncols)
tile_rows=math.ceil(nrows/ntiles_y)
last_tile_rows=tile_rows-(ntiles_y*tile_rows-nrows)
srcrects,dstrects=[],[]
files=[]
for df in dom.xpath('//*/Data_File'):
col=df.xpath('number(@tile_C)')
row=df.xpath('number(@tile_R)')
datafile=os.path.join(os.path.dirname(f),df.xpath('string(DATA_FILE_PATH/@href)'))
exists,datafile=utilities.exists(datafile,True) #Work around reading images with lowercase filenames when the DATA_FILE_PATH is uppercase
# - eg samba filesystems which get converted to lowercase
if (row,col)==(1,1):r1c1=datafile
srcrect=[0,0,tile_cols,tile_rows]
dstrect=[(ntiles_x-1)*tile_cols,(ntiles_y-1)*tile_rows,tile_cols,tile_rows]
if col==ntiles_x:#last col
srcrect[2]=last_tile_cols
dstrect[2]=last_tile_cols
if row==ntiles_y:#last row
srcrect[3]=last_tile_rows
dstrect[3]=last_tile_rows
files.append(datafile)
srcrects.append(srcrect)
dstrects.append(dstrect)
self._gdaldataset=geometry.OpenDataset(geometry.CreateMosaicedVRT(files,bands,srcrects,dstrects,ncols,nrows,datatype))
ds=geometry.OpenDataset(r1c1)
self._gdaldataset.SetGeoTransform(ds.GetGeoTransform())
self._gdaldataset.SetProjection(ds.GetProjection())
else:
datafile=os.path.join(os.path.dirname(f),dom.xpath('string(//*/DATA_FILE_PATH/@href)'))
exists,datafile=utilities.exists(datafile,True)
self._gdaldataset=geometry.OpenDataset(datafile)
__default__.Dataset.__getmetadata__(self)
dates={}
for src in dom.xpath('//Source_Identification'):
datetime='%sT%s'%(src.xpath('string(//*/IMAGING_DATE)'),src.xpath('string(//*/IMAGING_TIME)')[:8])
dts=time.mktime(time.strptime(datetime,utilities.datetimeformat))#ISO 8601
dates[dts]=datetime
if len(dates)==1:
self.metadata['imgdate']=datetime
else:
self.metadata['imgdate']='%s/%s'%(dates[min(dates.keys())],dates[max(dates.keys())])
self.metadata['satellite']='%s %s' % (src.xpath('string(//*/MISSION)'),src.xpath('string(//*/MISSION_INDEX)'))
try:self.metadata['sensor']='%s %s' % (src.xpath('string(//*/INSTRUMENT)'),src.xpath('string(//*/INSTRUMENT_INDEX)'))
except:self.metadata['sensor']='%s' % src.xpath('string(//*/INSTRUMENT)')
try:
sunangles=dom.xpath('//*/Located_Geometric_Values[LOCATION_TYPE="Center"]/Solar_Incidences')[0]
self.metadata['sunelevation'] = sunangles.xpath('number(SUN_ELEVATION)')
self.metadata['sunazimuth'] = sunangles.xpath('number(SUN_AZIMUTH)')
except:pass
try:
self.metadata['viewangle'] = dom.xpath('number(//*/Located_Geometric_Values[LOCATION_TYPE="Center"]/Acquisition_Angles/VIEWING_ANGLE)')
self.metadata['satelevation'] = dom.xpath('number(//*/Located_Geometric_Values[LOCATION_TYPE="Center"]/Acquisition_Angles/INCIDENCE_ANGLE)')
self.metadata['satazimuth'] = dom.xpath('number(//*/Located_Geometric_Values[LOCATION_TYPE="Center"]/Acquisition_Angles/AZIMUTH_ANGLE)')
except:pass
try:self.metadata['level'] = dom.xpath('string(//*/Processing_Information/Product_Settings/PROCESSING_LEVEL)')
except:pass
try:self.metadata['resampling'] = dom.xpath('string(//*/Processing_Information/Product_Settings/Sampling_Settings/RESAMPLING_KERNEL)')
except:pass
self.metadata['metadata']=etree.tostring(dom, pretty_print=True)
#Get cloud cover from MASKS/CLD_*_MSK.GML???
def getoverview(self,outfile=None,width=800,format='JPG'):
'''
Generate overviews for DIMAP imagery
@type outfile: str
@param outfile: a filepath to the output overview image. If supplied, format is determined from the file extension
@type width: int
@param width: image width
@type format: str
@param format: format to generate overview image, one of ['JPG','PNG','GIF','BMP','TIF']. Not required if outfile is supplied.
@rtype: str
@return: filepath (if outfile is supplied)/binary image data (if outfile is not supplied)
@todo:
- Should we do something with the band display order metadata?
<Band_Display_Order>
<RED_CHANNEL>1</RED_CHANNEL>
<GREEN_CHANNEL>2</GREEN_CHANNEL>
<BLUE_CHANNEL>3</BLUE_CHANNEL>
</Band_Display_Order>
'''
from metageta import overviews
try:
#First check for a browse graphic, no point re-inventing the wheel...
f=self.fileinfo['filepath']
#if self.dimap_version[0]==1:
# fp=self._dom.xpath('/Dimap_Document/Dataset_Id/DATASET_QL_PATH')[0]
#else:
# fp=self._dom.xpath('/Dimap_Document/Dataset_Identification/DATASET_QL_PATH')[0]
fp=self._dom.xpath('string(//*/DATASET_QL_PATH/@href)')
fn=utilities.encode(fp) #XML is unicode, gdal.Open doesn't like unicode
browse=os.path.join(os.path.dirname(f),fn)
if os.path.exists(browse) and gdal.Open(browse).RasterXSize >= width:
return overviews.resize(browse,outfile,width)
except:pass
return __default__.Dataset.getoverview(self,outfile,width,format)#Do it the slow way...
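# Illustrative sketch (not part of the driver): getoverview() prefers the DIMAP
# browse graphic (DATASET_QL_PATH) when it is large enough and falls back to the
# slower GDAL based resize otherwise. `dataset` is assumed to be an already
# initialised Dataset instance and the output filename is a placeholder.
def _example_overview(dataset,outfile='overview.jpg'):
    return dataset.getoverview(outfile,width=800)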
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from airflow.providers.apache.spark.hooks.spark_jdbc import SparkJDBCHook
from airflow.providers.apache.spark.operators.spark_submit import SparkSubmitOperator
from airflow.utils.decorators import apply_defaults
# pylint: disable=too-many-instance-attributes
class SparkJDBCOperator(SparkSubmitOperator):
"""
This operator extends the SparkSubmitOperator specifically for performing data
transfers to/from JDBC-based databases with Apache Spark. As with the
SparkSubmitOperator, it assumes that the "spark-submit" binary is available on the
PATH.
:param spark_app_name: Name of the job (default airflow-spark-jdbc)
:type spark_app_name: str
:param spark_conn_id: Connection id as configured in Airflow administration
:type spark_conn_id: str
:param spark_conf: Any additional Spark configuration properties
:type spark_conf: dict
:param spark_py_files: Additional python files used (.zip, .egg, or .py)
:type spark_py_files: str
:param spark_files: Additional files to upload to the container running the job
:type spark_files: str
:param spark_jars: Additional jars to upload and add to the driver and
executor classpath
:type spark_jars: str
    :param num_executors: number of executors to run. This should be set so as to manage
the number of connections made with the JDBC database
:type num_executors: int
:param executor_cores: Number of cores per executor
:type executor_cores: int
:param executor_memory: Memory per executor (e.g. 1000M, 2G)
:type executor_memory: str
:param driver_memory: Memory allocated to the driver (e.g. 1000M, 2G)
:type driver_memory: str
:param verbose: Whether to pass the verbose flag to spark-submit for debugging
:type verbose: bool
:param keytab: Full path to the file that contains the keytab
:type keytab: str
:param principal: The name of the kerberos principal used for keytab
:type principal: str
:param cmd_type: Which way the data should flow. 2 possible values:
spark_to_jdbc: data written by spark from metastore to jdbc
jdbc_to_spark: data written by spark from jdbc to metastore
:type cmd_type: str
:param jdbc_table: The name of the JDBC table
:type jdbc_table: str
:param jdbc_conn_id: Connection id used for connection to JDBC database
:type jdbc_conn_id: str
:param jdbc_driver: Name of the JDBC driver to use for the JDBC connection. This
driver (usually a jar) should be passed in the 'jars' parameter
:type jdbc_driver: str
    :param metastore_table: The name of the metastore table.
:type metastore_table: str
:param jdbc_truncate: (spark_to_jdbc only) Whether or not Spark should truncate or
drop and recreate the JDBC table. This only takes effect if
'save_mode' is set to Overwrite. Also, if the schema is
different, Spark cannot truncate, and will drop and recreate
:type jdbc_truncate: bool
:param save_mode: The Spark save-mode to use (e.g. overwrite, append, etc.)
:type save_mode: str
:param save_format: (jdbc_to_spark-only) The Spark save-format to use (e.g. parquet)
:type save_format: str
:param batch_size: (spark_to_jdbc only) The size of the batch to insert per round
trip to the JDBC database. Defaults to 1000
:type batch_size: int
:param fetch_size: (jdbc_to_spark only) The size of the batch to fetch per round trip
from the JDBC database. Default depends on the JDBC driver
:type fetch_size: int
:param num_partitions: The maximum number of partitions that can be used by Spark
simultaneously, both for spark_to_jdbc and jdbc_to_spark
operations. This will also cap the number of JDBC connections
that can be opened
:type num_partitions: int
:param partition_column: (jdbc_to_spark-only) A numeric column to be used to
partition the metastore table by. If specified, you must
also specify:
num_partitions, lower_bound, upper_bound
:type partition_column: str
:param lower_bound: (jdbc_to_spark-only) Lower bound of the range of the numeric
partition column to fetch. If specified, you must also specify:
num_partitions, partition_column, upper_bound
:type lower_bound: int
:param upper_bound: (jdbc_to_spark-only) Upper bound of the range of the numeric
partition column to fetch. If specified, you must also specify:
num_partitions, partition_column, lower_bound
:type upper_bound: int
:param create_table_column_types: (spark_to_jdbc-only) The database column data types
to use instead of the defaults, when creating the
table. Data type information should be specified in
the same format as CREATE TABLE columns syntax
(e.g: "name CHAR(64), comments VARCHAR(1024)").
The specified types should be valid spark sql data
types.
"""
# pylint: disable=too-many-arguments,too-many-locals
@apply_defaults
def __init__(self,
spark_app_name='airflow-spark-jdbc',
spark_conn_id='spark-default',
spark_conf=None,
spark_py_files=None,
spark_files=None,
spark_jars=None,
num_executors=None,
executor_cores=None,
executor_memory=None,
driver_memory=None,
verbose=False,
keytab=None,
principal=None,
cmd_type='spark_to_jdbc',
jdbc_table=None,
jdbc_conn_id='jdbc-default',
jdbc_driver=None,
metastore_table=None,
jdbc_truncate=False,
save_mode=None,
save_format=None,
batch_size=None,
fetch_size=None,
num_partitions=None,
partition_column=None,
lower_bound=None,
upper_bound=None,
create_table_column_types=None,
*args,
**kwargs):
super().__init__(*args, **kwargs)
self._spark_app_name = spark_app_name
self._spark_conn_id = spark_conn_id
self._spark_conf = spark_conf
self._spark_py_files = spark_py_files
self._spark_files = spark_files
self._spark_jars = spark_jars
self._num_executors = num_executors
self._executor_cores = executor_cores
self._executor_memory = executor_memory
self._driver_memory = driver_memory
self._verbose = verbose
self._keytab = keytab
self._principal = principal
self._cmd_type = cmd_type
self._jdbc_table = jdbc_table
self._jdbc_conn_id = jdbc_conn_id
self._jdbc_driver = jdbc_driver
self._metastore_table = metastore_table
self._jdbc_truncate = jdbc_truncate
self._save_mode = save_mode
self._save_format = save_format
self._batch_size = batch_size
self._fetch_size = fetch_size
self._num_partitions = num_partitions
self._partition_column = partition_column
self._lower_bound = lower_bound
self._upper_bound = upper_bound
self._create_table_column_types = create_table_column_types
def execute(self, context):
"""
        Call the SparkJDBCHook to run the provided spark job.
"""
self._hook = SparkJDBCHook(
spark_app_name=self._spark_app_name,
spark_conn_id=self._spark_conn_id,
spark_conf=self._spark_conf,
spark_py_files=self._spark_py_files,
spark_files=self._spark_files,
spark_jars=self._spark_jars,
num_executors=self._num_executors,
executor_cores=self._executor_cores,
executor_memory=self._executor_memory,
driver_memory=self._driver_memory,
verbose=self._verbose,
keytab=self._keytab,
principal=self._principal,
cmd_type=self._cmd_type,
jdbc_table=self._jdbc_table,
jdbc_conn_id=self._jdbc_conn_id,
jdbc_driver=self._jdbc_driver,
metastore_table=self._metastore_table,
jdbc_truncate=self._jdbc_truncate,
save_mode=self._save_mode,
save_format=self._save_format,
batch_size=self._batch_size,
fetch_size=self._fetch_size,
num_partitions=self._num_partitions,
partition_column=self._partition_column,
lower_bound=self._lower_bound,
upper_bound=self._upper_bound,
create_table_column_types=self._create_table_column_types
)
self._hook.submit_jdbc_job()
def on_kill(self):
self._hook.on_kill()
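# Illustrative usage sketch (not part of this module): how the operator above is
# typically wired into a DAG for a metastore -> JDBC transfer. The connection ids
# rely on the defaults ('spark-default', 'jdbc-default'); the task id, table
# names, jar path and driver class are placeholders.
def _example_spark_to_jdbc_task(dag):
    return SparkJDBCOperator(
        task_id='hive_to_postgres',
        cmd_type='spark_to_jdbc',
        jdbc_table='public.orders',
        metastore_table='warehouse.orders',
        jdbc_driver='org.postgresql.Driver',
        spark_jars='/opt/jars/postgresql.jar',
        save_mode='overwrite',
        jdbc_truncate=True,
        num_partitions=8,
        dag=dag,
    )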
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates java source files from a mojom.Module."""
import argparse
import ast
import contextlib
import os
import re
import shutil
import tempfile
import zipfile
from jinja2 import contextfilter
import mojom.generate.generator as generator
import mojom.generate.module as mojom
from mojom.generate.template_expander import UseJinja
GENERATOR_PREFIX = 'java'
_HEADER_SIZE = 8
_spec_to_java_type = {
mojom.BOOL.spec: 'boolean',
mojom.DCPIPE.spec: 'org.chromium.mojo.system.DataPipe.ConsumerHandle',
mojom.DOUBLE.spec: 'double',
mojom.DPPIPE.spec: 'org.chromium.mojo.system.DataPipe.ProducerHandle',
mojom.FLOAT.spec: 'float',
mojom.HANDLE.spec: 'org.chromium.mojo.system.UntypedHandle',
mojom.INT16.spec: 'short',
mojom.INT32.spec: 'int',
mojom.INT64.spec: 'long',
mojom.INT8.spec: 'byte',
mojom.MSGPIPE.spec: 'org.chromium.mojo.system.MessagePipeHandle',
mojom.NULLABLE_DCPIPE.spec:
'org.chromium.mojo.system.DataPipe.ConsumerHandle',
mojom.NULLABLE_DPPIPE.spec:
'org.chromium.mojo.system.DataPipe.ProducerHandle',
mojom.NULLABLE_HANDLE.spec: 'org.chromium.mojo.system.UntypedHandle',
mojom.NULLABLE_MSGPIPE.spec: 'org.chromium.mojo.system.MessagePipeHandle',
mojom.NULLABLE_SHAREDBUFFER.spec:
'org.chromium.mojo.system.SharedBufferHandle',
mojom.NULLABLE_STRING.spec: 'String',
mojom.SHAREDBUFFER.spec: 'org.chromium.mojo.system.SharedBufferHandle',
mojom.STRING.spec: 'String',
mojom.UINT16.spec: 'short',
mojom.UINT32.spec: 'int',
mojom.UINT64.spec: 'long',
mojom.UINT8.spec: 'byte',
}
_spec_to_decode_method = {
mojom.BOOL.spec: 'readBoolean',
mojom.DCPIPE.spec: 'readConsumerHandle',
mojom.DOUBLE.spec: 'readDouble',
mojom.DPPIPE.spec: 'readProducerHandle',
mojom.FLOAT.spec: 'readFloat',
mojom.HANDLE.spec: 'readUntypedHandle',
mojom.INT16.spec: 'readShort',
mojom.INT32.spec: 'readInt',
mojom.INT64.spec: 'readLong',
mojom.INT8.spec: 'readByte',
mojom.MSGPIPE.spec: 'readMessagePipeHandle',
mojom.NULLABLE_DCPIPE.spec: 'readConsumerHandle',
mojom.NULLABLE_DPPIPE.spec: 'readProducerHandle',
mojom.NULLABLE_HANDLE.spec: 'readUntypedHandle',
mojom.NULLABLE_MSGPIPE.spec: 'readMessagePipeHandle',
mojom.NULLABLE_SHAREDBUFFER.spec: 'readSharedBufferHandle',
mojom.NULLABLE_STRING.spec: 'readString',
mojom.SHAREDBUFFER.spec: 'readSharedBufferHandle',
mojom.STRING.spec: 'readString',
mojom.UINT16.spec: 'readShort',
mojom.UINT32.spec: 'readInt',
mojom.UINT64.spec: 'readLong',
mojom.UINT8.spec: 'readByte',
}
_java_primitive_to_boxed_type = {
'boolean': 'Boolean',
'byte': 'Byte',
'double': 'Double',
'float': 'Float',
'int': 'Integer',
'long': 'Long',
'short': 'Short',
}
def NameToComponent(name):
  # insert '_' between anything and a Title name (e.g., HTTPEntry2FooBar ->
# HTTP_Entry2_FooBar)
name = re.sub('([^_])([A-Z][^A-Z_]+)', r'\1_\2', name)
  # insert '_' between a non-upper character and the start of an upper block (e.g.,
# HTTP_Entry2_FooBar -> HTTP_Entry2_Foo_Bar)
name = re.sub('([^A-Z_])([A-Z])', r'\1_\2', name)
return [x.lower() for x in name.split('_')]
def UpperCamelCase(name):
return ''.join([x.capitalize() for x in NameToComponent(name)])
def CamelCase(name):
uccc = UpperCamelCase(name)
return uccc[0].lower() + uccc[1:]
def ConstantStyle(name):
components = NameToComponent(name)
if components[0] == 'k':
components = components[1:]
return '_'.join([x.upper() for x in components])
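# Illustrative sketch (not part of the generator): expected behaviour of the
# naming helpers above for a typical mojom identifier.
def _ExampleNameConversions():
  name = 'HTTPEntry2FooBar'
  assert NameToComponent(name) == ['http', 'entry2', 'foo', 'bar']
  assert UpperCamelCase(name) == 'HttpEntry2FooBar'
  assert CamelCase(name) == 'httpEntry2FooBar'
  assert ConstantStyle('kDefaultTimeout') == 'DEFAULT_TIMEOUT'
  return ConstantStyle(name)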
def GetNameForElement(element):
if (mojom.IsEnumKind(element) or mojom.IsInterfaceKind(element) or
mojom.IsStructKind(element)):
return UpperCamelCase(element.name)
if mojom.IsInterfaceRequestKind(element):
return GetNameForElement(element.kind)
if isinstance(element, (mojom.Method,
mojom.Parameter,
mojom.Field)):
return CamelCase(element.name)
if isinstance(element, mojom.EnumValue):
return (GetNameForElement(element.enum) + '.' +
ConstantStyle(element.name))
if isinstance(element, (mojom.NamedValue,
mojom.Constant)):
return ConstantStyle(element.name)
  raise Exception('Unexpected element: %s' % element)
def GetInterfaceResponseName(method):
return UpperCamelCase(method.name + 'Response')
def ParseStringAttribute(attribute):
assert isinstance(attribute, basestring)
return attribute
def GetJavaTrueFalse(value):
return 'true' if value else 'false'
def GetArrayNullabilityFlags(kind):
"""Returns nullability flags for an array type, see Decoder.java.
As we have dedicated decoding functions for arrays, we have to pass
nullability information about both the array itself, as well as the array
element type there.
"""
assert mojom.IsAnyArrayKind(kind)
ARRAY_NULLABLE = \
'org.chromium.mojo.bindings.BindingsHelper.ARRAY_NULLABLE'
ELEMENT_NULLABLE = \
'org.chromium.mojo.bindings.BindingsHelper.ELEMENT_NULLABLE'
NOTHING_NULLABLE = \
'org.chromium.mojo.bindings.BindingsHelper.NOTHING_NULLABLE'
flags_to_set = []
if mojom.IsNullableKind(kind):
flags_to_set.append(ARRAY_NULLABLE)
if mojom.IsNullableKind(kind.kind):
flags_to_set.append(ELEMENT_NULLABLE)
if not flags_to_set:
flags_to_set = [NOTHING_NULLABLE]
return ' | '.join(flags_to_set)
def AppendEncodeDecodeParams(initial_params, context, kind, bit):
""" Appends standard parameters shared between encode and decode calls. """
params = list(initial_params)
if (kind == mojom.BOOL):
params.append(str(bit))
if mojom.IsReferenceKind(kind):
if mojom.IsAnyArrayKind(kind):
params.append(GetArrayNullabilityFlags(kind))
else:
params.append(GetJavaTrueFalse(mojom.IsNullableKind(kind)))
if mojom.IsAnyArrayKind(kind):
if mojom.IsFixedArrayKind(kind):
params.append(str(kind.length))
else:
params.append(
          'org.chromium.mojo.bindings.BindingsHelper.UNSPECIFIED_ARRAY_LENGTH')
if mojom.IsInterfaceKind(kind):
params.append('%s.MANAGER' % GetJavaType(context, kind))
if mojom.IsAnyArrayKind(kind) and mojom.IsInterfaceKind(kind.kind):
params.append('%s.MANAGER' % GetJavaType(context, kind.kind))
return params
@contextfilter
def DecodeMethod(context, kind, offset, bit):
def _DecodeMethodName(kind):
if mojom.IsAnyArrayKind(kind):
return _DecodeMethodName(kind.kind) + 's'
if mojom.IsEnumKind(kind):
return _DecodeMethodName(mojom.INT32)
if mojom.IsInterfaceRequestKind(kind):
return 'readInterfaceRequest'
if mojom.IsInterfaceKind(kind):
return 'readServiceInterface'
return _spec_to_decode_method[kind.spec]
methodName = _DecodeMethodName(kind)
params = AppendEncodeDecodeParams([ str(offset) ], context, kind, bit)
return '%s(%s)' % (methodName, ', '.join(params))
@contextfilter
def EncodeMethod(context, kind, variable, offset, bit):
params = AppendEncodeDecodeParams(
[ variable, str(offset) ], context, kind, bit)
return 'encode(%s)' % ', '.join(params)
def GetPackage(module):
if 'JavaPackage' in module.attributes:
return ParseStringAttribute(module.attributes['JavaPackage'])
# Default package.
return 'org.chromium.mojom.' + module.namespace
def GetNameForKind(context, kind):
def _GetNameHierachy(kind):
hierachy = []
if kind.parent_kind:
hierachy = _GetNameHierachy(kind.parent_kind)
hierachy.append(GetNameForElement(kind))
return hierachy
module = context.resolve('module')
elements = []
if GetPackage(module) != GetPackage(kind.module):
elements += [GetPackage(kind.module)]
elements += _GetNameHierachy(kind)
return '.'.join(elements)
def GetBoxedJavaType(context, kind):
unboxed_type = GetJavaType(context, kind, False)
if unboxed_type in _java_primitive_to_boxed_type:
return _java_primitive_to_boxed_type[unboxed_type]
return unboxed_type
@contextfilter
def GetJavaType(context, kind, boxed=False):
if boxed:
return GetBoxedJavaType(context, kind)
if mojom.IsStructKind(kind) or mojom.IsInterfaceKind(kind):
return GetNameForKind(context, kind)
if mojom.IsInterfaceRequestKind(kind):
return ('org.chromium.mojo.bindings.InterfaceRequest<%s>' %
GetNameForKind(context, kind.kind))
if mojom.IsAnyArrayKind(kind):
return '%s[]' % GetJavaType(context, kind.kind)
if mojom.IsEnumKind(kind):
return 'int'
return _spec_to_java_type[kind.spec]
@contextfilter
def DefaultValue(context, field):
assert field.default
if isinstance(field.kind, mojom.Struct):
assert field.default == 'default'
return 'new %s()' % GetJavaType(context, field.kind)
return '(%s) %s' % (
GetJavaType(context, field.kind),
ExpressionToText(context, field.default, kind_spec=field.kind.spec))
@contextfilter
def ConstantValue(context, constant):
return '(%s) %s' % (
GetJavaType(context, constant.kind),
ExpressionToText(context, constant.value, kind_spec=constant.kind.spec))
@contextfilter
def NewArray(context, kind, size):
if mojom.IsAnyArrayKind(kind.kind):
return NewArray(context, kind.kind, size) + '[]'
return 'new %s[%s]' % (GetJavaType(context, kind.kind), size)
@contextfilter
def ExpressionToText(context, token, kind_spec=''):
def _TranslateNamedValue(named_value):
entity_name = GetNameForElement(named_value)
if named_value.parent_kind:
return GetJavaType(context, named_value.parent_kind) + '.' + entity_name
# Handle the case where named_value is a module level constant:
if not isinstance(named_value, mojom.EnumValue):
entity_name = (GetConstantsMainEntityName(named_value.module) + '.' +
entity_name)
if GetPackage(named_value.module) == GetPackage(context.resolve('module')):
return entity_name
return GetPackage(named_value.module) + '.' + entity_name
if isinstance(token, mojom.NamedValue):
return _TranslateNamedValue(token)
if kind_spec.startswith('i') or kind_spec.startswith('u'):
# Add Long suffix to all integer literals.
number = ast.literal_eval(token.lstrip('+ '))
if not isinstance(number, (int, long)):
raise ValueError('got unexpected type %r for int literal %r' % (
type(number), token))
# If the literal is too large to fit a signed long, convert it to the
# equivalent signed long.
if number >= 2 ** 63:
number -= 2 ** 64
return '%dL' % number
if isinstance(token, mojom.BuiltinValue):
if token.value == 'double.INFINITY':
return 'java.lang.Double.POSITIVE_INFINITY'
if token.value == 'double.NEGATIVE_INFINITY':
return 'java.lang.Double.NEGATIVE_INFINITY'
if token.value == 'double.NAN':
return 'java.lang.Double.NaN'
if token.value == 'float.INFINITY':
return 'java.lang.Float.POSITIVE_INFINITY'
if token.value == 'float.NEGATIVE_INFINITY':
return 'java.lang.Float.NEGATIVE_INFINITY'
if token.value == 'float.NAN':
return 'java.lang.Float.NaN'
return token
def IsPointerArrayKind(kind):
if not mojom.IsAnyArrayKind(kind):
return False
sub_kind = kind.kind
return mojom.IsObjectKind(sub_kind)
def GetResponseStructFromMethod(method):
return generator.GetDataHeader(
False, generator.GetResponseStructFromMethod(method))
def GetStructFromMethod(method):
return generator.GetDataHeader(
False, generator.GetStructFromMethod(method))
def GetConstantsMainEntityName(module):
if 'JavaConstantsClassName' in module.attributes:
return ParseStringAttribute(module.attributes['JavaConstantsClassName'])
  # This constructs the name of the embedding class for module-level constants
  # by extracting the mojom's filename and appending 'Constants' to it.
return (UpperCamelCase(module.path.split('/')[-1].rsplit('.', 1)[0]) +
'Constants')
def GetMethodOrdinalName(method):
return ConstantStyle(method.name) + '_ORDINAL'
def HasMethodWithResponse(interface):
for method in interface.methods:
if method.response_parameters:
return True
return False
def HasMethodWithoutResponse(interface):
for method in interface.methods:
if not method.response_parameters:
return True
return False
@contextlib.contextmanager
def TempDir():
dirname = tempfile.mkdtemp()
try:
yield dirname
finally:
shutil.rmtree(dirname)
def ZipContentInto(root, zip_filename):
with zipfile.ZipFile(zip_filename, 'w') as zip_file:
for dirname, _, files in os.walk(root):
for filename in files:
path = os.path.join(dirname, filename)
path_in_archive = os.path.relpath(path, root)
zip_file.write(path, path_in_archive)
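# Illustrative sketch (not part of the generator): how TempDir and ZipContentInto
# combine, mirroring what Generator.GenerateFiles below does when it packs the
# generated .java sources into a single srcjar. Package and file names are
# placeholders.
def _ExampleZipGeneratedSources(zip_filename):
  with TempDir() as temp_java_root:
    package_dir = os.path.join(temp_java_root, 'org', 'chromium', 'example')
    os.makedirs(package_dir)
    with open(os.path.join(package_dir, 'Example.java'), 'w') as f:
      f.write('// generated\n')
    ZipContentInto(temp_java_root, zip_filename)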
class Generator(generator.Generator):
java_filters = {
'interface_response_name': GetInterfaceResponseName,
'constant_value': ConstantValue,
'default_value': DefaultValue,
'decode_method': DecodeMethod,
'expression_to_text': ExpressionToText,
'encode_method': EncodeMethod,
'has_method_with_response': HasMethodWithResponse,
'has_method_without_response': HasMethodWithoutResponse,
'is_fixed_array_kind': mojom.IsFixedArrayKind,
'is_handle': mojom.IsNonInterfaceHandleKind,
'is_nullable_kind': mojom.IsNullableKind,
'is_pointer_array_kind': IsPointerArrayKind,
'is_struct_kind': mojom.IsStructKind,
'java_type': GetJavaType,
'java_true_false': GetJavaTrueFalse,
'method_ordinal_name': GetMethodOrdinalName,
'name': GetNameForElement,
'new_array': NewArray,
'response_struct_from_method': GetResponseStructFromMethod,
'struct_from_method': GetStructFromMethod,
'struct_size': lambda ps: ps.GetTotalSize() + _HEADER_SIZE,
}
def GetJinjaExports(self):
return {
'package': GetPackage(self.module),
}
def GetJinjaExportsForInterface(self, interface):
exports = self.GetJinjaExports()
exports.update({'interface': interface})
if interface.client:
for client in self.module.interfaces:
if client.name == interface.client:
exports.update({'client': client})
return exports
@UseJinja('java_templates/enum.java.tmpl', filters=java_filters)
def GenerateEnumSource(self, enum):
exports = self.GetJinjaExports()
exports.update({'enum': enum})
return exports
@UseJinja('java_templates/struct.java.tmpl', filters=java_filters)
def GenerateStructSource(self, struct):
exports = self.GetJinjaExports()
exports.update({'struct': struct})
return exports
@UseJinja('java_templates/interface.java.tmpl', filters=java_filters)
def GenerateInterfaceSource(self, interface):
return self.GetJinjaExportsForInterface(interface)
@UseJinja('java_templates/interface_internal.java.tmpl', filters=java_filters)
def GenerateInterfaceInternalSource(self, interface):
return self.GetJinjaExportsForInterface(interface)
@UseJinja('java_templates/constants.java.tmpl', filters=java_filters)
def GenerateConstantsSource(self, module):
exports = self.GetJinjaExports()
exports.update({'main_entity': GetConstantsMainEntityName(module),
'constants': module.constants})
return exports
def DoGenerateFiles(self):
if not os.path.exists(self.output_dir):
try:
os.makedirs(self.output_dir)
except:
# Ignore errors on directory creation.
pass
# Keep this above the others as .GetStructs() changes the state of the
# module, annotating structs with required information.
for struct in self.GetStructs():
self.Write(self.GenerateStructSource(struct),
'%s.java' % GetNameForElement(struct))
for enum in self.module.enums:
self.Write(self.GenerateEnumSource(enum),
'%s.java' % GetNameForElement(enum))
for interface in self.module.interfaces:
self.Write(self.GenerateInterfaceSource(interface),
'%s.java' % GetNameForElement(interface))
self.Write(self.GenerateInterfaceInternalSource(interface),
'%s_Internal.java' % GetNameForElement(interface))
if self.module.constants:
self.Write(self.GenerateConstantsSource(self.module),
'%s.java' % GetConstantsMainEntityName(self.module))
def GenerateFiles(self, unparsed_args):
parser = argparse.ArgumentParser()
parser.add_argument('--java_output_directory', dest='java_output_directory')
args = parser.parse_args(unparsed_args)
package_path = GetPackage(self.module).replace('.', '/')
# Generate the java files in a temporary directory and place a single
# srcjar in the output directory.
zip_filename = os.path.join(self.output_dir,
"%s.srcjar" % self.module.name)
with TempDir() as temp_java_root:
self.output_dir = os.path.join(temp_java_root, package_path)
      self.DoGenerateFiles()
ZipContentInto(temp_java_root, zip_filename)
if args.java_output_directory:
# If requested, generate the java files directly into indicated directory.
self.output_dir = os.path.join(args.java_output_directory, package_path)
      self.DoGenerateFiles()
def GetJinjaParameters(self):
return {
'lstrip_blocks': True,
'trim_blocks': True,
}
def GetGlobals(self):
return {
'namespace': self.module.namespace,
'module': self.module,
}
|
|
#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
"""Checks code for common issues before submitting."""
import argparse
import os
import subprocess
import sys
import unittest
import yaml
import constants
_SRC_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def _is_project_file(actual_path, expected_filename):
"""Returns True if actual_path's name is |expected_filename| and is a file
that exists and is in in projects/."""
if os.path.basename(actual_path) != expected_filename:
return False
if os.path.basename(os.path.dirname(
os.path.dirname(actual_path))) != 'projects':
return False
return os.path.exists(actual_path)
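# Illustrative sketch (not part of the presubmit script): which paths
# _is_project_file accepts. The paths are placeholders, and the real check also
# requires that the file actually exists on disk.
def _example_is_project_file():
  good = os.path.join('projects', 'libpng', 'project.yaml')
  bad = os.path.join('infra', 'project.yaml')  # not directly under projects/
  return (_is_project_file(good, 'project.yaml'),
          _is_project_file(bad, 'project.yaml'))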
# TODO: Check for -fsanitize=fuzzer in files as well.
def _check_one_lib_fuzzing_engine(build_sh_file):
"""Returns False if |build_sh_file| contains -lFuzzingEngine.
This is deprecated behavior. $LIB_FUZZING_ENGINE should be used instead
so that -fsanitize=fuzzer is used."""
if not _is_project_file(build_sh_file, 'build.sh'):
return True
with open(build_sh_file) as build_sh:
build_sh_lines = build_sh.readlines()
  for line_num, line in enumerate(build_sh_lines, 1):
uncommented_code = line.split('#')[0]
if '-lFuzzingEngine' in uncommented_code:
print(
'Error: build.sh contains deprecated "-lFuzzingEngine" on line: {0}. '
'Please use "$LIB_FUZZING_ENGINE" instead.'.format(line_num))
return False
return True
def check_lib_fuzzing_engine(paths):
"""Calls _check_one_lib_fuzzing_engine on each path in |paths|. Returns True
if the result of every call is True."""
return all([_check_one_lib_fuzzing_engine(path) for path in paths])
class ProjectYamlChecker:
"""Checks for a project.yaml file."""
# Sections in a project.yaml and the constant values that they are allowed
# to have.
SECTIONS_AND_CONSTANTS = {
'sanitizers': constants.SANITIZERS,
'architectures': constants.ARCHITECTURES,
'fuzzing_engines': constants.ENGINES,
}
# Note: this list must be updated when we allow new sections.
VALID_SECTION_NAMES = [
'architectures',
'auto_ccs',
'blackbox',
'builds_per_day',
'coverage_extra_args',
'disabled',
'fuzzing_engines',
'help_url',
'homepage',
'language',
'labels', # For internal use only, hard to lint as it uses fuzzer names.
'main_repo',
'primary_contact',
'run_tests',
'sanitizers',
'selective_unpack',
'vendor_ccs',
'view_restrictions',
]
# Note that some projects like boost only have auto-ccs. However, forgetting
# primary contact is probably a mistake.
REQUIRED_SECTIONS = ['primary_contact', 'main_repo']
def __init__(self, filename):
self.filename = filename
with open(filename) as file_handle:
self.data = yaml.safe_load(file_handle)
self.success = True
def do_checks(self):
"""Does all project.yaml checks. Returns True if they pass."""
if self.is_disabled():
return True
checks = [
self.check_project_yaml_constants,
self.check_required_sections,
self.check_valid_section_names,
self.check_valid_emails,
self.check_valid_language,
self.check_dataflow,
]
for check_function in checks:
check_function()
return self.success
def is_disabled(self):
"""Returns True if this project is disabled."""
return self.data.get('disabled', False)
def error(self, message):
"""Prints an error message and sets self.success to False."""
self.success = False
print('Error in {filename}: {message}'.format(filename=self.filename,
message=message))
def check_dataflow(self):
"""Checks that if "dataflow" is specified in "fuzzing_engines", it is also
specified in "sanitizers", and that if specified in "sanitizers", it is also
specified in "fuzzing_engines". Returns True if this condition is met."""
engines = self.data.get('fuzzing_engines', [])
dfsan_engines = 'dataflow' in engines
sanitizers = self.data.get('sanitizers', [])
dfsan_sanitizers = 'dataflow' in sanitizers
if dfsan_engines and not dfsan_sanitizers:
self.error('"dataflow" only specified in "fuzzing_engines" must also be '
'specified in "sanitizers" or in neither.')
return
if dfsan_sanitizers and not dfsan_engines:
self.error('"dataflow" only specified in "sanitizers" must also be '
'specified in "fuzzing_engines" or in neither.')
return
def check_project_yaml_constants(self):
"""Returns True if certain sections only have certain constant values."""
for section, allowed_constants in self.SECTIONS_AND_CONSTANTS.items():
if section not in self.data:
continue
actual_constants = self.data[section]
for constant in actual_constants:
if isinstance(constant, str):
if constant not in allowed_constants:
self.error(('{constant} (in {section} section) is not a valid '
'constant ({allowed_constants}).').format(
constant=constant,
section=section,
allowed_constants=', '.join(allowed_constants)))
elif isinstance(constant, dict):
# The only alternative value allowed is the experimental flag, i.e.
# `constant == {'memory': {'experimental': True}}`. Do not check the
# experimental flag, but assert that the sanitizer is a valid one.
if (len(constant.keys()) > 1 or
list(constant.keys())[0] not in allowed_constants):
self.error('Not allowed value in the project.yaml: ' +
str(constant))
else:
self.error('Not allowed value in the project.yaml: ' + str(constant))
def check_valid_section_names(self):
"""Returns True if all section names are valid."""
for name in self.data:
if name not in self.VALID_SECTION_NAMES:
self.error('{name} is not a valid section name ({valid_names})'.format(
name=name, valid_names=self.VALID_SECTION_NAMES))
def check_required_sections(self):
"""Returns True if all required sections are in |self.data|."""
for section in self.REQUIRED_SECTIONS:
if section not in self.data:
self.error(section + ' section is missing.')
def check_valid_emails(self):
"""Returns True if emails are valid looking.."""
# Get email addresses.
email_addresses = []
primary_contact = self.data.get('primary_contact')
if primary_contact:
email_addresses.append(primary_contact)
auto_ccs = self.data.get('auto_ccs')
if auto_ccs:
email_addresses.extend(auto_ccs)
# Check that email addresses seem normal.
for email_address in email_addresses:
if '@' not in email_address or '.' not in email_address:
self.error(email_address + ' is an invalid email address.')
def check_valid_language(self):
"""Returns True if the language is specified and valid."""
language = self.data.get('language')
if not language:
self.error('Missing "language" attribute in project.yaml.')
elif language not in constants.LANGUAGES:
self.error(
'"language: {language}" is not supported ({supported}).'.format(
language=language, supported=constants.LANGUAGES))
def _check_one_project_yaml(project_yaml_filename):
"""Does checks on the project.yaml file. Returns True on success."""
if _is_project_file(project_yaml_filename, 'project.yml'):
print(project_yaml_filename, 'must be named project.yaml.')
return False
if not _is_project_file(project_yaml_filename, 'project.yaml'):
return True
checker = ProjectYamlChecker(project_yaml_filename)
return checker.do_checks()
def check_project_yaml(paths):
"""Calls _check_one_project_yaml on each path in |paths|. Returns True if the
result of every call is True."""
return all([_check_one_project_yaml(path) for path in paths])
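# Hedged usage sketch (not part of the original presubmit checks): shows how
# ProjectYamlChecker can be driven directly for a single file. The helper name
# and the example path below are assumptions for illustration only.
def _example_check_single_project_yaml(path='projects/example/project.yaml'):
  """Example only: runs every ProjectYamlChecker check on |path|."""
  checker = ProjectYamlChecker(path)
  return checker.do_checks()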
def do_checks(changed_files):
"""Runs all presubmit checks. Returns False if any fails."""
checks = [
check_license, yapf, lint, check_project_yaml, check_lib_fuzzing_engine
]
  # Use a list comprehension here and in other cases where we use all() so that
  # we don't quit early on failure. This is more user-friendly since the more
  # errors we report at once, the fewer check-fix-check cycles users need to do.
return all([check(changed_files) for check in checks])
_CHECK_LICENSE_FILENAMES = ['Dockerfile']
_CHECK_LICENSE_EXTENSIONS = [
'.bash',
'.c',
'.cc',
'.cpp',
'.css',
'.Dockerfile',
'.h',
'.htm',
'.html',
'.js',
'.proto',
'.py',
'.sh',
]
THIRD_PARTY_DIR_NAME = 'third_party'
_LICENSE_STRING = 'http://www.apache.org/licenses/LICENSE-2.0'
def check_license(paths):
"""Validates license header."""
if not paths:
return True
success = True
for path in paths:
path_parts = str(path).split(os.sep)
if any(path_part == THIRD_PARTY_DIR_NAME for path_part in path_parts):
continue
filename = os.path.basename(path)
extension = os.path.splitext(path)[1]
if (filename not in _CHECK_LICENSE_FILENAMES and
extension not in _CHECK_LICENSE_EXTENSIONS):
continue
with open(path) as file_handle:
if _LICENSE_STRING not in file_handle.read():
print('Missing license header in file %s.' % str(path))
success = False
return success
def bool_to_returncode(success):
"""Returns 0 if |success|. Otherwise returns 1."""
if success:
print('Success.')
return 0
print('Failed.')
return 1
def is_nonfuzzer_python(path):
"""Returns True if |path| ends in .py."""
return os.path.splitext(path)[1] == '.py' and '/projects/' not in path
def lint(_=None):
"""Runs python's linter on infra. Returns False if it fails linting."""
command = ['python3', '-m', 'pylint', '-j', '0', 'infra']
returncode = subprocess.run(command, check=False).returncode
return returncode == 0
def yapf(paths, validate=True):
"""Does yapf on |path| if it is Python file. Only validates format if
|validate|. Otherwise, formats the file. Returns False if validation or
formatting fails."""
paths = [path for path in paths if is_nonfuzzer_python(path)]
if not paths:
return True
validate_argument = '-d' if validate else '-i'
command = ['yapf', validate_argument, '-p']
command.extend(paths)
returncode = subprocess.run(command, check=False).returncode
return returncode == 0
def get_changed_files():
"""Returns a list of absolute paths of files changed in this git branch."""
branch_commit_hash = subprocess.check_output(
['git', 'merge-base', 'HEAD', 'origin/HEAD']).strip().decode()
diff_commands = [
# Return list of modified files in the commits on this branch.
['git', 'diff', '--name-only', branch_commit_hash + '..'],
# Return list of modified files from uncommitted changes.
['git', 'diff', '--name-only']
]
changed_files = set()
for command in diff_commands:
file_paths = subprocess.check_output(command).decode().splitlines()
for file_path in file_paths:
if not os.path.isfile(file_path):
continue
changed_files.add(file_path)
print('Changed files: {changed_files}'.format(
changed_files=' '.join(changed_files)))
return [os.path.abspath(f) for f in changed_files]
def run_build_tests():
"""Runs build tests because they can't be run in parallel."""
suite_list = [
unittest.TestLoader().discover(os.path.join(_SRC_ROOT, 'infra', 'build'),
pattern='*_test.py'),
]
suite = unittest.TestSuite(suite_list)
print('Running build tests.')
result = unittest.TextTestRunner().run(suite)
return not result.failures and not result.errors
def run_nonbuild_tests(parallel):
"""Runs all tests but build tests. Does them in parallel if |parallel|. The
reason why we exclude build tests is because they use an emulator that
prevents them from being used in parallel."""
# We look for all project directories because otherwise pytest won't run tests
# that are not in valid modules (e.g. "base-images").
relevant_dirs = set()
all_files = get_all_files()
for file_path in all_files:
directory = os.path.dirname(file_path)
relevant_dirs.add(directory)
# Use ignore-glob because ignore doesn't seem to work properly with the way we
# pass directories to pytest.
command = [
'pytest',
'--ignore-glob=infra/build/*',
]
if parallel:
command.extend(['-n', 'auto'])
command += list(relevant_dirs)
print('Running non-build tests.')
# TODO(metzman): Get rid of this once config_utils stops using it.
env = os.environ.copy()
env['CIFUZZ_TEST'] = '1'
return subprocess.run(command, check=False, env=env).returncode == 0
def run_tests(_=None, parallel=False, build_tests=True, nonbuild_tests=True):
"""Runs all unit tests."""
build_success = True
nonbuild_success = True
if nonbuild_tests:
nonbuild_success = run_nonbuild_tests(parallel)
else:
print('Skipping nonbuild tests as specified.')
if build_tests:
build_success = run_build_tests()
else:
print('Skipping build tests as specified.')
return nonbuild_success and build_success
def get_all_files():
"""Returns a list of absolute paths of files in this repo."""
get_all_files_command = ['git', 'ls-files']
output = subprocess.check_output(get_all_files_command).decode().splitlines()
return [os.path.abspath(path) for path in output if os.path.isfile(path)]
def main():
"""Check changes on a branch for common issues before submitting."""
# Get program arguments.
parser = argparse.ArgumentParser(description='Presubmit script for oss-fuzz.')
parser.add_argument('command',
choices=['format', 'lint', 'license', 'infra-tests'],
nargs='?')
parser.add_argument('-a',
'--all-files',
action='store_true',
help='Run presubmit check(s) on all files',
default=False)
parser.add_argument('-p',
'--parallel',
action='store_true',
help='Run tests in parallel.',
default=False)
parser.add_argument('-s',
'--skip-build-tests',
action='store_true',
help='Skip build tests which are slow and must run '
'sequentially.',
default=False)
parser.add_argument('-n',
'--skip-nonbuild-tests',
action='store_true',
help='Only do build tests.',
default=False)
args = parser.parse_args()
if args.all_files:
relevant_files = get_all_files()
else:
relevant_files = get_changed_files()
os.chdir(_SRC_ROOT)
# Do one specific check if the user asked for it.
if args.command == 'format':
success = yapf(relevant_files, False)
return bool_to_returncode(success)
if args.command == 'lint':
success = lint()
return bool_to_returncode(success)
if args.command == 'license':
success = check_license(relevant_files)
return bool_to_returncode(success)
if args.command == 'infra-tests':
success = run_tests(relevant_files,
parallel=args.parallel,
build_tests=(not args.skip_build_tests),
nonbuild_tests=(not args.skip_nonbuild_tests))
return bool_to_returncode(success)
# Do all the checks (but no tests).
success = do_checks(relevant_files)
return bool_to_returncode(success)
if __name__ == '__main__':
sys.exit(main())
|
|
from __future__ import division, print_function
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from train_semisupervised.train import (
PREVIOUS_STATES_DISTANCES,
MODEL_HEIGHT,
MODEL_WIDTH,
MODEL_PREV_HEIGHT,
MODEL_PREV_WIDTH
)
from batching import states_to_batch
from train_semisupervised import models as models_semisupervised
from lib.util import to_variable, to_cuda, to_numpy
from lib import util
from lib import actions as actionslib
from config import Config
import imgaug as ia
from imgaug import augmenters as iaa
try:
    import cPickle as pickle
except ImportError:  # Python 3 fallback
    import pickle
import argparse
import math
import numpy as np
import cv2
import gzip as gz
import torch
import torch.nn.functional as F
import collections
from scipy import misc, ndimage
from skimage import draw
import glob
try:
xrange
except NameError:
xrange = range
def main():
parser = argparse.ArgumentParser(description="Generate frames for a video. This requires a previous recording done via train.py --record='filename'.")
parser.add_argument("--record", default=None, help="Filepath to recording.", required=True)
parser.add_argument("--outdir", default="video_output", help="Output directory name.", required=False)
parser.add_argument("--y", default=False, action="store_true", help="Always let ffmpeg/avconv overwrite existing video files without asking.", required=False)
parser.add_argument("--noredraw", default=False, action="store_true", help="Skip drawing frames for videos for which the target directory already exists.", required=False)
args = parser.parse_args()
assert args.record is not None
if "*" in args.record:
record_fps = glob.glob(args.record)
assert len(record_fps) > 0
for record_fp in record_fps:
fn = os.path.basename(record_fp)
assert fn != ""
fn_root = fn[0:fn.index(".")] #os.path.splitext(fn)
assert fn_root != ""
outdir = os.path.join(args.outdir, fn_root + "-" + str(abs(hash(record_fp)))[0:8])
process(record_fp, outdir, args.y, args.noredraw)
else:
process(args.record, args.outdir, args.y, args.noredraw)
def process(record_fp, outdir_frames, y, noredraw):
if outdir_frames[-1] == "/":
outdir_videos = os.path.dirname(outdir_frames[:-1])
dirname_frames_last = os.path.basename(outdir_frames[:-1])
else:
outdir_videos = os.path.dirname(outdir_frames)
dirname_frames_last = os.path.basename(outdir_frames)
assert os.path.isfile(record_fp)
print("Processing recording '%s'..." % (record_fp,))
print("Writing frames to '%s', videos to '%s' with filename start '%s'..." % (outdir_frames, outdir_videos, dirname_frames_last))
with gz.open(record_fp, "rb") as f:
recording = pickle.load(f)
if os.path.exists(outdir_frames) and noredraw:
print("Video frames were already drawn, not redrawing")
else:
if not os.path.exists(outdir_frames):
print("Target directory for frames does not exist, creating it...")
os.makedirs(outdir_frames)
fd = FrameDrawer(outdir_videos)
for fidx, frames_drawn in enumerate(fd.draw_frames(recording)):
print("Frame %06d of around %06d..." % (fidx, len(recording["frames"])))
frame_plans = frames_drawn[0]
frame_atts = frames_drawn[1]
frame_grids = frames_drawn[2]
if frame_plans is not None:
misc.imsave(os.path.join(outdir_frames, "plans_%06d.jpg" % (fidx,)), frame_plans)
if frame_atts is not None:
misc.imsave(os.path.join(outdir_frames, "atts_%06d.jpg" % (fidx,)), frame_atts)
if frame_grids is not None:
misc.imsave(os.path.join(outdir_frames, "grids_%06d.jpg" % (fidx,)), frame_grids)
#if fidx > 200:
# break
if not os.path.exists(outdir_videos):
print("Target directory for videos does not exist, creating it...")
os.makedirs(outdir_videos)
frame_fps = ["plans_%06d.jpg", "atts_%06d.jpg", "grids_%06d.jpg"]
frame_fps = [os.path.join(outdir_frames, fp) for fp in frame_fps]
video_fps = ["plans.mp4", "atts.mp4", "grids.mp4"]
video_fps = [os.path.join(outdir_videos, "%s-%s" % (dirname_frames_last, fp)) for fp in video_fps]
for frame_fp, video_fp in zip(frame_fps, video_fps):
#os.system('avconv %s -framerate 10 -i "%s" -crf 25 -b:v 2000k -vcodec mpeg4 %s' % ("-y" if y else "", frame_fp, video_fp))
os.system('avconv %s -framerate 10 -i "%s" -crf 25 -b:v 2000k -vcodec h264 %s' % ("-y" if y else "", frame_fp, video_fp))
class FrameDrawer(object):
def __init__(self, outdir):
self.outdir = outdir
checkpoint_supervised = torch.load("../train_semisupervised/train_semisupervised_model_withshortcuts.tar")
embedder_supervised = models_semisupervised.PredictorWithShortcuts()
embedder_supervised.eval()
embedder_supervised.load_state_dict(checkpoint_supervised["predictor_state_dict"])
if Config.GPU >= 0:
embedder_supervised.cuda(Config.GPU)
self.embedder_supervised = embedder_supervised
def draw_frames(self, recording):
previous_states = collections.deque(maxlen=max(PREVIOUS_STATES_DISTANCES))
for frame in recording["frames"]:
scr = util.decompress_img(frame["scr"])
scr = np.clip(scr.astype(np.float32) * 1.5, 0, 255).astype(np.uint8)
current_state = frame["state"]
current_plan_idx = frame["current_plan_idx"]
current_plan_step_idx = frame["current_plan_step_idx"]
idr_v = frame["idr_v"]
idr_adv = frame["idr_adv"]
plan_to_rewards_direct = frame["plan_to_rewards_direct"]
plan_to_reward_indirect = frame["plan_to_reward_indirect"]
plan_to_reward = frame["plan_to_reward"]
plans_ranking = frame["plans_ranking"]
if current_plan_idx is not None:
frame_plans = self.draw_frame_plans(
scr, current_state,
recording["plans"],
current_plan_idx, current_plan_step_idx,
idr_v, idr_adv,
plan_to_rewards_direct, plan_to_reward_indirect, plan_to_reward,
plans_ranking
)
else:
frame_plans = None
if len(previous_states) == previous_states.maxlen:
batch = states_to_batch([list(previous_states)], [[current_state]], iaa.Noop(), PREVIOUS_STATES_DISTANCES, MODEL_HEIGHT, MODEL_WIDTH, MODEL_PREV_HEIGHT, MODEL_PREV_WIDTH)
inputs_supervised = batch.inputs_supervised(volatile=True, gpu=Config.GPU)
x_ae, x_grids, x_atts, x_ma, x_flow, x_canny, x_flipped, x_emb = self.embedder_supervised.forward(inputs_supervised[0], inputs_supervised[1])
frame_attributes = self.draw_frame_attributes(scr, x_atts)
frame_grids = self.draw_frame_grids(scr, x_grids)
else:
frame_attributes = None
frame_grids = None
yield (frame_plans, frame_attributes, frame_grids)
previous_states.append(current_state)
def draw_frame_plans(self, scr, state, plans, current_plan_idx, current_plan_step_idx, idr_v, idr_adv, plan_to_rewards_direct, plan_to_reward_indirect, plan_to_reward, plans_ranking):
mincolf = 0.2
bgcolor = [0, 0, 0]
image = np.zeros((720, 1280, 3), dtype=np.uint8)
scr_main = ia.imresize_single_image(scr, (int(720*0.58), int(1280*0.58)))
util.draw_image(
image,
y=int((image.shape[0]-scr_main.shape[0])/2),
x=1280-scr_main.shape[1]-2,
other_img=scr_main,
copy=False
)
image = util.draw_text(
image,
x=1280-(scr_main.shape[1]//2)-125,
y=image.shape[0] - int((image.shape[0]-scr_main.shape[0])/2) + 10,
text="Framerate matches the one that the model sees (10fps).",
size=10,
color=[128, 128, 128]
)
def draw_key(key):
btn = [
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]
]
btn = np.array(btn, dtype=np.uint8) * 255
btn = np.tile(btn[:, :, np.newaxis], (1, 1, 3))
if key is None:
return np.zeros_like(btn)
elif key == "":
return btn
else:
return util.draw_text(btn, x=3, y=3, text=key, size=9, color=[255, 255, 255])
def multiaction_idx_to_image(multiaction_idx):
#btn = np.pad(btn, ((0, 0), (0, 4), (0, 0)), mode="constant", constant_values=0)
key_to_img = dict()
for key in ["W", "A", "S", "D", None]:
key_to_img[key] = draw_key(key)
multiaction = actionslib.ALL_MULTIACTIONS[multiaction_idx]
sw = 1.0 if multiaction[0] == "W" else mincolf
sa = 1.0 if multiaction[1] == "A" else mincolf
ss = 1.0 if multiaction[0] == "S" else mincolf
sd = 1.0 if multiaction[1] == "D" else mincolf
buttons = [
[key_to_img[None], key_to_img["W"]*sw, key_to_img[None]],
[key_to_img["A"]*sa, key_to_img["S"]*ss, key_to_img["D"]*sd]
]
buttons_img = np.vstack([
np.hstack([btn.astype(np.uint8) for btn in buttons[0]]),
np.hstack([btn.astype(np.uint8) for btn in buttons[1]])
])
buttons_img = np.pad(buttons_img, ((0, 0), (0, 4), (0, 0)), mode="constant", constant_values=0)
return buttons_img
multiaction_idx_to_image_dict = dict([(i, multiaction_idx_to_image(i)) for i in range(len(actionslib.ALL_MULTIACTIONS))])
multiaction_to_image_dict = dict([(ma, multiaction_idx_to_image(i)) for i, ma in enumerate(actionslib.ALL_MULTIACTIONS)])
def plan_to_image(p_multiactions, p_direct_rewards, p_v, padding_bottom=8, minwidth=200):
plan_viz = [multiaction_to_image_dict[ma] for ma in p_multiactions]
#plan_viz = [np.pad(a, ((0, 20), (2, 2), (0, 0)), mode="constant", constant_values=0) for a in plan_viz]
plan_viz = [np.pad(a, ((0, 20), (0, 1), (0, 0)), mode="constant", constant_values=0) for a in plan_viz]
if p_direct_rewards is not None:
for j in xrange(len(plan_viz)):
#plan_viz[j] = util.draw_text(plan_viz[j], x=9, y=plan_viz[j].shape[0]-16, text="r", size=9, color=[128, 128, 128])
plan_viz[j] = util.draw_text(plan_viz[j], x=11, y=plan_viz[j].shape[0]-13, text="r %.1f" % (p_direct_rewards[j],), size=9, color=[128, 128, 128])
if p_v is not None:
plan_viz.append(np.zeros_like(plan_viz[-1]))
#plan_viz[-1] = util.draw_text(plan_viz[-1], x=3, y=5, text="V", size=9, color=[128, 128, 128])
#plan_viz[-1] = util.draw_text(plan_viz[-1], x=9, y=11, text="V %.1f" % (p_v,), size=9, color=[255, 255, 255])
plan_viz[-1] = util.draw_text(plan_viz[-1], x=5, y=16, text="V %.1f" % (p_v,), size=9, color=[255, 255, 255])
plan_viz = np.hstack(plan_viz)
width_extend = minwidth - plan_viz.shape[1] if plan_viz.shape[1] < minwidth else 0
#print("width_extend", width_extend, minwidth, plan_viz.shape[0])
plan_viz = np.pad(plan_viz, ((0, padding_bottom), (0, width_extend), (0, 0)), mode="constant", constant_values=0)
return plan_viz
# -------------
# current plan
# -------------
current_plan_viz = plan_to_image(
plans[current_plan_idx][current_plan_step_idx:],
None, None
)
#current_plan_viz = np.pad(current_plan_viz, ((50, 0), (20, 0), (0, 0)), mode="constant", constant_values=0)
current_plan_viz = np.pad(current_plan_viz, ((50, 0), (2, 0), (0, 0)), mode="constant", constant_values=0)
current_plan_viz = util.draw_text(current_plan_viz, x=4, y=4, text="Current Plan", color=[255, 255, 255])
util.draw_image(image, y=10, x=10, other_img=current_plan_viz, copy=False)
# -------------
# best plans
# -------------
best_plans_viz = []
for i in range(4):
plan_idx = plans_ranking[::-1][i]
plan = plans[plan_idx]
r = plan_to_rewards_direct[plan_idx]
v = plan_to_reward_indirect[plan_idx]
plan_viz = plan_to_image(plan, r, v)
best_plans_viz.append(plan_viz)
best_plans_viz = np.vstack(best_plans_viz)
#best_plans_viz = np.pad(best_plans_viz, ((50, 30), (20, 0), (0, 0)), mode="constant", constant_values=0)
best_plans_viz = np.pad(best_plans_viz, ((50, 30), (2, 0), (0, 0)), mode="constant", constant_values=0)
best_plans_viz = util.draw_text(best_plans_viz, x=4, y=4, text="Best Plans", color=[255, 255, 255])
best_plans_viz = util.draw_text(best_plans_viz, x=30, y=best_plans_viz.shape[0]-20, text="r = expected direct reward at timestep (discounted)\nV = expected indirect reward at last timestep (discounted)", color=[128, 128, 128], size=9)
util.draw_image(image, y=110, x=10, other_img=best_plans_viz, copy=False)
# --------------
# top15
# --------------
n = 15
top_viz = []
counts_ud = dict([(action, 0) for action in actionslib.ACTIONS_UP_DOWN])
counts_lr = dict([(action, 0) for action in actionslib.ACTIONS_LEFT_RIGHT])
for i in range(n):
plan_idx = plans_ranking[::-1][i]
plan = plans[plan_idx]
for ma in plan:
counts_ud[ma[0]] += 1
counts_lr[ma[1]] += 1
sum_ud = np.sum(list(counts_ud.values()))
sum_lr = np.sum(list(counts_lr.values()))
fracs_ud = [counts_ud["W"]/sum_ud, counts_ud["S"]/sum_ud, counts_ud["~WS"]/sum_ud]
fracs_lr = [counts_lr["A"]/sum_lr, counts_lr["D"]/sum_lr, counts_lr["~AD"]/sum_lr]
def draw_bar(frac, key, h=30, w=20, margin_right=15):
bar = np.zeros((h, 1), dtype=np.uint8) + 32
bar[0:int(h*frac)+1] = 255
bar = np.flipud(bar)
bar = np.tile(bar[:, :, np.newaxis], (1, w, 3))
bar = np.pad(bar, ((20, 30), (0, margin_right), (0, 0)), mode="constant", constant_values=0)
textx = 5
            if frac*100 >= 100:
                textx = textx - 6
            elif frac*100 >= 10:
                textx = textx - 3
bar = ia.draw_text(bar, x=textx, y=2, text="%.0f%%" % (frac*100,), size=8, color=[255, 255, 255])
keyimg = draw_key(key)
util.draw_image(bar, x=(w//2)-keyimg.shape[1]//2, y=bar.shape[0]-keyimg.shape[0]-8, other_img=keyimg, copy=False)
return bar
bars_ud = [draw_bar(fracs_ud[0], "W"), draw_bar(fracs_ud[1], "S"), draw_bar(fracs_ud[2], "", margin_right=55)]
bars_lr = [draw_bar(fracs_lr[0], "A"), draw_bar(fracs_lr[1], "D"), draw_bar(fracs_lr[2], "")]
top_viz = np.hstack(bars_ud + bars_lr)
top_viz = np.pad(top_viz, ((50, 30), (20, 180), (0, 0)), mode="constant", constant_values=0)
top_viz = util.draw_text(top_viz, x=4, y=4, text="Share Of Keys (Top %d Plans)" % (n,), color=[255, 255, 255])
        top_viz = util.draw_text(top_viz, x=4, y=top_viz.shape[0]-20, text="Percent of actions among top %d plans that contain an up/down or left/right key" % (n,), color=[128, 128, 128], size=9)
util.draw_image(image, y=430, x=10, other_img=top_viz, copy=False)
# --------------
# other
# --------------
other_viz = np.zeros((300, 500, 3), dtype=np.uint8)
other_viz = util.draw_text(other_viz, x=4, y=4, text="Speed", color=[255, 255, 255])
other_viz = util.draw_text(other_viz, x=150, y=4, text="Steering Wheel", color=[255, 255, 255])
other_viz = util.draw_text(other_viz, x=12, y=65, text="%d km/h" % (state.speed if state.speed is not None else -1), color=[255, 255, 255])
sw_angle = state.steering_wheel_cnn if state.steering_wheel_cnn is not None else 0
sw_circle = np.zeros((80, 80, 3), dtype=np.int32)
if sw_angle <= -360 or sw_angle >= 360:
rr, cc = draw.circle(r=40, c=40, radius=30)
sw_circle[rr, cc, :] = 128
col = [128, 128, 128] if -360 < sw_angle < 360 else [255, 255, 255]
if abs(sw_angle % 360) > 1:
if sw_angle < 0:
sw_circle = util.draw_direction_circle(
sw_circle,
y=40, x=40,
r_inner=0, r_outer=30,
angle_start=360-(abs(int(sw_angle)) % 360), angle_end=360,
color_border=col,
color_fill=col
#color_fill=[255,0,0]
)
#sw_circle = util.draw_text(sw_circle, x=5, y=5, text="%.2f\n%.2f" % (abs(int(sw_angle)) % 360, 360-(abs(int(sw_angle)) % 360)), size=12, color=[255, 255, 255])
else:
sw_circle = util.draw_direction_circle(
sw_circle,
y=40, x=40,
r_inner=0, r_outer=30,
angle_start=0, angle_end=int(sw_angle) % 360,
color_border=col,
color_fill=col
#color_fill=[0,255,0]
)
rr, cc, val = draw.circle_perimeter_aa(40, 40, radius=30)
#sw_circle[rr, cc, :] = sw_circle[rr, cc, :] + np.tile((val * 255)[:,:,np.newaxis], (1, 1, 3))
sw_circle[rr, cc, :] += np.tile((val * 255).astype(np.int32)[:,np.newaxis], (1, 3))
sw_circle = np.clip(sw_circle, 0, 255).astype(np.uint8)
sw_circle = np.pad(sw_circle, ((0, 0), (0, 140), (0, 0)), mode="constant", constant_values=0)
sw_circle = util.draw_text(sw_circle, x=92, y=27, text="%d deg" % (sw_angle,), color=[255, 255, 255])
util.draw_image(other_viz, x=150, y=40, other_img=sw_circle, copy=False)
util.draw_image(image, y=590, x=10, other_img=other_viz, copy=False)
return image
def draw_frame_attributes(self, scr, atts):
atts = atts[0]
mincolf = 0.2
#print("space_front raw", atts[33:37], F.softmax(atts[33:37]))
#print("space_left raw", atts[37:41], F.softmax(atts[37:41]))
#print("space_right raw", atts[41:45], F.softmax(atts[41:45].unsqueeze(0)).squeeze(0))
road_type = simplesoftmax(to_numpy(atts[0:10]))
intersection = simplesoftmax(to_numpy(atts[10:17]))
direction = simplesoftmax(to_numpy(atts[17:20]))
lane_count = simplesoftmax(to_numpy(atts[20:25]))
curve = simplesoftmax(to_numpy(atts[25:33]))
space_front = simplesoftmax(to_numpy(atts[33:37]))
space_left = simplesoftmax(to_numpy(atts[37:41]))
space_right = simplesoftmax(to_numpy(atts[41:45]))
offroad = simplesoftmax(to_numpy(atts[45:48]))
bgcolor = [0, 0, 0]
image = np.zeros((720, 1280, 3), dtype=np.uint8) + bgcolor
scr_main = ia.imresize_single_image(scr, (int(720*0.58), int(1280*0.58)))
util.draw_image(
image,
y=int((image.shape[0]-scr_main.shape[0])/2),
x=1280-scr_main.shape[1]-2,
other_img=scr_main,
copy=False
)
image = util.draw_text(
image,
x=1280-(scr_main.shape[1]//2)-125,
y=image.shape[0] - int((image.shape[0]-scr_main.shape[0])/2) + 10,
text="Framerate matches the one that the model sees (10fps).",
size=10,
color=[128, 128, 128]
)
# ---------------
# Curve
# ---------------
"""
street = np.zeros((65, 65, 3), dtype=np.float32)
street[:, 0:2, :] = 255
street[:, -2:, :] = 255
street[:, 32:35, :] = 255
street_left_strong = curve(street
"""
curve_left_strong = 255 - ndimage.imread("../images/video/curve-left-strong.png", mode="RGB")
curve_left_medium = 255 - ndimage.imread("../images/video/curve-left-medium.png", mode="RGB")
curve_left_slight = 255 - ndimage.imread("../images/video/curve-left-slight.png", mode="RGB")
curve_straight = 255 - ndimage.imread("../images/video/curve-straight.png", mode="RGB")
curve_right_strong = np.fliplr(curve_left_strong)
curve_right_medium = np.fliplr(curve_left_medium)
curve_right_slight = np.fliplr(curve_left_slight)
curve_straight = (curve_straight * np.clip(curve[0], mincolf, 1.0)).astype(np.uint8)
curve_left_slight = (curve_left_slight * np.clip(curve[1], mincolf, 1.0)).astype(np.uint8)
curve_left_medium = (curve_left_medium * np.clip(curve[2], mincolf, 1.0)).astype(np.uint8)
curve_left_strong = (curve_left_strong * np.clip(curve[3], mincolf, 1.0)).astype(np.uint8)
curve_right_slight = (curve_right_slight * np.clip(curve[4], mincolf, 1.0)).astype(np.uint8)
curve_right_medium = (curve_right_medium * np.clip(curve[5], mincolf, 1.0)).astype(np.uint8)
curve_right_strong = (curve_right_strong * np.clip(curve[6], mincolf, 1.0)).astype(np.uint8)
def add_perc(curve_img, perc, x_correct):
col = np.clip(255 * perc, mincolf*255, 255)
col = np.array([col, col, col], dtype=np.uint8)
curve_img_pad = np.pad(curve_img, ((0, 20), (0, 0), (0, 0)), mode="constant", constant_values=0)
x = int(curve_img_pad.shape[1]/2) - 6
if (perc*100) >= 100:
x = x - 9
elif (perc*100) >= 10:
x = x - 6
x = x + x_correct
curve_img_pad = util.draw_text(
curve_img_pad,
x=x,
y=curve_img_pad.shape[0]-15,
text="%.0f%%" % (perc*100,),
color=col,
size=9
)
return curve_img_pad
curve_straight = add_perc(curve_straight, curve[0], x_correct=0)
curve_left_slight = add_perc(curve_left_slight, curve[1], x_correct=3)
curve_left_medium = add_perc(curve_left_medium, curve[2], x_correct=1)
curve_left_strong = add_perc(curve_left_strong, curve[3], x_correct=-1)
curve_right_slight = add_perc(curve_right_slight, curve[4], x_correct=-3)
curve_right_medium = add_perc(curve_right_medium, curve[5], x_correct=-2)
curve_right_strong = add_perc(curve_right_strong, curve[6], x_correct=0)
curves = np.hstack([
curve_left_strong, curve_left_medium, curve_left_slight,
curve_straight,
curve_right_slight, curve_right_medium, curve_right_strong
])
curves = np.pad(curves, ((50, 0), (20, 0), (0, 0)), mode="constant", constant_values=0)
curves = util.draw_text(curves, x=4, y=4, text="Curve", color=[255, 255, 255])
util.draw_image(image, y=50, x=2, other_img=curves, copy=False)
# ---------------
# Lane count
# ---------------
pics = []
for lc_idx in range(4):
col = int(np.clip(255*lane_count[lc_idx], 255*mincolf, 255))
col = np.array([col, col, col], dtype=np.uint8)
lc = lc_idx + 1
marking_width = 2
street = np.zeros((64, 64, 3), dtype=np.float32)
street[:, 0:marking_width, :] = col
street[:, -marking_width:, :] = col
inner_width = street.shape[1] - 2*marking_width
lane_width = int((inner_width - (lc-1)*marking_width) // lc)
start = marking_width
for i in range(lc-1):
mstart = start + lane_width
mend = mstart + marking_width
street[1::6, mstart:mend, :] = col
street[2::6, mstart:mend, :] = col
street[3::6, mstart:mend, :] = col
start = mend
x = 14 + 24
            if lane_count[lc_idx]*100 >= 100:
                x = x - 12
            elif lane_count[lc_idx]*100 >= 10:
                x = x - 8
street = np.pad(street, ((0, 20), (14, 14), (0, 0)), mode="constant", constant_values=0)
street = util.draw_text(street, x=x, y=street.shape[0]-14, text="%.0f%%" % (lane_count[lc_idx]*100,), size=9, color=col)
pics.append(street)
pics = np.hstack(pics)
pics = np.pad(pics, ((55, 0), (20, 0), (0, 0)), mode="constant", constant_values=0)
pics = util.draw_text(pics, x=4, y=4, text="Lane Count", color=[255, 255, 255])
util.draw_image(image, y=250, x=2, other_img=pics, copy=False)
# ---------------
# Space
# ---------------
truck = np.zeros((100, 55, 3), dtype=np.uint8)
truck[0:2, :, :] = 255
truck[0:20, 0:2, :] = 255
truck[0:20, -2:, :] = 255
truck[20:22, :, :] = 255
truck[22:25, 25:27, :] = 255
truck[22:25, 29:31, :] = 255
truck[24:26, :, :] = 255
truck[24:, 0:2, :] = 255
truck[24:, -2:, :] = 255
truck[24:, -2:, :] = 255
truck[-2:, :, :] = 255
truck_full = np.pad(truck, ((50, 50), (100, 50), (0, 0)), mode="constant", constant_values=np.average(bgcolor))
#print("space_front", space_front)
#print("space_right", space_right)
#print("space_left", space_left)
fill_top = 1 * space_front[0] + 0.6 * space_front[1] + 0.25 * space_front[2] + 0 * space_front[3]
fill_right = 1 * space_right[0] + 0.6 * space_right[1] + 0.25 * space_right[2] + 0 * space_right[3]
fill_left = 1 * space_left[0] + 0.6 * space_left[1] + 0.25 * space_left[2] + 0 * space_left[3]
r_outer_top = 8 + int((30-8) * fill_top)
r_outer_right = 8 + int((30-8) * fill_right)
r_outer_left = 8 + int((30-8) * fill_left)
def fill_to_text(fill):
col = np.array([255, 255, 255], dtype=np.uint8)
if fill > 0.75:
text = "plenty"
elif fill > 0.5:
text = "some"
elif fill > 0.25:
text = "low"
else:
text = "minimal"
return text, col
#top
truck_full = util.draw_direction_circle(
truck_full,
y=33, x=100+27,
r_inner=8, r_outer=30,
angle_start=-60, angle_end=60,
color_border=[255, 255, 255],
color_fill=[0, 0, 0]
)
truck_full = util.draw_direction_circle(
truck_full,
y=33, x=100+27,
r_inner=8, r_outer=r_outer_top,
angle_start=-60, angle_end=60,
color_border=[255, 255, 255],
color_fill=[255, 255, 255]
)
#text, col = fill_to_text(fill_top)
#truck_full = util.draw_text(truck_full, x=100+27, y=15, text=text, size=9, color=col)
# right
truck_full = util.draw_direction_circle(
truck_full,
y=100, x=170,
r_inner=8, r_outer=30,
angle_start=30, angle_end=180-30,
color_border=[255, 255, 255],
color_fill=[0, 0, 0]
)
truck_full = util.draw_direction_circle(
truck_full,
y=100, x=170,
r_inner=8, r_outer=r_outer_right,
angle_start=30, angle_end=180-30,
color_border=[255, 255, 255],
color_fill=[255, 255, 255]
)
#text, col = fill_to_text(fill_right)
#truck_full = util.draw_text(truck_full, x=170, y=100, text=text, size=9, color=col)
# left
truck_full = util.draw_direction_circle(
truck_full,
y=100, x=83,
r_inner=8, r_outer=30,
angle_start=180+30, angle_end=360-30,
color_border=[255, 255, 255],
color_fill=[0, 0, 0]
)
truck_full = util.draw_direction_circle(
truck_full,
y=100, x=83,
r_inner=8, r_outer=r_outer_left,
angle_start=180+30, angle_end=360-30,
color_border=[255, 255, 255],
color_fill=[255, 255, 255]
)
#text, col = fill_to_text(fill_left)
#truck_full = util.draw_text(truck_full, x=75, y=100, text=text, size=9, color=col)
truck_full = np.pad(truck_full, ((50, 0), (110, 0), (0, 0)), mode="constant", constant_values=0)
truck_full = util.draw_text(truck_full, x=4, y=4, text="Space", color=[255, 255, 255])
util.draw_image(image, y=450, x=10, other_img=truck_full, copy=False)
return image
def draw_frame_grids(self, scr, grids):
grids_meta = [
(0, "street boundaries"),
(3, "crashables (except cars)"),
(7, "street markings"),
(4, "current lane"),
(1, "cars"),
(2, "cars in mirrors")
]
titles = [title for idx, title in grids_meta]
grids = to_numpy(grids[0])
grids = [grids[idx] for idx, title in grids_meta]
#self.grid_to_graph(scr, grids[0])
bgcolor = [0, 0, 0]
image = np.zeros((720, 1280, 3), dtype=np.uint8) + bgcolor
scr_main = ia.imresize_single_image(scr, (int(720*0.58), int(1280*0.58)))
#util.draw_image(image, y=720-scr_main.shape[0], x=1080-scr_main.shape[1], other_img=scr_main, copy=False)
util.draw_image(
image,
y=int((image.shape[0]-scr_main.shape[0])/2),
x=1280-scr_main.shape[1]-2,
other_img=scr_main,
copy=False
)
image = util.draw_text(
image,
x=1280-(scr_main.shape[1]//2)-125,
y=image.shape[0] - int((image.shape[0]-scr_main.shape[0])/2) + 10,
text="Framerate matches the one that the model sees (10fps).",
size=10,
color=[128, 128, 128]
)
grid_rel_size = 0.19
scr_small = ia.imresize_single_image(scr, (int(720*grid_rel_size), int(1280*grid_rel_size)))
grid_hms = []
for grid, title in zip(grids, titles):
grid = (grid*255).astype(np.uint8)[:,:,np.newaxis]
grid = ia.imresize_single_image(grid, (int(720*grid_rel_size), int(1280*grid_rel_size)), interpolation="nearest")
grid_hm = util.draw_heatmap_overlay(scr_small, grid/255)
grid_hm = np.pad(grid_hm, ((2, 0), (2, 2), (0, 0)), mode="constant", constant_values=np.average(bgcolor))
#grid_hm = np.pad(grid_hm, ((0, 20), (0, 0), (0, 0)), mode="constant", constant_values=0)
#grid_hm[-20:, 2:-2, :] = [128, 128, 255]
#grid_hm = util.draw_text(grid_hm, x=4, y=grid_hm.shape[0]-16, text=title, size=10, color=[255, 255, 255])
grid_hm = np.pad(grid_hm, ((40, 0), (0, 0), (0, 0)), mode="constant", constant_values=0)
grid_hm = util.draw_text(grid_hm, x=4, y=20, text=title, size=12, color=[255, 255, 255])
grid_hms.append(grid_hm)
grid_hms = ia.draw_grid(grid_hms, cols=2)
util.draw_image(image, y=70, x=0, other_img=grid_hms, copy=False)
return image
def tsalesman(self, graph):
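        """Approximate a travelling-salesman ordering of |graph|: sample 1000
        random node orderings and keep the one with the shortest closed-tour
        length (the tour includes the edge from the last node back to the
        first)."""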
if len(graph) <= 2:
return graph
paths = []
for _ in xrange(1000):
ids = list(range(len(graph)))
np.random.shuffle(ids)
path = [graph[idx] for idx in ids]
paths.append(path)
def length_edge(node1, node2):
d = math.sqrt((node1[0]-node2[0])**2 + (node1[1]-node2[1])**2)
return d
def length_path(path):
length_sum = length_edge(path[0], path[-1])
for i in xrange(1, len(path)):
length_sum += length_edge(path[i-1], path[i])
return length_sum
paths_l = [(path, length_path(path)) for path in paths]
paths = sorted(paths_l, key=lambda t: t[1])
return paths[0][0]
def grid_to_graph(self, scr, grid):
import scipy.ndimage as ndimage
import scipy.ndimage.filters as filters
data = grid
neighborhood_size = 7
#threshold_max = 0.5
threshold_diff = 0.1
threshold_score = 0.2
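        # Added note: peaks are local maxima under a 7x7 maximum filter
        # (neighborhood_size); candidate maxima whose local maximum is below
        # 0.2 are discarded, and detections are only kept if their average
        # score exceeds threshold_score.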
data_max = filters.maximum_filter(data, neighborhood_size)
maxima = (data == data_max)
data_min = filters.minimum_filter(data, neighborhood_size)
#diff = ((data_max - data_min) > threshold_diff)
#maxima[diff == 0] = 0
maxima[data_max < 0.2] = 0
labeled, num_objects = ndimage.label(maxima)
slices = ndimage.find_objects(labeled)
xx, yy, score = [], [], []
for dy, dx in slices:
x_center = (dx.start + dx.stop - 1)/2
y_center = (dy.start + dy.stop - 1)/2
s = np.average(data[dy.start:dy.stop+1, dx.start:dx.stop+1])
if s > threshold_score:
xx.append(x_center / grid.shape[1])
yy.append(y_center / grid.shape[0])
score.append(s)
graph = list(zip(xx, yy, score))
        path = self.tsalesman(graph)
paths_final = [path]
scr_viz = np.copy(scr)
h, w = scr.shape[0:2]
#hup, wup = h/grid.shape[0], w/grid.shape[1]
hup, wup = h, w
for i, (x, y, s) in enumerate(zip(xx, yy, score)):
size = 3*int(s*10)
size = size if size % 2 != 0 else size - 1
scr_viz = util.draw_point(scr_viz, y=int(y*hup), x=int(x*wup), size=size, color=[0, 255, 0])
scr_viz = util.draw_text(scr_viz, y=int(y*hup), x=int(x*wup), text=str(i), color=[0, 255, 0])
colors = [[255, 0, 0], [0, 255, 0], [0, 0, 255], [255, 255, 255], [0, 0, 0]]
for path, col in zip(paths_final, colors):
last_x = None
last_y = None
for (x, y, s) in path:
if last_x is not None:
scr_viz = util.draw_line(scr_viz, y1=int(last_y*hup), x1=int(last_x*wup), y2=int(y*hup), x2=int(x*wup), color=col, thickness=2)
last_x = x
last_y = y
misc.imshow(scr_viz)
"""
paths_final = []
graph = list(zip(xx, yy, score))
for _ in range(1):
paths_final_flat = flatten_list(paths_final)
print("paths_final_flat", paths_final_flat)
source_candidates = [(i, (x, y, s)) for i, (x, y, s) in enumerate(graph) if (x, y, s) not in paths_final_flat]
if len(source_candidates) == 0:
break
else:
#print("source_candidates", source_candidates)
#source_score = max([s for (i, (x, y, s)) in source_candidates])
#print("source_score", source_score)
#source_id = [i for (i, (x, y, s)) in source_candidates if s == source_score][0]
source_val = min([x for (i, (x, y, s)) in source_candidates])
source_id = [i for (i, (x, y, s)) in source_candidates if x == source_val][0]
print("source_id", source_id)
_, _, paths = self.dijkstra(graph, source_id, already_done=[i for i, (x, y, s) in enumerate(paths_final_flat)])
if len(paths) == 0:
break
else:
print("paths", paths)
#best_path = sorted(paths, key=lambda t: -t[1]+t[2], reverse=True)[0]
best_path = sorted(paths, key=lambda t: t[2], reverse=True)[0]
best_path = best_path[0]
print("best_path ids", best_path)
best_path = [graph[idx] for idx in best_path]
print("best_path", best_path)
paths_final.append(best_path)
paths_final = [path for path in paths_final if len(path) > 1]
scr_viz = np.copy(scr)
h, w = scr.shape[0:2]
#hup, wup = h/grid.shape[0], w/grid.shape[1]
hup, wup = h, w
for i, (x, y, s) in enumerate(zip(xx, yy, score)):
size = 3*int(s*10)
size = size if size % 2 != 0 else size - 1
scr_viz = util.draw_point(scr_viz, y=int(y*hup), x=int(x*wup), size=size, color=[0, 255, 0])
scr_viz = util.draw_text(scr_viz, y=int(y*hup), x=int(x*wup), text=str(i), color=[0, 255, 0])
colors = [[255, 0, 0], [0, 255, 0], [0, 0, 255], [255, 255, 255], [0, 0, 0]]
for path, col in zip(paths_final, colors):
last_x = None
last_y = None
for (x, y, s) in path:
if last_x is not None:
scr_viz = util.draw_line(scr_viz, y1=int(last_y*hup), x1=int(last_x*wup), y2=int(y*hup), x2=int(x*wup), color=col, thickness=2)
last_x = x
last_y = y
misc.imshow(scr_viz)
"""
#misc.imshow(np.hstack([scr, scr_viz]))
"""
def shortest(self, graph):
edges = []
def dijkstra(self, graph, source_id, distance_threshold=0.5, already_done=None):
already_done = set() if already_done is None else set(already_done)
id_to_vertex = dict([(i, v) for i, v in enumerate(graph)])
vertex_to_id = dict([(v, i) for i, v in enumerate(graph)])
graph_ids = [i for i, _ in enumerate(graph)]
def length(id1, id2):
d = (graph[id1][0]-graph[id2][0])**2 + (graph[id1][1]-graph[id2][1])**2
if id1 in already_done or id2 in already_done:
d = d + 0.2
d = d / (0.5*(id_to_vertex[id1][2] + id_to_vertex[id2][2]))
print("length", id1, id2, d)
return d
def neighbours(id1):
n = []
for id2, v in id_to_vertex.items():
#print(id1, id2, length(id1, id2))
if id1 != id2 and length(id1, id2) < distance_threshold:
n.append(id2)
return n
def mindist(dist, Q):
mindist_val = 999999
mindist_id = -1
for vid in Q:
if dist[vid] < mindist_val:
                    mindist_val = dist[vid]
mindist_id = vid
return mindist_id
dist = dict()
prev = dict()
Q = set()
for vid in graph_ids:
dist[vid] = 999999
prev[vid] = None
Q.add(vid)
dist[source_id] = 0
prev[source_id] = None
while len(Q) > 0:
uid = mindist(dist, Q)
print("do", uid)
if uid == -1:
print(Q)
print(dist)
if uid == -1:
break
else:
Q.remove(uid)
for vid in neighbours(uid):
alt = dist[uid] + length(uid, vid)
print("neighbour %d -> %d | d=%.2f" % (uid, vid, alt))
if alt < dist[vid]:
print("closer!")
dist[vid] = alt
prev[vid] = uid
print("dist", dist)
print("prev", prev)
# paths
ps = []
for i, v in enumerate(graph):
last_node_id = i
p = [last_node_id]
sum_dist = 0
count_nodes = 1
while True:
curr_node_id = prev[last_node_id]
if curr_node_id is None:
break
else:
p.append(curr_node_id)
count_nodes += 1
sum_dist += length(last_node_id, curr_node_id)
last_node_id = curr_node_id
ps.append((p, sum_dist, count_nodes))
print("ps", ps)
ps = [p for p in ps if p[0][-1] == source_id]
print("ps red", ps)
return dist, prev, ps
"""
def flatten_list(l):
return [item for sublist in l for item in sublist]
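# Note (added): despite its name, simplesoftmax below only normalizes
# non-negative values so they sum to 1; it does not exponentiate like a
# true softmax, and it returns zeros when the input sums to zero or less.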
def simplesoftmax(l):
    s = np.sum(l)
    if s > 0:
        return l/s
    else:
        # Return a zero array with the same shape as the input, not a scalar.
        return np.zeros_like(l)
if __name__ == "__main__":
main()
|
|
from dateutil.parser import parse as dtparse
import locale
locale.setlocale(locale.LC_ALL, '')
class ReleaseNoteTxt(object):
def __init__(self, changes):
self.changes = changes
#pprint(self.changes)
def save(self, filepath):
try:
import prettytable
except ImportError:
raise ImportError(
"Please install prettytable to use this rendered")
def format_number(n, sign=None):
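            """Format |n| with thousands grouping and an optional sign prefix,
            e.g. 1234567 -> '1,234,567' under an English locale; non-numeric
            input falls back to 'N.A'."""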
s = ""
if sign:
if n > 0:
s = "+"
elif n < 0:
s = "-"
try:
n = abs(n)
strn = "%s%s" % (s, locale.format("%d", n, grouping=True))
except TypeError:
# something wrong with converting, maybe we don't even have a number to format...
strn = "N.A"
return strn
txt = ""
title = "Build version: '%s'" % self.changes["new"]["_version"]
txt += title + "\n"
txt += "".join(["="] * len(title)) + "\n"
dt = dtparse(self.changes["generated_on"])
txt += "Previous build version: '%s'\n" % self.changes["old"][
"_version"]
txt += "Generated on: %s\n" % dt.strftime("%Y-%m-%d at %H:%M:%S")
txt += "\n"
table = prettytable.PrettyTable([
"Updated datasource", "prev. release", "new release",
"prev. # of docs", "new # of docs"
])
table.align["Updated datasource"] = "l"
table.align["prev. release"] = "c"
table.align["new release"] = "c"
table.align["prev. # of docs"] = "r"
table.align["new # of docs"] = "r"
for src, info in sorted(self.changes["sources"]["added"].items(),
key=lambda e: e[0]):
main_info = dict([(k, v) for k, v in info.items()
if k.startswith("_")])
sub_infos = dict([(k, v) for k, v in info.items()
if not k.startswith("_")])
if sub_infos:
for sub, sub_info in sub_infos.items():
table.add_row([
"%s.%s" % (src, sub), "-", main_info["_version"], "-",
format_number(sub_info["_count"])
]) # only _count avail there
else:
main_count = main_info.get("_count") and format_number(
main_info["_count"]) or ""
table.add_row(
[src, "-",
main_info.get("_version", ""), "-", main_count])
for src, info in sorted(self.changes["sources"]["deleted"].items(),
key=lambda e: e[0]):
main_info = dict([(k, v) for k, v in info.items()
if k.startswith("_")])
sub_infos = dict([(k, v) for k, v in info.items()
if not k.startswith("_")])
if sub_infos:
for sub, sub_info in sub_infos.items():
table.add_row([
"%s.%s" % (src, sub),
main_info.get("_version", ""), "-",
format_number(sub_info["_count"]), "-"
]) # only _count avail there
else:
main_count = main_info.get("_count") and format_number(
main_info["_count"]) or ""
table.add_row(
[src,
main_info.get("_version", ""), "-", main_count, "-"])
for src, info in sorted(self.changes["sources"]["updated"].items(),
key=lambda e: e[0]):
# extract information from main-source
old_main_info = dict([(k, v) for k, v in info["old"].items()
if k.startswith("_")])
new_main_info = dict([(k, v) for k, v in info["new"].items()
if k.startswith("_")])
old_main_count = old_main_info.get("_count") and format_number(
old_main_info["_count"]) or None
new_main_count = new_main_info.get("_count") and format_number(
new_main_info["_count"]) or None
if old_main_count is None:
assert new_main_count is None, "Sub-sources found for '%s', old and new count should " % src + \
"both be None. Info was: %s" % info
old_sub_infos = dict([(k, v) for k, v in info["old"].items()
if not k.startswith("_")])
new_sub_infos = dict([(k, v) for k, v in info["new"].items()
if not k.startswith("_")])
# old & new sub_infos should have the same structure (same existing keys)
# so we just use one of them to explore
if old_sub_infos:
assert new_sub_infos
for sub, sub_info in old_sub_infos.items():
table.add_row([
"%s.%s" % (src, sub),
old_main_info.get("_version", ""),
new_main_info.get("_version", ""),
format_number(sub_info["_count"]),
format_number(new_sub_infos[sub]["_count"])
])
else:
assert new_main_count is not None, "No sub-sources found, old and new count should NOT " + \
"both be None. Info was: %s" % info
table.add_row([
src,
old_main_info.get("_version", ""),
new_main_info.get("_version", ""), old_main_count,
new_main_count
])
if table._rows:
txt += table.get_string()
txt += "\n"
else:
txt += "No datasource changed.\n"
total_count = self.changes["new"].get("_count")
if self.changes["sources"]["added"]:
txt += "New datasource(s): %s\n" % ", ".join(
sorted(list(self.changes["sources"]["added"])))
if self.changes["sources"]["deleted"]:
txt += "Deleted datasource(s): %s\n" % ", ".join(
sorted(list(self.changes["sources"]["deleted"])))
if self.changes["sources"]:
txt += "\n"
table = prettytable.PrettyTable(["Updated stats.", "previous", "new"])
table.align["Updated stats."] = "l"
table.align["previous"] = "r"
table.align["new"] = "r"
for stat_name, stat in sorted(self.changes["stats"]["added"].items(),
key=lambda e: e[0]):
table.add_row([stat_name, "-", format_number(stat["_count"])])
for stat_name, stat in sorted(self.changes["stats"]["deleted"].items(),
key=lambda e: e[0]):
table.add_row([stat_name, format_number(stat["_count"]), "-"])
for stat_name, stat in sorted(self.changes["stats"]["updated"].items(),
key=lambda e: e[0]):
table.add_row([
stat_name,
format_number(stat["old"]["_count"]),
format_number(stat["new"]["_count"])
])
if table._rows:
txt += table.get_string()
txt += "\n\n"
if self.changes["new"]["_fields"]:
new_fields = sorted(self.changes["new"]["_fields"].get("add", []))
deleted_fields = self.changes["new"]["_fields"].get("remove", [])
updated_fields = self.changes["new"]["_fields"].get("replace", [])
if new_fields:
txt += "New field(s): %s\n" % ", ".join(new_fields)
if deleted_fields:
txt += "Deleted field(s): %s\n" % ", ".join(deleted_fields)
if updated_fields:
txt += "Updated field(s): %s\n" % ", ".join(updated_fields)
txt += "\n"
if total_count is not None:
txt += "Overall, %s documents in this release\n" % (
format_number(total_count))
if self.changes["new"]["_summary"]:
sumups = []
sumups.append(
"%s document(s) added" %
format_number(self.changes["new"]["_summary"].get("add", 0)))
sumups.append("%s document(s) deleted" % format_number(
self.changes["new"]["_summary"].get("delete", 0)))
sumups.append("%s document(s) updated" % format_number(
self.changes["new"]["_summary"].get("update", 0)))
txt += ", ".join(sumups) + "\n"
else:
txt += "No information available for added/deleted/updated documents\n"
if self.changes.get("note"):
txt += "\n"
txt += "Note: %s\n" % self.changes["note"]
with open(filepath, "w") as fout:
fout.write(txt)
return txt
|
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend sinertalers received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a sinertalerd or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting SIN values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the sinertaler data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.sinertaler")
def read_sinertaler_config(dbdir):
"""Read the sinertaler.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "sinertaler.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a sinertaler JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 49876 if testnet else 39876
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the sinertalerd we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(sinertalerd):
info = sinertalerd.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
sinertalerd.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = sinertalerd.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(sinertalerd):
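    """Return a dict keyed by address, each value holding "total" (summed
    unspent value), "outputs" (the raw listunspent entries) and "account"
    (looked up via listreceivedbyaddress)."""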
address_summary = dict()
address_to_account = dict()
for info in sinertalerd.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = sinertalerd.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = sinertalerd.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-sinertaler-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
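# Hedged example (not part of the original script): demonstrates the greedy
# behaviour of select_coins with made-up unspent outputs. The txids and
# amounts below are purely illustrative.
def _example_select_coins():
    fake_inputs = [
        {"txid": "aa" * 32, "vout": 0, "amount": Decimal("5.0")},
        {"txid": "bb" * 32, "vout": 1, "amount": Decimal("3.0")},
        {"txid": "cc" * 32, "vout": 0, "amount": Decimal("2.0")},
    ]
    # Picks the first two outputs (5 + 3 = 8) and returns a change amount of 2.
    return select_coins(Decimal("6.0"), fake_inputs)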
def create_tx(sinertalerd, fromaddresses, toaddress, amount, fee):
all_coins = list_available(sinertalerd)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f SIN available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to sinertalerd.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = sinertalerd.createrawtransaction(inputs, outputs)
signed_rawtx = sinertalerd.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(sinertalerd, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = sinertalerd.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(sinertalerd, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = sinertalerd.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(sinertalerd, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
        # The actual fee paid is the difference between inputs and outputs.
        if kb > 1 and (total_in-total_out) < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and (total_in-total_out) < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get sinertalers from")
parser.add_option("--to", dest="to", default=None,
help="address to get send sinertalers to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of sinertaler.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_sinertaler_config(options.datadir)
if options.testnet: config['testnet'] = True
sinertalerd = connect_JSON(config)
if options.amount is None:
address_summary = list_available(sinertalerd)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(sinertalerd) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(sinertalerd, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(sinertalerd, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = sinertalerd.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
|
#
# Copyright 2014 Red Hat
#
# Author: Chris Dent <chdent@redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Converters for producing hardware sensor data sample messages from
notification events.
"""
from oslo.config import cfg
from oslo import messaging
from ceilometer.openstack.common import log
from ceilometer import plugin
from ceilometer import sample
LOG = log.getLogger(__name__)
OPTS = [
cfg.StrOpt('ironic_exchange',
default='ironic',
help='Exchange name for Ironic notifications.'),
]
cfg.CONF.register_opts(OPTS)
# Map unit name to SI
UNIT_MAP = {
'Watts': 'W',
'Volts': 'V',
}
def validate_reading(data):
"""Some sensors read "Disabled"."""
return data != 'Disabled'
def transform_id(data):
return data.lower().replace(' ', '_')
def parse_reading(data):
try:
volume, unit = data.split(' ', 1)
unit = unit.rsplit(' ', 1)[-1]
return float(volume), UNIT_MAP.get(unit, unit)
except ValueError:
raise InvalidSensorData('unable to parse sensor reading: %s' %
data)
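# Editor's sketch (not part of the original module): how parse_reading() and
# UNIT_MAP behave on typical IPMI-style readings.  The reading strings below
# are hypothetical examples of the expected "<value> ... <unit>" layout.
def _example_parse_reading():
    # Unknown units pass through unchanged.
    assert parse_reading('4352 (+/- 0) RPM') == (4352.0, 'RPM')
    # Known units are normalized to their SI symbol via UNIT_MAP.
    assert parse_reading('250 (+/- 0) Volts') == (250.0, 'V')
    # "Disabled" readings are filtered out beforehand by validate_reading().
    assert not validate_reading('Disabled')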
def parse_event_reading(key, real_data):
    try:
        element_array = real_data[key].split('|')
        state = element_array[5].strip(' ').lower()
        if state == 'asser':
            volume = 1
        elif state == 'deasser':
            volume = 0
        else:
            volume = 2
        return volume
    except ValueError:
        raise InvalidSensorData('unable to parse sensor reading: %s' %
                                real_data[key])
class InvalidSensorData(ValueError):
pass
class SensorNotification(plugin.NotificationBase):
"""A generic class for extracting samples from sensor data notifications.
A notification message can contain multiple samples from multiple
sensors, all with the same basic structure: the volume for the sample
is found as part of the value of a 'Sensor Reading' key. The unit
is in the same value.
Subclasses exist solely to allow flexibility with stevedore configuration.
"""
event_types = ['hardware.ipmi.*']
metric = None
@staticmethod
def get_targets(conf):
"""oslo.messaging.TargetS for this plugin."""
return [messaging.Target(topic=topic,
exchange=conf.ironic_exchange)
for topic in conf.notification_topics]
def _get_sample(self, message):
try:
return (payload for _, payload
in message['payload'][self.metric].items())
except KeyError:
return []
def _package_event_payload(self, message, payload):
# NOTE(chdent): How much of the payload should we keep?
payload['node'] = message['payload']['node_uuid']
info = {'publisher_id': message['publisher_id'],
'timestamp': message['payload']['timestamp'],
'event_type': message['payload']['event_type'],
'user_id': message['payload'].get('user_id'),
'project_id': message['payload'].get('project_id'),
'payload': payload}
return info
def _package_payload(self, message, payload):
# NOTE(chdent): How much of the payload should we keep?
payload['node'] = message['payload']['node_uuid']
info = {'publisher_id': message['publisher_id'],
'timestamp': message['payload']['timestamp'],
'event_type': message['payload']['event_type'],
'user_id': message['payload'].get('user_id'),
'project_id': message['payload'].get('project_id'),
'payload': payload}
return info
def process_notification(self, message):
"""Read and process a notification.
The interesting data is in the dict value of the message's 'payload'
key, which itself contains a 'payload' key holding a dict of
multiple sensor readings.
If expected keys in the payload are missing or values
are not in the expected form for transformations,
KeyError and ValueError are caught and the current
sensor payload is skipped.
"""
payloads = self._get_sample(message['payload'])
if self.metric == "sel":
    message_payload = message['payload']
    name = message_payload['event_type']
    short_name = name.split(".")[-1]
    unit = "NA"
    this_uuid = message_payload['node_uuid']
    real_message = message_payload['payload']
    event_key_list = real_message[short_name].keys()
    for each in event_key_list:
        volume = parse_event_reading(each, real_message[short_name])
        resource_id = '%(nodeid)s-%(eventname)s' % {'nodeid': this_uuid,
                                                    'eventname': each}
        info = self._package_event_payload(message, real_message)
        yield sample.Sample.from_notification(
            name=name,
            type=sample.TYPE_GAUGE,
            unit=unit,
            volume=volume,
            resource_id=resource_id,
            message=info,
            user_id=info['user_id'],
            project_id=info['project_id'])
else:
for payload in payloads:
try:
# Provide a fallback resource_id in case parts are missing.
resource_id = 'missing id'
try:
resource_id = '%(nodeid)s-%(sensorid)s' % {
'nodeid': message['payload']['node_uuid'],
'sensorid': transform_id(payload['Sensor ID'])
}
except KeyError as exc:
raise InvalidSensorData('missing key in payload: %s' % exc)
info = self._package_payload(message, payload)
try:
sensor_reading = info['payload']['Sensor Reading']
except KeyError as exc:
raise InvalidSensorData(
"missing 'Sensor Reading' in payload"
)
if validate_reading(sensor_reading):
volume, unit = parse_reading(sensor_reading)
yield sample.Sample.from_notification(
name='hardware.ipmi.%s' % self.metric.lower(),
type=sample.TYPE_GAUGE,
unit=unit,
volume=volume,
resource_id=resource_id,
message=info,
user_id=info['user_id'],
project_id=info['project_id'])
except InvalidSensorData as exc:
LOG.warn(
'invalid sensor data for %(resource)s: %(error)s' %
dict(resource=resource_id, error=exc)
)
continue
class TemperatureSensorNotification(SensorNotification):
metric = 'Temperature'
class CurrentSensorNotification(SensorNotification):
metric = 'Current'
class FanSensorNotification(SensorNotification):
metric = 'Fan'
class VoltageSensorNotification(SensorNotification):
metric = 'Voltage'
class SelNotification(SensorNotification):
metric = 'sel'
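# Editor's sketch (not part of the original module): the minimal notification
# shape that SensorNotification.process_notification() expects, reconstructed
# from the keys it reads above.  All concrete values here are hypothetical.
_EXAMPLE_SENSOR_MESSAGE = {
    'publisher_id': 'hardware.example.com',
    'payload': {
        'node_uuid': '00000000-0000-0000-0000-000000000000',
        'timestamp': '2014-07-24 12:00:00',
        'event_type': 'hardware.ipmi.metrics.update',
        'user_id': None,
        'project_id': None,
        # Inner payload: {metric name: {sensor name: sensor payload}}
        'payload': {
            'Temperature': {
                'DIMM Temp': {
                    'Sensor ID': 'DIMM Temp (0x1)',
                    'Sensor Reading': '26 (+/- 0.5) degrees C',
                },
            },
        },
    },
}
# TemperatureSensorNotification would turn this message into a single
# 'hardware.ipmi.temperature' gauge sample with volume 26.0 and unit 'C'.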
|
|
#!/usr/bin/python
# (c) 2016, Marcin Skarbek <github@skarbek.name>
# (c) 2016, Andreas Olsson <andreas@arrakis.se>
# (c) 2017, Loic Blot <loic.blot@unix-experience.fr>
#
# This module was ported from https://github.com/mskarbek/ansible-nsupdate
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nsupdate
short_description: Manage DNS records.
description:
- Create, update and remove DNS records using DDNS updates
- DDNS works well with both bind and Microsoft DNS (see https://technet.microsoft.com/en-us/library/cc961412.aspx)
version_added: "2.3"
requirements:
- dnspython
author: "Loic Blot (@nerzhul)"
options:
state:
description:
- Manage DNS record.
choices: ['present', 'absent']
default: 'present'
server:
description:
- Apply DNS modification on this server.
required: true
key_name:
description:
- Use TSIG key name to authenticate against DNS C(server)
key_secret:
description:
- Use TSIG key secret, associated with C(key_name), to authenticate against C(server)
key_algorithm:
description:
- Specify key algorithm used by C(key_secret).
choices: ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224', 'hmac-sha256', 'hmac-sha384',
'hmac-sha512']
default: 'hmac-md5'
zone:
description:
- DNS record will be modified on this C(zone).
required: true
record:
description:
- Sets the DNS record to modify.
required: true
type:
description:
- Sets the record type.
default: 'A'
ttl:
description:
- Sets the record TTL.
default: 3600
value:
description:
- Sets the record value.
default: None
'''
EXAMPLES = '''
- name: Add or modify ansible.example.org A to 192.168.1.1"
nsupdate:
key_name: "nsupdate"
key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
server: "10.1.1.1"
zone: "example.org"
record: "ansible"
value: "192.168.1.1"
- name: Remove puppet.example.org CNAME
nsupdate:
key_name: "nsupdate"
key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
server: "10.1.1.1"
zone: "example.org"
record: "puppet"
type: "CNAME"
state: absent
'''
RETURN = '''
changed:
description: Whether the module has modified the record
returned: success
type: string
record:
description: DNS record
returned: success
type: string
sample: 'ansible'
ttl:
description: DNS record TTL
returned: success
type: int
sample: 86400
type:
description: DNS record type
returned: success
type: string
sample: 'CNAME'
value:
description: DNS record value
returned: success
type: string
sample: '192.168.1.1'
zone:
description: DNS record zone
returned: success
type: string
sample: 'example.org.'
dns_rc:
description: dnspython return code
returned: always
type: int
sample: 4
dns_rc_str:
description: dnspython return code (string representation)
returned: always
type: string
sample: 'REFUSED'
'''
from binascii import Error as binascii_error
from socket import error as socket_error
try:
import dns.update
import dns.query
import dns.tsigkeyring
import dns.message
import dns.resolver
HAVE_DNSPYTHON = True
except ImportError:
HAVE_DNSPYTHON = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
class RecordManager(object):
def __init__(self, module):
self.module = module
if module.params['zone'][-1] != '.':
self.zone = module.params['zone'] + '.'
else:
self.zone = module.params['zone']
if module.params['key_name']:
try:
self.keyring = dns.tsigkeyring.from_text({
module.params['key_name']: module.params['key_secret']
})
except TypeError:
module.fail_json(msg='Missing key_secret')
except binascii_error as e:
module.fail_json(msg='TSIG key error: %s' % to_native(e))
else:
self.keyring = None
if module.params['key_algorithm'] == 'hmac-md5':
self.algorithm = 'HMAC-MD5.SIG-ALG.REG.INT'
else:
self.algorithm = module.params['key_algorithm']
self.dns_rc = 0
def __do_update(self, update):
response = None
try:
response = dns.query.tcp(update, self.module.params['server'], timeout=10)
except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e:
self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e)))
except (socket_error, dns.exception.Timeout) as e:
self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e)))
return response
def create_or_update_record(self):
result = {'changed': False, 'failed': False}
exists = self.record_exists()
if exists in [0, 2]:
if self.module.check_mode:
self.module.exit_json(changed=True)
if exists == 0:
self.dns_rc = self.create_record()
if self.dns_rc != 0:
result['msg'] = "Failed to create DNS record (rc: %d)" % self.dns_rc
elif exists == 2:
self.dns_rc = self.modify_record()
if self.dns_rc != 0:
result['msg'] = "Failed to update DNS record (rc: %d)" % self.dns_rc
if self.dns_rc != 0:
result['failed'] = True
else:
result['changed'] = True
else:
result['changed'] = False
return result
def create_record(self):
update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
try:
update.add(self.module.params['record'],
self.module.params['ttl'],
self.module.params['type'],
self.module.params['value'])
except AttributeError:
self.module.fail_json(msg='value needed when state=present')
except dns.exception.SyntaxError:
self.module.fail_json(msg='Invalid/malformed value')
response = self.__do_update(update)
return dns.message.Message.rcode(response)
def modify_record(self):
update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
update.replace(self.module.params['record'],
self.module.params['ttl'],
self.module.params['type'],
self.module.params['value'])
response = self.__do_update(update)
return dns.message.Message.rcode(response)
def remove_record(self):
result = {'changed': False, 'failed': False}
if self.record_exists() == 0:
return result
# In check mode, when the record exists, report the change without applying it.
if self.module.check_mode:
self.module.exit_json(changed=True)
update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
update.delete(self.module.params['record'], self.module.params['type'])
response = self.__do_update(update)
self.dns_rc = dns.message.Message.rcode(response)
if self.dns_rc != 0:
result['failed'] = True
result['msg'] = "Failed to delete record (rc: %d)" % self.dns_rc
else:
result['changed'] = True
return result
def record_exists(self):
update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
try:
update.present(self.module.params['record'], self.module.params['type'])
except dns.rdatatype.UnknownRdatatype as e:
self.module.fail_json(msg='Record error: {0}'.format(to_native(e)))
response = self.__do_update(update)
self.dns_rc = dns.message.Message.rcode(response)
if self.dns_rc == 0:
if self.module.params['state'] == 'absent':
return 1
try:
update.present(self.module.params['record'], self.module.params['type'], self.module.params['value'])
except AttributeError:
self.module.fail_json(msg='value needed when state=present')
except dns.exception.SyntaxError:
self.module.fail_json(msg='Invalid/malformed value')
response = self.__do_update(update)
self.dns_rc = dns.message.Message.rcode(response)
if self.dns_rc == 0:
return 1
else:
return 2
else:
return 0
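# Editor's note (not part of the original module): record_exists() signals its
# result with a small integer rather than a boolean --
#   0  no record of the requested type exists,
#   1  the record exists (for state=present, already with the requested value),
#   2  the record exists but with a different value, so an update is needed.
# create_or_update_record() and remove_record() above branch on these values.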
def main():
tsig_algs = ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224',
'hmac-sha256', 'hmac-sha384', 'hmac-sha512']
module = AnsibleModule(
argument_spec=dict(
state=dict(required=False, default='present', choices=['present', 'absent'], type='str'),
server=dict(required=True, type='str'),
key_name=dict(required=False, type='str'),
key_secret=dict(required=False, type='str', no_log=True),
key_algorithm=dict(required=False, default='hmac-md5', choices=tsig_algs, type='str'),
zone=dict(required=True, type='str'),
record=dict(required=True, type='str'),
type=dict(required=False, default='A', type='str'),
ttl=dict(required=False, default=3600, type='int'),
value=dict(required=False, default=None, type='str')
),
supports_check_mode=True
)
if not HAVE_DNSPYTHON:
module.fail_json(msg='python library dnspython required: pip install dnspython')
if len(module.params["record"]) == 0:
module.fail_json(msg='record cannot be empty.')
record = RecordManager(module)
result = {}
if module.params["state"] == 'absent':
result = record.remove_record()
elif module.params["state"] == 'present':
result = record.create_or_update_record()
result['dns_rc'] = record.dns_rc
result['dns_rc_str'] = dns.rcode.to_text(record.dns_rc)
if result['failed']:
module.fail_json(**result)
else:
result['record'] = dict(zone=record.zone,
record=module.params['record'],
type=module.params['type'],
ttl=module.params['ttl'],
value=module.params['value'])
module.exit_json(**result)
if __name__ == '__main__':
main()
|
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# copyright 2014 Hamilton Kibbe <ham@hamiltonkib.be>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
CAM File
============
**CAM file classes**
This module provides common base classes for Excellon/Gerber CNC files
"""
class FileSettings(object):
""" CAM File Settings
Provides a common representation of gerber/excellon file settings
Parameters
----------
notation: string
notation format. either 'absolute' or 'incremental'
units : string
Measurement units. 'inch' or 'metric'
zero_suppression: string
'leading' to suppress leading zeros, 'trailing' to suppress trailing zeros.
This is the convention used in Gerber files.
format : tuple (int, int)
Decimal format
zeros : string
'leading' to include leading zeros, 'trailing' to include trailing zeros.
This is the convention used in Excellon files
Notes
-----
Either `zeros` or `zero_suppression` should be specified; there is no need to
specify both. `zero_suppression` will take on the opposite value of `zeros`
and vice versa.
"""
def __init__(self, notation='absolute', units='inch',
zero_suppression=None, format=(2, 5), zeros=None,
angle_units='degrees'):
if notation not in ['absolute', 'incremental']:
raise ValueError('Notation must be either absolute or incremental')
self.notation = notation
if units not in ['inch', 'metric']:
raise ValueError('Units must be either inch or metric')
self.units = units
if zero_suppression is None and zeros is None:
self.zero_suppression = 'trailing'
elif zero_suppression == zeros:
raise ValueError('Zeros and Zero Suppression must be different. '
                 'Best practice is to specify only one.')
elif zero_suppression is not None:
if zero_suppression not in ['leading', 'trailing']:
# This is a common problem in Eagle files, so just suppress it
self.zero_suppression = 'leading'
else:
self.zero_suppression = zero_suppression
elif zeros is not None:
if zeros not in ['leading', 'trailing']:
raise ValueError('Zeros must be either leading or trailing')
self.zeros = zeros
if len(format) != 2:
raise ValueError('Format must be a tuple(n=2) of integers')
self.format = format
if angle_units not in ('degrees', 'radians'):
raise ValueError('Angle units may be degrees or radians')
self.angle_units = angle_units
@property
def zero_suppression(self):
return self._zero_suppression
@zero_suppression.setter
def zero_suppression(self, value):
self._zero_suppression = value
self._zeros = 'leading' if value == 'trailing' else 'trailing'
@property
def zeros(self):
return self._zeros
@zeros.setter
def zeros(self, value):
self._zeros = value
self._zero_suppression = 'leading' if value == 'trailing' else 'trailing'
def __getitem__(self, key):
if key == 'notation':
return self.notation
elif key == 'units':
return self.units
elif key == 'zero_suppression':
return self.zero_suppression
elif key == 'zeros':
return self.zeros
elif key == 'format':
return self.format
elif key == 'angle_units':
return self.angle_units
else:
raise KeyError()
def __setitem__(self, key, value):
if key == 'notation':
if value not in ['absolute', 'incremental']:
raise ValueError('Notation must be either absolute or incremental')
self.notation = value
elif key == 'units':
if value not in ['inch', 'metric']:
raise ValueError('Units must be either inch or metric')
self.units = value
elif key == 'zero_suppression':
if value not in ['leading', 'trailing']:
raise ValueError('Zero suppression must be either leading or trailing')
self.zero_suppression = value
elif key == 'zeros':
if value not in ['leading', 'trailing']:
raise ValueError('Zeros must be either leading or trailing')
self.zeros = value
elif key == 'format':
if len(value) != 2:
raise ValueError('Format must be a tuple(n=2) of integers')
self.format = value
elif key == 'angle_units':
if value not in ('degrees', 'radians'):
raise ValueError('Angle units may be degrees or radians')
self.angle_units = value
else:
raise KeyError('%s is not a valid key' % key)
def __eq__(self, other):
return (self.notation == other.notation and
self.units == other.units and
self.zero_suppression == other.zero_suppression and
self.format == other.format and
self.angle_units == other.angle_units)
def __str__(self):
return ('<Settings: %s %s %s %s %s>' %
(self.units, self.notation, self.zero_suppression, self.format, self.angle_units))
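# Editor's sketch (not part of the original module): the complementary
# zeros / zero_suppression behaviour described in the FileSettings docstring.
def _example_zero_settings():
    excellon_style = FileSettings(zeros='leading')
    # Setting zeros automatically derives the opposite zero_suppression.
    assert excellon_style.zero_suppression == 'trailing'
    gerber_style = FileSettings(zero_suppression='leading')
    assert gerber_style.zeros == 'trailing'
    return excellon_style, gerber_style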
class CamFile(object):
""" Base class for Gerber/Excellon files.
Provides a common set of settings parameters.
Parameters
----------
settings : FileSettings
The current file configuration.
primitives : iterable
List of primitives in the file.
filename : string
Name of the file that this CamFile represents.
layer_name : string
Name of the PCB layer that the file represents
Attributes
----------
settings : FileSettings
File settings as a FileSettings object
notation : string
File notation setting. May be either 'absolute' or 'incremental'
units : string
File units setting. May be 'inch' or 'metric'
zero_suppression : string
File zero-suppression setting. May be either 'leading' or 'trailing'
format : tuple (<int>, <int>)
File decimal representation format as a tuple of (integer digits,
decimal digits)
"""
def __init__(self, statements=None, settings=None, primitives=None,
filename=None, layer_name=None):
if settings is not None:
self.notation = settings['notation']
self.units = settings['units']
self.zero_suppression = settings['zero_suppression']
self.zeros = settings['zeros']
self.format = settings['format']
else:
self.notation = 'absolute'
self.units = 'inch'
self.zero_suppression = 'trailing'
self.zeros = 'leading'
self.format = (2, 5)
self.statements = statements if statements is not None else []
if primitives is not None:
self.primitives = primitives
self.filename = filename
self.layer_name = layer_name
@property
def settings(self):
""" File settings
Returns
-------
settings : FileSettings (dict-like)
A FileSettings object with the specified configuration.
"""
return FileSettings(self.notation, self.units, self.zero_suppression,
self.format)
@property
def bounds(self):
""" File boundaries
"""
pass
@property
def bounding_box(self):
pass
def to_inch(self):
pass
def to_metric(self):
pass
def render(self, ctx=None, invert=False, filename=None):
""" Generate image of layer.
Parameters
----------
ctx : :class:`GerberContext`
GerberContext subclass used for rendering the image
filename : string <optional>
If provided, save the rendered image to `filename`
"""
if ctx is None:
from .render import GerberCairoContext
ctx = GerberCairoContext()
ctx.set_bounds(self.bounding_box)
ctx.paint_background()
ctx.invert = invert
ctx.new_render_layer()
for p in self.primitives:
ctx.render(p)
ctx.flatten()
if filename is not None:
ctx.dump(filename)
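# Editor's sketch (not part of the original module): typical use of
# CamFile.render() through a concrete subclass, assuming this module belongs
# to a pcb-tools style `gerber` package.  The entry point and file names
# below are hypothetical.
#
#   import gerber
#   layer = gerber.read('copper_top.gbr')      # returns a CamFile subclass
#   layer.render(filename='copper_top.png')    # rasterize via the cairo backend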
|
|
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo.config import cfg
import webob
from cinder.api import extensions
from cinder.api.v2 import volume_metadata
from cinder.api.v2 import volumes
from cinder import db
from cinder import exception
from cinder.openstack.common import jsonutils
from cinder import test
from cinder.tests.api import fakes
from cinder.tests.api.v2 import stubs
from cinder.volume import api as volume_api
CONF = cfg.CONF
def return_create_volume_metadata_max(context, volume_id, metadata, delete):
return stub_max_volume_metadata()
def return_create_volume_metadata(context, volume_id, metadata, delete):
return stub_volume_metadata()
def return_new_volume_metadata(context, volume_id, metadata, delete):
return stub_new_volume_metadata()
def return_create_volume_metadata_insensitive(context, volume_id,
                                               metadata, delete):
return stub_volume_metadata_insensitive()
def return_volume_metadata(context, volume_id):
if not isinstance(volume_id, str) or not len(volume_id) == 36:
msg = 'id %s must be a uuid in return volume metadata' % volume_id
raise Exception(msg)
return stub_volume_metadata()
def return_empty_volume_metadata(context, volume_id):
return {}
def return_empty_container_metadata(context, volume_id, metadata, delete):
return {}
def delete_volume_metadata(context, volume_id, key):
pass
def stub_volume_metadata():
metadata = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
}
return metadata
def stub_new_volume_metadata():
metadata = {
'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
}
return metadata
def stub_volume_metadata_insensitive():
metadata = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
"KEY4": "value4",
}
return metadata
def stub_max_volume_metadata():
metadata = {"metadata": {}}
for num in range(CONF.quota_metadata_items):
metadata['metadata']['key%i' % num] = "blah"
return metadata
def return_volume(context, volume_id):
return {'id': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
'name': 'fake',
'metadata': {}}
def return_volume_nonexistent(context, volume_id):
raise exception.VolumeNotFound('bogus test message')
def fake_update_volume_metadata(self, context, volume, diff):
pass
class volumeMetaDataTest(test.TestCase):
def setUp(self):
super(volumeMetaDataTest, self).setUp()
self.volume_api = volume_api.API()
fakes.stub_out_key_pair_funcs(self.stubs)
self.stubs.Set(db, 'volume_get', return_volume)
self.stubs.Set(db, 'volume_metadata_get',
return_volume_metadata)
self.stubs.Set(db, 'service_get_all_by_topic',
stubs.stub_service_get_all_by_topic)
self.stubs.Set(self.volume_api, 'update_volume_metadata',
fake_update_volume_metadata)
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
self.volume_controller = volumes.VolumeController(self.ext_mgr)
self.controller = volume_metadata.Controller()
self.req_id = str(uuid.uuid4())
self.url = '/v2/fake/volumes/%s/metadata' % self.req_id
vol = {"size": 100,
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1",
"metadata": {}}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
self.volume_controller.create(req, body)
def test_index(self):
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req, self.req_id)
expected = {
'metadata': {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
},
}
self.assertEqual(expected, res_dict)
def test_index_nonexistent_volume(self):
self.stubs.Set(db, 'volume_metadata_get',
return_volume_nonexistent)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.index, req, self.url)
def test_index_no_data(self):
self.stubs.Set(db, 'volume_metadata_get',
return_empty_volume_metadata)
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req, self.req_id)
expected = {'metadata': {}}
self.assertEqual(expected, res_dict)
def test_show(self):
req = fakes.HTTPRequest.blank(self.url + '/key2')
res_dict = self.controller.show(req, self.req_id, 'key2')
expected = {'meta': {'key2': 'value2'}}
self.assertEqual(expected, res_dict)
def test_show_nonexistent_volume(self):
self.stubs.Set(db, 'volume_metadata_get',
return_volume_nonexistent)
req = fakes.HTTPRequest.blank(self.url + '/key2')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.req_id, 'key2')
def test_show_meta_not_found(self):
self.stubs.Set(db, 'volume_metadata_get',
return_empty_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key6')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.req_id, 'key6')
def test_delete(self):
self.stubs.Set(db, 'volume_metadata_get',
return_volume_metadata)
self.stubs.Set(db, 'volume_metadata_delete',
delete_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key2')
req.method = 'DELETE'
res = self.controller.delete(req, self.req_id, 'key2')
self.assertEqual(200, res.status_int)
def test_delete_nonexistent_volume(self):
self.stubs.Set(db, 'volume_get',
return_volume_nonexistent)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.req_id, 'key1')
def test_delete_meta_not_found(self):
self.stubs.Set(db, 'volume_metadata_get',
return_empty_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key6')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.req_id, 'key6')
def test_create(self):
self.stubs.Set(db, 'volume_metadata_get',
return_empty_volume_metadata)
self.stubs.Set(db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank('/v2/volume_metadata')
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key1": "value1",
"key2": "value2",
"key3": "value3", }}
req.body = jsonutils.dumps(body)
res_dict = self.controller.create(req, self.req_id, body)
self.assertEqual(body, res_dict)
def test_create_with_keys_in_uppercase_and_lowercase(self):
# if keys are sent in both uppercase and lowercase, the response
# should contain only the ones the server actually stored
self.stubs.Set(db, 'volume_metadata_get',
return_empty_volume_metadata)
self.stubs.Set(db, 'volume_metadata_update',
return_create_volume_metadata_insensitive)
req = fakes.HTTPRequest.blank('/v2/volume_metadata')
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key1": "value1",
"KEY1": "value1",
"key2": "value2",
"KEY2": "value2",
"key3": "value3",
"KEY4": "value4"}}
expected = {"metadata": {"key1": "value1",
"key2": "value2",
"key3": "value3",
"KEY4": "value4"}}
req.body = jsonutils.dumps(body)
res_dict = self.controller.create(req, self.req_id, body)
self.assertEqual(expected, res_dict)
def test_create_empty_body(self):
self.stubs.Set(db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.req_id, None)
def test_create_item_empty_key(self):
self.stubs.Set(db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.req_id, body)
def test_create_item_key_too_long(self):
self.stubs.Set(db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {("a" * 260): "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req, self.req_id, body)
def test_create_nonexistent_volume(self):
self.stubs.Set(db, 'volume_get',
return_volume_nonexistent)
self.stubs.Set(db, 'volume_metadata_get',
return_volume_metadata)
self.stubs.Set(db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank('/v2/volume_metadata')
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key9": "value9"}}
req.body = jsonutils.dumps(body)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, req, self.req_id, body)
def test_update_all(self):
self.stubs.Set(db, 'volume_metadata_update',
return_new_volume_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {
'metadata': {
'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
},
}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.req_id, expected)
self.assertEqual(expected, res_dict)
def test_update_all_with_keys_in_uppercase_and_lowercase(self):
self.stubs.Set(db, 'volume_metadata_get',
return_create_volume_metadata)
self.stubs.Set(db, 'volume_metadata_update',
return_new_volume_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
body = {
'metadata': {
'key10': 'value10',
'KEY10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
},
}
expected = {
'metadata': {
'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
},
}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.req_id, body)
self.assertEqual(expected, res_dict)
def test_update_all_empty_container(self):
self.stubs.Set(db, 'volume_metadata_update',
return_empty_container_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'metadata': {}}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.req_id, expected)
self.assertEqual(expected, res_dict)
def test_update_all_malformed_container(self):
self.stubs.Set(db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'meta': {}}
req.body = jsonutils.dumps(expected)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update_all, req, self.req_id,
expected)
def test_update_all_malformed_data(self):
self.stubs.Set(db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'metadata': ['asdf']}
req.body = jsonutils.dumps(expected)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update_all, req, self.req_id,
expected)
def test_update_all_nonexistent_volume(self):
self.stubs.Set(db, 'volume_get', return_volume_nonexistent)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
body = {'metadata': {'key10': 'value10'}}
req.body = jsonutils.dumps(body)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update_all, req, '100', body)
def test_update_item(self):
self.stubs.Set(db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res_dict = self.controller.update(req, self.req_id, 'key1', body)
expected = {'meta': {'key1': 'value1'}}
self.assertEqual(expected, res_dict)
def test_update_item_nonexistent_volume(self):
self.stubs.Set(db, 'volume_get',
return_volume_nonexistent)
req = fakes.HTTPRequest.blank('/v2/fake/volumes/asdf/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update, req, self.req_id, 'key1',
body)
def test_update_item_empty_body(self):
self.stubs.Set(db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'key1',
None)
def test_update_item_empty_key(self):
self.stubs.Set(db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, '', body)
def test_update_item_key_too_long(self):
self.stubs.Set(db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {("a" * 260): "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.update,
req, self.req_id, ("a" * 260), body)
def test_update_item_value_too_long(self):
self.stubs.Set(db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": ("a" * 260)}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.update,
req, self.req_id, "key1", body)
def test_update_item_too_many_keys(self):
self.stubs.Set(db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1", "key2": "value2"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'key1',
body)
def test_update_item_body_uri_mismatch(self):
self.stubs.Set(db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/bad')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'bad',
body)
def test_invalid_metadata_items_on_create(self):
self.stubs.Set(db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
req.headers["content-type"] = "application/json"
#test for long key
data = {"metadata": {"a" * 260: "value1"}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.create, req, self.req_id, data)
#test for long value
data = {"metadata": {"key": "v" * 260}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.create, req, self.req_id, data)
#test for empty key.
data = {"metadata": {"": "value1"}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.req_id, data)
|
|
from collections import defaultdict
from .common import error_info
from .common import fail, warn
from .env import env
from .errors import Errors, Warnings
from .scope import Scope
from .ir import CONST, TEMP, MOVE
from .irvisitor import IRVisitor, IRTransformer
from .type import Type
from .typecheck import TypePropagation
class PortTypeProp(TypePropagation):
def visit_NEW(self, ir):
if ir.func_scope().is_port():
assert self.scope.is_ctor() and self.scope.parent.is_module()
attrs = {}
ctor = ir.func_scope().find_ctor()
for (_, a), p in zip(ir.args, ctor.params[1:]):
if a.is_a(CONST):
if p.copy.name == 'direction':
di = self._normalize_direction(a.value)
if not di:
fail(self.current_stm,
Errors.UNKNOWN_X_IS_SPECIFIED,
['direction', a.value])
attrs[p.copy.name] = di
else:
attrs[p.copy.name] = a.value
elif a.is_a(TEMP) and a.symbol().typ.is_class():
attrs[p.copy.name] = a.symbol().typ.name
else:
fail(self.current_stm, Errors.PORT_PARAM_MUST_BE_CONST)
assert len(ir.func_scope().type_args) == 1
assert 'direction' in attrs
attrs['dtype'] = ir.func_scope().type_args[0]
attrs['root_symbol'] = self.current_stm.dst.symbol()
if self.current_stm.is_a(MOVE) and self.current_stm.dst.is_a(TEMP):
attrs['port_kind'] = 'internal'
else:
if attrs['direction'] == 'input' or attrs['direction'] == 'output':
attrs['port_kind'] = 'external'
else:
attrs['port_kind'] = 'internal'
if 'protocol' not in attrs:
attrs['protocol'] = 'none'
if 'init' not in attrs or attrs['init'] is None:
attrs['init'] = 0
port_typ = Type.port(ir.func_scope(), attrs)
#port_typ.freeze()
ir.func_scope().return_type = port_typ
return ir.func_scope().return_type
def _normalize_direction(self, di):
if di == 'in' or di == 'input' or di == 'i':
return 'input'
elif di == 'out' or di == 'output' or di == 'o':
return 'output'
elif di == 'any' or not di:
return 'any'
return ''
def visit_CALL(self, ir):
if ir.func_scope().is_method() and ir.func_scope().parent.is_port():
sym = ir.func.tail()
assert sym.typ.is_port()
kind = sym.typ.get_port_kind()
root = sym.typ.get_root_symbol()
port_owner = root.scope
# if port is a local variable, we modify the port owner its parent
if port_owner.is_method():
port_owner = port_owner.parent
if self.scope.is_worker():
scope = self.scope.worker_owner
elif self.scope.is_method():
scope = self.scope.parent
else:
scope = self.scope
if not scope.is_subclassof(port_owner) and kind == 'internal':
fail(self.current_stm, Errors.PORT_ACCESS_IS_NOT_ALLOWED)
return super().visit_CALL(ir)
class PortConverter(IRTransformer):
def __init__(self):
super().__init__()
self.writers = defaultdict(set)
self.readers = defaultdict(set)
def process_all(self):
scopes = Scope.get_scopes(with_class=True)
modules = [s for s in scopes if s.is_module()]
if not modules:
return
typeprop = PortTypeProp()
for m in modules:
if not m.is_instantiated():
continue
ctor = m.find_ctor()
assert ctor
typeprop.process(ctor)
for w, args in m.workers:
typeprop.process(w)
for caller in env.depend_graph.preds(m):
if caller.is_namespace():
continue
typeprop.process(caller)
self.union_ports = defaultdict(set)
self.process(ctor)
for w, args in m.workers:
self.process(w)
for caller in env.depend_graph.preds(m):
if caller.is_namespace():
continue
self.process(caller)
# check for instance variable port
for field in m.class_fields().values():
if field.typ.is_port() and field not in self.readers and field not in self.writers:
if not env.depend_graph.preds(m):
continue
assert ctor.usedef
stm = ctor.usedef.get_stms_defining(field).pop()
warn(stm, Warnings.PORT_IS_NOT_USED,
[field.orig_name()])
# check for local variable port
for sym in ctor.symbols.values():
if sym.typ.is_port() and sym not in self.readers and sym not in self.writers:
assert ctor.usedef
stms = ctor.usedef.get_stms_defining(sym)
# This symbol might not be used (e.g. ancestor symbol),
# so we have to check if its definition statement exists.
if stms:
warn(list(stms)[0], Warnings.PORT_IS_NOT_USED,
[sym.orig_name()])
def _set_and_check_port_direction(self, expected_di, sym):
port_typ = sym.typ
rootsym = port_typ.get_root_symbol()
di = port_typ.get_direction()
kind = port_typ.get_port_kind()
if kind == 'external':
if di == 'any':
port_typ.set_port_kind('internal')
port_typ.set_direction('inout')
elif di != expected_di:
if sym.ancestor and sym.ancestor.scope is not sym.scope:
# the port has been accessed in the opposite direction
# by a module that includes the original owner module
port_typ.set_port_kind('internal')
port_typ.set_direction('inout')
else:
fail(self.current_stm, Errors.DIRECTION_IS_CONFLICTED,
[sym.orig_name()])
elif kind == 'internal':
if self.scope.is_worker():
port_typ.set_direction('inout')
else:
fail(self.current_stm, Errors.DIRECTION_IS_CONFLICTED,
[sym.orig_name()])
if expected_di == 'output':
# write-write conflict
if self.writers[rootsym]:
assert len(self.writers[rootsym]) == 1
writer = list(self.writers[rootsym])[0]
if writer is not self.scope and writer.worker_owner is self.scope.worker_owner:
fail(self.current_stm, Errors.WRITING_IS_CONFLICTED,
[sym.orig_name()])
else:
if kind == 'internal':
assert self.scope.is_worker() or self.scope.parent.is_module()
self.writers[rootsym].add(self.scope)
elif expected_di == 'input':
# read-read conflict
if self.readers[rootsym]:
if all([s.is_testbench() for s in self.readers[rootsym]]):
if self.scope.is_testbench():
self.readers[rootsym].add(self.scope)
elif port_typ.get_scope().name.startswith('polyphony.io.Port') and port_typ.get_protocol() == 'none':
pass
else:
assert len(self.readers[rootsym]) == 1
reader = list(self.readers[rootsym])[0]
if reader is not self.scope and reader.worker_owner is self.scope.worker_owner:
fail(self.current_stm, Errors.READING_IS_CONFLICTED,
[sym.orig_name()])
else:
if kind == 'internal':
assert self.scope.is_worker() or self.scope.parent.is_module()
self.readers[rootsym].add(self.scope)
def _get_port_owner(self, sym):
assert sym.typ.is_port()
root = sym.typ.get_root_symbol()
if root.scope.is_ctor():
return root.scope.parent
else:
return root.scope
def _check_port_direction(self, sym, func_scope):
if func_scope.name.startswith('polyphony.io.Queue'):
if func_scope.orig_name in ('wr', 'full'):
expected_di = 'output'
else:
expected_di = 'input'
else:
if func_scope.orig_name == 'wr':
expected_di = 'output'
else:
expected_di = 'input'
port_owner = self._get_port_owner(sym)
if ((self.scope.is_worker() and not self.scope.worker_owner.is_subclassof(port_owner)) or
not self.scope.is_worker()):
expected_di = 'output' if expected_di == 'input' else 'input'
if sym in self.union_ports:
for s in self.union_ports[sym]:
self._set_and_check_port_direction(expected_di, s)
else:
self._set_and_check_port_direction(expected_di, sym)
def visit_CALL(self, ir):
if not ir.func_scope().is_lib():
return ir
if ir.func_scope().is_method() and ir.func_scope().parent.is_port():
sym = ir.func.tail()
assert sym.typ.is_port()
self._check_port_direction(sym, ir.func_scope())
if (self.current_stm.block.synth_params['scheduling'] == 'pipeline' and
self.scope.find_region(self.current_stm.block) is not self.scope.top_region()):
root_sym = sym.typ.get_root_symbol()
root_sym.add_tag('pipelined')
return ir
def visit_SYSCALL(self, ir):
if ir.sym.name.startswith('polyphony.timing.wait_'):
if (ir.sym.name == 'polyphony.timing.wait_rising' or
ir.sym.name == 'polyphony.timing.wait_falling'):
ports = ir.args
elif ir.sym.name == 'polyphony.timing.wait_edge':
ports = ir.args[2:]
elif ir.sym.name == 'polyphony.timing.wait_value':
ports = ir.args[1:]
for _, p in ports:
port = p.symbol().typ
assert port.is_port()
di = port.get_direction()
kind = port.get_port_kind()
if kind == 'external':
port_owner = self._get_port_owner(p.symbol())
#port.set_direction('input')
#port.freeze()
if ((self.scope.is_worker() and not self.scope.worker_owner.is_subclassof(port_owner)) or
not self.scope.is_worker()):
if di == 'input':
fail(self.current_stm, Errors.CANNOT_WAIT_INPUT)
else:
if di == 'output':
fail(self.current_stm, Errors.CANNOT_WAIT_OUTPUT)
return ir
def visit_PHI(self, ir):
if ir.var.symbol().typ.is_port():
for arg in ir.args:
self.union_ports[ir.var.symbol()].add(arg.symbol())
super().visit_PHI(ir)
class FlattenPortList(IRTransformer):
def visit_MREF(self, ir):
memsym = ir.mem.symbol()
memtyp = memsym.typ
assert memtyp.is_seq()
elm_t = memtyp.get_element()
if not elm_t.is_object():
return ir
if not elm_t.get_scope().is_port():
return ir
if not ir.offset.is_a(CONST):
return ir
portname = '{}_{}'.format(memsym.name, ir.offset.value)
scope = ir.mem.symbol().scope
portsym = scope.find_sym(portname)
assert portsym
ir.mem.set_symbol(portsym)
return ir.mem
|
|
from __future__ import absolute_import
from __future__ import print_function
import sys, os, yaml, glob
import subprocess
import pandas as pd
import gzip
import re
import string
import shutil
import numpy as np
from matplotlib import pyplot as plt
from nougat import common, align
from nougat.pdf.peakdetect import peakdet
def run(global_config, sample_config):
sorted_libraries_by_insert = common._sort_libraries_by_insert(
sample_config)
sample_config["commands"] = ""
if "tools" in sample_config:
"""If so, execute them one after the other in the specified order
(might not work)"""
for command in sample_config["tools"]:
"""with this I pick up at run time the correct function in the
current module"""
command_fn = getattr(sys.modules[__name__],
"_run_{}".format(command))
"""Update sample config, each command return sample_config and if
necessary it modifies it"""
sample_config = command_fn(global_config, sample_config,
sorted_libraries_by_insert)
else:
#run default pipeline for QC
sample_config = _run_trimmomatic(global_config, sample_config,
sorted_libraries_by_insert)
sample_config = _run_fastqc(global_config, sample_config,
sorted_libraries_by_insert)
sample_config = _run_abyss(global_config, sample_config,
sorted_libraries_by_insert)
with open("{}.nougat".format(sample_config.get("output", "sample")), "w") as f:
yaml.dump(sample_config, f)
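# Editor's sketch (not part of the original pipeline): the minimal shape of a
# sample_config that drives run().  Keys and paths below are hypothetical;
# each name listed under "tools" must have a matching _run_<name> function in
# this module, and library definitions are resolved separately by
# common._sort_libraries_by_insert().
_EXAMPLE_SAMPLE_CONFIG = {
    "output": "sampleA",
    "tools": ["trimmomatic", "fastqc", "abyss"],  # executed in this order
    "kmer": 54,                                   # required by _run_abyss
    "adapters": "/path/to/adapters.fa",           # required by _run_trimmomatic
    "threads": 8,
}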
def _run_align(global_config, sample_config,sorted_libraries_by_insert):
if "reference" not in sample_config:
print("reference sequence not provided, skypping alignment step.",
"Please provide a reference if you are intrested in aligning the reads",
"against a reference")
return sample_config
if not os.path.exists("alignments"):
os.makedirs("alignments")
os.chdir("alignments")
sorted_libraries_by_insert = align._align_reads(global_config,
sample_config, sorted_libraries_by_insert) # align reads
sorted_alignments_by_insert = align._merge_bam_files(global_config,
sample_config, sorted_libraries_by_insert) # merge alignments
sorted_alignments_by_insert = align.picard_CGbias(global_config,
sample_config,sorted_alignments_by_insert) # compute picard stats
sorted_alignments_by_insert = align.picard_collectInsertSizeMetrics(
global_config, sample_config,sorted_alignments_by_insert)
sorted_alignments_by_insert = align.picard_markDuplicates(global_config,
sample_config,sorted_alignments_by_insert)
os.chdir("..")
sample_config["alignments"] = sorted_alignments_by_insert
return sample_config
def _run_fastqc(global_config, sample_config, sorted_libraries_by_insert):
mainDir = os.getcwd()
FastqcFolder = os.path.join(os.getcwd(), "fastqc")
if not os.path.exists(FastqcFolder):
os.makedirs(FastqcFolder)
program=global_config["Tools"]["fastqc"]["bin"]
program_options=global_config["Tools"]["fastqc"]["options"]
for library, libraryInfo in sorted_libraries_by_insert:
command = [program]
for option in program_options:
command.append(option)
read1=libraryInfo["pair1"]
read2=libraryInfo["pair2"]
command.append(read1)
if read2 is not None:
command.append(read2)
common.print_command(command)
sample_config["commands"] += "\n" + common.get_command_str(command)
folder_output_name = os.path.join(FastqcFolder,
os.path.basename(read1).split(".fastq.gz")[0])
if not common.check_dryrun(sample_config) and not \
os.path.exists("{}_fastqc.zip".format(folder_output_name)):
fastq_stdOut = open(os.path.join(FastqcFolder,
"{}_fastqc.stdout".format(library)), "a")
fastq_stdErr = open(os.path.join(FastqcFolder,
"{}_fastqc.stderr".format(library)), "a")
subprocess.call(command, stdout=fastq_stdOut, stderr=fastq_stdErr)
sample_config["fastqc"] = FastqcFolder
return sample_config
def _run_abyss(global_config, sample_config, sorted_libraries_by_insert):
mainDir = os.getcwd()
ABySS_Kmer_Folder = os.path.join(os.getcwd(), "abyss_kmer")
if "kmer" not in sample_config:
sys.exit("error in _run_abyss QCcontrol: kmer must be present in \
sample_config.yaml")
kmer = sample_config["kmer"]
if not os.path.exists(ABySS_Kmer_Folder):
os.makedirs(ABySS_Kmer_Folder)
os.chdir(ABySS_Kmer_Folder)
program = global_config["Tools"]["abyss"]["bin"]
program = os.path.join(program, "ABYSS-P")
program_options=global_config["Tools"]["abyss"]["options"]
if "abyss" in sample_config:
program_options=sample_config["abyss"]
threads = 16 # default for UPPMAX
if "threads" in sample_config :
threads = sample_config["threads"]
command = "mpirun -np {} {} ".format(threads, program)
command += "-k {} ".format(kmer)
command += "--coverage-hist=histogram.hist -o preUnitgs.fa"
for library, libraryInfo in sorted_libraries_by_insert:
read1=libraryInfo["pair1"]
read2=libraryInfo["pair2"]
orientation = libraryInfo["orientation"]
if orientation=="innie" or orientation=="outtie":
command += " {} ".format(read1)
if read2 is not None:
command += " {} ".format(read2)
if orientation == "none":
command += " {} ".format(read1)
common.print_command(command)
sample_config["commands"] += "\n" + common.get_command_str(command)
if not common.check_dryrun(sample_config) and not \
os.path.exists("histogram.hist"):
ABySS_Kmer_stdOut = open("ABySS_Kmer_Folder.stdOut", "a")
ABySS_Kmer_stdErr = open("ABySS_Kmer_Folder.stdErr", "a")
returnValue = subprocess.call(command, shell=True, \
stdout=ABySS_Kmer_stdOut, stderr=ABySS_Kmer_stdErr)
if returnValue > 0:
print("ABySS kmer plotting failed: unkwnown reason")
else :
subprocess.call(("rm", "preUnitgs.fa"))
_plotKmerFixed(1,200, kmer, "kmer_coverage_1_200.png")
_plotKmerFixed(1,500, kmer, "kmer_coverage_1_500.png")
_plotKmerFixed(15,200, kmer, "kmer_coverage_15_200.png")
_plotKmerFixed(15,500, kmer, "kmer_coverage_15_500.png")
_plotKmer(kmer, "kmer_coverage.png")
os.chdir("..")
sample_config["abyss"] = ABySS_Kmer_Folder
return sample_config
def _plotKmer(kmer, output_name):
"""Kmer abundance as a single plot, suitable for the report
"""
Kmer_histogram = pd.io.parsers.read_csv("histogram.hist", sep='\t',
header=None)
Kmer_coverage = Kmer_histogram[Kmer_histogram.columns[0]].tolist()
Kmer_count = Kmer_histogram[Kmer_histogram.columns[1]].tolist()
# Not interested in coverage > 5000
kcov = [c for c in Kmer_coverage if c <= 5000]
# Weight each k-mer count by its coverage
kcount_gradient = [kcov[i] * Kmer_count[i] for i in range(len(kcov))]
# Lazily drift towards the most spacious area under the curve
# using divide and conquer.
def get_bisect(chunk):
left = chunk[:int(len(chunk)/2)]
right = chunk[int(len(chunk)/2):]
lweight = sum(map(lambda x: x[0] * x[1], left)) / len(left)
rweight = sum(map(lambda x: x[0] * x[1], right)) / len(right)
if lweight > rweight:
return left
else:
return right
# Perform six bisections
cov_count = list(zip(kcov, kcount_gradient))
for i in range(0,6):
try:
cov_count = get_bisect(cov_count)
except ZeroDivisionError: # Already at the leftmost position
pass
xmax = cov_count[-1][0]
# We could always use more space
xmax = int(xmax * 1.3)
ymax = max(kcount_gradient)
# Find the highest peak x > 1. Works 70% of the time.
maxtab, mintab = peakdet(kcount_gradient, 100000.0)
first_peak = list(np.array(maxtab)[:,0])[0]
# Discard x = 0 peak
if first_peak == 0 and maxtab.size > 2:
maxtab = np.delete(maxtab, 0, 0)
peak = np.argmax(maxtab, axis=0)[1]
peak = maxtab[peak][0]
plt.xlim((0, xmax))
plt.ylim((0, ymax))
plt.plot(kcov, kcount_gradient)
plt.vlines(peak, 1, kcount_gradient[peak], colors='r',
linestyles='--')
plt.text(peak, kcount_gradient[peak], str(peak))
plotname = "{}".format(output_name)
plt.savefig(plotname)
plt.clf()
return 0
def _plotKmerFixed(min_limit, max_limit, kmer, output_name):
"""Old kmerplot, kept just in case...
"""
Kmer_histogram = pd.io.parsers.read_csv("histogram.hist", sep='\t',
header=None)
Kmer_coverage = Kmer_histogram[Kmer_histogram.columns[0]].tolist()
Kmer_count = Kmer_histogram[Kmer_histogram.columns[1]].tolist()
Kmer_freq = [Kmer_coverage[i]*Kmer_count[i] for i in \
range(len(Kmer_coverage))]
#coverage peak, disregarding initial peak
kmer_freq_peak = Kmer_freq.index(max(Kmer_freq[min_limit:max_limit]))
kmer_freq_peak_value=max(Kmer_freq[min_limit:max_limit])
xmax = max_limit
ymax = kmer_freq_peak_value + (kmer_freq_peak_value*0.30)
plt.plot(Kmer_coverage, Kmer_freq)
plt.title("K-mer length = {}".format(kmer))
plt.xlim((0,xmax))
plt.ylim((0,ymax))
plt.vlines(kmer_freq_peak, 0, kmer_freq_peak_value, colors='r',
linestyles='--')
plt.text(kmer_freq_peak, kmer_freq_peak_value+2000, str(kmer_freq_peak))
plotname = "{}".format(output_name)
plt.savefig(plotname)
plt.clf()
return 0
def _run_trimmomatic(global_config, sample_config, sorted_libraries_by_insert):
program = global_config["Tools"]["trimmomatic"]["bin"]
program_folder = os.path.dirname(program)
if "adapters" not in sample_config:
sys.exit("running MP pipeline, adapters file to be used in trimming"
"are needed for Trimmomatic. Please specify them"
"in the sample configuration file and rerun")
adapterFile = sample_config["adapters"]
if not os.path.exists(adapterFile):
sys.exit("Trimmomatic cannot be run as adapter file is not specified"
"or points to unknown position: {}".format(adapterFile))
mainDirectory = os.getcwd()
trimmomaticDir = os.path.join(mainDirectory, "Trimmomatic")
if not os.path.exists(trimmomaticDir):
os.makedirs(trimmomaticDir)
os.chdir(trimmomaticDir)
# now in the run directory, process the libraries one by one
threads = 8
if "threads" in sample_config:
threads = sample_config["threads"]
for library, libraryInfo in sorted_libraries_by_insert:
read1=libraryInfo["pair1"]
read2=libraryInfo["pair2"]
orientation = libraryInfo["orientation"]
if read2 is not None:
read1_baseName = os.path.split(read1)[1].split(".")[0]
read2_baseName = os.path.split(read2)[1].split(".")[0]
output_read1_pair = os.path.join(trimmomaticDir,
"{}.fastq.gz".format(read1_baseName))
output_read1_sing = os.path.join(trimmomaticDir,
"{}_u.fastq.gz".format(read1_baseName))
output_read2_pair = os.path.join(trimmomaticDir,
"{}.fastq.gz".format(read2_baseName))
output_read2_sing = os.path.join(trimmomaticDir,
"{}_u.fastq.gz".format(read2_baseName))
command = ["java", "-jar", program, "PE", "-threads",
"{}".format(threads), "-phred33", read1, read2,
output_read1_pair, output_read1_sing, output_read2_pair,
output_read2_sing,
"ILLUMINACLIP:{}:2:30:10".format(adapterFile),
"LEADING:3", "TRAILING:3", "SLIDINGWINDOW:4:15",
"MINLEN:30"]
common.print_command(command)
sample_config["commands"] += "\n" + common.get_command_str(command)
# do not execute if the files have already been generated
if not common.check_dryrun(sample_config) and not \
os.path.exists(output_read1_pair):
stdOut = open("{}_trimmomatic.stdOut".format(read1_baseName),
"w")
stdErr = open("{}_trimmomatic.stdErr".format(read1_baseName),
"w")
returnValue = subprocess.call(command, stdout=stdOut,
stderr=stdErr) # run the program
if returnValue != 0:
print("error while running command: {}".format(command))
libraryInfo["pair1"] = output_read1_pair
libraryInfo["pair2"] = output_read2_pair
libraryInfo["trimmomatic"] = os.path.join(trimmomaticDir,
"{}_trimmomatic.stdErr".format(read1_baseName))
os.chdir(mainDirectory)
return sample_config
def _kmergenie_plot(hist_file):
"""Kmergenie outputs pdf plots. We want pngs without resorting to \
imagemagick
TODO: Abstract this to a common plotting function if possible"""
kgenie_hist = pd.io.parsers.read_csv(hist_file, sep=" ", header=0)
kmer_lengths = kgenie_hist[kgenie_hist.columns[0]].tolist()
genomic_kmers = kgenie_hist[kgenie_hist.columns[1]].tolist()
peak_value = max(genomic_kmers)
peak_idx = genomic_kmers.index(peak_value)
best_k = kmer_lengths[peak_idx]
plt.plot(kmer_lengths, genomic_kmers)
plt.title("Best K-mer length: {}".format(best_k))
plt.xlabel("K-mer size")
plt.ylabel("Number of genomic k-mers")
y_margin = (min(genomic_kmers) + peak_value) / 2 * 0.01
y_min = min(genomic_kmers) - y_margin
y_max = peak_value + y_margin
plt.ylim(y_min, y_max)
plt.xlim(min(kmer_lengths) - 5, max(kmer_lengths) + 5)
plt.vlines(best_k, 0, peak_value, colors = "r", linestyles='--')
plt.savefig(hist_file + ".png")
plt.clf()
def _run_kmergenie(global_config, sample_config, sorted_libraries_by_insert):
"""Runs kmergenie to establish a recommended kmer size for assembly"""
maindir = os.getcwd()
kmerdir = os.path.join(maindir, "kmergenie")
if not os.path.exists(kmerdir):
os.makedirs(kmerdir)
os.chdir(kmerdir)
#Write a list of input fastq files for kmergenie
kmer_input = os.path.join(kmerdir,
"{}kmerinput.txt".format(sample_config.get("output","")))
program = global_config["Tools"]["kmergenie"]["bin"]
program_options=global_config["Tools"]["kmergenie"]["options"]
# Could be useful to add --diploid if sample is highly heterozygous
if "kmergenie" in sample_config:
program_options=sample_config["kmergenie"]
threads = "" # Kmergenie will spawn number_of_cores - 1 threads by default
if "threads" in sample_config :
threads = sample_config["threads"]
cmd_list = [program, kmer_input]
for option in filter(None, program_options):
cmd_list.append(option)
    if threads:
        cmd_list.extend(["-t", str(threads)])
command = " ".join(cmd_list)
common.print_command(command)
sample_config["commands"] += "\n" + common.get_command_str(command)
if not common.check_dryrun(sample_config):
with open(kmer_input, "w") as f:
for lib, lib_info in sorted_libraries_by_insert:
f.write(lib_info["pair1"] + "\n")
f.write(lib_info["pair2"] + "\n")
stdOut = open("kmergenie.stdOut", "w")
stdErr = open("kmergenie.stdErr", "w")
returnValue = subprocess.call(cmd_list, stdout=stdOut, stderr=stdErr)
if returnValue != 0:
print("error while running command: {}".format(command))
else:
_kmergenie_plot("histograms.dat")
sample_config["kmergenie"] = kmerdir
os.chdir(maindir)
return sample_config
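# For orientation only: the kmergenie input file written above is a plain list
# of fastq paths, one per line, and the assembled command is roughly the
# following (prefix and thread count are illustrative):
#   kmergenie <prefix>kmerinput.txt -t 16
# The histograms.dat file kmergenie produces is then rendered to a png by
# _kmergenie_plot().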
|
|
import datetime
import hashlib
import json
from bson.objectid import ObjectId
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import render
from crits.core.class_mapper import class_from_id, class_from_value
from crits.core.crits_mongoengine import EmbeddedSource
from crits.core.crits_mongoengine import create_embedded_source, json_handler
from crits.core.handlers import build_jtable, jtable_ajax_list, jtable_ajax_delete
from crits.core.handlers import csv_export
from crits.core.user_tools import user_sources
from crits.core.user_tools import is_user_subscribed
from crits.certificates.certificate import Certificate
from crits.notifications.handlers import remove_user_from_notification
from crits.services.analysis_result import AnalysisResult
from crits.services.handlers import run_triage, get_supported_services
from crits.vocabulary.relationships import RelationshipTypes
from crits.vocabulary.acls import CertificateACL
def generate_cert_csv(request):
"""
Generate a CSV file of the Certificate information
:param request: The request for this CSV.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
response = csv_export(request,Certificate)
return response
def get_certificate_details(md5, user):
"""
Generate the data to render the Certificate details template.
:param md5: The MD5 of the Certificate to get details for.
:type md5: str
:param user: The user requesting this information.
:type user: str
:returns: template (str), arguments (dict)
"""
template = None
sources = user_sources(user.username)
cert = Certificate.objects(md5=md5, source__name__in=sources).first()
if not user.check_source_tlp(cert):
cert = None
if not cert:
template = "error.html"
args = {'error': 'Certificate not yet available or you do not have access to view it.'}
else:
cert.sanitize("%s" % user.username)
# remove pending notifications for user
remove_user_from_notification("%s" % user.username, cert.id, 'Certificate')
# subscription
subscription = {
'type': 'Certificate',
'id': cert.id,
'subscribed': is_user_subscribed("%s" % user.username,
'Certificate', cert.id),
}
#objects
objects = cert.sort_objects()
#relationships
relationships = cert.sort_relationships("%s" % user.username, meta=True)
# relationship
relationship = {
'type': 'Certificate',
'value': cert.id
}
#comments
comments = {'comments': cert.get_comments(),
'url_key': md5}
#screenshots
screenshots = cert.get_screenshots(user.username)
# services
service_list = get_supported_services('Certificate')
# analysis results
service_results = cert.get_analysis_results()
args = {'service_list': service_list,
'objects': objects,
'relationships': relationships,
'comments': comments,
'relationship': relationship,
"subscription": subscription,
"screenshots": screenshots,
'service_results': service_results,
"cert": cert,
"CertificateACL": CertificateACL,}
return template, args
def generate_cert_jtable(request, option):
"""
Generate the jtable data for rendering in the list template.
:param request: The request for this jtable.
:type request: :class:`django.http.HttpRequest`
:param option: Action to take.
:type option: str of either 'jtlist', 'jtdelete', or 'inline'.
:returns: :class:`django.http.HttpResponse`
"""
obj_type = Certificate
type_ = "certificate"
mapper = obj_type._meta['jtable_opts']
if option == "jtlist":
# Sets display url
details_url = mapper['details_url']
details_url_key = mapper['details_url_key']
fields = mapper['fields']
response = jtable_ajax_list(obj_type,
details_url,
details_url_key,
request,
includes=fields)
return HttpResponse(json.dumps(response,
default=json_handler),
content_type="application/json")
if option == "jtdelete":
response = {"Result": "ERROR"}
if jtable_ajax_delete(obj_type,request):
response = {"Result": "OK"}
return HttpResponse(json.dumps(response,
default=json_handler),
content_type="application/json")
jtopts = {
'title': "Certificates",
'default_sort': mapper['default_sort'],
'listurl': reverse('crits-%ss-views-%ss_listing' % (type_,
type_),
args=('jtlist',)),
'deleteurl': reverse('crits-%ss-views-%ss_listing' % (type_,
type_),
args=('jtdelete',)),
'searchurl': reverse(mapper['searchurl']),
'fields': mapper['jtopts_fields'],
'hidden_fields': mapper['hidden_fields'],
'linked_fields': mapper['linked_fields'],
'details_link': mapper['details_link'],
'no_sort': mapper['no_sort']
}
jtable = build_jtable(jtopts,request)
jtable['toolbar'] = [
{
'tooltip': "'All Certificates'",
'text': "'All'",
'click': "function () {$('#certificate_listing').jtable('load', {'refresh': 'yes'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'New Certificates'",
'text': "'New'",
'click': "function () {$('#certificate_listing').jtable('load', {'refresh': 'yes', 'status': 'New'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'In Progress Certificates'",
'text': "'In Progress'",
'click': "function () {$('#certificate_listing').jtable('load', {'refresh': 'yes', 'status': 'In Progress'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'Analyzed Certificates'",
'text': "'Analyzed'",
'click': "function () {$('#certificate_listing').jtable('load', {'refresh': 'yes', 'status': 'Analyzed'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'Deprecated Certificates'",
'text': "'Deprecated'",
'click': "function () {$('#certificate_listing').jtable('load', {'refresh': 'yes', 'status': 'Deprecated'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'Add Certificate'",
'text': "'Add Certificate'",
'click': "function () {$('#new-certificate').click()}",
},
]
if option == "inline":
return render(request, "jtable.html",
{'jtable': jtable,
'jtid': '%s_listing' % type_,
'button' : '%ss_tab' % type_},
)
else:
return render(request, "%s_listing.html" % type_,
{'jtable': jtable,
'jtid': '%s_listing' % type_},
)
def handle_cert_file(filename, data, source_name, user=None,
description=None, related_md5=None, method='',
reference='', tlp=None, relationship=None, bucket_list=None,
ticket=None, related_id=None, related_type=None,
relationship_type=None):
"""
Add a Certificate.
:param filename: The filename of the Certificate.
:type filename: str
:param data: The filedata of the Certificate.
:type data: str
:param source_name: The source which provided this Certificate.
:type source_name: str,
:class:`crits.core.crits_mongoengine.EmbeddedSource`,
list of :class:`crits.core.crits_mongoengine.EmbeddedSource`
:param user: The user adding the Certificate.
:type user: str
:param description: Description of the Certificate.
:type description: str
:param related_md5: MD5 of a top-level object related to this Certificate.
:type related_md5: str
:param related_type: The CRITs type of the related top-level object.
:type related_type: str
:param method: The method of acquiring this Certificate.
:type method: str
:param reference: A reference to the source of this Certificate.
:type reference: str
:param tlp: The TLP for this certificate.
:type tlp: str
:param relationship: The relationship between the parent and the Certificate.
:type relationship: str
:param bucket_list: Bucket(s) to add to this Certificate
:type bucket_list: str(comma separated) or list.
:param ticket: Ticket(s) to add to this Certificate
:type ticket: str(comma separated) or list.
:param related_id: ID of object to create relationship with
:type related_id: str
:param related_type: Type of object to create relationship with
    :type related_type: str
:param relationship_type: Type of relationship to create.
:type relationship_type: str
:returns: dict with keys:
'success' (boolean),
'message' (str),
'md5' (str) if successful.
"""
if not data:
status = {
'success': False,
'message': 'No data object passed in'
}
return status
if len(data) <= 0:
status = {
'success': False,
'message': 'Data length <= 0'
}
return status
if ((related_type and not (related_id or related_md5)) or
(not related_type and (related_id or related_md5))):
status = {
'success': False,
'message': 'Must specify both related_type and related_id or related_md5.'
}
return status
related_obj = None
if related_id or related_md5:
if related_id:
related_obj = class_from_id(related_type, related_id)
else:
related_obj = class_from_value(related_type, related_md5)
if not related_obj:
status = {
'success': False,
'message': 'Related object not found.'
}
return status
# generate md5 and timestamp
md5 = hashlib.md5(data).hexdigest()
timestamp = datetime.datetime.now()
# generate Certificate
cert = Certificate.objects(md5=md5).first()
if not cert:
cert = Certificate()
cert.filename = filename
cert.created = timestamp
cert.size = len(data)
cert.description = description
cert.md5 = md5
# generate source information and add to certificate
if isinstance(source_name, basestring) and len(source_name) > 0:
if user.check_source_write(source_name):
s = create_embedded_source(source_name,
reference=reference,
method=method,
tlp=tlp,
analyst=user.username)
else:
return {"success": False,
"message": "User does not have permission to add objects \
using source %s." % str(source_name)}
cert.add_source(s)
elif isinstance(source_name, EmbeddedSource):
cert.add_source(source_name, method=method, reference=reference,
tlp=tlp)
elif isinstance(source_name, list) and len(source_name) > 0:
for s in source_name:
if isinstance(s, EmbeddedSource):
cert.add_source(s, method=method, reference=reference,
tlp=tlp)
if bucket_list:
cert.add_bucket_list(bucket_list, user)
if ticket:
cert.add_ticket(ticket, user)
# add file to GridFS
if not isinstance(cert.filedata.grid_id, ObjectId):
cert.add_file_data(data)
# save cert
cert.save(username=user)
cert.reload()
# run certificate triage
if len(AnalysisResult.objects(object_id=str(cert.id))) < 1 and data:
run_triage(cert, user)
# update relationship if a related top-level object is supplied
if related_obj and cert:
if relationship_type:
relationship=RelationshipTypes.inverse(relationship=relationship_type)
if not relationship:
relationship = RelationshipTypes.RELATED_TO
cert.add_relationship(related_obj,
relationship,
analyst=user,
get_rels=False)
cert.save(username=user)
status = {
'success': True,
'message': 'Uploaded certificate',
'md5': md5,
'id': str(cert.id),
'object': cert
}
return status
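# Illustrative call only (all values below are placeholders, not a tested
# invocation from this codebase):
#   status = handle_cert_file("example.crt", cert_bytes, "ExampleSource",
#                             user=request.user, description="uploaded cert",
#                             tlp="green")
#   if status['success']:
#       print(status['md5'])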
def delete_cert(md5, username=None):
"""
Delete a Certificate.
:param md5: The MD5 of the Certificate to delete.
:type md5: str
:param username: The user deleting the certificate.
:type username: str
:returns: True, False
"""
cert = Certificate.objects(md5=md5).first()
if cert:
cert.delete(username=username)
return True
else:
return False
|
|
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
# Standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import *
from past.utils import old_div
import numpy as np
import urllib.request, urllib.parse, urllib.error,json,csv
import xml.etree.cElementTree as ET
import urllib.request, urllib.error, urllib.parse
import time
# Our imports
import emission.core.common as ec
import emission.core.get_database as edb
import emission.analysis.modelling.tour_model.trajectory_matching as eatm
def find_near(lst,pnt,radius):
near=[]
for i in range(len(lst)):
# print(ec.calDistance(lst[i],pnt))
if ec.calDistance(lst[i],pnt)<radius:
near.append(i)
return near
def find_nearest(lst,pnt):
nearest=lst[0]
dis=99999999
for i in range(len(lst)):
# print(ec.calDistance(lst[i],pnt))
new_dis=ec.calDistance(lst[i],pnt)
if new_dis<dis:
dis=new_dis
nearest=lst[i]
print(dis)
return nearest
def cal_matching_score(lst1,lst2,radius):
len1=len(lst1)
len2=len(lst2)
max_len=max(len1,len2)
count=0
print('cal_matching start')
# print(len1)
# print(len2)
# print(max_len)
for i in range(max_len):
# print(int(i/max_len*len1))
# print(int(i/max_len*len2))
# print(lst1[int(i/max_len*len1)])
# print(lst2[int(i/max_len*len2)])
if ec.Is_place_2(lst1[int(i/max_len*len1)],lst2[int(i/max_len*len2)],radius):
count+=1
score=old_div(count,max_len)
return score
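# Rough usage sketch (routes are lists of [lon, lat] coordinates and the
# radius is in whatever units ec.calDistance returns, typically metres;
# values are made up):
#   score = cal_matching_score(route_a, route_b, 50)
#   # score is the fraction of proportionally aligned samples that lie within
#   # the radius of each other, so 1.0 means every sampled pair matched.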
def route_matching(lst1,lst2,step,radius,len_match,min_score):
    # input: 2 lists of tracking points; each tracking point is in GeoJSON format
    # the two lists must have at least two tracking points
if len(lst1)<2 or len(lst2)<2:
return False
start_pnt1=lst1[0]
end_pnt1=lst1[-1]
start_pnt2=lst2[0]
end_pnt2=lst2[-1]
# Case 1, lst2 is part of lst1:
lst1_extended=[]
for i in range(len(lst1)-1):
dis=ec.calDistance(lst1[i]['track_location']['coordinates'],lst1[i+1]['track_location']['coordinates'])
num_inter=int(round(old_div(dis,step)))
if num_inter==0:
lst1_extended.append(lst1[i]['track_location']['coordinates'])
else:
lon_list=np.linspace(lst1[i]['track_location']['coordinates'][0],lst1[i+1]['track_location']['coordinates'][0],num_inter,False)
lat_list=np.linspace(lst1[i]['track_location']['coordinates'][1],lst1[i+1]['track_location']['coordinates'][1],num_inter,False)
for j in range(len(lon_list)):
lst1_extended.append([lon_list[j],lat_list[j]])
lst1_extended.append(end_pnt1['track_location']['coordinates'])
lst2_extended=[]
for i in range(len(lst2)-1):
dis=ec.calDistance(lst2[i]['track_location']['coordinates'],lst2[i+1]['track_location']['coordinates'])
num_inter=int(round(old_div(dis,step)))
if num_inter==0:
lst2_extended.append(lst2[i]['track_location']['coordinates'])
else:
lon_list=np.linspace(lst2[i]['track_location']['coordinates'][0],lst2[i+1]['track_location']['coordinates'][0],num_inter,False)
lat_list=np.linspace(lst2[i]['track_location']['coordinates'][1],lst2[i+1]['track_location']['coordinates'][1],num_inter,False)
for j in range(len(lon_list)):
lst2_extended.append([lon_list[j],lat_list[j]])
lst2_extended.append(end_pnt2['track_location']['coordinates'])
# print(len(lst1_extended))
# print(len(lst2_extended))
near_start2=find_near(lst1_extended,start_pnt2['track_location']['coordinates'],radius)
near_end2=find_near(lst1_extended,end_pnt2['track_location']['coordinates'],radius)
near_start1=find_near(lst2_extended,start_pnt1['track_location']['coordinates'],radius)
near_end1=find_near(lst2_extended,end_pnt1['track_location']['coordinates'],radius)
# print(near_start2)
# print(near_end2)
# print(near_start1)
# print(near_end1)
best_score=[]
if len(near_start2)>0 and len(near_end2)>0:
print("start of case 1")
for near_s in near_start2:
for near_e in near_end2:
if old_div(min(abs(near_e-near_s)+1,len(lst2_extended)),max(abs(near_e-near_s)+1,len(lst2_extended)))>=len_match:
print("possible near_s is %s" % near_s)
print("possible near_e is %s" % near_e)
if near_e>near_s:
print("start index is %d" % near_s)
print("end index is %d" % near_e)
route1=lst1_extended[near_s:near_e+1:1]
route2=lst2_extended
print("route1 is %s" % route1)
print("route2 is %s" % route2)
else:
print("start index is %d" % near_s)
print("end index is %d" % near_e)
route1=lst1_extended[near_e:near_s+1:1][::-1]
route2=lst2_extended
print("route1 is %s" % route1)
print("route2 is %s" % route2)
best_score.append(cal_matching_score(route1,route2,radius))
if len(near_start1)>0 and len(near_end1)>0:
print("start of case 2")
for near_s in near_start1:
for near_e in near_end1:
if old_div(min(abs(near_e-near_s)+1,len(lst1_extended)),max(abs(near_e-near_s)+1,len(lst1_extended)))>=len_match:
if near_e>near_s:
print("start index is %d" % near_s)
print("end index is %d" % near_e)
route1=lst1_extended
route2=lst2_extended[near_s:near_e+1:1]
print("route1 is %s" % route1)
print("route2 is %s" % route2)
else:
route1=lst1_extended
route2=lst2_extended[near_e:near_s+1:1][::-1]
best_score.append(cal_matching_score(route1,route2,radius))
print(best_score)
if len(best_score)>0 and max(best_score)>min_score:
return True
else:
return False
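# Hedged usage sketch (trip_a / trip_b are lists of tracking-point dicts with
# a 'track_location' GeoJSON member, as consumed above; the numbers are
# illustrative, not tuned values from this project):
#   same_route = route_matching(trip_a, trip_b, step=50, radius=100,
#                               len_match=0.8, min_score=0.6)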
def route_matching_2(lst1,lst2,step,radius,min_score):
    # input: 2 lists of tracking points; each tracking point is in GeoJSON format
    # the two lists must have at least two tracking points
if len(lst1)<2 or len(lst2)<2:
return False
start_pnt1=lst1[0]
end_pnt1=lst1[-1]
start_pnt2=lst2[0]
end_pnt2=lst2[-1]
# Case 1, lst2 is part of lst1:
lst1_extended=[]
for i in range(len(lst1)-1):
dis=ec.calDistance(lst1[i]['track_location']['coordinates'],lst1[i+1]['track_location']['coordinates'])
num_inter=int(round(old_div(dis,step)))
if num_inter==0:
lst1_extended.append(lst1[i]['track_location']['coordinates'])
else:
lon_list=np.linspace(lst1[i]['track_location']['coordinates'][0],lst1[i+1]['track_location']['coordinates'][0],num_inter,False)
lat_list=np.linspace(lst1[i]['track_location']['coordinates'][1],lst1[i+1]['track_location']['coordinates'][1],num_inter,False)
for j in range(len(lon_list)):
lst1_extended.append([lon_list[j],lat_list[j]])
lst1_extended.append(end_pnt1['track_location']['coordinates'])
lst2_extended=[]
for i in range(len(lst2)-1):
dis=ec.calDistance(lst2[i]['track_location']['coordinates'],lst2[i+1]['track_location']['coordinates'])
num_inter=int(round(old_div(dis,step)))
if num_inter==0:
lst2_extended.append(lst2[i]['track_location']['coordinates'])
else:
lon_list=np.linspace(lst2[i]['track_location']['coordinates'][0],lst2[i+1]['track_location']['coordinates'][0],num_inter,False)
lat_list=np.linspace(lst2[i]['track_location']['coordinates'][1],lst2[i+1]['track_location']['coordinates'][1],num_inter,False)
for j in range(len(lon_list)):
lst2_extended.append([lon_list[j],lat_list[j]])
lst2_extended.append(end_pnt2['track_location']['coordinates'])
# print(len(lst1_extended))
# print(len(lst2_extended))
best_score=[]
score_2_in_1=0
for point2 in lst2:
if ec.Include_place_2(lst1_extended,point2['track_location']['coordinates'],radius):
score_2_in_1+=1
best_score.append(old_div(score_2_in_1,len(lst2)))
score_1_in_2=0
for point1 in lst1:
if ec.Include_place_2(lst2_extended,point1['track_location']['coordinates'],radius):
score_1_in_2+=1
best_score.append(old_div(score_1_in_2,len(lst1)))
print(best_score)
if max(best_score)>min_score:
return True
else:
return False
def getRoute(section_id):
route=[]
Sections=edb.get_section_db()
section=Sections.find_one({'_id':section_id})
for point in section['track_points']:
route.append(point['track_location']['coordinates'])
return route
def refineRoute(lst1,step):
if lst1 ==[]:
return lst1
# print(len(lst1))
lst1_extended=[]
for i in range(len(lst1)-1):
dis=ec.calDistance(lst1[i],lst1[i+1])
num_inter=int(round(old_div(dis,step)))
if num_inter==0:
lst1_extended.append(lst1[i])
else:
lon_list=np.linspace(lst1[i][0],lst1[i+1][0],num_inter,False)
lat_list=np.linspace(lst1[i][1],lst1[i+1][1],num_inter,False)
for j in range(len(lon_list)):
lst1_extended.append([lon_list[j],lat_list[j]])
lst1_extended.append(lst1[-1])
# print(len(lst1))
# print(len(lst1_extended))
return lst1_extended
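# Worked example of the interpolation above (assuming ec.calDistance returns
# metres): for two points roughly 330 m apart and step=100, num_inter is 3, so
# np.linspace adds two intermediate points and the final point is appended
# separately:
#   refineRoute([[0.0, 0.0], [0.003, 0.0]], 100)
#   # -> [[0.0, 0.0], [0.001, 0.0], [0.002, 0.0], [0.003, 0.0]] (approximately)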
def storeTransitStop(type,route):
Transit=edb.get_transit_db()
todo={}
stops=[]
tree = ET.ElementTree(file=urllib.request.urlopen('http://api.bart.gov/api/stn.aspx?cmd=stns&key=MW9S-E7SL-26DU-VV8V'))
root = tree.getroot()
# print(root[1][0].find('name').text)
file_name='/Users/Mogeng/Berkeley/Semester2/E-Mission/Transit_routes/'+type+'_'+route+'.csv'
with open(file_name, 'rU') as csvfile:
r = csv.reader(csvfile,delimiter=',',quotechar='|')
for row in r:
print(row[0])
for i in range(len(root[1])):
if row[0].replace(' / ','/').replace('Street','St.').replace('International',"Int'l")==root[1][i].find('name').text:
print((float(root[1][i].find('gtfs_longitude').text),float(root[1][i].find('gtfs_latitude').text)))
stops.append([float(root[1][i].find('gtfs_longitude').text),float(root[1][i].find('gtfs_latitude').text)])
break
todo['type']=type
todo['route']=route
todo['stops']=stops
Transit.insert(todo)
def storeCalTrainStop():
Transit=edb.get_transit_db()
todo={}
stops=[]
edb.get_transit_db().remove({'type':'CalTrain'})
# print(root[1][0].find('name').text)
file_name='/Users/Mogeng/Berkeley/Semester2/E-Mission/Transit_routes/CalTrain.csv'
with open(file_name, 'rU') as csvfile:
r = csv.reader(csvfile,delimiter=',',quotechar='|')
for row in r:
time.sleep(1)
print(row[0])
# print(add)
url='https://maps.googleapis.com/maps/api/geocode/json?address='+urllib.parse.quote_plus(row[0]+' caltrain station')
print(url)
geo= json.load(urllib.request.urlopen(url))
result=geo['results'][0]
print(result['geometry']['location'])
stops.append([result['geometry']['location']['lng'],result['geometry']['location']['lat']])
todo['type']='CalTrain'
todo['route']='CalTrain'
todo['stops']=stops
Transit.insert(todo)
## START ROUTE MATCHING!!! ##
def existingMatchDistance(route1,route2,step1=100000,step2=100000,method='lcs',radius1=2000):
## see how if "route1" can match with an existing route "route2"
## will be mainly used in matching with transit route
# print(lst[0],lst[-1])
# print(route)
dis=999999
if len(route1) < 2 or len(route2) < 2:
        return [0, 0, dis]
for start_route2 in range(len(route2)):
coverage_start=find_near(route1,route2[start_route2],radius1)
if coverage_start!=[]:
break
for end_route2 in range(len(route2)-1,-1,-1):
coverage_end=find_near(route1,route2[end_route2],radius1)
if coverage_end!=[]:
break
# print(start_route2,end_route2)
# print(coverage_start,coverage_end)
if abs(start_route2-end_route2)>1 and start_route2!=len(route2) and end_route2!=0:
start_route1=coverage_start[0]
end_route1=coverage_end[-1]
if abs(start_route1-end_route1)>=1:
## using DTW Iteration
if method=='dtw':
if start_route1<end_route1:
new_dis=eatm.DTW.dynamicTimeWarp(refineRoute(route1[start_route1:end_route1+1],step1),refineRoute(route2[start_route2:end_route2+1],step2),ec.calDistance)
elif end_route1<start_route1:
new_dis=eatm.DTW.dynamicTimeWarp(refineRoute(route1[end_route1:start_route1+1][::-1],step1),refineRoute(route2[start_route2:end_route2+1],step2),ec.calDistance)
## using DTW Recursion
if method=='DTW':
if start_route1<end_route1:
aa=eatm.DTW.Dtw(refineRoute(route1[start_route1:end_route1+1],step1),refineRoute(route2[start_route2:end_route2+1],step2),ec.calDistance)
elif end_route1<start_route1:
aa=eatm.DTW.Dtw(refineRoute(route1[end_route1:start_route1+1][::-1],step1),refineRoute(route2[start_route2:end_route2+1],step2),ec.calDistance)
new_dis=aa.calculate_distance()
## using symmetric DTW
if method=='DTWSym':
if start_route1<end_route1:
aa=eatm.DTW.Dtw(refineRoute(route1[start_route1:end_route1+1],step1),refineRoute(route2[start_route2:end_route2+1],step2),ec.calDistance)
elif end_route1<start_route1:
aa=eatm.DTW.Dtw(refineRoute(route1[end_route1:start_route1+1][::-1],step1),refineRoute(route2[start_route2:end_route2+1],step2),ec.calDistance)
new_dis=aa.calculate_distance()
            ## using Asymmetric DTW
if method=='DTWAsym':
if start_route1<end_route1:
aa=eatm.DTW.Dtw(refineRoute(route1[start_route1:end_route1+1],step1),refineRoute(route2[start_route2:end_route2+1],step2),ec.calDistance)
elif end_route1<start_route1:
aa=eatm.DTW.Dtw(refineRoute(route1[end_route1:start_route1+1][::-1],step1),refineRoute(route2[start_route2:end_route2+1],step2),ec.calDistance)
new_dis=aa.calculate_distance()
## using Frechet
if method=='Frechet':
if start_route1<end_route1:
new_dis=eatm.Frechet.Frechet(refineRoute(route1[start_route1:end_route1+1],step1),refineRoute(route2[start_route2:end_route2+1],step2))
elif end_route1<start_route1:
new_dis=eatm.Frechet.Frechet(refineRoute(route1[end_route1:start_route1+1][::-1],step1),refineRoute(route2[start_route2:end_route2+1],step2))
## using lcs
if method=='lcs':
if start_route1<end_route1:
new_dis=eatm.LCS.lcsScore(refineRoute(route1[start_route1:end_route1+1],step1),refineRoute(route2[start_route2:end_route2+1],step2),radius1)
elif end_route1<start_route1:
# print(route1[start_route1:end_route1-1])
# print(start_route1,end_route1)
# print(len(route1[start_route1:end_route1-1:-1]))
new_dis=eatm.LCS.lcsScore(refineRoute(route1[end_route1:start_route1+1][::-1],step1),refineRoute(route2[start_route2:end_route2+1],step2),radius1)
if new_dis<dis:
dis=new_dis
# print(dis)
return [start_route2,end_route2,dis]
def fullMatchDistance(route1,route2,step1=100000,step2=100000,method='lcs',radius1=2000):
## see how if "route1" can partially match with "route"
## will be mainly used in matching with transit route
# print(lst[0],lst[-1])
# print(route)
dis=999999
if len(route1) < 2 or len(route2) < 2:
return dis
## using DTW Iteration
if method=='dtw':
new_dis=eatm.DTW.dynamicTimeWarp(refineRoute(route1,step1),refineRoute(route2,step2),ec.calDistance)
## using DTW Recursion
if method=='DTW':
aa=eatm.DTW.Dtw(refineRoute(route1,step1),refineRoute(route2,step2),ec.calDistance)
new_dis=aa.calculate_distance()
## using symmetric DTW
if method=='DTWSym':
aa=eatm.DTW.DtwSym(refineRoute(route1,step1),refineRoute(route2,step2),ec.calDistance)
new_dis=aa.calculate_distance()
## using Asymmetric DTW
if method=='DTWAsym':
aa=eatm.DTW.DtwAsym(refineRoute(route1,step1),refineRoute(route2,step2),ec.calDistance)
new_dis=aa.calculate_distance()
## using Frechet
if method=='Frechet':
new_dis=eatm.Frechet.Frechet(refineRoute(route1,step1),refineRoute(route2,step2))
## using lcs
if method=='lcs':
new_dis=eatm.LCS.lcsScore(refineRoute(route1,step1),refineRoute(route2,step2),radius1)
if new_dis<dis:
dis=new_dis
# print(dis)
return dis
def matchTransitRoutes(lst,route,step1=100000,step2=100000,method='lcs',radius1=2500,threshold=0.5):
[final_start,final_end,dis]=existingMatchDistance(lst,route,step1,step2,method,radius1)
if dis<=threshold:
return 1
else:
return 0
def matchTransitStops(lst,route,radius1=2000):
if ec.Include_place_2(route,lst[0],radius1) and ec.Include_place_2(route,lst[-1],radius1):
return 1
else:
return 0
def matchTwoRoutes(route1,route2,step1=100000,step2=100000,method='lcs',radius1=2000,threshold=0.6):
    dis=fullMatchDistance(route1,route2,step1,step2,method,radius1)
if dis<=threshold:
return 1
else:
return 0
def update_user_routeDistanceMatrix(user_id,data_feature,step1=100000,step2=100000,method='lcs',radius1=1000):
ids = list(data_feature.keys())
"""
user_query=edb.get_routeDistanceMatrix_db().find_one({'$and':[{'user':user_id},{'method':method}]})
if user_query==None:
user_disMat={}
for _id in ids:
user_disMat[_id] = {}
edb.get_routeDistanceMatrix_db().insert({'user':user_id,'method':method,'disMat':user_disMat})
else:
user_disMat=user_query['disMat']
"""
user_disMat = edb.get_routeDistanceMatrix_db(user_id, method)
a=0
# print(len(ids))
for _id in ids:
if a % 100 == 0:
print("In update_user_routeDistanceMatrix, a = %d" % a)
a+=1
for key in ids:
try:
user_disMat[_id][key]
#print("found it")
except KeyError:
#print('Updating matrix for the trip ' + _id + '. Doing calculations.')
dis=fullMatchDistance(data_feature[_id], data_feature[key],step1,step2,method,radius1)
#user_disMat[_id] = {}
if _id not in user_disMat:
user_disMat[_id] = {}
user_disMat[_id][key] = dis
#print('Update successful.')
#print(user_disMat[_id])
#edb.get_routeDistanceMatrix_db().update({'$and':[{'user':user_id},{'method':method}]},{'user':user_id,'method':method,'disMat':user_disMat})
print(type(user_disMat))
user_disMat = edb.update_routeDistanceMatrix_db(user_id, method, user_disMat)
return user_disMat
# for entry in edb.get_routeDistanceMatrix_db().find():
# print(entry)
def update_user_routeClusters(user_id,clusters,method='lcs'):
user_query=edb.get_routeCluster_db().find_one({'$and':[{'user':user_id},{'method':method}]})
if user_query==None:
edb.get_routeCluster_db().insert({'user':user_id,'method':method,'clusters':clusters})
else:
edb.get_routeCluster_db().update({'user':user_id,'method':method},{'user':user_id,'method':method,'clusters':clusters})
def get_common_routes_for_user(user_id,method='lcs'):
common_idxs = []
Sections = edb.get_section_db()
user_route_clusters = edb.get_routeCluster_db().find_one({'$and':[{'user':user_id},{'method':method}]})['clusters']
for idx in list(user_route_clusters.keys()):
# print(idx)
if len(user_route_clusters[idx]) >= 3:
section=Sections.find_one({'_id': idx})
# print(section)
if section['distance'] > 2000 and len(getRoute(idx)) > 10 and section['duration'] > 600:
common_idxs.append(idx)
return common_idxs
|
|
from tcga_encoder.utils.helpers import *
from tcga_encoder.data.data import *
from tcga_encoder.definitions.tcga import *
#from tcga_encoder.definitions.nn import *
from tcga_encoder.definitions.locations import *
#from tcga_encoder.algorithms import *
import seaborn as sns
from sklearn.manifold import TSNE, locally_linear_embedding
import lifelines
from lifelines import CoxPHFitter
from lifelines.datasets import load_regression_dataset
from lifelines.utils import k_fold_cross_validation
from lifelines import KaplanMeierFitter
from lifelines.statistics import logrank_test, multivariate_logrank_test
def main( data_location, results_location ):
data_path = os.path.join( HOME_DIR ,data_location ) #, "data.h5" )
results_path = os.path.join( HOME_DIR, results_location )
data_filename = os.path.join( data_path, "data.h5")
fill_filename = os.path.join( results_path, "full_vae_fill.h5" )
save_dir = os.path.join( results_path, "survival_concordance" )
check_and_mkdir(save_dir)
survival_curves_dir = os.path.join( save_dir, "sig_curves" )
check_and_mkdir(survival_curves_dir)
print "HOME_DIR: ", HOME_DIR
print "data_filename: ", data_filename
print "fill_filename: ", fill_filename
print "LOADING stores"
data_store = pd.HDFStore( data_filename, "r" )
fill_store = pd.HDFStore( fill_filename, "r" )
Z_train = fill_store["/Z/TRAIN/Z/mu"]
Z_val = fill_store["/Z/VAL/Z/mu"]
Z = np.vstack( (Z_train.values, Z_val.values) )
n_z = Z.shape[1]
#pdb.set_trace()
z_names = ["z_%d"%z_idx for z_idx in range(Z.shape[1])]
Z = pd.DataFrame( Z, index = np.hstack( (Z_train.index.values, Z_val.index.values)), columns = z_names )
barcodes = np.union1d( Z_train.index.values, Z_val.index.values )
Z=Z.loc[barcodes]
tissues = data_store["/CLINICAL/TISSUE"].loc[barcodes]
    # Overall Survival (OS): the event call is derived from the "vital status"
    # parameter. The time_to_event is in days and equals days_to_death if the
    # patient is deceased; if the patient is still living, the time variable is
    # max(days_to_last_known_alive, days_to_last_followup). This pair of
    # clinical parameters is called _EVENT and _TIME_TO_EVENT on the cancer browser.
ALL_SURVIVAL = data_store["/CLINICAL/data"][["patient.days_to_last_followup","patient.days_to_death","patient.days_to_birth"]]
tissue_barcodes = np.array( ALL_SURVIVAL.index.tolist(), dtype=str )
surv_barcodes = np.array([ x+"_"+y for x,y in tissue_barcodes])
NEW_SURVIVAL = pd.DataFrame( ALL_SURVIVAL.values, index =surv_barcodes, columns = ALL_SURVIVAL.columns )
NEW_SURVIVAL = NEW_SURVIVAL.loc[barcodes]
#clinical = data_store["/CLINICAL/data"].loc[barcodes]
Age = NEW_SURVIVAL[ "patient.days_to_birth" ].values.astype(int)
Times = NEW_SURVIVAL[ "patient.days_to_last_followup" ].fillna(0).values.astype(int)+NEW_SURVIVAL[ "patient.days_to_death" ].fillna(0).values.astype(int)
Events = (1-np.isnan( NEW_SURVIVAL[ "patient.days_to_death" ].astype(float)) ).astype(int)
ok_age_query = Age<-10
ok_age = pp.find(ok_age_query )
tissues = tissues[ ok_age_query ]
#pdb.set_trace()
Age=-Age[ok_age]
Times = Times[ok_age]
Events = Events[ok_age]
barcodes = barcodes[ok_age]
NEW_SURVIVAL = NEW_SURVIVAL.loc[barcodes]
#ok_followup_query = NEW_SURVIVAL[ "patient.days_to_last_followup" ].fillna(0).values>=0
#ok_followup = pp.find( ok_followup_query )
bad_followup_query = NEW_SURVIVAL[ "patient.days_to_last_followup" ].fillna(0).values.astype(int)<0
bad_followup = pp.find( bad_followup_query )
ok_followup_query = 1-bad_followup_query
ok_followup = pp.find( ok_followup_query )
bad_death_query = NEW_SURVIVAL[ "patient.days_to_death" ].fillna(0).values.astype(int)<0
bad_death = pp.find( bad_death_query )
#pdb.set_trace()
Age=Age[ok_followup]
Times = Times[ok_followup]
Events = Events[ok_followup]
barcodes = barcodes[ok_followup]
NEW_SURVIVAL = NEW_SURVIVAL.loc[barcodes]
Z = Z.loc[barcodes]
Z["E"] = Events
Z["T"] = Times
Z["Age"] = np.log(Age)
tissues = data_store["/CLINICAL/TISSUE"].loc[barcodes]
tissue_names = tissues.columns
tissue_idx = np.argmax( tissues.values, 1 )
Z["Tissue"] = tissue_idx
n_tissues = len(tissue_names)
n_random = 100
random_names = ["r_%d"%(trial_idx) for trial_idx in range(n_random)]
alpha=0.02
nbr_to_plot = 5
concordance_values = {}
concordance_random = {}
concordance_z_values = pd.DataFrame( np.nan*np.ones((n_tissues,n_z) ), index = tissue_names, columns=z_names )
concordance_z_random = pd.DataFrame( np.nan*np.ones((n_tissues,n_random) ), index = tissue_names, columns=random_names )
concordance_z_values_xval = pd.DataFrame( np.nan*np.ones((n_tissues,n_z) ), index = tissue_names, columns=z_names )
concordance_I_values = pd.DataFrame( np.nan*np.ones((n_tissues,n_z) ), index = tissue_names, columns=z_names )
concordance_I_random = pd.DataFrame( np.nan*np.ones((n_tissues,n_random) ), index = tissue_names, columns=random_names )
concordance_z_p_values = pd.DataFrame( np.ones( (n_tissues,n_z) ), \
index = tissue_names, \
columns = z_names )
# cf = CoxPHFitter()
# scores = k_fold_cross_validation(cf, Z, 'T', event_col='E', k=5)
# pdb.set_trace()
split_nbr = 2
for t_idx in range(n_tissues):
t_ids = tissue_idx == t_idx
tissue_name = tissue_names[t_idx]
if tissue_name == "gbm":
print "skipping gbm"
continue
print "working %s"%(tissue_name)
bcs = barcodes[t_ids]
Z_tissue = Z.loc[ bcs ]
events = Z_tissue["E"]
times = Z_tissue["T"]
Z_values = Z_tissue[z_names].values
n_tissue = len(bcs)
print " using z_values"
for z_idx in range(n_z):
z = Z_values[:,z_idx]
z_data = Z_tissue[ ["z_%d"%(z_idx), "E","T"] ]
I = np.argsort(z)
z_concordance = lifelines.utils.concordance_index(times[I], z, event_observed=events[I])
z_concordance = max( z_concordance, 1.0-z_concordance )
concordance_z_values["z_%d"%(z_idx)].loc[tissue_name] = z_concordance
print " using random"
for r_idx in range(n_random):
#z = Z_values[:,z_idx]
z = np.random.randn(n_tissue)
I = np.argsort(z) #np.random.permutation(n_tissue)
z_concordance = lifelines.utils.concordance_index(times[I], z, event_observed=events[I])
z_concordance = max( z_concordance, 1.0-z_concordance )
concordance_z_random["r_%d"%(r_idx)].loc[tissue_name] = z_concordance
v = concordance_z_values.loc[tissue_name].values
r = concordance_z_random.loc[tissue_name].values
concordance_z_p_values.loc[tissue_name] = (1.0 + (v[:,np.newaxis]>r).sum(1))/(1.0+len(r))
conc=concordance_z_p_values.loc[tissue_name]
sig = (concordance_z_p_values.loc[tissue_name] < alpha ).astype(int)
z_sig_names = sig[ sig==1 ].index.values
for z_name in z_sig_names:
z_idx = int( z_name.split("_")[1] )
z = Z_values[:,z_idx]
#z_data = Z_tissue[ ["z_%d"%(z_idx), "E","T"] ]
I = np.argsort(z)
cum_events = events[I].cumsum()
I_splits = [] #[[],[]] #np.array_split( I, split_nbr )
I_splits.append( pp.find( cum_events <= events.sum()/2.0 ) )
I_splits.append( pp.find( cum_events > events.sum()/2.0 ) )
#groups = np.zeros(n_tissue)
# k = 1
# for splits in I_splits[1:]:
# groups[splits] = k; k+=1
results = logrank_test(times[I_splits[0]], times[I_splits[-1]], events[ I_splits[0] ], events[ I_splits[-1] ] )
p_value = results.p_value
#results2 = logrank_test(times[I_splits[0]]/365.0, times[I_splits[-1]]/365.0, events[ I_splits[0] ], events[ I_splits[-1] ] )
#pdb.set_trace()
c = conc[ z_name ]
f = pp.figure()
ax= f.add_subplot(111)
kmf = KaplanMeierFitter()
k=0
for splits in I_splits:
kmf.fit(times[splits], event_observed=events[splits], label="q=%d/%d"%(k+1,split_nbr) )
ax=kmf.plot(ax=ax,at_risk_counts=False,show_censors=True,ci_show=False)
k+=1
pp.ylim(0,1)
pp.title( "%s %s p-value = %0.4f concordance = %0.3f "%( tissue_name, z_name, p_value, c ) )
pp.savefig( survival_curves_dir + "/%s_%s_p%0.5f_c%0.3f.png"%(tissue_name, z_name, p_value, c), format="png", dpi=300)
pp.savefig( survival_curves_dir + "/%s_%s_p%0.5f_c%0.3f.png"%(z_name, tissue_name, p_value, c), format="png", dpi=300)
#pdb.set_trace()
concordance_z_random.drop("gbm",inplace=True)
concordance_z_values.drop("gbm",inplace=True)
concordance_z_p_values.drop("gbm",inplace=True)
# concordance_z_p_values = pd.DataFrame( np.ones( concordance_z_values.values.shape), \
# index = concordance_z_values.index, \
# columns = concordance_z_values.columns )
# for tissue in concordance_z_random.index.values:
# v = concordance_z_values.loc[tissue].values
# r = concordance_z_random.loc[tissue].values
# concordance_z_p_values.loc[tissue] = (1.0 + (v[:,np.newaxis]>r).sum(1))/(1.0+len(r))
concordance_z_p_values.to_csv( save_dir + "/concordance_z_p_values.csv" )
concordance_z_random.to_csv( save_dir + "/concordance_z_random.csv" )
concordance_z_values.to_csv( save_dir + "/concordance_z_values.csv" )
#pdb.set_trace()
f = pp.figure()
ax_z = f.add_subplot(221)
ax_log_z = f.add_subplot(223)
ax_p = f.add_subplot(222)
ax_log_p = f.add_subplot(224)
bins_conc=np.linspace(0.5,1,21)
bins_p=np.linspace(0.0,1,21)
ax_z.hist( concordance_z_values.values.flatten(), bins=bins_conc, normed=True, histtype="step", lw=2, log=False)
ax_z.hist( concordance_z_random.values.flatten(), bins=bins_conc, normed=True, histtype="step", lw=2, log=False)
ax_log_z.hist( concordance_z_values.values.flatten(), bins=bins_conc, normed=True, histtype="step", lw=2, log=True)
ax_log_z.hist( concordance_z_random.values.flatten(), bins=bins_conc, normed=True, histtype="step", lw=2, log=True)
ax_p.hist( concordance_z_p_values.values.flatten(), bins=bins_p, normed=True, histtype="step", lw=2, log=False)
ax_log_p.hist( concordance_z_p_values.values.flatten(), bins=bins_p, normed=True, histtype="step", lw=2, log=True)
pp.savefig( save_dir + "/p_values.png", format="png", dpi=300)
return concordance_z_random, concordance_z_values, concordance_z_p_values
#, concordance_z_p_values_xval
if __name__ == "__main__":
data_location = sys.argv[1]
results_location = sys.argv[2]
concordance_z_random, concordance_z_values, concordance_z_p_values = main( data_location, results_location )
|
|
import logging
import os
import threading
from django.apps import apps
from django.conf import settings
from django.core.cache import cache
from django.db import models, connection
from boardinghouse import signals
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.NullHandler())
_thread_locals = threading.local()
class Forbidden(Exception):
"""
An exception that will be raised when an attempt to activate a non-valid
schema is made.
"""
class TemplateSchemaActivation(Forbidden):
"""
An exception that will be raised when a user attempts to activate
the __template__ schema.
"""
def __init__(self, *args, **kwargs):
super(TemplateSchemaActivation, self).__init__(
'Activating template schema forbidden.', *args, **kwargs
)
def get_schema_model():
return apps.get_model(
getattr(settings, 'BOARDINGHOUSE_SCHEMA_MODEL', 'boardinghouse.Schema')
)
def _get_search_path():
cursor = connection.cursor()
cursor.execute('SELECT current_schema()')
search_path = cursor.fetchone()[0]
cursor.close()
return search_path.split(',')
def _set_search_path(search_path):
cursor = connection.cursor()
cursor.execute('SET search_path TO %s,{}'.format(_get_public_schema()), [search_path])
cursor.close()
def _get_public_schema():
return getattr(settings, 'PUBLIC_SCHEMA', 'public')
def _schema_exists(schema_name, cursor=None):
if cursor:
cursor.execute("SELECT schema_name FROM information_schema.schemata WHERE schema_name = %s", [schema_name])
return bool(cursor.fetchone())
cursor = connection.cursor()
try:
return _schema_exists(schema_name, cursor)
finally:
cursor.close()
def get_active_schema_name():
"""
    Get the name of the currently active schema.
    This requires a database query to ask the database what the current
    `search_path` is.
"""
active_schema = getattr(_thread_locals, 'schema', None)
if not active_schema:
reported_schema = _get_search_path()[0]
if _get_schema(reported_schema):
active_schema = reported_schema
else:
active_schema = None
_thread_locals.schema = active_schema
return active_schema
def get_active_schema():
"""
    Get the currently active schema object, if one exists.
"""
return _get_schema(get_active_schema_name())
def get_active_schemata():
"""
Get a (cached) list of all currently active schemata.
"""
schemata = cache.get('active-schemata')
if schemata is None:
schemata = get_schema_model().objects.active()
cache.set('active-schemata', schemata)
return schemata
def get_all_schemata():
"""
Get a (cached) list of all schemata.
"""
schemata = cache.get('all-schemata')
if schemata is None:
schemata = get_schema_model().objects.all()
cache.set('all-schemata', schemata)
return schemata
def _get_schema(schema_name):
"""
Get the matching active schema object for the given name,
if it exists.
"""
if not schema_name:
return
for schema in get_active_schemata():
if schema_name == schema.schema or schema_name == schema:
return schema
def activate_schema(schema_name):
"""
Activate the current schema: this will execute, in the database
connection, something like:
SET search_path TO "foo",public;
It sends signals before and after that the schema will be, and was
activated.
Must be passed a string: the internal name of the schema to activate.
"""
if schema_name == '__template__':
raise TemplateSchemaActivation()
signals.schema_pre_activate.send(sender=None, schema_name=schema_name)
_set_search_path(schema_name)
signals.schema_post_activate.send(sender=None, schema_name=schema_name)
_thread_locals.schema = schema_name
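# Minimal usage sketch (assumes a schema whose internal name is 'tenant_1'
# exists and is active; the name is illustrative):
#   activate_schema('tenant_1')
#   # ORM queries now hit tables in "tenant_1", with shared models still
#   # resolved from the public schema via the search_path set above.
#   deactivate_schema()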
def activate_template_schema():
"""
Activate the template schema. You probably don't want to do this.
"""
_thread_locals.schema = None
schema_name = '__template__'
signals.schema_pre_activate.send(sender=None, schema_name=schema_name)
_set_search_path(schema_name)
signals.schema_post_activate.send(sender=None, schema_name=schema_name)
def get_template_schema():
return get_schema_model()('__template__')
def deactivate_schema(schema=None):
"""
Deactivate the provided (or current) schema.
"""
cursor = connection.cursor()
signals.schema_pre_activate.send(sender=None, schema_name=None)
cursor.execute('SET search_path TO "$user",{}'.format(_get_public_schema()))
signals.schema_post_activate.send(sender=None, schema_name=None)
_thread_locals.schema = None
cursor.close()
def create_schema(schema_name):
cursor = connection.cursor()
if _schema_exists(schema_name):
LOGGER.warn('Attempt to create an existing schema: %s' % schema_name)
return
cursor.execute("SELECT clone_schema('__template__', %s)", [schema_name])
cursor.close()
if schema_name != '__template__':
signals.schema_created.send(sender=get_schema_model(), schema=schema_name)
LOGGER.info('New schema created: %s' % schema_name)
#: These models are required to be shared by the system.
REQUIRED_SHARED_MODELS = [
'auth.user',
'auth.permission',
'auth.group',
'boardinghouse.schema',
'sites.site',
'sessions.session',
'contenttypes.contenttype',
'admin.logentry',
'migrations.migration',
]
def _is_join_model(model):
"""
    We define a model as a join model if all of its
    fields are related fields (or its primary key),
    and there is more than just one field.
I am not 100% happy with this definition.
"""
return all([
(field.primary_key or field.rel)
for field in model._meta.fields
]) and len(model._meta.fields) > 1
def is_shared_model(model):
"""
Is the model (or instance of a model) one that should be in the
public/shared schema?
"""
if model._is_shared_model:
return True
app_model = '%s.%s' % (
model._meta.app_label,
model._meta.model_name
)
# These should be case insensitive!
if app_model in REQUIRED_SHARED_MODELS:
return True
if app_model in settings.SHARED_MODELS:
return True
# Sometimes, we want a join table to be private.
if app_model in settings.PRIVATE_MODELS:
return False
# if all fields are auto or fk, then we are a join model,
# and if all related objects are shared, then we must
# also be shared, unless we were explicitly marked as private
# above.
if _is_join_model(model):
return all([
is_shared_model(field.rel.get_related_field().model)
for field in model._meta.fields if field.rel
])
return False
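# For illustration, the settings consulted above might look like this in a
# project's settings module (the model labels here are made up):
#   SHARED_MODELS = ['myapp.organisation']
#   PRIVATE_MODELS = ['myapp.organisation_staff']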
def is_shared_table(table):
"""
Is the model from the provided database table name shared?
We may need to look and see if we can work out which models
this table joins.
"""
# Get a mapping of all table names to models.
table_map = dict([
(x._meta.db_table, x) for x in models.get_models()
if not x._meta.proxy
])
# If we have a match, see if that one is shared.
if table in table_map:
return is_shared_model(table_map[table])
# It may be a join table.
prefixes = [
(db_table, model) for db_table, model in table_map.items()
if table.startswith(db_table)
]
if len(prefixes) == 1:
db_table, model = prefixes[0]
rel_model = model._meta.get_field_by_name(
table.replace(db_table, '').lstrip('_')
)[0].rel.get_related_field().model
elif len(prefixes) == 0:
# No matching models found.
# Assume this is not a shared table...
return False
else:
return is_shared_model(model)
return is_shared_model(model) and is_shared_model(rel_model)
# Internal helper functions.
def _schema_table_exists():
table_name = get_schema_model()._meta.db_table
cursor = connection.cursor()
cursor.execute("SELECT * FROM information_schema.tables WHERE table_name = %s", [table_name])
return bool(cursor.fetchone())
def _sql_from_file(filename):
"""
A large part of this project is based around how simple it is to
clone a schema's structure into a new schema. This is encapsulated in
an SQL script: this function will install a function from an arbitrary
file.
"""
cursor = connection.cursor()
sql_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'sql', '%s.sql' % filename)
function = " ".join([x.strip() for x in open(sql_file).readlines() if not x.strip().startswith('--')])
function = function.replace("%", "%%")
cursor.execute(function)
cursor.close()
def _wrap_command(command):
def inner(self, *args, **kwargs):
_sql_from_file('clone_schema')
get_template_schema().create_schema()
cursor = connection.cursor()
# In the case of create table statements, we want to make sure
# they go to the public schema, but want reads to come from
# __template__.
cursor.execute('SET search_path TO {},__template__'.format(_get_public_schema()))
cursor.close()
command(self, *args, **kwargs)
deactivate_schema()
# We don't want just active schemata...
_create_all_schemata()
return inner
def _create_all_schemata():
"""
Create all of the schemata, just in case they haven't yet been created.
"""
cursor = connection.cursor()
cursor.execute("SELECT count(*)>0 FROM information_schema.tables WHERE table_name = 'boardinghouse_schema'")
if cursor.fetchone() == (True,):
for schema in get_schema_model().objects.all():
schema.create_schema()
|
|
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
# -*- test-case-name: flocker.provision.test.test_install -*-
"""
Install flocker on a remote node.
"""
import posixpath
from textwrap import dedent
from urlparse import urljoin
from effect import Func, Effect
import yaml
from characteristic import attributes
from flocker.acceptance.testtools import DatasetBackend
from ._common import PackageSource, Variants
from ._ssh import (
run, run_from_args,
sudo_from_args,
put,
run_remotely,
)
from ._effect import sequence
from flocker.cli import configure_ssh
ZFS_REPO = {
'fedora-20': "https://s3.amazonaws.com/archive.zfsonlinux.org/"
"fedora/zfs-release$(rpm -E %dist).noarch.rpm",
'centos-7': "https://s3.amazonaws.com/archive.zfsonlinux.org/"
"epel/zfs-release.el7.noarch.rpm",
}
ARCHIVE_BUCKET = 'clusterhq-archive'
CLUSTERHQ_REPO = {
'fedora-20': "https://s3.amazonaws.com/{archive_bucket}/"
"fedora/clusterhq-release$(rpm -E %dist).noarch.rpm".format(
archive_bucket=ARCHIVE_BUCKET,
),
'centos-7': "https://s3.amazonaws.com/{archive_bucket}/"
"centos/clusterhq-release$(rpm -E %dist).noarch.rpm".format(
archive_bucket=ARCHIVE_BUCKET,
),
}
@attributes(['distribution'])
class DistributionNotSupported(NotImplementedError):
"""
Raised when the provisioning step is not supported on the given
distribution.
:ivar bytes distribution: The distribution that isn't supported.
"""
def __str__(self):
return "Distribution not supported: %s" % (self.distribution,)
def task_configure_brew_path():
"""
Configure non-interactive shell to use all paths.
By default, OSX provides a minimal $PATH, for programs run via SSH. In
particular /usr/local/bin (which contains `brew`) isn't in the path. This
configures the path to have it there.
"""
return put(
path='.bashrc',
content=dedent("""\
if [ -x /usr/libexec/path_helper ]; then
eval `/usr/libexec/path_helper -s`
fi
"""))
def task_test_homebrew(recipe):
"""
The commands used to install a Homebrew recipe for Flocker and test it.
This taps the ClusterHQ/tap tap, which means that Homebrew looks in the
ClusterHQ/homebrew-tap GitHub repository for any recipe name given.
    :param bytes recipe: The name of a recipe in either the official Homebrew
tap or ClusterHQ/tap, or a URL pointing to a recipe.
:return Effect: Commands used to install a Homebrew recipe for Flocker and
test it.
"""
return sequence([
run_from_args(['brew', 'tap', 'ClusterHQ/tap']),
run("brew update"),
run("brew install {recipe}".format(recipe=recipe)),
run("brew test {recipe}".format(recipe=recipe)),
])
def task_install_ssh_key():
"""
Install the authorized ssh keys of the current user for root as well.
"""
return sequence([
sudo_from_args(['cp', '.ssh/authorized_keys',
'/root/.ssh/authorized_keys']),
])
def task_upgrade_kernel(distribution):
"""
Upgrade kernel.
"""
if distribution == 'centos-7':
return sequence([
run_from_args([
"yum", "install", "-y", "kernel-devel", "kernel"]),
run_from_args(['sync']),
])
elif distribution == 'ubuntu-14.04':
# Not required.
return sequence([])
else:
raise DistributionNotSupported(distribution=distribution)
def task_disable_selinux(distribution):
"""
Disable SELinux for this session and permanently.
XXX: Remove this when we work out suitable SELinux settings.
See https://clusterhq.atlassian.net/browse/FLOC-619.
"""
if distribution in ('centos-7',):
return sequence([
run("if selinuxenabled; then setenforce 0; fi"),
run("test -e /etc/selinux/config && "
"sed --in-place='.preflocker' "
"'s/^SELINUX=.*$/SELINUX=disabled/g' "
"/etc/selinux/config"),
])
elif distribution in ('fedora-20', 'ubuntu-14.04'):
# Fedora and Ubuntu do not have SELinux enabled
return sequence([])
else:
raise DistributionNotSupported(distribution=distribution)
def task_install_control_certificates(ca_cert, control_cert, control_key):
"""
Install certificates and private key required by the control service.
:param FilePath ca_cert: Path to CA certificate on local machine.
:param FilePath control_cert: Path to control service certificate on
local machine.
:param FilePath control_key: Path to control service private key
local machine.
"""
    # It would be better if permissions were correct from the start.
# https://clusterhq.atlassian.net/browse/FLOC-1922
return sequence([
run('mkdir -p /etc/flocker'),
run('chmod u=rwX,g=,o= /etc/flocker'),
put(path="/etc/flocker/cluster.crt", content=ca_cert.getContent()),
put(path="/etc/flocker/control-service.crt",
content=control_cert.getContent()),
put(path="/etc/flocker/control-service.key",
content=control_key.getContent()),
])
def task_install_node_certificates(ca_cert, node_cert, node_key):
"""
Install certificates and private key required by a node.
:param FilePath ca_cert: Path to CA certificate on local machine.
:param FilePath node_cert: Path to node certificate on
local machine.
:param FilePath node_key: Path to node private key
local machine.
"""
    # It would be better if permissions were correct from the start.
# https://clusterhq.atlassian.net/browse/FLOC-1922
return sequence([
run('mkdir -p /etc/flocker'),
run('chmod u=rwX,g=,o= /etc/flocker'),
put(path="/etc/flocker/cluster.crt", content=ca_cert.getContent()),
put(path="/etc/flocker/node.crt",
content=node_cert.getContent()),
put(path="/etc/flocker/node.key",
content=node_key.getContent()),
])
def task_enable_docker(distribution):
"""
Start docker and configure it to start automatically.
"""
if distribution in ('fedora-20', 'centos-7'):
return sequence([
run_from_args(["systemctl", "enable", "docker.service"]),
run_from_args(["systemctl", "start", "docker.service"]),
])
elif distribution == 'ubuntu-14.04':
# Ubuntu enables docker service during installation
return sequence([])
else:
raise DistributionNotSupported(distribution=distribution)
def open_firewalld(service):
"""
Open firewalld port for a service.
:param str service: Name of service.
"""
return sequence([
run_from_args(command + [service])
for command in [['firewall-cmd', '--permanent', '--add-service'],
['firewall-cmd', '--add-service']]])
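# For reference, open_firewalld('flocker-control-api') expands to running
# these two commands (shown with the argv lists joined by spaces):
#   firewall-cmd --permanent --add-service flocker-control-api
#   firewall-cmd --add-service flocker-control-api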
def open_ufw(service):
"""
Open ufw port for a service.
:param str service: Name of service.
"""
return sequence([
run_from_args(['ufw', 'allow', service])
])
def task_enable_flocker_control(distribution):
"""
Enable flocker-control service.
"""
if distribution in ('centos-7', 'fedora-20'):
return sequence([
run_from_args(['systemctl', 'enable', 'flocker-control']),
run_from_args(['systemctl', 'start', 'flocker-control']),
])
elif distribution == 'ubuntu-14.04':
# Since the flocker-control service is currently installed
# alongside the flocker-dataset-agent service, the default control
# service configuration does not automatically start the
# service. Here, we provide an override file to start it.
return sequence([
put(
path='/etc/init/flocker-control.override',
content=dedent('''\
start on runlevel [2345]
stop on runlevel [016]
'''),
),
run("echo 'flocker-control-api\t4523/tcp\t\t\t# Flocker Control API port' >> /etc/services"), # noqa
run("echo 'flocker-control-agent\t4524/tcp\t\t\t# Flocker Control Agent port' >> /etc/services"), # noqa
run_from_args(['service', 'flocker-control', 'start']),
])
else:
raise DistributionNotSupported(distribution=distribution)
def task_open_control_firewall(distribution):
"""
Open the firewall for flocker-control.
"""
if distribution in ('centos-7', 'fedora-20'):
open_firewall = open_firewalld
elif distribution == 'ubuntu-14.04':
open_firewall = open_ufw
else:
raise DistributionNotSupported(distribution=distribution)
return sequence([
open_firewall(service)
for service in ['flocker-control-api', 'flocker-control-agent']
])
def task_enable_flocker_agent(distribution, control_node,
dataset_backend=DatasetBackend.zfs):
"""
Configure and enable the flocker agents.
:param bytes control_node: The address of the control agent.
:param DatasetBackend dataset_backend: The volume backend the nodes are
configured with. (This has a default for use in the documentation).
"""
put_config_file = put(
path='/etc/flocker/agent.yml',
content=yaml.safe_dump(
{
"version": 1,
"control-service": {
"hostname": control_node,
"port": 4524,
},
"dataset": {
"backend": dataset_backend.name,
},
},
),
)
if distribution in ('centos-7', 'fedora-20'):
return sequence([
put_config_file,
run_from_args(['systemctl', 'enable', 'flocker-dataset-agent']),
run_from_args(['systemctl', 'start', 'flocker-dataset-agent']),
run_from_args(['systemctl', 'enable', 'flocker-container-agent']),
run_from_args(['systemctl', 'start', 'flocker-container-agent']),
])
elif distribution == 'ubuntu-14.04':
return sequence([
put_config_file,
run_from_args(['service', 'flocker-dataset-agent', 'start']),
run_from_args(['service', 'flocker-container-agent', 'start']),
])
else:
raise DistributionNotSupported(distribution=distribution)
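# Illustrative sketch (not part of the original module): the agent.yml content
# written by task_enable_flocker_agent above can be rendered locally for
# inspection. The control-node address below is a hypothetical placeholder.
def _example_render_agent_yml(control_node="203.0.113.10",
                              dataset_backend=DatasetBackend.zfs):
    """Hypothetical helper: return the agent.yml text that would be uploaded."""
    return yaml.safe_dump({
        "version": 1,
        "control-service": {"hostname": control_node, "port": 4524},
        "dataset": {"backend": dataset_backend.name},
    })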
def task_create_flocker_pool_file():
"""
    Create a file-backed ZFS pool for flocker.
"""
return sequence([
run('mkdir -p /var/opt/flocker'),
run('truncate --size 10G /var/opt/flocker/pool-vdev'),
run('zpool create flocker /var/opt/flocker/pool-vdev'),
])
def task_install_zfs(distribution, variants=set()):
"""
Install ZFS on a node.
:param bytes distribution: The distribution the node is running.
    :param set variants: The set of variant configurations to use when
        provisioning
"""
commands = []
if distribution == 'ubuntu-14.04':
commands += [
# ZFS not available in base Ubuntu - add ZFS repo
run_from_args([
"add-apt-repository", "-y", "ppa:zfs-native/stable"]),
]
commands += [
# Update to read package info from new repos
run_from_args([
"apt-get", "update"]),
# Package spl-dkms sometimes does not have libc6-dev as a
# dependency, add it before ZFS installation requires it.
# See https://github.com/zfsonlinux/zfs/issues/3298
run_from_args(["apt-get", "-y", "install", "libc6-dev"]),
run_from_args(['apt-get', '-y', 'install', 'zfsutils']),
]
elif distribution in ('fedora-20', 'centos-7'):
commands += [
run_from_args(["yum", "install", "-y", ZFS_REPO[distribution]]),
]
if distribution == 'centos-7':
commands.append(
run_from_args(["yum", "install", "-y", "epel-release"]))
if Variants.ZFS_TESTING in variants:
commands += [
run_from_args(['yum', 'install', '-y', 'yum-utils']),
run_from_args([
'yum-config-manager', '--enable', 'zfs-testing'])
]
commands += [
run_from_args(['yum', 'install', '-y', 'zfs']),
]
else:
raise DistributionNotSupported(distribution)
return sequence(commands)
def configure_zfs(node, variants):
"""
Configure ZFS for use as a Flocker backend.
:param INode node: The node to configure ZFS on.
    :param set variants: The set of variant configurations to use when
        provisioning
:return Effect:
"""
return sequence([
run_remotely(
username='root',
address=node.address,
commands=task_upgrade_kernel(
distribution=node.distribution),
),
node.reboot(),
run_remotely(
username='root',
address=node.address,
commands=sequence([
task_install_zfs(
distribution=node.distribution,
variants=variants),
task_create_flocker_pool_file(),
]),
),
Effect(
Func(lambda: configure_ssh(node.address, 22))),
])
def task_install_flocker(
distribution=None,
package_source=PackageSource()):
"""
Install flocker on a distribution.
:param bytes distribution: The distribution the node is running.
:param PackageSource package_source: The source from which to install the
package.
"""
if package_source.branch:
result_path = posixpath.join(
'/results/omnibus/', package_source.branch, distribution)
base_url = urljoin(package_source.build_server, result_path)
else:
base_url = None
if distribution == 'ubuntu-14.04':
commands = [
# Ensure add-apt-repository command is available
run_from_args([
"apt-get", "-y", "install", "software-properties-common"]),
# Add Docker repo for recent Docker versions
run_from_args([
"add-apt-repository", "-y", "ppa:james-page/docker"]),
# Add ClusterHQ repo for installation of Flocker packages.
run_from_args([
'add-apt-repository', '-y',
'deb https://s3.amazonaws.com/clusterhq-archive/ubuntu 14.04/amd64/' # noqa
])
]
if base_url:
# Add BuildBot repo for testing
commands.append(run_from_args([
"add-apt-repository", "-y", "deb {} /".format(base_url)]))
commands += [
# Update to read package info from new repos
run_from_args([
"apt-get", "update"]),
]
if package_source.os_version:
package = 'clusterhq-flocker-node=%s' % (
package_source.os_version,)
else:
package = 'clusterhq-flocker-node'
# Install Flocker node and all dependencies
commands.append(run_from_args([
'apt-get', '-y', '--force-yes', 'install', package]))
return sequence(commands)
else:
commands = [
run(command="yum install -y " + CLUSTERHQ_REPO[distribution])
]
if base_url:
repo = dedent(b"""\
[clusterhq-build]
name=clusterhq-build
baseurl=%s
gpgcheck=0
enabled=0
""") % (base_url,)
commands.append(put(content=repo,
path='/etc/yum.repos.d/clusterhq-build.repo'))
branch_opt = ['--enablerepo=clusterhq-build']
else:
branch_opt = []
if package_source.os_version:
package = 'clusterhq-flocker-node-%s' % (
package_source.os_version,)
else:
package = 'clusterhq-flocker-node'
commands.append(run_from_args(
["yum", "install"] + branch_opt + ["-y", package]))
return sequence(commands)
ACCEPTANCE_IMAGES = [
"postgres:latest",
"clusterhq/mongodb:latest",
]
def task_pull_docker_images(images=ACCEPTANCE_IMAGES):
"""
Pull docker images.
:param list images: List of images to pull. Defaults to images used in
acceptance tests.
"""
return sequence([
run_from_args(['docker', 'pull', image]) for image in images
])
def task_enable_updates_testing(distribution):
"""
Enable the distribution's proposed updates repository.
:param bytes distribution: See func:`task_install_flocker`
"""
if distribution == 'fedora-20':
return sequence([
run_from_args(['yum', 'install', '-y', 'yum-utils']),
run_from_args([
'yum-config-manager', '--enable', 'updates-testing'])
])
else:
raise DistributionNotSupported(distribution=distribution)
def task_enable_docker_head_repository(distribution):
"""
Enable the distribution's repository containing in-development docker
builds.
:param bytes distribution: See func:`task_install_flocker`
"""
if distribution == 'fedora-20':
return sequence([
run_from_args(['yum', 'install', '-y', 'yum-utils']),
run_from_args([
'yum-config-manager',
'--add-repo',
'https://copr.fedoraproject.org/coprs/lsm5/docker-io/repo/fedora-20/lsm5-docker-io-fedora-20.repo', # noqa
])
])
elif distribution == "centos-7":
return sequence([
put(content=dedent("""\
[virt7-testing]
name=virt7-testing
baseurl=http://cbs.centos.org/repos/virt7-testing/x86_64/os/
enabled=1
gpgcheck=0
"""),
path="/etc/yum.repos.d/virt7-testing.repo")
])
else:
raise DistributionNotSupported(distribution=distribution)
def provision(distribution, package_source, variants):
"""
Provision the node for running flocker.
    This drives all the common Fedora 20 installation steps in:
     * http://doc-dev.clusterhq.com/gettingstarted/installation.html#installing-on-fedora-20 # noqa
:param bytes distribution: See func:`task_install_flocker`
:param PackageSource package_source: See func:`task_install_flocker`
:param set variants: The set of variant configurations to use when
provisioning
"""
commands = []
if Variants.DISTRO_TESTING in variants:
commands.append(task_enable_updates_testing(distribution))
if Variants.DOCKER_HEAD in variants:
commands.append(task_enable_docker_head_repository(distribution))
commands.append(
task_install_flocker(
package_source=package_source, distribution=distribution))
    if distribution in ('centos-7',):
commands.append(task_disable_selinux(distribution))
commands.append(task_enable_docker(distribution))
return sequence(commands)
def configure_cluster(control_node, agent_nodes,
certificates, dataset_backend):
"""
Configure flocker-control, flocker-dataset-agent and
flocker-container-agent on a collection of nodes.
:param INode control_node: The control node.
:param INode agent_nodes: List of agent nodes.
:param Certificates certificates: Certificates to upload.
:param DatasetBackend dataset_backend: Dataset backend to configure.
"""
return sequence([
run_remotely(
username='root',
address=control_node.address,
commands=sequence([
task_install_control_certificates(
certificates.cluster.certificate,
certificates.control.certificate,
certificates.control.key),
task_enable_flocker_control(control_node.distribution),
]),
),
sequence([
sequence([
run_remotely(
username='root',
address=node.address,
commands=sequence([
task_install_node_certificates(
certificates.cluster.certificate,
certnkey.certificate,
certnkey.key),
task_enable_flocker_agent(
distribution=node.distribution,
control_node=control_node.address,
dataset_backend=dataset_backend,
)]),
),
]) for certnkey, node in zip(certificates.nodes, agent_nodes)
])
])
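# Illustrative sketch (not part of the original module): provisioning a single
# node end-to-end by feeding the provision() effect into run_remotely(), the
# same pattern configure_zfs() uses above. ``node`` is assumed to be an INode
# and ``source`` a PackageSource; both names are hypothetical.
def _example_provision_node(node, source, variants=frozenset()):
    """Hypothetical helper: install flocker and start Docker on one node."""
    return run_remotely(
        username='root',
        address=node.address,
        commands=provision(
            distribution=node.distribution,
            package_source=source,
            variants=variants),
    )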
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides an interface for working with multiple event files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import threading
import six
import tensorflow as tf
from tensorboard.backend.event_processing import directory_watcher
from tensorboard.backend.event_processing import plugin_event_accumulator as event_accumulator # pylint: disable=line-too-long
from tensorboard.backend.event_processing import io_wrapper
class EventMultiplexer(object):
"""An `EventMultiplexer` manages access to multiple `EventAccumulator`s.
Each `EventAccumulator` is associated with a `run`, which is a self-contained
TensorFlow execution. The `EventMultiplexer` provides methods for extracting
information about events from multiple `run`s.
Example usage for loading specific runs from files:
```python
x = EventMultiplexer({'run1': 'path/to/run1', 'run2': 'path/to/run2'})
x.Reload()
```
Example usage for loading a directory where each subdirectory is a run
```python
(eg:) /parent/directory/path/
/parent/directory/path/run1/
/parent/directory/path/run1/events.out.tfevents.1001
/parent/directory/path/run1/events.out.tfevents.1002
/parent/directory/path/run2/
/parent/directory/path/run2/events.out.tfevents.9232
/parent/directory/path/run3/
/parent/directory/path/run3/events.out.tfevents.9232
x = EventMultiplexer().AddRunsFromDirectory('/parent/directory/path')
(which is equivalent to:)
  x = EventMultiplexer({'run1': '/parent/directory/path/run1', 'run2':...})
```
If you would like to watch `/parent/directory/path`, wait for it to be created
(if necessary) and then periodically pick up new runs, use
`AutoloadingMultiplexer`
@@Tensors
"""
def __init__(self,
run_path_map=None,
size_guidance=None,
tensor_size_guidance=None,
purge_orphaned_data=True):
"""Constructor for the `EventMultiplexer`.
Args:
run_path_map: Dict `{run: path}` which specifies the
name of a run, and the path to find the associated events. If it is
None, then the EventMultiplexer initializes without any runs.
size_guidance: A dictionary mapping from `tagType` to the number of items
to store for each tag of that type. See
`event_accumulator.EventAccumulator` for details.
tensor_size_guidance: A dictionary mapping from `plugin_name` to
the number of items to store for each tag of that type. See
`event_accumulator.EventAccumulator` for details.
purge_orphaned_data: Whether to discard any events that were "orphaned" by
a TensorFlow restart.
"""
tf.logging.info('Event Multiplexer initializing.')
self._accumulators_mutex = threading.Lock()
self._accumulators = {}
self._paths = {}
self._reload_called = False
self._size_guidance = (size_guidance or
event_accumulator.DEFAULT_SIZE_GUIDANCE)
self._tensor_size_guidance = tensor_size_guidance
self.purge_orphaned_data = purge_orphaned_data
if run_path_map is not None:
      tf.logging.info('Event Multiplexer doing initialization load for %s',
run_path_map)
for (run, path) in six.iteritems(run_path_map):
self.AddRun(path, run)
tf.logging.info('Event Multiplexer done initializing')
def AddRun(self, path, name=None):
"""Add a run to the multiplexer.
If the name is not specified, it is the same as the path.
If a run by that name exists, and we are already watching the right path,
do nothing. If we are watching a different path, replace the event
accumulator.
If `Reload` has been called, it will `Reload` the newly created
accumulators.
Args:
path: Path to the event files (or event directory) for given run.
name: Name of the run to add. If not provided, is set to path.
Returns:
The `EventMultiplexer`.
"""
name = name or path
accumulator = None
with self._accumulators_mutex:
if name not in self._accumulators or self._paths[name] != path:
if name in self._paths and self._paths[name] != path:
# TODO(@dandelionmane) - Make it impossible to overwrite an old path
# with a new path (just give the new path a distinct name)
tf.logging.warning('Conflict for name %s: old path %s, new path %s',
name, self._paths[name], path)
tf.logging.info('Constructing EventAccumulator for %s', path)
accumulator = event_accumulator.EventAccumulator(
path,
size_guidance=self._size_guidance,
tensor_size_guidance=self._tensor_size_guidance,
purge_orphaned_data=self.purge_orphaned_data)
self._accumulators[name] = accumulator
self._paths[name] = path
if accumulator:
if self._reload_called:
accumulator.Reload()
return self
def AddRunsFromDirectory(self, path, name=None):
"""Load runs from a directory; recursively walks subdirectories.
If path doesn't exist, no-op. This ensures that it is safe to call
`AddRunsFromDirectory` multiple times, even before the directory is made.
If path is a directory, load event files in the directory (if any exist) and
    recursively call AddRunsFromDirectory on any subdirectories. This means you
can call AddRunsFromDirectory at the root of a tree of event logs and
TensorBoard will load them all.
If the `EventMultiplexer` is already loaded this will cause
the newly created accumulators to `Reload()`.
Args:
path: A string path to a directory to load runs from.
name: Optionally, what name to apply to the runs. If name is provided
and the directory contains run subdirectories, the name of each subrun
is the concatenation of the parent name and the subdirectory name. If
        name is provided and the directory contains event files, then a run
        called "name" is added with the events from the path.
Raises:
ValueError: If the path exists and isn't a directory.
Returns:
The `EventMultiplexer`.
"""
tf.logging.info('Starting AddRunsFromDirectory: %s', path)
for subdir in GetLogdirSubdirectories(path):
tf.logging.info('Adding events from directory %s', subdir)
rpath = os.path.relpath(subdir, path)
subname = os.path.join(name, rpath) if name else rpath
self.AddRun(subdir, name=subname)
tf.logging.info('Done with AddRunsFromDirectory: %s', path)
return self
def Reload(self):
"""Call `Reload` on every `EventAccumulator`."""
tf.logging.info('Beginning EventMultiplexer.Reload()')
self._reload_called = True
# Build a list so we're safe even if the list of accumulators is modified
# even while we're reloading.
with self._accumulators_mutex:
items = list(self._accumulators.items())
names_to_delete = set()
for name, accumulator in items:
try:
accumulator.Reload()
except (OSError, IOError) as e:
tf.logging.error("Unable to reload accumulator '%s': %s", name, e)
except directory_watcher.DirectoryDeletedError:
names_to_delete.add(name)
with self._accumulators_mutex:
for name in names_to_delete:
tf.logging.warning("Deleting accumulator '%s'", name)
del self._accumulators[name]
tf.logging.info('Finished with EventMultiplexer.Reload()')
return self
def PluginAssets(self, plugin_name):
"""Get index of runs and assets for a given plugin.
Args:
plugin_name: Name of the plugin we are checking for.
Returns:
A dictionary that maps from run_name to a list of plugin
assets for that run.
"""
with self._accumulators_mutex:
# To avoid nested locks, we construct a copy of the run-accumulator map
items = list(six.iteritems(self._accumulators))
return {run: accum.PluginAssets(plugin_name) for run, accum in items}
def RetrievePluginAsset(self, run, plugin_name, asset_name):
"""Return the contents for a specific plugin asset from a run.
Args:
run: The string name of the run.
plugin_name: The string name of a plugin.
asset_name: The string name of an asset.
Returns:
The string contents of the plugin asset.
Raises:
KeyError: If the asset is not available.
"""
accumulator = self.GetAccumulator(run)
return accumulator.RetrievePluginAsset(plugin_name, asset_name)
def FirstEventTimestamp(self, run):
"""Return the timestamp of the first event of the given run.
This may perform I/O if no events have been loaded yet for the run.
Args:
run: A string name of the run for which the timestamp is retrieved.
Returns:
The wall_time of the first event of the run, which will typically be
seconds since the epoch.
Raises:
KeyError: If the run is not found.
ValueError: If the run has no events loaded and there are no events on
disk to load.
"""
accumulator = self.GetAccumulator(run)
return accumulator.FirstEventTimestamp()
def Scalars(self, run, tag):
"""Retrieve the scalar events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.ScalarEvents`.
"""
accumulator = self.GetAccumulator(run)
return accumulator.Scalars(tag)
def Graph(self, run):
"""Retrieve the graph associated with the provided run.
Args:
run: A string name of a run to load the graph for.
Raises:
KeyError: If the run is not found.
ValueError: If the run does not have an associated graph.
Returns:
The `GraphDef` protobuf data structure.
"""
accumulator = self.GetAccumulator(run)
return accumulator.Graph()
def MetaGraph(self, run):
"""Retrieve the metagraph associated with the provided run.
Args:
run: A string name of a run to load the graph for.
Raises:
KeyError: If the run is not found.
ValueError: If the run does not have an associated graph.
Returns:
The `MetaGraphDef` protobuf data structure.
"""
accumulator = self.GetAccumulator(run)
return accumulator.MetaGraph()
def RunMetadata(self, run, tag):
"""Get the session.run() metadata associated with a TensorFlow run and tag.
Args:
run: A string name of a TensorFlow run.
tag: A string name of the tag associated with a particular session.run().
Raises:
KeyError: If the run is not found, or the tag is not available for the
given run.
Returns:
The metadata in the form of `RunMetadata` protobuf data structure.
"""
accumulator = self.GetAccumulator(run)
return accumulator.RunMetadata(tag)
def Audio(self, run, tag):
"""Retrieve the audio events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.AudioEvents`.
"""
accumulator = self.GetAccumulator(run)
return accumulator.Audio(tag)
def Tensors(self, run, tag):
"""Retrieve the tensor events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.TensorEvent`s.
"""
accumulator = self.GetAccumulator(run)
return accumulator.Tensors(tag)
def PluginRunToTagToContent(self, plugin_name):
"""Returns a 2-layer dictionary of the form {run: {tag: content}}.
The `content` referred above is the content field of the PluginData proto
for the specified plugin within a Summary.Value proto.
Args:
plugin_name: The name of the plugin for which to fetch content.
Returns:
A dictionary of the form {run: {tag: content}}.
"""
mapping = {}
for run in self.Runs():
try:
tag_to_content = self.GetAccumulator(run).PluginTagToContent(
plugin_name)
except KeyError:
# This run lacks content for the plugin. Try the next run.
continue
mapping[run] = tag_to_content
return mapping
def SummaryMetadata(self, run, tag):
"""Return the summary metadata for the given tag on the given run.
Args:
run: A string name of the run for which summary metadata is to be
retrieved.
tag: A string name of the tag whose summary metadata is to be
retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
A `tf.SummaryMetadata` protobuf.
"""
accumulator = self.GetAccumulator(run)
return accumulator.SummaryMetadata(tag)
def Runs(self):
"""Return all the run names in the `EventMultiplexer`.
Returns:
```
{runName: { scalarValues: [tagA, tagB, tagC],
graph: true, meta_graph: true}}
```
"""
with self._accumulators_mutex:
# To avoid nested locks, we construct a copy of the run-accumulator map
items = list(six.iteritems(self._accumulators))
return {run_name: accumulator.Tags() for run_name, accumulator in items}
def RunPaths(self):
"""Returns a dict mapping run names to event file paths."""
return self._paths
def GetAccumulator(self, run):
"""Returns EventAccumulator for a given run.
Args:
run: String name of run.
Returns:
An EventAccumulator object.
Raises:
KeyError: If run does not exist.
"""
with self._accumulators_mutex:
return self._accumulators[run]
def GetLogdirSubdirectories(path):
"""Returns subdirectories with event files on path."""
if tf.gfile.Exists(path) and not tf.gfile.IsDirectory(path):
raise ValueError('GetLogdirSubdirectories: path exists and is not a '
'directory, %s' % path)
# ListRecursively just yields nothing if the path doesn't exist.
return (
subdir
for (subdir, files) in io_wrapper.ListRecursively(path)
if list(filter(event_accumulator.IsTensorFlowEventsFile, files))
)
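# Illustrative sketch (not part of the original module): the typical read path,
# mirroring the `EventMultiplexer` class docstring above. `logdir`, `run` and
# `tag` are hypothetical placeholders.
def _example_read_scalars(logdir, run, tag):
  """Hypothetical helper: load every run under `logdir` and fetch one series."""
  multiplexer = EventMultiplexer()
  multiplexer.AddRunsFromDirectory(logdir)
  multiplexer.Reload()
  return multiplexer.Scalars(run, tag)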
|
|
# Copyright (c) 2008-2009 Pedro Matiello <pmatiello@gmail.com>
# Salim Fadhley <sal@stodge.org>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
class common( object ):
"""
Standard methods common to all graph classes.
@sort: __eq__, __getitem__, __iter__, __len__, __repr__, __str__, add_graph, add_nodes,
add_spanning_tree, complete, inverse, order, reverse
"""
def __str__(self):
"""
Return a string representing the graph when requested by str() (or print).
@rtype: string
@return: String representing the graph.
"""
str_nodes = repr( self.nodes() )
str_edges = repr( self.edges() )
return "%s %s" % ( str_nodes, str_edges )
def __repr__(self):
"""
Return a string representing the graph when requested by repr()
@rtype: string
@return: String representing the graph.
"""
return "<%s.%s %s>" % ( self.__class__.__module__, self.__class__.__name__, str(self) )
def __iter__(self):
"""
        Return an iterator passing through all nodes in the graph.
@rtype: iterator
@return: Iterator passing through all nodes in the graph.
"""
for n in self.nodes():
yield n
def __len__(self):
"""
Return the order of self when requested by len().
@rtype: number
        @return: Number of nodes in the graph.
"""
return self.order()
def __getitem__(self, node):
"""
        Return an iterator passing through all neighbors of the given node.
@rtype: iterator
@return: Iterator passing through all neighbors of the given node.
"""
for n in self.neighbors( node ):
yield n
def order(self):
"""
        Return the order of self; this is defined as the number of nodes in the graph.
        @rtype: number
        @return: Number of nodes in the graph.
return len(self.nodes())
def add_nodes(self, nodelist):
"""
Add given nodes to the graph.
@attention: While nodes can be of any type, it's strongly recommended to use only
numbers and single-line strings as node identifiers if you intend to use write().
        Objects used to identify nodes absolutely must be hashable. If you need to attach a mutable
or non-hashable node, consider using the labeling feature.
@type nodelist: list
@param nodelist: List of nodes to be added to the graph.
"""
for each in nodelist:
self.add_node(each)
def add_graph(self, other):
"""
Add other graph to this graph.
@attention: Attributes and labels are not preserved.
@type other: graph
@param other: Graph
"""
        self.add_nodes(n for n in other.nodes() if n not in self.nodes())
for each_node in other.nodes():
for each_edge in other.neighbors(each_node):
if (not self.has_edge((each_node, each_edge))):
self.add_edge((each_node, each_edge))
def add_spanning_tree(self, st):
"""
Add a spanning tree to the graph.
@type st: dictionary
@param st: Spanning tree.
"""
self.add_nodes(list(st.keys()))
for each in st:
if (st[each] is not None):
self.add_edge((st[each], each))
def complete(self):
"""
Make the graph a complete graph.
@attention: This will modify the current graph.
"""
for each in self.nodes():
for other in self.nodes():
if (each != other and not self.has_edge((each, other))):
self.add_edge((each, other))
def inverse(self):
"""
Return the inverse of the graph.
@rtype: graph
@return: Complement graph for the graph.
"""
inv = self.__class__()
inv.add_nodes(self.nodes())
inv.complete()
for each in self.edges():
if (inv.has_edge(each)):
inv.del_edge(each)
return inv
def reverse(self):
"""
        Generate the reverse of a directed graph; undirected graph types cannot be reversed.
        Attributes & weights are preserved.
        @rtype: digraph
        @return: The reversed digraph.
"""
assert self.DIRECTED, "Undirected graph types such as %s cannot be reversed" % self.__class__.__name__
N = self.__class__()
#- Add the nodes
N.add_nodes( n for n in self.nodes() )
#- Add the reversed edges
for (u, v) in self.edges():
wt = self.edge_weight((u, v))
label = self.edge_label((u, v))
attributes = self.edge_attributes((u, v))
N.add_edge((v, u), wt, label, attributes)
return N
def __eq__(self, other):
"""
Return whether this graph is equal to another one.
@type other: graph, digraph
@param other: Other graph or digraph
@rtype: boolean
@return: Whether this graph and the other are equal.
"""
def nodes_eq():
for each in self:
if (not other.has_node(each)): return False
for each in other:
if (not self.has_node(each)): return False
return True
def edges_eq():
for edge in self.edges():
if (not other.has_edge(edge)): return False
for edge in other.edges():
if (not self.has_edge(edge)): return False
return True
try:
return nodes_eq() and edges_eq()
except AttributeError:
return False
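# Illustrative sketch (not part of the original module): `common` is a mixin;
# a concrete graph class must supply nodes(), edges(), neighbors(), add_node(),
# add_edge(), has_edge() and del_edge() (plus a DIRECTED flag for reverse()).
# `graph_cls` below stands for any such class, e.g. pygraph.classes.graph.graph.
def _example_complement(graph_cls):
    """
    Hypothetical helper: build a three-node graph with a single edge and
    return its complement via inverse().
    """
    gr = graph_cls()
    gr.add_nodes([1, 2, 3])
    gr.add_edge((1, 2))
    return gr.inverse()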
|
|
import collections
from supriya import CalculationRate
from supriya.synthdefs import PureUGen, UGen
class COsc(PureUGen):
"""
A chorusing wavetable oscillator.
::
>>> cosc = supriya.ugens.COsc.ar(
... beats=0.5,
... buffer_id=23,
... frequency=440,
... )
>>> cosc
COsc.ar()
"""
_ordered_input_names = collections.OrderedDict(
[("buffer_id", None), ("frequency", 440.0), ("beats", 0.5)]
)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
class DegreeToKey(PureUGen):
"""
    A signal-to-modal-pitch converter.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> degree_to_key = supriya.ugens.DegreeToKey.ar(
... buffer_id=23,
... octave=12,
... source=source,
... )
>>> degree_to_key
DegreeToKey.ar()
"""
_ordered_input_names = collections.OrderedDict(
[("buffer_id", None), ("source", None), ("octave", 12)]
)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
class Impulse(PureUGen):
"""
A non-band-limited single-sample impulse generator unit generator.
::
>>> supriya.ugens.Impulse.ar()
Impulse.ar()
"""
_ordered_input_names = collections.OrderedDict(
[("frequency", 440.0), ("phase", 0.0)]
)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
class Index(PureUGen):
"""
A clipping buffer indexer.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> index = supriya.ugens.Index.ar(
... buffer_id=23,
... source=source,
... )
>>> index
Index.ar()
"""
_ordered_input_names = collections.OrderedDict(
[("buffer_id", None), ("source", None)]
)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
class LFCub(PureUGen):
"""
A sine-like oscillator unit generator.
::
>>> supriya.ugens.LFCub.ar()
LFCub.ar()
"""
_ordered_input_names = collections.OrderedDict(
[("frequency", 440.0), ("initial_phase", 0.0)]
)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
class LFGauss(PureUGen):
"""
A non-band-limited gaussian function oscillator.
::
>>> supriya.ugens.LFGauss.ar()
LFGauss.ar()
"""
### CLASS VARIABLES ###
_ordered_input_names = collections.OrderedDict(
[
("duration", 1),
("width", 0.1),
("initial_phase", 0),
("loop", 1),
("done_action", 0),
]
)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
### INITIALIZER ###
def __init__(
self,
calculation_rate=None,
done_action=0,
duration=1,
initial_phase=0,
loop=1,
width=0.1,
):
import supriya.synthdefs
done_action = supriya.DoneAction.from_expr(done_action)
UGen.__init__(
self,
calculation_rate=calculation_rate,
done_action=done_action,
duration=duration,
initial_phase=initial_phase,
loop=loop,
width=width,
)
class LFPar(PureUGen):
"""
A parabolic oscillator unit generator.
::
>>> supriya.ugens.LFPar.ar()
LFPar.ar()
"""
_ordered_input_names = collections.OrderedDict(
[("frequency", 440.0), ("initial_phase", 0.0)]
)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
class LFPulse(PureUGen):
"""
A non-band-limited pulse oscillator.
::
>>> supriya.ugens.LFPulse.ar()
LFPulse.ar()
"""
_ordered_input_names = collections.OrderedDict(
[("frequency", 440.0), ("initial_phase", 0), ("width", 0.5)]
)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
class LFSaw(PureUGen):
"""
A non-band-limited sawtooth oscillator unit generator.
::
>>> supriya.ugens.LFSaw.ar()
LFSaw.ar()
"""
_ordered_input_names = collections.OrderedDict(
[("frequency", 440.0), ("initial_phase", 0.0)]
)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
class LFTri(PureUGen):
"""
A non-band-limited triangle oscillator unit generator.
::
>>> supriya.ugens.LFTri.ar()
LFTri.ar()
"""
_ordered_input_names = collections.OrderedDict(
[("frequency", 440.0), ("initial_phase", 0.0)]
)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
class Osc(PureUGen):
"""
An interpolating wavetable oscillator.
"""
_ordered_input_names = collections.OrderedDict(
[("buffer_id", 0), ("frequency", 440.0), ("initial_phase", 0.0)]
)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
class OscN(PureUGen):
"""
A non-interpolating wavetable oscillator.
"""
_ordered_input_names = collections.OrderedDict(
[("buffer_id", 0), ("frequency", 440.0), ("initial_phase", 0.0)]
)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
class Select(PureUGen):
"""
A signal selector.
::
>>> sources = supriya.ugens.In.ar(bus=0, channel_count=8)
>>> selector = supriya.ugens.Phasor.kr() * 8
>>> select = supriya.ugens.Select.ar(
... sources=sources,
... selector=selector,
... )
>>> select
Select.ar()
"""
_ordered_input_names = collections.OrderedDict(
[("selector", None), ("sources", None)]
)
_unexpanded_input_names = ("sources",)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
class SinOsc(PureUGen):
"""
A sinusoid oscillator unit generator.
::
>>> supriya.ugens.SinOsc.ar()
SinOsc.ar()
::
>>> print(_)
synthdef:
name: ...
ugens:
- SinOsc.ar:
frequency: 440.0
phase: 0.0
"""
_ordered_input_names = collections.OrderedDict(
[("frequency", 440.0), ("phase", 0.0)]
)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
class SyncSaw(PureUGen):
"""
A sawtooth wave that is hard synched to a fundamental pitch.
::
>>> sync_saw = supriya.ugens.SyncSaw.ar(
... saw_frequency=440,
... sync_frequency=440,
... )
>>> sync_saw
SyncSaw.ar()
"""
_ordered_input_names = collections.OrderedDict(
[("sync_frequency", 440), ("saw_frequency", 440)]
)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
class VOsc(PureUGen):
"""
A wavetable lookup oscillator which can be swept smoothly across wavetables.
::
>>> vosc = supriya.ugens.VOsc.ar(
... buffer_id=supriya.ugens.MouseX.kr(0, 7),
... frequency=440,
... phase=0,
... )
>>> vosc
VOsc.ar()
"""
_ordered_input_names = collections.OrderedDict(
[("buffer_id", None), ("frequency", 440), ("phase", 0)]
)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
class VOsc3(PureUGen):
"""
A wavetable lookup oscillator which can be swept smoothly across wavetables.
::
>>> vosc_3 = supriya.ugens.VOsc3.ar(
... buffer_id=supriya.ugens.MouseX.kr(0, 7),
... freq_1=110,
... freq_2=220,
... freq_3=440,
... )
>>> vosc_3
VOsc3.ar()
"""
_ordered_input_names = collections.OrderedDict(
[("buffer_id", None), ("freq_1", 110), ("freq_2", 220), ("freq_3", 440)]
)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
class VarSaw(PureUGen):
"""
A sawtooth-triangle oscillator with variable duty.
::
>>> supriya.ugens.VarSaw.ar()
VarSaw.ar()
"""
_ordered_input_names = collections.OrderedDict(
[("frequency", 440.0), ("initial_phase", 0.0), ("width", 0.5)]
)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
class Vibrato(PureUGen):
"""
Vibrato is a slow frequency modulation.
::
>>> vibrato = supriya.ugens.Vibrato.ar(
... delay=0,
... depth=0.02,
... depth_variation=0.1,
... frequency=440,
... initial_phase=0,
... onset=0,
... rate=6,
... rate_variation=0.04,
... )
>>> vibrato
Vibrato.ar()
"""
_ordered_input_names = collections.OrderedDict(
[
("frequency", 440),
("rate", 6),
("depth", 0.02),
("delay", 0),
("onset", 0),
("rate_variation", 0.04),
("depth_variation", 0.1),
("initial_phase", 0),
]
)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
class WrapIndex(UGen):
"""
A wrapping buffer indexer.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> wrap_index = supriya.ugens.WrapIndex.ar(
... buffer_id=23,
... source=source,
... )
>>> wrap_index
WrapIndex.ar()
"""
_ordered_input_names = collections.OrderedDict(
[("buffer_id", None), ("source", None)]
)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
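# Illustrative sketch (not part of the original module): defining another
# oscillator-style UGen follows the same declarative pattern as the classes
# above -- an ordered mapping of input names to defaults plus the valid
# calculation rates. `ExampleOsc` is hypothetical and does not correspond to a
# real SuperCollider unit generator.
class ExampleOsc(PureUGen):
    """
    A hypothetical oscillator used only to illustrate the class layout.
    """
    _ordered_input_names = collections.OrderedDict(
        [("frequency", 440.0), ("initial_phase", 0.0)]
    )
    _valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)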
|
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Unit test for the AlarmItem class.
#
# Copyright (c) 2015 carlosperate https://github.com/carlosperate/
# Licensed under The MIT License (MIT), a copy can be found in the LICENSE file
#
from __future__ import unicode_literals, absolute_import
import unittest
import mock
import io
try:
from LightUpAlarm.AlarmItem import AlarmItem
except ImportError:
import os
import sys
file_dir = os.path.dirname(os.path.realpath(__file__))
package_dir = os.path.dirname(os.path.dirname(file_dir))
sys.path.insert(0, package_dir)
from LightUpAlarm.AlarmItem import AlarmItem
class AlarmItemTestCase(unittest.TestCase):
""" Tests for AlarmItem class. """
#
# Helper methods
#
def assert_repeat(self, alarm_test, days):
self.assertEqual(alarm_test.monday, days[0])
self.assertEqual(alarm_test.tuesday, days[1])
self.assertEqual(alarm_test.wednesday, days[2])
self.assertEqual(alarm_test.thursday, days[3])
self.assertEqual(alarm_test.friday, days[4])
self.assertEqual(alarm_test.saturday, days[5])
self.assertEqual(alarm_test.sunday, days[6])
def assert_stderr(self, test_srderr, equal=False):
""" Checks the stderr error string and resets it for next test. """
if equal is True:
self.assertEqual(test_srderr.getvalue(), '')
else:
self.assertNotEqual(test_srderr.getvalue(), '')
test_srderr.truncate(0)
test_srderr.write('')
self.assertEqual(test_srderr.getvalue(), '')
#
# Tests
#
def test_constructor(self):
""" Tests valid inputs to the constructor. """
id_ = 265
hour = 23
minute = 59
days = (False, True, True, False, False, False, False)
label = 'Alarm label'
timestamp = 12345678
# Check constructor with minimum arguments
alarm_test = AlarmItem(hour, minute)
self.assertEqual(hour, alarm_test.hour)
self.assertEqual(minute, alarm_test.minute)
for day in alarm_test.repeat:
self.assertEqual(alarm_test.repeat[day], False)
self.assertEqual(alarm_test.label, '')
self.assertIsNone(alarm_test.timestamp)
# Check constructor with minimum arguments + repeat days
alarm_test = AlarmItem(hour, minute, days=days)
self.assertEqual(days, alarm_test.repeat)
# Check constructor with minimum arguments + repeat days + enabled
alarm_test = AlarmItem(hour, minute, days=days, enabled=False)
self.assertEqual(False, alarm_test.enabled)
# Check constructor with minimum arguments + repeat days + enabled +
# label
alarm_test = AlarmItem(
hour, minute, days=days, enabled=False, label=label)
self.assertEqual(label, alarm_test.label)
# Check constructor with minimum arguments + repeat days + enabled +
# label + timestamp
alarm_test = AlarmItem(hour, minute, days=days, enabled=False,
label=label, timestamp=timestamp)
self.assertEqual(timestamp, alarm_test.timestamp)
# Check constructor with minimum arguments + repeat days + enabled +
# label + id
alarm_test = AlarmItem(hour, minute, days=days, enabled=False,
label=label, timestamp=timestamp, alarm_id=id_)
self.assertEqual(id_, alarm_test.id_)
def test_constructor_hour_min_range(self):
"""
        Test constructor values for hours and minutes to produce a None object
        if they are outside the valid ranges (0-23 and 0-59 respectively).
"""
# The accessor functions print to stderr if bad data is encountered, so
# we need to capture stderr to test it.
with mock.patch('sys.stderr', new=io.StringIO()) as test_srderr:
# Invalid minute
self.assertEqual(test_srderr.getvalue(), '')
alarm_test = AlarmItem(23, 60)
self.assertIsNone(alarm_test)
self.assert_stderr(test_srderr)
alarm_test = AlarmItem(23, -50)
self.assertIsNone(alarm_test)
self.assert_stderr(test_srderr)
# Invalid hour
alarm_test = AlarmItem(24, 59)
self.assertIsNone(alarm_test)
self.assert_stderr(test_srderr)
alarm_test = AlarmItem(-12, 59)
self.assertIsNone(alarm_test)
self.assert_stderr(test_srderr)
# Invalid hour and minute
alarm_test = AlarmItem(24, 60)
self.assertIsNone(alarm_test)
self.assert_stderr(test_srderr)
alarm_test = AlarmItem(-16, -45)
self.assertIsNone(alarm_test)
self.assert_stderr(test_srderr)
def test_hour_min_loop_range(self):
"""
        Test that the hours and minutes accessors do not change their values
        when given invalid inputs.
"""
alarm_test = AlarmItem(0, 1)
# The accessor functions print to stderr if bad data is encountered, so
# we need to capture stderr to test it.
with mock.patch('sys.stderr', new=io.StringIO()) as test_srderr:
self.assertEqual(test_srderr.getvalue(), '')
alarm_test.hour = 12
alarm_test.minute = 34
self.assertEqual(alarm_test.hour, 12)
self.assertEqual(alarm_test.minute, 34)
self.assert_stderr(test_srderr, equal=True)
# Invalid ints
alarm_test.minute = 60
self.assert_stderr(test_srderr)
alarm_test.minute = -1
self.assert_stderr(test_srderr)
self.assertEqual(alarm_test.minute, 34)
alarm_test.hour = 24
self.assert_stderr(test_srderr)
alarm_test.hour = -2
self.assert_stderr(test_srderr)
self.assertEqual(alarm_test.hour, 12)
self.assertEqual(alarm_test.hour, 12)
self.assertEqual(alarm_test.minute, 34)
def test_hour_min_integers(self):
"""
        Test setting the hours and minutes values with valid integers and
        non-integer values.
"""
alarm_test = AlarmItem(0, 0)
# The accessor functions print to stderr if bad data is encountered, so
# we need to capture stderr to test it.
with mock.patch('sys.stderr', new=io.StringIO()) as test_srderr:
# First ensure that successful set does not write to stderr
self.assertEqual(test_srderr.getvalue(), '')
alarm_test.hour = 12
alarm_test.minute = 34
self.assert_stderr(test_srderr, equal=True)
# Float instead of integer
alarm_test.hour = 12.34
self.assert_stderr(test_srderr)
alarm_test.minute = 34.56
self.assert_stderr(test_srderr)
# String instead of integer
alarm_test.hour = 'minutes'
self.assert_stderr(test_srderr)
alarm_test.minute = 'hours'
self.assert_stderr(test_srderr)
self.assertEqual(alarm_test.hour, 12)
self.assertEqual(alarm_test.minute, 34)
def test_repeat_list_strictness(self):
"""
        Test that the repeat list of booleans is filtered and catches invalid
        inputs, including lists of non-booleans and boolean lists with an
        incorrect number of items.
"""
alarm_test = AlarmItem(0, 0)
valid_days = (False, True, True, False, False, False, False)
# Setting a valid value
self.assertNotEqual(valid_days, alarm_test.repeat)
alarm_test.repeat = valid_days
self.assertEqual(valid_days, alarm_test.repeat)
# The accessor functions print to stderr if bad data is encountered, so
# we need to capture stderr to test it.
with mock.patch('sys.stderr', new=io.StringIO()) as test_srderr:
# First ensure that successful set does not write to stderr
self.assertEqual(test_srderr.getvalue(), '')
alarm_test.repeat = valid_days
self.assertEqual(alarm_test.repeat, valid_days)
self.assertEqual(test_srderr.getvalue(), '')
# Too many arguments
alarm_test.repeat = (
False, False, False, False, False, False, False, False)
self.assert_stderr(test_srderr)
self.assertEqual(alarm_test.repeat, valid_days)
# Too few arguments
alarm_test.repeat = (True, True, True, True, True, True)
self.assert_stderr(test_srderr)
self.assertEqual(alarm_test.repeat, valid_days)
# Wrong arguments
alarm_test.repeat = (True, True, True, 0, True, True, True)
self.assert_stderr(test_srderr)
self.assertEqual(alarm_test.repeat, valid_days)
def test_repeat_accessors_get(self):
"""
Sets the repeat list at the constructor and variable level, and test
that all the individual accessors for each day of the week works
correctly.
"""
days = [False, True, True, False, True, False, True]
# Test constructor input
alarm_test = AlarmItem(0, 0, days, False)
self.assertEqual(alarm_test.repeat, tuple(days))
self.assert_repeat(alarm_test, days)
        # Test repeat accessor with opposite repeat list
for i in xrange(len(days)):
days[i] = not days[i]
alarm_test.repeat = days
self.assertEqual(alarm_test.repeat, tuple(days))
self.assert_repeat(alarm_test, days)
def test_repeat_accessors_set(self):
"""
Sets the repeat list and test that the individual set accessors work as
expected, including throwing errors if values are not booleans.
"""
alarm_test = AlarmItem(0, 0)
days = [False, True, True, False, True, False, True]
# Test with correct values
alarm_test.monday = days[0]
alarm_test.tuesday = days[1]
alarm_test.wednesday = days[2]
alarm_test.thursday = days[3]
alarm_test.friday = days[4]
alarm_test.saturday = days[5]
alarm_test.sunday = days[6]
self.assert_repeat(alarm_test, days)
# To test the incorrect value, the accessor setter prints to stderr
# if bad data is encountered, so we need to capture stderr to test it.
with mock.patch('sys.stderr', new=io.StringIO()) as test_srderr:
# First ensure that successful set does not write to stderr
self.assertEqual(test_srderr.getvalue(), '')
alarm_test.monday = days[0]
self.assertEqual(test_srderr.getvalue(), '')
# Monday
alarm_test.monday = 'monday'
self.assert_stderr(test_srderr)
alarm_test.monday = 1
self.assert_stderr(test_srderr)
alarm_test.monday = 2.3
self.assert_stderr(test_srderr)
self.assertEqual(alarm_test.monday, days[0])
# Tuesday
alarm_test.tuesday = 'tuesday'
self.assert_stderr(test_srderr)
alarm_test.tuesday = 1
self.assert_stderr(test_srderr)
alarm_test.tuesday = 2.3
self.assert_stderr(test_srderr)
self.assertEqual(alarm_test.tuesday, days[1])
# Wednesday
alarm_test.wednesday = 'wednesday'
self.assert_stderr(test_srderr)
alarm_test.wednesday = 1
self.assert_stderr(test_srderr)
alarm_test.wednesday = 2.3
self.assert_stderr(test_srderr)
self.assertEqual(alarm_test.wednesday, days[2])
# Thursday
alarm_test.thursday = 'thursday'
self.assert_stderr(test_srderr)
alarm_test.thursday = 1
self.assert_stderr(test_srderr)
alarm_test.thursday = 2.3
self.assert_stderr(test_srderr)
self.assertEqual(alarm_test.thursday, days[3])
# Friday
alarm_test.friday = 'friday'
self.assert_stderr(test_srderr)
alarm_test.friday = 1
self.assert_stderr(test_srderr)
alarm_test.friday = 2.3
self.assert_stderr(test_srderr)
self.assertEqual(alarm_test.friday, days[4])
# Saturday
alarm_test.saturday = 'saturday'
self.assert_stderr(test_srderr)
alarm_test.saturday = 1
self.assert_stderr(test_srderr)
alarm_test.saturday = 2.3
self.assert_stderr(test_srderr)
self.assertEqual(alarm_test.saturday, days[5])
# Sunday
alarm_test.sunday = 'sunday'
self.assert_stderr(test_srderr)
alarm_test.sunday = 1
self.assert_stderr(test_srderr)
            alarm_test.sunday = 2.3
            self.assert_stderr(test_srderr)
            self.assertEqual(alarm_test.sunday, days[6])
def test_id(self):
""" Tests the id member variable accessors filters non-integers. """
alarm_test = AlarmItem(0, 0)
alarm_test.id_ = 5
self.assertEqual(5, alarm_test.id_)
# The accessor setter prints to stderr if bad data is encountered, so
# we need to capture stderr to test it.
with mock.patch('sys.stderr', new=io.StringIO()) as test_srderr:
# First ensure that successful set does not write to stderr
self.assertEqual(test_srderr.getvalue(), '')
alarm_test.id_ = 10
self.assertEqual(test_srderr.getvalue(), '')
self.assertEqual(alarm_test.id_, 10)
# Negative integer instead of positive integer
alarm_test.id_ = -2
self.assertEqual(alarm_test.id_, 10)
self.assert_stderr(test_srderr)
# String instead of integer
alarm_test.id_ = 'String'
self.assertEqual(alarm_test.id_, 10)
self.assert_stderr(test_srderr)
# Float instead of integer
alarm_test.id_ = 10.4
self.assertEqual(alarm_test.id_, 10)
self.assert_stderr(test_srderr)
def test_label(self):
""" Tests the label variable accessors and its string coversion. """
alarm_test = AlarmItem(0, 0)
label = 'Alarm test label'
# The accessor setter prints to stderr if bad data is encountered, so
# we need to capture stderr to test it.
with mock.patch('sys.stderr', new=io.StringIO()) as test_srderr:
# First ensure that successful set does not write to stderr
self.assertEqual(test_srderr.getvalue(), '')
alarm_test.label = label
self.assertEqual(label, alarm_test.label)
self.assertEqual(test_srderr.getvalue(), '')
# Try other types
alarm_test.label = 5
self.assertEqual('5', alarm_test.label)
self.assertEqual(test_srderr.getvalue(), '')
alarm_test.label = True
self.assertEqual('True', alarm_test.label)
self.assertEqual(test_srderr.getvalue(), '')
alarm_test.label = {'test': 5}
self.assertEqual("{u'test': 5}", alarm_test.label)
self.assertEqual(test_srderr.getvalue(), '')
def test_timestamp(self):
"""
        Tests that the timestamp member variable accessor filters non-integers.
"""
alarm_test = AlarmItem(0, 0)
alarm_test.timestamp = 1427486989
self.assertEqual(alarm_test.timestamp, 1427486989)
# The accessor setter prints to stderr if bad data is encountered, so
# we need to capture stderr to test it.
with mock.patch('sys.stderr', new=io.StringIO()) as test_srderr:
# First ensure that successful set does not write to stderr
self.assertEqual(test_srderr.getvalue(), '')
alarm_test.timestamp = 10
self.assertEqual(test_srderr.getvalue(), '')
self.assertEqual(alarm_test.timestamp, 10)
# Negative integer instead of positive integer
alarm_test.timestamp = -2
self.assertEqual(alarm_test.timestamp, 10)
self.assert_stderr(test_srderr)
# String instead of integer
alarm_test.timestamp = 'String'
self.assertEqual(alarm_test.timestamp, 10)
self.assert_stderr(test_srderr)
# Float instead of integer
alarm_test.timestamp = 10.4
self.assertEqual(alarm_test.timestamp, 10)
self.assert_stderr(test_srderr)
def test_time_to_alarm(self):
"""
        Full test coverage for the minutes_to_alert function. Good resource:
http://www.timeanddate.com/date/timeduration.html
"""
one_day = 1440
test_alarm = AlarmItem(
9, 30, (True, False, False, True, False, False, False), True)
time_diff = test_alarm.minutes_to_alert(9, 30, 0)
self.assertEqual(time_diff, 0)
time_diff = test_alarm.minutes_to_alert(9, 29, 0)
self.assertEqual(time_diff, 1)
time_diff = test_alarm.minutes_to_alert(19, 55, 2)
self.assertEqual(time_diff, 815)
time_diff = test_alarm.minutes_to_alert(9, 30, 2)
self.assertEqual(time_diff, one_day)
time_diff = test_alarm.minutes_to_alert(9, 30, 1)
self.assertEqual(time_diff, (one_day * 2))
time_diff = test_alarm.minutes_to_alert(9, 31, 1)
self.assertEqual(time_diff, ((one_day * 2) - 1))
time_diff = test_alarm.minutes_to_alert(9, 29, 4)
self.assertEqual(time_diff, ((one_day * 3) + 1))
time_diff = test_alarm.minutes_to_alert(3, 15, 1)
self.assertEqual(time_diff, ((one_day * 2) + (60 * 6) + 15))
test_alarm.repeat = (True, False, False, False, False, False, False)
time_diff = test_alarm.minutes_to_alert(9, 31, 0)
self.assertEqual(time_diff, ((one_day * 7) - 1))
time_diff = test_alarm.minutes_to_alert(13, 34, 1)
self.assertEqual(time_diff, ((one_day * 5) + (60 * 19) + 56))
time_diff = test_alarm.minutes_to_alert(4, 15, 2)
self.assertEqual(time_diff, ((one_day * 5) + (60 * 5) + 15))
def test_string_alarm(self):
""" Checks the __str__ output is correct. """
test_alarm = AlarmItem(
9, 30, (True, False, False, True, False, False, True), True)
test_alarm.id_ = 10
out = 'Alarm ID: 10 | Time: 09:30 | Enabled: Yes | Repeat: ' +\
'Mon --- --- Thu --- --- Sun '
self.assertEqual(str(test_alarm), out)
def test_any_enabled_day(self):
""" Test any_day_enabled() returns False if all repeats are false. """
test_alarm = AlarmItem(
9, 30, (True, False, False, True, False, False, True), True)
self.assertTrue(test_alarm.any_day_enabled())
test_alarm.repeat = (False, False, False, False, False, False, False)
self.assertFalse(test_alarm.any_day_enabled())
def test_diff_alarm(self):
""" Tests the diff_alarm method returned Alarms. """
# Helper function to assert the alarm properties, takes the outer scope
# variables directly as we will be reusing them for all tests
def assert_diff_alarm(diff_alarm):
self.assertEqual(diff_alarm.minute, expected_minute)
self.assertEqual(diff_alarm.hour, expected_hour)
self.assert_repeat(diff_alarm, expected_days)
self.assertEqual(diff_alarm.enabled, test_enabled)
self.assertEqual(diff_alarm.label, expected_label)
self.assertNotEqual(diff_alarm.timestamp, test_timestamp)
self.assertNotEqual(diff_alarm.id_, test_id)
# First test - 15 minutes to 9 30, so only change in minutes
time_diff = -15
test_minute = 30
test_hour = 9
expected_minute = 15
expected_hour = 9
test_days = (True, False, False, True, False, False, True)
expected_days = test_days
test_enabled = True
test_id = 98
test_label = "test label"
expected_label = test_label + \
(" (Alarm %s %+dmin)" % (test_id, time_diff))
test_timestamp = 1234
test_alarm = AlarmItem(
test_hour, test_minute, days=test_days, enabled=test_enabled,
timestamp=test_timestamp, label=test_label, alarm_id=test_id)
assert_diff_alarm(test_alarm.diff_alarm(time_diff))
# Now test + 15 minutes to 9 30, so only change in minutes
time_diff = 15
test_minute = 30
test_hour = 9
expected_minute = 45
expected_hour = 9
expected_label = test_label + \
(" (Alarm %s %+dmin)" % (test_id, time_diff))
test_alarm = AlarmItem(
test_hour, test_minute, days=test_days, enabled=test_enabled,
timestamp=test_timestamp, label=test_label, alarm_id=test_id)
assert_diff_alarm(test_alarm.diff_alarm(time_diff))
# Now test + minutes with hour rollover
time_diff = 59
test_minute = 10
test_hour = 14
expected_minute = 9
expected_hour = 15
expected_label = test_label + \
(" (Alarm %s %+dmin)" % (test_id, time_diff))
test_alarm = AlarmItem(
test_hour, test_minute, days=test_days, enabled=test_enabled,
timestamp=test_timestamp, label=test_label, alarm_id=test_id)
assert_diff_alarm(test_alarm.diff_alarm(time_diff))
# Now test - minutes with hour rollover
time_diff = -59
test_minute = 10
test_hour = 14
expected_minute = 11
expected_hour = 13
expected_label = test_label + \
(" (Alarm %s %+dmin)" % (test_id, time_diff))
test_alarm = AlarmItem(
test_hour, test_minute, days=test_days, enabled=test_enabled,
timestamp=test_timestamp, label=test_label, alarm_id=test_id)
assert_diff_alarm(test_alarm.diff_alarm(time_diff))
# Now test + minutes with day rollover
time_diff = 30
test_minute = 50
test_hour = 23
expected_minute = 20
expected_hour = 0
test_days = (True, False, False, True, True, False, True)
expected_days = (True, True, False, False, True, True, False)
expected_label = test_label + \
(" (Alarm %s %+dmin)" % (test_id, time_diff))
test_alarm = AlarmItem(
test_hour, test_minute, days=test_days, enabled=test_enabled,
timestamp=test_timestamp, label=test_label, alarm_id=test_id)
assert_diff_alarm(test_alarm.diff_alarm(time_diff))
# Now test - minutes with day rollover
time_diff = -30
test_minute = 10
test_hour = 0
expected_minute = 40
expected_hour = 23
test_days = (True, False, False, True, True, False, True)
expected_days = (False, False, True, True, False, True, True)
expected_label = test_label + \
(" (Alarm %s %+dmin)" % (test_id, time_diff))
test_alarm = AlarmItem(
test_hour, test_minute, days=test_days, enabled=test_enabled,
timestamp=test_timestamp, label=test_label, alarm_id=test_id)
assert_diff_alarm(test_alarm.diff_alarm(time_diff))
# Test input sanitation capturing stderr to check
with mock.patch('sys.stderr', new=io.StringIO()) as test_srderr:
# First ensure that successful set does not write to stderr
self.assertEqual(test_srderr.getvalue(), '')
time_diff = 0
test_alarm.diff_alarm(time_diff)
self.assertEqual(test_srderr.getvalue(), '')
# Lower boundary
time_diff = -60
test_alarm.diff_alarm(time_diff)
self.assert_stderr(test_srderr)
# Upper boundary
time_diff = 60
test_alarm.diff_alarm(time_diff)
self.assert_stderr(test_srderr)
# other types instead of integer
time_diff = 0.1
test_alarm.diff_alarm(time_diff)
self.assert_stderr(test_srderr)
time_diff = "0"
test_alarm.diff_alarm(time_diff)
self.assert_stderr(test_srderr)
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base classes for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import inspect
import types
import numpy as np
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
_DISTRIBUTION_PUBLIC_METHOD_WRAPPERS = [
"batch_shape_tensor", "batch_shape", "event_shape_tensor", "event_shape",
"sample", "log_prob", "prob", "log_cdf", "cdf", "log_survival_function",
"survival_function", "entropy", "mean", "variance", "stddev", "mode",
"covariance"]
@six.add_metaclass(abc.ABCMeta)
class _BaseDistribution(object):
"""Abstract base class needed for resolving subclass hierarchy."""
pass
def _copy_fn(fn):
"""Create a deep copy of fn.
Args:
fn: a callable
Returns:
A `FunctionType`: a deep copy of fn.
Raises:
TypeError: if `fn` is not a callable.
"""
if not callable(fn):
raise TypeError("fn is not callable: %s" % fn)
# The blessed way to copy a function. copy.deepcopy fails to create a
# non-reference copy. Since:
# types.FunctionType == type(lambda: None),
# and the docstring for the function type states:
#
# function(code, globals[, name[, argdefs[, closure]]])
#
# Create a function object from a code object and a dictionary.
# ...
#
# Here we can use this to create a new function with the old function's
# code, globals, closure, etc.
return types.FunctionType(
code=fn.__code__, globals=fn.__globals__,
name=fn.__name__, argdefs=fn.__defaults__,
closure=fn.__closure__)
def _update_docstring(old_str, append_str):
"""Update old_str by inserting append_str just before the "Args:" section."""
old_str = old_str or ""
old_str_lines = old_str.split("\n")
# Step 0: Prepend spaces to all lines of append_str. This is
# necessary for correct markdown generation.
append_str = "\n".join(" %s" % line for line in append_str.split("\n"))
# Step 1: Find mention of "Args":
has_args_ix = [
ix for ix, line in enumerate(old_str_lines)
if line.strip().lower() == "args:"]
if has_args_ix:
final_args_ix = has_args_ix[-1]
return ("\n".join(old_str_lines[:final_args_ix])
+ "\n\n" + append_str + "\n\n"
+ "\n".join(old_str_lines[final_args_ix:]))
else:
return old_str + "\n\n" + append_str
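# Illustrative example of the helper above (comments only): given
#
#   old_str = "Compute the thing.\n\nArgs:\n  x: input.\n\nReturns:\n  y."
#   append_str = "Additional documentation from `MyDist`:\n\nSome details."
#
# _update_docstring(old_str, append_str) returns old_str with the (re-indented)
# append_str inserted just before the final "Args:" section; when no "Args:"
# section is present, append_str is simply appended to the end.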
class _DistributionMeta(abc.ABCMeta):
def __new__(mcs, classname, baseclasses, attrs):
"""Control the creation of subclasses of the Distribution class.
The main purpose of this method is to properly propagate docstrings
from private Distribution methods, like `_log_prob`, into their
public wrappers as inherited by the Distribution base class
(e.g. `log_prob`).
Args:
classname: The name of the subclass being created.
baseclasses: A tuple of parent classes.
attrs: A dict mapping new attributes to their values.
Returns:
The class object.
Raises:
TypeError: If `Distribution` is not a subclass of `BaseDistribution`, or
the new class is derived via multiple inheritance and the first
parent class is not a subclass of `BaseDistribution`.
AttributeError: If `Distribution` does not implement e.g. `log_prob`.
ValueError: If a `Distribution` public method lacks a docstring.
"""
if not baseclasses: # Nothing to be done for Distribution
raise TypeError("Expected non-empty baseclass. Does Distribution "
"not subclass _BaseDistribution?")
which_base = [
base for base in baseclasses
if base == _BaseDistribution or issubclass(base, Distribution)]
base = which_base[0]
if base == _BaseDistribution: # Nothing to be done for Distribution
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
if not issubclass(base, Distribution):
raise TypeError("First parent class declared for %s must be "
"Distribution, but saw '%s'" % (classname, base.__name__))
for attr in _DISTRIBUTION_PUBLIC_METHOD_WRAPPERS:
special_attr = "_%s" % attr
class_attr_value = attrs.get(attr, None)
if attr in attrs:
# The method is being overridden, do not update its docstring
continue
base_attr_value = getattr(base, attr, None)
if not base_attr_value:
raise AttributeError(
"Internal error: expected base class '%s' to implement method '%s'"
% (base.__name__, attr))
class_special_attr_value = attrs.get(special_attr, None)
if class_special_attr_value is None:
# No _special method available, no need to update the docstring.
continue
class_special_attr_docstring = inspect.getdoc(class_special_attr_value)
if not class_special_attr_docstring:
# No docstring to append.
continue
class_attr_value = _copy_fn(base_attr_value)
class_attr_docstring = inspect.getdoc(base_attr_value)
if class_attr_docstring is None:
raise ValueError(
"Expected base class fn to contain a docstring: %s.%s"
% (base.__name__, attr))
class_attr_value.__doc__ = _update_docstring(
class_attr_value.__doc__,
("Additional documentation from `%s`:\n\n%s"
% (classname, class_special_attr_docstring)))
attrs[attr] = class_attr_value
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
class ReparameterizationType(object):
"""Instances of this class represent how sampling is reparameterized.
Two static instances exist in the distributions library, signifying
one of two possible properties for samples from a distribution:
`FULLY_REPARAMETERIZED`: Samples from the distribution are fully
reparameterized, and straight-through gradients are supported.
`NOT_REPARAMETERIZED`: Samples from the distribution are not fully
reparameterized, and straight-through gradients are either partially
unsupported or are not supported at all. In this case, for purposes of
e.g. RL or variational inference, it is generally safest to wrap the
sample results in a `stop_gradient` call and use policy gradients or a
surrogate loss instead.
"""
def __init__(self, rep_type):
self._rep_type = rep_type
def __repr__(self):
return "<Reparameteriation Type: %s>" % self._rep_type
def __eq__(self, other):
"""Determine if this `ReparameterizationType` is equal to another.
Since ReparameterizationType instances are constant static global
instances, equality checks if two instances' id() values are equal.
Args:
other: Object to compare against.
Returns:
`self is other`.
"""
return self is other
# Fully reparameterized distribution: samples from a fully
# reparameterized distribution support straight-through gradients with
# respect to all parameters.
FULLY_REPARAMETERIZED = ReparameterizationType("FULLY_REPARAMETERIZED")
# Not reparameterized distribution: samples from a non-
# reparameterized distribution do not support straight-through gradients for
# at least some of the parameters.
NOT_REPARAMETERIZED = ReparameterizationType("NOT_REPARAMETERIZED")
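# Illustrative usage (comments only): downstream code typically checks these
# static instances by identity before relying on pathwise gradients, e.g.
#
#   if dist.reparameterization_type is FULLY_REPARAMETERIZED:
#     loss = loss_fn(dist.sample(seed=42))            # gradients flow through
#   else:
#     samples = array_ops.stop_gradient(dist.sample(seed=42))
#     # fall back to a score-function / surrogate-loss estimator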
@six.add_metaclass(_DistributionMeta)
class Distribution(_BaseDistribution):
"""A generic probability distribution base class.
`Distribution` is a base class for constructing and organizing properties
(e.g., mean, variance) of random variables (e.g., Bernoulli, Gaussian).
#### Subclassing
Subclasses are expected to implement a leading-underscore version of the
same-named function. The argument signature should be identical except for
the omission of `name="..."`. For example, to enable `log_prob(value,
name="log_prob")` a subclass should implement `_log_prob(value)`.
Subclasses can append to public-level docstrings by providing
docstrings for their method specializations. For example:
```python
@distribution_util.AppendDocstring("Some other details.")
def _log_prob(self, value):
...
```
would add the string "Some other details." to the `log_prob` function
docstring. This is implemented as a simple decorator to avoid the Python
linter complaining about missing Args/Returns/Raises sections in the
partial docstrings.
#### Broadcasting, batching, and shapes
All distributions support batches of independent distributions of that type.
The batch shape is determined by broadcasting together the parameters.
The shape of arguments to `__init__`, `cdf`, `log_cdf`, `prob`, and
`log_prob` reflect this broadcasting, as does the return value of `sample` and
`sample_n`.
`sample_n_shape = [n] + batch_shape + event_shape`, where `sample_n_shape` is
the shape of the `Tensor` returned from `sample_n`, `n` is the number of
samples, `batch_shape` defines how many independent distributions there are,
and `event_shape` defines the shape of samples from each of those independent
distributions. Samples are independent along the `batch_shape` dimensions, but
not necessarily so along the `event_shape` dimensions (depending on the
particulars of the underlying distribution).
Using the `Uniform` distribution as an example:
```python
minval = 3.0
maxval = [[4.0, 6.0],
[10.0, 12.0]]
# Broadcasting:
# This instance represents 4 Uniform distributions. Each has a lower bound at
# 3.0 as the `minval` parameter was broadcasted to match `maxval`'s shape.
u = Uniform(minval, maxval)
# `event_shape` is `TensorShape([])`.
event_shape = u.event_shape
# `event_shape_t` is a `Tensor` which will evaluate to [].
event_shape_t = u.event_shape_tensor()
# Sampling returns a sample per distribution. `samples` has shape
# [5, 2, 2], which is [n] + batch_shape + event_shape, where n=5,
# batch_shape=[2, 2], and event_shape=[].
samples = u.sample_n(5)
# The broadcasting holds across methods. Here we use `cdf` as an example. The
# same holds for `log_cdf` and the likelihood functions.
# `cum_prob` has shape [2, 2] as the `value` argument was broadcasted to the
# shape of the `Uniform` instance.
cum_prob_broadcast = u.cdf(4.0)
# `cum_prob`'s shape is [2, 2], one per distribution. No broadcasting
# occurred.
cum_prob_per_dist = u.cdf([[4.0, 5.0],
[6.0, 7.0]])
# INVALID as the `value` argument is not broadcastable to the distribution's
# shape.
cum_prob_invalid = u.cdf([4.0, 5.0, 6.0])
```
#### Parameter values leading to undefined statistics or distributions.
Some distributions do not have well-defined statistics for all initialization
parameter values. For example, the beta distribution is parameterized by
positive real numbers `concentration1` and `concentration0`, and does not have
a well-defined mode if `concentration1 < 1` or `concentration0 < 1`.
The user is given the option of raising an exception or returning `NaN`.
```python
a = tf.exp(tf.matmul(logits, weights_a))
b = tf.exp(tf.matmul(logits, weights_b))
# Will raise exception if ANY batch member has a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=False)
mode = dist.mode().eval()
# Will return NaN for batch members with either a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=True) # Default behavior
mode = dist.mode().eval()
```
In all cases, an exception is raised if *invalid* parameters are passed, e.g.
```python
# Will raise an exception if any Op is run.
negative_a = -1.0 * a # beta distribution by definition has a > 0.
dist = distributions.beta(negative_a, b, allow_nan_stats=True)
dist.mean().eval()
```
"""
def __init__(self,
dtype,
is_continuous,
reparameterization_type,
validate_args,
allow_nan_stats,
parameters=None,
graph_parents=None,
name=None):
"""Constructs the `Distribution`.
**This is a private method for subclass use.**
Args:
dtype: The type of the event samples. `None` implies no type-enforcement.
is_continuous: Python `bool`. If `True` this `Distribution` is continuous
over its supported domain.
reparameterization_type: Instance of `ReparameterizationType`.
If `distributions.FULLY_REPARAMETERIZED`, this
`Distribution` can be reparameterized in terms of some standard
distribution with a function whose Jacobian is constant for the support
of the standard distribution. If `distributions.NOT_REPARAMETERIZED`,
then no such reparameterization is available.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
parameters: Python `dict` of parameters used to instantiate this
`Distribution`.
graph_parents: Python `list` of graph prerequisites of this
`Distribution`.
name: Python `str` name prefixed to Ops created by this class. Default:
subclass name.
Raises:
ValueError: if any member of graph_parents is `None` or not a `Tensor`.
"""
graph_parents = [] if graph_parents is None else graph_parents
for i, t in enumerate(graph_parents):
if t is None or not contrib_framework.is_tensor(t):
raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
self._dtype = dtype
self._is_continuous = is_continuous
self._reparameterization_type = reparameterization_type
self._allow_nan_stats = allow_nan_stats
self._validate_args = validate_args
self._parameters = parameters or {}
self._graph_parents = graph_parents
self._name = name or type(self).__name__
@classmethod
def param_shapes(cls, sample_shape, name="DistributionParamShapes"):
"""Shapes of parameters given the desired shape of a call to `sample()`.
This is a class method that describes what key/value arguments are required
to instantiate the given `Distribution` so that a particular shape is
returned for that instance's call to `sample()`.
Subclasses should override class method `_param_shapes`.
Args:
sample_shape: `Tensor` or python list/tuple. Desired shape of a call to
`sample()`.
name: name to prepend ops with.
Returns:
`dict` of parameter name to `Tensor` shapes.
"""
with ops.name_scope(name, values=[sample_shape]):
return cls._param_shapes(sample_shape)
@classmethod
def param_static_shapes(cls, sample_shape):
"""param_shapes with static (i.e. `TensorShape`) shapes.
This is a class method that describes what key/value arguments are required
to instantiate the given `Distribution` so that a particular shape is
returned for that instance's call to `sample()`. Assumes that the sample's
shape is known statically.
Subclasses should override class method `_param_shapes` to return
constant-valued tensors when constant values are fed.
Args:
sample_shape: `TensorShape` or python list/tuple. Desired shape of a call
to `sample()`.
Returns:
`dict` of parameter name to `TensorShape`.
Raises:
ValueError: if `sample_shape` is a `TensorShape` and is not fully defined.
"""
if isinstance(sample_shape, tensor_shape.TensorShape):
if not sample_shape.is_fully_defined():
raise ValueError("TensorShape sample_shape must be fully defined")
sample_shape = sample_shape.as_list()
params = cls.param_shapes(sample_shape)
static_params = {}
for name, shape in params.items():
static_shape = tensor_util.constant_value(shape)
if static_shape is None:
raise ValueError(
"sample_shape must be a fully-defined TensorShape or list/tuple")
static_params[name] = tensor_shape.TensorShape(static_shape)
return static_params
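# Illustrative example (comments only; distribution name assumed): for a
# hypothetical scalar-event Normal subclass parameterized by `loc` and `scale`,
#
#   Normal.param_static_shapes([100])
#
# would be expected to return
#   {"loc": TensorShape([100]), "scale": TensorShape([100])},
# i.e. one parameter value per requested sample.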
@staticmethod
def _param_shapes(sample_shape):
raise NotImplementedError("_param_shapes not implemented")
@property
def name(self):
"""Name prepended to all ops created by this `Distribution`."""
return self._name
@property
def dtype(self):
"""The `DType` of `Tensor`s handled by this `Distribution`."""
return self._dtype
@property
def parameters(self):
"""Dictionary of parameters used to instantiate this `Distribution`."""
# Remove "self", "__class__", or other special variables. These can appear
# if the subclass used `parameters = locals()`.
return dict((k, v) for k, v in self._parameters.items()
if not k.startswith("__") and k != "self")
@property
def is_continuous(self):
return self._is_continuous
@property
def reparameterization_type(self):
"""Describes how samples from the distribution are reparameterized.
Currently this is one of the static instances
`distributions.FULLY_REPARAMETERIZED`
or `distributions.NOT_REPARAMETERIZED`.
Returns:
An instance of `ReparameterizationType`.
"""
return self._reparameterization_type
@property
def allow_nan_stats(self):
"""Python `bool` describing behavior when a stat is undefined.
Stats return +/- infinity when it makes sense. E.g., the variance of a
Cauchy distribution is infinity. However, sometimes the statistic is
undefined, e.g., if a distribution's pdf does not achieve a maximum within
the support of the distribution, the mode is undefined. If the mean is
undefined, then by definition the variance is undefined. E.g. the mean for
Student's T for df = 1 is undefined (no clear way to say it is either + or -
infinity), so the variance = E[(X - mean)**2] is also undefined.
Returns:
allow_nan_stats: Python `bool`.
"""
return self._allow_nan_stats
@property
def validate_args(self):
"""Python `bool` indicating possibly expensive checks are enabled."""
return self._validate_args
def copy(self, **override_parameters_kwargs):
"""Creates a deep copy of the distribution.
Note: the copy distribution may continue to depend on the original
initialization arguments.
Args:
**override_parameters_kwargs: String/value dictionary of initialization
arguments to override with new values.
Returns:
distribution: A new instance of `type(self)` initialized from the union
of self.parameters and override_parameters_kwargs, i.e.,
`dict(self.parameters, **override_parameters_kwargs)`.
"""
parameters = dict(self.parameters, **override_parameters_kwargs)
return type(self)(**parameters)
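# Illustrative usage (comments only): because `copy` re-invokes the subclass
# constructor with `dict(self.parameters, **overrides)`, a call such as
#
#   dist2 = dist.copy(validate_args=True)
#
# produces a new instance of the same type that differs only in the overridden
# constructor arguments (assuming the subclass recorded `parameters`).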
def _batch_shape_tensor(self):
raise NotImplementedError("batch_shape_tensor is not implemented")
def batch_shape_tensor(self, name="batch_shape_tensor"):
"""Shape of a single sample from a single event index as a 1-D `Tensor`.
The batch dimensions are indexes into independent, non-identical
parameterizations of this distribution.
Args:
name: name to give to the op
Returns:
batch_shape: `Tensor`.
"""
with self._name_scope(name):
if self.batch_shape.is_fully_defined():
return ops.convert_to_tensor(self.batch_shape.as_list(),
dtype=dtypes.int32,
name="batch_shape")
return self._batch_shape_tensor()
def _batch_shape(self):
return tensor_shape.TensorShape(None)
@property
def batch_shape(self):
"""Shape of a single sample from a single event index as a `TensorShape`.
May be partially defined or unknown.
The batch dimensions are indexes into independent, non-identical
parameterizations of this distribution.
Returns:
batch_shape: `TensorShape`, possibly unknown.
"""
return self._batch_shape()
def _event_shape_tensor(self):
raise NotImplementedError("event_shape_tensor is not implemented")
def event_shape_tensor(self, name="event_shape_tensor"):
"""Shape of a single sample from a single batch as a 1-D int32 `Tensor`.
Args:
name: name to give to the op
Returns:
event_shape: `Tensor`.
"""
with self._name_scope(name):
if self.event_shape.is_fully_defined():
return ops.convert_to_tensor(self.event_shape.as_list(),
dtype=dtypes.int32,
name="event_shape")
return self._event_shape_tensor()
def _event_shape(self):
return tensor_shape.TensorShape(None)
@property
def event_shape(self):
"""Shape of a single sample from a single batch as a `TensorShape`.
May be partially defined or unknown.
Returns:
event_shape: `TensorShape`, possibly unknown.
"""
return self._event_shape()
def is_scalar_event(self, name="is_scalar_event"):
"""Indicates that `event_shape == []`.
Args:
name: The name to give this op.
Returns:
is_scalar_event: `bool` scalar `Tensor`.
"""
with self._name_scope(name):
return ops.convert_to_tensor(
self._is_scalar_helper(self.event_shape, self.event_shape_tensor),
name="is_scalar_event")
def is_scalar_batch(self, name="is_scalar_batch"):
"""Indicates that `batch_shape == []`.
Args:
name: The name to give this op.
Returns:
is_scalar_batch: `bool` scalar `Tensor`.
"""
with self._name_scope(name):
return ops.convert_to_tensor(
self._is_scalar_helper(self.batch_shape, self.batch_shape_tensor),
name="is_scalar_batch")
def _sample_n(self, n, seed=None):
raise NotImplementedError("sample_n is not implemented")
def _call_sample_n(self, sample_shape, seed, name, **kwargs):
with self._name_scope(name, values=[sample_shape]):
sample_shape = ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32, name="sample_shape")
sample_shape, n = self._expand_sample_shape_to_vector(
sample_shape, "sample_shape")
samples = self._sample_n(n, seed, **kwargs)
batch_event_shape = array_ops.shape(samples)[1:]
final_shape = array_ops.concat([sample_shape, batch_event_shape], 0)
samples = array_ops.reshape(samples, final_shape)
samples = self._set_sample_static_shape(samples, sample_shape)
return samples
def sample(self, sample_shape=(), seed=None, name="sample"):
"""Generate samples of the specified shape.
Note that a call to `sample()` without arguments will generate a single
sample.
Args:
sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples.
seed: Python integer seed for RNG
name: name to give to the op.
Returns:
samples: a `Tensor` with prepended dimensions `sample_shape`.
"""
return self._call_sample_n(sample_shape, seed, name)
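# Illustrative shape example (comments only): for a distribution with
# batch_shape=[2, 2] and event_shape=[], `dist.sample([4, 5])` returns a
# `Tensor` of shape sample_shape + batch_shape + event_shape = [4, 5, 2, 2].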
def _log_prob(self, value):
raise NotImplementedError("log_prob is not implemented")
def _call_log_prob(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_prob(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(self._prob(value, **kwargs))
except NotImplementedError:
raise original_exception
def log_prob(self, value, name="log_prob"):
"""Log probability density/mass function (depending on `is_continuous`).
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_log_prob(value, name)
def _prob(self, value):
raise NotImplementedError("prob is not implemented")
def _call_prob(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._prob(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_prob(value, **kwargs))
except NotImplementedError:
raise original_exception
def prob(self, value, name="prob"):
"""Probability density/mass function (depending on `is_continuous`).
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_prob(value, name)
def _log_cdf(self, value):
raise NotImplementedError("log_cdf is not implemented")
def _call_log_cdf(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_cdf(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(self._cdf(value, **kwargs))
except NotImplementedError:
raise original_exception
def log_cdf(self, value, name="log_cdf"):
"""Log cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```none
log_cdf(x) := Log[ P[X <= x] ]
```
Often, a numerical approximation can be used for `log_cdf(x)` that yields
a more accurate answer than simply taking the logarithm of the `cdf` when
`x << -1`.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
logcdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_log_cdf(value, name)
def _cdf(self, value):
raise NotImplementedError("cdf is not implemented")
def _call_cdf(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._cdf(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_cdf(value, **kwargs))
except NotImplementedError:
raise original_exception
def cdf(self, value, name="cdf"):
"""Cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```none
cdf(x) := P[X <= x]
```
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
cdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_cdf(value, name)
def _log_survival_function(self, value):
raise NotImplementedError("log_survival_function is not implemented")
def _call_log_survival_function(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_survival_function(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log1p(-self.cdf(value, **kwargs))
except NotImplementedError:
raise original_exception
def log_survival_function(self, value, name="log_survival_function"):
"""Log survival function.
Given random variable `X`, the survival function is defined:
```none
log_survival_function(x) = Log[ P[X > x] ]
= Log[ 1 - P[X <= x] ]
= Log[ 1 - cdf(x) ]
```
Typically, different numerical approximations can be used for the log
survival function that are more accurate than computing `log(1 - cdf(x))` when `x >> 1`.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
return self._call_log_survival_function(value, name)
def _survival_function(self, value):
raise NotImplementedError("survival_function is not implemented")
def _call_survival_function(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._survival_function(value, **kwargs)
except NotImplementedError as original_exception:
try:
return 1. - self.cdf(value, **kwargs)
except NotImplementedError:
raise original_exception
def survival_function(self, value, name="survival_function"):
"""Survival function.
Given random variable `X`, the survival function is defined:
```none
survival_function(x) = P[X > x]
= 1 - P[X <= x]
= 1 - cdf(x).
```
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
return self._call_survival_function(value, name)
def _entropy(self):
raise NotImplementedError("entropy is not implemented")
def entropy(self, name="entropy"):
"""Shannon entropy in nats."""
with self._name_scope(name):
return self._entropy()
def _mean(self):
raise NotImplementedError("mean is not implemented")
def mean(self, name="mean"):
"""Mean."""
with self._name_scope(name):
return self._mean()
def _variance(self):
raise NotImplementedError("variance is not implemented")
def variance(self, name="variance"):
"""Variance.
Variance is defined as,
```none
Var = E[(X - E[X])**2]
```
where `X` is the random variable associated with this distribution, `E`
denotes expectation, and `Var.shape = batch_shape + event_shape`.
Args:
name: The name to give this op.
Returns:
variance: Floating-point `Tensor` with shape identical to
`batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
"""
with self._name_scope(name):
try:
return self._variance()
except NotImplementedError as original_exception:
try:
return math_ops.square(self._stddev())
except NotImplementedError:
raise original_exception
def _stddev(self):
raise NotImplementedError("stddev is not implemented")
def stddev(self, name="stddev"):
"""Standard deviation.
Standard deviation is defined as,
```none
stddev = E[(X - E[X])**2]**0.5
```
where `X` is the random variable associated with this distribution, `E`
denotes expectation, and `stddev.shape = batch_shape + event_shape`.
Args:
name: The name to give this op.
Returns:
stddev: Floating-point `Tensor` with shape identical to
`batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
"""
with self._name_scope(name):
try:
return self._stddev()
except NotImplementedError as original_exception:
try:
return math_ops.sqrt(self._variance())
except NotImplementedError:
raise original_exception
def _covariance(self):
raise NotImplementedError("covariance is not implemented")
def covariance(self, name="covariance"):
"""Covariance.
Covariance is (possibly) defined only for non-scalar-event distributions.
For example, for a length-`k`, vector-valued distribution, it is calculated
as,
```none
Cov[i, j] = Covariance(X_i, X_j) = E[(X_i - E[X_i]) (X_j - E[X_j])]
```
where `Cov` is a (batch of) `k x k` matrix, `0 <= (i, j) < k`, and `E`
denotes expectation.
Alternatively, for non-vector, multivariate distributions (e.g.,
matrix-valued, Wishart), `Covariance` shall return a (batch of) matrices
under some vectorization of the events, i.e.,
```none
Cov[i, j] = Covariance(Vec(X)_i, Vec(X)_j) = [as above]
```
where `Cov` is a (batch of) `k' x k'` matrices,
`0 <= (i, j) < k' = reduce_prod(event_shape)`, and `Vec` is some function
mapping indices of this distribution's event dimensions to indices of a
length-`k'` vector.
Args:
name: The name to give this op.
Returns:
covariance: Floating-point `Tensor` with shape `[B1, ..., Bn, k', k']`
where the first `n` dimensions are batch coordinates and
`k' = reduce_prod(self.event_shape)`.
"""
with self._name_scope(name):
return self._covariance()
def _mode(self):
raise NotImplementedError("mode is not implemented")
def mode(self, name="mode"):
"""Mode."""
with self._name_scope(name):
return self._mode()
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=(
([] if values is None else values) + self._graph_parents)) as scope:
yield scope
def _expand_sample_shape_to_vector(self, x, name):
"""Helper to `sample` which ensures input is 1D."""
x_static_val = tensor_util.constant_value(x)
if x_static_val is None:
prod = math_ops.reduce_prod(x)
else:
prod = np.prod(x_static_val, dtype=x.dtype.as_numpy_dtype())
ndims = x.get_shape().ndims # != sample_ndims
if ndims is None:
# Maybe expand_dims.
ndims = array_ops.rank(x)
expanded_shape = distribution_util.pick_vector(
math_ops.equal(ndims, 0),
np.array([1], dtype=np.int32),
array_ops.shape(x))
x = array_ops.reshape(x, expanded_shape)
elif ndims == 0:
# Definitely expand_dims.
if x_static_val is not None:
x = ops.convert_to_tensor(
np.array([x_static_val], dtype=x.dtype.as_numpy_dtype()),
name=name)
else:
x = array_ops.reshape(x, [1])
elif ndims != 1:
raise ValueError("Input is neither scalar nor vector.")
return x, prod
def _set_sample_static_shape(self, x, sample_shape):
"""Helper to `sample`; sets static shape info."""
# Set shape hints.
sample_shape = tensor_shape.TensorShape(
tensor_util.constant_value(sample_shape))
ndims = x.get_shape().ndims
sample_ndims = sample_shape.ndims
batch_ndims = self.batch_shape.ndims
event_ndims = self.event_shape.ndims
# Infer rank(x).
if (ndims is None and
sample_ndims is not None and
batch_ndims is not None and
event_ndims is not None):
ndims = sample_ndims + batch_ndims + event_ndims
x.set_shape([None] * ndims)
# Infer sample shape.
if ndims is not None and sample_ndims is not None:
shape = sample_shape.concatenate([None]*(ndims - sample_ndims))
x.set_shape(x.get_shape().merge_with(shape))
# Infer event shape.
if ndims is not None and event_ndims is not None:
shape = tensor_shape.TensorShape(
[None]*(ndims - event_ndims)).concatenate(self.event_shape)
x.set_shape(x.get_shape().merge_with(shape))
# Infer batch shape.
if batch_ndims is not None:
if ndims is not None:
if sample_ndims is None and event_ndims is not None:
sample_ndims = ndims - batch_ndims - event_ndims
elif event_ndims is None and sample_ndims is not None:
event_ndims = ndims - batch_ndims - sample_ndims
if sample_ndims is not None and event_ndims is not None:
shape = tensor_shape.TensorShape([None]*sample_ndims).concatenate(
self.batch_shape).concatenate([None]*event_ndims)
x.set_shape(x.get_shape().merge_with(shape))
return x
def _is_scalar_helper(self, static_shape, dynamic_shape_fn):
"""Implementation for `is_scalar_batch` and `is_scalar_event`."""
if static_shape.ndims is not None:
return static_shape.ndims == 0
shape = dynamic_shape_fn()
if (shape.get_shape().ndims is not None and
shape.get_shape()[0].value is not None):
# If the static_shape is correctly written then we should never execute
# this branch. We keep it just in case there's some unimagined corner
# case.
return shape.get_shape().as_list() == [0]
return math_ops.equal(array_ops.shape(shape)[0], 0)
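# Minimal subclassing sketch (comments only; an assumed example, not part of
# this module). Per the `Distribution` docstring, a subclass implements the
# leading-underscore methods and inherits the public wrappers:
#
#   class Deterministic(Distribution):
#     """Point mass at `loc` (illustrative only)."""
#     def __init__(self, loc, name="Deterministic"):
#       self._loc = ops.convert_to_tensor(loc, name="loc")
#       super(Deterministic, self).__init__(
#           dtype=self._loc.dtype,
#           is_continuous=False,
#           reparameterization_type=NOT_REPARAMETERIZED,
#           validate_args=False,
#           allow_nan_stats=True,
#           parameters={"loc": loc},
#           graph_parents=[self._loc],
#           name=name)
#     def _batch_shape_tensor(self):
#       return array_ops.shape(self._loc)
#     def _batch_shape(self):
#       return self._loc.get_shape()
#     def _event_shape_tensor(self):
#       return ops.convert_to_tensor([], dtype=dtypes.int32)
#     def _event_shape(self):
#       return tensor_shape.TensorShape([])
#     def _mean(self):
#       return array_ops.identity(self._loc)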
|
|
import os
import numpy as np
import taichi as ti
from tests import test_utils
this_dir = os.path.dirname(os.path.abspath(__file__))
model_file_path = os.path.join(this_dir, 'ell.json')
@test_utils.test(require=ti.extension.mesh)
def test_mesh_patch_idx():
mesh_builder = ti.Mesh.Tet()
mesh_builder.verts.place({'idx': ti.i32})
model = mesh_builder.build(ti.Mesh.load_meta(model_file_path))
@ti.kernel
def foo():
for v in model.verts:
v.idx = ti.mesh_patch_idx()
foo()
idx = model.verts.idx.to_numpy()
assert idx[0] == 6
assert idx.sum() == 89
def _test_mesh_for(cell_reorder=False, vert_reorder=False, extra_tests=True):
mesh_builder = ti.Mesh.Tet()
mesh_builder.verts.place({'t': ti.i32}, reorder=vert_reorder)
mesh_builder.cells.place({'t': ti.i32}, reorder=cell_reorder)
mesh_builder.cells.link(mesh_builder.verts)
mesh_builder.verts.link(mesh_builder.cells)
mesh_builder.cells.link(mesh_builder.cells)
mesh_builder.verts.link(mesh_builder.verts)
model = mesh_builder.build(ti.Mesh.load_meta(model_file_path))
@ti.kernel
def cell_vert():
for c in model.cells:
for j in range(c.verts.size):
c.t += c.verts[j].id
cell_vert()
total = model.cells.t.to_numpy().sum()
model.cells.t.fill(0)
assert total == 892
@ti.kernel
def vert_cell():
for v in model.verts:
for j in range(v.cells.size):
v.t += v.cells[j].id
vert_cell()
total = model.verts.t.to_numpy().sum()
model.verts.t.fill(0)
assert total == 1104
if not extra_tests:
return
@ti.kernel
def cell_cell():
for c in model.cells:
for j in range(c.cells.size):
c.t += c.cells[j].id
cell_cell()
total = model.cells.t.to_numpy().sum()
model.cells.t.fill(0)
assert total == 690
@ti.kernel
def vert_vert():
for v in model.verts:
for j in range(v.verts.size):
v.t += v.verts[j].id
vert_vert()
total = model.verts.t.to_numpy().sum()
model.verts.t.fill(0)
assert total == 1144
@test_utils.test(require=ti.extension.mesh)
def test_mesh_for():
_test_mesh_for(False, False)
_test_mesh_for(False, True)
@test_utils.test(require=ti.extension.mesh,
optimize_mesh_reordered_mapping=False)
def test_mesh_reordered_opt():
_test_mesh_for(True, True, False)
@test_utils.test(require=ti.extension.mesh, mesh_localize_to_end_mapping=False)
def test_mesh_localize_mapping0():
_test_mesh_for(False, False, False)
_test_mesh_for(True, True, False)
@test_utils.test(require=ti.extension.mesh,
mesh_localize_from_end_mapping=True)
def test_mesh_localize_mapping1():
_test_mesh_for(False, False, False)
_test_mesh_for(True, True, False)
@test_utils.test(require=ti.extension.mesh)
def test_mesh_reorder():
vec3i = ti.types.vector(3, ti.i32)
mesh_builder = ti.Mesh.Tet()
mesh_builder.verts.place({'s': ti.i32, 's3': vec3i}, reorder=True)
mesh_builder.cells.link(mesh_builder.verts)
model = mesh_builder.build(ti.Mesh.load_meta(model_file_path))
id2 = np.array([x**2 for x in range(len(model.verts))])
id123 = np.array([[x**1, x**2, x**3] for x in range(len(model.verts))])
model.verts.s.from_numpy(id2)
model.verts.s3.from_numpy(id123)
@ti.kernel
def foo():
for v in model.verts:
assert v.s == v.id**2
assert v.s3[0] == v.id**1 and v.s3[1] == v.id**2 and v.s3[
2] == v.id**3
v.s = v.id**3
v.s3 *= v.id
foo()
id3 = model.verts.s.to_numpy()
id234 = model.verts.s3.to_numpy()
for i in range(len(model.verts)):
assert model.verts.s[i] == i**3
assert id3[i] == i**3
assert model.verts.s3[i][0] == i**2
assert model.verts.s3[i][1] == i**3
assert model.verts.s3[i][2] == i**4
assert id234[i][0] == i**2
assert id234[i][1] == i**3
assert id234[i][2] == i**4
@test_utils.test(require=ti.extension.mesh)
def test_mesh_minor_relations():
mesh_builder = ti.Mesh.Tet()
mesh_builder.verts.place({'y': ti.i32})
mesh_builder.edges.place({'x': ti.i32})
mesh_builder.cells.link(mesh_builder.edges)
mesh_builder.verts.link(mesh_builder.cells)
model = mesh_builder.build(ti.Mesh.load_meta(model_file_path))
model.edges.x.fill(1)
@ti.kernel
def foo():
for v in model.verts:
for i in range(v.cells.size):
c = v.cells[i]
for j in range(c.edges.size):
e = c.edges[j]
v.y += e.x
foo()
total = model.verts.y.to_numpy().sum()
assert total == 576
@test_utils.test(require=ti.extension.mesh, demote_no_access_mesh_fors=True)
def test_multiple_meshes():
mesh_builder = ti.Mesh.Tet()
mesh_builder.verts.place({'y': ti.i32})
meta = ti.Mesh.load_meta(model_file_path)
model1 = mesh_builder.build(meta)
model2 = mesh_builder.build(meta)
model1.verts.y.from_numpy(
np.array([x**2 for x in range(len(model1.verts))]))
@ti.kernel
def foo():
for v in model1.verts:
model2.verts.y[v.id] = v.y
foo()
out = model2.verts.y.to_numpy()
for i in range(len(out)):
assert out[i] == i**2
@test_utils.test(require=ti.extension.mesh)
def test_mesh_local():
mesh_builder = ti.Mesh.Tet()
mesh_builder.verts.place({'a': ti.i32})
mesh_builder.faces.link(mesh_builder.verts)
model = mesh_builder.build(ti.Mesh.load_meta(model_file_path))
ext_a = ti.field(ti.i32, shape=len(model.verts))
@ti.kernel
def foo(cache: ti.template()):
if ti.static(cache):
ti.mesh_local(ext_a, model.verts.a)
for f in model.faces:
m = f.verts[0].id + f.verts[1].id + f.verts[2].id
f.verts[0].a += m
f.verts[1].a += m
f.verts[2].a += m
ext_a[f.verts[0].id] += m
ext_a[f.verts[1].id] += m
ext_a[f.verts[2].id] += m
foo(False)
res1 = model.verts.a.to_numpy()
res2 = ext_a.to_numpy()
model.verts.a.fill(0)
ext_a.fill(0)
foo(True)
res3 = model.verts.a.to_numpy()
res4 = ext_a.to_numpy()
for i in range(len(model.verts)):
assert res1[i] == res2[i]
assert res1[i] == res3[i]
assert res1[i] == res4[i]
@test_utils.test(require=ti.extension.mesh, experimental_auto_mesh_local=True)
def test_auto_mesh_local():
mesh_builder = ti.Mesh.Tet()
mesh_builder.verts.place({'a': ti.i32, 's': ti.i32})
mesh_builder.faces.link(mesh_builder.verts)
model = mesh_builder.build(ti.Mesh.load_meta(model_file_path))
ext_a = ti.field(ti.i32, shape=len(model.verts))
@ti.kernel
def foo(cache: ti.template()):
for v in model.verts:
v.s = v.id
if ti.static(cache):
ti.mesh_local(ext_a, model.verts.a)
for f in model.faces:
m = f.verts[0].s + f.verts[1].s + f.verts[2].s
f.verts[0].a += m
f.verts[1].a += m
f.verts[2].a += m
for i in range(3):
ext_a[f.verts[i].id] += m
foo(False)
res1 = model.verts.a.to_numpy()
res2 = ext_a.to_numpy()
model.verts.a.fill(0)
ext_a.fill(0)
foo(True)
res3 = model.verts.a.to_numpy()
res4 = ext_a.to_numpy()
for i in range(len(model.verts)):
assert res1[i] == res2[i]
assert res1[i] == res3[i]
assert res1[i] == res4[i]
@test_utils.test(require=ti.extension.mesh)
def test_nested_mesh_for():
mesh_builder = ti.Mesh.Tet()
mesh_builder.faces.place({'a': ti.i32, 'b': ti.i32})
mesh_builder.faces.link(mesh_builder.verts)
model = mesh_builder.build(ti.Mesh.load_meta(model_file_path))
@ti.kernel
def foo():
for f in model.faces:
for i in range(f.verts.size):
f.a += f.verts[i].id
for v in f.verts:
f.b += v.id
a = model.faces.a.to_numpy()
b = model.faces.b.to_numpy()
assert (a == b).all() == 1
@test_utils.test(require=ti.extension.mesh)
def test_multiple_mesh_major_relations():
mesh = ti.TetMesh()
mesh.verts.place({
's': ti.i32,
's_': ti.i32,
's1': ti.i32,
'a': ti.i32,
'b': ti.i32,
'c': ti.i32
})
mesh.edges.place({'s2': ti.i32})
mesh.cells.place({'s3': ti.i32})
mesh.verts.link(mesh.verts)
mesh.verts.link(mesh.edges)
mesh.verts.link(mesh.cells)
model = mesh.build(ti.Mesh.load_meta(model_file_path))
@ti.kernel
def foo():
for u in model.verts:
u.s1 = u.id
for e in model.edges:
e.s2 = e.id
for c in model.cells:
c.s3 = c.id
ti.mesh_local(model.verts.s1, model.edges.s2, model.cells.s3)
for u in model.verts:
a, b, c = 0, 0, 0
for i in range(u.verts.size):
a += u.verts[i].s1
for i in range(u.edges.size):
b += u.edges[i].s2
for i in range(u.cells.size):
c += u.cells[i].s3
u.s = a * b * c
for u in model.verts:
for i in range(u.verts.size):
u.a += u.verts[i].s1
for u in model.verts:
for i in range(u.edges.size):
u.b += u.edges[i].s2
for u in model.verts:
for i in range(u.cells.size):
u.c += u.cells[i].s3
for u in model.verts:
u.s_ = u.a * u.b * u.c
foo()
sum1 = model.verts.s.to_numpy().sum()
sum2 = model.verts.s_.to_numpy().sum()
assert sum1 == sum2
|
|
from binascii import hexlify, unhexlify
from hashlib import md5, sha1, sha256
from ..exceptions import SSLError
SSLContext = None
HAS_SNI = False
create_default_context = None
import errno
import ssl
try: # Test for SSL features
from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
from ssl import HAS_SNI # Has SNI?
except ImportError:
pass
try:
from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION
except ImportError:
OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
OP_NO_COMPRESSION = 0x20000
try:
from ssl import _DEFAULT_CIPHERS
except ImportError:
_DEFAULT_CIPHERS = (
'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'
'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:ECDH+RC4:'
'DH+RC4:RSA+RC4:!aNULL:!eNULL:!MD5'
)
try:
from ssl import SSLContext # Modern SSL?
except ImportError:
import sys
class SSLContext(object): # Platform-specific: Python 2 & 3.1
supports_set_ciphers = sys.version_info >= (2, 7)
def __init__(self, protocol_version):
self.protocol = protocol_version
# Use default values from a real SSLContext
self.check_hostname = False
self.verify_mode = ssl.CERT_NONE
self.ca_certs = None
self.options = 0
self.certfile = None
self.keyfile = None
self.ciphers = None
def load_cert_chain(self, certfile, keyfile):
self.certfile = certfile
self.keyfile = keyfile
def load_verify_locations(self, location):
self.ca_certs = location
def set_ciphers(self, cipher_suite):
if not self.supports_set_ciphers:
raise TypeError(
'Your version of Python does not support setting '
'a custom cipher suite. Please upgrade to Python '
'2.7, 3.2, or later if you need this functionality.'
)
self.ciphers = cipher_suite
def wrap_socket(self, socket, server_hostname=None):
kwargs = {
'keyfile': self.keyfile,
'certfile': self.certfile,
'ca_certs': self.ca_certs,
'cert_reqs': self.verify_mode,
'ssl_version': self.protocol,
}
if self.supports_set_ciphers: # Platform-specific: Python 2.7+
return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
else: # Platform-specific: Python 2.6
return wrap_socket(socket, **kwargs)
def assert_fingerprint(cert, fingerprint):
"""
Checks if given fingerprint matches the supplied certificate.
:param cert:
Certificate as bytes object.
:param fingerprint:
Fingerprint as string of hexdigits, can be interspersed by colons.
"""
# Maps the length of a digest to a possible hash function producing
# this digest.
hashfunc_map = {
16: md5,
20: sha1,
32: sha256,
}
fingerprint = fingerprint.replace(':', '').lower()
digest_length, odd = divmod(len(fingerprint), 2)
if odd or digest_length not in hashfunc_map:
raise SSLError('Fingerprint is of invalid length.')
# We need encode() here for py32; works on py2 and py33.
fingerprint_bytes = unhexlify(fingerprint.encode())
hashfunc = hashfunc_map[digest_length]
cert_digest = hashfunc(cert).digest()
if not cert_digest == fingerprint_bytes:
raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
.format(hexlify(fingerprint_bytes),
hexlify(cert_digest)))
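# Illustrative usage (comments only): `cert` is the DER-encoded peer
# certificate as returned by `sock.getpeercert(binary_form=True)` and
# `fingerprint` is the pinned hex digest (optionally colon-separated):
#
#   der_cert = tls_sock.getpeercert(binary_form=True)
#   assert_fingerprint(der_cert, pinned_sha256_hex)  # raises SSLError on mismatch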
def resolve_cert_reqs(candidate):
"""
Resolves the argument to a numeric constant, which can be passed to
the wrap_socket function/method from the ssl module.
Defaults to :data:`ssl.CERT_NONE`.
If given a string it is assumed to be the name of the constant in the
:mod:`ssl` module or its abbreviation.
(So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
If it's neither `None` nor a string we assume it is already the numeric
constant which can directly be passed to wrap_socket.
"""
if candidate is None:
return CERT_NONE
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'CERT_' + candidate)
return res
return candidate
def resolve_ssl_version(candidate):
"""
Like resolve_cert_reqs, but resolves the SSL protocol version. Defaults to :data:`ssl.PROTOCOL_SSLv23`.
"""
if candidate is None:
return PROTOCOL_SSLv23
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'PROTOCOL_' + candidate)
return res
return candidate
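# Illustrative behaviour of the two resolvers above (comments only):
#
#   resolve_cert_reqs(None)              -> CERT_NONE
#   resolve_cert_reqs('REQUIRED')        -> ssl.CERT_REQUIRED
#   resolve_cert_reqs('CERT_REQUIRED')   -> ssl.CERT_REQUIRED
#   resolve_ssl_version(None)            -> PROTOCOL_SSLv23
#   resolve_ssl_version('TLSv1')         -> ssl.PROTOCOL_TLSv1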
def create_urllib3_context(ssl_version=None, cert_reqs=ssl.CERT_REQUIRED,
options=None, ciphers=None):
"""All arguments have the same meaning as ``ssl_wrap_socket``.
By default, this function does a lot of the same work that
``ssl.create_default_context`` does on Python 3.4+. It:
- Disables SSLv2, SSLv3, and compression
- Sets a restricted set of server ciphers
If you wish to enable SSLv3, you can do::
from urllib3.util import ssl_
context = ssl_.create_urllib3_context()
context.options &= ~ssl_.OP_NO_SSLv3
You can do the same to enable compression (substituting ``COMPRESSION``
for ``SSLv3`` in the last line above).
:param ssl_version:
The desired protocol version to use. This will default to
PROTOCOL_SSLv23 which will negotiate the highest protocol that both
the server and your installation of OpenSSL support.
:param cert_reqs:
Whether to require the certificate verification. This defaults to
``ssl.CERT_REQUIRED``.
:param options:
Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.
:param ciphers:
Which cipher suites to allow the server to select.
:returns:
Constructed SSLContext object with specified options
:rtype: SSLContext
"""
context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)
if options is None:
options = 0
# SSLv2 is easily broken and is considered harmful and dangerous
options |= OP_NO_SSLv2
# SSLv3 has several problems and is now dangerous
options |= OP_NO_SSLv3
# Disable compression to prevent CRIME attacks for OpenSSL 1.0+
# (issue #309)
options |= OP_NO_COMPRESSION
context.options |= options
if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6
context.set_ciphers(ciphers or _DEFAULT_CIPHERS)
context.verify_mode = cert_reqs
if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2
# We do our own verification, including fingerprints and alternative
# hostnames. So disable it here
context.check_hostname = False
return context
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None, ciphers=None, ssl_context=None):
"""
All arguments except for server_hostname and ssl_context have the same
meaning as they do when using :func:`ssl.wrap_socket`.
:param server_hostname:
When SNI is supported, the expected hostname of the certificate
:param ssl_context:
A pre-made :class:`SSLContext` object. If none is provided, one will
be created using :func:`create_urllib3_context`.
:param ciphers:
A string of ciphers we wish the client to support. This is not
supported on Python 2.6 as the ssl module does not support it.
"""
context = ssl_context
if context is None:
context = create_urllib3_context(ssl_version, cert_reqs,
ciphers=ciphers)
if ca_certs:
try:
context.load_verify_locations(ca_certs)
except IOError as e: # Platform-specific: Python 2.6, 2.7, 3.2
raise SSLError(e)
# Py33 raises FileNotFoundError which subclasses OSError
# These are not equivalent unless we check the errno attribute
except OSError as e: # Platform-specific: Python 3.3 and beyond
if e.errno == errno.ENOENT:
raise SSLError(e)
raise
if certfile:
context.load_cert_chain(certfile, keyfile)
if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI
return context.wrap_socket(sock, server_hostname=server_hostname)
return context.wrap_socket(sock)
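# Illustrative end-to-end sketch (comments only; the host name and CA bundle
# path are placeholders): combining the helpers above to open a verified
# TLS connection.
#
#   import socket
#   sock = socket.create_connection(('example.org', 443))
#   ctx = create_urllib3_context(cert_reqs=ssl.CERT_REQUIRED)
#   ctx.load_verify_locations('/path/to/cacert.pem')
#   tls_sock = ssl_wrap_socket(sock, server_hostname='example.org',
#                              ssl_context=ctx)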
|