"""Test the python_script component."""
import logging
from unittest.mock import mock_open, patch
from homeassistant.components.python_script import DOMAIN, FOLDER, execute
from homeassistant.helpers.service import async_get_all_descriptions
from homeassistant.setup import async_setup_component
from tests.common import patch_yaml_files
async def test_setup(hass):
"""Test we can discover scripts."""
scripts = [
"/some/config/dir/python_scripts/hello.py",
"/some/config/dir/python_scripts/world_beer.py",
]
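    # Pretend the python_scripts folder exists and contains the two files above,
    # so setup registers one service per discovered script.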
with patch(
"homeassistant.components.python_script.os.path.isdir", return_value=True
), patch("homeassistant.components.python_script.glob.iglob", return_value=scripts):
res = await async_setup_component(hass, "python_script", {})
assert res
assert hass.services.has_service("python_script", "hello")
assert hass.services.has_service("python_script", "world_beer")
with patch(
"homeassistant.components.python_script.open",
mock_open(read_data="fake source"),
create=True,
), patch("homeassistant.components.python_script.execute") as mock_ex:
await hass.services.async_call(
"python_script", "hello", {"some": "data"}, blocking=True
)
assert len(mock_ex.mock_calls) == 1
    hass_, script, source, data = mock_ex.mock_calls[0][1]
    assert hass_ is hass
assert script == "hello.py"
assert source == "fake source"
assert data == {"some": "data"}
async def test_setup_fails_on_no_dir(hass, caplog):
"""Test we fail setup when no dir found."""
with patch(
"homeassistant.components.python_script.os.path.isdir", return_value=False
):
res = await async_setup_component(hass, "python_script", {})
assert not res
assert "Folder python_scripts not found in configuration folder" in caplog.text
async def test_execute_with_data(hass, caplog):
"""Test executing a script."""
caplog.set_level(logging.WARNING)
source = """
hass.states.set('test.entity', data.get('name', 'not set'))
"""
hass.async_add_executor_job(execute, hass, "test.py", source, {"name": "paulus"})
await hass.async_block_till_done()
assert hass.states.is_state("test.entity", "paulus")
# No errors logged = good
assert caplog.text == ""
async def test_execute_warns_print(hass, caplog):
"""Test print triggers warning."""
caplog.set_level(logging.WARNING)
source = """
print("This triggers warning.")
"""
hass.async_add_executor_job(execute, hass, "test.py", source, {})
await hass.async_block_till_done()
assert "Don't use print() inside scripts." in caplog.text
async def test_execute_logging(hass, caplog):
"""Test logging works."""
caplog.set_level(logging.INFO)
source = """
logger.info('Logging from inside script')
"""
hass.async_add_executor_job(execute, hass, "test.py", source, {})
await hass.async_block_till_done()
assert "Logging from inside script" in caplog.text
async def test_execute_compile_error(hass, caplog):
"""Test compile error logs error."""
caplog.set_level(logging.ERROR)
source = """
this is not valid Python
"""
hass.async_add_executor_job(execute, hass, "test.py", source, {})
await hass.async_block_till_done()
assert "Error loading script test.py" in caplog.text
async def test_execute_runtime_error(hass, caplog):
"""Test compile error logs error."""
caplog.set_level(logging.ERROR)
source = """
raise Exception('boom')
"""
hass.async_add_executor_job(execute, hass, "test.py", source, {})
await hass.async_block_till_done()
assert "Error executing script: boom" in caplog.text
async def test_accessing_async_methods(hass, caplog):
"""Test compile error logs error."""
caplog.set_level(logging.ERROR)
source = """
hass.async_stop()
"""
hass.async_add_executor_job(execute, hass, "test.py", source, {})
await hass.async_block_till_done()
assert "Not allowed to access async methods" in caplog.text
async def test_using_complex_structures(hass, caplog):
"""Test that dicts and lists work."""
caplog.set_level(logging.INFO)
source = """
mydict = {"a": 1, "b": 2}
mylist = [1, 2, 3, 4]
logger.info('Logging from inside script: %s %s' % (mydict["a"], mylist[2]))
"""
hass.async_add_executor_job(execute, hass, "test.py", source, {})
await hass.async_block_till_done()
assert "Logging from inside script: 1 3" in caplog.text
async def test_accessing_forbidden_methods(hass, caplog):
"""Test compile error logs error."""
caplog.set_level(logging.ERROR)
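    # Each script source below maps to the qualified name expected in the error log.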
for source, name in {
"hass.stop()": "HomeAssistant.stop",
"dt_util.set_default_time_zone()": "module.set_default_time_zone",
"datetime.non_existing": "module.non_existing",
"time.tzset()": "TimeWrapper.tzset",
}.items():
caplog.records.clear()
hass.async_add_executor_job(execute, hass, "test.py", source, {})
await hass.async_block_till_done()
assert f"Not allowed to access {name}" in caplog.text
async def test_iterating(hass):
"""Test compile error logs error."""
source = """
for i in [1, 2]:
hass.states.set('hello.{}'.format(i), 'world')
"""
hass.async_add_executor_job(execute, hass, "test.py", source, {})
await hass.async_block_till_done()
assert hass.states.is_state("hello.1", "world")
assert hass.states.is_state("hello.2", "world")
async def test_using_enumerate(hass):
"""Test that enumerate is accepted and executed."""
source = """
for index, value in enumerate(["earth", "mars"]):
hass.states.set('hello.{}'.format(index), value)
"""
    hass.async_add_executor_job(execute, hass, "test.py", source, {})
await hass.async_block_till_done()
assert hass.states.is_state("hello.0", "earth")
assert hass.states.is_state("hello.1", "mars")
async def test_unpacking_sequence(hass, caplog):
"""Test compile error logs error."""
caplog.set_level(logging.ERROR)
source = """
a,b = (1,2)
ab_list = [(a,b) for a,b in [(1, 2), (3, 4)]]
hass.states.set('hello.a', a)
hass.states.set('hello.b', b)
hass.states.set('hello.ab_list', '{}'.format(ab_list))
"""
hass.async_add_executor_job(execute, hass, "test.py", source, {})
await hass.async_block_till_done()
assert hass.states.is_state("hello.a", "1")
assert hass.states.is_state("hello.b", "2")
assert hass.states.is_state("hello.ab_list", "[(1, 2), (3, 4)]")
# No errors logged = good
assert caplog.text == ""
async def test_execute_sorted(hass, caplog):
"""Test sorted() function."""
caplog.set_level(logging.ERROR)
source = """
a = sorted([3,1,2])
assert(a == [1,2,3])
hass.states.set('hello.a', a[0])
hass.states.set('hello.b', a[1])
hass.states.set('hello.c', a[2])
"""
hass.async_add_executor_job(execute, hass, "test.py", source, {})
await hass.async_block_till_done()
assert hass.states.is_state("hello.a", "1")
assert hass.states.is_state("hello.b", "2")
assert hass.states.is_state("hello.c", "3")
# No errors logged = good
assert caplog.text == ""
async def test_exposed_modules(hass, caplog):
"""Test datetime and time modules exposed."""
caplog.set_level(logging.ERROR)
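    # 521276400 seconds after the Unix epoch falls in 1986, hence the '1986' check below.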
source = """
hass.states.set('module.time', time.strftime('%Y', time.gmtime(521276400)))
hass.states.set('module.time_strptime',
time.strftime('%H:%M', time.strptime('12:34', '%H:%M')))
hass.states.set('module.datetime',
datetime.timedelta(minutes=1).total_seconds())
"""
hass.async_add_executor_job(execute, hass, "test.py", source, {})
await hass.async_block_till_done()
assert hass.states.is_state("module.time", "1986")
assert hass.states.is_state("module.time_strptime", "12:34")
assert hass.states.is_state("module.datetime", "60.0")
# No errors logged = good
assert caplog.text == ""
async def test_execute_functions(hass, caplog):
"""Test functions defined in script can call one another."""
caplog.set_level(logging.ERROR)
source = """
def a():
hass.states.set('hello.a', 'one')
def b():
a()
hass.states.set('hello.b', 'two')
b()
"""
hass.async_add_executor_job(execute, hass, "test.py", source, {})
await hass.async_block_till_done()
assert hass.states.is_state("hello.a", "one")
assert hass.states.is_state("hello.b", "two")
# No errors logged = good
assert caplog.text == ""
async def test_reload(hass):
"""Test we can re-discover scripts."""
scripts = [
"/some/config/dir/python_scripts/hello.py",
"/some/config/dir/python_scripts/world_beer.py",
]
with patch(
"homeassistant.components.python_script.os.path.isdir", return_value=True
), patch("homeassistant.components.python_script.glob.iglob", return_value=scripts):
res = await async_setup_component(hass, "python_script", {})
assert res
assert hass.services.has_service("python_script", "hello")
assert hass.services.has_service("python_script", "world_beer")
assert hass.services.has_service("python_script", "reload")
scripts = [
"/some/config/dir/python_scripts/hello2.py",
"/some/config/dir/python_scripts/world_beer.py",
]
with patch(
"homeassistant.components.python_script.os.path.isdir", return_value=True
), patch("homeassistant.components.python_script.glob.iglob", return_value=scripts):
await hass.services.async_call("python_script", "reload", {}, blocking=True)
assert not hass.services.has_service("python_script", "hello")
assert hass.services.has_service("python_script", "hello2")
assert hass.services.has_service("python_script", "world_beer")
assert hass.services.has_service("python_script", "reload")
async def test_service_descriptions(hass):
"""Test that service descriptions are loaded and reloaded correctly."""
# Test 1: no user-provided services.yaml file
scripts1 = [
"/some/config/dir/python_scripts/hello.py",
"/some/config/dir/python_scripts/world_beer.py",
]
service_descriptions1 = (
"hello:\n"
" name: ABC\n"
" description: Description of hello.py.\n"
" fields:\n"
" fake_param:\n"
" description: Parameter used by hello.py.\n"
" example: 'This is a test of python_script.hello'"
)
services_yaml1 = {
"{}/{}/services.yaml".format(
hass.config.config_dir, FOLDER
): service_descriptions1
}
with patch(
"homeassistant.components.python_script.os.path.isdir", return_value=True
), patch(
"homeassistant.components.python_script.glob.iglob", return_value=scripts1
), patch(
"homeassistant.components.python_script.os.path.exists", return_value=True
), patch_yaml_files(
services_yaml1
):
await async_setup_component(hass, DOMAIN, {})
descriptions = await async_get_all_descriptions(hass)
assert len(descriptions) == 1
assert descriptions[DOMAIN]["hello"]["name"] == "ABC"
assert descriptions[DOMAIN]["hello"]["description"] == "Description of hello.py."
assert (
descriptions[DOMAIN]["hello"]["fields"]["fake_param"]["description"]
== "Parameter used by hello.py."
)
assert (
descriptions[DOMAIN]["hello"]["fields"]["fake_param"]["example"]
== "This is a test of python_script.hello"
)
# Verify default name = file name
assert descriptions[DOMAIN]["world_beer"]["name"] == "world_beer"
assert descriptions[DOMAIN]["world_beer"]["description"] == ""
assert bool(descriptions[DOMAIN]["world_beer"]["fields"]) is False
# Test 2: user-provided services.yaml file
scripts2 = [
"/some/config/dir/python_scripts/hello2.py",
"/some/config/dir/python_scripts/world_beer.py",
]
service_descriptions2 = (
"hello2:\n"
" description: Description of hello2.py.\n"
" fields:\n"
" fake_param:\n"
" description: Parameter used by hello2.py.\n"
" example: 'This is a test of python_script.hello2'"
)
services_yaml2 = {
"{}/{}/services.yaml".format(
hass.config.config_dir, FOLDER
): service_descriptions2
}
with patch(
"homeassistant.components.python_script.os.path.isdir", return_value=True
), patch(
"homeassistant.components.python_script.glob.iglob", return_value=scripts2
), patch(
"homeassistant.components.python_script.os.path.exists", return_value=True
), patch_yaml_files(
services_yaml2
):
await hass.services.async_call(DOMAIN, "reload", {}, blocking=True)
descriptions = await async_get_all_descriptions(hass)
assert len(descriptions) == 1
assert descriptions[DOMAIN]["hello2"]["description"] == "Description of hello2.py."
assert (
descriptions[DOMAIN]["hello2"]["fields"]["fake_param"]["description"]
== "Parameter used by hello2.py."
)
assert (
descriptions[DOMAIN]["hello2"]["fields"]["fake_param"]["example"]
== "This is a test of python_script.hello2"
)
async def test_sleep_warns_one(hass, caplog):
"""Test time.sleep warns once."""
caplog.set_level(logging.WARNING)
source = """
time.sleep(2)
time.sleep(5)
"""
with patch("homeassistant.components.python_script.time.sleep"):
hass.async_add_executor_job(execute, hass, "test.py", source, {})
await hass.async_block_till_done()
assert caplog.text.count("time.sleep") == 1
# -*- coding: utf-8 -*-
from random import randint
from datetime import date
from lettuce import *
from rapidsms.contrib.locations.models import *
from django.template.defaultfilters import slugify
from survey.features.page_objects.households import NewHouseholdPage, HouseholdsListPage, HouseholdDetailsPage, EditHouseholdsPage
from survey.models import EnumerationArea
from survey.models.households import HouseholdMember, HouseholdHead, Household
from survey.models.investigator import Investigator
def random_text(text):
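    # Append a random 1-3 digit suffix so generated names are unlikely to collide across runs.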
return text + str(randint(1, 999))
@step(u'And I visit new household page')
def and_i_visit_new_household_page(step):
world.page = NewHouseholdPage(world.browser)
world.page.visit()
@step(u'And I fill household data')
def and_i_fill_household_data(step):
values = {
'surname': random_text('house'),
'first_name': random_text('ayoyo'),
'date_of_birth': '1980-02-01',
'uid': '2'}
world.page.fill_valid_values(values, world.ea)
@step(u'And I see all households fields are present')
def and_i_see_all_households_fields_are_present(step):
world.page.valid_page()
@step(u'And I have an investigator in that location')
def and_i_have_an_investigator_in_that_location(step):
world.investigator = Investigator.objects.create(
name="Investigator name", ea=world.ea)
@step(u'Then I should see that the household is created')
def then_i_should_see_that_the_household_is_created(step):
world.household_uid = world.page.get_household_values()['uid']
world.page.validate_household_created()
@step(u'And I click No to has children')
def and_i_click_no_to_has_children(step):
world.page.has_children('False')
@step(u'Then I should see children number fields disabled')
def then_i_should_see_children_number_fields_disabled(step):
world.page.are_children_fields_disabled()
@step(u'And No below 5 is also checked')
def and_no_below_5_is_also_checked(step):
world.page.is_no_below_5_checked()
@step(u'And checking below 5 to yes does not work')
def and_checking_below_5_to_yes_does_not_work(step):
world.page.cannot_say_yes_to_below_5()
@step(u'And Now If I click to Yes to has children')
def and_now_if_i_click_to_yes_to_has_children(step):
world.page.has_children('True')
@step(u'Then all children number fields are enabled back')
def then_all_children_number_fields_are_enabled_back(step):
world.page.are_children_fields_disabled(is_disabled=False)
@step(u'And I click No to has below 5')
def and_i_click_no_to_has_below_5(step):
world.page.has_children_below_5('False')
@step(u'Then I should see below 5 number fields disabled')
def then_i_should_see_below_5_number_fields_disabled(step):
world.page.are_children_below_5_fields_disabled(is_disabled=True)
@step(u'And Now If I click Yes to below 5')
def and_now_if_i_click_yes_to_below_5(step):
world.page.has_children_below_5('True')
@step(u'Then below 5 number fields are enabled back')
def then_below_5_number_fields_are_enabled_back(step):
world.page.are_children_below_5_fields_disabled(is_disabled=False)
@step(u'And I click No to has women')
def and_i_click_no_to_has_women(step):
world.page.has_women('False')
@step(u'Then I should see has women number fields disabled')
def then_i_should_see_has_women_number_fields_disabled(step):
world.page.are_women_fields_disabled()
@step(u'And Now If I click Yes to has women')
def and_now_if_i_click_yes_to_has_women(step):
world.page.has_women('True')
@step(u'Then has women number fields are enabled back')
def then_has_women_number_fields_are_enabled_back(step):
world.page.are_women_fields_disabled(is_disabled=False)
@step(u'And I fill in number_of_females lower than sum of 15_19 and 20_49')
def and_i_fill_in_number_of_females_lower_than_sum_of_15_19_and_20_49(step):
world.page.fill_in_number_of_females_lower_than_sum_of_15_19_and_20_49()
@step(u'Then I should see an error on number_of_females')
def then_i_should_see_an_error_on_number_of_females(step):
world.page.see_an_error_on_number_of_females()
@step(u'And Now If I choose Other as occupation')
def and_now_if_i_choose_other_as_occupation(step):
world.page.choose_occupation('Other: ')
@step(u'Then I have to specify one')
def then_i_have_to_specify_one(step):
world.page.is_specify_visible(True)
@step(u'And If I choose a different occupation')
def and_if_i_choose_a_different_occupation(step):
world.page.choose_occupation('Business person')
@step(u'Then Specify disappears')
def then_specify_disappears(step):
world.page.is_specify_visible(False)
@step(u'Given I have an investigator')
def given_i_have_an_investigator(step):
country = LocationType.objects.create(
name="Country", slug=slugify("country"))
uganda = Location.objects.create(name="Uganda", type=country)
world.ea = EnumerationArea.objects.create(name="EA")
world.ea.locations.add(uganda)
world.investigator = Investigator.objects.create(
name="Investigator ",
mobile_number='987654321',
age=20,
level_of_education="Nursery",
language="Luganda",
ea=world.ea)
@step(u'Given I have 100 households')
def given_i_have_100_households(step):
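    # Create 100 households with random head surnames; any creation error is
    # silently ignored by the except below.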
for i in xrange(100):
random_number = str(randint(1, 99999))
try:
HouseholdHead.objects.create(
surname="head" + random_number,
date_of_birth='1980-06-01',
male=False,
household=Household.objects.create(
investigator=world.investigator,
location=world.investigator.location,
uid=i,
ea=world.investigator.ea))
except Exception:
pass
@step(u'And I visit households listing page')
def and_i_visit_households_listing_page(step):
world.page = HouseholdsListPage(world.browser)
world.page.visit()
@step(u'And I should see the households list paginated')
def and_i_should_see_the_households_list_paginated(step):
world.page.validate_fields()
world.page.validate_pagination()
@step(u'Given I have no households')
def given_i_have_no_households(step):
Household.objects.all().delete()
@step(u'And I should see no household message')
def and_i_should_see_no_household_message(step):
world.page.no_registered_huseholds()
@step(u'And I select list households')
def and_i_select_list_households(step):
world.page.click_link_by_text("Households")
world.page = HouseholdsListPage(world.browser)
@step(u'When I click add household button')
def when_i_click_add_household_button(step):
world.page = HouseholdsListPage(world.browser)
world.page.visit()
world.page.click_by_css("#add-household")
@step(u'Then I should see add household page')
def then_i_should_see_add_household_page(step):
world.page = NewHouseholdPage(world.browser)
world.page.validate_url()
@step(u'And then I click on that household ID')
def and_when_i_click_on_that_household_id(step):
world.page.click_link_by_text(world.household.uid)
@step(u'And I should see that household details, its head and members')
def and_i_should_see_that_household_details_its_head_and_members(step):
world.page.validate_household_details()
world.page.validate_household_member_details()
@step(u'And I have a member for that household')
def and_i_have_a_member_for_that_household(step):
world.household = Household.objects.get(uid=world.household_uid)
    fields_data = dict(surname='xyz', male=True,
                       date_of_birth=date(1980, 5, 1),
                       household=world.household)
HouseholdMember.objects.create(**fields_data)
@step(u'Then I should see household member title and add household member link')
def then_i_should_see_household_member_title_and_add_household_member_link(
step):
world.page = HouseholdDetailsPage(world.browser, world.household)
world.page.validate_household_member_title_and_add_household_member_link()
@step(u'And I should see actions edit and delete member')
def and_i_should_see_actions_edit_and_delete_member(step):
world.page.validate_actions_edit_and_delete_member()
@step(u'And I have two other investigators')
def and_i_have_two_other_investigators(step):
world.investigator_1 = Investigator.objects.create(
name="Investigator name", ea=world.ea, mobile_number="123456789")
world.investigator_2 = Investigator.objects.create(
name="Investigator name", ea=world.ea, mobile_number="123456782")
@step(u'And I click on that household ID')
def and_i_click_on_that_household_id(step):
world.page.click_link_by_text(world.household.uid)
@step(u'Then I should be on the household details page')
def then_i_should_be_on_the_household_details_page(step):
world.page = HouseholdDetailsPage(world.browser, world.household)
world.page.validate_url()
@step(u'When I click edit household')
def when_i_click_edit_household(step):
world.browser.find_link_by_text(' Edit Household').first.click()
@step(u'Then I should see edit household form')
def then_i_should_see_edit_household_form(step):
world.page = EditHouseholdsPage(world.browser, world.household)
world.related_location = world.household.get_related_location()
for key in world.related_location.keys()[:-1]:
world.page.is_text_present(world.related_location[key])
@step(u'When I assign a new investigator')
def when_i_assign_a_new_investigator(step):
world.page.fill_in_with_js(
'$("#household-investigator")', world.investigator_1.id)
@step(u'Then I should see the investigator was saved successfully')
def then_i_should_see_the_investigator_was_saved_successfully(step):
world.page.see_success_message('Household', 'edited')
"""The test for the threshold sensor platform."""
from homeassistant.const import ATTR_UNIT_OF_MEASUREMENT, STATE_UNKNOWN, TEMP_CELSIUS
from homeassistant.setup import async_setup_component
async def test_sensor_upper(hass):
"""Test if source is above threshold."""
config = {
"binary_sensor": {
"platform": "threshold",
"upper": "15",
"entity_id": "sensor.test_monitored",
}
}
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
hass.states.async_set(
"sensor.test_monitored", 16, {ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS}
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.attributes.get("entity_id") == "sensor.test_monitored"
assert state.attributes.get("sensor_value") == 16
assert state.attributes.get("position") == "above"
assert state.attributes.get("upper") == float(config["binary_sensor"]["upper"])
assert state.attributes.get("hysteresis") == 0.0
assert state.attributes.get("type") == "upper"
assert state.state == "on"
hass.states.async_set("sensor.test_monitored", 14)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.state == "off"
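    # A value equal to the upper threshold does not count as "above".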
hass.states.async_set("sensor.test_monitored", 15)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.state == "off"
async def test_sensor_lower(hass):
"""Test if source is below threshold."""
config = {
"binary_sensor": {
"platform": "threshold",
"lower": "15",
"entity_id": "sensor.test_monitored",
}
}
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", 16)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.attributes.get("position") == "above"
assert state.attributes.get("lower") == float(config["binary_sensor"]["lower"])
assert state.attributes.get("hysteresis") == 0.0
assert state.attributes.get("type") == "lower"
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", 14)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.state == "on"
async def test_sensor_hysteresis(hass):
"""Test if source is above threshold using hysteresis."""
config = {
"binary_sensor": {
"platform": "threshold",
"upper": "15",
"hysteresis": "2.5",
"entity_id": "sensor.test_monitored",
}
}
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
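    # With upper=15 and hysteresis=2.5, the sensor turns on above 17.5 and off below 12.5.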
hass.states.async_set("sensor.test_monitored", 20)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.attributes.get("position") == "above"
assert state.attributes.get("upper") == float(config["binary_sensor"]["upper"])
assert state.attributes.get("hysteresis") == 2.5
assert state.attributes.get("type") == "upper"
assert state.state == "on"
hass.states.async_set("sensor.test_monitored", 13)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.state == "on"
hass.states.async_set("sensor.test_monitored", 12)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", 17)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", 18)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.state == "on"
async def test_sensor_in_range_no_hysteresis(hass):
"""Test if source is within the range."""
config = {
"binary_sensor": {
"platform": "threshold",
"lower": "10",
"upper": "20",
"entity_id": "sensor.test_monitored",
}
}
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
hass.states.async_set(
"sensor.test_monitored", 16, {ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS}
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.attributes.get("entity_id") == "sensor.test_monitored"
assert state.attributes.get("sensor_value") == 16
assert state.attributes.get("position") == "in_range"
assert state.attributes.get("lower") == float(config["binary_sensor"]["lower"])
assert state.attributes.get("upper") == float(config["binary_sensor"]["upper"])
assert state.attributes.get("hysteresis") == 0.0
assert state.attributes.get("type") == "range"
assert state.state == "on"
hass.states.async_set("sensor.test_monitored", 9)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.attributes.get("position") == "below"
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", 21)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.attributes.get("position") == "above"
assert state.state == "off"
async def test_sensor_in_range_with_hysteresis(hass):
"""Test if source is within the range."""
config = {
"binary_sensor": {
"platform": "threshold",
"lower": "10",
"upper": "20",
"hysteresis": "2",
"entity_id": "sensor.test_monitored",
}
}
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
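    # With lower=10, upper=20 and hysteresis=2, the sensor stays on between 8 and 22
    # and only turns back on once the value is between 12 and 18 again.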
hass.states.async_set(
"sensor.test_monitored", 16, {ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS}
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.attributes.get("entity_id") == "sensor.test_monitored"
assert state.attributes.get("sensor_value") == 16
assert state.attributes.get("position") == "in_range"
assert state.attributes.get("lower") == float(config["binary_sensor"]["lower"])
assert state.attributes.get("upper") == float(config["binary_sensor"]["upper"])
assert state.attributes.get("hysteresis") == float(
config["binary_sensor"]["hysteresis"]
)
assert state.attributes.get("type") == "range"
assert state.state == "on"
hass.states.async_set("sensor.test_monitored", 8)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.attributes.get("position") == "in_range"
assert state.state == "on"
hass.states.async_set("sensor.test_monitored", 7)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.attributes.get("position") == "below"
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", 12)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.attributes.get("position") == "below"
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", 13)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.attributes.get("position") == "in_range"
assert state.state == "on"
hass.states.async_set("sensor.test_monitored", 22)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.attributes.get("position") == "in_range"
assert state.state == "on"
hass.states.async_set("sensor.test_monitored", 23)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.attributes.get("position") == "above"
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", 18)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.attributes.get("position") == "above"
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", 17)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.attributes.get("position") == "in_range"
assert state.state == "on"
async def test_sensor_in_range_unknown_state(hass):
"""Test if source is within the range."""
config = {
"binary_sensor": {
"platform": "threshold",
"lower": "10",
"upper": "20",
"entity_id": "sensor.test_monitored",
}
}
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
hass.states.async_set(
"sensor.test_monitored", 16, {ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS}
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.attributes.get("entity_id") == "sensor.test_monitored"
assert state.attributes.get("sensor_value") == 16
assert state.attributes.get("position") == "in_range"
assert state.attributes.get("lower") == float(config["binary_sensor"]["lower"])
assert state.attributes.get("upper") == float(config["binary_sensor"]["upper"])
assert state.attributes.get("hysteresis") == 0.0
assert state.attributes.get("type") == "range"
assert state.state == "on"
hass.states.async_set("sensor.test_monitored", STATE_UNKNOWN)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.attributes.get("position") == "unknown"
assert state.state == "off"
async def test_sensor_lower_zero_threshold(hass):
"""Test if a lower threshold of zero is set."""
config = {
"binary_sensor": {
"platform": "threshold",
"lower": "0",
"entity_id": "sensor.test_monitored",
}
}
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", 16)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.attributes.get("type") == "lower"
assert state.attributes.get("lower") == float(config["binary_sensor"]["lower"])
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", -3)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.state == "on"
async def test_sensor_upper_zero_threshold(hass):
"""Test if an upper threshold of zero is set."""
config = {
"binary_sensor": {
"platform": "threshold",
"upper": "0",
"entity_id": "sensor.test_monitored",
}
}
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", -10)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.attributes.get("type") == "upper"
assert state.attributes.get("upper") == float(config["binary_sensor"]["upper"])
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", 2)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.threshold")
assert state.state == "on"
#!/usr/bin/env python
"""
Copyright (c) 2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
try:
from queue import Queue
except ImportError:
from Queue import Queue
import axis_ep
module = 'axis_fifo_64'
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("test_%s.v" % module)
src = ' '.join(srcs)
build_cmd = "iverilog -o test_%s.vvp %s" % (module, src)
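# The function below builds the Verilog testbench with Icarus Verilog and attaches
# the resulting vvp simulation to the MyHDL signals via a Cosimulation object.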
def dut_axis_fifo_64(clk,
rst,
current_test,
input_axis_tdata,
input_axis_tkeep,
input_axis_tvalid,
input_axis_tready,
input_axis_tlast,
input_axis_tuser,
output_axis_tdata,
output_axis_tkeep,
output_axis_tvalid,
output_axis_tready,
output_axis_tlast,
output_axis_tuser):
if os.system(build_cmd):
raise Exception("Error running build command")
return Cosimulation("vvp -m myhdl test_%s.vvp -lxt2" % module,
clk=clk,
rst=rst,
current_test=current_test,
input_axis_tdata=input_axis_tdata,
input_axis_tkeep=input_axis_tkeep,
input_axis_tvalid=input_axis_tvalid,
input_axis_tready=input_axis_tready,
input_axis_tlast=input_axis_tlast,
input_axis_tuser=input_axis_tuser,
output_axis_tdata=output_axis_tdata,
output_axis_tkeep=output_axis_tkeep,
output_axis_tvalid=output_axis_tvalid,
output_axis_tready=output_axis_tready,
output_axis_tlast=output_axis_tlast,
output_axis_tuser=output_axis_tuser)
def bench():
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
input_axis_tdata = Signal(intbv(0)[64:])
input_axis_tkeep = Signal(intbv(0)[8:])
input_axis_tvalid = Signal(bool(0))
input_axis_tlast = Signal(bool(0))
input_axis_tuser = Signal(bool(0))
output_axis_tready = Signal(bool(0))
# Outputs
input_axis_tready = Signal(bool(0))
output_axis_tdata = Signal(intbv(0)[64:])
output_axis_tkeep = Signal(intbv(0)[8:])
output_axis_tvalid = Signal(bool(0))
output_axis_tlast = Signal(bool(0))
output_axis_tuser = Signal(bool(0))
# sources and sinks
source_queue = Queue()
source_pause = Signal(bool(0))
sink_queue = Queue()
sink_pause = Signal(bool(0))
source = axis_ep.AXIStreamSource(clk,
rst,
tdata=input_axis_tdata,
tkeep=input_axis_tkeep,
tvalid=input_axis_tvalid,
tready=input_axis_tready,
tlast=input_axis_tlast,
tuser=input_axis_tuser,
fifo=source_queue,
pause=source_pause,
name='source')
sink = axis_ep.AXIStreamSink(clk,
rst,
tdata=output_axis_tdata,
tkeep=output_axis_tkeep,
tvalid=output_axis_tvalid,
tready=output_axis_tready,
tlast=output_axis_tlast,
tuser=output_axis_tuser,
fifo=sink_queue,
pause=sink_pause,
name='sink')
# DUT
dut = dut_axis_fifo_64(clk,
rst,
current_test,
input_axis_tdata,
input_axis_tkeep,
input_axis_tvalid,
input_axis_tready,
input_axis_tlast,
input_axis_tuser,
output_axis_tdata,
output_axis_tkeep,
output_axis_tvalid,
output_axis_tready,
output_axis_tlast,
output_axis_tuser)
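    # Free-running clock: toggle every 4 simulation time units (8-unit period).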
@always(delay(4))
def clkgen():
clk.next = not clk
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
yield clk.posedge
rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
yield clk.posedge
yield clk.posedge
print("test 1: test packet")
current_test.next = 1
test_frame = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
source_queue.put(test_frame)
yield clk.posedge
yield output_axis_tlast.posedge
yield clk.posedge
yield clk.posedge
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame
yield delay(100)
yield clk.posedge
print("test 2: longer packet")
current_test.next = 2
test_frame = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
bytearray(range(256)))
source_queue.put(test_frame)
yield clk.posedge
yield output_axis_tlast.posedge
yield clk.posedge
yield clk.posedge
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame
yield clk.posedge
print("test 3: test packet with pauses")
current_test.next = 3
test_frame = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
bytearray(range(256)))
source_queue.put(test_frame)
yield clk.posedge
yield delay(64)
yield clk.posedge
source_pause.next = True
yield delay(32)
yield clk.posedge
source_pause.next = False
yield delay(64)
yield clk.posedge
sink_pause.next = True
yield delay(32)
yield clk.posedge
sink_pause.next = False
yield output_axis_tlast.posedge
yield clk.posedge
yield clk.posedge
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame
yield delay(100)
yield clk.posedge
print("test 4: back-to-back packets")
current_test.next = 4
test_frame1 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x01\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
test_frame2 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x02\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
source_queue.put(test_frame1)
source_queue.put(test_frame2)
yield clk.posedge
yield output_axis_tlast.posedge
yield clk.posedge
yield output_axis_tlast.posedge
yield clk.posedge
yield clk.posedge
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame1
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame2
yield delay(100)
yield clk.posedge
print("test 5: alternate pause source")
current_test.next = 5
test_frame1 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x01\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
test_frame2 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x02\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
source_queue.put(test_frame1)
source_queue.put(test_frame2)
yield clk.posedge
while input_axis_tvalid or output_axis_tvalid:
source_pause.next = True
yield clk.posedge
yield clk.posedge
yield clk.posedge
source_pause.next = False
yield clk.posedge
yield clk.posedge
yield clk.posedge
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame1
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame2
yield delay(100)
yield clk.posedge
print("test 6: alternate pause sink")
current_test.next = 6
test_frame1 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x01\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
test_frame2 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x02\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
source_queue.put(test_frame1)
source_queue.put(test_frame2)
yield clk.posedge
while input_axis_tvalid or output_axis_tvalid:
sink_pause.next = True
yield clk.posedge
yield clk.posedge
yield clk.posedge
sink_pause.next = False
yield clk.posedge
yield clk.posedge
yield clk.posedge
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame1
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame2
yield delay(100)
yield clk.posedge
print("test 7: tuser assert")
current_test.next = 7
test_frame = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
test_frame.user = 1
source_queue.put(test_frame)
yield clk.posedge
yield output_axis_tlast.posedge
yield clk.posedge
yield clk.posedge
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame
assert rx_frame.user[-1]
yield delay(100)
raise StopSimulation
return dut, source, sink, clkgen, check
def test_bench():
os.chdir(os.path.dirname(os.path.abspath(__file__)))
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
# Copyright 2012 Nicira, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import fixtures
from lxml import etree
import mock
import os_vif
from os_vif import exception as osv_exception
from os_vif import objects as osv_objects
from os_vif.objects import fields as osv_fields
from oslo_concurrency import processutils
from oslo_config import cfg
import six
from nova import exception
from nova.network import linux_net
from nova.network import model as network_model
from nova import objects
from nova.pci import utils as pci_utils
from nova import test
from nova.tests.unit import matchers
from nova.tests.unit.virt import fakelibosinfo
from nova.tests.unit.virt.libvirt import fakelibvirt
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import host
from nova.virt.libvirt import vif
CONF = cfg.CONF
MIN_LIBVIRT_VHOSTUSER_MQ = vif.MIN_LIBVIRT_VHOSTUSER_MQ
class LibvirtVifTestCase(test.NoDBTestCase):
gateway_bridge_4 = network_model.IP(address='101.168.1.1', type='gateway')
dns_bridge_4 = network_model.IP(address='8.8.8.8', type=None)
ips_bridge_4 = [network_model.IP(address='101.168.1.9', type=None)]
subnet_bridge_4 = network_model.Subnet(
cidr='101.168.1.0/24',
dns=[dns_bridge_4],
gateway=gateway_bridge_4,
routes=None,
dhcp_server='191.168.1.1')
gateway_bridge_6 = network_model.IP(address='101:1db9::1', type='gateway')
subnet_bridge_6 = network_model.Subnet(cidr='101:1db9::/64',
dns=None,
gateway=gateway_bridge_6,
ips=None,
routes=None)
network_bridge = network_model.Network(id='network-id-xxx-yyy-zzz',
bridge='br0',
label=None,
subnets=[subnet_bridge_4, subnet_bridge_6],
bridge_interface='eth0',
vlan=99, mtu=9000)
vif_bridge = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=network_model.VIF_TYPE_BRIDGE,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid=None)
network_bridge_neutron = network_model.Network(id='network-id-xxx-yyy-zzz',
bridge=None,
label=None,
subnets=[subnet_bridge_4, subnet_bridge_6],
bridge_interface='eth0',
vlan=99)
vif_bridge_neutron = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_bridge_neutron,
type=None,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc')
network_ovs = network_model.Network(id='network-id-xxx-yyy-zzz',
bridge='br0',
label=None,
subnets=[subnet_bridge_4, subnet_bridge_6],
bridge_interface=None,
vlan=99, mtu=1000)
network_ivs = network_model.Network(id='network-id-xxx-yyy-zzz',
bridge='br0',
label=None,
subnets=[subnet_bridge_4, subnet_bridge_6],
bridge_interface=None,
vlan=99)
vif_agilio_ovs = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=network_model.VIF_TYPE_AGILIO_OVS,
details={'port_filter': False},
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc')
vif_agilio_ovs_direct = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=network_model.VIF_TYPE_AGILIO_OVS,
vnic_type=network_model.VNIC_TYPE_DIRECT,
ovs_interfaceid='aaa-bbb-ccc',
devname='tap-xxx-yyy-zzz',
profile={'pci_slot': '0000:0a:00.1'})
vif_agilio_ovs_forwarder = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=network_model.VIF_TYPE_AGILIO_OVS,
vnic_type=network_model.VNIC_TYPE_VIRTIO_FORWARDER,
profile={'pci_slot': '0000:0a:00.1'},
details={
network_model.VIF_DETAILS_VHOSTUSER_MODE: 'client',
network_model.VIF_DETAILS_VHOSTUSER_SOCKET: '/tmp/usv-xxx-yyy-zzz',
network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG: True},
ovs_interfaceid='aaa-bbb-ccc', mtu=1500)
vif_ovs = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=network_model.VIF_TYPE_OVS,
details={'port_filter': False},
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc')
vif_ovs_hybrid = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=network_model.VIF_TYPE_OVS,
details={'ovs_hybrid_plug': True,
'port_filter': True},
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc')
vif_ovs_filter_cap = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=network_model.VIF_TYPE_OVS,
details={'port_filter': True},
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc')
vif_ovs_legacy = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=None,
devname=None,
ovs_interfaceid=None)
vif_ivs = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ivs,
type=network_model.VIF_TYPE_IVS,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc')
vif_ivs_filter_cap = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ivs,
type=network_model.VIF_TYPE_IVS,
details={'port_filter': True},
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc')
vif_ivs_hybrid = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ivs,
type=network_model.VIF_TYPE_IVS,
details={
'port_filter': True,
'ovs_hybrid_plug': True},
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc')
vif_ivs_legacy = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=None,
devname=None,
ovs_interfaceid='aaa')
vif_none = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=None,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid=None)
network_8021 = network_model.Network(id='network-id-xxx-yyy-zzz',
bridge=None,
label=None,
subnets=[subnet_bridge_4,
subnet_bridge_6],
interface='eth0',
vlan=99)
vif_8021qbh = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_802_QBH,
vnic_type=network_model.VNIC_TYPE_DIRECT,
ovs_interfaceid=None,
details={
network_model.VIF_DETAILS_PROFILEID:
'MyPortProfile'},
profile={'pci_vendor_info': '1137:0043',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1'})
vif_hw_veb = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_HW_VEB,
vnic_type=network_model.VNIC_TYPE_DIRECT,
ovs_interfaceid=None,
details={
network_model.VIF_DETAILS_VLAN: 100},
profile={'pci_vendor_info': '1137:0043',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1'})
vif_hostdev_physical = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_HOSTDEV,
vnic_type=network_model.VNIC_TYPE_DIRECT_PHYSICAL,
ovs_interfaceid=None,
profile={'pci_vendor_info': '1137:0043',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1'})
vif_hw_veb_macvtap = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_HW_VEB,
vnic_type=network_model.VNIC_TYPE_MACVTAP,
ovs_interfaceid=None,
details={network_model.VIF_DETAILS_VLAN: 100},
profile={'pci_vendor_info': '1137:0043',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1'})
vif_8021qbg = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_802_QBG,
ovs_interfaceid=None,
qbg_params=network_model.VIF8021QbgParams(
managerid="xxx-yyy-zzz",
typeid="aaa-bbb-ccc",
typeidversion="1",
instanceid="ddd-eee-fff"))
network_midonet = network_model.Network(id='network-id-xxx-yyy-zzz',
label=None,
bridge=None,
subnets=[subnet_bridge_4],
interface='eth0')
network_vrouter = network_model.Network(id='network-id-xxx-yyy-zzz',
label=None,
bridge=None,
subnets=[subnet_bridge_4, subnet_bridge_6],
interface='eth0')
vif_vrouter = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_vrouter,
type=network_model.VIF_TYPE_VROUTER,
devname='tap-xxx-yyy-zzz')
vif_ib_hostdev = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_IB_HOSTDEV,
vnic_type=network_model.VNIC_TYPE_DIRECT,
ovs_interfaceid=None,
details={network_model.VIF_DETAILS_VLAN: 100},
profile={'pci_vendor_info': '1137:0043',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1'})
vif_midonet = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_midonet,
type=network_model.VIF_TYPE_MIDONET,
devname='tap-xxx-yyy-zzz')
vif_tap = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
type=network_model.VIF_TYPE_TAP,
devname='tap-xxx-yyy-zzz')
vif_iovisor = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=network_model.VIF_TYPE_IOVISOR,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid=None)
vif_vhostuser = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=network_model.VIF_TYPE_VHOSTUSER,
details={
network_model.VIF_DETAILS_VHOSTUSER_MODE: 'client',
network_model.VIF_DETAILS_VHOSTUSER_SOCKET: '/tmp/vif-xxx-yyy-zzz'
})
vif_vhostuser_ovs = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=network_model.VIF_TYPE_VHOSTUSER,
details={
network_model.VIF_DETAILS_VHOSTUSER_MODE: 'client',
network_model.VIF_DETAILS_VHOSTUSER_SOCKET: '/tmp/usv-xxx-yyy-zzz',
network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG: True},
ovs_interfaceid='aaa-bbb-ccc', mtu=1500)
vif_vhostuser_no_path = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=network_model.VIF_TYPE_VHOSTUSER,
details={network_model.VIF_DETAILS_VHOSTUSER_MODE: 'client'})
vif_macvtap_vlan = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_MACVTAP,
details={
network_model.VIF_DETAILS_VLAN: 1,
network_model.VIF_DETAILS_PHYS_INTERFACE: 'eth0',
network_model.VIF_DETAILS_MACVTAP_SOURCE: 'eth0.1',
network_model.VIF_DETAILS_MACVTAP_MODE: 'vepa'})
vif_macvtap_flat = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_MACVTAP,
details={
network_model.VIF_DETAILS_PHYS_INTERFACE: 'eth0',
network_model.VIF_DETAILS_MACVTAP_SOURCE: 'eth0',
network_model.VIF_DETAILS_MACVTAP_MODE: 'bridge'})
vif_macvtap_exception = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_MACVTAP)
instance = objects.Instance(id=1,
uuid='f0000000-0000-0000-0000-000000000001',
project_id=723)
bandwidth = {
'quota:vif_inbound_peak': '200',
'quota:vif_outbound_peak': '20',
'quota:vif_inbound_average': '100',
'quota:vif_outbound_average': '10',
'quota:vif_inbound_burst': '300',
'quota:vif_outbound_burst': '30'
}
def setup_os_vif_objects(self):
self.os_vif_network = osv_objects.network.Network(
id="b82c1929-051e-481d-8110-4669916c7915",
label="Demo Net",
subnets=osv_objects.subnet.SubnetList(
objects=[]))
self.os_vif_bridge = osv_objects.vif.VIFBridge(
id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
address="22:52:25:62:e2:aa",
plugin="linux_bridge",
vif_name="nicdc065497-3c",
bridge_name="br100",
has_traffic_filtering=False,
network=self.os_vif_network)
self.os_vif_ovs_prof = osv_objects.vif.VIFPortProfileOpenVSwitch(
interface_id="07bd6cea-fb37-4594-b769-90fc51854ee9",
profile_id="fishfood")
self.os_vif_repr_prof = osv_objects.vif.VIFPortProfileOVSRepresentor(
interface_id="07bd6cea-fb37-4594-b769-90fc51854ee9",
profile_id="fishfood",
representor_name='nicdc065497-3c',
representor_address='0000:0a:00.1')
self.os_vif_agilio_ovs = osv_objects.vif.VIFOpenVSwitch(
id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
address="22:52:25:62:e2:aa",
plugin="agilio_ovs",
vif_name="nicdc065497-3c",
bridge_name="br0",
port_profile=self.os_vif_ovs_prof,
network=self.os_vif_network)
self.os_vif_agilio_forwarder = osv_objects.vif.VIFVHostUser(
id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
address="22:52:25:62:e2:aa",
plugin="agilio_ovs",
vif_name="nicdc065497-3c",
path='/var/run/openvswitch/vhudc065497-3c',
mode='client',
port_profile=self.os_vif_repr_prof,
network=self.os_vif_network)
self.os_vif_agilio_direct = osv_objects.vif.VIFHostDevice(
id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
address="22:52:25:62:e2:aa",
plugin="agilio_ovs",
vif_name="nicdc065497-3c",
dev_type=osv_fields.VIFHostDeviceDevType.ETHERNET,
dev_address='0000:0a:00.1',
port_profile=self.os_vif_repr_prof,
network=self.os_vif_network)
self.os_vif_ovs = osv_objects.vif.VIFOpenVSwitch(
id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
address="22:52:25:62:e2:aa",
unplugin="linux_bridge",
vif_name="nicdc065497-3c",
bridge_name="br0",
port_profile=self.os_vif_ovs_prof,
network=self.os_vif_network)
self.os_vif_ovs_hybrid = osv_objects.vif.VIFBridge(
id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
address="22:52:25:62:e2:aa",
unplugin="linux_bridge",
vif_name="nicdc065497-3c",
bridge_name="br0",
port_profile=self.os_vif_ovs_prof,
has_traffic_filtering=False,
network=self.os_vif_network)
self.os_vif_vhostuser = osv_objects.vif.VIFVHostUser(
id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
address="22:52:25:62:e2:aa",
plugin="openvswitch",
vif_name="vhudc065497-3c",
path='/var/run/openvswitch/vhudc065497-3c',
mode='client',
port_profile=self.os_vif_ovs_prof,
network=self.os_vif_network)
self.os_vif_hostdevice_ethernet = osv_objects.vif.VIFHostDevice(
id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
address="22:52:25:62:e2:aa",
plugin="linux_bridge",
vif_name="nicdc065497-3c",
dev_type=osv_fields.VIFHostDeviceDevType.ETHERNET,
dev_address='0000:0a:00.1',
network=self.os_vif_network)
self.os_vif_hostdevice_generic = osv_objects.vif.VIFHostDevice(
id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
address="22:52:25:62:e2:aa",
plugin="linux_bridge",
vif_name="nicdc065497-3c",
dev_type=osv_fields.VIFHostDeviceDevType.GENERIC,
dev_address='0000:0a:00.1',
network=self.os_vif_network)
self.os_vif_inst_info = osv_objects.instance_info.InstanceInfo(
uuid="d5b1090c-9e00-4fa4-9504-4b1494857970",
name="instance-000004da",
project_id="2f37d7f6-e51a-4a1f-8b6e-b0917ffc8390")
def setUp(self):
super(LibvirtVifTestCase, self).setUp()
self.useFixture(fakelibvirt.FakeLibvirtFixture(stub_os_vif=False))
self.flags(firewall_driver=None)
# os_vif.initialize is typically done in nova-compute startup
os_vif.initialize()
self.setup_os_vif_objects()
self.executes = []
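        # Record any command nova.utils.execute would run instead of executing it.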
def fake_execute(*cmd, **kwargs):
self.executes.append(cmd)
return None, None
self.stub_out('nova.utils.execute', fake_execute)
def _get_node(self, xml):
doc = etree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
return ret[0]
def _assertMacEquals(self, node, vif):
mac = node.find("mac").get("address")
self.assertEqual(mac, vif['address'])
def _assertTypeEquals(self, node, type, attr, source, br_want,
prefix=None):
self.assertEqual(node.get("type"), type)
br_name = node.find(attr).get(source)
if prefix is None:
self.assertEqual(br_name, br_want)
else:
self.assertTrue(br_name.startswith(prefix))
def _assertTypeAndMacEquals(self, node, type, attr, source, vif,
br_want=None, size=0, prefix=None):
ret = node.findall("filterref")
self.assertEqual(len(ret), size)
self._assertTypeEquals(node, type, attr, source, br_want,
prefix)
self._assertMacEquals(node, vif)
def _assertModel(self, xml, model_want=None, driver_want=None):
node = self._get_node(xml)
if model_want is None:
ret = node.findall("model")
self.assertEqual(len(ret), 0)
else:
model = node.find("model").get("type")
self.assertEqual(model, model_want)
if driver_want is None:
ret = node.findall("driver")
self.assertEqual(len(ret), 0)
else:
driver = node.find("driver").get("name")
self.assertEqual(driver, driver_want)
def _assertTypeAndPciEquals(self, node, type, vif):
self.assertEqual(node.get("type"), type)
self._assertPciEqual(node, vif, type="pci")
def _assertPciEqual(self, node, vif, type=None):
address = node.find("source").find("address")
if type:
addr_type = address.get("type")
self.assertEqual(type, addr_type)
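        # Rebuild the guest's PCI address as "domain:bus:slot.function"
        # (e.g. 0000:0a:00.1) by stripping the "0x" prefixes libvirt uses,
        # then compare it with the pci_slot recorded in the VIF profile.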
pci_slot = "%(domain)s:%(bus)s:%(slot)s.%(func)s" % {
'domain': address.get("domain")[2:],
'bus': address.get("bus")[2:],
'slot': address.get("slot")[2:],
'func': address.get("function")[2:]}
pci_slot_want = vif['profile']['pci_slot']
self.assertEqual(pci_slot, pci_slot_want)
def _assertXmlEqual(self, expectedXmlstr, actualXmlstr):
self.assertThat(actualXmlstr, matchers.XMLMatches(expectedXmlstr))
def _get_conf(self):
conf = vconfig.LibvirtConfigGuest()
conf.virt_type = "qemu"
conf.name = "fake-name"
conf.uuid = "fake-uuid"
conf.memory = 100 * 1024
conf.vcpus = 4
return conf
def _get_instance_xml(self, driver, vif, image_meta=None, flavor=None,
has_min_libvirt_version=True):
if flavor is None:
flavor = objects.Flavor(name='m1.small',
memory_mb=128,
vcpus=1,
root_gb=0,
ephemeral_gb=0,
swap=0,
extra_specs=dict(self.bandwidth),
deleted_at=None,
deleted=0,
created_at=None, flavorid=1,
is_public=True, vcpu_weight=None,
id=2, disabled=False, rxtx_factor=1.0)
conf = self._get_conf()
hostimpl = host.Host("qemu:///system")
with mock.patch.object(hostimpl, 'has_min_version',
return_value=has_min_libvirt_version):
nic = driver.get_config(self.instance, vif, image_meta,
flavor, CONF.libvirt.virt_type,
hostimpl)
conf.add_device(nic)
return conf.to_xml()
def _test_virtio_multiqueue(self, vcpus, want_queues):
self.flags(use_virtio_for_bridges=True,
virt_type='kvm',
group='libvirt')
flavor = objects.Flavor(name='m1.small',
memory_mb=128,
vcpus=vcpus,
root_gb=0,
ephemeral_gb=0,
swap=0,
deleted_at=None,
deleted=0,
created_at=None, flavorid=1,
is_public=True, vcpu_weight=None,
id=2, disabled=False, rxtx_factor=1.0)
d = vif.LibvirtGenericVIFDriver()
image_meta = objects.ImageMeta.from_dict(
{'properties': {'hw_vif_model': 'virtio',
'hw_vif_multiqueue_enabled': 'true'}})
xml = self._get_instance_xml(d, self.vif_bridge,
image_meta, flavor)
node = self._get_node(xml)
driver = node.find("driver").get("name")
self.assertEqual(driver, 'vhost')
queues = node.find("driver").get("queues")
self.assertEqual(queues, want_queues)
def test_virtio_multiqueue(self):
self._test_virtio_multiqueue(4, '4')
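    # The libvirt driver caps the number of virtio queues at what the host
    # kernel can support: kernels before 3.0 are limited to a single tap
    # queue, 3.x kernels to 8 queues and 4.x kernels to 256, which is why a
    # 10-vCPU flavor yields 1, 8 and 10 queues in the tests below.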
@mock.patch('os.uname', return_value=('Linux', '', '2.6.32-21-generic'))
def test_virtio_multiqueue_in_kernel_2(self, mock_uname):
self._test_virtio_multiqueue(10, '1')
@mock.patch('os.uname', return_value=('Linux', '', '3.19.0-47-generic'))
def test_virtio_multiqueue_in_kernel_3(self, mock_uname):
self._test_virtio_multiqueue(10, '8')
@mock.patch('os.uname', return_value=('Linux', '', '4.2.0-35-generic'))
def test_virtio_multiqueue_in_kernel_4(self, mock_uname):
self._test_virtio_multiqueue(10, '10')
@mock.patch.object(host.Host, "has_min_version")
def test_vhostuser_os_vif_multiqueue(self, has_min_version):
d = vif.LibvirtGenericVIFDriver()
hostimpl = host.Host("qemu:///system")
image_meta = objects.ImageMeta.from_dict(
{'properties': {'hw_vif_model': 'virtio',
'hw_vif_multiqueue_enabled': 'true'}})
flavor = objects.Flavor(name='m1.small',
memory_mb=128,
vcpus=4,
root_gb=0,
ephemeral_gb=0,
swap=0,
deleted_at=None,
deleted=0,
created_at=None, flavorid=1,
is_public=True, vcpu_weight=None,
id=2, disabled=False, rxtx_factor=1.0)
conf = d.get_base_config(None, 'ca:fe:de:ad:be:ef', image_meta,
flavor, 'kvm', 'normal')
self.assertEqual(4, conf.vhost_queues)
self.assertEqual('vhost', conf.driver_name)
has_min_version.return_value = True
d._set_config_VIFVHostUser(self.instance, self.os_vif_vhostuser,
conf, hostimpl)
self.assertEqual(4, conf.vhost_queues)
self.assertEqual('vhost', conf.driver_name)
has_min_version.assert_called_once_with(MIN_LIBVIRT_VHOSTUSER_MQ)
has_min_version.return_value = False
d._set_config_VIFVHostUser(self.instance, self.os_vif_vhostuser,
conf, hostimpl)
self.assertIsNone(conf.vhost_queues)
self.assertIsNone(conf.driver_name)
def test_multiple_nics(self):
conf = self._get_conf()
# Tests multiple nic configuration and that target_dev is
# set for each
nics = [{'net_type': 'bridge',
'mac_addr': '00:00:00:00:00:0b',
'source_dev': 'b_source_dev',
'target_dev': 'b_target_dev'},
{'net_type': 'ethernet',
'mac_addr': '00:00:00:00:00:0e',
'source_dev': 'e_source_dev',
'target_dev': 'e_target_dev'},
{'net_type': 'direct',
'mac_addr': '00:00:00:00:00:0d',
'source_dev': 'd_source_dev',
'target_dev': 'd_target_dev'}]
for nic in nics:
nic_conf = vconfig.LibvirtConfigGuestInterface()
nic_conf.net_type = nic['net_type']
nic_conf.target_dev = nic['target_dev']
nic_conf.mac_addr = nic['mac_addr']
nic_conf.source_dev = nic['source_dev']
conf.add_device(nic_conf)
xml = conf.to_xml()
doc = etree.fromstring(xml)
for nic in nics:
path = "./devices/interface/[@type='%s']" % nic['net_type']
node = doc.find(path)
self.assertEqual(nic['net_type'], node.get("type"))
self.assertEqual(nic['mac_addr'],
node.find("mac").get("address"))
self.assertEqual(nic['target_dev'],
node.find("target").get("dev"))
def test_model_novirtio(self):
self.flags(use_virtio_for_bridges=False,
virt_type='kvm',
group='libvirt')
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_bridge)
self._assertModel(xml)
def test_model_kvm(self):
self.flags(use_virtio_for_bridges=True,
virt_type='kvm',
group='libvirt')
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_bridge)
self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
def test_model_parallels(self):
self.flags(use_virtio_for_bridges=True,
virt_type='parallels',
group='libvirt')
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_bridge)
self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
def test_model_kvm_qemu_parallels_custom(self):
for virt in ('kvm', 'qemu', 'parallels'):
self.flags(use_virtio_for_bridges=True,
virt_type=virt,
group='libvirt')
d = vif.LibvirtGenericVIFDriver()
if virt == 'parallels':
supported = (network_model.VIF_MODEL_RTL8139,
network_model.VIF_MODEL_E1000)
elif virt == 'qemu':
supported = (network_model.VIF_MODEL_LAN9118,
network_model.VIF_MODEL_NE2K_PCI,
network_model.VIF_MODEL_PCNET,
network_model.VIF_MODEL_RTL8139,
network_model.VIF_MODEL_E1000,
network_model.VIF_MODEL_SPAPR_VLAN)
else:
supported = (network_model.VIF_MODEL_NE2K_PCI,
network_model.VIF_MODEL_PCNET,
network_model.VIF_MODEL_RTL8139,
network_model.VIF_MODEL_E1000,
network_model.VIF_MODEL_SPAPR_VLAN)
for model in supported:
image_meta = objects.ImageMeta.from_dict(
{'properties': {'hw_vif_model': model}})
xml = self._get_instance_xml(d, self.vif_bridge,
image_meta)
self._assertModel(xml, model)
@mock.patch.object(vif.designer, 'set_vif_guest_frontend_config')
def test_model_with_osinfo(self, mock_set):
self.flags(use_virtio_for_bridges=True,
virt_type='kvm',
group='libvirt')
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.osinfo.libosinfo',
fakelibosinfo))
d = vif.LibvirtGenericVIFDriver()
image_meta = {'properties': {'os_name': 'fedora22'}}
image_meta = objects.ImageMeta.from_dict(image_meta)
d.get_base_config(None, 'ca:fe:de:ad:be:ef', image_meta,
None, 'kvm', 'normal')
mock_set.assert_called_once_with(mock.ANY, 'ca:fe:de:ad:be:ef',
'virtio', None, None)
@mock.patch.object(vif.designer, 'set_vif_guest_frontend_config')
def test_model_sriov_multi_queue_not_set(self, mock_set):
self.flags(use_virtio_for_bridges=True,
virt_type='kvm',
group='libvirt')
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.osinfo.libosinfo',
fakelibosinfo))
d = vif.LibvirtGenericVIFDriver()
image_meta = {'properties': {'os_name': 'fedora22'}}
image_meta = objects.ImageMeta.from_dict(image_meta)
conf = d.get_base_config(None, 'ca:fe:de:ad:be:ef', image_meta,
None, 'kvm', 'direct')
mock_set.assert_called_once_with(mock.ANY, 'ca:fe:de:ad:be:ef',
'virtio', None, None)
self.assertIsNone(conf.vhost_queues)
self.assertIsNone(conf.driver_name)
def _test_model_qemu(self, *vif_objs, **kw):
libvirt_version = kw.get('libvirt_version')
self.flags(use_virtio_for_bridges=True,
virt_type='qemu',
group='libvirt')
for vif_obj in vif_objs:
d = vif.LibvirtGenericVIFDriver()
if libvirt_version is not None:
d.libvirt_version = libvirt_version
xml = self._get_instance_xml(d, vif_obj)
doc = etree.fromstring(xml)
bandwidth = doc.find('./devices/interface/bandwidth')
self.assertIsNotNone(bandwidth)
inbound = bandwidth.find('inbound')
self.assertEqual(inbound.get("average"),
self.bandwidth['quota:vif_inbound_average'])
self.assertEqual(inbound.get("peak"),
self.bandwidth['quota:vif_inbound_peak'])
self.assertEqual(inbound.get("burst"),
self.bandwidth['quota:vif_inbound_burst'])
outbound = bandwidth.find('outbound')
self.assertEqual(outbound.get("average"),
self.bandwidth['quota:vif_outbound_average'])
self.assertEqual(outbound.get("peak"),
self.bandwidth['quota:vif_outbound_peak'])
self.assertEqual(outbound.get("burst"),
self.bandwidth['quota:vif_outbound_burst'])
self._assertModel(xml, network_model.VIF_MODEL_VIRTIO, "qemu")
def test_model_qemu_no_firewall(self):
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
self._test_model_qemu(
self.vif_bridge,
self.vif_8021qbg,
self.vif_iovisor,
self.vif_ovs,
)
def test_model_qemu_iptables(self):
self.flags(firewall_driver="nova.virt.firewall.IptablesFirewallDriver")
self._test_model_qemu(
self.vif_bridge,
self.vif_ovs,
self.vif_ivs,
self.vif_8021qbg,
self.vif_iovisor
)
def test_model_xen(self):
self.flags(use_virtio_for_bridges=True,
virt_type='xen',
group='libvirt')
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_bridge)
self._assertModel(xml)
def test_generic_driver_none(self):
d = vif.LibvirtGenericVIFDriver()
self.assertRaises(exception.NovaException,
self._get_instance_xml,
d,
self.vif_none)
def _check_bridge_driver(self, d, vif, br_want):
xml = self._get_instance_xml(d, vif)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
self.vif_bridge, br_want, 1)
def test_generic_driver_bridge(self):
d = vif.LibvirtGenericVIFDriver()
self._check_bridge_driver(d,
self.vif_bridge,
self.vif_bridge['network']['bridge'])
def _check_ivs_ethernet_driver(self, d, vif, dev_prefix):
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
xml = self._get_instance_xml(d, vif)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
self.vif_ivs, prefix=dev_prefix)
script = node.find("script")
self.assertIsNone(script)
def test_unplug_ivs_ethernet(self):
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(linux_net, 'delete_ivs_vif_port') as delete:
delete.side_effect = processutils.ProcessExecutionError
d.unplug(self.instance, self.vif_ivs)
@mock.patch.object(utils, 'execute')
@mock.patch.object(pci_utils, 'get_ifname_by_pci_address')
@mock.patch.object(pci_utils, 'get_vf_num_by_pci_address', return_value=1)
def _test_hw_veb_op(self, op, vlan, mock_get_vf_num, mock_get_ifname,
mock_execute):
mock_get_ifname.side_effect = ['eth1', 'eth13']
exit_code = [0, 2, 254]
port_state = 'up' if vlan > 0 else 'down'
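        # Plugging passes the VLAN tag from the port details (> 0), so the VF
        # link is set 'up'; unplugging passes vlan 0, which also takes the
        # link 'down'.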
calls = {
'get_ifname':
[mock.call(self.vif_hw_veb_macvtap['profile']['pci_slot'],
pf_interface=True),
mock.call(self.vif_hw_veb_macvtap['profile']['pci_slot'])],
'get_vf_num':
[mock.call(self.vif_hw_veb_macvtap['profile']['pci_slot'])],
'execute': [mock.call('ip', 'link', 'set', 'eth1',
'vf', 1, 'mac',
self.vif_hw_veb_macvtap['address'],
'vlan', vlan,
run_as_root=True,
check_exit_code=exit_code),
mock.call('ip', 'link', 'set',
'eth13', 'address',
self.vif_hw_veb_macvtap['address'],
port_state,
run_as_root=True,
check_exit_code=exit_code)]
}
op(self.instance, self.vif_hw_veb_macvtap)
mock_get_ifname.assert_has_calls(calls['get_ifname'])
mock_get_vf_num.assert_has_calls(calls['get_vf_num'])
mock_execute.assert_has_calls(calls['execute'])
def test_plug_hw_veb(self):
d = vif.LibvirtGenericVIFDriver()
self._test_hw_veb_op(
d.plug,
self.vif_hw_veb_macvtap['details'][network_model.VIF_DETAILS_VLAN])
def test_unplug_hw_veb(self):
d = vif.LibvirtGenericVIFDriver()
self._test_hw_veb_op(d.unplug, 0)
def test_plug_ivs_hybrid(self):
calls = {
'device_exists': [mock.call('qbrvif-xxx-yyy'),
mock.call('qvovif-xxx-yyy')],
'_create_veth_pair': [mock.call('qvbvif-xxx-yyy',
'qvovif-xxx-yyy', None)],
'execute': [mock.call('brctl', 'addbr', 'qbrvif-xxx-yyy',
run_as_root=True),
mock.call('brctl', 'setfd', 'qbrvif-xxx-yyy', 0,
run_as_root=True),
mock.call('brctl', 'stp', 'qbrvif-xxx-yyy', 'off',
run_as_root=True),
mock.call('tee', ('/sys/class/net/qbrvif-xxx-yyy'
'/bridge/multicast_snooping'),
process_input='0', run_as_root=True,
check_exit_code=[0, 1]),
mock.call('tee', ('/proc/sys/net/ipv6/conf'
'/qbrvif-xxx-yyy/disable_ipv6'),
process_input='1', run_as_root=True,
check_exit_code=[0, 1]),
mock.call('ip', 'link', 'set', 'qbrvif-xxx-yyy', 'up',
run_as_root=True),
mock.call('brctl', 'addif', 'qbrvif-xxx-yyy',
'qvbvif-xxx-yyy', run_as_root=True)],
'create_ivs_vif_port': [mock.call('qvovif-xxx-yyy', 'aaa-bbb-ccc',
'ca:fe:de:ad:be:ef',
'f0000000-0000-0000-0000-000000000001')]
}
with test.nested(
mock.patch.object(linux_net, 'device_exists',
return_value=False),
mock.patch.object(utils, 'execute'),
mock.patch.object(linux_net, '_create_veth_pair'),
mock.patch.object(linux_net, 'create_ivs_vif_port'),
mock.patch.object(os.path, 'exists', return_value=True)
) as (device_exists, execute, _create_veth_pair, create_ivs_vif_port,
path_exists):
d = vif.LibvirtGenericVIFDriver()
d.plug(self.instance, self.vif_ivs)
device_exists.assert_has_calls(calls['device_exists'])
_create_veth_pair.assert_has_calls(calls['_create_veth_pair'])
execute.assert_has_calls(calls['execute'])
create_ivs_vif_port.assert_has_calls(calls['create_ivs_vif_port'])
def test_unplug_ivs_hybrid(self):
calls = {
'execute': [mock.call('brctl', 'delif', 'qbrvif-xxx-yyy',
'qvbvif-xxx-yyy', run_as_root=True),
mock.call('ip', 'link', 'set',
'qbrvif-xxx-yyy', 'down', run_as_root=True),
mock.call('brctl', 'delbr',
'qbrvif-xxx-yyy', run_as_root=True)],
'delete_ivs_vif_port': [mock.call('qvovif-xxx-yyy')]
}
with test.nested(
mock.patch.object(utils, 'execute'),
mock.patch.object(linux_net, 'delete_ivs_vif_port')
) as (execute, delete_ivs_vif_port):
d = vif.LibvirtGenericVIFDriver()
d.unplug(self.instance, self.vif_ivs)
execute.assert_has_calls(calls['execute'])
delete_ivs_vif_port.assert_has_calls(calls['delete_ivs_vif_port'])
def test_unplug_ivs_hybrid_bridge_does_not_exist(self):
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(utils, 'execute') as execute:
execute.side_effect = processutils.ProcessExecutionError
d.unplug(self.instance, self.vif_ivs)
def test_unplug_iovisor(self):
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(utils, 'execute') as execute:
execute.side_effect = processutils.ProcessExecutionError
d.unplug(self.instance, self.vif_iovisor)
@mock.patch('nova.network.linux_net.device_exists')
def test_plug_iovisor(self, device_exists):
device_exists.return_value = True
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(utils, 'execute') as execute:
d.plug(self.instance, self.vif_iovisor)
execute.assert_has_calls([
mock.call('ifc_ctl', 'gateway', 'add_port',
'tap-xxx-yyy-zzz', run_as_root=True),
mock.call('ifc_ctl', 'gateway', 'ifup',
'tap-xxx-yyy-zzz',
'access_vm', self.vif_iovisor['id'],
self.vif_iovisor['address'],
'pgtag2=%s' % self.vif_iovisor['network']['id'],
'pgtag1=%s' % self.instance.project_id,
run_as_root=True)])
def test_unplug_vrouter_with_details(self):
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(utils, 'execute') as execute:
d.unplug(self.instance, self.vif_vrouter)
execute.assert_called_once_with(
'vrouter-port-control',
'--oper=delete --uuid=vif-xxx-yyy-zzz',
run_as_root=True)
def test_plug_vrouter_with_details(self):
d = vif.LibvirtGenericVIFDriver()
instance = mock.Mock()
instance.name = 'instance-name'
instance.uuid = '46a4308b-e75a-4f90-a34a-650c86ca18b2'
instance.project_id = 'b168ea26fa0c49c1a84e1566d9565fa5'
instance.display_name = 'instance1'
instance.image_meta = objects.ImageMeta.from_dict({'properties': {}})
with mock.patch.object(utils, 'execute') as execute:
d.plug(instance, self.vif_vrouter)
execute.assert_has_calls([
mock.call('ip', 'tuntap', 'add', 'tap-xxx-yyy-zzz', 'mode',
'tap', run_as_root=True, check_exit_code=[0, 2, 254]),
mock.call('ip', 'link', 'set', 'tap-xxx-yyy-zzz', 'up',
run_as_root=True, check_exit_code=[0, 2, 254]),
mock.call('vrouter-port-control',
'--oper=add --uuid=vif-xxx-yyy-zzz '
'--instance_uuid=46a4308b-e75a-4f90-a34a-650c86ca18b2 '
'--vn_uuid=network-id-xxx-yyy-zzz '
'--vm_project_uuid=b168ea26fa0c49c1a84e1566d9565fa5 '
'--ip_address=0.0.0.0 '
'--ipv6_address=None '
'--vm_name=instance1 '
'--mac=ca:fe:de:ad:be:ef '
'--tap_name=tap-xxx-yyy-zzz '
'--port_type=NovaVMPort '
'--tx_vlan_id=-1 '
'--rx_vlan_id=-1', run_as_root=True)])
@mock.patch('nova.network.linux_net.create_tap_dev')
def test_plug_vrouter_with_details_multiqueue(self, mock_create_tap_dev):
d = vif.LibvirtGenericVIFDriver()
instance = mock.Mock()
instance.name = 'instance-name'
instance.uuid = '46a4308b-e75a-4f90-a34a-650c86ca18b2'
instance.project_id = 'b168ea26fa0c49c1a84e1566d9565fa5'
instance.display_name = 'instance1'
instance.image_meta = objects.ImageMeta.from_dict({
'properties': {'hw_vif_multiqueue_enabled': True}})
instance.flavor.vcpus = 2
with mock.patch.object(utils, 'execute') as execute:
d.plug(instance, self.vif_vrouter)
mock_create_tap_dev.assert_called_once_with('tap-xxx-yyy-zzz',
multiqueue=True)
execute.assert_called_once_with(
'vrouter-port-control',
'--oper=add --uuid=vif-xxx-yyy-zzz '
'--instance_uuid=46a4308b-e75a-4f90-a34a-650c86ca18b2 '
'--vn_uuid=network-id-xxx-yyy-zzz '
'--vm_project_uuid=b168ea26fa0c49c1a84e1566d9565fa5 '
'--ip_address=0.0.0.0 '
'--ipv6_address=None '
'--vm_name=instance1 '
'--mac=ca:fe:de:ad:be:ef '
'--tap_name=tap-xxx-yyy-zzz '
'--port_type=NovaVMPort '
'--tx_vlan_id=-1 '
'--rx_vlan_id=-1', run_as_root=True)
def test_ivs_ethernet_driver(self):
d = vif.LibvirtGenericVIFDriver()
self._check_ivs_ethernet_driver(d,
self.vif_ivs,
"tap")
def _check_ivs_virtualport_driver(self, d, vif, want_iface_id):
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
xml = self._get_instance_xml(d, vif)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
vif, vif['devname'])
def _check_ovs_virtualport_driver(self, d, vif, want_iface_id):
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
xml = self._get_instance_xml(d, vif)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
vif, "br0")
vp = node.find("virtualport")
self.assertEqual(vp.get("type"), "openvswitch")
iface_id_found = False
for p_elem in vp.findall("parameters"):
iface_id = p_elem.get("interfaceid", None)
if iface_id:
self.assertEqual(iface_id, want_iface_id)
iface_id_found = True
self.assertTrue(iface_id_found)
def test_generic_ovs_virtualport_driver(self):
d = vif.LibvirtGenericVIFDriver()
want_iface_id = self.vif_ovs['ovs_interfaceid']
self._check_ovs_virtualport_driver(d,
self.vif_ovs,
want_iface_id)
def test_generic_ivs_virtualport_driver(self):
d = vif.LibvirtGenericVIFDriver()
want_iface_id = self.vif_ivs['ovs_interfaceid']
self._check_ivs_virtualport_driver(d,
self.vif_ivs,
want_iface_id)
def test_ivs_plug_with_nova_firewall(self):
d = vif.LibvirtGenericVIFDriver()
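        # With the nova firewall enabled the IVS port is wired through a
        # hybrid "qbr" Linux bridge whose name is the port id truncated to
        # NIC_NAME_LEN characters; that bridge should appear as the libvirt
        # source bridge.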
br_want = "qbr" + self.vif_ivs['id']
br_want = br_want[:network_model.NIC_NAME_LEN]
xml = self._get_instance_xml(d, self.vif_ivs)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
self.vif_ivs, br_want, 1)
def test_ivs_plug_with_port_filter_direct_no_nova_firewall(self):
d = vif.LibvirtGenericVIFDriver()
br_want = "qbr" + self.vif_ivs_hybrid['id']
br_want = br_want[:network_model.NIC_NAME_LEN]
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
xml = self._get_instance_xml(d, self.vif_ivs_hybrid)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
self.vif_ivs_hybrid, br_want, 0)
def test_ivs_plug_with_port_hybrid_no_nova_firewall(self):
d = vif.LibvirtGenericVIFDriver()
br_want = self.vif_ivs_filter_cap['devname']
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
xml = self._get_instance_xml(d, self.vif_ivs_filter_cap)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
self.vif_ivs_filter_cap, br_want, 0)
def test_hybrid_plug_without_nova_firewall(self):
d = vif.LibvirtGenericVIFDriver()
br_want = "qbr" + self.vif_ovs_hybrid['id']
br_want = br_want[:network_model.NIC_NAME_LEN]
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
xml = self._get_instance_xml(d, self.vif_ovs_hybrid)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
self.vif_ovs_hybrid, br_want, 0)
def test_direct_plug_with_port_filter_cap_no_nova_firewall(self):
d = vif.LibvirtGenericVIFDriver()
br_want = self.vif_midonet['devname']
xml = self._get_instance_xml(d, self.vif_ovs_filter_cap)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "target", "dev",
self.vif_ovs_filter_cap, br_want)
def _check_neutron_hybrid_driver(self, d, vif, br_want):
self.flags(firewall_driver="nova.virt.firewall.IptablesFirewallDriver")
xml = self._get_instance_xml(d, vif)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
vif, br_want, 1)
def test_generic_hybrid_driver(self):
d = vif.LibvirtGenericVIFDriver()
br_want = "qbr" + self.vif_ovs['id']
br_want = br_want[:network_model.NIC_NAME_LEN]
self._check_neutron_hybrid_driver(d,
self.vif_ovs,
br_want)
def test_ivs_hybrid_driver(self):
d = vif.LibvirtGenericVIFDriver()
br_want = "qbr" + self.vif_ivs['id']
br_want = br_want[:network_model.NIC_NAME_LEN]
self._check_neutron_hybrid_driver(d,
self.vif_ivs,
br_want)
def test_ib_hostdev_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_ib_hostdev)
doc = etree.fromstring(xml)
node = doc.findall('./devices/hostdev')[0]
self.assertEqual(1, len(node))
self._assertPciEqual(node, self.vif_ib_hostdev)
def test_midonet_ethernet_vif_driver(self):
d = vif.LibvirtGenericVIFDriver()
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
br_want = self.vif_midonet['devname']
xml = self._get_instance_xml(d, self.vif_midonet)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
self.vif_midonet, br_want)
def test_tap_ethernet_vif_driver(self):
d = vif.LibvirtGenericVIFDriver()
br_want = self.vif_tap['devname']
xml = self._get_instance_xml(d, self.vif_tap)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
self.vif_tap, br_want)
@mock.patch('nova.network.linux_net.device_exists')
def test_plug_tap(self, device_exists):
device_exists.return_value = True
d = vif.LibvirtGenericVIFDriver()
d.plug(self.instance, self.vif_tap)
def test_unplug_tap(self):
d = vif.LibvirtGenericVIFDriver()
d.unplug(self.instance, self.vif_tap)
def test_generic_8021qbh_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_8021qbh)
node = self._get_node(xml)
self._assertTypeAndPciEquals(node, "hostdev", self.vif_8021qbh)
self._assertMacEquals(node, self.vif_8021qbh)
vp = node.find("virtualport")
self.assertEqual(vp.get("type"), "802.1Qbh")
profile_id_found = False
for p_elem in vp.findall("parameters"):
details = self.vif_8021qbh["details"]
profile_id = p_elem.get("profileid", None)
if profile_id:
self.assertEqual(profile_id,
details[network_model.VIF_DETAILS_PROFILEID])
profile_id_found = True
self.assertTrue(profile_id_found)
def test_hw_veb_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_hw_veb)
node = self._get_node(xml)
self._assertTypeAndPciEquals(node, "hostdev", self.vif_hw_veb)
self._assertMacEquals(node, self.vif_hw_veb)
conf = vconfig.LibvirtConfigGuestInterface()
conf.parse_dom(node)
self.assertEqual(conf.vlan, self.vif_hw_veb["details"]["vlan"])
def test_hostdev_physical_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_hostdev_physical)
doc = etree.fromstring(xml)
node = doc.findall('./devices/hostdev')[0]
self.assertEqual(1, len(node))
self._assertPciEqual(node, self.vif_hostdev_physical)
@mock.patch.object(pci_utils, 'get_ifname_by_pci_address',
return_value='eth1')
@mock.patch.object(host.Host, "has_min_version", return_value=True)
def test_hw_veb_driver_macvtap(self, ver_mock, mock_get_ifname):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_hw_veb_macvtap)
node = self._get_node(xml)
self.assertEqual(node.get("type"), "direct")
self._assertTypeEquals(node, "direct", "source",
"dev", "eth1")
self._assertTypeEquals(node, "direct", "source",
"mode", "passthrough")
self._assertMacEquals(node, self.vif_hw_veb_macvtap)
vlan = node.find("vlan").find("tag").get("id")
vlan_want = self.vif_hw_veb["details"]["vlan"]
self.assertEqual(int(vlan), vlan_want)
@mock.patch.object(pci_utils, 'get_ifname_by_pci_address',
return_value='eth1')
@mock.patch.object(host.Host, "has_min_version", return_value=False)
def test_hw_veb_driver_macvtap_pre_vlan_support(self, ver_mock,
mock_get_ifname):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(
d, self.vif_hw_veb_macvtap,
has_min_libvirt_version=ver_mock.return_value)
node = self._get_node(xml)
self.assertEqual(node.get("type"), "direct")
self._assertTypeEquals(node, "direct", "source",
"dev", "eth1")
self._assertTypeEquals(node, "direct", "source",
"mode", "passthrough")
self._assertMacEquals(node, self.vif_hw_veb_macvtap)
vlan = node.find("vlan")
self.assertIsNone(vlan)
def test_driver_macvtap_vlan(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_macvtap_vlan)
node = self._get_node(xml)
self.assertEqual(node.get("type"), "direct")
self._assertTypeEquals(node, "direct", "source",
"dev", "eth0.1")
self._assertTypeEquals(node, "direct", "source",
"mode", "vepa")
self._assertMacEquals(node, self.vif_macvtap_vlan)
def test_driver_macvtap_flat(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_macvtap_flat)
node = self._get_node(xml)
self.assertEqual(node.get("type"), "direct")
self._assertTypeEquals(node, "direct", "source",
"dev", "eth0")
self._assertTypeEquals(node, "direct", "source",
"mode", "bridge")
self._assertMacEquals(node, self.vif_macvtap_flat)
def test_driver_macvtap_exception(self):
d = vif.LibvirtGenericVIFDriver()
e = self.assertRaises(exception.VifDetailsMissingMacvtapParameters,
self._get_instance_xml,
d,
self.vif_macvtap_exception)
self.assertIn('macvtap_source', six.text_type(e))
self.assertIn('macvtap_mode', six.text_type(e))
self.assertIn('physical_interface', six.text_type(e))
@mock.patch.object(linux_net.LinuxBridgeInterfaceDriver, 'ensure_vlan')
def test_macvtap_plug_vlan(self, ensure_vlan_mock):
d = vif.LibvirtGenericVIFDriver()
d.plug(self.instance, self.vif_macvtap_vlan)
ensure_vlan_mock.assert_called_once_with(1, 'eth0', interface='eth0.1')
@mock.patch.object(linux_net.LinuxBridgeInterfaceDriver, 'ensure_vlan')
def test_macvtap_plug_flat(self, ensure_vlan_mock):
d = vif.LibvirtGenericVIFDriver()
d.plug(self.instance, self.vif_macvtap_flat)
self.assertFalse(ensure_vlan_mock.called)
def test_generic_iovisor_driver(self):
d = vif.LibvirtGenericVIFDriver()
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
br_want = self.vif_ivs['devname']
xml = self._get_instance_xml(d, self.vif_ivs)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
self.vif_ivs, br_want)
def test_generic_8021qbg_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_8021qbg)
node = self._get_node(xml)
self._assertTypeEquals(node, "direct", "source", "dev", "eth0")
self._assertMacEquals(node, self.vif_8021qbg)
vp = node.find("virtualport")
self.assertEqual(vp.get("type"), "802.1Qbg")
manager_id_found = False
type_id_found = False
typeversion_id_found = False
instance_id_found = False
for p_elem in vp.findall("parameters"):
wantparams = self.vif_8021qbg['qbg_params']
manager_id = p_elem.get("managerid", None)
type_id = p_elem.get("typeid", None)
typeversion_id = p_elem.get("typeidversion", None)
instance_id = p_elem.get("instanceid", None)
if manager_id:
self.assertEqual(manager_id,
wantparams['managerid'])
manager_id_found = True
if type_id:
self.assertEqual(type_id,
wantparams['typeid'])
type_id_found = True
if typeversion_id:
self.assertEqual(typeversion_id,
wantparams['typeidversion'])
typeversion_id_found = True
if instance_id:
self.assertEqual(instance_id,
wantparams['instanceid'])
instance_id_found = True
self.assertTrue(manager_id_found)
self.assertTrue(type_id_found)
self.assertTrue(typeversion_id_found)
self.assertTrue(instance_id_found)
def test_vhostuser_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_vhostuser)
node = self._get_node(xml)
self.assertEqual(node.get("type"),
network_model.VIF_TYPE_VHOSTUSER)
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "mode", "client")
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "path", "/tmp/vif-xxx-yyy-zzz")
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "type", "unix")
self._assertMacEquals(node, self.vif_vhostuser)
self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
def test_vhostuser_no_queues(self):
d = vif.LibvirtGenericVIFDriver()
image_meta = objects.ImageMeta.from_dict(
{'properties': {'hw_vif_model': 'virtio',
'hw_vif_multiqueue_enabled': 'true'}})
xml = self._get_instance_xml(d, self.vif_vhostuser, image_meta,
has_min_libvirt_version=False)
node = self._get_node(xml)
self.assertEqual(node.get("type"),
network_model.VIF_TYPE_VHOSTUSER)
self._assertMacEquals(node, self.vif_vhostuser)
driver = node.find("driver")
        self.assertIsNone(driver)
def test_vhostuser_driver_no_path(self):
d = vif.LibvirtGenericVIFDriver()
self.assertRaises(exception.VifDetailsMissingVhostuserSockPath,
self._get_instance_xml,
d,
self.vif_vhostuser_no_path)
def test_vhostuser_driver_ovs(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d,
self.vif_vhostuser_ovs)
node = self._get_node(xml)
self.assertEqual(node.get("type"),
network_model.VIF_TYPE_VHOSTUSER)
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "mode", "client")
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "path", "/tmp/usv-xxx-yyy-zzz")
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "type", "unix")
self._assertMacEquals(node, self.vif_vhostuser_ovs)
self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
def test_agilio_ovs_direct(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_agilio_ovs_direct)
node = self._get_node(xml)
self._assertTypeAndPciEquals(node,
"hostdev",
self.vif_agilio_ovs_direct)
self._assertMacEquals(node, self.vif_agilio_ovs_direct)
def test_agilio_ovs_forwarder(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d,
self.vif_agilio_ovs_forwarder)
node = self._get_node(xml)
self.assertEqual(node.get("type"),
network_model.VIF_TYPE_VHOSTUSER)
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "mode", "client")
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "path", "/tmp/usv-xxx-yyy-zzz")
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "type", "unix")
self._assertMacEquals(node, self.vif_agilio_ovs_forwarder)
self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
@mock.patch("nova.network.os_vif_util.nova_to_osvif_instance")
@mock.patch("nova.network.os_vif_util.nova_to_osvif_vif")
@mock.patch.object(os_vif, "plug")
def _test_osvif_plug(self, fail, mock_plug,
mock_convert_vif, mock_convert_inst):
mock_convert_vif.return_value = self.os_vif_bridge
mock_convert_inst.return_value = self.os_vif_inst_info
d = vif.LibvirtGenericVIFDriver()
if fail:
mock_plug.side_effect = osv_exception.ExceptionBase("Wibble")
self.assertRaises(exception.NovaException,
d.plug,
self.instance, self.vif_bridge)
else:
d.plug(self.instance, self.vif_bridge)
mock_plug.assert_called_once_with(self.os_vif_bridge,
self.os_vif_inst_info)
def test_osvif_plug_normal(self):
self._test_osvif_plug(False)
def test_osvif_plug_fail(self):
self._test_osvif_plug(True)
@mock.patch("nova.network.os_vif_util.nova_to_osvif_instance")
@mock.patch("nova.network.os_vif_util.nova_to_osvif_vif")
@mock.patch.object(os_vif, "unplug")
def _test_osvif_unplug(self, fail, mock_unplug,
mock_convert_vif, mock_convert_inst):
mock_convert_vif.return_value = self.os_vif_bridge
mock_convert_inst.return_value = self.os_vif_inst_info
d = vif.LibvirtGenericVIFDriver()
if fail:
mock_unplug.side_effect = osv_exception.ExceptionBase("Wibble")
self.assertRaises(exception.NovaException,
d.unplug,
self.instance, self.vif_bridge)
else:
d.unplug(self.instance, self.vif_bridge)
mock_unplug.assert_called_once_with(self.os_vif_bridge,
self.os_vif_inst_info)
def test_osvif_unplug_normal(self):
self._test_osvif_unplug(False)
def test_osvif_unplug_fail(self):
self._test_osvif_unplug(True)
@mock.patch("nova.network.os_vif_util.nova_to_osvif_instance")
@mock.patch("nova.network.os_vif_util.nova_to_osvif_vif")
def test_config_os_vif_bridge(self, mock_convert_vif, mock_convert_inst):
mock_convert_vif.return_value = self.os_vif_bridge
mock_convert_inst.return_value = self.os_vif_inst_info
d = vif.LibvirtGenericVIFDriver()
hostimpl = host.Host("qemu:///system")
flavor = objects.Flavor(name='m1.small')
image_meta = objects.ImageMeta.from_dict({})
d = vif.LibvirtGenericVIFDriver()
cfg = d.get_config(self.instance, self.vif_bridge,
image_meta, flavor,
CONF.libvirt.virt_type,
hostimpl)
self._assertXmlEqual("""
<interface type="bridge">
<mac address="22:52:25:62:e2:aa"/>
<model type="virtio"/>
<source bridge="br100"/>
<target dev="nicdc065497-3c"/>
<filterref
filter="nova-instance-instance-00000001-22522562e2aa"/>
</interface>""", cfg.to_xml())
@mock.patch("nova.network.os_vif_util.nova_to_osvif_instance")
@mock.patch("nova.network.os_vif_util.nova_to_osvif_vif")
def test_config_os_vif_bridge_nofw(self, mock_convert_vif,
mock_convert_inst):
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
mock_convert_vif.return_value = self.os_vif_bridge
mock_convert_inst.return_value = self.os_vif_inst_info
d = vif.LibvirtGenericVIFDriver()
hostimpl = host.Host("qemu:///system")
flavor = objects.Flavor(name='m1.small')
image_meta = objects.ImageMeta.from_dict({})
d = vif.LibvirtGenericVIFDriver()
cfg = d.get_config(self.instance, self.vif_bridge,
image_meta, flavor,
CONF.libvirt.virt_type,
hostimpl)
self._assertXmlEqual("""
<interface type="bridge">
<mac address="22:52:25:62:e2:aa"/>
<model type="virtio"/>
<source bridge="br100"/>
<target dev="nicdc065497-3c"/>
</interface>""", cfg.to_xml())
@mock.patch("nova.network.os_vif_util.nova_to_osvif_instance")
@mock.patch("nova.network.os_vif_util.nova_to_osvif_vif")
def test_config_os_vif_agilio_ovs_fallthrough(self, mock_convert_vif,
mock_convert_inst):
mock_convert_vif.return_value = self.os_vif_agilio_ovs
mock_convert_inst.return_value = self.os_vif_inst_info
d = vif.LibvirtGenericVIFDriver()
hostimpl = host.Host("qemu:///system")
flavor = objects.Flavor(name='m1.small')
image_meta = objects.ImageMeta.from_dict({})
d = vif.LibvirtGenericVIFDriver()
cfg = d.get_config(self.instance, self.vif_agilio_ovs,
image_meta, flavor,
CONF.libvirt.virt_type,
hostimpl)
self._assertXmlEqual("""
<interface type="bridge">
<mac address="22:52:25:62:e2:aa"/>
<model type="virtio"/>
<source bridge="br0"/>
<target dev="nicdc065497-3c"/>
<virtualport type="openvswitch">
<parameters
interfaceid="07bd6cea-fb37-4594-b769-90fc51854ee9"/>
</virtualport>
</interface>""", cfg.to_xml())
@mock.patch("nova.network.os_vif_util.nova_to_osvif_instance")
@mock.patch("nova.network.os_vif_util.nova_to_osvif_vif")
def test_config_os_vif_agilio_ovs_forwarder(self, mock_convert_vif,
mock_convert_inst):
mock_convert_vif.return_value = self.os_vif_agilio_forwarder
mock_convert_inst.return_value = self.os_vif_inst_info
d = vif.LibvirtGenericVIFDriver()
hostimpl = host.Host("qemu:///system")
flavor = objects.Flavor(name='m1.small')
image_meta = objects.ImageMeta.from_dict({})
d = vif.LibvirtGenericVIFDriver()
cfg = d.get_config(self.instance, self.vif_agilio_ovs_forwarder,
image_meta, flavor,
CONF.libvirt.virt_type,
hostimpl)
self._assertXmlEqual("""
<interface type="vhostuser">
<mac address="22:52:25:62:e2:aa"/>
<model type="virtio"/>
<source mode="client"
path="/var/run/openvswitch/vhudc065497-3c" type="unix"/>
</interface>""", cfg.to_xml())
@mock.patch("nova.network.os_vif_util.nova_to_osvif_instance")
@mock.patch("nova.network.os_vif_util.nova_to_osvif_vif")
def test_config_os_vif_agilio_ovs_direct(self, mock_convert_vif,
mock_convert_inst):
mock_convert_vif.return_value = self.os_vif_agilio_direct
mock_convert_inst.return_value = self.os_vif_inst_info
d = vif.LibvirtGenericVIFDriver()
hostimpl = host.Host("qemu:///system")
flavor = objects.Flavor(name='m1.small')
image_meta = objects.ImageMeta.from_dict({})
d = vif.LibvirtGenericVIFDriver()
cfg = d.get_config(self.instance, self.vif_agilio_ovs_direct,
image_meta, flavor,
CONF.libvirt.virt_type,
hostimpl)
self._assertXmlEqual("""
<interface type="hostdev" managed="yes">
<mac address="22:52:25:62:e2:aa"/>
<source>
<address type="pci" domain="0x0000"
bus="0x0a" slot="0x00" function="0x1"/>
</source>
</interface>""", cfg.to_xml())
@mock.patch("nova.network.os_vif_util.nova_to_osvif_instance")
@mock.patch("nova.network.os_vif_util.nova_to_osvif_vif")
def test_config_os_vif_ovs(self, mock_convert_vif, mock_convert_inst):
mock_convert_vif.return_value = self.os_vif_ovs
mock_convert_inst.return_value = self.os_vif_inst_info
d = vif.LibvirtGenericVIFDriver()
hostimpl = host.Host("qemu:///system")
flavor = objects.Flavor(name='m1.small')
image_meta = objects.ImageMeta.from_dict({})
d = vif.LibvirtGenericVIFDriver()
cfg = d.get_config(self.instance, self.vif_ovs,
image_meta, flavor,
CONF.libvirt.virt_type,
hostimpl)
self._assertXmlEqual("""
<interface type="bridge">
<mac address="22:52:25:62:e2:aa"/>
<model type="virtio"/>
<source bridge="br0"/>
<target dev="nicdc065497-3c"/>
<virtualport type="openvswitch">
<parameters
interfaceid="07bd6cea-fb37-4594-b769-90fc51854ee9"/>
</virtualport>
</interface>""", cfg.to_xml())
@mock.patch("nova.network.os_vif_util.nova_to_osvif_instance")
@mock.patch("nova.network.os_vif_util.nova_to_osvif_vif")
def test_config_os_vif_ovs_hybrid(self, mock_convert_vif,
mock_convert_inst):
mock_convert_vif.return_value = self.os_vif_ovs_hybrid
mock_convert_inst.return_value = self.os_vif_inst_info
d = vif.LibvirtGenericVIFDriver()
hostimpl = host.Host("qemu:///system")
flavor = objects.Flavor(name='m1.small')
image_meta = objects.ImageMeta.from_dict({})
d = vif.LibvirtGenericVIFDriver()
cfg = d.get_config(self.instance, self.vif_ovs,
image_meta, flavor,
CONF.libvirt.virt_type,
hostimpl)
self._assertXmlEqual("""
<interface type="bridge">
<mac address="22:52:25:62:e2:aa"/>
<model type="virtio"/>
<source bridge="br0"/>
<target dev="nicdc065497-3c"/>
<filterref
filter="nova-instance-instance-00000001-22522562e2aa"/>
</interface>""", cfg.to_xml())
@mock.patch("nova.network.os_vif_util.nova_to_osvif_instance")
@mock.patch("nova.network.os_vif_util.nova_to_osvif_vif")
def test_config_os_vif_hostdevice_ethernet(self, mock_convert_vif,
mock_convert_inst):
mock_convert_vif.return_value = self.os_vif_hostdevice_ethernet
mock_convert_inst.return_value = self.os_vif_inst_info
hostimpl = host.Host("qemu:///system")
flavor = objects.Flavor(name='m1.small')
image_meta = objects.ImageMeta.from_dict({})
d = vif.LibvirtGenericVIFDriver()
cfg = d.get_config(self.instance, self.vif_bridge,
image_meta, flavor,
CONF.libvirt.virt_type,
hostimpl)
self._assertXmlEqual("""
<interface type="hostdev" managed="yes">
<mac address="22:52:25:62:e2:aa"/>
<source>
<address type="pci" domain="0x0000"
bus="0x0a" slot="0x00" function="0x1"/>
</source>
</interface>""", cfg.to_xml())
@mock.patch("nova.network.os_vif_util.nova_to_osvif_instance")
@mock.patch("nova.network.os_vif_util.nova_to_osvif_vif")
def test_config_os_vif_hostdevice_generic(self, mock_convert_vif,
mock_convert_inst):
mock_convert_vif.return_value = self.os_vif_hostdevice_generic
mock_convert_inst.return_value = self.os_vif_inst_info
hostimpl = host.Host("qemu:///system")
flavor = objects.Flavor(name='m1.small')
image_meta = objects.ImageMeta.from_dict({})
d = vif.LibvirtGenericVIFDriver()
self.assertRaises(exception.InternalError,
d.get_config, self.instance, self.vif_bridge,
image_meta, flavor, CONF.libvirt.virt_type,
hostimpl)
| |
from __future__ import absolute_import, unicode_literals
import mock
from oauthlib.oauth2.rfc6749.tokens import *
from ...unittest import TestCase
class TokenTest(TestCase):
# MAC without body/payload or extension
mac_plain = {
'token': 'h480djs93hd8',
'uri': 'http://example.com/resource/1?b=1&a=2',
'key': '489dks293j39',
'http_method': 'GET',
'nonce': '264095:dj83hs9s',
'hash_algorithm': 'hmac-sha-1'
}
auth_plain = {
'Authorization': 'MAC id="h480djs93hd8", nonce="264095:dj83hs9s",'
' mac="SLDJd4mg43cjQfElUs3Qub4L6xE="'
}
# MAC with body/payload, no extension
mac_body = {
'token': 'jd93dh9dh39D',
'uri': 'http://example.com/request',
'key': '8yfrufh348h',
'http_method': 'POST',
'nonce': '273156:di3hvdf8',
'hash_algorithm': 'hmac-sha-1',
'body': 'hello=world%21'
}
auth_body = {
'Authorization': 'MAC id="jd93dh9dh39D", nonce="273156:di3hvdf8",'
' bodyhash="k9kbtCIy0CkI3/FEfpS/oIDjk6k=", mac="W7bdMZbv9UWOTadASIQHagZyirA="'
}
# MAC with body/payload and extension
mac_both = {
'token': 'h480djs93hd8',
'uri': 'http://example.com/request?b5=%3D%253D&a3=a&c%40=&a2=r%20b&c2&a3=2+q',
'key': '489dks293j39',
'http_method': 'GET',
'nonce': '264095:7d8f3e4a',
'hash_algorithm': 'hmac-sha-1',
'body': 'Hello World!',
'ext': 'a,b,c'
}
auth_both = {
'Authorization': 'MAC id="h480djs93hd8", nonce="264095:7d8f3e4a",'
' bodyhash="Lve95gjOVATpfV8EL5X4nxwjKHE=", ext="a,b,c",'
' mac="Z3C2DojEopRDIC88/imW8Ez853g="'
}
# Bearer
token = 'vF9dft4qmT'
uri = 'http://server.example.com/resource'
bearer_headers = {
'Authorization': 'Bearer vF9dft4qmT'
}
bearer_body = 'access_token=vF9dft4qmT'
bearer_uri = 'http://server.example.com/resource?access_token=vF9dft4qmT'
def test_prepare_mac_header(self):
"""Verify mac signatures correctness
TODO: verify hmac-sha-256
"""
self.assertEqual(prepare_mac_header(**self.mac_plain), self.auth_plain)
self.assertEqual(prepare_mac_header(**self.mac_body), self.auth_body)
self.assertEqual(prepare_mac_header(**self.mac_both), self.auth_both)
def test_prepare_bearer_request(self):
"""Verify proper addition of bearer tokens to requests.
They may be represented as query components in body or URI or
in a Bearer authorization header.
"""
self.assertEqual(prepare_bearer_headers(self.token), self.bearer_headers)
self.assertEqual(prepare_bearer_body(self.token), self.bearer_body)
self.assertEqual(prepare_bearer_uri(self.token, uri=self.uri), self.bearer_uri)
class JWTTokenTestCase(TestCase):
def test_create_token_callable_expires_in(self):
"""
        Test retrieval of the expires_in value by calling the callable expires_in property.
"""
expires_in_mock = mock.MagicMock()
request_mock = mock.MagicMock()
token = JWTToken(expires_in=expires_in_mock, request_validator=mock.MagicMock())
token.create_token(request=request_mock)
expires_in_mock.assert_called_once_with(request_mock)
def test_create_token_non_callable_expires_in(self):
"""
        When a non-callable expires_in is set, it should just be attached to the request.
"""
expires_in_mock = mock.NonCallableMagicMock()
request_mock = mock.MagicMock()
token = JWTToken(expires_in=expires_in_mock, request_validator=mock.MagicMock())
token.create_token(request=request_mock)
self.assertFalse(expires_in_mock.called)
self.assertEqual(request_mock.expires_in, expires_in_mock)
def test_create_token_calls_get_id_token(self):
"""
        When create_token is called, the call should be forwarded to get_jwt_bearer_token on the request validator.
"""
request_mock = mock.MagicMock()
with mock.patch('oauthlib.oauth2.rfc6749.request_validator.RequestValidator',
autospec=True) as RequestValidatorMock:
request_validator = RequestValidatorMock()
token = JWTToken(expires_in=mock.MagicMock(), request_validator=request_validator)
token.create_token(request=request_mock)
request_validator.get_jwt_bearer_token.assert_called_once_with(None, None, request_mock)
def test_validate_request_token_from_headers(self):
"""
        The bearer token gets retrieved from the request headers.
"""
with mock.patch('oauthlib.common.Request', autospec=True) as RequestMock, \
mock.patch('oauthlib.oauth2.rfc6749.request_validator.RequestValidator',
autospec=True) as RequestValidatorMock:
request_validator_mock = RequestValidatorMock()
token = JWTToken(request_validator=request_validator_mock)
request = RequestMock('/uri')
# Scopes is retrieved using the __call__ method which is not picked up correctly by mock.patch
# with autospec=True
request.scopes = mock.MagicMock()
request.headers = {
'Authorization': 'Bearer some-token-from-header'
}
token.validate_request(request=request)
request_validator_mock.validate_jwt_bearer_token.assert_called_once_with('some-token-from-header',
request.scopes,
request)
def test_validate_token_from_request(self):
"""
        The token gets retrieved from the request object.
"""
with mock.patch('oauthlib.common.Request', autospec=True) as RequestMock, \
mock.patch('oauthlib.oauth2.rfc6749.request_validator.RequestValidator',
autospec=True) as RequestValidatorMock:
request_validator_mock = RequestValidatorMock()
token = JWTToken(request_validator=request_validator_mock)
request = RequestMock('/uri')
# Scopes is retrieved using the __call__ method which is not picked up correctly by mock.patch
# with autospec=True
request.scopes = mock.MagicMock()
request.access_token = 'some-token-from-request-object'
request.headers = {}
token.validate_request(request=request)
request_validator_mock.validate_jwt_bearer_token.assert_called_once_with('some-token-from-request-object',
request.scopes,
request)
def test_estimate_type(self):
"""
Estimate type results for a jwt token
"""
def test_token(token, expected_result):
with mock.patch('oauthlib.common.Request', autospec=True) as RequestMock:
jwt_token = JWTToken()
request = RequestMock('/uri')
# Scopes is retrieved using the __call__ method which is not picked up correctly by mock.patch
# with autospec=True
request.headers = {
'Authorization': 'Bearer {}'.format(token)
}
result = jwt_token.estimate_type(request=request)
self.assertEqual(result, expected_result)
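        # Tokens that look like a compact JWT serialization (an "ey" prefix
        # with dot-separated segments) are expected to score 10, anything
        # else 0, as the items below exercise.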
test_items = (
('eyfoo.foo.foo', 10),
('eyfoo.foo.foo.foo.foo', 10),
('eyfoobar', 0)
)
for token, expected_result in test_items:
test_token(token, expected_result)
| |
# Copyright 2010 OpenStack Foundation
# Copyright 2013 NTT corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of an image service that uses Glance as the backend"""
from __future__ import absolute_import
import copy
import itertools
import random
import shutil
import sys
import time
import glanceclient.exc
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import six.moves.urllib.parse as urlparse
from cinder import exception
from cinder.i18n import _LE, _LW
glance_opts = [
cfg.ListOpt('allowed_direct_url_schemes',
default=[],
help='A list of url schemes that can be downloaded directly '
'via the direct_url. Currently supported schemes: '
'[file].'),
]
glance_core_properties = [
cfg.ListOpt('glance_core_properties',
default=['checksum', 'container_format',
'disk_format', 'image_name', 'image_id',
'min_disk', 'min_ram', 'name', 'size'],
help='Default core properties of image')
]
CONF = cfg.CONF
CONF.register_opts(glance_opts)
CONF.register_opts(glance_core_properties)
CONF.import_opt('glance_api_version', 'cinder.common.config')
LOG = logging.getLogger(__name__)
def _parse_image_ref(image_href):
"""Parse an image href into composite parts.
:param image_href: href of an image
:returns: a tuple of the form (image_id, netloc, use_ssl)
:raises ValueError
"""
url = urlparse.urlparse(image_href)
netloc = url.netloc
image_id = url.path.split('/')[-1]
use_ssl = (url.scheme == 'https')
return (image_id, netloc, use_ssl)
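# Illustrative example (hypothetical values):
#   _parse_image_ref('https://glance.example.org:9292/v2/images/IMAGE_ID')
#   returns ('IMAGE_ID', 'glance.example.org:9292', True)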
def _create_glance_client(context, netloc, use_ssl, version=None):
"""Instantiate a new glanceclient.Client object."""
if version is None:
version = CONF.glance_api_version
params = {}
if use_ssl:
scheme = 'https'
# https specific params
params['insecure'] = CONF.glance_api_insecure
params['ssl_compression'] = CONF.glance_api_ssl_compression
params['cacert'] = CONF.glance_ca_certificates_file
else:
scheme = 'http'
if CONF.auth_strategy == 'keystone':
params['token'] = context.auth_token
if CONF.glance_request_timeout is not None:
params['timeout'] = CONF.glance_request_timeout
endpoint = '%s://%s' % (scheme, netloc)
return glanceclient.Client(str(version), endpoint, **params)
def get_api_servers():
"""Return Iterable over shuffled api servers.
Shuffle a list of CONF.glance_api_servers and return an iterator
that will cycle through the list, looping around to the beginning
if necessary.
"""
api_servers = []
for api_server in CONF.glance_api_servers:
if '//' not in api_server:
api_server = 'http://' + api_server
url = urlparse.urlparse(api_server)
netloc = url.netloc
use_ssl = (url.scheme == 'https')
api_servers.append((netloc, use_ssl))
random.shuffle(api_servers)
return itertools.cycle(api_servers)
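# Illustrative use (mirrors _create_onetime_client below): each retried call
# pulls the next (netloc, use_ssl) pair off the cycling iterator, e.g.
#   servers = get_api_servers()
#   netloc, use_ssl = servers.next()  # Python 2 iterator protocol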
class GlanceClientWrapper(object):
"""Glance client wrapper class that implements retries."""
def __init__(self, context=None, netloc=None, use_ssl=False,
version=None):
if netloc is not None:
self.client = self._create_static_client(context,
netloc,
use_ssl, version)
else:
self.client = None
self.api_servers = None
self.version = version
if CONF.glance_num_retries < 0:
LOG.warning(_LW(
"glance_num_retries shouldn't be a negative value. "
"The number of retries will be set to 0 until this is"
"corrected in the cinder.conf."))
CONF.set_override('glance_num_retries', 0)
def _create_static_client(self, context, netloc, use_ssl, version):
"""Create a client that we'll use for every call."""
self.netloc = netloc
self.use_ssl = use_ssl
self.version = version
return _create_glance_client(context,
self.netloc,
self.use_ssl, self.version)
def _create_onetime_client(self, context, version):
"""Create a client that will be used for one call."""
if self.api_servers is None:
self.api_servers = get_api_servers()
self.netloc, self.use_ssl = self.api_servers.next()
return _create_glance_client(context,
self.netloc,
self.use_ssl, version)
def call(self, context, method, *args, **kwargs):
"""Call a glance client method.
If we get a connection error,
retry the request according to CONF.glance_num_retries.
"""
version = self.version
if 'version' in kwargs:
version = kwargs['version']
retry_excs = (glanceclient.exc.ServiceUnavailable,
glanceclient.exc.InvalidEndpoint,
glanceclient.exc.CommunicationError)
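        # One initial attempt plus CONF.glance_num_retries retries; only the
        # connection-level errors listed above are retried, anything else
        # propagates to the caller immediately.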
num_attempts = 1 + CONF.glance_num_retries
for attempt in xrange(1, num_attempts + 1):
client = self.client or self._create_onetime_client(context,
version)
try:
return getattr(client.images, method)(*args, **kwargs)
except retry_excs as e:
netloc = self.netloc
extra = "retrying"
error_msg = _LE("Error contacting glance server "
"'%(netloc)s' for '%(method)s', "
"%(extra)s.")
if attempt == num_attempts:
extra = 'done trying'
LOG.exception(error_msg, {'netloc': netloc,
'method': method,
'extra': extra})
raise exception.GlanceConnectionFailed(reason=e)
LOG.exception(error_msg, {'netloc': netloc,
'method': method,
'extra': extra})
time.sleep(1)
class GlanceImageService(object):
"""Provides storage and retrieval of disk image objects within Glance."""
def __init__(self, client=None):
self._client = client or GlanceClientWrapper()
def detail(self, context, **kwargs):
"""Calls out to Glance for a list of detailed image information."""
params = self._extract_query_params(kwargs)
try:
images = self._client.call(context, 'list', **params)
except Exception:
_reraise_translated_exception()
_images = []
for image in images:
if self._is_image_available(context, image):
_images.append(self._translate_from_glance(image))
return _images
def _extract_query_params(self, params):
_params = {}
accepted_params = ('filters', 'marker', 'limit',
'sort_key', 'sort_dir')
for param in accepted_params:
if param in params:
_params[param] = params.get(param)
# ensure filters is a dict
_params.setdefault('filters', {})
# NOTE(vish): don't filter out private images
_params['filters'].setdefault('is_public', 'none')
return _params
def show(self, context, image_id):
"""Returns a dict with image data for the given opaque image id."""
try:
image = self._client.call(context, 'get', image_id)
except Exception:
_reraise_translated_image_exception(image_id)
if not self._is_image_available(context, image):
raise exception.ImageNotFound(image_id=image_id)
base_image_meta = self._translate_from_glance(image)
return base_image_meta
def get_location(self, context, image_id):
"""Returns a tuple of the direct url and locations representing the
backend storage location, or (None, None) if these attributes are not
shown by Glance.
"""
if CONF.glance_api_version == 1:
# image location not available in v1
return (None, None)
try:
# direct_url is returned by v2 api
client = GlanceClientWrapper(version=2)
image_meta = client.call(context, 'get', image_id)
except Exception:
_reraise_translated_image_exception(image_id)
if not self._is_image_available(context, image_meta):
raise exception.ImageNotFound(image_id=image_id)
        # For some glance stores (e.g. NFS) only metadata is stored and
        # returned as locations, so a composite of the two needs to be
        # returned.
return (getattr(image_meta, 'direct_url', None),
getattr(image_meta, 'locations', None))
def download(self, context, image_id, data=None):
"""Calls out to Glance for data and writes data."""
if data and 'file' in CONF.allowed_direct_url_schemes:
direct_url, locations = self.get_location(context, image_id)
urls = [direct_url] + [loc.get('url') for loc in locations or []]
for url in urls:
if url is None:
continue
parsed_url = urlparse.urlparse(url)
if parsed_url.scheme == "file":
# a system call to cp could have significant performance
# advantages, however we do not have the path to files at
# this point in the abstraction.
with open(parsed_url.path, "r") as f:
shutil.copyfileobj(f, data)
return
try:
image_chunks = self._client.call(context, 'data', image_id)
except Exception:
_reraise_translated_image_exception(image_id)
if not data:
return image_chunks
else:
for chunk in image_chunks:
data.write(chunk)
def create(self, context, image_meta, data=None):
"""Store the image data and return the new image object."""
sent_service_image_meta = self._translate_to_glance(image_meta)
if data:
sent_service_image_meta['data'] = data
recv_service_image_meta = self._client.call(context, 'create',
**sent_service_image_meta)
return self._translate_from_glance(recv_service_image_meta)
def update(self, context, image_id,
image_meta, data=None, purge_props=True):
"""Modify the given image with the new data."""
image_meta = self._translate_to_glance(image_meta)
# NOTE(dosaboy): see comment in bug 1210467
if CONF.glance_api_version == 1:
image_meta['purge_props'] = purge_props
# NOTE(bcwaldon): id is not an editable field, but it is likely to be
# passed in by calling code. Let's be nice and ignore it.
image_meta.pop('id', None)
if data:
image_meta['data'] = data
try:
# NOTE(dosaboy): the v2 api separates update from upload
if data and CONF.glance_api_version > 1:
image_meta = self._client.call(context, 'upload', image_id,
image_meta['data'])
else:
image_meta = self._client.call(context, 'update', image_id,
**image_meta)
except Exception:
_reraise_translated_image_exception(image_id)
else:
return self._translate_from_glance(image_meta)
def delete(self, context, image_id):
"""Delete the given image.
:raises: ImageNotFound if the image does not exist.
:raises: NotAuthorized if the user is not an owner.
"""
try:
self._client.call(context, 'delete', image_id)
except glanceclient.exc.NotFound:
raise exception.ImageNotFound(image_id=image_id)
return True
@staticmethod
def _translate_to_glance(image_meta):
image_meta = _convert_to_string(image_meta)
image_meta = _remove_read_only(image_meta)
return image_meta
@staticmethod
def _translate_from_glance(image):
image_meta = _extract_attributes(image)
image_meta = _convert_timestamps_to_datetimes(image_meta)
image_meta = _convert_from_string(image_meta)
return image_meta
@staticmethod
def _is_image_available(context, image):
"""Check image availability.
This check is needed in case Nova and Glance are deployed
without authentication turned on.
"""
# The presence of an auth token implies this is an authenticated
# request and we need not handle the noauth use-case.
if hasattr(context, 'auth_token') and context.auth_token:
return True
if image.is_public or context.is_admin:
return True
properties = image.properties
if context.project_id and ('owner_id' in properties):
return str(properties['owner_id']) == str(context.project_id)
if context.project_id and ('project_id' in properties):
return str(properties['project_id']) == str(context.project_id)
try:
user_id = properties['user_id']
except KeyError:
return False
return str(user_id) == str(context.user_id)
def _convert_timestamps_to_datetimes(image_meta):
"""Returns image with timestamp fields converted to datetime objects."""
for attr in ['created_at', 'updated_at', 'deleted_at']:
if image_meta.get(attr):
image_meta[attr] = timeutils.parse_isotime(image_meta[attr])
return image_meta
# NOTE(bcwaldon): used to store non-string data in glance metadata
def _json_loads(properties, attr):
prop = properties[attr]
if isinstance(prop, basestring):
properties[attr] = jsonutils.loads(prop)
def _json_dumps(properties, attr):
prop = properties[attr]
if not isinstance(prop, basestring):
properties[attr] = jsonutils.dumps(prop)
_CONVERT_PROPS = ('block_device_mapping', 'mappings')
def _convert(method, metadata):
metadata = copy.deepcopy(metadata)
properties = metadata.get('properties')
if properties:
for attr in _CONVERT_PROPS:
if attr in properties:
method(properties, attr)
return metadata
def _convert_from_string(metadata):
return _convert(_json_loads, metadata)
def _convert_to_string(metadata):
return _convert(_json_dumps, metadata)
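
# Illustrative sketch (not part of the original module): Glance only stores
# string property values, which is why _convert_to_string()/_convert_from_string()
# JSON-encode complex properties such as 'block_device_mapping' on the way out
# and decode them on the way back. This toy round trip uses the stdlib json
# module rather than jsonutils; the sample metadata below is made up.
if __name__ == '__main__':
    import json

    original = {'properties': {'mappings': [{'virtual': 'root', 'device': 'vda'}]}}
    sent = {'properties': dict((key, json.dumps(value))
                               for key, value in original['properties'].items())}
    received = {'properties': dict((key, json.loads(value))
                                   for key, value in sent['properties'].items())}
    assert received == original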
def _extract_attributes(image):
    # NOTE(hdd): If a key is not found, base.Resource.__getattr__() may perform
    # a get(), resulting in a useless request back to glance. This list is
    # therefore ordered with dependent attributes at the end:
    # 'deleted_at' depends on 'deleted'
    # 'checksum' depends on 'status' == 'active'
IMAGE_ATTRIBUTES = ['size', 'disk_format', 'owner',
'container_format', 'status', 'id',
'name', 'created_at', 'updated_at',
'deleted', 'deleted_at', 'checksum',
'min_disk', 'min_ram', 'is_public']
output = {}
for attr in IMAGE_ATTRIBUTES:
if attr == 'deleted_at' and not output['deleted']:
output[attr] = None
elif attr == 'checksum' and output['status'] != 'active':
output[attr] = None
else:
output[attr] = getattr(image, attr, None)
output['properties'] = getattr(image, 'properties', {})
# NOTE(jbernard): Update image properties for API version 2. For UEC
# images stored in glance, the necessary boot information is stored in the
# properties dict in version 1 so there is nothing more to do. However, in
# version 2 these are standalone fields in the GET response. This bit of
# code moves them back into the properties dict as the caller expects, thus
# producing a volume with correct metadata for booting.
for attr in ('kernel_id', 'ramdisk_id'):
value = getattr(image, attr, None)
if value:
output['properties'][attr] = value
return output
def _remove_read_only(image_meta):
IMAGE_ATTRIBUTES = ['status', 'updated_at', 'created_at', 'deleted_at']
output = copy.deepcopy(image_meta)
for attr in IMAGE_ATTRIBUTES:
if attr in output:
del output[attr]
return output
def _reraise_translated_image_exception(image_id):
"""Transform the exception for the image but keep its traceback intact."""
_exc_type, exc_value, exc_trace = sys.exc_info()
new_exc = _translate_image_exception(image_id, exc_value)
raise new_exc, None, exc_trace
def _reraise_translated_exception():
"""Transform the exception but keep its traceback intact."""
_exc_type, exc_value, exc_trace = sys.exc_info()
new_exc = _translate_plain_exception(exc_value)
raise new_exc, None, exc_trace
def _translate_image_exception(image_id, exc_value):
if isinstance(exc_value, (glanceclient.exc.Forbidden,
glanceclient.exc.Unauthorized)):
return exception.ImageNotAuthorized(image_id=image_id)
if isinstance(exc_value, glanceclient.exc.NotFound):
return exception.ImageNotFound(image_id=image_id)
if isinstance(exc_value, glanceclient.exc.BadRequest):
return exception.Invalid(exc_value)
return exc_value
def _translate_plain_exception(exc_value):
if isinstance(exc_value, (glanceclient.exc.Forbidden,
glanceclient.exc.Unauthorized)):
return exception.NotAuthorized(exc_value)
if isinstance(exc_value, glanceclient.exc.NotFound):
return exception.NotFound(exc_value)
if isinstance(exc_value, glanceclient.exc.BadRequest):
return exception.Invalid(exc_value)
return exc_value
def get_remote_image_service(context, image_href):
"""Create an image_service and parse the id from the given image_href.
The image_href param can be an href of the form
'http://example.com:9292/v1/images/b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3',
or just an id such as 'b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3'. If the
image_href is a standalone id, then the default image service is returned.
:param image_href: href that describes the location of an image
:returns: a tuple of the form (image_service, image_id)
"""
    # NOTE(bcwaldon): If image_href doesn't look like a URI, assume it's a
    # standalone image ID
if '/' not in str(image_href):
image_service = get_default_image_service()
return image_service, image_href
try:
(image_id, glance_netloc, use_ssl) = _parse_image_ref(image_href)
glance_client = GlanceClientWrapper(context=context,
netloc=glance_netloc,
use_ssl=use_ssl)
except ValueError:
raise exception.InvalidImageRef(image_href=image_href)
image_service = GlanceImageService(client=glance_client)
return image_service, image_id
def get_default_image_service():
return GlanceImageService()
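
# Illustrative sketch (not part of the original module): the href-vs-id decision
# documented in get_remote_image_service(), reproduced with only the stdlib URL
# parser so it can run standalone. The hostname and image id below are made up.
if __name__ == '__main__':
    try:
        from urllib.parse import urlparse as _urlparse  # Python 3
    except ImportError:
        from urlparse import urlparse as _urlparse      # Python 2

    for href in ('b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3',
                 'https://glance.example.com:9292/v1/images/'
                 'b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3'):
        if '/' not in href:
            print('bare id, default image service: %s' % href)
        else:
            parsed = _urlparse(href)
            print('remote glance at %s (ssl=%s), image id %s' %
                  (parsed.netloc, parsed.scheme == 'https',
                   parsed.path.split('/')[-1]))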
| |
# -*- coding: utf-8 -*-
"""User models."""
from flask import current_app, url_for
from flask_babel import gettext
from flask_login import UserMixin
from fulfil_client.model import BooleanType, ModelType, One2ManyType, StringType
from itsdangerous import TimestampSigner, URLSafeSerializer
from shop.extensions import fulfil
from shop.fulfilio import Model
from shop.globals import current_channel
from shop.utils import render_email
class Address(Model):
"""
An address of a user
"""
__model_name__ = 'party.address'
_eager_fields = set(['full_address'])
party = ModelType('party.party')
name = StringType(required=True)
street = StringType()
streetbis = StringType()
zip = StringType()
city = StringType()
country = ModelType("country.country")
subdivision = ModelType("country.subdivision")
phone = StringType()
validation_status = StringType()
@property
def full_address(self):
return self._values.get('full_address', '').replace('\r\n', ', ')
def serialize(self):
values = self._values.copy()
values['country'] = self.country and self.country.code
values['subdivision'] = self.subdivision and self.subdivision.code
return values
class ContactMechanism(Model):
__model_name__ = 'party.contact_mechanism'
party = ModelType('party.party')
type = StringType(required=True)
value = StringType(required=True)
class Party(Model):
"""
A contact (party) in Fulfil.
Needed since every user is associated with a contact
"""
__model_name__ = 'party.party'
name = StringType(required=True)
contact_mechanisms = One2ManyType("party.contact_mechanism")
addresses = One2ManyType("party.address")
payment_profiles = One2ManyType("party.payment_profile")
def get_mechanism(self, name):
for mechanism in self.contact_mechanisms:
if mechanism.type == name:
return mechanism.value
return ''
class User(UserMixin, Model):
"""
A user of the app.
"""
__model_name__ = 'nereid.user'
email = StringType(required=True)
name = StringType(required=True)
password = StringType()
party = ModelType("party.party")
active = BooleanType()
@property
def is_active(self):
"For Flask login"
return self.active
@property
def phone(self):
return self.party.get_mechanism('phone')
@property
def first_name(self):
if self.name:
return self.name.split(' ')[0]
@property
def last_name(self):
if self.name:
return self.name.split(' ')[-1]
@classmethod
def find_user(cls, email):
"""
Find the user from the email
"""
return cls.query.filter_by_domain(
[('email', 'ilike', email)]
).show_active_only(False).first()
def set_password(self, password):
"""Set password."""
self.password = password
self.save()
def check_password(self, value):
"""Check password."""
return self.rpc.match_password(self.id, value)
@classmethod
def authenticate(cls, email, password):
"""
        Convenience method that combines
        find_user and check_password
:param email: Email of the user
:param password: Password
:return: user instance if password matches
else None
"""
user = cls.find_user(email)
if user and user.check_password(password):
return user
return None
def save(self):
if not self.party:
party = Party(name=self.name)
party.save()
self.party = party.id
ContactMechanism(
party=party.id,
value=self.email,
type='email',
).save()
super(User, self).save()
@classmethod
def user_exists(cls, email):
"""Check if the user exists"""
return cls.query.filter_by_domain(
[('email', 'ilike', email)]
).show_active_only(False).exists()
@staticmethod
def _signer():
return TimestampSigner(current_app.secret_key)
@staticmethod
def _serializer():
return URLSafeSerializer(current_app.secret_key)
def _get_sign(self, salt):
"""
        Returns a timestamp-signed, URL-serialized sign created with the
        given salt (e.g. 'verification').
"""
return self._signer().sign(self._serializer().dumps(self.id, salt=salt))
def get_email_verification_link(self, **options):
"""
Returns an email verification link for the user
"""
return url_for(
'public.verify_email',
sign=self._get_sign('verification'),
user_id=self.id,
**options
)
def get_activation_link(self, **options):
"""
Returns an activation link for the user
"""
return url_for(
'public.activate',
sign=self._get_sign('activation'),
user_id=self.id,
**options
)
def get_reset_password_link(self, **options):
"""
Returns a password reset link for the user
"""
return url_for(
'public.new_password',
sign=self._get_sign('reset-password'),
user_id=self.id,
**options
)
def initiate_reset_password(self):
"""
Initiate the password reset for the user
"""
EmailQueue = fulfil.model('email.queue')
email_message = render_email(
current_channel.support_email, # From
self.email, # To
gettext('Your %(channel)s password', channel=current_channel.name), # Subj
'emails/reset-password.text',
'emails/reset-password.html',
user=self,
)
EmailQueue.create([{
'from_addr': current_channel.support_email,
'to_addrs': self.email,
'msg': email_message.as_string(),
}])
def get_addresses(self):
"""
Get all addresses of the user
"""
return Address.query.filter_by_domain(
[('party', '=', self.party.id)]
).all()
def get_magic_login_link(self, **options):
"""
Returns a direct login link for user
"""
return url_for(
'public.magic_login',
sign=self._get_sign('magic-login'),
user_id=self.id,
**options
)
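
# Illustrative sketch (not part of the original module): how the
# TimestampSigner/URLSafeSerializer pair used by User._get_sign() produces a
# token that can later be verified. The secret key below is made up; the real
# code uses current_app.secret_key and salts such as 'verification'.
if __name__ == '__main__':
    secret = 'example-secret-key'
    signer = TimestampSigner(secret)
    serializer = URLSafeSerializer(secret)

    user_id = 42
    sign = signer.sign(serializer.dumps(user_id, salt='verification'))

    # Verification reverses the two steps and can enforce a maximum age.
    payload = signer.unsign(sign, max_age=24 * 60 * 60)
    assert serializer.loads(payload, salt='verification') == user_id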
| |
'''
Created on Sep 18, 2013
@author: timmahrt
Given two lists of tuples of the form [(time, value), (time, value), ...], morph
can iteratively transform the values in one list to the values in the other
while maintaining the times in the first list.
Both time scales are placed on a relative scale. This assumes that the times
may be different and the number of samples may be different but the 'events'
occur at the same relative location (half way through, at the end, etc.).
Both dynamic time warping and morph align two data lists in time. However,
dynamic time warping does this by analyzing the event structure and aligning
events in the two signals as best it can (i.e. it changes when events happen
in relative time, while morph preserves when events happen in relative time).
'''
class RelativizeSequenceException(Exception):
def __init__(self, dist):
super(RelativizeSequenceException, self).__init__()
self.dist = dist
def __str__(self):
return "You need at least two unique values to make " + \
"a sequence relative. Input: %s" % repr(self.dist)
def makeSequenceRelative(absVSequence):
'''
Puts every value in a list on a continuum between 0 and 1
Also returns the min and max values (to reverse the process)
'''
if len(absVSequence) < 2 or len(set(absVSequence)) == 1:
raise RelativizeSequenceException(absVSequence)
minV = min(absVSequence)
maxV = max(absVSequence)
relativeSeq = [(value - minV) / (maxV - minV) for value in absVSequence]
return relativeSeq, minV, maxV
def makeSequenceAbsolute(relVSequence, minV, maxV):
'''
Makes every value in a sequence absolute
'''
return [(value * (maxV - minV)) + minV for value in relVSequence]
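
# Illustrative sketch (not part of the original module): a tiny round trip
# through makeSequenceRelative() and makeSequenceAbsolute() on made-up values
# chosen so the floating point arithmetic is exact.
if __name__ == '__main__':
    seq = [2.0, 4.0, 6.0, 10.0]
    relSeq, minV, maxV = makeSequenceRelative(seq)
    assert relSeq == [0.0, 0.25, 0.5, 1.0]
    assert makeSequenceAbsolute(relSeq, minV, maxV) == seq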
def _makeTimingRelative(absoluteDataList):
'''
Given normal pitch tier data, puts the times on a scale from 0 to 1
Input is a list of tuples of the form
    [(time1, pitch1), (time2, pitch2), ...]
Also returns the start and end time so that the process can be reversed
'''
timingSeq = [row[0] for row in absoluteDataList]
valueSeq = [list(row[1:]) for row in absoluteDataList]
relTimingSeq, startTime, endTime = makeSequenceRelative(timingSeq)
relDataList = [tuple([time, ] + row) for time, row
in zip(relTimingSeq, valueSeq)]
return relDataList, startTime, endTime
def _makeTimingAbsolute(relativeDataList, startTime, endTime):
'''
Maps values from 0 to 1 to the provided start and end time
Input is a list of tuples of the form
    [(time1, pitch1), (time2, pitch2), ...]
'''
timingSeq = [row[0] for row in relativeDataList]
valueSeq = [list(row[1:]) for row in relativeDataList]
absTimingSeq = makeSequenceAbsolute(timingSeq, startTime, endTime)
absDataList = [tuple([time, ] + row) for time, row
in zip(absTimingSeq, valueSeq)]
return absDataList
def _getSmallestDifference(inputList, targetVal):
'''
Returns the value in inputList that is closest to targetVal
    Iteratively splits the (assumed sorted) dataset in two, so it should be pretty fast
'''
targetList = inputList[:]
retVal = None
while True:
# If we're down to one value, stop iterating
if len(targetList) == 1:
retVal = targetList[0]
break
halfPoint = int(len(targetList) / 2.0) - 1
a = targetList[halfPoint]
b = targetList[halfPoint + 1]
leftDiff = abs(targetVal - a)
rightDiff = abs(targetVal - b)
# If the distance is 0, stop iterating, the targetVal is present
# in the inputList
if leftDiff == 0 or rightDiff == 0:
retVal = targetVal
break
# Look at left half or right half
if leftDiff < rightDiff:
targetList = targetList[:halfPoint + 1]
else:
targetList = targetList[halfPoint + 1:]
return retVal
def _getNearestMappingIndexList(fromValList, toValList):
'''
    Finds the indices for data points that are closest to each other.
    The inputs should be in relative time, scaled from 0 to 1
    e.g. if you have [0, .1, .5, .9] and [0, .1, .2, 1]
    this will output [0, 1, 2, 3]
'''
indexList = []
for fromTimestamp in fromValList:
smallestDiff = _getSmallestDifference(toValList, fromTimestamp)
i = toValList.index(smallestDiff)
indexList.append(i)
return indexList
def morphDataLists(fromList, toList, stepList):
'''
Iteratively morph fromList into toList using the values 0 to 1 in stepList
stepList: a value of 0 means no change and a value of 1 means a complete
change to the other value
'''
# If there are more than 1 pitch value, then we align the data in
# relative time.
# Each data point comes with a timestamp. The earliest timestamp is 0
# and the latest timestamp is 1. Using this method, for each relative
# timestamp in the source list, we find the closest relative timestamp
# in the target list. Just because two pitch values have the same index
# in the source and target lists does not mean that they correspond to
# the same speech event.
fromListRel, fromStartTime, fromEndTime = _makeTimingRelative(fromList)
toListRel = _makeTimingRelative(toList)[0]
# If fromList has more points, we'll have flat areas
    # If toList has more points, we might miss peaks or valleys
fromTimeList = [dataTuple[0] for dataTuple in fromListRel]
toTimeList = [dataTuple[0] for dataTuple in toListRel]
indexList = _getNearestMappingIndexList(fromTimeList, toTimeList)
alignedToPitchRel = [toListRel[i] for i in indexList]
for stepAmount in stepList:
newPitchList = []
# Perform the interpolation
for fromTuple, toTuple in zip(fromListRel, alignedToPitchRel):
fromTime, fromValue = fromTuple
toTime, toValue = toTuple
# i + 1 b/c i_0 = 0 = no change
newValue = fromValue + (stepAmount * (toValue - fromValue))
newTime = fromTime + (stepAmount * (toTime - fromTime))
newPitchList.append((newTime, newValue))
newPitchList = _makeTimingAbsolute(newPitchList, fromStartTime,
fromEndTime)
yield stepAmount, newPitchList
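
# Illustrative sketch (not part of the original module): morphing one made-up
# pitch contour toward another in two steps. Because both contours share the
# same timestamps here, only the values change; with different sample counts
# the relative-time alignment described above kicks in.
if __name__ == '__main__':
    fromContour = [(0.0, 100.0), (1.0, 110.0), (2.0, 105.0)]
    toContour = [(0.0, 200.0), (1.0, 220.0), (2.0, 210.0)]
    for stepAmount, morphed in morphDataLists(fromContour, toContour, [0.5, 1.0]):
        print('%s -> %s' % (stepAmount, morphed))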
def morphChunkedDataLists(fromDataList, toDataList, stepList):
'''
Morph one set of data into another, in a stepwise fashion
A convenience function. Given a set of paired data lists,
this will morph each one individually.
Returns a single list with all data combined together.
'''
assert(len(fromDataList) == len(toDataList))
# Morph the fromDataList into the toDataList
outputList = []
for x, y in zip(fromDataList, toDataList):
# We cannot morph a region if there is no data or only
# a single data point for either side
if (len(x) < 2) or (len(y) < 2):
continue
tmpList = [outputPitchList for _, outputPitchList
in morphDataLists(x, y, stepList)]
outputList.append(tmpList)
# Transpose list
finalOutputList = outputList.pop(0)
for subList in outputList:
for i, subsubList in enumerate(subList):
finalOutputList[i].extend(subsubList)
return finalOutputList
def morphAveragePitch(fromDataList, toDataList):
'''
Adjusts the values in fromPitchList to have the same average as toPitchList
Because other manipulations can alter the average pitch, morphing the pitch
is the last pitch manipulation that should be done
After the morphing, the code removes any values below zero, thus the
final average might not match the target average.
'''
timeList, fromPitchList = zip(*fromDataList)
toPitchList = [pitchVal for _, pitchVal in toDataList]
# Zero pitch values aren't meaningful, so filter them out if they are
# in the dataset
fromListNoZeroes = [val for val in fromPitchList if val > 0]
fromAverage = sum(fromListNoZeroes) / float(len(fromListNoZeroes))
toListNoZeroes = [val for val in toPitchList if val > 0]
toAverage = sum(toListNoZeroes) / float(len(toListNoZeroes))
newPitchList = [val - fromAverage + toAverage for val in fromPitchList]
# finalAverage = sum(newPitchList) / float(len(newPitchList))
# Removing zeroes and negative pitch values
retDataList = [(time, pitchVal) for time, pitchVal
in zip(timeList, newPitchList)
if pitchVal > 0]
return retDataList
def morphRange(fromDataList, toDataList):
'''
Changes the scale of values in one distribution to that of another
ie The maximum value in fromDataList will be set to the maximum value in
toDataList. The 75% largest value in fromDataList will be set to the
75% largest value in toDataList, etc.
Small sample sizes will yield results that are not very meaningful
'''
# Isolate and sort pitch values
fromPitchList = [dataTuple[1] for dataTuple in fromDataList]
toPitchList = [dataTuple[1] for dataTuple in toDataList]
fromPitchListSorted = sorted(fromPitchList)
toPitchListSorted = sorted(toPitchList)
# Bin pitch values between 0 and 1
fromListRel = makeSequenceRelative(fromPitchListSorted)[0]
toListRel = makeSequenceRelative(toPitchListSorted)[0]
    # Find each value's closest equivalent in the other list
indexList = _getNearestMappingIndexList(fromListRel, toListRel)
# Map the source pitch to the target pitch value
# Pitch value -> get sorted position -> get corresponding position in
# target list -> get corresponding pitch value = the new pitch value
retList = []
for time, pitch in fromDataList:
fromI = fromPitchListSorted.index(pitch)
toI = indexList[fromI]
newPitch = toPitchListSorted[toI]
retList.append((time, newPitch))
return retList
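
# Illustrative sketch (not part of the original module): shifting a made-up
# contour so its average pitch matches another contour's average, using
# morphAveragePitch() defined above.
if __name__ == '__main__':
    src = [(0.0, 100.0), (1.0, 120.0), (2.0, 110.0)]
    ref = [(0.0, 200.0), (1.0, 240.0), (2.0, 220.0)]
    shifted = morphAveragePitch(src, ref)
    print(shifted)  # values move from an average of 110 to an average of 220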
| |
#
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oz.GuestFactory
import oz.TDL
import os
import guestfs
import libxml2
import traceback
import json
import configparser
import logging
import subprocess
from zope.interface import implementer
from xml.etree.ElementTree import fromstring
from imgfac.ApplicationConfiguration import ApplicationConfiguration
from imgfac.ImageFactoryException import ImageFactoryException
from imgfac.FactoryUtils import launch_inspect_and_mount, shutdown_and_close, remove_net_persist, create_cloud_info
try:
from .VSphereHelper import VSphereHelper
except:
# psphere may be end of life
# we only need it for pushing (aka target images) - this plugin is widely used
# for creating VMDK target images so let's not fail just because psphere is not
# present and/or has issues
# TODO: Either move to a supported vSphere API binding or drop push support entirely
logging.warning("VSphereHelper failed to load - pushing to vSphere will not work")
from imgfac.CloudDelegate import CloudDelegate
from imgfac.FactoryUtils import check_qcow_size
rhel5_module_script='''echo "alias scsi_hostadapter2 mptbase" >> /etc/modprobe.conf
echo "alias scsi_hostadapter3 mptspi" >> /etc/modprobe.conf
KERNEL=`grubby --default-kernel`
KERNELVERSION=`grubby --default-kernel | cut -f 2- -d "-"`
NEWINITRD="`grubby --info=$KERNEL | grep initrd | cut -f 2 -d "="`-vsphere"
mkinitrd $NEWINITRD $KERNELVERSION
grubby --add-kernel=$KERNEL --copy-default --make-default --initrd=$NEWINITRD --title="Red Hat Enterprise Linux Server ($KERNELVERSION) Image Factory vSphere module update"
rm /root/vsphere-module.sh'''
@implementer(CloudDelegate)
class vSphere(object):
"""docstring for Fedora_vsphere_Builder"""
def __init__(self):
super(vSphere, self).__init__()
self.app_config = ApplicationConfiguration().configuration
self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
def activity(self, activity):
# Simple helper function
# Activity should be a one line human-readable string indicating the task in progress
# We log it at DEBUG and also set it as the status_detail on our active image
self.log.debug(activity)
self.active_image.status_detail['activity'] = activity
def log_exc(self):
self.log.debug("Exception caught in ImageFactory")
self.log.debug(traceback.format_exc())
    def delete_from_provider(self, builder, provider, credentials, target, parameters):
        # Record the builder so the helpers below can reference it, since deletion
        # can be invoked without a preceding build step on this instance
        self.builder = builder
        self.log.debug("Deleting vSphere image (%s)" % (self.builder.provider_image.identifier_on_provider))
provider_data = self.get_dynamic_provider_data(provider)
if provider_data is None:
raise ImageFactoryException("VMWare instance not found in XML or JSON provided")
self.generic_decode_credentials(credentials, provider_data, "vsphere")
helper = VSphereHelper(provider_data['api-url'], self.username, self.password)
# This call raises an exception on error
helper.delete_vm(self.builder.provider_image.identifier_on_provider)
def builder_should_create_target_image(self, builder, target, image_id, template, parameters):
self.log.info('builder_should_create_target_image() called on vSphere plugin - returning True')
return True
def builder_will_create_target_image(self, builder, target, image_id, template, parameters):
tdlobj = oz.TDL.TDL(xmlstring=template.xml, rootpw_required=self.app_config["tdl_require_root_pw"])
if tdlobj.distro == "RHEL-5":
merge_content = { "commands": [ { "name": "execute-module-script", "type": "raw" , "command": "/bin/sh /root/vsphere-module.sh" } ],
"files" : [ { "name": "/root/vsphere-module.sh", "type": "raw", "file": rhel5_module_script } ] }
try:
builder.os_plugin.add_cloud_plugin_content(merge_content)
except:
self.log.error("Failed to add RHEL-5 specific vSphere customization to cloud plugin tasks")
raise
def builder_did_create_target_image(self, builder, target, image_id, template, parameters):
self.log.info('builder_did_create_target_image() called in vSphere plugin')
self.status="BUILDING"
# TODO: This is a convenience variable for refactoring - rename
self.new_image_id = builder.target_image.identifier
# TODO: More convenience vars - revisit
self.template = template
self.target = target
self.builder = builder
self.image = builder.target_image.data
# This lets our logging helper know what image is being operated on
self.active_image = self.builder.target_image
self.tdlobj = oz.TDL.TDL(xmlstring=self.template.xml, rootpw_required=self.app_config["tdl_require_root_pw"])
# Add in target specific content
#TODO - URGENT - make this work again
#self.add_target_content()
# Oz assumes unique names - TDL built for multiple backends guarantees
# they are not unique. We don't really care about the name so just
# force uniqueness
# Oz now uses the tdlobject name property directly in several places
# so we must change it
self.tdlobj.name = "factory-build-" + self.new_image_id
        # In contrast to our original builders, we enter the cloud plugins with a KVM file already
        # created as the base_image. As a result, all the Oz building steps are gone (and can be
        # found in the OS plugin(s)).
# OS plugin has already provided the initial file for us to work with
# which we can currently assume is a raw KVM compatible image
# Add the cloud-info file
self.modify_oz_filesystem()
self.log.info("Transforming image for use on VMWare")
vmdk_format = parameters.get('vsphere_vmdk_format', 'streaming')
builder.target_image.parameters['vsphere_vmdk_format'] = vmdk_format
if vmdk_format == 'streaming':
self.vmware_transform_image_stream_vmdk()
elif vmdk_format == 'standard':
self.vmware_transform_image_standard_vmdk()
else:
raise Exception("Requested unknown VMDK format (%s)" % (vmdk_format))
self.percent_complete=100
self.status="COMPLETED"
def vmware_transform_image_standard_vmdk(self):
# On entry the image points to our generic KVM raw image
        # Convert to a standard (monolithic) VMDK and then update the image property
target_image = self.image + ".tmp.vmdk"
self.log.debug("Converting raw kvm image (%s) to standard VMDK (%s) using qemu-img" % (self.image, target_image))
qemu_img_cmd = [ 'qemu-img', 'convert', '-O', 'vmdk', self.image, target_image ]
subprocess.check_call(qemu_img_cmd)
self.log.debug("VMDK conversion complete")
os.unlink(self.image)
os.rename(target_image, self.image)
def vmware_transform_image_stream_vmdk(self):
# On entry the image points to our generic KVM raw image
# Convert to stream-optimized VMDK and then update the image property
target_image = self.image + ".tmp.vmdk"
self.log.debug("Converting raw kvm image (%s) to streaming VMDK (%s) using qemu-img" % (self.image, target_image))
qemu_img_cmd = [ 'qemu-img', 'convert', '-O', 'vmdk', "-o",
"adapter_type=lsilogic,subformat=streamOptimized,compat6", self.image, target_image ]
subprocess.check_call(qemu_img_cmd)
self.log.debug("VMDK conversion complete")
os.unlink(self.image)
os.rename(target_image, self.image)
def modify_oz_filesystem(self):
self.log.debug("Doing further Factory specific modification of Oz image")
guestfs_handle = launch_inspect_and_mount(self.builder.target_image.data)
remove_net_persist(guestfs_handle)
create_cloud_info(guestfs_handle, self.target)
shutdown_and_close(guestfs_handle)
def push_image_to_provider(self, builder, provider, credentials, target, target_image, parameters):
self.log.info('push_image_to_provider() called in vSphere')
# TODO: This is a convenience variable for refactoring - rename
self.new_image_id = builder.provider_image.identifier
self.tdlobj = oz.TDL.TDL(xmlstring=builder.target_image.template, rootpw_required=self.app_config["tdl_require_root_pw"])
self.builder = builder
self.active_image = self.builder.provider_image
self.vmware_push_image_upload(target_image, provider, credentials)
def vmware_push_image_upload(self, target_image_id, provider, credentials):
# BuildDispatcher is now the only location for the logic to map a provider to its data and target
provider_data = self.get_dynamic_provider_data(provider)
if provider_data is None:
raise ImageFactoryException("VMWare instance not found in XML or JSON provided")
self.generic_decode_credentials(credentials, provider_data, "vsphere")
# Image is always here and it is the target_image datafile
input_image = self.builder.target_image.data
# Example of some JSON for westford_esx
# {"westford_esx": {"api-url": "https://vsphere.virt.bos.redhat.com/sdk", "username": "Administrator", "password": "changeme",
# "datastore": "datastore1", "network_name": "VM Network" } }
vm_name = "factory-image-" + self.new_image_id
helper = VSphereHelper(provider_data['api-url'], self.username, self.password)
# Newer Oz versions introduce a configurable disk size in TDL
# We must still detect that it is present and pass it in this call
try:
disksize=getattr(self.tdlobj, "disksize")
except AttributeError:
disksize = 10
disksize_str = str(int(disksize)*1024*1024 + 2) + "KB"
helper.create_vm(input_image, vm_name, provider_data['compute_resource'], provider_data['datastore'],
disksize_str, [ { "network_name": provider_data['network_name'], "type": "VirtualE1000"} ],
"512MB", 1, 'otherLinux64Guest')
self.builder.provider_image.identifier_on_provider = vm_name
self.builder.provider_account_identifier = self.username
self.percent_complete = 100
def generic_decode_credentials(self, credentials, provider_data, target):
# convenience function for simple creds (rhev-m and vmware currently)
doc = libxml2.parseDoc(credentials)
self.username = None
_usernodes = doc.xpathEval("//provider_credentials/%s_credentials/username" % (target))
if len(_usernodes) > 0:
self.username = _usernodes[0].content
else:
try:
self.username = provider_data['username']
except KeyError:
raise ImageFactoryException("No username specified in config file or in push call")
self.provider_account_identifier = self.username
_passnodes = doc.xpathEval("//provider_credentials/%s_credentials/password" % (target))
if len(_passnodes) > 0:
self.password = _passnodes[0].content
else:
try:
self.password = provider_data['password']
except KeyError:
raise ImageFactoryException("No password specified in config file or in push call")
doc.freeDoc()
def get_dynamic_provider_data(self, provider):
# Get provider details for RHEV-M or VSphere
# First try to interpret this as an ad-hoc/dynamic provider def
# If this fails, try to find it in one or the other of the config files
# If this all fails return None
# We use this in the builders as well so I have made it "public"
try:
xml_et = fromstring(provider)
return xml_et.attrib
except Exception as e:
self.log.debug('Testing provider for XML: %s' % e)
pass
try:
jload = json.loads(provider)
return jload
except ValueError as e:
self.log.debug('Testing provider for JSON: %s' % e)
pass
return None
def abort(self):
pass
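
# Illustrative sketch (not part of the original plugin): the two provider
# definition formats accepted by get_dynamic_provider_data() - XML attributes
# or a JSON object. The endpoint, datastore, and network names are made up.
if __name__ == '__main__':
    import json as _json
    from xml.etree.ElementTree import fromstring as _fromstring

    xml_provider = ('<provider api-url="https://vsphere.example.com/sdk" '
                    'datastore="datastore1" network_name="VM Network"/>')
    json_provider = ('{"api-url": "https://vsphere.example.com/sdk", '
                     '"datastore": "datastore1", "network_name": "VM Network"}')
    print(_fromstring(xml_provider).attrib)
    print(_json.loads(json_provider))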
| |
"""Word completion for IPython.
This module is a fork of the rlcompleter module in the Python standard
library. The original enhancements made to rlcompleter have been sent
upstream and were accepted as of Python 2.3, but we need a lot more
functionality specific to IPython, so this module will continue to live as an
IPython-specific utility.
---------------------------------------------------------------------------
Original rlcompleter documentation:
This requires the latest extension to the readline module. The completer
completes keywords, built-ins and globals in __main__; when completing
NAME.NAME..., it evaluates (!) the expression up to the last dot and
completes its attributes.
It's very cool to do "import string", type "string.", hit the
completion key (twice), and see the list of names defined by the
string module!
Tip: to use the tab key as the completion key, call
readline.parse_and_bind("tab: complete")
Notes:
- Exceptions raised by the completer function are *ignored* (and
generally cause the completion to fail). This is a feature -- since
readline sets the tty device in raw (or cbreak) mode, printing a
traceback wouldn't work well without some complicated hoopla to save,
reset and restore the tty state.
- The evaluation of the NAME.NAME... form may cause arbitrary
application defined code to be executed if an object with a
__getattr__ hook is found. Since it is the responsibility of the
application (or the user) to enable this feature, I consider this an
acceptable risk. More complicated expressions (e.g. function calls or
indexing operations) are *not* evaluated.
- GNU readline is also used by the built-in functions input() and
raw_input(), and thus these also benefit/suffer from the completer
features. Clearly an interactive application can benefit by
specifying its own completer function and using raw_input() for all
its input.
- When the original stdin is not a tty device, GNU readline is never
used, and this module (and the readline module) are silently inactive.
"""
#*****************************************************************************
#
# Since this file is essentially a minimally modified copy of the rlcompleter
# module which is part of the standard Python distribution, I assume that the
# proper procedure is to maintain its copyright as belonging to the Python
# Software Foundation (in addition to my own, for all new code).
#
# Copyright (C) 2001 Python Software Foundation, www.python.org
# Copyright (C) 2001-2006 Fernando Perez. <fperez@colorado.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#
#*****************************************************************************
import __builtin__
import __main__
import glob
import keyword
import os
import re
import shlex
import sys
import IPython.rlineimpl as readline
import itertools
from IPython.ipstruct import Struct
from IPython import ipapi
import types
# Python 2.4 offers sets as a builtin
try:
set([1,2])
except NameError:
from sets import Set as set
from IPython.genutils import debugx, dir2
__all__ = ['Completer','IPCompleter']
class Completer:
def __init__(self,namespace=None,global_namespace=None):
"""Create a new completer for the command line.
Completer([namespace,global_namespace]) -> completer instance.
If unspecified, the default namespace where completions are performed
is __main__ (technically, __main__.__dict__). Namespaces should be
given as dictionaries.
An optional second namespace can be given. This allows the completer
to handle cases where both the local and global scopes need to be
distinguished.
Completer instances should be used as the completion mechanism of
readline via the set_completer() call:
readline.set_completer(Completer(my_namespace).complete)
"""
# some minimal strict typechecks. For some core data structures, I
# want actual basic python types, not just anything that looks like
# one. This is especially true for namespaces.
for ns in (namespace,global_namespace):
if ns is not None and type(ns) != types.DictType:
raise TypeError,'namespace must be a dictionary'
# Don't bind to namespace quite yet, but flag whether the user wants a
# specific namespace or to use __main__.__dict__. This will allow us
# to bind to __main__.__dict__ at completion time, not now.
if namespace is None:
self.use_main_ns = 1
else:
self.use_main_ns = 0
self.namespace = namespace
# The global namespace, if given, can be bound directly
if global_namespace is None:
self.global_namespace = {}
else:
self.global_namespace = global_namespace
def complete(self, text, state):
"""Return the next possible completion for 'text'.
This is called successively with state == 0, 1, 2, ... until it
returns None. The completion should begin with 'text'.
"""
if self.use_main_ns:
self.namespace = __main__.__dict__
if state == 0:
if "." in text:
self.matches = self.attr_matches(text)
else:
self.matches = self.global_matches(text)
try:
return self.matches[state]
except IndexError:
return None
def global_matches(self, text):
"""Compute matches when text is a simple name.
Return a list of all keywords, built-in functions and names currently
defined in self.namespace or self.global_namespace that match.
"""
matches = []
match_append = matches.append
n = len(text)
for lst in [keyword.kwlist,
__builtin__.__dict__.keys(),
self.namespace.keys(),
self.global_namespace.keys()]:
for word in lst:
if word[:n] == text and word != "__builtins__":
match_append(word)
return matches
def attr_matches(self, text):
"""Compute matches when text contains a dot.
Assuming the text is of the form NAME.NAME....[NAME], and is
evaluatable in self.namespace or self.global_namespace, it will be
evaluated and its attributes (as revealed by dir()) are used as
        possible completions. (For class instances, class members are
also considered.)
WARNING: this can still invoke arbitrary C code, if an object
with a __getattr__ hook is evaluated.
"""
import re
# Another option, seems to work great. Catches things like ''.<tab>
m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
if not m:
return []
expr, attr = m.group(1, 3)
try:
obj = eval(expr, self.namespace)
except:
try:
obj = eval(expr, self.global_namespace)
except:
return []
words = dir2(obj)
# Build match list to return
n = len(attr)
return ["%s.%s" % (expr, w) for w in words if w[:n] == attr ]
class IPCompleter(Completer):
"""Extension of the completer class with IPython-specific features"""
def __init__(self,shell,namespace=None,global_namespace=None,
omit__names=0,alias_table=None):
"""IPCompleter() -> completer
Return a completer object suitable for use by the readline library
via readline.set_completer().
Inputs:
- shell: a pointer to the ipython shell itself. This is needed
because this completer knows about magic functions, and those can
only be accessed via the ipython instance.
- namespace: an optional dict where completions are performed.
- global_namespace: secondary optional dict for completions, to
handle cases (such as IPython embedded inside functions) where
both Python scopes are visible.
- The optional omit__names parameter sets the completer to omit the
'magic' names (__magicname__) for python objects unless the text
to be completed explicitly starts with one or more underscores.
- If alias_table is supplied, it should be a dictionary of aliases
to complete. """
Completer.__init__(self,namespace,global_namespace)
self.magic_prefix = shell.name+'.magic_'
self.magic_escape = shell.ESC_MAGIC
self.readline = readline
delims = self.readline.get_completer_delims()
delims = delims.replace(self.magic_escape,'')
self.readline.set_completer_delims(delims)
self.get_line_buffer = self.readline.get_line_buffer
self.omit__names = omit__names
self.merge_completions = shell.rc.readline_merge_completions
if alias_table is None:
alias_table = {}
self.alias_table = alias_table
# Regexp to split filenames with spaces in them
self.space_name_re = re.compile(r'([^\\] )')
# Hold a local ref. to glob.glob for speed
self.glob = glob.glob
# Determine if we are running on 'dumb' terminals, like (X)Emacs
# buffers, to avoid completion problems.
term = os.environ.get('TERM','xterm')
self.dumb_terminal = term in ['dumb','emacs']
# Special handling of backslashes needed in win32 platforms
if sys.platform == "win32":
self.clean_glob = self._clean_glob_win32
else:
self.clean_glob = self._clean_glob
self.matchers = [self.python_matches,
self.file_matches,
self.alias_matches,
self.python_func_kw_matches]
# Code contributed by Alex Schmolck, for ipython/emacs integration
def all_completions(self, text):
"""Return all possible completions for the benefit of emacs."""
completions = []
comp_append = completions.append
try:
for i in xrange(sys.maxint):
res = self.complete(text, i)
if not res: break
comp_append(res)
#XXX workaround for ``notDefined.<tab>``
except NameError:
pass
return completions
# /end Alex Schmolck code.
def _clean_glob(self,text):
return self.glob("%s*" % text)
def _clean_glob_win32(self,text):
return [f.replace("\\","/")
for f in self.glob("%s*" % text)]
def file_matches(self, text):
"""Match filenames, expanding ~USER type strings.
Most of the seemingly convoluted logic in this completer is an
attempt to handle filenames with spaces in them. And yet it's not
quite perfect, because Python's readline doesn't expose all of the
GNU readline details needed for this to be done correctly.
For a filename with a space in it, the printed completions will be
only the parts after what's already been typed (instead of the
full completions, as is normally done). I don't think with the
current (as of Python 2.3) Python readline it's possible to do
better."""
#print 'Completer->file_matches: <%s>' % text # dbg
# chars that require escaping with backslash - i.e. chars
# that readline treats incorrectly as delimiters, but we
# don't want to treat as delimiters in filename matching
# when escaped with backslash
protectables = ' ()[]{}'
if text.startswith('!'):
text = text[1:]
text_prefix = '!'
else:
text_prefix = ''
def protect_filename(s):
return "".join([(ch in protectables and '\\' + ch or ch)
for ch in s])
def single_dir_expand(matches):
"Recursively expand match lists containing a single dir."
if len(matches) == 1 and os.path.isdir(matches[0]):
# Takes care of links to directories also. Use '/'
# explicitly, even under Windows, so that name completions
# don't end up escaped.
d = matches[0]
if d[-1] in ['/','\\']:
d = d[:-1]
subdirs = os.listdir(d)
if subdirs:
matches = [ (d + '/' + p) for p in subdirs]
return single_dir_expand(matches)
else:
return matches
else:
return matches
lbuf = self.lbuf
open_quotes = 0 # track strings with open quotes
try:
lsplit = shlex.split(lbuf)[-1]
except ValueError:
# typically an unmatched ", or backslash without escaped char.
if lbuf.count('"')==1:
open_quotes = 1
lsplit = lbuf.split('"')[-1]
elif lbuf.count("'")==1:
open_quotes = 1
lsplit = lbuf.split("'")[-1]
else:
return []
except IndexError:
# tab pressed on empty line
lsplit = ""
if lsplit != protect_filename(lsplit):
# if protectables are found, do matching on the whole escaped
# name
has_protectables = 1
text0,text = text,lsplit
else:
has_protectables = 0
text = os.path.expanduser(text)
if text == "":
return [text_prefix + protect_filename(f) for f in self.glob("*")]
m0 = self.clean_glob(text.replace('\\',''))
if has_protectables:
# If we had protectables, we need to revert our changes to the
# beginning of filename so that we don't double-write the part
# of the filename we have so far
len_lsplit = len(lsplit)
matches = [text_prefix + text0 +
protect_filename(f[len_lsplit:]) for f in m0]
else:
if open_quotes:
# if we have a string with an open quote, we don't need to
# protect the names at all (and we _shouldn't_, as it
# would cause bugs when the filesystem call is made).
matches = m0
else:
matches = [text_prefix +
protect_filename(f) for f in m0]
#print 'mm',matches # dbg
return single_dir_expand(matches)
def alias_matches(self, text):
"""Match internal system aliases"""
#print 'Completer->alias_matches:',text,'lb',self.lbuf # dbg
# if we are not in the first 'item', alias matching
# doesn't make sense - unless we are starting with 'sudo' command.
if ' ' in self.lbuf.lstrip() and not self.lbuf.lstrip().startswith('sudo'):
return []
text = os.path.expanduser(text)
aliases = self.alias_table.keys()
if text == "":
return aliases
else:
return [alias for alias in aliases if alias.startswith(text)]
def python_matches(self,text):
"""Match attributes or global python names"""
#print 'Completer->python_matches, txt=<%s>' % text # dbg
if "." in text:
try:
matches = self.attr_matches(text)
if text.endswith('.') and self.omit__names:
if self.omit__names == 1:
# true if txt is _not_ a __ name, false otherwise:
no__name = (lambda txt:
re.match(r'.*\.__.*?__',txt) is None)
else:
# true if txt is _not_ a _ name, false otherwise:
no__name = (lambda txt:
re.match(r'.*\._.*?',txt) is None)
matches = filter(no__name, matches)
except NameError:
# catches <undefined attributes>.<tab>
matches = []
else:
matches = self.global_matches(text)
# this is so completion finds magics when automagic is on:
if (matches == [] and
not text.startswith(os.sep) and
not ' ' in self.lbuf):
matches = self.attr_matches(self.magic_prefix+text)
return matches
def _default_arguments(self, obj):
"""Return the list of default arguments of obj if it is callable,
or empty list otherwise."""
if not (inspect.isfunction(obj) or inspect.ismethod(obj)):
# for classes, check for __init__,__new__
if inspect.isclass(obj):
obj = (getattr(obj,'__init__',None) or
getattr(obj,'__new__',None))
# for all others, check if they are __call__able
elif hasattr(obj, '__call__'):
obj = obj.__call__
# XXX: is there a way to handle the builtins ?
try:
args,_,_1,defaults = inspect.getargspec(obj)
if defaults:
return args[-len(defaults):]
except TypeError: pass
return []
def python_func_kw_matches(self,text):
"""Match named parameters (kwargs) of the last open function"""
if "." in text: # a parameter cannot be dotted
return []
try: regexp = self.__funcParamsRegex
except AttributeError:
regexp = self.__funcParamsRegex = re.compile(r'''
'.*?' | # single quoted strings or
".*?" | # double quoted strings or
\w+ | # identifier
\S # other characters
''', re.VERBOSE | re.DOTALL)
# 1. find the nearest identifier that comes before an unclosed
# parenthesis e.g. for "foo (1+bar(x), pa", the candidate is "foo"
tokens = regexp.findall(self.get_line_buffer())
tokens.reverse()
iterTokens = iter(tokens); openPar = 0
for token in iterTokens:
if token == ')':
openPar -= 1
elif token == '(':
openPar += 1
if openPar > 0:
# found the last unclosed parenthesis
break
else:
return []
# 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
ids = []
isId = re.compile(r'\w+$').match
while True:
try:
ids.append(iterTokens.next())
if not isId(ids[-1]):
ids.pop(); break
if not iterTokens.next() == '.':
break
except StopIteration:
break
# lookup the candidate callable matches either using global_matches
# or attr_matches for dotted names
if len(ids) == 1:
callableMatches = self.global_matches(ids[0])
else:
callableMatches = self.attr_matches('.'.join(ids[::-1]))
argMatches = []
for callableMatch in callableMatches:
try: namedArgs = self._default_arguments(eval(callableMatch,
self.namespace))
except: continue
for namedArg in namedArgs:
if namedArg.startswith(text):
argMatches.append("%s=" %namedArg)
return argMatches
def dispatch_custom_completer(self,text):
#print "Custom! '%s' %s" % (text, self.custom_completers) # dbg
line = self.full_lbuf
if not line.strip():
return None
event = Struct()
event.line = line
event.symbol = text
cmd = line.split(None,1)[0]
event.command = cmd
#print "\ncustom:{%s]\n" % event # dbg
# for foo etc, try also to find completer for %foo
if not cmd.startswith(self.magic_escape):
try_magic = self.custom_completers.s_matches(
self.magic_escape + cmd)
else:
try_magic = []
for c in itertools.chain(
self.custom_completers.s_matches(cmd),
try_magic,
self.custom_completers.flat_matches(self.lbuf)):
#print "try",c # dbg
try:
res = c(event)
return [r for r in res if r.lower().startswith(text.lower())]
except ipapi.TryNext:
pass
return None
def complete(self, text, state,line_buffer=None):
"""Return the next possible completion for 'text'.
This is called successively with state == 0, 1, 2, ... until it
returns None. The completion should begin with 'text'.
:Keywords:
- line_buffer: string
If not given, the completer attempts to obtain the current line buffer
        via readline. This keyword allows clients requesting text completions
        in non-readline contexts to inform the completer of the entire text.
"""
#print '\n*** COMPLETE: <%s> (%s)' % (text,state) # dbg
# if there is only a tab on a line with only whitespace, instead
# of the mostly useless 'do you want to see all million
# completions' message, just do the right thing and give the user
# his tab! Incidentally, this enables pasting of tabbed text from
# an editor (as long as autoindent is off).
# don't apply this on 'dumb' terminals, such as emacs buffers, so we
# don't interfere with their own tab-completion mechanism.
if line_buffer is None:
self.full_lbuf = self.get_line_buffer()
else:
self.full_lbuf = line_buffer
if not (self.dumb_terminal or self.full_lbuf.strip()):
self.readline.insert_text('\t')
return None
magic_escape = self.magic_escape
magic_prefix = self.magic_prefix
self.lbuf = self.full_lbuf[:self.readline.get_endidx()]
try:
if text.startswith(magic_escape):
text = text.replace(magic_escape,magic_prefix)
elif text.startswith('~'):
text = os.path.expanduser(text)
if state == 0:
custom_res = self.dispatch_custom_completer(text)
if custom_res is not None:
# did custom completers produce something?
self.matches = custom_res
else:
# Extend the list of completions with the results of each
# matcher, so we return results to the user from all
# namespaces.
if self.merge_completions:
self.matches = []
for matcher in self.matchers:
self.matches.extend(matcher(text))
else:
for matcher in self.matchers:
self.matches = matcher(text)
if self.matches:
break
try:
return self.matches[state].replace(magic_prefix,magic_escape)
except IndexError:
return None
except:
#from IPython.ultraTB import AutoFormattedTB; # dbg
#tb=AutoFormattedTB('Verbose');tb() #dbg
# If completion fails, don't annoy the user.
return None
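
# Illustrative sketch (not part of the original module): the core idea behind
# attr_matches() - split 'expr.attr' with a regular expression, evaluate the
# expression part, and filter dir() of the result by the attribute prefix.
# This standalone, stdlib-only toy ignores the namespace handling above.
if __name__ == '__main__':
    import re as _re

    def _toy_attr_matches(text, namespace):
        m = _re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
        if not m:
            return []
        expr, attr = m.group(1, 3)
        obj = eval(expr, namespace)
        return ['%s.%s' % (expr, w) for w in dir(obj) if w.startswith(attr)]

    # Completing 's.st' against a string object yields e.g. 's.startswith', 's.strip'
    print(_toy_attr_matches('s.st', {'s': 'hello'}))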
| |
#-*- coding: utf8
'''
This module contains the mixture of odes used for each
infectivity model. The classes here defined are used for
fitting the Phoenix-R model.
'''
from __future__ import division, print_function
from numpy.linalg import LinAlgError
from phoenix import ode
from phoenix.peak_finder import find_peaks
from phoenix.score_funcs import bic
from phoenix.score_funcs import msq
from phoenix.score_funcs import mdl
import lmfit
import multiprocessing as mp
import numpy as np
RESIDUALS = set(['lin', 'log', 'mean'])
SCORES = {'bic':bic,
'msq':msq,
'mdl':mdl}
def period_fun(period, amp, phase, t):
'''
Simulates a sine wave to account for periodicity.
Parameters
----------
period : double
the period of the sine wave
amp : double
the amplitude of the sine wave
phase : double
used to correct the period. For example, if the data starts on a
monday and has a seven day period, we need the phase to sync the
sine wave to peak on wednesdays.
t : array like or number
the time tick to evaluate the period function
'''
return 1 - .5 * amp * (np.sin(2 * np.pi * (t + phase) / period) + 1)
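
# Illustrative sketch (not part of the original module): evaluating period_fun
# over one made-up seven-day window. With amp=1 the multiplier oscillates
# between 0 and 1; with amp=0 it stays at 1 (no periodic damping).
if __name__ == '__main__':
    ticks = np.arange(7)
    print(period_fun(7, 1.0, 0.0, ticks))
    print(period_fun(7, 0.0, 0.0, ticks))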
def phoenix_r_with_period(parameters, num_ticks, return_audience=False):
'''
Calls the Phoenix-R model adding a period function
Parameters
----------
parameters : dict like
the parameters for the phoenix-r equations. See `ode.phoenix_r` for
details.
num_ticks : int
number of ticks to simulate
See Also
--------
ode.phoenix_r : for the actual phoenix R equations
'''
#the 1e-6 avoids underflows.
result = ode.phoenix_r(parameters, num_ticks, return_audience) + 1e-6
if isinstance(parameters, lmfit.Parameters):
amp = parameters['amp'].value
phase = parameters['phase'].value
period = parameters['period'].value
else:
amp = parameters['amp']
phase = parameters['phase']
period = parameters['period']
if not return_audience:
result *= period_fun(period, amp, phase, np.arange(num_ticks))
return result
def residual(params, tseries, residual_metric, fit_audience=False):
'''
    Computes the residual of the model. Different strategies can be used
    for this computation, such as:
1. lin (Sum squared errors) - Returns the sum of squared errors of the
model
2. log (Sum squared errors on log) - The same as msq but transforms
the data and model to log scales. Useful for when the data has a high
variability in values. This is close to minimizing the squared of
the relative error
3. mean (mean squared errors) - Returns the mean squared errors of
the model
Parameters
----------
params : dict like
Parameters to input to the model
tseries : array like (or matrix if fit_audience is True)
        Time series which the model tries to capture. If we will
        also fit the audience, a matrix should be passed.
residual_metric : string in ('lin', 'log', 'mean')
Error metric to use, defaults to mean
fit_audience : bool
Indicates if the audience should be fitted, not the popularity.
Returns
-------
This method returns an array with:
1. sum($tseries[i] - model[i]$) in the case of 'lin'.
    2. sum($log(tseries[i]) - log(model[i])$) in the case of 'log'.
3. mean($tseries[i] - model[i]$) in the case of 'mean'.
'''
if residual_metric not in RESIDUALS:
        raise ValueError('Must choose residual from ' + ' '.join(RESIDUALS))
est = phoenix_r_with_period(params, tseries.shape[0], fit_audience)
data = tseries
#zeros are missing values
msk = (est > 0) & (data > 0)
if not msk.any():
return (data - data.mean())
data = data[msk]
est = est[msk]
if residual_metric == 'log':
return np.log(data) - np.log(est)
else:
n = data.shape[0]
if residual_metric == 'mean':
div = np.sqrt(n)
else:
div = 1
return (data - est) / div
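
# Illustrative sketch (not part of the original module): the three residual
# flavours computed by hand on made-up data/estimate arrays, mirroring the
# 'lin', 'log' and 'mean' branches above.
if __name__ == '__main__':
    data = np.array([10.0, 20.0, 40.0])
    est = np.array([12.0, 18.0, 44.0])
    print('lin  %s' % (data - est))
    print('log  %s' % (np.log(data) - np.log(est)))
    print('mean %s' % ((data - est) / np.sqrt(data.shape[0])))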
def _params_to_list_of_tuples(params, ignore=None):
copy = []
for key in params:
parameter = params[key]
if ignore and parameter.name in ignore:
continue
copy.append((\
parameter.name, parameter.value, parameter.vary, \
parameter.min, parameter.max, parameter.expr))
return copy
def _fit_one(tseries, period, residual_metric, curr_sp, curr_pv, fit_audience,
curr_params, first_sp):
init_params = []
#copy current parameters
if curr_params is not None:
ignore = ('num_models', 'start_points')
init_params.extend(_params_to_list_of_tuples(curr_params, ignore))
else:
#On the first run we search for a period
init_params.append(\
('period', period, False))
init_params.append(\
('amp', 0.01, True, 0, 1))
init_params.append(\
('phase', 0.01, True, 0, period))
#Add the new shock
if curr_sp != 0:
init_params.append(\
('s0_%d' % curr_sp, curr_pv, True, 0))
init_params.append(\
('i0_%d' % curr_sp, 1, False))
init_params.append(\
('sp_%d' % curr_sp, curr_sp, False))
init_params.append(\
('beta_%d' % curr_sp, np.random.rand(), True, 0, 1))
init_params.append(\
('r_%d' % curr_sp, np.random.rand(), True, 0))
init_params.append(\
('gamma_%d' % curr_sp, np.random.rand(), True, 0, 1))
#Add the num models and start points params
if curr_params and 'start_points' in curr_params:
start_points = [x for x in curr_params['start_points'].value]
else:
start_points = []
start_points.append(curr_sp)
num_models = len(start_points)
init_params.append(('start_points', start_points, False))
init_params.append(('num_models', num_models, False))
if curr_sp == first_sp:
#Grid search for s0_0
best_err = np.inf
best_params = None
for s0_0 in np.logspace(2, 6, 21):
params = lmfit.Parameters()
params.add_many(*init_params)
params.add('s0_%d' % first_sp, value=s0_0, vary=True, min=0)
try:
lmfit.minimize(residual, params, \
args=(tseries, residual_metric, fit_audience),
ftol=.0000001, xtol=.0000001)
resid = residual(params, tseries, residual_metric, fit_audience)
err = (resid ** 2).sum()
if err < best_err:
best_err = err
best_params = params
except (AssertionError, LinAlgError, FloatingPointError, \
ZeroDivisionError, TypeError):
continue
#ugly ugly hack. stick with last guess if none worked
if best_params is None:
best_params = params
else:
try:
best_params = lmfit.Parameters()
best_params.add_many(*init_params)
lmfit.minimize(residual, best_params, \
args=(tseries, residual_metric, fit_audience), \
ftol=.0001, xtol=.0001)
except (AssertionError, LinAlgError, FloatingPointError, \
ZeroDivisionError, TypeError):
best_params = curr_params
return best_params
class FixedParamsPhoenixR(object):
'''
PhoenixR model with parameters
Parameters
----------
parameters : dict like
The parameters for the model
score_func : string in {'bic', 'msq', 'mdl'}
Select the score to store.
'''
def __init__(self, parameters, score_func='mdl'):
self.parameters = parameters
self.score_func = SCORES[score_func]
self.num_params = None
self.score = None
def __call__(self, num_ticks):
return phoenix_r_with_period(self.parameters, num_ticks)
def fit(self, tseries):
tseries = np.asanyarray(tseries)
num_models = 0
if isinstance(self.parameters, lmfit.Parameters):
num_models = self.parameters['num_models'].value
else:
num_models = self.parameters['num_models']
self.num_params = 5 * num_models + 2
self.score = self.score_func(phoenix_r_with_period(self.parameters, \
tseries.shape[0]), tseries, self.num_params, self.parameters)
return self
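#Usage sketch (illustrative; fitted_params stands for a dict or
#lmfit.Parameters object in the format produced by the fitting classes in this
#module, i.e. containing at least a 'num_models' entry):
#
#   model = FixedParamsPhoenixR(fitted_params, score_func='mdl').fit(tseries)
#   curve = model(tseries.shape[0])
#   best_score = model.score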
class FixedStartPhoenixR(object):
'''
    PhoenixR model with fixed start points. The model fits one start
    point at a time, adding new ones in order. A final fit is performed
    using the results of the previous ones as start points.
Parameters
----------
start_points : array like
List of start points for each infection. The algorithm will fit each
start point in the order it appears on this list.
peak_volumes : array like
        The peak volumes for each start point
period : integer
Period to consider. If time windows are daily, 7 means weekly period
residual_metric : string in ('lin', 'log', 'mean')
Error metric to minimize on the residual. See the function
residual for more details.
score_func : string in ('bic', 'msq', 'mdl')
Select the score to store.
'''
def __init__(self, start_points, peak_volumes, period=7, \
residual_metric='mean', score_func='mdl'):
self.start_points = np.asanyarray(start_points, dtype='i')
self.peak_volumes = np.asanyarray(peak_volumes, dtype='f')
assert self.start_points.shape[0] == self.peak_volumes.shape[0]
self.period = period
self.residual_metric = residual_metric
self.score_func = SCORES[score_func]
self.parameters = None
self.num_params = None
self.score = None
def __call__(self, num_ticks):
return phoenix_r_with_period(self.parameters, num_ticks)
def fit(self, tseries):
tseries = np.asanyarray(tseries)
start_points = [sp for sp in self.start_points]
old_state = np.seterr(all='raise')
params = None
for i in xrange(self.start_points.shape[0]):
sp = self.start_points[i]
pv = self.peak_volumes[i]
params = \
_fit_one(tseries, self.period, self.residual_metric, sp, pv, \
False, params, self.start_points[0])
num_models = len(start_points)
self.num_params = 5 * num_models + 2
self.parameters = params
try:
model = phoenix_r_with_period(self.parameters, tseries.shape[0])
except (LinAlgError, FloatingPointError, ZeroDivisionError):
model = tseries.mean()
self.score = self.score_func(model, tseries, self.num_params, \
self.parameters)
np.seterr(**old_state)
return self
class WavePhoenixR(object):
'''
    PhoenixR model with fixed wavelet-based shock starts. The results reported
    in the paper use this class.
Parameters
----------
period : integer
Period to consider. If time windows are daily, 7 means weekly period
wave_widths : array like
Widths to test while searching for peaks and start points
threshold : double
        New shocks keep being added while the score does not worsen by more
        than this fraction relative to the best score found so far.
residual_metric : string in ('lin', 'log', 'mean')
Error metric to minimize on the residual. See the function
residual for more details.
score_func : string in {'bic', 'msq', 'mdl'}
Select the score to store.
fit_audience : bool
Indicates if the audience should be fit, not the popularity.
'''
def __init__(self, period=7, wave_widths=[1, 2, 4, 8, 16, 32, 64, 128, 256],
threshold=.05, residual_metric='mean', score_func='mdl',
fit_audience=False):
self.period = period
self.wave_widths = wave_widths
self.threshold = threshold
self.residual_metric = residual_metric
self.score_func = SCORES[score_func]
self.fit_audience = fit_audience
self.parameters = None
self.num_params = None
self.score = None
def __call__(self, num_ticks, return_audience=False):
return phoenix_r_with_period(self.parameters, num_ticks, return_audience)
def _wave_fit(self, tseries, candidate_start_points, candidate_peak_volumes):
'''
        This method has the fitting strategy to minimize the cost.
'''
period = self.period
threshold = self.threshold
residual_metric = self.residual_metric
score_func = self.score_func
fit_audience = self.fit_audience
curr_score = np.finfo('d').max
best_score = np.finfo('d').max
best_params = None
params = None
for i in xrange(len(candidate_start_points)):
sp = candidate_start_points[i]
pv = candidate_peak_volumes[i]
params = _fit_one(tseries, period, residual_metric, sp, pv, \
fit_audience, params, candidate_start_points[0])
model = phoenix_r_with_period(params, tseries.shape[0])
num_params = 5 * (i + 1) + 2
curr_score = self.score_func(model, tseries, num_params, params)
if (curr_score <= best_score):
best_params = params
best_score = curr_score
else:
increased_score = (curr_score - best_score) / best_score
if increased_score > threshold:
break
return best_score, best_params
def fit(self, tseries):
tseries = np.asanyarray(tseries)
peaks = find_peaks(tseries, self.wave_widths)
        #First start_point is the first non-zero data tick
first_nonz = np.where(tseries > 0)[0][0]
candidate_start_points = []
candidate_start_points.append(first_nonz)
#first peak is searched for by grid search
candidate_peak_volumes = []
candidate_peak_volumes.append(1)
for x in peaks:
candidate_sp = max(x[2] - x[1], first_nonz + 1)
peak_vol = max(tseries[x[2]] - tseries[candidate_sp], 0)
if peak_vol == 0:
continue
if candidate_sp not in candidate_start_points:
candidate_start_points.append(candidate_sp)
candidate_peak_volumes.append(peak_vol)
best_score = np.finfo('d').max
best_params = None
for _ in xrange(5):
score, params = self._wave_fit(tseries, candidate_start_points,\
candidate_peak_volumes)
if score < best_score:
best_score = score
best_params = params
self.parameters = best_params
self.num_params = 5 * self.parameters['num_models'].value + 2
self.score = best_score
return self
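#Minimal end-to-end sketch of the wavelet-based fit (illustrative only; the
#time series values below are made up):
#
#   tseries = np.asarray([0, 1, 5, 20, 13, 8, 40, 25, 12, 7, 4, 2], dtype='d')
#   model = WavePhoenixR(period=7, residual_metric='mean', score_func='mdl')
#   model.fit(tseries)
#   fitted_curve = model(tseries.shape[0])
#   best_score, best_params = model.score, model.parameters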
| |
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import array, poly1d
from scipy.interpolate import interp1d
from scipy.special import beta
# The following code was used to generate the Pade coefficients for the
# Tukey Lambda variance function. Version 0.17 of mpmath was used.
#---------------------------------------------------------------------------
# import mpmath as mp
#
# mp.mp.dps = 60
#
# one = mp.mpf(1)
# two = mp.mpf(2)
# three = mp.mpf(3)
#
# def mpvar(lam):
# if lam == 0:
# v = mp.pi**2 / three
# else:
# v = (two / lam**2) * (one / (one + two*lam) -
# mp.beta(lam + one, lam + one))
# return v
#
# t = mp.taylor(mpvar, 0, 8)
# p, q = mp.pade(t, 4, 4)
# print "p =", [mp.fp.mpf(c) for c in p]
# print "q =", [mp.fp.mpf(c) for c in q]
#---------------------------------------------------------------------------
# Pade coefficients for the Tukey Lambda variance function.
_tukeylambda_var_pc = [3.289868133696453, 0.7306125098871127,
-0.5370742306855439, 0.17292046290190008,
-0.02371146284628187]
_tukeylambda_var_qc = [1.0, 3.683605511659861, 4.184152498888124,
1.7660926747377275, 0.2643989311168465]
# numpy.poly1d instances for the numerator and denominator of the
# Pade approximation to the Tukey Lambda variance.
_tukeylambda_var_p = poly1d(_tukeylambda_var_pc[::-1])
_tukeylambda_var_q = poly1d(_tukeylambda_var_qc[::-1])
def tukeylambda_variance(lam):
"""Variance of the Tukey Lambda distribution.
Parameters
----------
lam : array_like
The lambda values at which to compute the variance.
Returns
-------
v : ndarray
The variance. For lam < -0.5, the variance is not defined, so
        np.nan is returned. For lam = -0.5, np.inf is returned.
Notes
-----
In an interval around lambda=0, this function uses the [4,4] Pade
approximation to compute the variance. Otherwise it uses the standard
formula (http://en.wikipedia.org/wiki/Tukey_lambda_distribution). The
Pade approximation is used because the standard formula has a removable
discontinuity at lambda = 0, and does not produce accurate numerical
results near lambda = 0.
"""
lam = np.asarray(lam)
shp = lam.shape
lam = np.atleast_1d(lam).astype(np.float64)
# For absolute values of lam less than threshold, use the Pade
# approximation.
threshold = 0.075
# Play games with masks to implement the conditional evaluation of
# the distribution.
# lambda < -0.5: var = nan
low_mask = lam < -0.5
# lambda == -0.5: var = inf
neghalf_mask = lam == -0.5
# abs(lambda) < threshold: use Pade approximation
small_mask = np.abs(lam) < threshold
# else the "regular" case: use the explicit formula.
reg_mask = ~(low_mask | neghalf_mask | small_mask)
# Get the 'lam' values for the cases where they are needed.
small = lam[small_mask]
reg = lam[reg_mask]
# Compute the function for each case.
v = np.empty_like(lam)
v[low_mask] = np.nan
v[neghalf_mask] = np.inf
if small.size > 0:
# Use the Pade approximation near lambda = 0.
v[small_mask] = _tukeylambda_var_p(small) / _tukeylambda_var_q(small)
if reg.size > 0:
v[reg_mask] = (2.0 / reg**2) * (1.0 / (1.0 + 2 * reg) -
beta(reg + 1, reg + 1))
v.shape = shp
return v
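# Example usage (a small sketch; both scalar and array inputs are accepted and
# the input shape is preserved):
#
#   tukeylambda_variance(0.0)            # ~ pi**2 / 3, via the Pade approximation
#   tukeylambda_variance([-1.0, -0.5])   # array([nan, inf])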
# The following code was used to generate the Pade coefficients for the
# Tukey Lambda kurtosis function. Version 0.17 of mpmath was used.
#---------------------------------------------------------------------------
# import mpmath as mp
#
# mp.mp.dps = 60
#
# one = mp.mpf(1)
# two = mp.mpf(2)
# three = mp.mpf(3)
# four = mp.mpf(4)
#
# def mpkurt(lam):
# if lam == 0:
# k = mp.mpf(6)/5
# else:
# numer = (one/(four*lam+one) - four*mp.beta(three*lam+one, lam+one) +
# three*mp.beta(two*lam+one, two*lam+one))
# denom = two*(one/(two*lam+one) - mp.beta(lam+one,lam+one))**2
# k = numer / denom - three
# return k
#
# # There is a bug in mpmath 0.17: when we use the 'method' keyword of the
# # taylor function and we request a degree 9 Taylor polynomial, we actually
# # get degree 8.
# t = mp.taylor(mpkurt, 0, 9, method='quad', radius=0.01)
# t = [mp.chop(c, tol=1e-15) for c in t]
# p, q = mp.pade(t, 4, 4)
# print "p =", [mp.fp.mpf(c) for c in p]
# print "q =", [mp.fp.mpf(c) for c in q]
#---------------------------------------------------------------------------
# Pade coefficients for the Tukey Lambda kurtosis function.
_tukeylambda_kurt_pc = [1.2, -5.853465139719495, -22.653447381131077,
0.20601184383406815, 4.59796302262789]
_tukeylambda_kurt_qc = [1.0, 7.171149192233599, 12.96663094361842,
0.43075235247853005, -2.789746758009912]
# numpy.poly1d instances for the numerator and denominator of the
# Pade approximation to the Tukey Lambda kurtosis.
_tukeylambda_kurt_p = poly1d(_tukeylambda_kurt_pc[::-1])
_tukeylambda_kurt_q = poly1d(_tukeylambda_kurt_qc[::-1])
def tukeylambda_kurtosis(lam):
"""Kurtosis of the Tukey Lambda distribution.
Parameters
----------
lam : array_like
        The lambda values at which to compute the kurtosis.
Returns
-------
    k : ndarray
        The kurtosis. For lam < -0.25, the kurtosis is not defined, so
        np.nan is returned. For lam = -0.25, np.inf is returned.
"""
lam = np.asarray(lam)
shp = lam.shape
lam = np.atleast_1d(lam).astype(np.float64)
# For absolute values of lam less than threshold, use the Pade
# approximation.
threshold = 0.055
# Use masks to implement the conditional evaluation of the kurtosis.
# lambda < -0.25: kurtosis = nan
low_mask = lam < -0.25
# lambda == -0.25: kurtosis = inf
negqrtr_mask = lam == -0.25
# lambda near 0: use Pade approximation
small_mask = np.abs(lam) < threshold
# else the "regular" case: use the explicit formula.
reg_mask = ~(low_mask | negqrtr_mask | small_mask)
# Get the 'lam' values for the cases where they are needed.
small = lam[small_mask]
reg = lam[reg_mask]
# Compute the function for each case.
k = np.empty_like(lam)
k[low_mask] = np.nan
k[negqrtr_mask] = np.inf
if small.size > 0:
k[small_mask] = _tukeylambda_kurt_p(small) / _tukeylambda_kurt_q(small)
if reg.size > 0:
numer = (1.0 / (4 * reg + 1) - 4 * beta(3 * reg + 1, reg + 1) +
3 * beta(2 * reg + 1, 2 * reg + 1))
denom = 2 * (1.0/(2 * reg + 1) - beta(reg + 1, reg + 1))**2
k[reg_mask] = numer / denom - 3
# The return value will be a numpy array; resetting the shape ensures that
# if `lam` was a scalar, the return value is a 0-d array.
k.shape = shp
return k
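# Example usage (a small sketch), mirroring tukeylambda_variance above:
#
#   tukeylambda_kurtosis(0.0)            # ~ 1.2, the excess kurtosis of the logistic case
#   tukeylambda_kurtosis([-1.0, -0.25])  # array([nan, inf])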
| |
import json
import sys
import requests
from requests.auth import HTTPBasicAuth
class ConfigEngine:
"""
Processes the configuration file to find what the promotion gates are and
the information necessary to grab the test data
"""
def required_config_error(self, required_item):
"""
Prints an error message if a part of the configuration file is not found
Keyword arguments:
required_item - the item that is missing from the configuration file
"""
print("ERROR: Unable to find {0}".format(required_item))
sys.exit(1)
def retrieve_config(self):
"""
Finds the configuration file and grabs the JSON data out of it
"""
# Look for arguments telling us where the config file is located
if ((self.arg_lr != None)):
# Config is located remotely
try:
config_file = requests.get(self.arg_lr)
return config_file.json()
            except Exception:
# Not able to find a configuration file at the specified location, quit out
print("ERROR: Unable to find properly formatted remote configuration file")
sys.exit(1)
# Config is stored locally
try:
with open(self.filename) as config_file:
return json.load(config_file)
        except Exception:
print("ERROR: Unable to find properly formatted config.json file")
sys.exit(1)
def process_config(self):
"""
Gets the configuration file and processes it
"""
# Get the config file
config_json = self.retrieve_config()
# Stores all of the data in a format that the dataengine and
        # comparisonengine can deal with
config_output = {}
# Variables used to note whether or not these modules were set up in
# the configuration file. Default to False
appdynamics_exists = False
blazemeter_exists = False
webpagetest_exists = False
elastic_kibana_exists = False
# Make sure that all of the config sections are there
if "appdynamics" in config_json:
appdynamics_exists = True
if "blazemeter" in config_json:
blazemeter_exists = True
if "webpagetest" in config_json:
webpagetest_exists = True
if "promotion_gates" not in config_json:
# If the promotion gates aren't in there, there's no use running the program
self.required_config_error("promotion gates")
if "elastic_kibana" in config_json:
elastic_kibana_exists = True
if (appdynamics_exists == False and blazemeter_exists == False and webpagetest_exists == False):
            # If none of the modules exist, there's no way to get any data
self.required_config_error("AppDynamics, BlazeMeter or WebPageTest")
# AppDynamics Module
config_output["appdynamics"] = {}
if (appdynamics_exists):
# AppDynamics Configuration Information -- Required
if ("username" not in config_json["appdynamics"]) and (self.arg_appduser == None):
self.required_config_error("AppDynamics username")
elif ("password" not in config_json["appdynamics"]) and (self.arg_appdpass == None):
self.required_config_error("AppDynamics password")
elif ("application_name" not in config_json["appdynamics"]) and (self.arg_appdapp == None):
self.required_config_error("AppDynamics application name")
# Two ways to set length (load_test_length_min or load_test_start_ms and load_test_end_ms)
# Check for:
# - load_test_length_min is not set and at least one of the start/end times are not set
# - load_test_length_min and load_test_start_ms or load_test_end_ms are set (both of the options are set)
elif ((("load_test_length_min" not in config_json["appdynamics"]) and (("load_test_start_ms" not in config_json["appdynamics"]) or ("load_test_end_ms" not in config_json["appdynamics"]))) or
(("load_test_length_min" in config_json["appdynamics"]) and (("load_test_start_ms" in config_json["appdynamics"]) or ("load_test_end_ms" in config_json["appdynamics"])))):
self.required_config_error("AppDynamics load test length")
else:
if (self.arg_appduser == None):
config_output["appdynamics"]["username"] = config_json["appdynamics"]["username"]
else:
config_output["appdynamics"]["username"] = self.arg_appduser
if (self.arg_appdpass == None):
config_output["appdynamics"]["password"] = config_json["appdynamics"]["password"]
else:
config_output["appdynamics"]["password"] = self.arg_appdpass
if (self.arg_appdapp == None):
config_output["appdynamics"]["application_name"] = config_json["appdynamics"]["application_name"]
else:
config_output["appdynamics"]["application_name"] = self.arg_appdapp
# The complicated load test length stuff
if ("load_test_length_min" in config_json["appdynamics"]):
config_output["appdynamics"]["load_test_length"] = config_json["appdynamics"]["load_test_length_min"]
elif (("load_test_start_ms" in config_json["appdynamics"]) and ("load_test_end_ms" in config_json["appdynamics"])):
config_output["appdynamics"]["load_test_start_ms"] = config_json["appdynamics"]["load_test_start_ms"]
config_output["appdynamics"]["load_test_end_ms"] = config_json["appdynamics"]["load_test_end_ms"]
else:
# Something slipped through the cracks somehow, error out
self.required_config_error("AppDynamics load test length")
# AppDynamics Promotion Gates -- Optional
if ((("warning" not in config_json["promotion_gates"]) and ("critical" not in config_json["promotion_gates"])) or
(("warning" in config_json["promotion_gates"]) and (config_json["promotion_gates"]["warning"] == False) and
("critical" in config_json["promotion_gates"]) and (config_json["promotion_gates"]["critical"] == False))):
# AppDynamics configuration information exists, but none of the metrics do (or we were told to ignore those that do exist)
# Pretend AppDynamics configuration information doesn't exist either so
# that we don't waste our time querying the AppDynamics API
appdynamics_exists = False
config_output["appdynamics"] = {"exists": False}
else:
# AppDynamics still exists
config_output["appdynamics"]["exists"] = True
# Make sure that we can put in promotion gates
if ("promotion_gates" not in config_output):
config_output["promotion_gates"] = {}
# Warning health violation
if "warning" in config_json["promotion_gates"]:
config_output["promotion_gates"]["warning"] = config_json["promotion_gates"]["warning"]
else:
# Warning = False means that the user doesn't care about
# health violations with a status of WARNING
config_output["promotion_gates"]["warning"] = False
# Critical health violation
if "critical" in config_json["promotion_gates"]:
config_output["promotion_gates"]["critical"] = config_json["promotion_gates"]["critical"]
else:
# Critical = False means that the user doesn't care about
# health violations with a status of CRITICAL
config_output["promotion_gates"]["critical"] = False
else:
config_output["appdynamics"]["exists"] = False
# BlazeMeter Module
config_output["blazemeter"] = {}
if (blazemeter_exists):
# BlazeMeter Configuration Information -- Required
if ("api" not in config_json["blazemeter"]) and (self.arg_blzkey == None):
self.required_config_error("BlazeMeter API key")
elif ("test_id" not in config_json["blazemeter"]) and (self.arg_blztest == None):
self.required_config_error("BlazeMeter test ID")
else:
if (self.arg_blzkey == None):
config_output["blazemeter"]["api_key"] = config_json["blazemeter"]["api"]
else:
config_output["blazemeter"]["api_key"] = self.arg_blzkey
if (self.arg_blztest == None):
config_output["blazemeter"]["test_id"] = config_json["blazemeter"]["test_id"]
else:
config_output["blazemeter"]["test_id"] = self.arg_blztest
# BlazeMeter Promotion Gates -- Optional
if (("response_time_avg" not in config_json["promotion_gates"]) and
("response_time_max" not in config_json["promotion_gates"]) and
("response_time_geomean" not in config_json["promotion_gates"]) and
("response_time_stdev" not in config_json["promotion_gates"]) and
("response_time_tp90" not in config_json["promotion_gates"]) and
("response_time_tp95" not in config_json["promotion_gates"]) and
("response_time_tp99" not in config_json["promotion_gates"]) and
("latency_max" not in config_json["promotion_gates"]) and
("latency_avg" not in config_json["promotion_gates"]) and
("latency_stdev" not in config_json["promotion_gates"]) and
("bandwidth_avg" not in config_json["promotion_gates"]) and
("transaction_rate" not in config_json["promotion_gates"])):
                    # BlazeMeter configuration information exists, but none of the metrics do
# Pretend BlazeMeter configuration information doesn't exist either so
# that we don't waste our time querying the BlazeMeter API
blazemeter_exists = False
config_output["blazemeter"] = {"exists": False}
else:
# BlazeMeter still exists, put it in the config
config_output["blazemeter"]["exists"] = True
# Make sure that we can put in promotion gates
if ("promotion_gates" not in config_output):
config_output["promotion_gates"] = {}
# Average response time
if ("response_time_avg" in config_json["promotion_gates"]):
config_output["promotion_gates"]["response_time_avg"] = config_json["promotion_gates"]["response_time_avg"]
else:
# 0 means that the user doesn't care about the metric
config_output["promotion_gates"]["response_time_avg"] = 0
# Maximum response time
if ("response_time_max" in config_json["promotion_gates"]):
config_output["promotion_gates"]["response_time_max"] = config_json["promotion_gates"]["response_time_max"]
else:
config_output["promotion_gates"]["response_time_max"] = 0
# Response time geometric mean
if ("response_time_geomean" in config_json["promotion_gates"]):
config_output["promotion_gates"]["response_time_geomean"] = config_json["promotion_gates"]["response_time_geomean"]
else:
config_output["promotion_gates"]["response_time_geomean"] = 0
# Response time standard deviation
if ("response_time_stdev" in config_json["promotion_gates"]):
config_output["promotion_gates"]["response_time_stdev"] = config_json["promotion_gates"]["response_time_stdev"]
else:
config_output["promotion_gates"]["response_time_stdev"] = 0
# Response time 90% line
                    # e.g. 90% of the requests fell at or below this response time
if ("response_time_tp90" in config_json["promotion_gates"]):
config_output["promotion_gates"]["response_time_tp90"] = config_json["promotion_gates"]["response_time_tp90"]
else:
config_output["promotion_gates"]["response_time_tp90"] = 0
# Response time 95% line
if ("response_time_tp95" in config_json["promotion_gates"]):
config_output["promotion_gates"]["response_time_tp95"] = config_json["promotion_gates"]["response_time_tp95"]
else:
config_output["promotion_gates"]["response_time_tp95"] = 0
                    # Response time 99% line
if ("response_time_tp99" in config_json["promotion_gates"]):
config_output["promotion_gates"]["response_time_tp99"] = config_json["promotion_gates"]["response_time_tp99"]
else:
config_output["promotion_gates"]["response_time_tp99"] = 0
# Maximum latency
if ("latency_max" in config_json["promotion_gates"]):
config_output["promotion_gates"]["latency_max"] = config_json["promotion_gates"]["latency_max"]
else:
config_output["promotion_gates"]["latency_max"] = 0
# Average latency
if ("latency_avg" in config_json["promotion_gates"]):
config_output["promotion_gates"]["latency_avg"] = config_json["promotion_gates"]["latency_avg"]
else:
config_output["promotion_gates"]["latency_avg"] = 0
# Latency Standard Deviation
if ("latency_stdev" in config_json["promotion_gates"]):
config_output["promotion_gates"]["latency_stdev"] = config_json["promotion_gates"]["latency_stdev"]
else:
config_output["promotion_gates"]["latency_stdev"] = 0
# Average Bandwidth (AKA average bytes/second)
if ("bandwidth_avg" in config_json["promotion_gates"]):
config_output["promotion_gates"]["bandwidth_avg"] = config_json["promotion_gates"]["bandwidth_avg"]
else:
config_output["promotion_gates"]["bandwidth_avg"] = 0
# Transaction Rate (AKA hits/second)
if ("transaction_rate" in config_json["promotion_gates"]):
config_output["promotion_gates"]["transaction_rate"] = config_json["promotion_gates"]["transaction_rate"]
else:
config_output["promotion_gates"]["transaction_rate"] = 0
else:
config_output["blazemeter"]["exists"] = False
# WebPageTest Module
config_output["webpagetest"] = {}
# Have to have a list of locations that WebPageTest supports since the API
# doesn't have any sort of validation on this
available_locations = ["Dulles_IE9", "Dulles_IE10", "Dulles_IE_11", "Dulles:Chrome", "Dulles:Canary", "Dulles:Firefox",
"Dulles:Firefox Nightly", "Dulles: Safari", "Dulles_MotoG:Motorola G - Chrome", "Dulles_MotoG:Motorola G - Chrome Beta",
"Dulles_MotoG:Motorola G - Chrome Dev", "ec2-us-east-1:Chrome", "ec2-us-east-1:IE 11", "ec2-us-east-1:Firefox",
"ec2-us-east-1:Safari", "ec2-us-west-1:Chrome", "ec2-us-west-1:IE 11", "ec2-us-west-1:Firefox", "ec2-us-west-1:Safari",
"ec2-us-west-2:Chrome", "ec2-us-west-2:IE 11", "ec2-us-west-2:Firefox", "ec2-us-west-2:Safari", "ec2-eu-west-1:Chrome",
"ec2-eu-west-1:IE 11", "ec2-eu-west-1:Firefox", "ec2-eu-west-1:Safari", "ec2-eu-central-1:Chrome", "ec2-eu-central-1:IE 11",
"ec2-eu-central-1:Firefox", "ec2-eu-central-1:Safari", "ec2-ap-northeast-1:Chrome", "ec2-ap-northeast-1:IE 11",
"ec2-ap-northeast-1:Firefox", "ec2-ap-northeast-1:Safari", "ec2-ap-southeast-2:Chrome", "ec2-ap-southeast-2:IE 11",
"ec2-ap-southeast-2:Firefox", "ec2-ap-southeast-2:Safari", "ec2-sa-east-1:Chrome", "ec2-sa-east-1:IE 11",
"ec2-sa-east-1:Firefox", "ec2-sa-east-1:Safari"]
if (webpagetest_exists):
# WebPageTest Configuration Information -- Required
if ("url" not in config_json["webpagetest"]):
self.required_config_error("WebPageTest url")
elif ("location" not in config_json["webpagetest"]):
self.required_config_error("WebPageTest location")
elif (config_json["webpagetest"]["location"] not in available_locations):
self.required_config_error("the specified WebPageTest location")
elif ("runs" not in config_json["webpagetest"]):
self.required_config_error("WebPageTest runs")
elif ("api" not in config_json["webpagetest"]) and (self.arg_wpgtkey == None):
self.required_config_error("WebPageTest API key")
else:
config_output["webpagetest"] = {}
config_output["webpagetest"]["url"] = config_json["webpagetest"]["url"]
config_output["webpagetest"]["location"] = config_json["webpagetest"]["location"]
config_output["webpagetest"]["runs"] = config_json["webpagetest"]["runs"]
if (self.arg_wpgtkey == None):
config_output["webpagetest"]["api"] = config_json["webpagetest"]["api"]
else:
config_output["webpagetest"]["api"] = self.arg_wpgtkey
# WebPageTest Promotion Gates -- Optional
if ("first_view" not in config_json["promotion_gates"] and
"repeat_view" not in config_json["promotion_gates"]):
                    # WebPageTest configuration information exists, but none of the metrics do
# Pretend WebPageTest configuration information doesn't exist either so
# that we don't waste our time querying the WebPageTest API
webpagetest_exists = False
config_output["webpagetest"] = {"exists": False}
else:
# At least one of them exists
config_output["webpagetest"]["exists"] = True
# Make sure that we can put in promotion gates
if ("promotion_gates" not in config_output):
config_output["promotion_gates"] = {}
# All of the views that we have to loop over
views = ["first_view", "repeat_view"]
for view in views:
if (view in config_json["promotion_gates"]):
# Set up the view
config_output["promotion_gates"][view] = {}
# Speed Index
if ("speed_index" in config_json["promotion_gates"][view]):
config_output["promotion_gates"][view]["speed_index"] = config_json["promotion_gates"][view]["speed_index"]
else:
config_output["promotion_gates"][view]["speed_index"] = 0
# Time to First Paint
if ("first_paint" in config_json["promotion_gates"][view]):
config_output["promotion_gates"][view]["first_paint"] = config_json["promotion_gates"][view]["first_paint"]
else:
config_output["promotion_gates"][view]["first_paint"] = 0
# Time to First Byte
if ("first_byte" in config_json["promotion_gates"][view]):
config_output["promotion_gates"][view]["first_byte"] = config_json["promotion_gates"][view]["first_byte"]
else:
config_output["promotion_gates"][view]["first_byte"] = 0
# Time to Fully Loaded
if ("fully_loaded" in config_json["promotion_gates"][view]):
config_output["promotion_gates"][view]["fully_loaded"] = config_json["promotion_gates"][view]["fully_loaded"]
else:
config_output["promotion_gates"][view]["fully_loaded"] = 0
# Time to Visual Complete
if ("visual_complete" in config_json["promotion_gates"][view]):
config_output["promotion_gates"][view]["visual_complete"] = config_json["promotion_gates"][view]["visual_complete"]
else:
config_output["promotion_gates"][view]["visual_complete"] = 0
# Time to Start Render
if ("visual_complete" in config_json["promotion_gates"][view]):
config_output["promotion_gates"][view]["start_render"] = config_json["promotion_gates"][view]["start_render"]
else:
config_output["promotion_gates"][view]["start_render"] = 0
# Time to Last Visual Change
if ("last_visual_change" in config_json["promotion_gates"][view]):
config_output["promotion_gates"][view]["last_visual_change"] = config_json["promotion_gates"][view]["last_visual_change"]
else:
config_output["promotion_gates"][view]["last_visual_change"] = 0
# Time to <title></title> Tags Loaded
if ("title_time" in config_json["promotion_gates"][view]):
config_output["promotion_gates"][view]["title_time"] = config_json["promotion_gates"][view]["title_time"]
else:
config_output["promotion_gates"][view]["title_time"] = 0
# Page Size (Bytes In)
if ("page_size" in config_json["promotion_gates"][view]):
config_output["promotion_gates"][view]["page_size"] = config_json["promotion_gates"][view]["page_size"]
else:
config_output["promotion_gates"][view]["page_size"] = 0
else:
config_output["webpagetest"]["exists"] = False
if (elastic_kibana_exists):
# ElasticSearch/Kibana Configuration Information -- Required
if ("elastic_server" not in config_json["elastic_kibana"]):
self.required_config_error("ElasticSearch server")
elif ("index" not in config_json["elastic_kibana"]):
self.required_config_error("ElasticSearch index")
else:
config_output["elastic_kibana"] = {}
config_output["elastic_kibana"]["elastic_server"] = config_json["elastic_kibana"]["elastic_server"]
config_output["elastic_kibana"]["index"] = config_json["elastic_kibana"]["index"]
config_output["elastic_kibana"]["exists"] = True
else:
config_output["elastic_kibana"] = {"exists": False}
# Return all of the now properly formatted config data
return config_output
def __init__(self, filename, arg_lr, arg_ll, arg_blzkey, arg_blztest, arg_appduser, arg_appdpass, arg_appdapp, arg_wpgtkey):
"""
Class starting point
"""
# Configuration file name
self.filename = filename
# Argument - Location Remote
self.arg_lr = arg_lr
# Argument - Location Local (Specific Directory)
if (arg_ll != None):
self.filename = arg_ll
# Argument - BlazeMeter API key
self.arg_blzkey = arg_blzkey
# Argument - BlazeMeter API test ID
self.arg_blztest = arg_blztest
# Argument - AppDynamics username
self.arg_appduser = arg_appduser
# Argument - AppDynamics password
self.arg_appdpass = arg_appdpass
# Argument - AppDynamics application name
self.arg_appdapp = arg_appdapp
# Argument - WebPageTest API key
self.arg_wpgtkey = arg_wpgtkey
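# Usage sketch (illustrative; the file name and gate value below are made-up
# examples, and any command line override can be passed as None):
#
#   engine = ConfigEngine("config.json", arg_lr=None, arg_ll=None,
#                         arg_blzkey=None, arg_blztest=None, arg_appduser=None,
#                         arg_appdpass=None, arg_appdapp=None, arg_wpgtkey=None)
#   config = engine.process_config()
#
# A minimal config.json that satisfies the required checks for the BlazeMeter
# module alone might look like:
#
#   {
#       "blazemeter": {"api": "<api key>", "test_id": "<test id>"},
#       "promotion_gates": {"response_time_avg": 500}
#   }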
| |
from django.shortcuts import redirect
from finance.models import Transaction, Person
from django.shortcuts import render, render_to_response
from django.contrib.admin.views.decorators import staff_member_required
from django.template.context import RequestContext
from finance.forms import CSVInputForm, MemberTestForm
import StringIO
import zipfile
from django.http import HttpResponse
from django.core.servers.basehttp import FileWrapper
import csv
from datetime import datetime, timedelta
import os
from smtplib import SMTPRecipientsRefused
from django.core.mail import send_mail
from django.utils.timezone import now
from django.template.loader import render_to_string
def transaction_list(request):
transaction_list = Transaction.objects.filter(public=True).order_by('date')[::-1]#for debugging purposes, results should actually be paginated
return render(request, 'transaction_list.html', {
'transaction_list': transaction_list,
})
def chart_account(request):
transactions = Transaction.objects.filter(public=True).order_by('date')[:]
data = []
account = 0.0
for transaction in transactions:
account += transaction.amount
date = "Date.UTC(%d,%d,%d)"%(transaction.date.year,transaction.date.month-1,transaction.date.day)
data.append([date, account])
money_data = str(data)
money_data = money_data.replace("'", "")
money_data = money_data.replace("\"", "")
return render(request, 'chart_account.html', {
'money_data': money_data,
})
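# For reference, after the quote-stripping above the string handed to the
# template looks like (illustrative values):
#   [[Date.UTC(2014,0,15), 120.5], [Date.UTC(2014,1,2), 95.0]]
# which leaves Date.UTC(...) as bare JavaScript calls, presumably for a
# Highcharts-style date axis in chart_account.html.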
def number_of_members(request):
count = 0
for member in Person.objects.all():
if member.is_valid_member:
count += 1
return render(request, 'member_count.html', {
'member_count': count,
})
def member_test(request):
messages = []
if request.method == 'POST': # If the form has been submitted...
form = MemberTestForm(request.POST) # A form bound to the POST data
if form.is_valid(): # All validation rules pass
if form.cleaned_data['mail']:
mail = form.cleaned_data['mail']
try:
member = Person.objects.get(email_address=mail)
except Person.DoesNotExist:
member = None
form_valid = True
elif form.cleaned_data['firstname'] and form.cleaned_data['lastname']:
try:
member = Person.objects.get(firstname=form.cleaned_data['firstname'], lastname=form.cleaned_data['lastname'])
mail = member.email_address
except Person.DoesNotExist:
member = None
mail = None
form_valid = True
else:
member = None
mail = None
messages.append("Either fill in an emailaddress, or a first and last name")
form_valid = False
if form_valid:
try:
if member is None or not member.is_valid_member:
text = render_to_string('mails/member_denial.txt', dictionary={})
else:
date = member.last_payment_date + timedelta(days=366)
text = render_to_string('mails/member_confirmation.txt', dictionary={'firstname':member.firstname,
'lastname': member.lastname,
'postal_code':member.postal_code,
'final_date':date})
if mail:
send_mail('Finance Squad', text, "finance@pirateparty.be", [mail], fail_silently=False)
if form.cleaned_data['mail']:
messages.append("A mail has been sent to the mail address %s"%form.cleaned_data['mail'])
elif form.cleaned_data['firstname'] and form.cleaned_data['lastname']:
messages.append("A mail has been sent to the mail address we have registered for the pirate with the name %s %s" % (form.cleaned_data['firstname'], form.cleaned_data['lastname']))
else:
messages.append("A mail has been sent to the mail address we have registered for the pirate with the name %s %s" % (form.cleaned_data['firstname'], form.cleaned_data['lastname']))
except SMTPRecipientsRefused:
if form.cleaned_data['mail']:
messages.append("We weren't able to send a mail to the address %s"%form.cleaned_data['mail'])
elif form.cleaned_data['firstname'] and form.cleaned_data['lastname']:
messages.append("We weren't able to send a mail to the mail address we have registered for the pirate with the name %s %s" % (form.cleaned_data['firstname'], form.cleaned_data['lastname']))
else:
form = MemberTestForm() # An unbound form
return render(request, 'member_test.html', {
'form': form,
'messages': messages,
})
@staff_member_required
def backup(request):
PRIVATE_ROOT = "/home/jonas/git/ppbe-finance/"
newfile = 'sqlite.db.back.{:%Y.%m.%d}'.format(datetime.now())
if os.path.isfile(PRIVATE_ROOT + newfile):
index = 1
while os.path.isfile( PRIVATE_ROOT + "%s.%d" % (newfile,index) ):
index += 1
newfile = "%s.%d" % (newfile,index)
os.system('cp ' + PRIVATE_ROOT + "sqlite.db" + ' ' + \
PRIVATE_ROOT + newfile)
return redirect('/admin/')
@staff_member_required
def import_csv(request):
if request.method == "POST":
form = CSVInputForm(request.POST, request.FILES)
success = False
messages = ["invalid form"]
if form.is_valid():
(success, messages) = form.save()
context = {"form": form, "success": success, "messages": messages}
return render_to_response("admin/import_csv.html", context,
context_instance=RequestContext(request))
else:
form = CSVInputForm()
context = {"form": form}
return render_to_response("admin/import_csv.html", context,
context_instance=RequestContext(request))
@staff_member_required
def export_csv(request):
response = HttpResponse(content_type='application/zip')
response['Content-Disposition'] = 'attachment; filename=transactions.csv.zip'
zip_file = zipfile.ZipFile( response, "w", zipfile.ZIP_DEFLATED)
csv_file = StringIO.StringIO()
dialect = csv.excel()
dialect.quotechar = '"'
dialect.delimiter = ','
csv_writer = csv.writer(csv_file, dialect=dialect)
for transaction in Transaction.objects.order_by("date"): # generate chunk
csv_writer.writerow([transaction.date,
transaction.pirate_account.account.iban,
transaction.amount,
transaction.beneficiary.current_banking_account.iban if transaction.beneficiary.current_banking_account else "",
transaction.BIC,
transaction.beneficiary.lastname+" "+transaction.beneficiary.firstname,
"%s %s %s"%(transaction.beneficiary.street, transaction.beneficiary.postal_code, transaction.beneficiary.city),
transaction.code,
transaction.statement.encode("utf-8")])
zip_file.writestr("transactions.csv",csv_file.getvalue())
csv_file.close()
zip_file.close()
# generate the file
response['Content-Length'] = response.tell()
return response
@staff_member_required
def export_members(request):
response = HttpResponse(content_type='application/zip')
response['Content-Disposition'] = 'attachment; filename=transactions.csv.zip'
zip_file = zipfile.ZipFile( response, "w", zipfile.ZIP_DEFLATED)
csv_file = StringIO.StringIO()
dialect = csv.excel()
dialect.quotechar = '"'
dialect.delimiter = ','
csv_writer = csv.writer(csv_file, dialect=dialect)
for person in Person.objects.order_by("postal_code"): # generate chunk
if person.is_valid_member:
csv_writer.writerow([person.firstname.encode("utf-8"),
person.lastname.encode("utf-8"),
person.email_address,
person.street.encode("utf-8"),
person.postal_code,
person.city.encode("utf-8"),
person.telephone,
person.language,
person.notas.encode("utf-8"),
person.last_payment_date])
zip_file.writestr("transactions.csv",csv_file.getvalue())
csv_file.close()
zip_file.close()
# generate the file
response['Content-Length'] = response.tell()
return response
| |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons
from program_config import TensorConfig, ProgramConfig
import unittest
import numpy as np
import paddle.inference as paddle_infer
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
class TrtConvertFlattenTest_dim_2(TrtLayerAutoScanTest):
def is_program_valid(self, program_config: ProgramConfig) -> bool:
return True
def sample_program_configs(self):
def generate_input(batch):
return np.random.random([batch, 32]).astype(np.float32)
for batch in [1, 2, 4]:
for axis in [0, 1]:
for type in ["flatten", "flatten2"]:
if type == "flatten":
op_outputs = {"Out": ["output_data"]}
else:
op_outputs = {
"Out": ["output_data"],
"XShape": ["xshape_data"]
}
dics = [{"axis": axis}]
ops_config = [{
"op_type": "flatten",
"op_inputs": {
"X": ["input_data"]
},
"op_outputs": op_outputs,
"op_attrs": dics[0]
}]
ops = self.generate_op_config(ops_config)
program_config = ProgramConfig(
ops=ops,
weights={},
inputs={
"input_data": TensorConfig(
data_gen=partial(generate_input, batch))
},
outputs=["output_data"])
yield program_config
def sample_predictor_configs(
self, program_config) -> (paddle_infer.Config, List[int], float):
def generate_dynamic_shape(attrs):
self.dynamic_shape.min_input_shape = {"input_data": [1, 8]}
self.dynamic_shape.max_input_shape = {"input_data": [4, 64]}
self.dynamic_shape.opt_input_shape = {"input_data": [2, 32]}
def clear_dynamic_shape():
self.dynamic_shape.max_input_shape = {}
self.dynamic_shape.min_input_shape = {}
self.dynamic_shape.opt_input_shape = {}
def generate_trt_nodes_num(attrs, dynamic_shape):
ver = paddle_infer.get_trt_compile_version()
            if ver[0] * 1000 + ver[1] * 100 + ver[2] * 10 >= 7130:
if attrs[0]['axis'] == 1:
return 1, 2
else:
return 0, 3
else:
if dynamic_shape:
return 0, 3
if attrs[0]['axis'] == 1:
return 1, 2
else:
return 0, 3
attrs = [
program_config.ops[i].attrs
for i in range(len(program_config.ops))
]
# for static_shape
clear_dynamic_shape()
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5
# for dynamic_shape
generate_dynamic_shape(attrs)
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), generate_trt_nodes_num(attrs,
True), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num(attrs,
True), 1e-5
def test(self):
self.run_test()
class TrtConvertFlattenTest_dim_3(TrtLayerAutoScanTest):
def is_program_valid(self, program_config: ProgramConfig) -> bool:
return True
def sample_program_configs(self):
def generate_input(batch):
return np.random.random([batch, 32, 64]).astype(np.float32)
for batch in [1, 2, 4]:
for axis in [0, 1, 2]:
for type in ["flatten", "flatten2"]:
if type == "flatten":
op_outputs = {"Out": ["output_data"]}
else:
op_outputs = {
"Out": ["output_data"],
"XShape": ["xshape_data"]
}
dics = [{"axis": axis}]
ops_config = [{
"op_type": "flatten",
"op_inputs": {
"X": ["input_data"]
},
"op_outputs": op_outputs,
"op_attrs": dics[0]
}]
ops = self.generate_op_config(ops_config)
program_config = ProgramConfig(
ops=ops,
weights={},
inputs={
"input_data": TensorConfig(
data_gen=partial(generate_input, batch))
},
outputs=["output_data"])
yield program_config
def sample_predictor_configs(
self, program_config) -> (paddle_infer.Config, List[int], float):
def generate_dynamic_shape(attrs):
self.dynamic_shape.min_input_shape = {"input_data": [1, 8, 8]}
self.dynamic_shape.max_input_shape = {"input_data": [4, 64, 768]}
self.dynamic_shape.opt_input_shape = {"input_data": [2, 32, 256]}
def clear_dynamic_shape():
self.dynamic_shape.max_input_shape = {}
self.dynamic_shape.min_input_shape = {}
self.dynamic_shape.opt_input_shape = {}
def generate_trt_nodes_num(attrs, dynamic_shape):
ver = paddle_infer.get_trt_compile_version()
            if ver[0] * 1000 + ver[1] * 100 + ver[2] * 10 >= 7130:
if attrs[0]['axis'] == 1:
return 1, 2
else:
return 0, 3
else:
if dynamic_shape:
return 0, 3
if attrs[0]['axis'] == 1:
return 1, 2
else:
return 0, 3
attrs = [
program_config.ops[i].attrs
for i in range(len(program_config.ops))
]
# for static_shape
clear_dynamic_shape()
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5
# for dynamic_shape
generate_dynamic_shape(attrs)
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), generate_trt_nodes_num(attrs,
True), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num(attrs,
True), 1e-5
def test(self):
self.run_test()
class TrtConvertFlattenTest_dim_4(TrtLayerAutoScanTest):
def is_program_valid(self, program_config: ProgramConfig) -> bool:
return True
def sample_program_configs(self):
def generate_input(batch):
return np.random.random([batch, 8, 8, 8]).astype(np.float32)
for batch in [1, 2, 4]:
for axis in [0, 1, 2, 3]:
for type in ["flatten", "flatten2"]:
if type == "flatten":
op_outputs = {"Out": ["output_data"]}
else:
op_outputs = {
"Out": ["output_data"],
"XShape": ["xshape_data"]
}
dics = [{"axis": axis}]
ops_config = [{
"op_type": "flatten",
"op_inputs": {
"X": ["input_data"]
},
"op_outputs": op_outputs,
"op_attrs": dics[0]
}]
ops = self.generate_op_config(ops_config)
program_config = ProgramConfig(
ops=ops,
weights={},
inputs={
"input_data": TensorConfig(
data_gen=partial(generate_input, batch))
},
outputs=["output_data"])
yield program_config
def sample_predictor_configs(
self, program_config) -> (paddle_infer.Config, List[int], float):
def generate_dynamic_shape(attrs):
self.dynamic_shape.min_input_shape = {"input_data": [1, 4, 4, 4]}
self.dynamic_shape.max_input_shape = {"input_data": [4, 32, 64, 64]}
self.dynamic_shape.opt_input_shape = {"input_data": [2, 16, 16, 8]}
def clear_dynamic_shape():
self.dynamic_shape.max_input_shape = {}
self.dynamic_shape.min_input_shape = {}
self.dynamic_shape.opt_input_shape = {}
def generate_trt_nodes_num(attrs, dynamic_shape):
ver = paddle_infer.get_trt_compile_version()
            if ver[0] * 1000 + ver[1] * 100 + ver[2] * 10 >= 7130:
if attrs[0]['axis'] == 1:
return 1, 2
else:
return 0, 3
else:
if dynamic_shape:
return 0, 3
if attrs[0]['axis'] == 1:
return 1, 2
else:
return 0, 3
attrs = [
program_config.ops[i].attrs
for i in range(len(program_config.ops))
]
# for static_shape
clear_dynamic_shape()
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5
# for dynamic_shape
generate_dynamic_shape(attrs)
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), generate_trt_nodes_num(attrs,
True), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num(attrs,
True), 1e-5
def test(self):
self.run_test()
class TrtConvertFlattenTest_dim_5(TrtLayerAutoScanTest):
def is_program_valid(self, program_config: ProgramConfig) -> bool:
return True
def sample_program_configs(self):
def generate_input(batch):
return np.random.random([batch, 8, 8, 8]).astype(np.float32)
for batch in [1, 2, 4]:
for axis in [0, 1, 2, 3, 4]:
for type in ["flatten", "flatten2"]:
if type == "flatten":
op_outputs = {"Out": ["output_data"]}
else:
op_outputs = {
"Out": ["output_data"],
"XShape": ["xshape_data"]
}
dics = [{"axis": axis}]
ops_config = [{
"op_type": "flatten",
"op_inputs": {
"X": ["input_data"]
},
"op_outputs": op_outputs,
"op_attrs": dics[0]
}]
ops = self.generate_op_config(ops_config)
program_config = ProgramConfig(
ops=ops,
weights={},
inputs={
"input_data": TensorConfig(
data_gen=partial(generate_input, batch))
},
outputs=["output_data"])
yield program_config
def sample_predictor_configs(
self, program_config) -> (paddle_infer.Config, List[int], float):
def generate_dynamic_shape(attrs):
self.dynamic_shape.min_input_shape = {"input_data": [1, 4, 4, 4]}
self.dynamic_shape.max_input_shape = {"input_data": [4, 32, 64, 64]}
self.dynamic_shape.opt_input_shape = {"input_data": [2, 16, 16, 8]}
def clear_dynamic_shape():
self.dynamic_shape.max_input_shape = {}
self.dynamic_shape.min_input_shape = {}
self.dynamic_shape.opt_input_shape = {}
def generate_trt_nodes_num(attrs, dynamic_shape):
ver = paddle_infer.get_trt_compile_version()
            if ver[0] * 1000 + ver[1] * 100 + ver[2] * 10 >= 7130:
if attrs[0]['axis'] == 1:
return 1, 2
else:
return 0, 3
else:
if dynamic_shape:
return 0, 3
if attrs[0]['axis'] == 1:
return 1, 2
else:
return 0, 3
attrs = [
program_config.ops[i].attrs
for i in range(len(program_config.ops))
]
# for static_shape
clear_dynamic_shape()
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5
# for dynamic_shape
generate_dynamic_shape(attrs)
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), generate_trt_nodes_num(attrs,
True), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num(attrs,
True), 1e-5
def test(self):
self.run_test()
if __name__ == "__main__":
unittest.main()
| |
# Copyright 2011 Justin Santa Barbara
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
import os.path
import tempfile
import mock
from oslo.config import cfg
from oslo_concurrency import processutils
from oslotest import base as test_base
from ironic_lib import exception
from ironic_lib import utils
CONF = cfg.CONF
class BareMetalUtilsTestCase(test_base.BaseTestCase):
def test_unlink(self):
with mock.patch.object(os, "unlink") as unlink_mock:
unlink_mock.return_value = None
utils.unlink_without_raise("/fake/path")
unlink_mock.assert_called_once_with("/fake/path")
def test_unlink_ENOENT(self):
with mock.patch.object(os, "unlink") as unlink_mock:
unlink_mock.side_effect = OSError(errno.ENOENT)
utils.unlink_without_raise("/fake/path")
unlink_mock.assert_called_once_with("/fake/path")
class ExecuteTestCase(test_base.BaseTestCase):
def test_retry_on_failure(self):
fd, tmpfilename = tempfile.mkstemp()
_, tmpfilename2 = tempfile.mkstemp()
try:
fp = os.fdopen(fd, 'w+')
fp.write('''#!/bin/sh
# If stdin fails to get passed during one of the runs, make a note.
if ! grep -q foo
then
echo 'failure' > "$1"
fi
# If stdin has failed to get passed during this or a previous run, exit early.
if grep failure "$1"
then
exit 1
fi
runs="$(cat $1)"
if [ -z "$runs" ]
then
runs=0
fi
runs=$(($runs + 1))
echo $runs > "$1"
exit 1
''')
fp.close()
os.chmod(tmpfilename, 0o755)
try:
self.assertRaises(processutils.ProcessExecutionError,
utils.execute,
tmpfilename, tmpfilename2, attempts=10,
process_input='foo',
delay_on_retry=False)
except OSError as e:
if e.errno == errno.EACCES:
self.skipTest("Permissions error detected. "
"Are you running with a noexec /tmp?")
else:
raise
fp = open(tmpfilename2, 'r')
runs = fp.read()
fp.close()
self.assertNotEqual(runs.strip(), 'failure', 'stdin did not '
'always get passed '
'correctly')
runs = int(runs.strip())
self.assertEqual(10, runs,
'Ran %d times instead of 10.' % (runs,))
finally:
os.unlink(tmpfilename)
os.unlink(tmpfilename2)
def test_unknown_kwargs_raises_error(self):
self.assertRaises(processutils.UnknownArgumentError,
utils.execute,
'/usr/bin/env', 'true',
this_is_not_a_valid_kwarg=True)
def test_check_exit_code_boolean(self):
utils.execute('/usr/bin/env', 'false', check_exit_code=False)
self.assertRaises(processutils.ProcessExecutionError,
utils.execute,
'/usr/bin/env', 'false', check_exit_code=True)
def test_no_retry_on_success(self):
fd, tmpfilename = tempfile.mkstemp()
_, tmpfilename2 = tempfile.mkstemp()
try:
fp = os.fdopen(fd, 'w+')
fp.write('''#!/bin/sh
# If we've already run, bail out.
grep -q foo "$1" && exit 1
# Mark that we've run before.
echo foo > "$1"
# Check that stdin gets passed correctly.
grep foo
''')
fp.close()
os.chmod(tmpfilename, 0o755)
try:
utils.execute(tmpfilename,
tmpfilename2,
process_input='foo',
attempts=2)
except OSError as e:
if e.errno == errno.EACCES:
self.skipTest("Permissions error detected. "
"Are you running with a noexec /tmp?")
else:
raise
finally:
os.unlink(tmpfilename)
os.unlink(tmpfilename2)
@mock.patch.object(processutils, 'execute')
@mock.patch.object(os.environ, 'copy', return_value={})
def test_execute_use_standard_locale_no_env_variables(self, env_mock,
execute_mock):
utils.execute('foo', use_standard_locale=True)
execute_mock.assert_called_once_with('foo',
env_variables={'LC_ALL': 'C'})
@mock.patch.object(processutils, 'execute')
def test_execute_use_standard_locale_with_env_variables(self,
execute_mock):
utils.execute('foo', use_standard_locale=True,
env_variables={'foo': 'bar'})
execute_mock.assert_called_once_with('foo',
env_variables={'LC_ALL': 'C',
'foo': 'bar'})
@mock.patch.object(processutils, 'execute')
def test_execute_not_use_standard_locale(self, execute_mock):
utils.execute('foo', use_standard_locale=False,
env_variables={'foo': 'bar'})
execute_mock.assert_called_once_with('foo',
env_variables={'foo': 'bar'})
def test_execute_get_root_helper(self):
with mock.patch.object(processutils, 'execute') as execute_mock:
helper = utils._get_root_helper()
utils.execute('foo', run_as_root=True)
execute_mock.assert_called_once_with('foo', run_as_root=True,
root_helper=helper)
def test_execute_without_root_helper(self):
with mock.patch.object(processutils, 'execute') as execute_mock:
utils.execute('foo', run_as_root=False)
execute_mock.assert_called_once_with('foo', run_as_root=False)
class MkfsTestCase(test_base.BaseTestCase):
@mock.patch.object(utils, 'execute')
def test_mkfs(self, execute_mock):
utils.mkfs('ext4', '/my/block/dev')
utils.mkfs('msdos', '/my/msdos/block/dev')
utils.mkfs('swap', '/my/swap/block/dev')
expected = [mock.call('mkfs', '-t', 'ext4', '-F', '/my/block/dev',
run_as_root=True,
use_standard_locale=True),
mock.call('mkfs', '-t', 'msdos', '/my/msdos/block/dev',
run_as_root=True,
use_standard_locale=True),
mock.call('mkswap', '/my/swap/block/dev',
run_as_root=True,
use_standard_locale=True)]
self.assertEqual(expected, execute_mock.call_args_list)
@mock.patch.object(utils, 'execute')
def test_mkfs_with_label(self, execute_mock):
utils.mkfs('ext4', '/my/block/dev', 'ext4-vol')
utils.mkfs('msdos', '/my/msdos/block/dev', 'msdos-vol')
utils.mkfs('swap', '/my/swap/block/dev', 'swap-vol')
expected = [mock.call('mkfs', '-t', 'ext4', '-F', '-L', 'ext4-vol',
'/my/block/dev', run_as_root=True,
use_standard_locale=True),
mock.call('mkfs', '-t', 'msdos', '-n', 'msdos-vol',
'/my/msdos/block/dev', run_as_root=True,
use_standard_locale=True),
mock.call('mkswap', '-L', 'swap-vol',
'/my/swap/block/dev', run_as_root=True,
use_standard_locale=True)]
self.assertEqual(expected, execute_mock.call_args_list)
@mock.patch.object(utils, 'execute',
side_effect=processutils.ProcessExecutionError(
stderr=os.strerror(errno.ENOENT)))
def test_mkfs_with_unsupported_fs(self, execute_mock):
self.assertRaises(exception.FileSystemNotSupported,
utils.mkfs, 'foo', '/my/block/dev')
@mock.patch.object(utils, 'execute',
side_effect=processutils.ProcessExecutionError(
stderr='fake'))
def test_mkfs_with_unexpected_error(self, execute_mock):
self.assertRaises(processutils.ProcessExecutionError, utils.mkfs,
'ext4', '/my/block/dev', 'ext4-vol')
class IsHttpUrlTestCase(test_base.BaseTestCase):
def test_is_http_url(self):
self.assertTrue(utils.is_http_url('http://127.0.0.1'))
self.assertTrue(utils.is_http_url('https://127.0.0.1'))
self.assertTrue(utils.is_http_url('HTTP://127.1.2.3'))
self.assertTrue(utils.is_http_url('HTTPS://127.3.2.1'))
self.assertFalse(utils.is_http_url('Zm9vYmFy'))
self.assertFalse(utils.is_http_url('11111111'))
| |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.testsdk import ScenarioTest, ResourceGroupPreparer
import time
import unittest
LOCATION = "southcentralusstage"
VNET_LOCATION = "southcentralus"
class AzureNetAppFilesBackupServiceScenarioTest(ScenarioTest):
def setup_vnet(self, vnet_name, subnet_name):
self.cmd("az network vnet create -n %s -g {rg} -l %s --address-prefix 10.5.0.0/16" %
(vnet_name, VNET_LOCATION))
self.cmd("az network vnet subnet create -n %s --vnet-name %s --address-prefixes '10.5.0.0/24' "
"--delegations 'Microsoft.Netapp/volumes' -g {rg}" % (subnet_name, vnet_name))
def create_volume(self, account_name, pool_name, volume_name, volume_only=False, backup_id=None, vnet_name=None):
if vnet_name is None:
vnet_name = self.create_random_name(prefix='cli-vnet-backup', length=24)
subnet_name = "default"
if not volume_only:
# create vnet, account and pool
self.setup_vnet(vnet_name, subnet_name)
self.cmd("netappfiles account create -g {rg} -a '%s' -l %s" % (account_name, LOCATION))
self.cmd("netappfiles pool create -g {rg} -a %s -p %s -l %s --service-level 'Premium' --size 4" %
(account_name, pool_name, LOCATION))
# create volume
if backup_id is None:
return self.cmd("netappfiles volume create -g {rg} -a %s -p %s -v %s -l %s --vnet %s --subnet %s "
"--file-path %s --usage-threshold 100" %
(account_name, pool_name, volume_name, LOCATION, vnet_name, subnet_name, volume_name)
).get_output_in_json()
else:
return self.cmd("netappfiles volume create -g {rg} -a %s -p %s -v %s -l %s --vnet %s --subnet %s "
"--file-path %s --usage-threshold 100 --backup-id %s" %
(account_name, pool_name, volume_name, LOCATION, vnet_name, subnet_name, volume_name,
backup_id)).get_output_in_json()
def create_backup(self, account_name, pool_name, volume_name, backup_name, backup_only=False, vnet_name=None):
if not backup_only:
# create account, pool and volume
self.create_volume(account_name, pool_name, volume_name, vnet_name=vnet_name)
# get vault
vaults = self.get_vaults(account_name)
# volume update with backup policy
self.cmd("az netappfiles volume update -g {rg} -a %s -p %s -v %s --vault-id %s --backup-enabled %s " %
(account_name, pool_name, volume_name, vaults[0]['id'], True))
# create backup
return self.cmd("az netappfiles volume backup create -g {rg} -a %s -p %s -v %s -l %s --backup-name %s" %
(account_name, pool_name, volume_name, LOCATION, backup_name)).get_output_in_json()
def delete_backup(self, account_name, pool_name, volume_name):
vaults = self.get_vaults(account_name)
# Delete
self.cmd("az netappfiles volume update -g {rg} -a %s -p %s -v %s --vault-id %s --backup-enabled %s " %
(account_name, pool_name, volume_name, vaults[0]['id'], False))
def get_vaults(self, account_name):
return self.cmd("az netappfiles vault list -g {rg} -a %s" % account_name).get_output_in_json()
def wait_for_backup_created(self, account_name, pool_name, volume_name, backup_name):
attempts = 0
while attempts < 40:
attempts += 1
backup = self.cmd("netappfiles volume backup show -g {rg} -a %s -p %s -v %s -b %s" %
(account_name, pool_name, volume_name, backup_name)).get_output_in_json()
if backup['provisioningState'] != "Creating":
break
if self.is_live or self.in_recording:
time.sleep(60)
def wait_for_backup_initialized(self, account_name, pool_name, volume_name, backup_name):
attempts = 0
while attempts < 60:
attempts += 1
backup = self.cmd("netappfiles volume backup show -g {rg} -a %s -p %s -v %s -b %s" %
(account_name, pool_name, volume_name, backup_name)).get_output_in_json()
if backup['provisioningState'] != "Uninitialized":
break
if self.is_live or self.in_recording:
time.sleep(60)
@ResourceGroupPreparer(name_prefix='cli_netappfiles_test_backup_', additional_tags={'owner': 'cli_test'})
def test_create_delete_backup(self):
# create backup
account_name = self.create_random_name(prefix='cli-acc-', length=24)
pool_name = self.create_random_name(prefix='cli-pool-', length=24)
volume_name = self.create_random_name(prefix='cli-vol-', length=24)
backup_name = self.create_random_name(prefix='cli-backup-', length=24)
backup = self.create_backup(account_name, pool_name, volume_name, backup_name)
assert backup is not None
assert backup['id'] is not None
self.wait_for_backup_created(account_name, pool_name, volume_name, backup_name)
backup_list = self.cmd("netappfiles volume backup list -g {rg} -a %s -p %s -v %s" %
(account_name, pool_name, volume_name)).get_output_in_json()
assert len(backup_list) == 1
# create second backup to test delete backup
backup_name2 = self.create_random_name(prefix='cli-backup-', length=24)
self.create_backup(account_name, pool_name, volume_name, backup_name2, backup_only=True)
self.wait_for_backup_created(account_name, pool_name, volume_name, backup_name2)
backup_list = self.cmd("netappfiles volume backup list -g {rg} -a %s -p %s -v %s" %
(account_name, pool_name, volume_name)).get_output_in_json()
assert len(backup_list) == 2
# delete backup
self.cmd("az netappfiles volume backup delete -g {rg} -a %s -p %s -v %s --backup-name %s" %
(account_name, pool_name, volume_name, backup_name))
backup_list = self.cmd("netappfiles volume backup list -g {rg} -a %s -p %s -v %s" %
(account_name, pool_name, volume_name)).get_output_in_json()
assert len(backup_list) == 1
self.delete_backup(account_name, pool_name, volume_name)
@ResourceGroupPreparer(name_prefix='cli_netappfiles_test_backup_', additional_tags={'owner': 'cli_test'})
def test_list_backup(self):
# create backup
account_name = self.create_random_name(prefix='cli-acc-', length=24)
pool_name = self.create_random_name(prefix='cli-pool-', length=24)
volume_name = self.create_random_name(prefix='cli-vol-', length=24)
backup_name = self.create_random_name(prefix='cli-backup-', length=24)
self.create_backup(account_name, pool_name, volume_name, backup_name)
backup_list = self.cmd("netappfiles volume backup list -g {rg} -a %s -p %s -v %s" %
(account_name, pool_name, volume_name)).get_output_in_json()
assert len(backup_list) == 1
self.wait_for_backup_created(account_name, pool_name, volume_name, backup_name)
# create backup 2
backup_name2 = self.create_random_name(prefix='cli-backup-', length=24)
self.create_backup(account_name, pool_name, volume_name, backup_name2, True)
backup_list = self.cmd("netappfiles volume backup list -g {rg} -a %s -p %s -v %s" %
(account_name, pool_name, volume_name)).get_output_in_json()
assert len(backup_list) == 2
self.wait_for_backup_created(account_name, pool_name, volume_name, backup_name2)
self.delete_backup(account_name, pool_name, volume_name)
@ResourceGroupPreparer(name_prefix='cli_netappfiles_test_backup_', additional_tags={'owner': 'cli_test'})
def test_get_backup_by_name(self):
# create backup
account_name = self.create_random_name(prefix='cli-acc-', length=24)
pool_name = self.create_random_name(prefix='cli-pool-', length=24)
volume_name = self.create_random_name(prefix='cli-vol-', length=24)
backup_name = self.create_random_name(prefix='cli-backup-', length=24)
self.create_backup(account_name, pool_name, volume_name, backup_name)
# get backup and validate
backup = self.cmd("netappfiles volume backup show -g {rg} -a %s -p %s -v %s -b %s" %
(account_name, pool_name, volume_name, backup_name)).get_output_in_json()
assert backup is not None
assert backup['name'] == account_name + "/" + pool_name + "/" + volume_name + "/" + backup_name
# get backup by id and validate
backup_from_id = self.cmd("az netappfiles volume backup show --ids %s" % backup['id']).get_output_in_json()
assert backup_from_id['name'] == account_name + "/" + pool_name + "/" + volume_name + "/" + backup_name
self.wait_for_backup_created(account_name, pool_name, volume_name, backup_name)
self.delete_backup(account_name, pool_name, volume_name)
@ResourceGroupPreparer(name_prefix='cli_netappfiles_test_backup_', additional_tags={'owner': 'cli_test'})
def test_update_backup(self):
# create backup
account_name = self.create_random_name(prefix='cli-acc-', length=24)
pool_name = self.create_random_name(prefix='cli-pool-', length=24)
volume_name = self.create_random_name(prefix='cli-vol-', length=24)
backup_name = self.create_random_name(prefix='cli-backup-', length=24)
self.create_backup(account_name, pool_name, volume_name, backup_name)
# update backup
# tags = "Tag1=Value1 Tag2=Value2"
label = "label"
self.cmd("netappfiles volume backup update -g {rg} -a %s -p %s -v %s --backup-name %s --label %s" %
(account_name, pool_name, volume_name, backup_name, label))
# get backup and validate
backup = self.cmd("netappfiles volume backup show -g {rg} -a %s -p %s -v %s --backup-name %s" %
(account_name, pool_name, volume_name, backup_name)).get_output_in_json()
assert backup is not None
assert backup['name'] == account_name + "/" + pool_name + "/" + volume_name + "/" + backup_name
assert backup['id'] is not None
# there is a bug in update where the label is not updated - will be fixed later
# assert backup['label'] == label
self.wait_for_backup_created(account_name, pool_name, volume_name, backup_name)
self.delete_backup(account_name, pool_name, volume_name)
@ResourceGroupPreparer(name_prefix='cli_netappfiles_test_backup_', additional_tags={'owner': 'cli_test'})
def test_disable_backup_for_volume(self):
# create backup
account_name = self.create_random_name(prefix='cli-acc-', length=24)
pool_name = self.create_random_name(prefix='cli-pool-', length=24)
volume_name = self.create_random_name(prefix='cli-vol-', length=24)
backup_name = self.create_random_name(prefix='cli-backup-', length=24)
self.create_backup(account_name, pool_name, volume_name, backup_name)
# get vault
vaults = self.cmd("az netappfiles vault list -g {rg} -a %s" % account_name).get_output_in_json()
self.wait_for_backup_created(account_name, pool_name, volume_name, backup_name)
# volume update
volume = self.cmd("az netappfiles volume update -g {rg} -a %s -p %s -v %s --vault-id %s --backup-enabled %s" %
(account_name, pool_name, volume_name, vaults[0]['id'], False)).get_output_in_json()
assert not volume['dataProtection']['backup']['backupEnabled']
@ResourceGroupPreparer(name_prefix='cli_netappfiles_test_backup_', additional_tags={'owner': 'cli_test'})
def test_restore_backup_to_new_volume(self):
# create backup
account_name = self.create_random_name(prefix='cli-acc-', length=24)
pool_name = self.create_random_name(prefix='cli-pool-', length=24)
volume_name = self.create_random_name(prefix='cli-vol-', length=24)
backup_name = self.create_random_name(prefix='cli-backup-', length=24)
vnet_name = self.create_random_name(prefix='cli-vnet-backup', length=24)
self.create_backup(account_name, pool_name, volume_name, backup_name, vnet_name=vnet_name)
self.wait_for_backup_created(account_name, pool_name, volume_name, backup_name)
backup = self.cmd("netappfiles volume backup show -g {rg} -a %s -p %s -v %s --backup-name %s" %
(account_name, pool_name, volume_name, backup_name)).get_output_in_json()
# create new volume and restore backup
volume2_name = self.create_random_name(prefix='cli-vol-', length=24)
self.create_volume(account_name, pool_name, volume2_name, volume_only=True, backup_id=backup['backupId'],
vnet_name=vnet_name)
volume2 = self.cmd("netappfiles volume show -g {rg} -a %s -p %s -v %s" %
(account_name, pool_name, volume_name)).get_output_in_json()
assert volume2['dataProtection']['backup']['backupEnabled']
self.wait_for_backup_created(account_name, pool_name, volume_name, backup_name)
self.delete_backup(account_name, pool_name, volume_name)
@ResourceGroupPreparer(name_prefix='cli_netappfiles_test_backup_', additional_tags={'owner': 'cli_test'})
def test_get_backup_status(self):
# create backup
account_name = self.create_random_name(prefix='cli-acc-', length=24)
pool_name = self.create_random_name(prefix='cli-pool-', length=24)
volume_name = self.create_random_name(prefix='cli-vol-', length=24)
backup_name = self.create_random_name(prefix='cli-backup-', length=24)
vnet_name = self.create_random_name(prefix='cli-vnet-backup', length=24)
self.create_backup(account_name, pool_name, volume_name, backup_name, vnet_name=vnet_name)
status = self.cmd("az netappfiles volume backup status -g {rg} -a %s -p %s -v %s" %
(account_name, pool_name, volume_name)).get_output_in_json()
assert status['mirrorState'] == "Uninitialized"
self.wait_for_backup_created(account_name, pool_name, volume_name, backup_name)
self.wait_for_backup_initialized(account_name, pool_name, volume_name, backup_name)
status = self.cmd("az netappfiles volume backup status -g {rg} -a %s -p %s -v %s" %
(account_name, pool_name, volume_name)).get_output_in_json()
assert status['mirrorState'] == "Mirrored"
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Kiwi Markup
This script implements the mark-up for the Kiwi system (essentially a
static website generator). It expects to be given a list of text lines
which it converts to the equivalent HTML format.
Because this is the first part of a larger system it is not really
intended to be run stand-alone, but see the "if __name__..." section
at the end of the script for simple command-line use.
See README.md for more details
"""
import sys
import re
import cgi
KIWI_MODE_STD = 0
KIWI_MODE_ORG = 1
# Regex definitions ("Now you have two problems...")
# Regex for ATX style headers, starting (after up to three
# whitespace characters) with a row of up to six '#' characters. The
# header text can also be followed by additional '#' characters up
# to the end of the line -- these will be omitted from the output.
HEADER_REGEX = r"^[\s]{0,3}([#]{1,6})[\s]*([^#]*)"
# Regex for org-mode headers, starting with a row of up to 6 '*'
# characters.
ORG_HEADER_REGEX = r"^[\s]{0,3}([\*]{1,6})(.*)"
# Regex for list items, optionally indented by whitespace, and indicated by a
# single dash or asterisk followed by whitespace and then the actual text of
# the item.
LIST_REGEX = r"^([\s]*)[-\*][\s]+(.*)"
# Regex for table headers, which consist of a row of '-' characters
# split up by one or more '|' characters or '+' characters.
TABLE_HEADER_REGEX = r"^[\s]{0,3}(\||\+)*((-{3,})(\||\+))+"
# Regexes for bold and emphasized text
BOLD_START_REGEX = r"(^|[\[\]\s]| _)(\*\*)([^\s])"
BOLD_END_REGEX = r"([^\s])(\*\*)(_ |[\):;.,?\[\]\s]+|$)"
EMPH_START_REGEX = r"(^|[\[\]\s\"]|<b>)(_)([^\s])"
EMPH_END_REGEX = r"([^\s])(_)(<\/b>|[\):;.,?\"\[\]\s]+|$)"
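# For example (illustrative): running the start/end patterns above over the
# line "some **bold** and _emphasised_ words" yields
# "some <b>bold</b> and <i>emphasised</i> words".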
# Regex for Markdown-style URL mark-up: [title-text](path/to/url)
MD_URL_REGEX = r"\[([^]]*)\]\(([^\)]*)\)"
# Regex for org-mode URL mark-up: [[path/to/url][title-text]]
ORG_URL_REGEX = r"\[\[([^]]*)\]\[([^]]*)\]\]"
# Regex for Markdown-style image mark-up: 
MD_IMG_REGEX = r"!\[([^]]*)\]\(([^\)]*)\)"
# IMG_REGEX matches: [img.class-name:alt-text](path/to/image.png) where the
# class-name and alt-text elements are optional, but the regex will return
# empty groups if either of them is missing. Note that this regex is very
# clumsy, but it returns the class name in group 3, the alt-text in group 6,
# and the file path in group 7. If the class name or the alt-text are
# empty, the groups will still exist (as groups 3 and 6) but will be
# empty.
IMG_REGEX = r"\[img(()|\.([^\]:]*))(()|:([^\]]*))\]\(([^\)]*)\)"
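# Illustrative example of the group layout described above (assuming a line
# such as "[img.photo:A kiwi bird](images/kiwi.png)"):
#
#   m = re.search(IMG_REGEX, "[img.photo:A kiwi bird](images/kiwi.png)")
#   m.group(3)   # 'photo'            (class name)
#   m.group(6)   # 'A kiwi bird'      (alt-text)
#   m.group(7)   # 'images/kiwi.png'  (file path)
#
# When the class or alt-text is omitted, the corresponding group is empty
# (or None), which is why substitutions go through re_sub() below.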
# AUDIO_REGEX matches: [audio.class-name:alt-text](path/to/audio-file) where the
# class-name and alt-text elements are optional, but the regex will return
# empty groups if either of them is missing (see the IMG_REGEX above for
# additional details about the groups).
AUDIO_REGEX = r"\[audio(()|\.([^\]:]*))(()|:([^\]]*))\]\(([^\)]*)\)"
# LINK_REGEX matches: [link.class-name:alt-text](path/to/link) where the
# class-name and alt-text elements are optional, but the regex will return
# empty groups if either of them is missing (see the IMG_REGEX above for
# additional details about the groups).
LINK_REGEX = r"\[link(()|\.([^\]:]*))(()|:([^\]]*))\]\(([^\)]*)\)"
# FOOTNOTE_REGEX for footnotes (links to footnote_nn)
FOOTNOTE_REGEX = r"\[\^([0-9]+)\]"
# FOOTNOTE_TARGET_REGEX for footnote targets (links to footnote_ref_nn)
FOOTNOTE_TARGET_REGEX = r"\[\^([0-9]+)\]:"
# CODEBLOCK_START_REGEX finds the beginning of a block of text that should
# be formatted as code. The marker can optionally include a language
# name after the 'code:' marker, but should otherwise be on a line on its
# own.
CODEBLOCK_START_REGEX = r"^[\s]*code:([^\s]*)[\s]*$"
# CODEBLOCK_END_REGEX finds the marker for the end of a block of text
# that should be formatted as code. The marker should be on a line on its
# own.
CODEBLOCK_END_REGEX = r"^[\s]*:code[\s]*$"
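# Illustrative example of the markers above: a block written as
#
#   code:python
#   ...source lines...
#   :code
#
# is emitted inside <pre><code> ... </code></pre> by the processor below; the
# optional language name is captured as codeLanguage but is not otherwise
# used when building the HTML.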
class KiwiMarkup:
"""
Main processing class. Call the execute() method to process a list of
text lines. On return, the KiwiMarkup.output variable will hold a list
of lines in HTML format. Note that this is an HTML fragment, and does
not include any framing <HTML> and <BODY> tags -- it is assumed that
the calling program will take the output and insert it into an appropriate
template.
"""
def __init__(self):
self.state = KiwiState()
self.boldStartPattern = re.compile(BOLD_START_REGEX)
self.boldEndPattern = re.compile(BOLD_END_REGEX)
self.emphStartPattern = re.compile(EMPH_START_REGEX)
self.emphEndPattern = re.compile(EMPH_END_REGEX)
self.mdUrlPattern = re.compile(MD_URL_REGEX)
self.orgmodeUrlPattern = re.compile(ORG_URL_REGEX)
self.mdImgPattern = re.compile(MD_IMG_REGEX)
self.imgPattern = re.compile(IMG_REGEX)
self.audioPattern = re.compile(AUDIO_REGEX)
self.linkPattern = re.compile(LINK_REGEX)
self.footnotePattern = re.compile(FOOTNOTE_REGEX)
self.footnoteTargetPattern = re.compile(FOOTNOTE_TARGET_REGEX)
def execute(self, lines, mode = None):
"""
Main entry point. The lines parameter should be a list of
the plain text lines which are to be converted to HTML,
and mode indicates the actual processing required -- the
default is KIWI_MODE_STD.
"""
assert (lines), "No lines provided for processing"
        if mode is None:
mode = KIWI_MODE_STD
if len(lines) > 0:
# Check the first line to see if this is an
# org-mode file, and if it is, override the
# mode.
if re.search("-*- mode: org -*-", lines[0]):
mode = KIWI_MODE_ORG
self.mode = mode
self.line = KiwiLineScanner(self.mode)
self.thisLine = None
self.nextLine = None
self.indents = []
self.output = []
# Process the lines
for line in lines:
# The processing often needs to know the contents of the next
# line, so we read one line ahead. Therefore thisLine is
# actually the line we read previously (and will be None on the
# very first cycle of this loop)
self.thisLine = self.nextLine
# Convert tabs to spaces
self.nextLine = re.sub("\t", " ", line.rstrip())
if not self.line.skipNextLine:
self.processLine()
else:
# Never skip more than one line
self.line.skipNextLine = False
# Process the final line
if not self.line.skipNextLine:
self.thisLine = self.nextLine
self.nextLine = ""
self.processLine()
self.endAllSections()
return len(self.output) > 0
def startParagraph(self):
"""
Starts a new <p> section, provided there is not one already open.
"""
if not self.state.inParagraph:
self.output.append('<p>')
self.state.inParagraph = True
def endParagraph(self):
"""
Ends a current <p> section. If no paragraph is open, does nothing.
"""
if self.state.inParagraph:
self.output.append('</p>')
self.state.inParagraph = False
def startBlock(self):
"""
Starts a new 'PRE' block, provided there is no block open. Does nothing
if the block is already open.
"""
if not self.state.inBlock:
self.output.append('<pre>')
self.state.inBlock = True
def endBlock(self):
"""
Ends any current 'PRE' block. If no block is open, does nothing.
"""
if self.state.inBlock:
self.output.append('</pre>')
self.state.inBlock = False
def listIndent(self, increment = 0):
"""
Returns a string of spaces to indent a list item, based on the current
number of sub-lists that are open. Note that this is purely to apply
'pretty' formatting to the HTML code, and has no other effect.
        The 'increment' parameter allows items to be indented one step further,
        so that LI tags can be indented more deeply than the UL tags.
"""
return " " * (len(self.indents) - 1 + increment)
def startList(self):
"""
Starts a new unordered list ('<UL>'). If one is already open,
and the current line is more deeply indented than the list
is at present, starts a new sub-list. If the current line is
indented by less than the list, ends the current sub-list.
"""
nested = False
if len(self.indents) > 0:
# Get the last indentation level, and if the
# current line is indented by a greater amount
# we have to start a sub-list. If it is
# indented by a lesser amount, we have to
# end the current sub-list.
indent = self.indents[-1]
if self.line.listIndent > indent:
nested = True
elif self.line.listIndent < indent:
self.endNestedList()
if nested or not self.state.inList:
# Save the indentation level
self.indents.append(self.line.listIndent)
self.endParagraph()
# If a sub-list is being started, indent the tag
# by an extra amount
if nested:
self.output.append('%s<ul>' % self.listIndent(1))
else:
self.output.append('%s<ul>' % self.listIndent())
self.state.inList = True
def endNestedList(self):
"""
        Ends a sublist if one is open. This should only be called from
        startList(), endList() and recursively from itself.
"""
if len(self.indents) > 0:
indent = self.indents[-1]
if self.line.listIndent < indent:
# Close the list and the LI tag
self.output.append('%s</ul>' % self.listIndent(1))
self.output.append('%s</li>' % self.listIndent())
self.indents.pop()
# It's possible that the current line is actually
# ending more than one list, so recursively call
                # this method again to check.
self.endNestedList()
def endList(self):
"""
Ends the current list, if any. If the current list is a sublist,
this ends the sublist, and only closes the list completely if
there are no more sublists.
"""
if self.state.inList:
if len(self.indents) > 0:
self.endNestedList()
if len(self.indents) == 0:
self.output.append('%s</ul>' % self.listIndent())
self.state.inList = False
def endAllLists(self):
"""
Forces any sublists to be closed, and also closes the main
list, if any.
"""
while len(self.indents) > 0:
self.output.append('%s</ul>' % self.listIndent(1))
self.output.append('%s</li>' % self.listIndent())
self.indents.pop()
self.endList()
def startTable(self):
"""
Starts a new table ('<TABLE>'). If one is already open, does nothing.
"""
if not self.state.inTable:
self.output.append('<table>')
self.state.inTable = True
def endTable(self):
"""
Ends any open table. If no table is open, does nothing.
"""
if self.state.inTable:
self.output.append('</table>')
self.state.inTable = False
def startOrgSection(self):
"""
Starts a set of org-mode headers. A group of org-mode headers
which are not separated by blank lines will be gathered under
one '<p>' tag, separated by line-break ('<br>') tags.
"""
if not self.state.inOrgSection:
self.output.append('<p>')
self.state.inOrgSection = True
def startCodeSection(self):
"""
Starts a block of text that should be formatted as code
"""
if not self.state.inCodeSection:
self.output.append('<pre>')
self.output.append('<code>')
self.state.inCodeSection = True
def endCodeSection(self):
"""
Ends a block of code
"""
if self.state.inCodeSection:
self.output.append('</code>')
self.output.append('</pre>')
self.state.inCodeSection = False
def endAllSections(self):
"""
Closes any/all open tags
"""
self.endBlock()
self.endAllLists()
self.endTable()
self.endParagraph()
def addListLine(self):
"""
Adds a new list item, starting a new list if necessary.
"""
self.startList()
if self.line.isNestedList:
# For sub-lists the HTML spec requires that we leave the LI tag open
self.thisLine = "%s<li>%s" % (self.listIndent(1), self.line.listText)
else:
self.thisLine = "%s<li>%s</li>" % (self.listIndent(1), self.line.listText)
def imgAttributes(self, line):
"""
Extracts any attributes from a line containing img markup
"""
attributes = re.search(IMG_REGEX, line)
if (attributes):
return (attributes.group(1)[1:], attributes.group(3)[1:])
else:
return None
def re_sub(self, pattern, replacement, string):
"""
Work-around for re.sub unmatched group error.
Note that an alternative would be to use the regex package, but this
is not a default part of the Python library, and I want to avoid
external dependencies if at all possible.
See https://gist.github.com/gromgull/3922244
"""
def _r(m):
# Now this is ugly.
# Python has a "feature" where unmatched groups return None
# then re.sub chokes on this.
# see http://bugs.python.org/issue1519638
# this works around and hooks into the internal of the re module...
# the match object is replaced with a wrapper that
# returns "" instead of None for unmatched groups
class _m():
def __init__(self, m):
self.m=m
self.string=m.string
def group(self, n):
return m.group(n) or ""
return re._expand(pattern, _m(m), replacement)
return re.sub(pattern, _r, string)
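    # Illustrative note: with the IMG_REGEX defined above, a line such as
    # "[img](pic.png)" leaves the class and alt-text groups unmatched, so on
    # older Python versions a plain pattern.sub() raises an "unmatched group"
    # error; re_sub() expands those missing groups to empty strings instead.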
def applyInlineMarkup(self, line):
"""
Applies markup to the supplied line and returns the results. It
        assumes that self.line holds the additional details for the line.
"""
line = self.boldStartPattern.sub(r"\1<b>\3", line)
line = self.boldEndPattern.sub(r"\1</b>\3", line)
line = self.emphStartPattern.sub(r"\1<i>\3", line)
line = self.emphEndPattern.sub(r"\1</i>\3", line)
line = self.mdImgPattern.sub(r"<img src='\2' alt='\1' title='\1'/>", line)
line = self.re_sub(self.imgPattern, r"<img src='\7' class='\3' alt='\6' title='\6'/>", line)
line = self.re_sub(self.audioPattern, r"<audio width='300px' height='32px' src='\7' class='\3' controls='controls'> Your browser does not support audio playback. </audio>", line)
line = self.re_sub(self.linkPattern, r"<a href='\7' class='\3' alt='\6'>\6</a>", line)
line = self.mdUrlPattern.sub(r"<a href='\2'>\1</a>", line)
line = self.orgmodeUrlPattern.sub(r"<a href='\1'>\2</a>", line)
line = self.footnoteTargetPattern.sub(r"\1. <a name='footnote_target_\1' href='#footnote_ref_\1'> ↩</a>", line)
line = self.footnotePattern.sub(r"<a name='footnote_ref_\1' href='#footnote_target_\1'>[<sup>\1</sup>]</a>", line)
return line
def processLine(self):
"""
Processes the current line, converting it into the appropriate
HTML.
"""
        includeLine = True
        if self.thisLine is not None:
# Scan the line to get the details for it, then carry out the
# appropriate actions, based on the line type
self.line.scan(self.thisLine, self.nextLine, self.state)
if self.line.isCodeStart:
self.endAllSections()
self.startCodeSection()
includeLine = False
elif self.line.isCodeEnd:
self.endCodeSection()
includeLine = False
elif self.state.inCodeSection:
# If we are in a code section, we don't want to do any
# other processing of the line
pass
elif self.line.isList:
self.endBlock()
self.endTable()
self.endParagraph()
self.addListLine()
elif self.line.isBlock:
self.endAllLists()
self.endTable()
self.endParagraph()
self.startBlock()
elif self.line.isTable:
self.endBlock()
self.endAllLists()
self.endParagraph()
self.startTable()
self.output.append(" <tr>")
for column in self.line.tableColumns:
column = self.applyInlineMarkup(column)
if self.line.isTableHeader:
self.output.append(" <th>%s</th>" % column)
else:
self.output.append(" <td>%s</td>" % column)
self.output.append(" </tr>")
includeLine = False
elif self.line.isHeader:
self.endAllSections()
self.thisLine = "<h%d>%s</h%d>" %(self.line.headerLevel, self.line.headerText, self.line.headerLevel)
elif self.line.isHorizontalLine:
self.endAllSections()
self.thisLine = "<hr>"
elif self.line.isParagraph:
self.endBlock()
self.endAllLists()
self.endTable()
self.startParagraph()
elif self.line.isBlankLine:
self.endAllLists()
self.endTable()
self.endParagraph()
# Do not output blank lines
includeLine = False
if includeLine:
if not self.state.inBlock and not self.state.inCodeSection:
self.thisLine = self.applyInlineMarkup(self.thisLine)
else:
self.thisLine = self.thisLine[4:]
self.thisLine = cgi.escape(self.thisLine)
self.output.append(self.thisLine)
class KiwiState:
"""
Simple class to hold the current state of the processor
"""
inBold = False
inItalic = False
inParagraph = False
inTable = False
inList = False
inBlock = False
inCodeSection = False
class KiwiLineScanner:
"""
Simple class to scan the current line and store details about it.
"""
isParagraph = True
isHeader = False
isList = False
isTable = False
isTableHeader = False
isBlock = False
    isBlankLine = False
isHorizontalLine = False
headerLevel = 0
headerText = ""
listIndent = 0
listText = ""
tableColumns = []
skipNextLine = False
def __init__(self, mode):
self.headerPattern = re.compile(HEADER_REGEX)
self.orgHeaderPattern = re.compile(ORG_HEADER_REGEX)
self.listPattern = re.compile(LIST_REGEX)
self.tableHeaderPattern = re.compile(TABLE_HEADER_REGEX)
self.codeStartPattern = re.compile(CODEBLOCK_START_REGEX)
self.codeEndPattern = re.compile(CODEBLOCK_END_REGEX)
self.mode = mode
def reset(self):
self.isParagraph = True
self.isHeader = False
self.isList = False
self.isTable = False
self.isTableHeader = False
self.isBlankLine = False
self.isBlock = False
self.isCodeStart = False
self.isCodeEnd = False
self.skipNextLine = False
self.headerLevel = 0
self.headerText = ""
self.listIndent = 0
self.listText = ""
self.isNestedList = False
self.codeLanguage = ""
self.tableColumns = []
def scan(self, thisLine, nextLine, state):
"""
Main entry point. This is passed the current and next lines
in the list, and the KiwiState instance that the main
processor is using.
"""
self.state = state
self.reset()
if thisLine.strip() == "":
self.isBlankLine = True
self.isParagraph = False
else:
if (self.mode == KIWI_MODE_ORG) and (thisLine.strip()[0] == "*"):
match = re.search(self.orgHeaderPattern, thisLine)
if match:
elements = match.groups()
header = elements[0]
level = len(header)
text = ""
if (len(elements) > 1):
text = elements[1].strip()
# Reconstruct the line as a list
thisLine = "%s* %s" % (" " * level, text)
self.check_for_header(thisLine, nextLine)
self.check_for_table(thisLine, nextLine)
self.check_for_block(thisLine)
self.check_for_list(thisLine, nextLine)
self.check_for_horizontal_line(thisLine)
self.check_for_code_start(thisLine)
self.check_for_code_end(thisLine)
def check_for_header(self, thisLine, nextLine):
# Check for '#' style of header
match = re.search(self.headerPattern, thisLine)
if match:
self.isParagraph = False
self.isHeader = True
elements = match.groups()
header = elements[0]
self.headerLevel = len(header)
if (len(elements) > 1):
self.headerText = elements[1]
# Check for 'underline' style of header
elif re.search(r"^={5,}=+$", nextLine):
self.isParagraph = False
self.isHeader = True
self.skipNextLine = True
self.headerLevel = 1
self.headerText = thisLine
elif re.search(r"^-{5,}-+$", nextLine):
self.isParagraph = False
self.isHeader = True
self.skipNextLine = True
self.headerLevel = 2
self.headerText = thisLine
def check_for_list(self, thisLine, nextLine):
match = re.search(self.listPattern, thisLine)
if match and not self.state.inBlock:
self.isParagraph = False
self.isList = True
# The regex returns the number of spaces that the
# line is indented by, in the first match group
self.listIndent = len(match.groups()[0])
# The second match group holds the remainder of the
# line following the asterisk
self.listText = match.groups()[1]
# Check the next line. If it is another list entry,
# but at a deeper indentation level, then we are
# about to start a nested list (we need to know
# this in advance, because HTML requires that we
# don't close the LI tag on the current line if
# it is followed by a sublist -- essentially the
            # sub-list is nested inside the LI tag).
match = re.search(self.listPattern, nextLine)
if match:
self.isNestedList = len(match.groups()[0]) > self.listIndent
def check_for_table(self, thisLine, nextLine):
"""
        Checks whether the current line represents a table row. It uses
two different criteria.
First it checks the next line, to see if it is a table divider line.
This is the same as standard Markdown.
However, in the absence of a table divider it will also look for the
presence of at least two '|' characters in the line, which will also
be taken as indicating a table.
"""
match = re.search(self.tableHeaderPattern, nextLine)
if match:
self.isTable = True
self.isTableHeader = True
self.skipNextLine = True
self.tableColumns = [column.strip() for column in thisLine.split("|")]
if (len(self.tableColumns) >= 3) or (len(self.tableColumns) > 0 and self.state.inTable):
self.isTable = True
def check_for_block(self, thisLine):
"""
        Checks for text which is indented by at least 4 spaces, which will be
treated as a PRE block.
"""
if thisLine[0:4] == " " and not self.state.inTable:
self.isParagraph = False
self.isBlock = True
def check_for_horizontal_line(self, thisLine):
"""
If we come across a row of hyphens that is not a header indicator (i.e.
it is preceded by at least one blank line) it will be detected here and
treated as a horizontal line
"""
if re.search(r"^[-]{5}[-]+$", thisLine):
self.isParagraph = False
self.isHorizontalLine = True
def check_for_code_start(self, thisLine):
match = re.search(self.codeStartPattern, thisLine)
if match:
self.isCodeStart = True
self.codeLanguage = match.group(1)
def check_for_code_end(self, thisLine):
match = re.search(self.codeEndPattern, thisLine)
if match:
self.isCodeEnd = True
self.codeLanguage = ""
if __name__ == "__main__":
# For testing purposes only. Pass a file name on the command-line,
# and it will be converted to an HTML fragment, which will then be
# output.
if len(sys.argv) > 1:
f = open(sys.argv[1])
lines = f.readlines()
f.close()
kiwi = KiwiMarkup()
kiwi.execute(lines)
print("\n".join(kiwi.output))
from __future__ import absolute_import, unicode_literals
import urlparse
from mopidy.internal import deprecation
from mopidy.mpd import exceptions, protocol, translator
@protocol.commands.add('add')
def add(context, uri):
"""
*musicpd.org, current playlist section:*
``add {URI}``
Adds the file ``URI`` to the playlist (directories add recursively).
``URI`` can also be a single file.
*Clarifications:*
- ``add ""`` should add all tracks in the library to the current playlist.
"""
if not uri.strip('/'):
return
    # If we have a URI, just try to add it directly without bothering with
# jumping through browse...
if urlparse.urlparse(uri).scheme != '':
if context.core.tracklist.add(uris=[uri]).get():
return
try:
uris = []
for path, ref in context.browse(uri, lookup=False):
if ref:
uris.append(ref.uri)
except exceptions.MpdNoExistError as e:
e.message = 'directory or file not found'
raise
if not uris:
raise exceptions.MpdNoExistError('directory or file not found')
context.core.tracklist.add(uris=uris).get()
@protocol.commands.add('addid', songpos=protocol.UINT)
def addid(context, uri, songpos=None):
"""
*musicpd.org, current playlist section:*
``addid {URI} [POSITION]``
Adds a song to the playlist (non-recursive) and returns the song id.
``URI`` is always a single file or URL. For example::
addid "foo.mp3"
Id: 999
OK
*Clarifications:*
- ``addid ""`` should return an error.
"""
if not uri:
raise exceptions.MpdNoExistError('No such song')
length = context.core.tracklist.get_length()
if songpos is not None and songpos > length.get():
raise exceptions.MpdArgError('Bad song index')
tl_tracks = context.core.tracklist.add(
uris=[uri], at_position=songpos).get()
if not tl_tracks:
raise exceptions.MpdNoExistError('No such song')
return ('Id', tl_tracks[0].tlid)
@protocol.commands.add('delete', songrange=protocol.RANGE)
def delete(context, songrange):
"""
*musicpd.org, current playlist section:*
``delete [{POS} | {START:END}]``
Deletes a song from the playlist.
"""
start = songrange.start
end = songrange.stop
if end is None:
end = context.core.tracklist.get_length().get()
tl_tracks = context.core.tracklist.slice(start, end).get()
if not tl_tracks:
raise exceptions.MpdArgError('Bad song index', command='delete')
for (tlid, _) in tl_tracks:
context.core.tracklist.remove({'tlid': [tlid]})
@protocol.commands.add('deleteid', tlid=protocol.UINT)
def deleteid(context, tlid):
"""
*musicpd.org, current playlist section:*
``deleteid {SONGID}``
Deletes the song ``SONGID`` from the playlist
"""
tl_tracks = context.core.tracklist.remove({'tlid': [tlid]}).get()
if not tl_tracks:
raise exceptions.MpdNoExistError('No such song')
@protocol.commands.add('clear')
def clear(context):
"""
*musicpd.org, current playlist section:*
``clear``
Clears the current playlist.
"""
context.core.tracklist.clear()
@protocol.commands.add('move', songrange=protocol.RANGE, to=protocol.UINT)
def move_range(context, songrange, to):
"""
*musicpd.org, current playlist section:*
``move [{FROM} | {START:END}] {TO}``
Moves the song at ``FROM`` or range of songs at ``START:END`` to
``TO`` in the playlist.
"""
start = songrange.start
end = songrange.stop
if end is None:
end = context.core.tracklist.get_length().get()
context.core.tracklist.move(start, end, to)
@protocol.commands.add('moveid', tlid=protocol.UINT, to=protocol.UINT)
def moveid(context, tlid, to):
"""
*musicpd.org, current playlist section:*
``moveid {FROM} {TO}``
Moves the song with ``FROM`` (songid) to ``TO`` (playlist index) in
the playlist. If ``TO`` is negative, it is relative to the current
song in the playlist (if there is one).
"""
tl_tracks = context.core.tracklist.filter({'tlid': [tlid]}).get()
if not tl_tracks:
raise exceptions.MpdNoExistError('No such song')
position = context.core.tracklist.index(tl_tracks[0]).get()
context.core.tracklist.move(position, position + 1, to)
@protocol.commands.add('playlist')
def playlist(context):
"""
*musicpd.org, current playlist section:*
``playlist``
Displays the current playlist.
.. note::
Do not use this, instead use ``playlistinfo``.
"""
deprecation.warn('mpd.protocol.current_playlist.playlist')
return playlistinfo(context)
@protocol.commands.add('playlistfind')
def playlistfind(context, tag, needle):
"""
*musicpd.org, current playlist section:*
``playlistfind {TAG} {NEEDLE}``
Finds songs in the current playlist with strict matching.
*GMPC:*
- does not add quotes around the tag.
"""
if tag == 'filename':
tl_tracks = context.core.tracklist.filter({'uri': [needle]}).get()
if not tl_tracks:
return None
position = context.core.tracklist.index(tl_tracks[0]).get()
return translator.track_to_mpd_format(tl_tracks[0], position=position)
raise exceptions.MpdNotImplemented # TODO
@protocol.commands.add('playlistid', tlid=protocol.UINT)
def playlistid(context, tlid=None):
"""
*musicpd.org, current playlist section:*
``playlistid {SONGID}``
Displays a list of songs in the playlist. ``SONGID`` is optional
and specifies a single song to display info for.
"""
if tlid is not None:
tl_tracks = context.core.tracklist.filter({'tlid': [tlid]}).get()
if not tl_tracks:
raise exceptions.MpdNoExistError('No such song')
position = context.core.tracklist.index(tl_tracks[0]).get()
return translator.track_to_mpd_format(tl_tracks[0], position=position)
else:
return translator.tracks_to_mpd_format(
context.core.tracklist.get_tl_tracks().get())
@protocol.commands.add('playlistinfo')
def playlistinfo(context, parameter=None):
"""
*musicpd.org, current playlist section:*
``playlistinfo [[SONGPOS] | [START:END]]``
Displays a list of all songs in the playlist, or if the optional
argument is given, displays information only for the song
``SONGPOS`` or the range of songs ``START:END``.
*ncmpc and mpc:*
- uses negative indexes, like ``playlistinfo "-1"``, to request
the entire playlist
"""
if parameter is None or parameter == '-1':
start, end = 0, None
else:
tracklist_slice = protocol.RANGE(parameter)
start, end = tracklist_slice.start, tracklist_slice.stop
tl_tracks = context.core.tracklist.get_tl_tracks().get()
if start and start > len(tl_tracks):
raise exceptions.MpdArgError('Bad song index')
if end and end > len(tl_tracks):
end = None
return translator.tracks_to_mpd_format(tl_tracks, start, end)
@protocol.commands.add('playlistsearch')
def playlistsearch(context, tag, needle):
"""
*musicpd.org, current playlist section:*
``playlistsearch {TAG} {NEEDLE}``
Searches case-sensitively for partial matches in the current
playlist.
*GMPC:*
- does not add quotes around the tag
- uses ``filename`` and ``any`` as tags
"""
raise exceptions.MpdNotImplemented # TODO
@protocol.commands.add('plchanges', version=protocol.INT)
def plchanges(context, version):
"""
*musicpd.org, current playlist section:*
``plchanges {VERSION}``
Displays changed songs currently in the playlist since ``VERSION``.
To detect songs that were deleted at the end of the playlist, use
``playlistlength`` returned by status command.
*MPDroid:*
- Calls ``plchanges "-1"`` two times per second to get the entire playlist.
"""
# XXX Naive implementation that returns all tracks as changed
tracklist_version = context.core.tracklist.get_version().get()
if version < tracklist_version:
return translator.tracks_to_mpd_format(
context.core.tracklist.get_tl_tracks().get())
elif version == tracklist_version:
# A version match could indicate this is just a metadata update, so
# check for a stream ref and let the client know about the change.
stream_title = context.core.playback.get_stream_title().get()
if stream_title is None:
return None
tl_track = context.core.playback.get_current_tl_track().get()
position = context.core.tracklist.index(tl_track).get()
return translator.track_to_mpd_format(
tl_track, position=position, stream_title=stream_title)
@protocol.commands.add('plchangesposid', version=protocol.INT)
def plchangesposid(context, version):
"""
*musicpd.org, current playlist section:*
``plchangesposid {VERSION}``
Displays changed songs currently in the playlist since ``VERSION``.
This function only returns the position and the id of the changed
song, not the complete metadata. This is more bandwidth efficient.
To detect songs that were deleted at the end of the playlist, use
``playlistlength`` returned by status command.
"""
# XXX Naive implementation that returns all tracks as changed
if int(version) != context.core.tracklist.get_version().get():
result = []
for (position, (tlid, _)) in enumerate(
context.core.tracklist.get_tl_tracks().get()):
result.append(('cpos', position))
result.append(('Id', tlid))
return result
@protocol.commands.add('shuffle', songrange=protocol.RANGE)
def shuffle(context, songrange=None):
"""
*musicpd.org, current playlist section:*
``shuffle [START:END]``
Shuffles the current playlist. ``START:END`` is optional and
specifies a range of songs.
"""
if songrange is None:
start, end = None, None
else:
start, end = songrange.start, songrange.stop
context.core.tracklist.shuffle(start, end)
@protocol.commands.add('swap', songpos1=protocol.UINT, songpos2=protocol.UINT)
def swap(context, songpos1, songpos2):
"""
*musicpd.org, current playlist section:*
``swap {SONG1} {SONG2}``
Swaps the positions of ``SONG1`` and ``SONG2``.
"""
tracks = context.core.tracklist.get_tracks().get()
song1 = tracks[songpos1]
song2 = tracks[songpos2]
del tracks[songpos1]
tracks.insert(songpos1, song2)
del tracks[songpos2]
tracks.insert(songpos2, song1)
# TODO: do we need a tracklist.replace()
context.core.tracklist.clear()
with deprecation.ignore('core.tracklist.add:tracks_arg'):
context.core.tracklist.add(tracks=tracks).get()
@protocol.commands.add('swapid', tlid1=protocol.UINT, tlid2=protocol.UINT)
def swapid(context, tlid1, tlid2):
"""
*musicpd.org, current playlist section:*
``swapid {SONG1} {SONG2}``
Swaps the positions of ``SONG1`` and ``SONG2`` (both song ids).
"""
tl_tracks1 = context.core.tracklist.filter({'tlid': [tlid1]}).get()
tl_tracks2 = context.core.tracklist.filter({'tlid': [tlid2]}).get()
if not tl_tracks1 or not tl_tracks2:
raise exceptions.MpdNoExistError('No such song')
position1 = context.core.tracklist.index(tl_tracks1[0]).get()
position2 = context.core.tracklist.index(tl_tracks2[0]).get()
swap(context, position1, position2)
# TODO: add at least reflection tests before adding NotImplemented version
# @protocol.commands.add(
# 'prio', priority=protocol.UINT, position=protocol.RANGE)
def prio(context, priority, position):
"""
*musicpd.org, current playlist section:*
``prio {PRIORITY} {START:END...}``
Set the priority of the specified songs. A higher priority means that
it will be played first when "random" mode is enabled.
A priority is an integer between 0 and 255. The default priority of new
songs is 0.
"""
pass
# TODO: add at least reflection tests before adding NotImplemented version
# @protocol.commands.add('prioid')
def prioid(context, *args):
"""
*musicpd.org, current playlist section:*
``prioid {PRIORITY} {ID...}``
Same as prio, but address the songs with their id.
"""
pass
# TODO: add at least reflection tests before adding NotImplemented version
# @protocol.commands.add('addtagid', tlid=protocol.UINT)
def addtagid(context, tlid, tag, value):
"""
*musicpd.org, current playlist section:*
``addtagid {SONGID} {TAG} {VALUE}``
Adds a tag to the specified song. Editing song tags is only possible
for remote songs. This change is volatile: it may be overwritten by
tags received from the server, and the data is gone when the song gets
removed from the queue.
"""
pass
# TODO: add at least reflection tests before adding NotImplemented version
# @protocol.commands.add('cleartagid', tlid=protocol.UINT)
def cleartagid(context, tlid, tag):
"""
*musicpd.org, current playlist section:*
``cleartagid {SONGID} [TAG]``
Removes tags from the specified song. If TAG is not specified, then all
tag values will be removed. Editing song tags is only possible for
remote songs.
"""
pass
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._user_operations import build_create_or_update_request, build_delete_request, build_generate_sso_url_request, build_get_entity_tag_request, build_get_request, build_get_shared_access_token_request, build_list_by_service_request, build_update_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class UserOperations:
"""UserOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~api_management_client.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list_by_service(
self,
resource_group_name: str,
service_name: str,
filter: Optional[str] = None,
top: Optional[int] = None,
skip: Optional[int] = None,
expand_groups: Optional[bool] = None,
**kwargs: Any
) -> AsyncIterable["_models.UserCollection"]:
"""Lists a collection of registered users in the specified service instance.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param filter: | Field | Usage | Supported operators | Supported
functions |</br>|-------------|-------------|-------------|-------------|</br>| name |
filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith |</br>| firstName
| filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith |</br>|
lastName | filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith
|</br>| email | filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith
|</br>| state | filter | eq | |</br>| registrationDate | filter | ge, le, eq, ne, gt, lt |
|</br>| note | filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith
|</br>| groups | expand | | |</br>.
:type filter: str
:param top: Number of records to return.
:type top: int
:param skip: Number of records to skip.
:type skip: int
:param expand_groups: Detailed Group in response.
:type expand_groups: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either UserCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~api_management_client.models.UserCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.UserCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_service_request(
resource_group_name=resource_group_name,
service_name=service_name,
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
skip=skip,
expand_groups=expand_groups,
template_url=self.list_by_service.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_service_request(
resource_group_name=resource_group_name,
service_name=service_name,
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
skip=skip,
expand_groups=expand_groups,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("UserCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_service.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/users'} # type: ignore
@distributed_trace_async
async def get_entity_tag(
self,
resource_group_name: str,
service_name: str,
user_id: str,
**kwargs: Any
) -> bool:
"""Gets the entity state (Etag) version of the user specified by its identifier.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param user_id: User identifier. Must be unique in the current API Management service instance.
:type user_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_entity_tag_request(
resource_group_name=resource_group_name,
service_name=service_name,
user_id=user_id,
subscription_id=self._config.subscription_id,
template_url=self.get_entity_tag.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
if cls:
return cls(pipeline_response, None, response_headers)
return 200 <= response.status_code <= 299
get_entity_tag.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/users/{userId}'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
service_name: str,
user_id: str,
**kwargs: Any
) -> "_models.UserContract":
"""Gets the details of the user specified by its identifier.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param user_id: User identifier. Must be unique in the current API Management service instance.
:type user_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: UserContract, or the result of cls(response)
:rtype: ~api_management_client.models.UserContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.UserContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
service_name=service_name,
user_id=user_id,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('UserContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/users/{userId}'} # type: ignore
@distributed_trace_async
async def create_or_update(
self,
resource_group_name: str,
service_name: str,
user_id: str,
parameters: "_models.UserCreateParameters",
notify: Optional[bool] = None,
if_match: Optional[str] = None,
**kwargs: Any
) -> "_models.UserContract":
"""Creates or Updates a user.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param user_id: User identifier. Must be unique in the current API Management service instance.
:type user_id: str
:param parameters: Create or update parameters.
:type parameters: ~api_management_client.models.UserCreateParameters
:param notify: Send an Email notification to the User.
:type notify: bool
:param if_match: ETag of the Entity. Not required when creating an entity, but required when
updating an entity.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: UserContract, or the result of cls(response)
:rtype: ~api_management_client.models.UserContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.UserContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'UserCreateParameters')
request = build_create_or_update_request(
resource_group_name=resource_group_name,
service_name=service_name,
user_id=user_id,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
notify=notify,
if_match=if_match,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 200:
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('UserContract', pipeline_response)
if response.status_code == 201:
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('UserContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/users/{userId}'} # type: ignore
@distributed_trace_async
async def update(
self,
resource_group_name: str,
service_name: str,
user_id: str,
if_match: str,
parameters: "_models.UserUpdateParameters",
**kwargs: Any
) -> "_models.UserContract":
"""Updates the details of the user specified by its identifier.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param user_id: User identifier. Must be unique in the current API Management service instance.
:type user_id: str
:param if_match: ETag of the Entity. ETag should match the current entity state from the header
response of the GET request or it should be * for unconditional update.
:type if_match: str
:param parameters: Update parameters.
:type parameters: ~api_management_client.models.UserUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: UserContract, or the result of cls(response)
:rtype: ~api_management_client.models.UserContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.UserContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'UserUpdateParameters')
request = build_update_request(
resource_group_name=resource_group_name,
service_name=service_name,
user_id=user_id,
subscription_id=self._config.subscription_id,
content_type=content_type,
if_match=if_match,
json=_json,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag'] = self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('UserContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/users/{userId}'} # type: ignore
@distributed_trace_async
async def delete(
self,
resource_group_name: str,
service_name: str,
user_id: str,
if_match: str,
delete_subscriptions: Optional[bool] = None,
notify: Optional[bool] = None,
app_type: Optional[Union[str, "_models.AppType"]] = None,
**kwargs: Any
) -> None:
"""Deletes specific user.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param user_id: User identifier. Must be unique in the current API Management service instance.
:type user_id: str
:param if_match: ETag of the Entity. ETag should match the current entity state from the header
response of the GET request or it should be * for unconditional update.
:type if_match: str
:param delete_subscriptions: Whether to delete user's subscription or not.
:type delete_subscriptions: bool
:param notify: Send an Account Closed Email notification to the User.
:type notify: bool
:param app_type: Determines the type of application which sends the create user request. Default
is legacy publisher portal.
:type app_type: str or ~api_management_client.models.AppType
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
resource_group_name=resource_group_name,
service_name=service_name,
user_id=user_id,
subscription_id=self._config.subscription_id,
if_match=if_match,
delete_subscriptions=delete_subscriptions,
notify=notify,
app_type=app_type,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/users/{userId}'} # type: ignore
@distributed_trace_async
async def generate_sso_url(
self,
resource_group_name: str,
service_name: str,
user_id: str,
**kwargs: Any
) -> "_models.GenerateSsoUrlResult":
"""Retrieves a redirection URL containing an authentication token for signing a given user into
the developer portal.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param user_id: User identifier. Must be unique in the current API Management service instance.
:type user_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GenerateSsoUrlResult, or the result of cls(response)
:rtype: ~api_management_client.models.GenerateSsoUrlResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GenerateSsoUrlResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_generate_sso_url_request(
resource_group_name=resource_group_name,
service_name=service_name,
user_id=user_id,
subscription_id=self._config.subscription_id,
template_url=self.generate_sso_url.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('GenerateSsoUrlResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
generate_sso_url.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/users/{userId}/generateSsoUrl'} # type: ignore
@distributed_trace_async
async def get_shared_access_token(
self,
resource_group_name: str,
service_name: str,
user_id: str,
parameters: "_models.UserTokenParameters",
**kwargs: Any
) -> "_models.UserTokenResult":
"""Gets the Shared Access Authorization Token for the User.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param user_id: User identifier. Must be unique in the current API Management service instance.
:type user_id: str
:param parameters: Create Authorization Token parameters.
:type parameters: ~api_management_client.models.UserTokenParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: UserTokenResult, or the result of cls(response)
:rtype: ~api_management_client.models.UserTokenResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.UserTokenResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'UserTokenParameters')
request = build_get_shared_access_token_request(
resource_group_name=resource_group_name,
service_name=service_name,
user_id=user_id,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.get_shared_access_token.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('UserTokenResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_shared_access_token.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/users/{userId}/token'} # type: ignore
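# --- Usage sketch (illustrative, not part of the generated client) ---------
# A minimal example of driving the async user operations defined above. The
# client class name (ApiManagementClient), the `user` attribute it exposes,
# and the credential type follow the usual azure-mgmt SDK layout and are
# assumptions here; adjust them to the actual package entry points.
#
#   import asyncio
#   from azure.identity.aio import DefaultAzureCredential
#
#   async def main():
#       async with DefaultAzureCredential() as credential:
#           client = ApiManagementClient(credential, subscription_id="<subscription-id>")
#           user = await client.user.create_or_update(
#               resource_group_name="my-rg",
#               service_name="my-apim",
#               user_id="new-user",
#               parameters=_models.UserCreateParameters(
#                   email="user@example.com",
#                   first_name="First",
#                   last_name="Last",
#               ),
#           )
#           print(user.email)
#
#   asyncio.run(main())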
| |
# -*- coding: utf-8 -*-
#
# nova documentation build configuration file, created by
# sphinx-quickstart on Sat May 1 15:17:47 2010.
#
# This file is execfile()d with the current directory set to
# its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'ext.nova_todo',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.ifconfig',
'sphinx.ext.graphviz']
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
# Changing the path so that the Hudson build output contains GA code
# and the source docs do not contain the code so local, offline sphinx builds
# are "clean."
templates_path = []
if os.getenv('HUDSON_PUBLISH_DOCS'):
templates_path = ['_ga', '_templates']
else:
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'nova'
copyright = u'2010-present, OpenStack Foundation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from nova.version import version_info
# The full version, including alpha/beta/rc tags.
release = version_info.release_string()
# The short X.Y version.
version = version_info.version_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
unused_docs = [
'api_ext/rst_extension_template',
'vmwareapi_readme',
'installer',
]
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use
# for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['nova.']
# -- Options for man page output ----------------------------------------------
# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
man_pages = [
('man/nova-all', 'nova-all', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-api-ec2', 'nova-api-ec2', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-api-metadata', 'nova-api-metadata', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-api-os-compute', 'nova-api-os-compute',
u'Cloud controller fabric', [u'OpenStack'], 1),
('man/nova-api', 'nova-api', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-cert', 'nova-cert', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-compute', 'nova-compute', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-console', 'nova-console', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-consoleauth', 'nova-consoleauth', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-dhcpbridge', 'nova-dhcpbridge', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-manage', 'nova-manage', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-network', 'nova-network', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-novncproxy', 'nova-novncproxy', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-spicehtml5proxy', 'nova-spicehtml5proxy', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-objectstore', 'nova-objectstore', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-rootwrap', 'nova-rootwrap', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-rpc-zmq-receiver', 'nova-rpc-zmq-receiver', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-scheduler', 'nova-scheduler', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-xvpvncproxy', 'nova-xvpvncproxy', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-conductor', 'nova-conductor', u'Cloud controller fabric',
[u'OpenStack'], 1),
]
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme_path = ["."]
html_theme = '_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1"
html_last_updated_fmt = os.popen(git_cmd).read()
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'novadoc'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'Nova.tex', u'Nova Documentation',
u'Anso Labs, LLC', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('http://docs.python.org/', None),
'swift': ('http://swift.openstack.org', None)}
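# Usage note (illustrative): with the intersphinx mapping above, the RST
# sources can cross-reference external documentation, e.g. a reference such
# as :py:func:`os.path.abspath` resolves against docs.python.org, provided
# the target inventory is reachable at build time.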
| |
import email.utils
import email.encoders
import logging
import mimetypes
import imghdr
from contextlib import closing
from cStringIO import StringIO
from os import path
from email.mime import audio
from flanker.utils import is_pure_ascii
from flanker.mime import bounce
from flanker.mime.message import headers, charsets
from flanker.mime.message.headers import (WithParams, ContentType, MessageId,
Subject)
from flanker.mime.message.headers.parametrized import fix_content_type
from flanker.mime.message.errors import EncodingError, DecodingError
log = logging.getLogger(__name__)
CTE = WithParams('7bit', {})
class Stream(object):
def __init__(self, content_type, start, end, string, stream):
self.content_type = content_type
self.start = start
self.end = end
self.string = string
self.stream = stream
self._headers = None
self._body_start = None
self._body = None
self._body_changed = False
self.size = len(self.string)
@property
def headers(self):
self._load_headers()
return self._headers
@property
def body(self):
self._load_body()
return self._body
@body.setter
def body(self, value):
self._set_body(value)
def read_message(self):
self.stream.seek(self.start)
return self.stream.read(self.end - self.start + 1)
def read_body(self):
self._load_headers()
self.stream.seek(self._body_start)
return self.stream.read(self.end - self._body_start + 1)
def _load_headers(self):
if self._headers is None:
self.stream.seek(self.start)
self._headers = headers.MimeHeaders.from_stream(self.stream)
self._body_start = self.stream.tell()
def _load_body(self):
if self._body is None:
self._load_headers()
self.stream.seek(self._body_start)
self._body = decode_body(
self.content_type,
self.headers.get('Content-Transfer-Encoding', CTE).value,
self.stream.read(self.end - self._body_start + 1))
def _set_body(self, value):
self._body = value
self._body_changed = True
def headers_changed(self, ignore_prepends=False):
return self._headers is not None and self._headers.have_changed(ignore_prepends)
def body_changed(self):
return self._body_changed
def adjust_content_type(content_type, body=None, filename=None):
"""Adjust content type based on filename or body contents
"""
if filename and str(content_type) == 'application/octet-stream':
# check if our internal guess returns anything
guessed = _guess_type(filename)
if guessed:
return guessed
# our internal attempt didn't return anything, use mimetypes
guessed = mimetypes.guess_type(filename)[0]
if guessed:
main, sub = fix_content_type(
guessed, default=('application', 'octet-stream'))
content_type = ContentType(main, sub)
if content_type.main == 'image' and body:
sub = imghdr.what(None, body)
if sub:
content_type = ContentType('image', sub)
elif content_type.main == 'audio' and body:
sub = audio._whatsnd(body)
if sub:
content_type = ContentType('audio', sub)
return content_type
def _guess_type(filename):
"""
Internal content type guesser. This is used to hard-code certain tricky
content types that heuristic content-type checkers get wrong.
"""
if filename.endswith(".bz2"):
return ContentType("application", "x-bzip2")
if filename.endswith(".gz"):
return ContentType("application", "x-gzip")
return None
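# --- Usage sketch (illustrative) --------------------------------------------
# adjust_content_type() refines a generic application/octet-stream type when
# the filename or the body gives a better hint, e.g. for a gzip attachment:
#
#   ctype = adjust_content_type(
#       ContentType('application', 'octet-stream'), filename='backup.tar.gz')
#   # -> ContentType('application', 'x-gzip'), via _guess_type() above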
class Body(object):
def __init__(
self, content_type, body, charset=None, disposition=None, filename=None):
self.headers = headers.MimeHeaders()
self.body = body
self.disposition = disposition or ('attachment' if filename else None)
self.filename = filename
self.size = len(body)
if self.filename:
self.filename = path.basename(self.filename)
content_type = adjust_content_type(content_type, body, filename)
if content_type.main == 'text':
# the text should have a charset
if not charset:
charset = "utf-8"
# it should be stored as unicode. period
self.body = charsets.convert_to_unicode(charset, body)
# let's be simple when possible
if charset != 'ascii' and is_pure_ascii(body):
charset = 'ascii'
self.headers['MIME-Version'] = '1.0'
self.headers['Content-Type'] = content_type
if charset:
content_type.params['charset'] = charset
if self.disposition:
self.headers['Content-Disposition'] = WithParams(disposition)
if self.filename:
self.headers['Content-Disposition'].params['filename'] = self.filename
self.headers['Content-Type'].params['name'] = self.filename
@property
def content_type(self):
return self.headers['Content-Type']
def headers_changed(self, ignore_prepends=False):
return True
def body_changed(self):
return True
class Part(object):
def __init__(self, ctype):
self.headers = headers.MimeHeaders()
self.body = None
self.headers['Content-Type'] = ctype
self.headers['MIME-Version'] = '1.0'
self.size = 0
@property
def content_type(self):
return self.headers['Content-Type']
def headers_changed(self, ignore_prepends=False):
return True
def body_changed(self):
return True
class RichPartMixin(object):
def __init__(self, is_root=False):
self._is_root = is_root
self._bounce = None
@property
def message_id(self):
return MessageId.from_string(self.headers.get('Message-Id', ''))
@message_id.setter
def message_id(self, value):
if not MessageId.is_valid(value):
raise ValueError("invalid message id format")
self.headers['Message-Id'] = "<{0}>".format(value)
@property
def subject(self):
return self.headers.get('Subject', '')
@property
def clean_subject(self):
"""
Subject without re, fw, fwd, HA prefixes
"""
return Subject(self.subject).strip_replies()
@property
def references(self):
"""
Returns a list of message ids referencing the message in accordance to
the Jamie Zawinski threading algorithm.
See http://www.jwz.org/doc/threading.html for details.
"""
refs = list(MessageId.scan(self.headers.get('References', '')))
if not refs:
reply = MessageId.from_string(self.headers.get('In-Reply-To', ''))
if reply:
refs.append(reply[0])
return refs
@property
def detected_file_name(self):
"""
Detects file name based on content type or part name.
"""
ctype = self.content_type
file_name = ctype.params.get('name', '') or ctype.params.get('filename', '')
value, params = self.content_disposition
if value == 'attachment':
file_name = params.get('filename', '') or file_name
# filenames can be presented as tuples, like:
# ('us-ascii', 'en-us', 'image.jpg')
if isinstance(file_name, tuple) and len(file_name) == 3:
# encoding permissible to be empty
encoding = file_name[0]
if encoding:
file_name = file_name[2].decode(encoding)
else:
file_name = file_name[2]
file_name = headers.mime_to_unicode(file_name)
return file_name
@property
def detected_format(self):
return self.detected_content_type.format_type
@property
def detected_subtype(self):
return self.detected_content_type.subtype
@property
def detected_content_type(self):
"""
Returns content type based on the body content, the file name and the
original content type provided inside the message.
"""
return adjust_content_type(self.content_type,
filename=self.detected_file_name)
def is_body(self):
return (not self.detected_file_name and
(self.content_type.format_type == 'text' or
self.content_type.format_type == 'message'))
def is_root(self):
return self._is_root
def set_root(self, val):
self._is_root = bool(val)
def walk(self, with_self=False, skip_enclosed=False):
"""
Returns iterator object traversing through the message parts. If the
top level part needs to be included then set the `with_self` to `True`.
If the parts of the enclosed messages should not be included then set
the `skip_enclosed` parameter to `True`.
"""
if with_self:
yield self
if self.content_type.is_multipart():
for p in self.parts:
yield p
for x in p.walk(with_self=False, skip_enclosed=skip_enclosed):
yield x
elif self.content_type.is_message_container() and not skip_enclosed:
yield self.enclosed
for p in self.enclosed.walk(with_self=False):
yield p
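# --- Usage sketch (illustrative) --------------------------------------------
# walk() yields every nested part; combined with is_attachment() it gives a
# simple way to pull attachments out of a parsed message. `msg` is assumed
# to be a MimePart produced by this package's parser.
#
#   for part in msg.walk(with_self=True):
#       if part.is_attachment():
#           store(part.detected_file_name, part.body)   # store() is hypothetical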
def is_attachment(self):
return self.content_disposition[0] == 'attachment'
def is_inline(self):
return self.content_disposition[0] == 'inline'
def is_delivery_notification(self):
"""
Tells whether a message is a system delivery notification.
"""
content_type = self.content_type
return (content_type == 'multipart/report'
and content_type.params.get('report-type') == 'delivery-status')
def get_attached_message(self):
"""
Returns attached message if found, `None` otherwise.
"""
try:
for part in self.walk(with_self=True):
if part.content_type == 'message/rfc822':
for p in part.walk():
return p
except Exception:
log.exception("Failed to get attached message")
return None
def remove_headers(self, *header_names):
"""
Removes all passed headers name in one operation.
"""
for header_name in header_names:
if header_name in self.headers:
del self.headers[header_name]
@property
def bounce(self):
"""
If the message is NOT a bounce, then `None` is returned. Otherwise
it returns a bounce object that provides the values:
* score - a value between 0 and 1, where 0 means that the message is
definitely not a bounce, and 1 means that it is definitely a
bounce;
* status - delivery status;
* notification - human readable description;
* diagnostic_code - smtp diagnostic codes;
Can raise MimeError if the MIME structure is broken.
"""
if not self._bounce:
self._bounce = bounce.detect(self)
return self._bounce
def is_bounce(self, probability=0.3):
"""
Determines whether the message is a bounce message based on
given probability. 0.3 is a good conservative base.
"""
return self.bounce.score > probability
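# e.g. `msg.is_bounce(0.5)` treats only fairly confident detections as
# bounces (illustrative; `msg` is assumed to be a parsed message part).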
def __str__(self):
return "({0})".format(self.content_type)
class MimePart(RichPartMixin):
def __init__(self, container, parts=None, enclosed=None, is_root=False):
RichPartMixin.__init__(self, is_root)
self._container = container
self.parts = parts or []
self.enclosed = enclosed
@property
def size(self):
""" Returns message size in bytes"""
if self.is_root() and not self.was_changed():
if isinstance(self._container, Stream):
return self._container.size
else:
return sum(part._container.size
for part in self.walk(with_self=True))
else:
with closing(_CounterIO()) as out:
self.to_stream(out)
return out.getvalue()
@property
def headers(self):
"""Returns multi dictionary with headers converted to unicode,
headers like Content-Type, Content-Disposition are tuples
("value", {"param": "val"})"""
return self._container.headers
@property
def content_type(self):
""" returns object with properties:
main - main part of content type
sub - subpart of content type
params - dictionary with parameters
"""
return self._container.content_type
@property
def content_disposition(self):
""" returns tuple (value, params) """
return self.headers.get('Content-Disposition', WithParams(None))
@property
def content_encoding(self):
return self.headers.get(
'Content-Transfer-Encoding', WithParams('7bit'))
@content_encoding.setter
def content_encoding(self, value):
self.headers['Content-Transfer-Encoding'] = value
@property
def body(self):
""" returns decoded body """
if self.content_type.is_singlepart()\
or self.content_type.is_delivery_status():
return self._container.body
@body.setter
def body(self, value):
if self.content_type.is_singlepart()\
or self.content_type.is_delivery_status():
self._container.body = value
@property
def charset(self):
return self.content_type.get_charset()
@charset.setter
def charset(self, value):
charset = value.lower()
self.content_type.set_charset(value)
if 'Content-Type' not in self.headers:
self.headers['Content-Type'] = ContentType('text', 'plain', {})
self.headers['Content-Type'].params['charset'] = charset
self.headers.changed = True
def to_string(self):
"""
Returns a MIME representation of the message.
"""
# This optimisation matters *a lot*: when there are no prepended headers
# we submit the original string untouched - no copying, no alteration.
if self.is_root() and not self.was_changed(ignore_prepends=True):
with closing(StringIO()) as out:
self.headers.to_stream(out, prepends_only=True)
return out.getvalue() + self._container.string
else:
with closing(StringIO()) as out:
self.to_stream(out)
return out.getvalue()
def to_stream(self, out):
"""
Serializes the message using a file like object.
"""
if not self.was_changed():
out.write(self._container.read_message())
else:
try:
original_position = out.tell()
self._to_stream_when_changed(out)
except DecodingError:
out.seek(original_position)
out.write(self._container.read_message())
def was_changed(self, ignore_prepends=False):
if self._container.headers_changed(ignore_prepends):
return True
if self.content_type.is_singlepart():
if self._container.body_changed():
return True
return False
elif self.content_type.is_multipart():
return any(p.was_changed() for p in self.parts)
elif self.content_type.is_message_container():
return self.enclosed.was_changed()
def to_python_message(self):
return email.message_from_string(self.to_string())
def append(self, *messages):
for m in messages:
self.parts.append(m)
m.set_root(False)
def enclose(self, message):
self.enclosed = message
message.set_root(False)
def _to_stream_when_changed(self, out):
ctype = self.content_type
if ctype.is_singlepart():
if self._container.body_changed():
charset, encoding, body = encode_body(self)
if charset:
self.charset = charset
self.content_encoding = WithParams(encoding)
else:
body = self._container.read_body()
# RFC allows subparts without headers
if self.headers:
self.headers.to_stream(out)
elif self.is_root():
raise EncodingError("Root message should have headers")
out.write(CRLF)
out.write(body)
else:
self.headers.to_stream(out)
out.write(CRLF)
if ctype.is_multipart():
boundary = ctype.get_boundary_line()
for index, part in enumerate(self.parts):
out.write(
(CRLF if index != 0 else "") + boundary + CRLF)
part.to_stream(out)
out.write(CRLF + ctype.get_boundary_line(final=True) + CRLF)
elif ctype.is_message_container():
self.enclosed.to_stream(out)
def decode_body(content_type, content_encoding, body):
# decode the transfer encoding
try:
body = decode_transfer_encoding(content_encoding, body)
except Exception:
raise DecodingError("Failed to decode body")
# decode the charset next
return decode_charset(content_type, body)
def decode_transfer_encoding(encoding, body):
if encoding == 'base64':
return email.utils._bdecode(body)
elif encoding == 'quoted-printable':
return email.utils._qdecode(body)
else:
return body
def decode_charset(ctype, body):
if ctype.main != 'text':
return body
charset = ctype.get_charset()
body = charsets.convert_to_unicode(charset, body)
# for text/html unicode bodies, replace the non-breaking space (0xA0)
# with a regular space - Outlook is reported to have a bug there
if ctype.sub == 'html' and charset == 'utf-8':
# Outlook bug
body = body.replace(u'\xa0', u' ')
return body
def encode_body(part):
content_type = part.content_type
content_encoding = part.content_encoding.value
body = part._container.body
charset = content_type.get_charset()
if content_type.main == 'text':
charset, body = encode_charset(charset, body)
content_encoding = choose_text_encoding(
charset, content_encoding, body)
else:
content_encoding = 'base64'
body = encode_transfer_encoding(content_encoding, body)
return charset, content_encoding, body
def encode_charset(preferred_charset, text):
try:
# fall back to ascii when no charset was supplied; if the text cannot
# be encoded with the chosen charset, re-encode it as utf-8 below
charset = preferred_charset or 'ascii'
text = text.encode(charset)
except (UnicodeError, LookupError, TypeError):
charset = 'utf-8'
text = text.encode(charset)
return charset, text
def encode_transfer_encoding(encoding, body):
if encoding == 'quoted-printable':
return email.encoders._qencode(body)
elif encoding == 'base64':
return email.encoders._bencode(body)
else:
return body
def choose_text_encoding(charset, preferred_encoding, body):
if charset in ('ascii', 'iso-8859-1', 'us-ascii'):
if has_long_lines(body):
return stronger_encoding(preferred_encoding, 'quoted-printable')
else:
return preferred_encoding
else:
return stronger_encoding(preferred_encoding, 'base64')
def stronger_encoding(a, b):
weights = {'7bit': 0, 'quoted-printable': 1, 'base64': 1, '8bit': 3}
if weights.get(a, -1) >= weights[b]:
return a
return b
def has_long_lines(text, max_line_len=599):
'''
Returns True if text contains lines longer than a certain length.
Some SMTP servers (Exchange) refuse to accept messages "wider" than
a certain length.
'''
if not text:
return False
for line in text.splitlines():
if len(line) >= max_line_len:
return True
return False
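# --- Worked example (illustrative) -------------------------------------------
# choose_text_encoding() keeps the preferred encoding for short Latin text,
# upgrades to quoted-printable when lines get too long, and falls back to
# base64 for any other charset:
#
#   choose_text_encoding('ascii', '7bit', 'short line')   -> '7bit'
#   choose_text_encoding('ascii', '7bit', 'x' * 700)      -> 'quoted-printable'
#   choose_text_encoding('koi8-r', '7bit', body)          -> 'base64'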
CRLF = "\r\n"
class _CounterIO(object):
def __init__(self):
self.length = 0
def tell(self):
return self.length
def write(self, s):
self.length += len(s)
def seek(self, p):
self.length = p
def getvalue(self):
return self.length
def close(self):
pass
| |
# Copyright 2014 Red Hat, Inc.
#
# Author: Rich Megginson <rmeggins@redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pprint
import time
import requests
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
from oslo_utils import importutils
from designate import exceptions
from designate.backend import base
from designate.i18n import _LE
LOG = logging.getLogger(__name__)
IPA_DEFAULT_PORT = 443
class IPABaseError(exceptions.Backend):
error_code = 500
error_type = 'unknown_ipa_error'
class IPAAuthError(IPABaseError):
error_type = 'authentication_error'
# map of designate domain parameters to the corresponding
# ipa parameter
# NOTE: ipa manages serial, and does not honor
# increment_serial=False - this means the designate serial
# and the ipa serial will diverge if updates are made
# using increment_serial=False
domain2ipa = {'ttl': 'dnsttl', 'email': 'idnssoarname',
'serial': 'idnssoaserial', 'expire': 'idnssoaexpire',
'minimum': 'idnssoaminimum', 'refresh': 'idnssoarefresh',
'retry': 'idnssoaretry'}
# map of designate record types to ipa
rectype2iparectype = {'A': ('arecord', '%(data)s'),
'AAAA': ('aaaarecord', '%(data)s'),
'MX': ('mxrecord', '%(data)s'),
'CNAME': ('cnamerecord', '%(data)s'),
'TXT': ('txtrecord', '%(data)s'),
'SRV': ('srvrecord', '%(data)s'),
'NS': ('nsrecord', '%(data)s'),
'PTR': ('ptrrecord', '%(data)s'),
'SPF': ('spfrecord', '%(data)s'),
'SSHFP': ('sshfprecord', '%(data)s')}
IPA_INVALID_DATA = 3009
IPA_NOT_FOUND = 4001
IPA_DUPLICATE = 4002
IPA_NO_CHANGES = 4202
class IPAUnknownError(IPABaseError):
pass
class IPACommunicationFailure(IPABaseError):
error_type = 'communication_failure'
pass
class IPAInvalidData(IPABaseError):
error_type = 'invalid_data'
pass
class IPADomainNotFound(IPABaseError):
error_type = 'domain_not_found'
pass
class IPARecordNotFound(IPABaseError):
error_type = 'record_not_found'
pass
class IPADuplicateDomain(IPABaseError):
error_type = 'duplicate_domain'
pass
class IPADuplicateRecord(IPABaseError):
error_type = 'duplicate_record'
pass
ipaerror2exception = {
IPA_INVALID_DATA: {
'dnszone': IPAInvalidData,
'dnsrecord': IPAInvalidData
},
IPA_NOT_FOUND: {
'dnszone': IPADomainNotFound,
'dnsrecord': IPARecordNotFound
},
IPA_DUPLICATE: {
'dnszone': IPADuplicateDomain,
'dnsrecord': IPADuplicateRecord
},
# NOTE: Designate will send updates with all fields
# even if they have not changed value. If none of
# the given values has changed, IPA will return
# this error code - this can be ignored
IPA_NO_CHANGES: {
'dnszone': None,
'dnsrecord': None
}
}
def abs2rel_name(domain, rsetname):
"""convert rsetname from absolute form foo.bar.tld. to the name
relative to the domain. For IPA, if domain is rsetname, then use
"@" as the relative name. If rsetname does not end with a subset
of the domain, then just return the raw rsetname
"""
if rsetname.endswith(domain):
idx = rsetname.rfind(domain)
if idx == 0:
rsetname = "@"
elif idx > 0:
rsetname = rsetname[:idx].rstrip(".")
return rsetname
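# --- Worked examples (illustrative) ------------------------------------------
#   abs2rel_name('example.com.', 'www.example.com.')  -> 'www'
#   abs2rel_name('example.com.', 'example.com.')      -> '@'
#   abs2rel_name('example.com.', 'other.org.')        -> 'other.org.'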
class IPABackend(base.Backend):
__plugin_name__ = 'ipa'
@classmethod
def get_cfg_opts(cls):
group = cfg.OptGroup(
name='backend:ipa', title="Configuration for IPA Backend"
)
opts = [
cfg.StrOpt('ipa-host', default='localhost.localdomain',
help='IPA RPC listener host - must be FQDN'),
cfg.IntOpt('ipa-port', default=IPA_DEFAULT_PORT,
help='IPA RPC listener port'),
cfg.StrOpt('ipa-client-keytab',
help='Kerberos client keytab file'),
cfg.StrOpt('ipa-auth-driver-class',
default='designate.backend.impl_ipa.auth.IPAAuth',
help='Class that implements the authentication '
'driver for IPA'),
cfg.StrOpt('ipa-ca-cert',
help='CA certificate for use with https to IPA'),
cfg.StrOpt('ipa-base-url', default='/ipa',
help='Base URL for IPA RPC, relative to host[:port]'),
cfg.StrOpt('ipa-json-url',
default='/json',
help='URL for IPA JSON RPC, relative to IPA base URL'),
cfg.IntOpt('ipa-connect-retries', default=1,
help='How many times Designate will attempt to retry '
'the connection to IPA before giving up'),
cfg.BoolOpt('ipa-force-ns-use', default=False,
help='IPA requires that a specified '
'name server or SOA MNAME is resolvable - if this '
'option is set, Designate will force IPA to use a '
'given name server even if it is not resolvable'),
cfg.StrOpt('ipa-version', default='2.65',
help='IPA RPC JSON version')
]
return [(group, opts)]
def start(self):
LOG.debug('IPABackend start')
self.request = requests.Session()
authclassname = cfg.CONF[self.name].ipa_auth_driver_class
authclass = importutils.import_class(authclassname)
self.request.auth = \
authclass(cfg.CONF[self.name].ipa_client_keytab,
cfg.CONF[self.name].ipa_host)
ipa_base_url = cfg.CONF[self.name].ipa_base_url
if ipa_base_url.startswith("http"): # full URL
self.baseurl = ipa_base_url
else: # assume relative to https://host[:port]
self.baseurl = "https://" + cfg.CONF[self.name].ipa_host
ipa_port = cfg.CONF[self.name].ipa_port
if ipa_port != IPA_DEFAULT_PORT:
self.baseurl += ":" + str(ipa_port)
self.baseurl += ipa_base_url
ipa_json_url = cfg.CONF[self.name].ipa_json_url
if ipa_json_url.startswith("http"): # full URL
self.jsonurl = ipa_json_url
else: # assume relative to https://host[:port]
self.jsonurl = self.baseurl + ipa_json_url
xtra_hdrs = {'Content-Type': 'application/json',
'Referer': self.baseurl}
self.request.headers.update(xtra_hdrs)
self.request.verify = cfg.CONF[self.name].ipa_ca_cert
self.ntries = cfg.CONF[self.name].ipa_connect_retries
self.force = cfg.CONF[self.name].ipa_force_ns_use
def create_zone(self, context, zone):
LOG.debug('Create Zone %r' % zone)
ipareq = {'method': 'dnszone_add', 'id': 0}
params = [zone['name']]
servers = self.central_service.get_zone_ns_records(self.admin_context)
# just use the first one for zone creation - add the others
# later, below - use force because designate assumes the NS
# already exists somewhere, is resolvable, and already has
# an A/AAAA record
args = {'idnssoamname': servers[0]['name']}
if self.force:
args['force'] = True
for dkey, ipakey in list(domain2ipa.items()):
if dkey in zone:
args[ipakey] = zone[dkey]
ipareq['params'] = [params, args]
self._call_and_handle_error(ipareq)
# add NS records for all of the other servers
if len(servers) > 1:
ipareq = {'method': 'dnsrecord_add', 'id': 0}
params = [zone['name'], "@"]
args = {'nsrecord': servers[1:]}
if self.force:
args['force'] = True
ipareq['params'] = [params, args]
self._call_and_handle_error(ipareq)
def update_zone(self, context, zone):
LOG.debug('Update Zone %r' % zone)
ipareq = {'method': 'dnszone_mod', 'id': 0}
params = [zone['name']]
args = {}
for dkey, ipakey in list(domain2ipa.items()):
if dkey in zone:
args[ipakey] = zone[dkey]
ipareq['params'] = [params, args]
self._call_and_handle_error(ipareq)
def delete_zone(self, context, zone):
LOG.debug('Delete Zone %r' % zone)
ipareq = {'method': 'dnszone_del', 'id': 0}
params = [zone['name']]
args = {}
ipareq['params'] = [params, args]
self._call_and_handle_error(ipareq)
def create_recordset(self, context, domain, recordset):
LOG.debug('Discarding create_recordset call, not-applicable')
def update_recordset(self, context, domain, recordset):
LOG.debug('Update RecordSet %r / %r' % (domain, recordset))
# designate allows updating a recordset even when there are no
# records in it - we should ignore this case
if not self._recset_has_records(context, recordset):
LOG.debug('No records in %r / %r - skipping' % (domain, recordset))
return
# The only thing IPA allows is to change the ttl, since that is
# stored "per recordset"
if 'ttl' not in recordset:
return
ipareq = {'method': 'dnsrecord_mod', 'id': 0}
dname = domain['name']
rsetname = abs2rel_name(dname, recordset['name'])
params = [domain['name'], rsetname]
args = {'dnsttl': recordset['ttl']}
ipareq['params'] = [params, args]
self._call_and_handle_error(ipareq)
def delete_recordset(self, context, domain, recordset):
LOG.debug('Delete RecordSet %r / %r' % (domain, recordset))
# designate allows deleting a recordset even when there are no
# records in it - we should ignore this case
if not self._recset_has_records(context, recordset):
LOG.debug('No records in %r / %r - skipping' % (domain, recordset))
return
ipareq = {'method': 'dnsrecord_mod', 'id': 0}
dname = domain['name']
rsetname = abs2rel_name(dname, recordset['name'])
params = [domain['name'], rsetname]
rsettype = rectype2iparectype[recordset['type']][0]
args = {rsettype: None}
ipareq['params'] = [params, args]
self._call_and_handle_error(ipareq)
def create_record(self, context, domain, recordset, record):
LOG.debug('Create Record %r / %r / %r' % (domain, recordset, record))
ipareq = {'method': 'dnsrecord_add', 'id': 0}
params, args = self._rec_to_ipa_rec(domain, recordset, [record])
ipareq['params'] = [params, args]
self._call_and_handle_error(ipareq)
def update_record(self, context, domain, recordset, record):
LOG.debug('Update Record %r / %r / %r' % (domain, recordset, record))
# for modify operations - IPA does not support a way to change
# a particular field in a given record - e.g. for an MX record
# with several values, IPA stores them like this:
# name: "server1.local."
# data: ["10 mx1.server1.local.", "20 mx2.server1.local."]
# we could do a search of IPA, compare the values in the
# returned array - but that adds an additional round trip
# and is error prone
# instead, we just get all of the current values and send
# them in one big modify
criteria = {'recordset_id': record['recordset_id']}
reclist = self.central_service.find_records(self.admin_context,
criteria)
ipareq = {'method': 'dnsrecord_mod', 'id': 0}
params, args = self._rec_to_ipa_rec(domain, recordset, reclist)
ipareq['params'] = [params, args]
self._call_and_handle_error(ipareq)
def delete_record(self, context, domain, recordset, record):
LOG.debug('Delete Record %r / %r / %r' % (domain, recordset, record))
ipareq = {'method': 'dnsrecord_del', 'id': 0}
params, args = self._rec_to_ipa_rec(domain, recordset, [record])
args['del_all'] = 0
ipareq['params'] = [params, args]
self._call_and_handle_error(ipareq)
def ping(self, context):
LOG.debug('Ping')
# NOTE: This call will cause ipa to issue an error, but
# 1) it should not throw an exception
# 2) the response will indicate ipa is running
# 3) the bandwidth usage is minimal
ipareq = {'method': 'dnszone_show', 'id': 0}
params = ['@']
args = {}
ipareq['params'] = [params, args]
retval = {'result': True}
try:
self._call_and_handle_error(ipareq)
except Exception as e:
retval = {'result': False, 'reason': str(e)}
return retval
def _rec_to_ipa_rec(self, domain, recordset, reclist):
dname = domain['name']
rsetname = abs2rel_name(dname, recordset['name'])
params = [dname, rsetname]
rectype = recordset['type']
vals = []
for record in reclist:
vals.append(rectype2iparectype[rectype][1] % record)
args = {rectype2iparectype[rectype][0]: vals}
ttl = recordset.get('ttl') or domain.get('ttl')
if ttl is not None:
args['dnsttl'] = ttl
return params, args
def _ipa_error_to_exception(self, resp, ipareq):
exc = None
if resp['error'] is None:
return exc
errcode = resp['error']['code']
method = ipareq['method']
methtype = method.split('_')[0]
exclass = ipaerror2exception.get(errcode, {}).get(methtype,
IPAUnknownError)
if exclass:
LOG.debug("Error: ipa command [%s] returned error [%s]" %
(pprint.pformat(ipareq), pprint.pformat(resp)))
elif errcode: # not mapped
LOG.debug("Ignoring IPA error code %d: %s" %
(errcode, pprint.pformat(resp)))
return exclass
def _call_and_handle_error(self, ipareq):
if 'version' not in ipareq['params'][1]:
ipareq['params'][1]['version'] = cfg.CONF[self.name].ipa_version
need_reauth = False
while True:
status_code = 200
try:
if need_reauth:
self.request.auth.refresh_auth()
rawresp = self.request.post(self.jsonurl,
data=json.dumps(ipareq))
status_code = rawresp.status_code
except IPAAuthError:
status_code = 401
if status_code == 401:
if self.ntries == 0:
# persistent inability to auth
LOG.error(_LE("Error: could not authenticate to IPA - "
"please check for correct keytab file"))
# reset for next time
self.ntries = cfg.CONF[self.name].ipa_connect_retries
raise IPACommunicationFailure()
else:
LOG.debug("Refresh authentication")
need_reauth = True
self.ntries -= 1
time.sleep(1)
else:
# successful - reset
self.ntries = cfg.CONF[self.name].ipa_connect_retries
break
try:
resp = json.loads(rawresp.text)
except ValueError:
# response was not json - some sort of error response
LOG.debug("Error: unknown error from IPA [%s]" % rawresp.text)
raise IPAUnknownError("unable to process response from IPA")
# raise the appropriate exception, if error
exclass = self._ipa_error_to_exception(resp, ipareq)
if exclass:
# could add additional info/message to exception here
raise exclass()
return resp
def _recset_has_records(self, context, recordset):
"""Return True if the recordset has records, False otherwise"""
criteria = {'recordset_id': recordset['id']}
num = self.central_service.count_records(self.admin_context,
criteria)
return num > 0
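# --- Illustrative request payload (inferred from the code above) -------------
# The backend posts JSON-RPC requests to FreeIPA; the dnszone_add call built
# by create_zone() looks roughly like this (values are examples only):
#
#   {"method": "dnszone_add", "id": 0,
#    "params": [["example.com."],
#               {"idnssoamname": "ns1.example.com.", "force": True,
#                "idnssoaserial": 1, "version": "2.65"}]}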
| |
# Copyright 2013 Tom Ruette
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re, twitter, glob, random, time, codecs, hashlib
from geopy import geocoders
from collections import Counter
def getSettings():
""" method to read in the settings from settings.txt """
out = {}
fin = open("settings.txt", "r")
txt = fin.read()
fin.close()
regex = re.compile("convergence=(\d+)")
out["convergence"] = int(regex.findall(txt)[0])
regex = re.compile("locmin=(\d+)")
out["locmin"] = int(regex.findall(txt)[0])
regex = re.compile("new_seeds=(.+)")
out["seeds"] = regex.findall(txt)[0].split(",")
regex = re.compile("consumer_key=(.+)")
ckey = regex.findall(txt)[0]
regex = re.compile("consumer_secret=(.+)")
csecret = regex.findall(txt)[0]
regex = re.compile("access_token_key=(.+)")
atkey = regex.findall(txt)[0]
regex = re.compile("access_token_secret=(.+)")
atsecret = regex.findall(txt)[0]
out["api"] = (ckey, csecret, atkey, atsecret)
return out
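# --- Example settings.txt (illustrative layout inferred from the regexes) ----
#   convergence=1000
#   locmin=5
#   new_seeds=someuser,Brussels,otheruser,Antwerp
#   consumer_key=...
#   consumer_secret=...
#   access_token_key=...
#   access_token_secret=...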
def getPriorSeeds(f):
""" go through the seedlist and structure them as they should """
out = []
try:
fin = codecs.open(f, "r", "utf-8")
seeds = fin.readlines()
fin.close()
if len(seeds) == 0:
raise IOError
for seed in seeds:
uname = unicode(seed.split(",")[0])
loc = unicode(",".join(seed.strip().split(",")[1:]))
if loc != False:
out.append( (uname, loc) )
except IOError:
stts = getSettings()
seeds = stts["seeds"]
i = 0
haveLocs = usersByLoc(seeds)
while i < len(seeds):
loc = acceptableLocation(seeds[i+1].strip(), [], haveLocs)
if loc != False:
seed = (unicode(seeds[i].strip()), unicode(loc[0]))
out.append(seed)
i = i + 2
return out
def getNewSeeds(sample, seeds, api):
""" wrapper to get from a list of seeds (sample) new seeds that do not occur
in the given seeds (seeds) already """
out = []
i = 1
for s in sample:
i = i + 1
friends = getFriends(s, seeds, api)
remaining = set(friends) - set(seeds) - set(out)
out.extend(list(remaining))
return out
def getFriends(s, seeds, api):
""" call the twitter api for new friends from a single user """
out = []
try:
uname = unicode(s[0])
print "searching friends for", s
# call to api
rls = getRateLimitStatus(api)
if rls["resources"]["friends"]["/friends/ids"]["remaining"] > 1:
ids = api.GetFriendIDs(screen_name=uname)
else:
sleeptime = rls["resources"]["friends"]["/friends/ids"]["reset"] - time.time()
print "sleeping for", sleeptime + 100, "seconds"
time.sleep(sleeptime + 100)
ids = api.GetFriendIDs(screen_name=uname)
haveLocs = usersByLoc(seeds)
count = 1
for ajd in ids:
# call to api
try:
rls = getRateLimitStatus(api)
if rls["resources"]["users"]["/users/show/:id"]["remaining"] > 1:
friend = api.GetUser(user_id=ajd)
else:
sleeptime = rls["resources"]["users"]["/users/show/:id"]["reset"] - time.time()
print "sleeping for", sleeptime + 100, "seconds"
time.sleep(sleeptime + 100)
friend = api.GetUser(user_id=ajd)
except:
print "\tsleeping for a while to give things a bit of a break"
time.sleep(900.0)
continue
floc = unicode(friend.location)
checkedloc = acceptableLocation(floc, seeds, haveLocs)
if checkedloc != False:
print "\tadding:", unicode(friend.screen_name), floc, checkedloc, count, "of", len(ids)
out.append( (unicode(friend.screen_name), unicode(checkedloc[0])) )
count = count + 1
except:
print "\tsleeping for a while to give things a bit of a break"
time.sleep(900.0)
return out
def getLocDB():
""" read the db in which normalizations of reported locations are stored, rep
loc is key, norm loc, lat and long are values """
db = {}
try:
fin = codecs.open("locdb.txt", "r", "utf-8")
lines = fin.readlines()
fin.close()
for line in lines:
l = line.strip().split("\t")
if len(l) == 4:
db[l[0]] = [l[1], l[2], l[3]]
except IOError:
print "no location database available"
return db
def getLocDBnorm():
""" read the db in which normalizations of reported locations are stored, norm
loc is key, lat and long are values """
db = {}
try:
fin = codecs.open("locdb.txt", "r", "utf-8")
lines = fin.readlines()
fin.close()
for line in lines:
l = line.strip().split("\t")
if len(l) == 4:
db[l[1]] = [l[2], l[3]]
except IOError:
print "no location database available"
return db
def setLocDB(db):
""" store the db with normalizations of reported locations """
lines = []
for l in db.keys():
lst = [l, db[l][0], unicode(db[l][1]), unicode(db[l][2])]
line = u"\t".join(lst)
lines.append(line)
fout = codecs.open("locdb.txt", "w", "utf-8")
fout.write( u"\n".join(lines))
fout.close()
def usersByLoc(seeds):
""" turn the unsorted seedlist into a dictionary per location """
out = {}
for seed in seeds:
uname = seed[0]
loc = seed[1]
try:
out[loc].append(uname)
except KeyError:
out[loc] = [uname]
return out
def acceptableLocation(l, seeds, haveLocs):
""" check if the reported location is an acceptable location via normalization
with the google geocoder """
stts = getSettings()
locmin = stts["locmin"]
out = False
if len(l) > 0:
locdb = getLocDB()
try:
[place, lat, lng] = locdb[l]
try:
if len(haveLocs[place]) < locmin:
out = [place, lat, lng]
except:
out = [place, lat, lng]
except:
fin = codecs.open("cities.txt", "r", "utf-8")
locations = fin.readlines()
fin.close()
g = geocoders.GoogleV3()
try:
time.sleep(20.0) # sleep a bit so that we do not overdo the geocoder
place, (lat, lng) = list(g.geocode(l.encode("utf-8"),
exactly_one=False))[0]
for location in locations:
location = location.strip()
regex = re.compile(r"\b" + location + r"\b", re.IGNORECASE)
if len(regex.findall(place)) > 0:
# check if the amount of locations is not too big
try:
if len(haveLocs[place]) < locmin:
out = [place, lat, lng]
# if the location is not in haveLocs yet, it's ok
except KeyError:
out = [place, lat, lng]
locdb[l] = [place, lat, lng]
setLocDB(locdb)
except Exception:
# geocoding failed or was rate-limited - keep the default value of out
pass
return out
def saveSeeds(seeds):
""" write out the seedlist """
out = []
for seed in seeds:
out.append( seed[0].strip() + "," + seed[1].strip() )
fout = codecs.open("seedlist.txt", "w", "utf-8")
fout.write( u"\n".join(out) )
fout.close()
def getSeeds(api):
""" get the seedlist """
seeds = getPriorSeeds("seedlist.txt")
milked = []
stts = getSettings()
convergence = stts["convergence"]
while (len(seeds) < convergence):
seedsample = []
try:
seedsample = random.sample(set(seeds), 1)
except ValueError:
seedsample = seeds
milked.extend(seedsample)
newseeds = getNewSeeds(seedsample, seeds, api)
seeds.extend(newseeds)
saveSeeds(seeds)
print "there are now", len(seeds), "available"
return seeds
def getRateLimitStatus(api):
try:
return api.GetRateLimitStatus()
except:
print "sleeping for 5 minutes to give the api some rest"
time.sleep(300.00)
return getRateLimitStatus(api)
def getTweets(uname, loc, api):
""" fetch the tweets of a given user, parameter loc is the standardized
location for this user """
out = []
locdb = getLocDBnorm()
# call to api
rls = getRateLimitStatus(api)
if rls["resources"]["statuses"]["/statuses/user_timeline"]["remaining"] > 1:
tl = api.GetUserTimeline(screen_name=uname, count=200)
else:
sleeptime = rls["resources"]["statuses"]["/statuses/user_timeline"]["reset"] - time.time()
print "sleeping for", sleeptime + 100, "seconds"
time.sleep(sleeptime + 100)
tl = api.GetUserTimeline(screen_name=uname, count=200)
print "\tfound", len(tl), "statuses"
for s in tl:
source = unicode(s.source) # source
date = unicode(s.created_at) # data
identifier = unicode(s.id) # tweet id
text = unicode(s.text).replace("\n", " ").replace("\r", " ") # tweet itself
reploc = s.user.location # reported location (may be None)
(lat, lng) = locdb[loc] # geo
# check for None before converting - unicode(None) would give u'None'
if reploc is None:
reploc = u"NA"
else:
reploc = unicode(reploc)
out.append( unicode(u"<tweet user=\"" + uname + u"\" norm_loc=\"" + loc +
u"\" rep_loc=\"" + reploc + u"\" date=\"" + date +
u"\" id=\"" + identifier + u"\" lat=\"" + lat +
u"\" lng=\"" + lng + u"\">" + text + u"</tweet>") )
return out
def xmlstore(l):
""" store a list of tweets from getTweets as xml """
print "writing out to file"
out = "<tweets>\n" + "\n".join(l) + "\n</tweets"
fname = "./tweets/" + hashlib.sha224(out.encode("utf-8")).hexdigest() + ".xml"
fout = codecs.open(fname, "w", "utf-8")
fout.write(out)
fout.close()
def sortSeeds(seeds, sorter):
""" sort the seeds, currently only biglocationfirst available """
out = []
if sorter == "bigLocationsFirst":
locs = []
db = {}
for (uname, loc) in seeds:
locs.append(loc)
try:
db[loc].append(uname)
except KeyError:
db[loc] = [uname]
freqs = Counter(locs)
while freqs:
curloc = freqs.most_common(1)[0]
del freqs[curloc[0]]
for seed in db[curloc[0]]:
out.append( (seed, curloc[0]) )
return out
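# Illustration of the bigLocationsFirst ordering (made-up seeds):
#   sortSeeds([("a", "NYC"), ("b", "LA"), ("c", "NYC")], "bigLocationsFirst")
#   returns [("a", "NYC"), ("c", "NYC"), ("b", "LA")] because "NYC" is the
#   most frequent location.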
def getUserNames():
""" extract from downloaded tweets all the downloaded usernames """
out = []
fl = glob.glob("./tweets/*")
regex = re.compile("user=\"(.+?)\"")
for f in fl:
fin = codecs.open(f, "r", "utf-8")
xml = fin.read()
fin.close()
out.extend( regex.findall(xml) )
return set(out)
def export_corpus():
""" return a csv with tweet id, norm loc, lat and long """
return "NA"
def import_corpus():
""" from the output of export_corpus, download the tweet and reconstruct the
xml as before """
return "NA"
def search(regexstr):
""" search for the regex in the text of tweets """
out = []
# TODO: regex more complicated to also return metadata
regex = re.compile(r">(.*?" + regexstr + r".*?)<", re.IGNORECASE)
fl = glob.glob("./tweets/*")
for f in fl:
fin = codecs.open(f, "r", "utf-8")
xml = fin.read()
fin.close()
out.extend( regex.findall(xml) )
return list(set(out))
return "NA"
def main():
""" this is where it all starts: twitter users are sought, and if enough users
are found, their tweets are downloaded """
# get user input from file
print "reading in settings..."
stts = getSettings()
(consumerkey, consumersecret, accesstokenkey, accesstokensecret) = stts["api"]
# initialize the api
print "initializing Twitter api..."
api = twitter.Api(consumer_key=consumerkey,
consumer_secret=consumersecret,
access_token_key=accesstokenkey,
access_token_secret=accesstokensecret)
# add to the seedlist
seeds = getSeeds(api)
print "got all the seeds we need..."
users_have = getUserNames()
sortedSeeds = sortSeeds(seeds, "bigLocationsFirst")
tweetnum = 0
tweets = []
for (seed, loc) in sortedSeeds:
if seed not in users_have:
print "grabbing tweets for", seed, loc
try:
tweets.extend( getTweets(seed, loc, api) )
except Exception, e:
print "\tException", e
if len(tweets) > 10000:
xmlstore(tweets)
tweets = []
xmlstore(tweets)
if __name__ == "__main__":
main()
| |
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Contains WithSimilarityScore mixin.
This defines a procedure of getting "similar" objects which have similar
relationships.
"""
from sqlalchemy import and_
from sqlalchemy import case
from sqlalchemy import literal
from sqlalchemy import or_
from sqlalchemy import select
from sqlalchemy.orm import aliased
from sqlalchemy.sql import func
from ggrc import db
from ggrc.models.relationship import Relationship
from ggrc.models.snapshot import Snapshot
class WithSimilarityScore(object):
"""Defines a routine to get similar object with mappings to same objects."""
# pylint: disable=too-few-public-methods
# example of similarity_options:
# similarity_options = {
# "relevant_types": {"Audit": {"weight": 5}, type: {"weight": w}},
# "threshold": 10,
# }
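# Hedged usage sketch (Assessment is assumed to be a model that mixes this
# class in and defines similarity_options):
#   query = Assessment.get_similar_objects_query(id_=42, types=["Assessment"])
#   similar = [(row.id, row.type, row.weight) for row in query]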
@classmethod
def get_similar_objects_query(cls, id_, types="all", relevant_types=None,
threshold=None):
"""Get objects of types similar to cls instance by their mappings.
Args:
id_: the id of the object to which the search will be applied;
types: a list of types of relevant objects (or "all" if you need to find
objects of any type);
relevant_types: use this parameter to override parameters from
cls.similarity_options["relevant_types"];
threshold: use this parameter to override
cls.similarity_options["threshold"].
Returns:
SQLAlchemy query that yields results with columns [(id, type, weight)] -
the id and type of similar objects with respective weights.
"""
if not types or (not isinstance(types, list) and types != "all"):
raise ValueError("Expected types = 'all' or a non-empty list of "
"requested types, got {!r} instead.".format(types))
if not hasattr(cls, "similarity_options"):
raise AttributeError("Expected 'similarity_options' defined for "
"'{c.__name__}' model.".format(c=cls))
if relevant_types is None:
relevant_types = cls.similarity_options["relevant_types"]
if threshold is None:
threshold = cls.similarity_options["threshold"]
# naming: self is "object", the object mapped to it is "related",
# the object mapped to "related" is "similar"
queries_for_union = []
# find "similar" objects with Relationship table
queries_for_union += cls._join_relationships(id_)
# find "similar" objects based on snapshots
queries_for_union += cls._join_snapshots(id_)
# find "similar" objects when Relationship table is not used
queries_for_union += cls._emulate_relationships(id_, types, relevant_types)
joined = queries_for_union.pop().union(*queries_for_union).subquery()
# define weights for every "related" object type with values from
# relevant_types dict
weight_case = case(
[(joined.c.related_type == type_, parameters["weight"])
for type_, parameters in relevant_types.items()],
else_=0)
weight_sum = func.sum(weight_case).label("weight")
# return the id and type of "similar" object together with its measure of
# similarity
result = db.session.query(
joined.c.similar_id.label("id"),
joined.c.similar_type.label("type"),
weight_sum,
).filter(or_(
# filter out self
joined.c.similar_id != id_,
joined.c.similar_type != cls.__name__,
))
# do the filtering by "similar" object types
if types is not None:
if not types:
# types is provided but is empty
return []
elif types == "all":
# any type will pass, no filtering applied
pass
else:
# retain only types from the provided list
result = result.filter(joined.c.similar_type.in_(types))
# group by "similar" objects to sum up weights correctly
result = result.group_by(
joined.c.similar_type,
joined.c.similar_id,
).having(
# filter out "similar" objects that have insufficient similarity
weight_sum >= threshold,
)
return result
@classmethod
def _join_snapshots(cls, id_):
"""Retrieves related objects with snapshots
Performs a query where it first:
1) Find all directly mapped snapshots
2) Join with snapshots to find type and id of snapshots (child_type and
child_id) - left snapshots
3) Join with snapshots to find snapshots with the same child_type and
child_id (right_snapshots)
4) Find all objects mapped to right snapshots (right_relationships)
Args:
id_: ID of the instance the similarity query is performed on.
Returns:
[(related_type, similar_id, similar_type)] where related_type is the type
of the common related object, and similar_id and similar_type are the id
and type of the second-tier object.
"""
left_snapshot = aliased(Snapshot, name="left_snapshot")
right_snapshot = aliased(Snapshot, name="right_snapshot")
left_relationship = aliased(Relationship, name="left_relationship")
right_relationship = aliased(Relationship, name="right_relationship")
snapshot_ids = select([
left_relationship.destination_id.label("snapshot_left_id"),
]).where(
and_(
left_relationship.source_type == cls.__name__,
left_relationship.source_id == id_,
left_relationship.destination_type == "Snapshot"
)
).union(
select([
left_relationship.source_id.label("snapshot_left_id"),
]).where(
and_(
left_relationship.destination_type == cls.__name__,
left_relationship.destination_id == id_,
left_relationship.source_type == "Snapshot"
)
)
).alias("snapshot_ids")
left_snapshot_join = snapshot_ids.outerjoin(
left_snapshot,
left_snapshot.id == snapshot_ids.c.snapshot_left_id
)
right_snapshot_join = left_snapshot_join.outerjoin(
right_snapshot,
and_(
right_snapshot.child_type == left_snapshot.child_type,
right_snapshot.child_id == left_snapshot.child_id
)
).alias("right_snapshot_join")
return [
db.session.query(
right_relationship.source_type.label("similar_type"),
right_relationship.source_id.label("similar_id"),
right_snapshot.child_type.label("related_type"),
).filter(
and_(
right_relationship.destination_type == "Snapshot",
right_relationship.destination_id ==
right_snapshot_join.c.right_snapshot_id,
)
),
db.session.query(
right_relationship.destination_type.label("similar_type"),
right_relationship.destination_id.label("similar_id"),
right_snapshot.child_type.label("related_type"),
).filter(
and_(
right_relationship.source_type == "Snapshot",
right_relationship.source_id ==
right_snapshot_join.c.right_snapshot_id,
)
)
]
@classmethod
def _join_relationships(cls, id_):
"""Make a self-join of Relationship table to find common mappings.
Returns a query with results for [(related_type, similar_id, similar_type)]
where similar_id and similar_type describe a second-tier mapped object and
related_type is the type of a common mapped object between "object" and
"similar".
"""
# get all Relationships for self
object_to_related = db.session.query(Relationship).filter(
or_(and_(Relationship.source_type == cls.__name__,
Relationship.source_id == id_),
and_(Relationship.destination_type == cls.__name__,
Relationship.destination_id == id_))).subquery()
# define how to get id and type of "related" objects
related_id_case = (case([(and_(object_to_related.c.source_id == id_,
object_to_related.c.source_type ==
cls.__name__),
object_to_related.c.destination_id)],
else_=object_to_related.c.source_id)
.label("related_id"))
related_type_case = (case([(and_(object_to_related.c.source_id == id_,
object_to_related.c.source_type ==
cls.__name__),
object_to_related.c.destination_type)],
else_=object_to_related.c.source_type)
.label("related_type"))
related_to_similar = aliased(Relationship, name="related_to_similar")
# self-join Relationships to get "similar" id and type; save "related" type
# to get the weight of this relationship later
return [
db.session.query(
related_type_case,
related_to_similar.destination_id.label("similar_id"),
related_to_similar.destination_type.label("similar_type"),
).join(
related_to_similar,
and_(related_id_case == related_to_similar.source_id,
related_type_case == related_to_similar.source_type),
),
db.session.query(
related_type_case,
related_to_similar.source_id.label("similar_id"),
related_to_similar.source_type.label("similar_type"),
).join(
related_to_similar,
and_(related_id_case == related_to_similar.destination_id,
related_type_case == related_to_similar.destination_type),
),
]
@classmethod
def _emulate_relationships(cls, id_, types, relevant_types):
"""Get a list of queries for second-tier objects mapped via foreign key.
This is used primarily to determine Requests mapped to Audits (Request
model has a foreign key to Audit model and is not mapped with a
Relationship object).
Each query returns results compliant with the results of
_join_relationships as they get UNIONed.
"""
# Note: this is a hack that can go away only when Request-Audit mapping
# will be implemented in Relationships table
from ggrc.models import Audit, Request
result = []
if Audit.__name__ in relevant_types:
# Note: this code assumes that `types` is a single-element list
if cls is Request and Request.__name__ in types:
similar_requests = aliased(cls, name="similar_requests")
result.append(db.session.query(
literal(Audit.__name__).label("related_type"),
similar_requests.id.label("similar_id"),
literal(Request.__name__).label("similar_type"),
).select_from(
similar_requests,
).join(
cls,
similar_requests.audit_id == cls.audit_id,
).filter(
and_(cls.id == id_,
cls.id != similar_requests.id),
))
elif cls is Request and Request.__name__ not in types:
audit_to_similar = aliased(Relationship, name="audit_to_similar")
result.append(db.session.query(
literal(Audit.__name__).label("related_type"),
audit_to_similar.source_id.label("similar_id"),
audit_to_similar.source_type.label("similar_type"),
).select_from(
audit_to_similar,
).join(
cls,
and_(audit_to_similar.destination_id == cls.audit_id,
audit_to_similar.destination_type == Audit.__name__),
))
result.append(db.session.query(
literal(Audit.__name__).label("related_type"),
audit_to_similar.destination_id.label("similar_id"),
audit_to_similar.destination_type.label("similar_type"),
).select_from(
audit_to_similar,
).join(
cls,
and_(audit_to_similar.source_id == cls.audit_id,
audit_to_similar.source_type == Audit.__name__),
))
elif cls is not Request and Request.__name__ in types:
self_to_audit = aliased(Relationship, name="self_to_audit")
request = aliased(Request)
result.append(db.session.query(
literal(Audit.__name__).label("related_type"),
request.id.label("similar_id"),
literal(Request.__name__).label("similar_type"),
).select_from(
request,
).join(
self_to_audit,
or_(
and_(self_to_audit.source_id == id_,
self_to_audit.source_type == cls.__name__,
self_to_audit.destination_id == request.audit_id,
self_to_audit.destination_type == Audit.__name__),
and_(self_to_audit.destination_id == id_,
self_to_audit.destination_type == cls.__name__,
self_to_audit.source_id == request.audit_id,
self_to_audit.source_type == Audit.__name__),
),
))
return result
| |
import functools
from itertools import imap
import urecord
from relations.tuple import Tuple
__all__ = ['Relation', 'RelationalError', 'UndefinedFields',
'NotUnionCompatible']
class RelationalError(Exception):
"""An undefined or invalid operation was attempted."""
pass
class UndefinedFields(RelationalError):
"""An undefined field was used in an operation on one or more relations."""
pass
class NotUnionCompatible(RelationalError):
"""A set operation was attempted between non-union-compatible relations."""
pass
def check_union_compatible(method):
@functools.wraps(method)
def wrapper(self, other):
if not self.is_union_compatible(other):
raise NotUnionCompatible
return method(self, other)
return wrapper
class Relation(object):
def __init__(self, *fields, **kwargs):
self.heading = frozenset(fields)
self.tuple = urecord.Record(*sorted(fields), instance=Tuple)
self.tuples = {}
def __repr__(self):
return '<Relation%r>' % (self.tuple._fields,)
def __len__(self):
return len(self.tuples)
def __contains__(self, tuple_):
return tuple_ in self.tuples
def __iter__(self):
return iter(self.tuples)
def clone(self):
"""Create a new, empty relation with the same heading as this one."""
return type(self)(*self.tuple._fields)
def is_union_compatible(self, other):
return self.heading == other.heading
@check_union_compatible
def update(self, other):
"""
Merge this relation with another union-compatible relation.
This method modifies (and returns) this relation. The other relation
is not modified.
"""
self.tuples.update(other.tuples)
return self
@check_union_compatible
def union(self, other):
"""Safe set union between two union-compatible relations."""
return self.clone().update(self).update(other)
@check_union_compatible
def intersection(self, other):
"""Safe set intersection between two union-compatible relations."""
new_relation = self.clone()
new_relation.tuples.update(
(tuple_, tuple_) for tuple_ in
set(self.tuples).intersection(set(other.tuples)))
return new_relation
@check_union_compatible
def difference(self, other):
"""Safe set difference between two union-compatible relations."""
new_relation = self.clone()
new_relation.tuples.update(
(tuple_, tuple_) for tuple_ in
set(self.tuples).difference(set(other.tuples)))
return new_relation
def add(self, **kwargs):
"""
Add a tuple to this relation.
This method attempts to be as efficient as possible, re-using the same
Python object if the tuple already exists in this relation.
Arguments should be given in keyword form:
>>> employees = Relation('name', 'department')
>>> alice = employees.add(name='Alice', department='Finance')
>>> alice.name
'Alice'
>>> alice.department
'Finance'
"""
tuple_ = self.tuple(**kwargs)
return self.tuples.setdefault(tuple_, tuple_)
def contains(self, **kwargs):
"""
Determine if this relation contains the specified tuple.
Arguments are given in the same form as for :meth:`add`. This is easier
than having to construct a tuple and use Python's `in` operator, e.g.:
>>> employees.contains(name='Alice', department='Sales')
True
Whereas without this method you'd do:
>>> employees.tuple(name='Alice', department='Sales') in employees
True
"""
return self.tuple(**kwargs) in self
def select(self, predicate):
"""
Filter the tuples in this relation based on a predicate.
Returns a new, union-compatible relation.
"""
new_relation = self.clone()
new_relation.tuples.update(
(tuple_, tuple_) for tuple_ in filter(predicate, self.tuples))
return new_relation
def project(self, *fields):
"""
Return a new relation with a heading restricted to the given fields.
The new relation is not union-compatible, and will also be a set, so
it may have a smaller cardinality than the original relation. Here's
an example:
>>> employees = Relation('name', 'department')
>>> employees.add(name='Alice', department='Sales')
>>> employees.add(name='Bob', department='Sales')
>>> len(employees)
2
>>> departments = employees.project('department')
>>> len(departments)
1
>>> departments.contains(department='Sales')
True
"""
new_relation = type(self)(*fields)
if not new_relation.heading.issubset(self.heading):
undefined_fields = tuple(new_relation.heading.difference(self.heading))
raise UndefinedFields("Undefined fields used in project(): %r" %
undefined_fields)
projection = self.tuple._make_projection(*fields)
new_relation.tuples.update((tuple_, tuple_)
for tuple_ in imap(
lambda t: new_relation.tuple(*t._index_restrict(*projection)),
self.tuples))
return new_relation
def rename(self, **new_fields):
"""
Rename some fields in this relation.
Accepts keyword arguments in the form
``new_field_name='old_field_name'``. The new relation returned will
only be union-compatible if no arguments are given to this function.
"""
if not is_bijection(new_fields):
raise RelationalError("Field mapping is not one-to-one")
elif not set(new_fields.values()).issubset(self.heading):
undefined_fields = tuple(set(new_fields.values()).difference(self.heading))
raise UndefinedFields("Undefined fields used in rename(): %r" %
undefined_fields)
# Get a complete bijection from new field names => old field names
renamed_fields = set(new_fields.values())
for field_name in self.heading:
if field_name not in renamed_fields:
new_fields[field_name] = field_name
new_relation = type(self)(*new_fields.keys())
reordering = self.tuple._make_reordering(**new_fields)
new_relation.tuples.update(
(tuple_, tuple_) for tuple_ in imap(
lambda t: new_relation.tuple(*t._index_restrict(*reordering)),
self.tuples))
return new_relation
def natural_join(self, other):
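"""
Join this relation with another one on their common fields.
Tuples whose values agree on all shared fields are merged into a new
relation whose heading is the union of both headings.
"""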
new_relation = type(self)(*self.heading.union(other.heading))
common_fields = self.heading.intersection(other.heading)
projection1 = self.tuple._make_projection(*common_fields)
projection2 = other.tuple._make_projection(*common_fields)
for tuple1 in self:
for tuple2 in other:
if (tuple1._index_restrict(*projection1) ==
tuple2._index_restrict(*projection2)):
row = tuple1._asdict()
row.update(tuple2._asdict())
new_relation.add(**row)
return new_relation
def is_bijection(dictionary):
"""Check if a dictionary is a proper one-to-one mapping."""
return len(set(dictionary.keys())) == len(set(dictionary.values()))
def invert_bijection(dictionary):
"""Return the inverse of a bijection. Does not check the input."""
return dict((value, key) for (key, value) in dictionary.iteritems())
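# Minimal demo of the operators above; a hedged sketch that assumes urecord's
# Record/Tuple helpers (_asdict, _make_projection, ...) behave as used here.
if __name__ == '__main__':
    employees = Relation('name', 'department')
    employees.add(name='Alice', department='Finance')
    employees.add(name='Bob', department='Sales')
    departments = Relation('department', 'budget')
    departments.add(department='Finance', budget=100)
    departments.add(department='Sales', budget=80)
    joined = employees.natural_join(departments)
    print(len(joined))                                            # 2 joined tuples
    print(joined.contains(name='Alice', department='Finance', budget=100))  # True
    print(len(employees.project('department')))                   # 2 distinct departments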
| |
"""
pygments.lexers._sourcemod_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file contains the names of SourceMod functions.
It is able to re-generate itself.
Do not edit the FUNCTIONS list by hand.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
FUNCTIONS = (
'OnEntityCreated',
'OnEntityDestroyed',
'OnGetGameDescription',
'OnLevelInit',
'SDKHook',
'SDKHookEx',
'SDKUnhook',
'SDKHooks_TakeDamage',
'SDKHooks_DropWeapon',
'TopMenuHandler',
'CreateTopMenu',
'LoadTopMenuConfig',
'AddToTopMenu',
'GetTopMenuInfoString',
'GetTopMenuObjName',
'RemoveFromTopMenu',
'DisplayTopMenu',
'DisplayTopMenuCategory',
'FindTopMenuCategory',
'SetTopMenuTitleCaching',
'OnAdminMenuCreated',
'OnAdminMenuReady',
'GetAdminTopMenu',
'AddTargetsToMenu',
'AddTargetsToMenu2',
'RedisplayAdminMenu',
'TEHook',
'AddTempEntHook',
'RemoveTempEntHook',
'TE_Start',
'TE_IsValidProp',
'TE_WriteNum',
'TE_ReadNum',
'TE_WriteFloat',
'TE_ReadFloat',
'TE_WriteVector',
'TE_ReadVector',
'TE_WriteAngles',
'TE_WriteFloatArray',
'TE_Send',
'TE_WriteEncodedEnt',
'TE_SendToAll',
'TE_SendToClient',
'CreateKeyValues',
'KvSetString',
'KvSetNum',
'KvSetUInt64',
'KvSetFloat',
'KvSetColor',
'KvSetVector',
'KvGetString',
'KvGetNum',
'KvGetFloat',
'KvGetColor',
'KvGetUInt64',
'KvGetVector',
'KvJumpToKey',
'KvJumpToKeySymbol',
'KvGotoFirstSubKey',
'KvGotoNextKey',
'KvSavePosition',
'KvDeleteKey',
'KvDeleteThis',
'KvGoBack',
'KvRewind',
'KvGetSectionName',
'KvSetSectionName',
'KvGetDataType',
'KeyValuesToFile',
'FileToKeyValues',
'StringToKeyValues',
'KvSetEscapeSequences',
'KvNodesInStack',
'KvCopySubkeys',
'KvFindKeyById',
'KvGetNameSymbol',
'KvGetSectionSymbol',
'TE_SetupSparks',
'TE_SetupSmoke',
'TE_SetupDust',
'TE_SetupMuzzleFlash',
'TE_SetupMetalSparks',
'TE_SetupEnergySplash',
'TE_SetupArmorRicochet',
'TE_SetupGlowSprite',
'TE_SetupExplosion',
'TE_SetupBloodSprite',
'TE_SetupBeamRingPoint',
'TE_SetupBeamPoints',
'TE_SetupBeamLaser',
'TE_SetupBeamRing',
'TE_SetupBeamFollow',
'HookEvent',
'HookEventEx',
'UnhookEvent',
'CreateEvent',
'FireEvent',
'CancelCreatedEvent',
'GetEventBool',
'SetEventBool',
'GetEventInt',
'SetEventInt',
'GetEventFloat',
'SetEventFloat',
'GetEventString',
'SetEventString',
'GetEventName',
'SetEventBroadcast',
'GetUserMessageType',
'GetUserMessageId',
'GetUserMessageName',
'StartMessage',
'StartMessageEx',
'EndMessage',
'MsgHook',
'MsgPostHook',
'HookUserMessage',
'UnhookUserMessage',
'StartMessageAll',
'StartMessageOne',
'InactivateClient',
'ReconnectClient',
'GetMaxEntities',
'GetEntityCount',
'IsValidEntity',
'IsValidEdict',
'IsEntNetworkable',
'CreateEdict',
'RemoveEdict',
'GetEdictFlags',
'SetEdictFlags',
'GetEdictClassname',
'GetEntityNetClass',
'ChangeEdictState',
'GetEntData',
'SetEntData',
'GetEntDataFloat',
'SetEntDataFloat',
'GetEntDataEnt2',
'SetEntDataEnt2',
'GetEntDataVector',
'SetEntDataVector',
'GetEntDataString',
'SetEntDataString',
'FindSendPropOffs',
'FindSendPropInfo',
'FindDataMapOffs',
'FindDataMapInfo',
'GetEntSendPropOffs',
'GetEntProp',
'SetEntProp',
'GetEntPropFloat',
'SetEntPropFloat',
'GetEntPropEnt',
'SetEntPropEnt',
'GetEntPropVector',
'SetEntPropVector',
'GetEntPropString',
'SetEntPropString',
'GetEntPropArraySize',
'GetEntDataArray',
'SetEntDataArray',
'GetEntityAddress',
'GetEntityClassname',
'float',
'FloatMul',
'FloatDiv',
'FloatAdd',
'FloatSub',
'FloatFraction',
'RoundToZero',
'RoundToCeil',
'RoundToFloor',
'RoundToNearest',
'FloatCompare',
'SquareRoot',
'Pow',
'Exponential',
'Logarithm',
'Sine',
'Cosine',
'Tangent',
'FloatAbs',
'ArcTangent',
'ArcCosine',
'ArcSine',
'ArcTangent2',
'RoundFloat',
'operator%',
'DegToRad',
'RadToDeg',
'GetURandomInt',
'GetURandomFloat',
'SetURandomSeed',
'SetURandomSeedSimple',
'RemovePlayerItem',
'GivePlayerItem',
'GetPlayerWeaponSlot',
'IgniteEntity',
'ExtinguishEntity',
'TeleportEntity',
'ForcePlayerSuicide',
'SlapPlayer',
'FindEntityByClassname',
'GetClientEyeAngles',
'CreateEntityByName',
'DispatchSpawn',
'DispatchKeyValue',
'DispatchKeyValueFloat',
'DispatchKeyValueVector',
'GetClientAimTarget',
'GetTeamCount',
'GetTeamName',
'GetTeamScore',
'SetTeamScore',
'GetTeamClientCount',
'SetEntityModel',
'GetPlayerDecalFile',
'GetPlayerJingleFile',
'GetServerNetStats',
'EquipPlayerWeapon',
'ActivateEntity',
'SetClientInfo',
'GivePlayerAmmo',
'SetClientListeningFlags',
'GetClientListeningFlags',
'SetListenOverride',
'GetListenOverride',
'IsClientMuted',
'TR_GetPointContents',
'TR_GetPointContentsEnt',
'TR_TraceRay',
'TR_TraceHull',
'TR_TraceRayFilter',
'TR_TraceHullFilter',
'TR_TraceRayEx',
'TR_TraceHullEx',
'TR_TraceRayFilterEx',
'TR_TraceHullFilterEx',
'TR_GetFraction',
'TR_GetEndPosition',
'TR_GetEntityIndex',
'TR_DidHit',
'TR_GetHitGroup',
'TR_GetPlaneNormal',
'TR_PointOutsideWorld',
'SortIntegers',
'SortFloats',
'SortStrings',
'SortFunc1D',
'SortCustom1D',
'SortCustom2D',
'SortADTArray',
'SortFuncADTArray',
'SortADTArrayCustom',
'CompileRegex',
'MatchRegex',
'GetRegexSubString',
'SimpleRegexMatch',
'TF2_GetPlayerClass',
'TF2_SetPlayerClass',
'TF2_RemoveWeaponSlot',
'TF2_RemoveAllWeapons',
'TF2_IsPlayerInCondition',
'TF2_GetObjectType',
'TF2_GetObjectMode',
'NominateMap',
'RemoveNominationByMap',
'RemoveNominationByOwner',
'GetExcludeMapList',
'GetNominatedMapList',
'CanMapChooserStartVote',
'InitiateMapChooserVote',
'HasEndOfMapVoteFinished',
'EndOfMapVoteEnabled',
'OnNominationRemoved',
'OnMapVoteStarted',
'CreateTimer',
'KillTimer',
'TriggerTimer',
'GetTickedTime',
'GetMapTimeLeft',
'GetMapTimeLimit',
'ExtendMapTimeLimit',
'GetTickInterval',
'OnMapTimeLeftChanged',
'IsServerProcessing',
'CreateDataTimer',
'ByteCountToCells',
'CreateArray',
'ClearArray',
'CloneArray',
'ResizeArray',
'GetArraySize',
'PushArrayCell',
'PushArrayString',
'PushArrayArray',
'GetArrayCell',
'GetArrayString',
'GetArrayArray',
'SetArrayCell',
'SetArrayString',
'SetArrayArray',
'ShiftArrayUp',
'RemoveFromArray',
'SwapArrayItems',
'FindStringInArray',
'FindValueInArray',
'ProcessTargetString',
'ReplyToTargetError',
'MultiTargetFilter',
'AddMultiTargetFilter',
'RemoveMultiTargetFilter',
'OnBanClient',
'OnBanIdentity',
'OnRemoveBan',
'BanClient',
'BanIdentity',
'RemoveBan',
'CreateTrie',
'SetTrieValue',
'SetTrieArray',
'SetTrieString',
'GetTrieValue',
'GetTrieArray',
'GetTrieString',
'RemoveFromTrie',
'ClearTrie',
'GetTrieSize',
'GetFunctionByName',
'CreateGlobalForward',
'CreateForward',
'GetForwardFunctionCount',
'AddToForward',
'RemoveFromForward',
'RemoveAllFromForward',
'Call_StartForward',
'Call_StartFunction',
'Call_PushCell',
'Call_PushCellRef',
'Call_PushFloat',
'Call_PushFloatRef',
'Call_PushArray',
'Call_PushArrayEx',
'Call_PushString',
'Call_PushStringEx',
'Call_Finish',
'Call_Cancel',
'NativeCall',
'CreateNative',
'ThrowNativeError',
'GetNativeStringLength',
'GetNativeString',
'SetNativeString',
'GetNativeCell',
'GetNativeCellRef',
'SetNativeCellRef',
'GetNativeArray',
'SetNativeArray',
'FormatNativeString',
'RequestFrameCallback',
'RequestFrame',
'OnRebuildAdminCache',
'DumpAdminCache',
'AddCommandOverride',
'GetCommandOverride',
'UnsetCommandOverride',
'CreateAdmGroup',
'FindAdmGroup',
'SetAdmGroupAddFlag',
'GetAdmGroupAddFlag',
'GetAdmGroupAddFlags',
'SetAdmGroupImmuneFrom',
'GetAdmGroupImmuneCount',
'GetAdmGroupImmuneFrom',
'AddAdmGroupCmdOverride',
'GetAdmGroupCmdOverride',
'RegisterAuthIdentType',
'CreateAdmin',
'GetAdminUsername',
'BindAdminIdentity',
'SetAdminFlag',
'GetAdminFlag',
'GetAdminFlags',
'AdminInheritGroup',
'GetAdminGroupCount',
'GetAdminGroup',
'SetAdminPassword',
'GetAdminPassword',
'FindAdminByIdentity',
'RemoveAdmin',
'FlagBitsToBitArray',
'FlagBitArrayToBits',
'FlagArrayToBits',
'FlagBitsToArray',
'FindFlagByName',
'FindFlagByChar',
'FindFlagChar',
'ReadFlagString',
'CanAdminTarget',
'CreateAuthMethod',
'SetAdmGroupImmunityLevel',
'GetAdmGroupImmunityLevel',
'SetAdminImmunityLevel',
'GetAdminImmunityLevel',
'FlagToBit',
'BitToFlag',
'ServerCommand',
'ServerCommandEx',
'InsertServerCommand',
'ServerExecute',
'ClientCommand',
'FakeClientCommand',
'FakeClientCommandEx',
'PrintToServer',
'PrintToConsole',
'ReplyToCommand',
'GetCmdReplySource',
'SetCmdReplySource',
'IsChatTrigger',
'ShowActivity2',
'ShowActivity',
'ShowActivityEx',
'FormatActivitySource',
'SrvCmd',
'RegServerCmd',
'ConCmd',
'RegConsoleCmd',
'RegAdminCmd',
'GetCmdArgs',
'GetCmdArg',
'GetCmdArgString',
'CreateConVar',
'FindConVar',
'ConVarChanged',
'HookConVarChange',
'UnhookConVarChange',
'GetConVarBool',
'SetConVarBool',
'GetConVarInt',
'SetConVarInt',
'GetConVarFloat',
'SetConVarFloat',
'GetConVarString',
'SetConVarString',
'ResetConVar',
'GetConVarDefault',
'GetConVarFlags',
'SetConVarFlags',
'GetConVarBounds',
'SetConVarBounds',
'GetConVarName',
'QueryClientConVar',
'GetCommandIterator',
'ReadCommandIterator',
'CheckCommandAccess',
'CheckAccess',
'IsValidConVarChar',
'GetCommandFlags',
'SetCommandFlags',
'FindFirstConCommand',
'FindNextConCommand',
'SendConVarValue',
'AddServerTag',
'RemoveServerTag',
'CommandListener',
'AddCommandListener',
'RemoveCommandListener',
'CommandExists',
'OnClientSayCommand',
'OnClientSayCommand_Post',
'TF2_IgnitePlayer',
'TF2_RespawnPlayer',
'TF2_RegeneratePlayer',
'TF2_AddCondition',
'TF2_RemoveCondition',
'TF2_SetPlayerPowerPlay',
'TF2_DisguisePlayer',
'TF2_RemovePlayerDisguise',
'TF2_StunPlayer',
'TF2_MakeBleed',
'TF2_GetClass',
'TF2_CalcIsAttackCritical',
'TF2_OnIsHolidayActive',
'TF2_IsHolidayActive',
'TF2_IsPlayerInDuel',
'TF2_RemoveWearable',
'TF2_OnConditionAdded',
'TF2_OnConditionRemoved',
'TF2_OnWaitingForPlayersStart',
'TF2_OnWaitingForPlayersEnd',
'TF2_OnPlayerTeleport',
'SQL_Connect',
'SQL_DefConnect',
'SQL_ConnectCustom',
'SQLite_UseDatabase',
'SQL_CheckConfig',
'SQL_GetDriver',
'SQL_ReadDriver',
'SQL_GetDriverIdent',
'SQL_GetDriverProduct',
'SQL_SetCharset',
'SQL_GetAffectedRows',
'SQL_GetInsertId',
'SQL_GetError',
'SQL_EscapeString',
'SQL_QuoteString',
'SQL_FastQuery',
'SQL_Query',
'SQL_PrepareQuery',
'SQL_FetchMoreResults',
'SQL_HasResultSet',
'SQL_GetRowCount',
'SQL_GetFieldCount',
'SQL_FieldNumToName',
'SQL_FieldNameToNum',
'SQL_FetchRow',
'SQL_MoreRows',
'SQL_Rewind',
'SQL_FetchString',
'SQL_FetchFloat',
'SQL_FetchInt',
'SQL_IsFieldNull',
'SQL_FetchSize',
'SQL_BindParamInt',
'SQL_BindParamFloat',
'SQL_BindParamString',
'SQL_Execute',
'SQL_LockDatabase',
'SQL_UnlockDatabase',
'SQLTCallback',
'SQL_IsSameConnection',
'SQL_TConnect',
'SQL_TQuery',
'SQL_CreateTransaction',
'SQL_AddQuery',
'SQLTxnSuccess',
'SQLTxnFailure',
'SQL_ExecuteTransaction',
'CloseHandle',
'CloneHandle',
'MenuHandler',
'CreateMenu',
'DisplayMenu',
'DisplayMenuAtItem',
'AddMenuItem',
'InsertMenuItem',
'RemoveMenuItem',
'RemoveAllMenuItems',
'GetMenuItem',
'GetMenuSelectionPosition',
'GetMenuItemCount',
'SetMenuPagination',
'GetMenuPagination',
'GetMenuStyle',
'SetMenuTitle',
'GetMenuTitle',
'CreatePanelFromMenu',
'GetMenuExitButton',
'SetMenuExitButton',
'GetMenuExitBackButton',
'SetMenuExitBackButton',
'SetMenuNoVoteButton',
'CancelMenu',
'GetMenuOptionFlags',
'SetMenuOptionFlags',
'IsVoteInProgress',
'CancelVote',
'VoteMenu',
'VoteMenuToAll',
'VoteHandler',
'SetVoteResultCallback',
'CheckVoteDelay',
'IsClientInVotePool',
'RedrawClientVoteMenu',
'GetMenuStyleHandle',
'CreatePanel',
'CreateMenuEx',
'GetClientMenu',
'CancelClientMenu',
'GetMaxPageItems',
'GetPanelStyle',
'SetPanelTitle',
'DrawPanelItem',
'DrawPanelText',
'CanPanelDrawFlags',
'SetPanelKeys',
'SendPanelToClient',
'GetPanelTextRemaining',
'GetPanelCurrentKey',
'SetPanelCurrentKey',
'RedrawMenuItem',
'InternalShowMenu',
'GetMenuVoteInfo',
'IsNewVoteAllowed',
'PrefetchSound',
'EmitAmbientSound',
'FadeClientVolume',
'StopSound',
'EmitSound',
'EmitSentence',
'GetDistGainFromSoundLevel',
'AmbientSHook',
'NormalSHook',
'AddAmbientSoundHook',
'AddNormalSoundHook',
'RemoveAmbientSoundHook',
'RemoveNormalSoundHook',
'EmitSoundToClient',
'EmitSoundToAll',
'ATTN_TO_SNDLEVEL',
'GetGameSoundParams',
'EmitGameSound',
'EmitAmbientGameSound',
'EmitGameSoundToClient',
'EmitGameSoundToAll',
'PrecacheScriptSound',
'strlen',
'StrContains',
'strcmp',
'strncmp',
'StrEqual',
'strcopy',
'Format',
'FormatEx',
'VFormat',
'StringToInt',
'StringToIntEx',
'IntToString',
'StringToFloat',
'StringToFloatEx',
'FloatToString',
'BreakString',
'TrimString',
'SplitString',
'ReplaceString',
'ReplaceStringEx',
'GetCharBytes',
'IsCharAlpha',
'IsCharNumeric',
'IsCharSpace',
'IsCharMB',
'IsCharUpper',
'IsCharLower',
'StripQuotes',
'CharToUpper',
'CharToLower',
'FindCharInString',
'StrCat',
'ExplodeString',
'ImplodeStrings',
'GetVectorLength',
'GetVectorDistance',
'GetVectorDotProduct',
'GetVectorCrossProduct',
'NormalizeVector',
'GetAngleVectors',
'GetVectorAngles',
'GetVectorVectors',
'AddVectors',
'SubtractVectors',
'ScaleVector',
'NegateVector',
'MakeVectorFromPoints',
'BaseComm_IsClientGagged',
'BaseComm_IsClientMuted',
'BaseComm_SetClientGag',
'BaseComm_SetClientMute',
'FormatUserLogText',
'FindPluginByFile',
'FindTarget',
'AcceptEntityInput',
'SetVariantBool',
'SetVariantString',
'SetVariantInt',
'SetVariantFloat',
'SetVariantVector3D',
'SetVariantPosVector3D',
'SetVariantColor',
'SetVariantEntity',
'GameRules_GetProp',
'GameRules_SetProp',
'GameRules_GetPropFloat',
'GameRules_SetPropFloat',
'GameRules_GetPropEnt',
'GameRules_SetPropEnt',
'GameRules_GetPropVector',
'GameRules_SetPropVector',
'GameRules_GetPropString',
'GameRules_SetPropString',
'GameRules_GetRoundState',
'OnClientConnect',
'OnClientConnected',
'OnClientPutInServer',
'OnClientDisconnect',
'OnClientDisconnect_Post',
'OnClientCommand',
'OnClientSettingsChanged',
'OnClientAuthorized',
'OnClientPreAdminCheck',
'OnClientPostAdminFilter',
'OnClientPostAdminCheck',
'GetMaxClients',
'GetMaxHumanPlayers',
'GetClientCount',
'GetClientName',
'GetClientIP',
'GetClientAuthString',
'GetClientAuthId',
'GetSteamAccountID',
'GetClientUserId',
'IsClientConnected',
'IsClientInGame',
'IsClientInKickQueue',
'IsClientAuthorized',
'IsFakeClient',
'IsClientSourceTV',
'IsClientReplay',
'IsClientObserver',
'IsPlayerAlive',
'GetClientInfo',
'GetClientTeam',
'SetUserAdmin',
'GetUserAdmin',
'AddUserFlags',
'RemoveUserFlags',
'SetUserFlagBits',
'GetUserFlagBits',
'CanUserTarget',
'RunAdminCacheChecks',
'NotifyPostAdminCheck',
'CreateFakeClient',
'SetFakeClientConVar',
'GetClientHealth',
'GetClientModel',
'GetClientWeapon',
'GetClientMaxs',
'GetClientMins',
'GetClientAbsAngles',
'GetClientAbsOrigin',
'GetClientArmor',
'GetClientDeaths',
'GetClientFrags',
'GetClientDataRate',
'IsClientTimingOut',
'GetClientTime',
'GetClientLatency',
'GetClientAvgLatency',
'GetClientAvgLoss',
'GetClientAvgChoke',
'GetClientAvgData',
'GetClientAvgPackets',
'GetClientOfUserId',
'KickClient',
'KickClientEx',
'ChangeClientTeam',
'GetClientSerial',
'GetClientFromSerial',
'FindStringTable',
'GetNumStringTables',
'GetStringTableNumStrings',
'GetStringTableMaxStrings',
'GetStringTableName',
'FindStringIndex',
'ReadStringTable',
'GetStringTableDataLength',
'GetStringTableData',
'SetStringTableData',
'AddToStringTable',
'LockStringTables',
'AddFileToDownloadsTable',
'GetEntityFlags',
'SetEntityFlags',
'GetEntityMoveType',
'SetEntityMoveType',
'GetEntityRenderMode',
'SetEntityRenderMode',
'GetEntityRenderFx',
'SetEntityRenderFx',
'SetEntityRenderColor',
'GetEntityGravity',
'SetEntityGravity',
'SetEntityHealth',
'GetClientButtons',
'EntityOutput',
'HookEntityOutput',
'UnhookEntityOutput',
'HookSingleEntityOutput',
'UnhookSingleEntityOutput',
'SMC_CreateParser',
'SMC_ParseFile',
'SMC_GetErrorString',
'SMC_ParseStart',
'SMC_SetParseStart',
'SMC_ParseEnd',
'SMC_SetParseEnd',
'SMC_NewSection',
'SMC_KeyValue',
'SMC_EndSection',
'SMC_SetReaders',
'SMC_RawLine',
'SMC_SetRawLine',
'BfWriteBool',
'BfWriteByte',
'BfWriteChar',
'BfWriteShort',
'BfWriteWord',
'BfWriteNum',
'BfWriteFloat',
'BfWriteString',
'BfWriteEntity',
'BfWriteAngle',
'BfWriteCoord',
'BfWriteVecCoord',
'BfWriteVecNormal',
'BfWriteAngles',
'BfReadBool',
'BfReadByte',
'BfReadChar',
'BfReadShort',
'BfReadWord',
'BfReadNum',
'BfReadFloat',
'BfReadString',
'BfReadEntity',
'BfReadAngle',
'BfReadCoord',
'BfReadVecCoord',
'BfReadVecNormal',
'BfReadAngles',
'BfGetNumBytesLeft',
'CreateProfiler',
'StartProfiling',
'StopProfiling',
'GetProfilerTime',
'OnPluginStart',
'AskPluginLoad2',
'OnPluginEnd',
'OnPluginPauseChange',
'OnGameFrame',
'OnMapStart',
'OnMapEnd',
'OnConfigsExecuted',
'OnAutoConfigsBuffered',
'OnAllPluginsLoaded',
'GetMyHandle',
'GetPluginIterator',
'MorePlugins',
'ReadPlugin',
'GetPluginStatus',
'GetPluginFilename',
'IsPluginDebugging',
'GetPluginInfo',
'FindPluginByNumber',
'SetFailState',
'ThrowError',
'GetTime',
'FormatTime',
'LoadGameConfigFile',
'GameConfGetOffset',
'GameConfGetKeyValue',
'GameConfGetAddress',
'GetSysTickCount',
'AutoExecConfig',
'RegPluginLibrary',
'LibraryExists',
'GetExtensionFileStatus',
'OnLibraryAdded',
'OnLibraryRemoved',
'ReadMapList',
'SetMapListCompatBind',
'OnClientFloodCheck',
'OnClientFloodResult',
'CanTestFeatures',
'GetFeatureStatus',
'RequireFeature',
'LoadFromAddress',
'StoreToAddress',
'CreateStack',
'PushStackCell',
'PushStackString',
'PushStackArray',
'PopStackCell',
'PopStackString',
'PopStackArray',
'IsStackEmpty',
'PopStack',
'OnPlayerRunCmd',
'BuildPath',
'OpenDirectory',
'ReadDirEntry',
'OpenFile',
'DeleteFile',
'ReadFileLine',
'ReadFile',
'ReadFileString',
'WriteFile',
'WriteFileString',
'WriteFileLine',
'ReadFileCell',
'WriteFileCell',
'IsEndOfFile',
'FileSeek',
'FilePosition',
'FileExists',
'RenameFile',
'DirExists',
'FileSize',
'FlushFile',
'RemoveDir',
'CreateDirectory',
'GetFileTime',
'LogToOpenFile',
'LogToOpenFileEx',
'PbReadInt',
'PbReadFloat',
'PbReadBool',
'PbReadString',
'PbReadColor',
'PbReadAngle',
'PbReadVector',
'PbReadVector2D',
'PbGetRepeatedFieldCount',
'PbSetInt',
'PbSetFloat',
'PbSetBool',
'PbSetString',
'PbSetColor',
'PbSetAngle',
'PbSetVector',
'PbSetVector2D',
'PbAddInt',
'PbAddFloat',
'PbAddBool',
'PbAddString',
'PbAddColor',
'PbAddAngle',
'PbAddVector',
'PbAddVector2D',
'PbRemoveRepeatedFieldValue',
'PbReadMessage',
'PbReadRepeatedMessage',
'PbAddMessage',
'SetNextMap',
'GetNextMap',
'ForceChangeLevel',
'GetMapHistorySize',
'GetMapHistory',
'GeoipCode2',
'GeoipCode3',
'GeoipCountry',
'MarkNativeAsOptional',
'RegClientCookie',
'FindClientCookie',
'SetClientCookie',
'GetClientCookie',
'SetAuthIdCookie',
'AreClientCookiesCached',
'OnClientCookiesCached',
'CookieMenuHandler',
'SetCookiePrefabMenu',
'SetCookieMenuItem',
'ShowCookieMenu',
'GetCookieIterator',
'ReadCookieIterator',
'GetCookieAccess',
'GetClientCookieTime',
'LoadTranslations',
'SetGlobalTransTarget',
'GetClientLanguage',
'GetServerLanguage',
'GetLanguageCount',
'GetLanguageInfo',
'SetClientLanguage',
'GetLanguageByCode',
'GetLanguageByName',
'CS_OnBuyCommand',
'CS_OnCSWeaponDrop',
'CS_OnGetWeaponPrice',
'CS_OnTerminateRound',
'CS_RespawnPlayer',
'CS_SwitchTeam',
'CS_DropWeapon',
'CS_TerminateRound',
'CS_GetTranslatedWeaponAlias',
'CS_GetWeaponPrice',
'CS_GetClientClanTag',
'CS_SetClientClanTag',
'CS_GetTeamScore',
'CS_SetTeamScore',
'CS_GetMVPCount',
'CS_SetMVPCount',
'CS_GetClientContributionScore',
'CS_SetClientContributionScore',
'CS_GetClientAssists',
'CS_SetClientAssists',
'CS_AliasToWeaponID',
'CS_WeaponIDToAlias',
'CS_IsValidWeaponID',
'CS_UpdateClientModel',
'LogToGame',
'SetRandomSeed',
'GetRandomFloat',
'GetRandomInt',
'IsMapValid',
'IsDedicatedServer',
'GetEngineTime',
'GetGameTime',
'GetGameTickCount',
'GetGameDescription',
'GetGameFolderName',
'GetCurrentMap',
'PrecacheModel',
'PrecacheSentenceFile',
'PrecacheDecal',
'PrecacheGeneric',
'IsModelPrecached',
'IsDecalPrecached',
'IsGenericPrecached',
'PrecacheSound',
'IsSoundPrecached',
'CreateDialog',
'GetEngineVersion',
'PrintToChat',
'PrintToChatAll',
'PrintCenterText',
'PrintCenterTextAll',
'PrintHintText',
'PrintHintTextToAll',
'ShowVGUIPanel',
'CreateHudSynchronizer',
'SetHudTextParams',
'SetHudTextParamsEx',
'ShowSyncHudText',
'ClearSyncHud',
'ShowHudText',
'ShowMOTDPanel',
'DisplayAskConnectBox',
'EntIndexToEntRef',
'EntRefToEntIndex',
'MakeCompatEntRef',
'SetClientViewEntity',
'SetLightStyle',
'GetClientEyePosition',
'CreateDataPack',
'WritePackCell',
'WritePackFloat',
'WritePackString',
'ReadPackCell',
'ReadPackFloat',
'ReadPackString',
'ResetPack',
'GetPackPosition',
'SetPackPosition',
'IsPackReadable',
'LogMessage',
'LogToFile',
'LogToFileEx',
'LogAction',
'LogError',
'OnLogAction',
'GameLogHook',
'AddGameLogHook',
'RemoveGameLogHook',
'FindTeamByName',
'StartPrepSDKCall',
'PrepSDKCall_SetVirtual',
'PrepSDKCall_SetSignature',
'PrepSDKCall_SetAddress',
'PrepSDKCall_SetFromConf',
'PrepSDKCall_SetReturnInfo',
'PrepSDKCall_AddParameter',
'EndPrepSDKCall',
'SDKCall',
'GetPlayerResourceEntity',
)
if __name__ == '__main__': # pragma: no cover
import re
import sys
try:
from urllib import FancyURLopener
except ImportError:
from urllib.request import FancyURLopener
from pygments.util import format_lines
# urllib ends up wanting to import a module called 'math' -- if
# pygments/lexers is in the path, this ends badly.
for i in range(len(sys.path)-1, -1, -1):
if sys.path[i].endswith('/lexers'):
del sys.path[i]
class Opener(FancyURLopener):
version = 'Mozilla/5.0 (Pygments Sourcemod Builtins Update)'
opener = Opener()
def get_version():
f = opener.open('http://docs.sourcemod.net/api/index.php')
r = re.compile(r'SourceMod v\.<b>([\d\.]+(?:-\w+)?)</td>')
for line in f:
m = r.search(line)
if m is not None:
return m.groups()[0]
raise ValueError('No version in api docs')
def get_sm_functions():
f = opener.open('http://docs.sourcemod.net/api/SMfuncs.js')
r = re.compile(r'SMfunctions\[\d+\] = Array \("(?:public )?([^,]+)",".+"\);')
functions = []
for line in f:
m = r.match(line)
if m is not None:
functions.append(m.groups()[0])
return functions
def regenerate(filename, natives):
with open(filename) as fp:
content = fp.read()
header = content[:content.find('FUNCTIONS = (')]
footer = content[content.find("if __name__ == '__main__':")-1:]
with open(filename, 'w') as fp:
fp.write(header)
fp.write(format_lines('FUNCTIONS', natives))
fp.write(footer)
def run():
version = get_version()
print('> Downloading function index for SourceMod %s' % version)
functions = get_sm_functions()
print('> %d functions found:' % len(functions))
functionlist = []
for full_function_name in functions:
print('>> %s' % full_function_name)
functionlist.append(full_function_name)
regenerate(__file__, functionlist)
run()
| |
#!/usr/bin/env python -W ignore::DeprecationWarning
'''
KPCA-based feature engineering for MNIST handwritten digit classification with
a combination of kernels in each layer
Author : Akhil P M
Kernels used : Arc-cosine Kernel, Gaussian Kernel, Polynomial Kernel
'''
import kernel
from settings import *
from umkl_new import *
n=3000
n_kernels = 5
D = np.zeros((n,n))
M = np.zeros((n,n))
P = np.zeros((n,n))
matP = np.zeros((n_kernels, n_kernels))
vecQ = np.zeros((n_kernels,1))
gamma = 0.01
def compute_J(N, theta):
if N == 0:
return np.pi - theta
elif N == 1:
return np.sin(theta) + (np.pi - theta) * np.cos(theta)
elif N == 2:
return 3*np.sin(theta)*np.cos(theta) + (np.pi - theta)*(1 + 2*pow(np.cos(theta), 2))
elif N == 3:
return 4*pow(np.sin(theta), 3) + 15*np.sin(theta)*pow(np.cos(theta), 2) + \
(np.pi- theta)*(9*pow(np.sin(theta),2)*np.cos(theta) + 15*pow(np.cos(theta),3))
else:
return np.zeros(theta.shape)
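# compute_J supplies the angular factor J_n(theta) of the arc-cosine kernel
# (Cho & Saul): k_n(x, y) = (1/pi) * ||x||^n * ||y||^n * J_n(theta),
# which arc_cosine_vector below evaluates layer by layer.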
def arc_cosine_vector(X, Y):
"""param = a vector of n(degree) values at each layer """
param = np.array([0, 3, 3])
no_of_layers = len(param)
M = np.dot(X, Y.T)
temp1 = np.diag(np.dot(X, X.T))
temp2 = np.diag(np.dot(Y, Y.T))
for i in xrange(no_of_layers):
norm_matrix = np.outer(temp1,temp2) #the matrix of k_xx and k_yy values
theta = np.arccos( np.maximum( np.minimum(M/np.sqrt(norm_matrix), 1.0), -1.0))
n_l = param[i]
M = np.multiply(np.power(norm_matrix, n_l/2.0), compute_J(n_l, theta)) / np.pi
if i < no_of_layers-1:
zero1 = np.zeros(len(temp1))
zero2 = np.zeros(len(temp2))
temp1 = np.multiply(np.power(temp1, n_l), compute_J(n_l, zero1)) / np.pi
temp2 = np.multiply(np.power(temp2, n_l), compute_J(n_l, zero2)) / np.pi
return M
def arc_cosine(X, Y):
lenX = X.shape[0]
incr = 1000
M = np.zeros((lenX, Y.shape[0]))
for i in range(0,lenX,incr):
M[i:i+incr] = arc_cosine_vector(X[i:i+incr], Y)
return M
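# Hedged usage sketch of the blocked arc-cosine kernel (shapes are the point;
# the degree vector is the one hard-coded in arc_cosine_vector):
#   X = np.random.randn(50, 784)
#   K = arc_cosine(X, X)  # (50, 50) Gram matrix, filled in 1000-row blocks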
def stratified_sampling(trainX, trainY):
""" stratified inputs for KPCA is extracted by this function """
no_of_classes = len(np.unique(trainY))
representers = np.zeros(no_of_classes)
no_of_reps = int(3000/no_of_classes)
kpcaX = np.zeros((3000, trainX.shape[1]))
count = 0
index = 0
for i in xrange(trainX.shape[0]):
label = trainY[i]
if representers[label] < no_of_reps:
kpcaX[index] = trainX[i]
index += 1
representers[label] += 1
if representers[label] == no_of_reps:
count += 1
if count == no_of_classes:
break
return kpcaX
def uncertainity_sampling(trainX, trainY):
""" sample most uncertain points using active learning techniques,
specifically using label propagation algorithm """
n_total_samples = len(trainY)
n_labeled_points = 100
unlabelled_indices = np.arange(n_total_samples)[n_labeled_points:]
y_train = np.copy(trainY)
y_train[unlabelled_indices] = -1
lp_model = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
lp_model.fit(trainX, y_train)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# select 3000 digit examples that the classifier is most uncertain about
uncertainty_index = np.argsort(pred_entropies)[-3000:]
print(uncertainty_index)
kpcaX = trainX[uncertainty_index]
print(kpcaX.shape)
return kpcaX
def strategic_sampling(trainX, trainY):
""" digits like 7,8,9,4 are more confusing to the learner than 1,2 etc. This strategy
aims to choose more points for denoising in case of highly confused group than least
confused one. """
need = np.array([260, 225, 280, 300, 350, 290, 285, 320, 330, 360])
def multi_KPCA(trainX, trainY, testX, testY, param, k_type):
""" KPCA using combination of kernels """
kpca = KernelPCA(kernel='precomputed')
#kpcaX = stratified_sampling(trainX, trainY) #trainX[0:3000]
kpcaX = trainX[0:3000]
kpcaY = trainY[0:3000]
#for i in range(10):
# print np.sum(kpcaY==i),
kpca_train = np.zeros((3000, 3000))
kernel_train = np.zeros((trainX.shape[0], 3000))
kernel_test = np.zeros((testX.shape[0], 3000))
#get the coefficients
mu = getUMKL_coefficients(trainX[:n], k_type, param)
print(mu)
kpca_train = getUMKL_gram_matrix(kpcaX, kpcaX, k_type, param, mu)
kernel_train = getUMKL_gram_matrix(trainX, kpcaX, k_type, param, mu)
kernel_test = getUMKL_gram_matrix(testX, kpcaX, k_type, param, mu)
kpca.fit(kpca_train)
trainX_kpca = kpca.transform(kernel_train)
testX_kpca = kpca.transform(kernel_test)
gc.collect()
get_individual_kernel_performance(kpcaX, trainX, trainY, testX, testY, k_type, param, mu)
return trainX_kpca, testX_kpca
def read_cmd_arguments(no_of_layers, no_of_kernels):
""" get parameters of each layer as cmd arguments"""
config = sys.argv[1]
param = genfromtxt(config, delimiter=',')
print(param)
k_type = genfromtxt('kernels.csv', delimiter=',')
return param, k_type
def main():
#ignore all warnings
warnings.filterwarnings("ignore")
#set the parameters
no_of_layers = 5
no_of_kernels = 7
kparam = np.array([0,3,3])
coeff = np.zeros((no_of_layers, no_of_kernels))
""" param = a vector of kernel parameter values at each layer """
param, k_type = read_cmd_arguments(no_of_layers, no_of_kernels)
#set the timer
start = time.time()
#load the data
trainX = np.load('trainX.npy')
testX = np.load('testX.npy')
trainY = np.load('trainY.npy')
testY = np.load('testY.npy')
#trainX = np.load('trainX_feat.npy')
#testX = np.load('testX_feat.npy')
#trainY = np.load('trainY_feat.npy')
#testY = np.load('testY_feat.npy')
print('\n!!! Data Loading Completed !!!\n')
#shuffle the training data
shuffle = np.random.permutation(trainX.shape[0])
trainX = trainX[shuffle]
trainY = trainY[shuffle]
selector = SelectPercentile(f_classif, percentile=5)
#extract the features using KPCA
for i in xrange(no_of_layers):
trainX_kpca, testX_kpca = multi_KPCA(trainX, trainY, testX, testY, param[i], k_type[i])
selector.fit(trainX_kpca, trainY)
trainX = selector.transform(trainX_kpca)
testX = selector.transform(testX_kpca)
print(trainX_kpca.shape)
print(trainX.shape)
np.save('trainX_feat'+str(i+1), trainX)
np.save('testX_feat'+str(i+1), testX)
parameters = {'n_neighbors' : list(np.arange(20)+1)}
clf = GridSearchCV(KNeighborsClassifier(weights='distance', n_jobs=-1), parameters)
clf.fit(trainX, trainY)
pred = clf.predict(testX)
print(accuracy_score(testY, pred))
print('============================ Layer %d Completed ============================' %(i+1))
print(testX.shape)
#save the new featurset for further exploration
np.save('trainX_feat', trainX)
np.save('testX_feat', testX)
np.save('trainY_feat', trainY)
np.save('testY_feat', testY)
#fit the grid-searched kNN model and compute the accuracy measure
parameters = {'n_neighbors' : list(np.arange(20)+1)}
clf = GridSearchCV(KNeighborsClassifier(weights='distance', n_jobs=-1), parameters)
#clf = svm.SVC(kernel=arc_cosine, cache_size=2048)
clf.fit(trainX, trainY)
pred = clf.predict(testX)
print(accuracy_score(testY, pred))
print(confusion_matrix(testY, pred))
#print(clf.best_params_)
print('total : %d, correct : %d, incorrect : %d\n' %(len(pred), np.sum(pred == testY), np.sum(pred != testY)))
print('Test Time : %f Minutes\n' %((time.time()-start)/60))
print('completed time ' + str(datetime.now().hour) + ':' + str(datetime.now().minute))
if __name__ == '__main__':
main()
| |
#!/usr/bin/python3
#
# Copyright (c) 2015, VSHN AG, info@vshn.ch
# Licensed under "BSD 3-Clause". See LICENSE file.
#
# Authors:
# - Andre Keller <andre.keller@vshn.ch>
#
"""
MikroTik Router OS Python API
"""
import binascii
import hashlib
import logging
LOG = logging.getLogger(__name__)
class ApiError(Exception):
"""
Exception returned when API call fails.
(!trap event)
"""
pass
class ApiUnrecoverableError(Exception):
"""
Exception returned when API call fails in an unrecoverable manner.
(!fatal event)
"""
pass
class ApiRos:
"""
MikroTik Router OS Python API base class
For a basic understanding of this code, it's important to read through
http://wiki.mikrotik.com/wiki/Manual:API.
Within MikroTik API 'words' and 'sentences' have a very specific meaning
"""
def __init__(self, sock):
"""
Initialize base class.
Args:
sock - Socket (should already be opened and connected)
"""
self.sock = sock
self.currenttag = 0
def login(self, username, password):
"""
Perform API login
Args:
username - Username used to login
password - Password used to login
"""
# request login
# Mikrotik answers with a challenge in the 'ret' attribute
# 'ret' attribute accessible as attrs['ret']
_, attrs = self.talk(["/login"])[0]
# Prepare response for challenge-response login
# response is MD5 of 0-char + plaintext-password + challenge
response = hashlib.md5()
response.update(b'\x00')
response.update(password.encode('UTF-8'))
response.update(binascii.unhexlify((attrs['ret']).encode('UTF-8')))
response = "00" + binascii.hexlify(response.digest()).decode('UTF-8')
# send response & login request
self.talk(["/login",
"=name=%s" % username,
"=response=%s" % response])
def talk(self, words):
"""
Communicate with the API
Args:
words - List of API words to send
"""
if not words:
return
# Write sentence to API
self.write_sentence(words)
replies = []
# Wait for reply
while True:
# read sentence
sentence = self.read_sentence()
# empty sentences are ignored
if len(sentence) == 0:
continue
# extract first word from sentence.
# this indicates the type of reply:
# - !re
#   Reply
# - !done
# Acknowledgement
# - !trap
# API Error
# - !fatal
# Unrecoverable API Error
reply = sentence.pop(0)
attrs = {}
# extract attributes from the words replied by the API
for word in sentence:
# try to determine if there is a second equal sign in the
# word.
try:
second_eq_pos = word.index('=', 1)
except ValueError: # str.index raises ValueError when no second '=' is found
attrs[word[1:]] = ''
else:
attrs[word[1:second_eq_pos]] = word[second_eq_pos + 1:]
replies.append((reply, attrs))
if reply == '!done':
if replies[0][0] == '!trap':
raise ApiError(replies[0][1])
if replies[0][0] == '!fatal':
self.sock.close()
raise ApiUnrecoverableError(replies[0][1])
return replies
def write_sentence(self, words):
"""
writes a sentence word by word to API socket.
Ensures sentence is terminated with a zero-length word.
Args:
words - List of API words to send
"""
for word in words:
self.write_word(word)
# write zero-length word to indicate end of sentence.
self.write_word('')
def read_sentence(self):
"""
reads sentence word by word from API socket.
API uses zero-length word to terminate sentence, so words are read
until zero-length word is received.
Returns:
words - List of API words read from socket
"""
words = []
while True:
word = self.read_word()
if not word:
return words
words.append(word)
def write_word(self, word):
"""
writes word to API socket
The MikroTik API expects the length of the word to be sent over the
wire using a special encoding followed by the word itself.
See http://wiki.mikrotik.com/wiki/Manual:API#API_words for details.
Args:
word
"""
length = len(word)
LOG.debug("<<< %s", word)
# word length < 128
if length < 0x80:
self.write_sock(chr(length))
# word length < 16384
elif length < 0x4000:
length |= 0x8000
self.write_sock(chr((length >> 8) & 0xFF))
self.write_sock(chr(length & 0xFF))
# word length < 2097152
elif length < 0x200000:
length |= 0xC00000
self.write_sock(chr((length >> 16) & 0xFF))
self.write_sock(chr((length >> 8) & 0xFF))
self.write_sock(chr(length & 0xFF))
# word length < 268435456
elif length < 0x10000000:
length |= 0xE0000000
self.write_sock(chr((length >> 24) & 0xFF))
self.write_sock(chr((length >> 16) & 0xFF))
self.write_sock(chr((length >> 8) & 0xFF))
self.write_sock(chr(length & 0xFF))
# word length < 4294967296 (the 5-byte form encodes at most a 32-bit length)
elif length < 0x100000000:
self.write_sock(chr(0xF0))
self.write_sock(chr((length >> 24) & 0xFF))
self.write_sock(chr((length >> 16) & 0xFF))
self.write_sock(chr((length >> 8) & 0xFF))
self.write_sock(chr(length & 0xFF))
else:
raise ApiUnrecoverableError("word-length exceeded")
self.write_sock(word)
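# Worked example for the encoding above (values chosen for illustration):
# a 200-byte word is below 0x4000, so its length is sent as
# 0x8000 | 200 = 0x80C8 -> chr(0x80), chr(0xC8), followed by the word itself.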
def read_word(self):
"""
read word from API socket
The MikroTik API sends the length of the word to be received over the
wire using a special encoding followed by the word itself.
This function will first determine the length, and then read the
word from the socket.
See http://wiki.mikrotik.com/wiki/Manual:API#API_words for details.
"""
# value of the first byte determines how many bytes the encoded length
# of the word will have.
# we read the first char from the socket and determine its ASCII code
# (the ASCII code encodes the length; ord("A") == 65, for example).
length = ord(self.read_sock(1))
# if most significant bit is 0
# -> length < 128, no additional bytes need to be read
if (length & 0x80) == 0x00:
pass
# if the two most significant bits are 10
# -> length is >= 128, but < 16384
elif (length & 0xC0) == 0x80:
# unmask and shift the second lowest byte
length &= ~0xC0
length <<= 8
# read the lowest byte
length += ord(self.read_sock(1))
# if the three most significant bits are 110
# -> length is >= 16384, but < 2097152
elif (length & 0xE0) == 0xC0:
# unmask and shift the third lowest byte
length &= ~0xE0
length <<= 8
# read and shift second lowest byte
length += ord(self.read_sock(1))
length <<= 8
# read lowest byte
length += ord(self.read_sock(1))
# if the four most significant bits are 1110
# length is >= 2097152, but < 268435456
elif (length & 0xF0) == 0xE0:
# unmask and shift the fourth lowest byte
length &= ~0xF0
length <<= 8
# read and shift third lowest byte
length += ord(self.read_sock(1))
length <<= 8
# read and shift second lowest byte
length += ord(self.read_sock(1))
length <<= 8
# read lowest byte
length += ord(self.read_sock(1))
# if the five most significant bits are 11110
# length is >= 268435456, but < 4294967296
elif (length & 0xF8) == 0xF0:
# read and shift fourth lowest byte
length = ord(self.read_sock(1))
length <<= 8
# read and shift third lowest byte
length += ord(self.read_sock(1))
length <<= 8
# read and shift second lowest byte
length += ord(self.read_sock(1))
length <<= 8
# read lowest byte
length += ord(self.read_sock(1))
else:
raise ApiUnrecoverableError("unknown control byte received")
# read actual word from socket, using length determined above
ret = self.read_sock(length)
LOG.debug(">>> %s", ret)
return ret
def write_sock(self, string):
"""
write string to API socket
Args:
string - String to send
"""
try:
self.sock.sendall(bytes(string, 'latin-1'))
except OSError as exc:
raise ApiUnrecoverableError("could not send to socket") from exc
def read_sock(self, length):
"""
read string with specified length from API socket
Args:
length - Number of chars to read from socket
Returns:
string - String as read from socket
"""
string = ''
while len(string) < length:
# read data from socket with a maximum buffer size of 4k
chunk = self.sock.recv(min(length - len(string), 4096))
if not chunk:
raise ApiUnrecoverableError("could not read from socket")
string = string + chunk.decode('UTF-8', 'replace')
return string
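# Hedged usage sketch (router address, port 8728 and credentials are
# assumptions; the socket must be opened and connected by the caller):
#
#   import socket
#   sock = socket.create_connection(("192.0.2.1", 8728))
#   api = ApiRos(sock)
#   api.login("admin", "secret")
#   for reply, attrs in api.talk(["/system/resource/print"]):
#       print(reply, attrs)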
| |
"""Support for Apple HomeKit."""
import asyncio
import ipaddress
import logging
import os
from aiohttp import web
from pyhap.const import STANDALONE_AID
import voluptuous as vol
from homeassistant.components import zeroconf
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_BATTERY_CHARGING,
DEVICE_CLASS_MOTION,
DEVICE_CLASS_OCCUPANCY,
DOMAIN as BINARY_SENSOR_DOMAIN,
)
from homeassistant.components.camera import DOMAIN as CAMERA_DOMAIN
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.humidifier import DOMAIN as HUMIDIFIER_DOMAIN
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
ATTR_BATTERY_CHARGING,
ATTR_BATTERY_LEVEL,
ATTR_ENTITY_ID,
CONF_IP_ADDRESS,
CONF_NAME,
CONF_PORT,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_HUMIDITY,
EVENT_HOMEASSISTANT_STARTED,
EVENT_HOMEASSISTANT_STOP,
SERVICE_RELOAD,
)
from homeassistant.core import CoreState, HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady, Unauthorized
from homeassistant.helpers import device_registry, entity_registry
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entityfilter import BASE_FILTER_SCHEMA, FILTER_SCHEMA
from homeassistant.helpers.reload import async_integration_yaml_config
from homeassistant.loader import IntegrationNotFound, async_get_integration
from homeassistant.util import get_local_ip
from .accessories import get_accessory
from .aidmanager import AccessoryAidStorage
from .const import (
AID_STORAGE,
ATTR_INTERGRATION,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_SOFTWARE_VERSION,
BRIDGE_NAME,
BRIDGE_SERIAL_NUMBER,
CONF_ADVERTISE_IP,
CONF_AUTO_START,
CONF_ENTITY_CONFIG,
CONF_ENTRY_INDEX,
CONF_FILTER,
CONF_HOMEKIT_MODE,
CONF_LINKED_BATTERY_CHARGING_SENSOR,
CONF_LINKED_BATTERY_SENSOR,
CONF_LINKED_DOORBELL_SENSOR,
CONF_LINKED_HUMIDITY_SENSOR,
CONF_LINKED_MOTION_SENSOR,
CONF_SAFE_MODE,
CONF_ZEROCONF_DEFAULT_INTERFACE,
CONFIG_OPTIONS,
DEFAULT_AUTO_START,
DEFAULT_HOMEKIT_MODE,
DEFAULT_PORT,
DEFAULT_SAFE_MODE,
DOMAIN,
HOMEKIT,
HOMEKIT_MODE_ACCESSORY,
HOMEKIT_MODES,
HOMEKIT_PAIRING_QR,
HOMEKIT_PAIRING_QR_SECRET,
MANUFACTURER,
SERVICE_HOMEKIT_RESET_ACCESSORY,
SERVICE_HOMEKIT_START,
SHUTDOWN_TIMEOUT,
UNDO_UPDATE_LISTENER,
)
from .util import (
dismiss_setup_message,
get_persist_fullpath_for_entry_id,
migrate_filesystem_state_data_for_primary_imported_entry_id,
port_is_available,
remove_state_files_for_entry_id,
show_setup_message,
validate_entity_config,
)
_LOGGER = logging.getLogger(__name__)
MAX_DEVICES = 150
# #### Driver Status ####
STATUS_READY = 0
STATUS_RUNNING = 1
STATUS_STOPPED = 2
STATUS_WAIT = 3
def _has_all_unique_names_and_ports(bridges):
"""Validate that each homekit bridge configured has a unique name."""
names = [bridge[CONF_NAME] for bridge in bridges]
ports = [bridge[CONF_PORT] for bridge in bridges]
vol.Schema(vol.Unique())(names)
vol.Schema(vol.Unique())(ports)
return bridges
BRIDGE_SCHEMA = vol.All(
cv.deprecated(CONF_ZEROCONF_DEFAULT_INTERFACE),
vol.Schema(
{
vol.Optional(CONF_HOMEKIT_MODE, default=DEFAULT_HOMEKIT_MODE): vol.In(
HOMEKIT_MODES
),
vol.Optional(CONF_NAME, default=BRIDGE_NAME): vol.All(
cv.string, vol.Length(min=3, max=25)
),
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_IP_ADDRESS): vol.All(ipaddress.ip_address, cv.string),
vol.Optional(CONF_ADVERTISE_IP): vol.All(ipaddress.ip_address, cv.string),
vol.Optional(CONF_AUTO_START, default=DEFAULT_AUTO_START): cv.boolean,
vol.Optional(CONF_SAFE_MODE, default=DEFAULT_SAFE_MODE): cv.boolean,
vol.Optional(CONF_FILTER, default={}): BASE_FILTER_SCHEMA,
vol.Optional(CONF_ENTITY_CONFIG, default={}): validate_entity_config,
vol.Optional(CONF_ZEROCONF_DEFAULT_INTERFACE): cv.boolean,
},
extra=vol.ALLOW_EXTRA,
),
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.All(cv.ensure_list, [BRIDGE_SCHEMA], _has_all_unique_names_and_ports)},
extra=vol.ALLOW_EXTRA,
)
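# Illustrative sketch (not part of the original integration): a minimal bridge
# entry validated against BRIDGE_SCHEMA above. Optional keys such as the port,
# auto_start and safe_mode are filled in from their defaults by voluptuous.
# The bridge name used here is a made-up example.
def _example_validated_bridge_config():
    """Return a minimal bridge config with schema defaults applied."""
    return BRIDGE_SCHEMA({CONF_NAME: "Example Bridge"})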
RESET_ACCESSORY_SERVICE_SCHEMA = vol.Schema(
{vol.Required(ATTR_ENTITY_ID): cv.entity_ids}
)
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the HomeKit from yaml."""
hass.data.setdefault(DOMAIN, {})
_async_register_events_and_services(hass)
if DOMAIN not in config:
return True
current_entries = hass.config_entries.async_entries(DOMAIN)
entries_by_name = {entry.data[CONF_NAME]: entry for entry in current_entries}
for index, conf in enumerate(config[DOMAIN]):
if _async_update_config_entry_if_from_yaml(hass, entries_by_name, conf):
continue
conf[CONF_ENTRY_INDEX] = index
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data=conf,
)
)
return True
@callback
def _async_update_config_entry_if_from_yaml(hass, entries_by_name, conf):
"""Update a config entry with the latest yaml.
Returns True if a matching config entry was found
Returns False if there is no matching config entry
"""
bridge_name = conf[CONF_NAME]
if (
bridge_name in entries_by_name
and entries_by_name[bridge_name].source == SOURCE_IMPORT
):
entry = entries_by_name[bridge_name]
# If they alter the yaml config we import the changes
# since there currently is no practical way to support
# all the options in the UI at this time.
data = conf.copy()
options = {}
for key in CONFIG_OPTIONS:
options[key] = data[key]
del data[key]
hass.config_entries.async_update_entry(entry, data=data, options=options)
return True
return False
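# Illustrative sketch (not part of the original integration): the yaml import
# above moves every key listed in CONFIG_OPTIONS out of the entry data and
# into the entry options. A standalone version of that split, with the option
# key list passed in explicitly, could look like this.
def _example_split_conf(conf, option_keys):
    """Split an imported yaml conf dict into (data, options)."""
    data = dict(conf)
    options = {key: data.pop(key) for key in option_keys if key in data}
    return data, options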
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up HomeKit from a config entry."""
_async_import_options_from_data_if_missing(hass, entry)
conf = entry.data
options = entry.options
name = conf[CONF_NAME]
port = conf[CONF_PORT]
_LOGGER.debug("Begin setup HomeKit for %s", name)
# If the previous instance hasn't cleaned up yet
# we need to wait a bit
if not await hass.async_add_executor_job(port_is_available, port):
_LOGGER.warning("The local port %s is in use", port)
raise ConfigEntryNotReady
if CONF_ENTRY_INDEX in conf and conf[CONF_ENTRY_INDEX] == 0:
_LOGGER.debug("Migrating legacy HomeKit data for %s", name)
hass.async_add_executor_job(
migrate_filesystem_state_data_for_primary_imported_entry_id,
hass,
entry.entry_id,
)
aid_storage = AccessoryAidStorage(hass, entry.entry_id)
await aid_storage.async_initialize()
# ip_address and advertise_ip are yaml only
ip_address = conf.get(CONF_IP_ADDRESS)
advertise_ip = conf.get(CONF_ADVERTISE_IP)
homekit_mode = options.get(CONF_HOMEKIT_MODE, DEFAULT_HOMEKIT_MODE)
entity_config = options.get(CONF_ENTITY_CONFIG, {}).copy()
auto_start = options.get(CONF_AUTO_START, DEFAULT_AUTO_START)
safe_mode = options.get(CONF_SAFE_MODE, DEFAULT_SAFE_MODE)
entity_filter = FILTER_SCHEMA(options.get(CONF_FILTER, {}))
homekit = HomeKit(
hass,
name,
port,
ip_address,
entity_filter,
entity_config,
safe_mode,
homekit_mode,
advertise_ip,
entry.entry_id,
)
zeroconf_instance = await zeroconf.async_get_instance(hass)
await hass.async_add_executor_job(homekit.setup, zeroconf_instance)
undo_listener = entry.add_update_listener(_async_update_listener)
hass.data[DOMAIN][entry.entry_id] = {
AID_STORAGE: aid_storage,
HOMEKIT: homekit,
UNDO_UPDATE_LISTENER: undo_listener,
}
if hass.state == CoreState.running:
await homekit.async_start()
elif auto_start:
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STARTED, homekit.async_start)
return True
async def _async_update_listener(hass: HomeAssistant, entry: ConfigEntry):
"""Handle options update."""
if entry.source == SOURCE_IMPORT:
return
await hass.config_entries.async_reload(entry.entry_id)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
dismiss_setup_message(hass, entry.entry_id)
hass.data[DOMAIN][entry.entry_id][UNDO_UPDATE_LISTENER]()
homekit = hass.data[DOMAIN][entry.entry_id][HOMEKIT]
if homekit.status == STATUS_RUNNING:
await homekit.async_stop()
for _ in range(0, SHUTDOWN_TIMEOUT):
if not await hass.async_add_executor_job(
port_is_available, entry.data[CONF_PORT]
):
_LOGGER.info("Waiting for the HomeKit server to shutdown")
await asyncio.sleep(1)
hass.data[DOMAIN].pop(entry.entry_id)
return True
async def async_remove_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Remove a config entry."""
return await hass.async_add_executor_job(
remove_state_files_for_entry_id, hass, entry.entry_id
)
@callback
def _async_import_options_from_data_if_missing(hass: HomeAssistant, entry: ConfigEntry):
options = dict(entry.options)
data = dict(entry.data)
modified = False
for importable_option in CONFIG_OPTIONS:
if importable_option not in entry.options and importable_option in entry.data:
options[importable_option] = entry.data[importable_option]
del data[importable_option]
modified = True
if modified:
hass.config_entries.async_update_entry(entry, data=data, options=options)
@callback
def _async_register_events_and_services(hass: HomeAssistant):
"""Register events and services for HomeKit."""
hass.http.register_view(HomeKitPairingQRView)
def handle_homekit_reset_accessory(service):
"""Handle start HomeKit service call."""
for entry_id in hass.data[DOMAIN]:
if HOMEKIT not in hass.data[DOMAIN][entry_id]:
continue
homekit = hass.data[DOMAIN][entry_id][HOMEKIT]
if homekit.status != STATUS_RUNNING:
_LOGGER.warning(
"HomeKit is not running. Either it is waiting to be "
"started or has been stopped"
)
continue
entity_ids = service.data.get("entity_id")
homekit.reset_accessories(entity_ids)
hass.services.async_register(
DOMAIN,
SERVICE_HOMEKIT_RESET_ACCESSORY,
handle_homekit_reset_accessory,
schema=RESET_ACCESSORY_SERVICE_SCHEMA,
)
async def async_handle_homekit_service_start(service):
"""Handle start HomeKit service call."""
for entry_id in hass.data[DOMAIN]:
if HOMEKIT not in hass.data[DOMAIN][entry_id]:
continue
homekit = hass.data[DOMAIN][entry_id][HOMEKIT]
if homekit.status == STATUS_RUNNING:
_LOGGER.debug("HomeKit is already running")
continue
if homekit.status != STATUS_READY:
_LOGGER.warning(
"HomeKit is not ready. Either it is already starting up or has "
"been stopped"
)
continue
await homekit.async_start()
hass.services.async_register(
DOMAIN, SERVICE_HOMEKIT_START, async_handle_homekit_service_start
)
async def _handle_homekit_reload(service):
"""Handle start HomeKit service call."""
config = await async_integration_yaml_config(hass, DOMAIN)
if not config or DOMAIN not in config:
return
current_entries = hass.config_entries.async_entries(DOMAIN)
entries_by_name = {entry.data[CONF_NAME]: entry for entry in current_entries}
for conf in config[DOMAIN]:
_async_update_config_entry_if_from_yaml(hass, entries_by_name, conf)
reload_tasks = [
hass.config_entries.async_reload(entry.entry_id)
for entry in current_entries
]
await asyncio.gather(*reload_tasks)
hass.helpers.service.async_register_admin_service(
DOMAIN,
SERVICE_RELOAD,
_handle_homekit_reload,
)
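# Illustrative sketch (not part of the original integration): invoking the
# reset-accessory service registered above from other Home Assistant code.
# The entity_id is a placeholder.
#
#   await hass.services.async_call(
#       DOMAIN,
#       SERVICE_HOMEKIT_RESET_ACCESSORY,
#       {ATTR_ENTITY_ID: "light.kitchen"},
#       blocking=True,
#   )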
class HomeKit:
"""Class to handle all actions between HomeKit and Home Assistant."""
def __init__(
self,
hass,
name,
port,
ip_address,
entity_filter,
entity_config,
safe_mode,
homekit_mode,
advertise_ip=None,
entry_id=None,
):
"""Initialize a HomeKit object."""
self.hass = hass
self._name = name
self._port = port
self._ip_address = ip_address
self._filter = entity_filter
self._config = entity_config
self._safe_mode = safe_mode
self._advertise_ip = advertise_ip
self._entry_id = entry_id
self._homekit_mode = homekit_mode
self.status = STATUS_READY
self.bridge = None
self.driver = None
def setup(self, zeroconf_instance):
"""Set up bridge and accessory driver."""
# pylint: disable=import-outside-toplevel
from .accessories import HomeDriver
self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, self.async_stop)
ip_addr = self._ip_address or get_local_ip()
persist_file = get_persist_fullpath_for_entry_id(self.hass, self._entry_id)
self.driver = HomeDriver(
self.hass,
self._entry_id,
self._name,
loop=self.hass.loop,
address=ip_addr,
port=self._port,
persist_file=persist_file,
advertised_address=self._advertise_ip,
zeroconf_instance=zeroconf_instance,
)
# If we do not load the existing state, the MAC address will be wrong,
# as pyhap uses a random one until state is restored
if os.path.exists(persist_file):
self.driver.load()
else:
self.driver.persist()
if self._safe_mode:
_LOGGER.debug("Safe_mode selected for %s", self._name)
self.driver.safe_mode = True
def reset_accessories(self, entity_ids):
"""Reset the accessory to load the latest configuration."""
if not self.bridge:
self.driver.config_changed()
return
aid_storage = self.hass.data[DOMAIN][self._entry_id][AID_STORAGE]
removed = []
for entity_id in entity_ids:
aid = aid_storage.get_or_allocate_aid_for_entity_id(entity_id)
if aid not in self.bridge.accessories:
continue
_LOGGER.info(
"HomeKit Bridge %s will reset accessory with linked entity_id %s",
self._name,
entity_id,
)
acc = self.remove_bridge_accessory(aid)
removed.append(acc)
if not removed:
# No matched accessories, probably on another bridge
return
self.driver.config_changed()
for acc in removed:
self.bridge.add_accessory(acc)
self.driver.config_changed()
def add_bridge_accessory(self, state):
"""Try adding accessory to bridge if configured beforehand."""
if not self._filter(state.entity_id):
return
# The bridge itself counts as an accessory
if len(self.bridge.accessories) + 1 >= MAX_DEVICES:
_LOGGER.warning(
"Cannot add %s as this would exceeded the %d device limit. Consider using the filter option",
state.entity_id,
MAX_DEVICES,
)
return
aid = self.hass.data[DOMAIN][self._entry_id][
AID_STORAGE
].get_or_allocate_aid_for_entity_id(state.entity_id)
conf = self._config.pop(state.entity_id, {})
# If an accessory cannot be created or added due to an exception
# of any kind (usually in pyhap) it should not prevent
# the rest of the accessories from being created
try:
acc = get_accessory(self.hass, self.driver, state, aid, conf)
if acc is not None:
self.bridge.add_accessory(acc)
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"Failed to create a HomeKit accessory for %s", state.entity_id
)
def remove_bridge_accessory(self, aid):
"""Try adding accessory to bridge if configured beforehand."""
acc = None
if aid in self.bridge.accessories:
acc = self.bridge.accessories.pop(aid)
return acc
async def async_start(self, *args):
"""Start the accessory driver."""
if self.status != STATUS_READY:
return
self.status = STATUS_WAIT
ent_reg = await entity_registry.async_get_registry(self.hass)
dev_reg = await device_registry.async_get_registry(self.hass)
device_lookup = ent_reg.async_get_device_class_lookup(
{
(BINARY_SENSOR_DOMAIN, DEVICE_CLASS_BATTERY_CHARGING),
(BINARY_SENSOR_DOMAIN, DEVICE_CLASS_MOTION),
(BINARY_SENSOR_DOMAIN, DEVICE_CLASS_OCCUPANCY),
(SENSOR_DOMAIN, DEVICE_CLASS_BATTERY),
(SENSOR_DOMAIN, DEVICE_CLASS_HUMIDITY),
}
)
bridged_states = []
for state in self.hass.states.async_all():
if not self._filter(state.entity_id):
continue
ent_reg_ent = ent_reg.async_get(state.entity_id)
if ent_reg_ent:
await self._async_set_device_info_attributes(
ent_reg_ent, dev_reg, state.entity_id
)
self._async_configure_linked_sensors(ent_reg_ent, device_lookup, state)
bridged_states.append(state)
self._async_register_bridge(dev_reg)
await self.hass.async_add_executor_job(self._start, bridged_states)
_LOGGER.debug("Driver start for %s", self._name)
self.hass.add_job(self.driver.start_service)
self.status = STATUS_RUNNING
@callback
def _async_register_bridge(self, dev_reg):
"""Register the bridge as a device so homekit_controller and exclude it from discovery."""
formatted_mac = device_registry.format_mac(self.driver.state.mac)
# Connections and identifiers are both used here.
#
# connections exists so homekit_controller can know the
# virtual mac address of the bridge and know to not offer
# it via discovery.
#
# identifiers is used as well since the virtual mac may change
# because it will not survive manual pairing resets (deleting state file)
# which we have trained users to do over the past few years
# because this was the way you had to fix homekit when pairing
# failed.
#
connection = (device_registry.CONNECTION_NETWORK_MAC, formatted_mac)
identifier = (DOMAIN, self._entry_id, BRIDGE_SERIAL_NUMBER)
self._async_purge_old_bridges(dev_reg, identifier, connection)
dev_reg.async_get_or_create(
config_entry_id=self._entry_id,
identifiers={identifier},
connections={connection},
manufacturer=MANUFACTURER,
name=self._name,
model="Home Assistant HomeKit Bridge",
)
@callback
def _async_purge_old_bridges(self, dev_reg, identifier, connection):
"""Purge bridges that exist from failed pairing or manual resets."""
devices_to_purge = []
for entry in dev_reg.devices.values():
if self._entry_id in entry.config_entries and (
identifier not in entry.identifiers
or connection not in entry.connections
):
devices_to_purge.append(entry.id)
for device_id in devices_to_purge:
dev_reg.async_remove_device(device_id)
def _start(self, bridged_states):
# pylint: disable=unused-import, import-outside-toplevel
from . import ( # noqa: F401
type_cameras,
type_covers,
type_fans,
type_humidifiers,
type_lights,
type_locks,
type_media_players,
type_security_systems,
type_sensors,
type_switches,
type_thermostats,
)
if self._homekit_mode == HOMEKIT_MODE_ACCESSORY:
state = bridged_states[0]
conf = self._config.pop(state.entity_id, {})
acc = get_accessory(self.hass, self.driver, state, STANDALONE_AID, conf)
self.driver.add_accessory(acc)
else:
from .accessories import HomeBridge
self.bridge = HomeBridge(self.hass, self.driver, self._name)
for state in bridged_states:
self.add_bridge_accessory(state)
self.driver.add_accessory(self.bridge)
if not self.driver.state.paired:
show_setup_message(
self.hass,
self._entry_id,
self._name,
self.driver.state.pincode,
self.driver.accessory.xhm_uri(),
)
async def async_stop(self, *args):
"""Stop the accessory driver."""
if self.status != STATUS_RUNNING:
return
self.status = STATUS_STOPPED
_LOGGER.debug("Driver stop for %s", self._name)
await self.driver.async_stop()
if self.bridge:
for acc in self.bridge.accessories.values():
acc.async_stop()
else:
self.driver.accessory.async_stop()
@callback
def _async_configure_linked_sensors(self, ent_reg_ent, device_lookup, state):
if (
ent_reg_ent is None
or ent_reg_ent.device_id is None
or ent_reg_ent.device_id not in device_lookup
or ent_reg_ent.device_class
in (DEVICE_CLASS_BATTERY_CHARGING, DEVICE_CLASS_BATTERY)
):
return
if ATTR_BATTERY_CHARGING not in state.attributes:
battery_charging_binary_sensor_entity_id = device_lookup[
ent_reg_ent.device_id
].get((BINARY_SENSOR_DOMAIN, DEVICE_CLASS_BATTERY_CHARGING))
if battery_charging_binary_sensor_entity_id:
self._config.setdefault(state.entity_id, {}).setdefault(
CONF_LINKED_BATTERY_CHARGING_SENSOR,
battery_charging_binary_sensor_entity_id,
)
if ATTR_BATTERY_LEVEL not in state.attributes:
battery_sensor_entity_id = device_lookup[ent_reg_ent.device_id].get(
(SENSOR_DOMAIN, DEVICE_CLASS_BATTERY)
)
if battery_sensor_entity_id:
self._config.setdefault(state.entity_id, {}).setdefault(
CONF_LINKED_BATTERY_SENSOR, battery_sensor_entity_id
)
if state.entity_id.startswith(f"{CAMERA_DOMAIN}."):
motion_binary_sensor_entity_id = device_lookup[ent_reg_ent.device_id].get(
(BINARY_SENSOR_DOMAIN, DEVICE_CLASS_MOTION)
)
if motion_binary_sensor_entity_id:
self._config.setdefault(state.entity_id, {}).setdefault(
CONF_LINKED_MOTION_SENSOR,
motion_binary_sensor_entity_id,
)
doorbell_binary_sensor_entity_id = device_lookup[ent_reg_ent.device_id].get(
(BINARY_SENSOR_DOMAIN, DEVICE_CLASS_OCCUPANCY)
)
if doorbell_binary_sensor_entity_id:
self._config.setdefault(state.entity_id, {}).setdefault(
CONF_LINKED_DOORBELL_SENSOR,
doorbell_binary_sensor_entity_id,
)
if state.entity_id.startswith(f"{HUMIDIFIER_DOMAIN}."):
current_humidity_sensor_entity_id = device_lookup[
ent_reg_ent.device_id
].get((SENSOR_DOMAIN, DEVICE_CLASS_HUMIDITY))
if current_humidity_sensor_entity_id:
self._config.setdefault(state.entity_id, {}).setdefault(
CONF_LINKED_HUMIDITY_SENSOR,
current_humidity_sensor_entity_id,
)
async def _async_set_device_info_attributes(self, ent_reg_ent, dev_reg, entity_id):
"""Set attributes that will be used for homekit device info."""
ent_cfg = self._config.setdefault(entity_id, {})
if ent_reg_ent.device_id:
dev_reg_ent = dev_reg.async_get(ent_reg_ent.device_id)
if dev_reg_ent is not None:
# Handle missing devices
if dev_reg_ent.manufacturer:
ent_cfg[ATTR_MANUFACTURER] = dev_reg_ent.manufacturer
if dev_reg_ent.model:
ent_cfg[ATTR_MODEL] = dev_reg_ent.model
if dev_reg_ent.sw_version:
ent_cfg[ATTR_SOFTWARE_VERSION] = dev_reg_ent.sw_version
if ATTR_MANUFACTURER not in ent_cfg:
try:
integration = await async_get_integration(
self.hass, ent_reg_ent.platform
)
ent_cfg[ATTR_INTERGRATION] = integration.name
except IntegrationNotFound:
ent_cfg[ATTR_INTERGRATION] = ent_reg_ent.platform
class HomeKitPairingQRView(HomeAssistantView):
"""Display the homekit pairing code at a protected url."""
url = "/api/homekit/pairingqr"
name = "api:homekit:pairingqr"
requires_auth = False
async def get(self, request):
"""Retrieve the pairing QRCode image."""
if not request.query_string:
raise Unauthorized()
entry_id, secret = request.query_string.split("-")
if (
entry_id not in request.app["hass"].data[DOMAIN]
or secret
!= request.app["hass"].data[DOMAIN][entry_id][HOMEKIT_PAIRING_QR_SECRET]
):
raise Unauthorized()
return web.Response(
body=request.app["hass"].data[DOMAIN][entry_id][HOMEKIT_PAIRING_QR],
content_type="image/svg+xml",
)
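# Illustrative sketch (not part of the original integration): the view above
# expects the whole query string to be "<entry_id>-<secret>". A helper that
# builds such a URL (the helper itself is made up for demonstration) could be:
def _example_pairing_qr_url(entry_id, secret):
    """Return the URL HomeKitPairingQRView accepts for a pairing QR code."""
    return f"{HomeKitPairingQRView.url}?{entry_id}-{secret}"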
| |
# -*- coding: utf-8 -*-
from django.views.generic import TemplateView, FormView, ListView
from django.views.generic.edit import ProcessFormView, FormMixin
from django import forms
#from django.http import HttpResponseRedirect, HttpResponse
#from django.core.context_processors import csrf
#from django.template.context import RequestContext# Context
from django.utils.translation import ugettext_lazy as _
#from django.db.models import Q
from Kraggne.models import MenuItem
from Kraggne.contrib.flatblocks.utils import GetTemplatesPath
from django.http import Http404
from django.core.paginator import InvalidPage
def addSelfToContext(slug,context):
try:
page = MenuItem.objects.get(slug=slug)
context['page'] = page
for u in page.pagevar_set.all():
u.addToContext(context)
except Exception:
pass
class GenericViewContextMixinSlug(object):
slug = ''
def get_context_data(self, **kwargs):
context = super(GenericViewContextMixinSlug, self).get_context_data(**kwargs)
addSelfToContext(self.slug,context)
return context
class GenericViewContextMixin(GenericViewContextMixinSlug):
def get_context_data(self, **kwargs):
context = super(GenericViewContextMixin, self).get_context_data(**kwargs)
page = self.kwargs.get('page',False)
if page:
context['page'] = page
else:
return context
for u in page.pagevar_set.all():
u.addToContext(context)
try:
context.pop('params')
except Exception:
pass
return context
class GenericView(GenericViewContextMixin,TemplateView):
template_name = "Kraggne/genericPage.html"
#def __init__(self,page):
# self.test = page.url
class GenericDetailView(GenericView):
template_name = "Kraggne/genericDetailPage.html"
model = None
def get_for_object(self,**kwargs):
obj = None
if hasattr(self.model,'get_object_from_url'):
obj = self.model.get_object_from_url(**kwargs)
#by pk
pk = kwargs.get('pk')
if not obj and pk:
r =self.model.objects.filter(pk=pk)
obj = r and r[0] or None
#by slug
if not obj:
slug = kwargs.get('slug')
if slug:
r =self.model.objects.filter(slug=slug)
obj = r and r[0] or None
return obj
def get_template_names(self):
names = []
if hasattr(self.model, '_meta'):
names.append("%s/%s/detail.html" % (
self.model._meta.app_label,
self.model._meta.object_name.lower(),
))
names.append(self.template_name)
return names
def get_context_data(self, **kwargs):
context = super(GenericDetailView, self).get_context_data(**kwargs)
context['object'] = self.get_for_object(**kwargs)
try:
context["object_model_name"] = "%s.%s" % (self.model._meta.app_label.lower(),self.model._meta.object_name.lower())
except Exception:
pass
return context
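# Illustrative sketch (not part of the original module): wiring
# GenericDetailView to a model in a URLconf. The model import, URL pattern and
# slug value are assumptions for demonstration only (old-style url() syntax,
# matching the Django era of this module).
#
# from django.conf.urls import url
# from myapp.models import Article
#
# urlpatterns = [
#     url(r'^articles/(?P<slug>[-\w]+)/$',
#         GenericDetailView.as_view(model=Article, slug='articles')),
# ]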
from django.http import HttpResponseRedirect
class GenericFormView(GenericViewContextMixin,FormView):
template_name = "Kraggne/genericFormPage.html"
#success_url = None
#def __init__(self,*args,**kwargs):
# print args
# print kwargs
# super(GenericFormView,self).__init__(*args,**kwargs)
def is_model_form(self):
return issubclass(self.get_form_class(),forms.ModelForm)
def get_form(self, form_class):
form = form_class(**self.get_form_kwargs())
if hasattr(form,"request"):
form.request = self.request
return form
def post(self,request,*args,**kwarg):
try:
self.page = kwarg.pop('page')
except Exception:
pass
form_class = self.get_form_class()
form = self.get_form(form_class)
if form.is_valid():
return self.form_valid(form,**kwarg)
else:
return self.form_invalid(form,**kwarg)
def get_success_url(self):
if self.success_url:
return self.success_url
if hasattr(self,'object') and self.object is not None and hasattr(self.object,'get_absolute_url'):
return self.object.get_absolute_url()
if self.slug:
page = MenuItem.objects.filter(slug=self.slug)[:1]
if page:
try:
return page[0].formblock.url
except Exception:
return page[0].url
if self.page:
try:
return self.page.formblock.url
except Exception:
return self.page.url
return ""
def get_context_data(self, **kwargs):
context = super(GenericFormView, self).get_context_data(**kwargs)
page = self.kwargs.get('page',False)
if page:
context['page'] = page
else:
page = MenuItem.objects.get(slug=self.slug)
#self.success_url = page.formblock.url or page.url
if page.url[-1] != "/":
context['action_url'] = page.url + "/"
else:
context['action_url'] = page.url
return context
def get_form_kwargs(self):
kwargs = FormMixin.get_form_kwargs(self)
if hasattr(self,'object'):
kwargs.update({'instance': self.object})
return kwargs
def form_valid(self,form):
if self.is_model_form():
try:
self.object = form.save(commit=True,request=self.request)
except TypeError:
self.object = form.save(commit=True)
form.save_m2m()
return FormMixin.form_valid(self,form)
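# Illustrative sketch (not part of the original module): a minimal subclass of
# GenericFormView bound to a ModelForm. The form class, slug and success URL
# are assumptions for demonstration only.
#
# class ContactPageView(GenericFormView):
#     form_class = ContactForm
#     slug = 'contact'
#     success_url = '/contact/thanks/'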
class GenericListView(GenericViewContextMixin,ListView):
template_name = "Kraggne/genericListPage.html"
paginate_by = 10
def get_context_data(self, **kwargs):
context = super(GenericListView, self).get_context_data(**kwargs)
page = self.kwargs.get('page',False)
if page:
context['page'] = page
try:
context["object_model_name"] = "%s.%s" % (self.model._meta.app_label.lower(),self.model._meta.object_name.lower())
except Exception:
pass
return context
def paginate_queryset(self, queryset, page_size):
paginator = self.get_paginator(queryset, page_size, allow_empty_first_page=self.get_allow_empty())
page = self.request.GET.get('page') or 1
try:
page_number = int(page)
except ValueError:
if page == 'last':
page_number = paginator.num_pages
else:
raise Http404(_(u"Page is not 'last', nor can it be converted to an int."))
try:
page = paginator.page(page_number)
return (paginator, page, page.object_list, page.has_other_pages())
except InvalidPage:
raise Http404(_(u'Invalid page (%(page_number)s)') % {
'page_number': page_number
})
def get_paginate_by(self, queryset):
if hasattr(self.model, 'paginate_by'):
return self.model.paginate_by
return self.paginate_by
def get_template_names(self):
names = []
if hasattr(self.model, '_meta'):
names.append("%s/%s/list.html" % (
self.model._meta.app_label,
self.model._meta.object_name.lower(),
))
names.append(self.template_name)
return names
class GenericListFormView(GenericListView,FormMixin,ProcessFormView):
template_name = "Kraggne/genericListFormPage.html"
def get_context_data(self,form=None,**kwargs):
context = GenericListView.get_context_data(self,**kwargs)
if not form:
form_class = self.get_form_class()
form = self.get_form(form_class)
context["form"] = form
page = context["page"]
if page.url[-1] != "/":
context['action_url'] = page.url + "/"
else:
context['action_url'] = page.url
return context
def post(self,request,*args,**kwarg):
self.page = kwarg.pop('page')
return ProcessFormView.post(self,request,*args,**kwarg)
def is_model_form(self):
return issubclass(self.get_form_class(),forms.ModelForm)
def get_template_names(self):
names = []
if hasattr(self.model, '_meta'):
names.append("%s/%s/formlist.html" % (
self.model._meta.app_label,
self.model._meta.object_name.lower(),
))
names.append("%s/%s/list.html" % (
self.model._meta.app_label,
self.model._meta.object_name.lower(),
))
names.append(self.template_name)
return names
def get_success_url(self):
if hasattr(self,'object') and self.object is not None and hasattr(self.object,'get_absolute_url'):
return self.object.get_absolute_url()
if self.slug:
page = MenuItem.objects.filter(slug=self.slug)[:1]
if page:
try:
return page[0].formblock.url
except Exception:
return page[0].url
if self.page:
try:
return self.page.formblock.url
except Exception:
return self.page.url
return None
def get_form_kwargs(self):
"""
Returns the keyword arguments for instantiating the form.
"""
kwargs = FormMixin.get_form_kwargs(self)
if hasattr(self,'object'):
kwargs.update({'instance': self.object})
return kwargs
def form_valid(self,form):
if self.is_model_form():
try:
self.object = form.save(commit=True,request=self.request)
except TypeError:
self.object = form.save(commit=True)
form.save_m2m()
#if hasattr(self.object,'save_model'):
# self.object.save_model(self.request,form,False):
return FormMixin.form_valid(self,form)
def form_invalid(self,form):
self.object_list = self.get_queryset()
return self.render_to_response(self.get_context_data(form=form,object_list=self.object_list))
class GenericDetailFormView(GenericDetailView,FormMixin,ProcessFormView):
template_name = "Kraggne/genericDetailFormPage.html"
def get_context_data(self,form=None,**kwargs):
context = GenericDetailView.get_context_data(self,**kwargs)
if not form:
form_class = self.get_form_class()
form = self.get_form(form_class)
context["form"] = form
context['action_url'] = ""
return context
def post(self,request,*args,**kwarg):
self.page = kwarg.pop('page')
form_class = self.get_form_class()
form = self.get_form(form_class)
if form.is_valid():
return self.form_valid(form,**kwarg)
else:
return self.form_invalid(form,**kwarg)
def is_model_form(self):
return issubclass(self.get_form_class(),forms.ModelForm)
def get_template_names(self):
names = []
if hasattr(self.model, '_meta'):
names.append("%s/%s/formdetail.html" % (
self.model._meta.app_label,
self.model._meta.object_name.lower(),
))
names.append("%s/%s/detail.html" % (
self.model._meta.app_label,
self.model._meta.object_name.lower(),
))
names.append(self.template_name)
return names
def get_success_url(self):
if hasattr(self,'object') and self.object is not None and hasattr(self.object,'get_absolute_url'):
return self.object.get_absolute_url()
if self.slug:
page = MenuItem.objects.filter(slug=self.slug)[:1]
if page:
try:
return page[0].formblock.url
except Exception:
return page[0].url
if self.page:
try:
return self.page.formblock.url
except Exception:
return self.page.url
return None
# def get_form_kwargs(self):
# """
# Returns the keyword arguments for instanciating the form.
# """
# kwargs = FormMixin.get_form_kwargs(self)
# if hasattr(self,'object'):
# kwargs.update({'instance': self.object})
# return kwargs
def form_valid(self,form,**kwargs):
cur_obj = self.get_for_object(**kwargs)
form.current_object = cur_obj
if self.is_model_form():
try:
self.object = form.save(commit=True,request=self.request)
except TypeError:
self.object = form.save(commit=True)
form.save_m2m()
#if hasattr(self.object,'save_model'):
# self.object.save_model(self.request,form,False):
return FormMixin.form_valid(self,form)
def form_invalid(self,form,**kwargs):
return self.render_to_response(self.get_context_data(**kwargs))
#from django.shortcuts import render_to_response
#def Generic(request,*args,**kwargs)
#form.current_object = cur_obj:
# return ''
| |
"""Provides access to stored IDLE configuration information.
Refer to the comments at the beginning of config-main.def for a description of
the available configuration files and the design implemented to update user
configuration information. In particular, user configuration choices which
duplicate the defaults will be removed from the user's configuration files,
and if a file becomes empty, it will be deleted.
The contents of the user files may be altered using the Options/Configure IDLE
menu to access the configuration GUI (configDialog.py), or manually.
Throughout this module there is an emphasis on returning useable defaults
when a problem occurs in returning a requested configuration value back to
idle. This is to allow IDLE to continue to function in spite of errors in
the retrieval of config information. When a default is returned instead of
a requested config value, a message is printed to stderr to aid in
configuration problem notification and resolution.
"""
import os
import sys
import string
from ConfigParser import ConfigParser, NoOptionError, NoSectionError
class InvalidConfigType(Exception): pass
class InvalidConfigSet(Exception): pass
class InvalidFgBg(Exception): pass
class InvalidTheme(Exception): pass
class IdleConfParser(ConfigParser):
"""
A ConfigParser specialised for idle configuration file handling
"""
def __init__(self, cfgFile, cfgDefaults=None):
"""
cfgFile - string, fully specified configuration file name
"""
self.file=cfgFile
ConfigParser.__init__(self,defaults=cfgDefaults)
def Get(self, section, option, type=None, default=None):
"""
Get an option value for given section/option or return default.
If type is specified, return as type.
"""
if type=='bool':
getVal=self.getboolean
elif type=='int':
getVal=self.getint
else:
getVal=self.get
if self.has_option(section,option):
#return getVal(section, option, raw, vars, default)
return getVal(section, option)
else:
return default
def GetOptionList(self,section):
"""
Get an option list for given section
"""
if self.has_section(section):
return self.options(section)
else: #return a default value
return []
def Load(self):
"""
Load the configuration file from disk
"""
self.read(self.file)
class IdleUserConfParser(IdleConfParser):
"""
IdleConfigParser specialised for user configuration handling.
"""
def AddSection(self,section):
"""
if section doesn't exist, add it
"""
if not self.has_section(section):
self.add_section(section)
def RemoveEmptySections(self):
"""
remove any sections that have no options
"""
for section in self.sections():
if not self.GetOptionList(section):
self.remove_section(section)
def IsEmpty(self):
"""
Remove empty sections and then return 1 if parser has no sections
left, else return 0.
"""
self.RemoveEmptySections()
if self.sections():
return 0
else:
return 1
def RemoveOption(self,section,option):
"""
If section/option exists, remove it.
Returns 1 if option was removed, 0 otherwise.
"""
if self.has_section(section):
return self.remove_option(section,option)
def SetOption(self,section,option,value):
"""
Sets option to value, adding section if required.
Returns 1 if option was added or changed, otherwise 0.
"""
if self.has_option(section,option):
if self.get(section,option)==value:
return 0
else:
self.set(section,option,value)
return 1
else:
if not self.has_section(section):
self.add_section(section)
self.set(section,option,value)
return 1
def RemoveFile(self):
"""
Removes the user config file from disk if it exists.
"""
if os.path.exists(self.file):
os.remove(self.file)
def Save(self):
"""Update user configuration file.
Remove empty sections. If resulting config isn't empty, write the file
to disk. If config is empty, remove the file from disk if it exists.
"""
if not self.IsEmpty():
cfgFile=open(self.file,'w')
self.write(cfgFile)
else:
self.RemoveFile()
class IdleConf:
"""
holds config parsers for all idle config files:
default config files
(idle install dir)/config-main.def
(idle install dir)/config-extensions.def
(idle install dir)/config-highlight.def
(idle install dir)/config-keys.def
user config files
(user home dir)/.idlerc/config-main.cfg
(user home dir)/.idlerc/config-extensions.cfg
(user home dir)/.idlerc/config-highlight.cfg
(user home dir)/.idlerc/config-keys.cfg
"""
def __init__(self):
self.defaultCfg={}
self.userCfg={}
self.cfg={}
self.CreateConfigHandlers()
self.LoadCfgFiles()
#self.LoadCfg()
def CreateConfigHandlers(self):
"""
set up a dictionary of config parsers for default and user
configurations respectively
"""
#build idle install path
if __name__ != '__main__': # we were imported
idleDir=os.path.dirname(__file__)
else: # we were exec'ed (for testing only)
idleDir=os.path.abspath(sys.path[0])
userDir=self.GetUserCfgDir()
configTypes=('main','extensions','highlight','keys')
defCfgFiles={}
usrCfgFiles={}
for cfgType in configTypes: #build config file names
defCfgFiles[cfgType]=os.path.join(idleDir,'config-'+cfgType+'.def')
usrCfgFiles[cfgType]=os.path.join(userDir,'config-'+cfgType+'.cfg')
for cfgType in configTypes: #create config parsers
self.defaultCfg[cfgType]=IdleConfParser(defCfgFiles[cfgType])
self.userCfg[cfgType]=IdleUserConfParser(usrCfgFiles[cfgType])
def GetUserCfgDir(self):
"""
Creates (if required) and returns a filesystem directory for storing
user config files.
"""
cfgDir='.idlerc'
userDir=os.path.expanduser('~')
if userDir != '~': #'HOME' exists as a key in os.environ
if not os.path.exists(userDir):
warn=('\n Warning: HOME environment variable points to\n '+
userDir+'\n but the path does not exist.\n')
sys.stderr.write(warn)
userDir='~'
if userDir=='~': #we still don't have a home directory
#traditionally idle has defaulted to os.getcwd(), is this adequate?
userDir = os.getcwd() #hack for no real homedir
userDir=os.path.join(userDir,cfgDir)
if not os.path.exists(userDir):
try: #make the config dir if it doesn't exist yet
os.mkdir(userDir)
except (IOError, OSError):
warn=('\n Warning: unable to create user config directory\n '+
userDir+'\n')
sys.stderr.write(warn)
return userDir
def GetOption(self, configType, section, option, default=None, type=None,
warn_on_default=True):
"""
Get an option value for given config type and given general
configuration section/option or return a default. If type is specified,
return as type. Firstly the user configuration is checked, with a
fallback to the default configuration, and a final 'catch all'
fallback to a useable passed-in default if the option isn't present in
either the user or the default configuration.
configType must be one of ('main','extensions','highlight','keys')
If a default is returned, and warn_on_default is True, a warning is
printed to stderr.
"""
if self.userCfg[configType].has_option(section,option):
return self.userCfg[configType].Get(section, option, type=type)
elif self.defaultCfg[configType].has_option(section,option):
return self.defaultCfg[configType].Get(section, option, type=type)
else: #returning default, print warning
if warn_on_default:
warning = ('\n Warning: configHandler.py - IdleConf.GetOption -\n'
' problem retrieving configuration option %r\n'
' from section %r.\n'
' returning default value: %r\n' %
(option, section, default))
sys.stderr.write(warning)
return default
def SetOption(self, configType, section, option, value):
"""In user's config file, set section's option to value.
"""
self.userCfg[configType].SetOption(section, option, value)
def GetSectionList(self, configSet, configType):
"""
Get a list of sections from either the user or default config for
the given config type.
configSet must be either 'user' or 'default'
configType must be one of ('main','extensions','highlight','keys')
"""
if not (configType in ('main','extensions','highlight','keys')):
raise InvalidConfigType, 'Invalid configType specified'
if configSet == 'user':
cfgParser=self.userCfg[configType]
elif configSet == 'default':
cfgParser=self.defaultCfg[configType]
else:
raise InvalidConfigSet, 'Invalid configSet specified'
return cfgParser.sections()
def GetHighlight(self, theme, element, fgBg=None):
"""
return individual highlighting theme elements.
fgBg - string ('fg' or 'bg') or None, if None return a dictionary
containing fg and bg colours (appropriate for passing to Tkinter in,
e.g., a tag_config call), otherwise fg or bg colour only as specified.
"""
if self.defaultCfg['highlight'].has_section(theme):
themeDict=self.GetThemeDict('default',theme)
else:
themeDict=self.GetThemeDict('user',theme)
fore=themeDict[element+'-foreground']
if element=='cursor': #there is no config value for cursor bg
back=themeDict['normal-background']
else:
back=themeDict[element+'-background']
highlight={"foreground": fore,"background": back}
if not fgBg: #return dict of both colours
return highlight
else: #return specified colour only
if fgBg == 'fg':
return highlight["foreground"]
if fgBg == 'bg':
return highlight["background"]
else:
raise InvalidFgBg, 'Invalid fgBg specified'
def GetThemeDict(self,type,themeName):
"""
type - string, 'default' or 'user' theme type
themeName - string, theme name
Returns a dictionary which holds {option:value} for each element
in the specified theme. Values are loaded over a set of ultimate last
fallback defaults to guarantee that all theme elements are present in
a newly created theme.
"""
if type == 'user':
cfgParser=self.userCfg['highlight']
elif type == 'default':
cfgParser=self.defaultCfg['highlight']
else:
raise InvalidTheme, 'Invalid theme type specified'
#foreground and background values are provided for each theme element
#(apart from cursor) even though all these values are not yet used
#by idle, to allow for their use in the future. Default values are
#generally black and white.
theme={ 'normal-foreground':'#000000',
'normal-background':'#ffffff',
'keyword-foreground':'#000000',
'keyword-background':'#ffffff',
'builtin-foreground':'#000000',
'builtin-background':'#ffffff',
'comment-foreground':'#000000',
'comment-background':'#ffffff',
'string-foreground':'#000000',
'string-background':'#ffffff',
'definition-foreground':'#000000',
'definition-background':'#ffffff',
'hilite-foreground':'#000000',
'hilite-background':'gray',
'break-foreground':'#ffffff',
'break-background':'#000000',
'hit-foreground':'#ffffff',
'hit-background':'#000000',
'error-foreground':'#ffffff',
'error-background':'#000000',
#cursor (only foreground can be set)
'cursor-foreground':'#000000',
#shell window
'stdout-foreground':'#000000',
'stdout-background':'#ffffff',
'stderr-foreground':'#000000',
'stderr-background':'#ffffff',
'console-foreground':'#000000',
'console-background':'#ffffff' }
for element in theme.keys():
if not cfgParser.has_option(themeName,element):
#we are going to return a default, print warning
warning=('\n Warning: configHandler.py - IdleConf.GetThemeDict'
' -\n problem retrieving theme element %r'
'\n from theme %r.\n'
' returning default value: %r\n' %
(element, themeName, theme[element]))
sys.stderr.write(warning)
colour=cfgParser.Get(themeName,element,default=theme[element])
theme[element]=colour
return theme
def CurrentTheme(self):
"""
Returns the name of the currently active theme
"""
return self.GetOption('main','Theme','name',default='')
def CurrentKeys(self):
"""
Returns the name of the currently active key set
"""
return self.GetOption('main','Keys','name',default='')
def GetExtensions(self, active_only=True, editor_only=False, shell_only=False):
"""
Gets a list of all idle extensions declared in the config files.
active_only - boolean, if true only return active (enabled) extensions
"""
extns=self.RemoveKeyBindNames(
self.GetSectionList('default','extensions'))
userExtns=self.RemoveKeyBindNames(
self.GetSectionList('user','extensions'))
for extn in userExtns:
if extn not in extns: #user has added own extension
extns.append(extn)
if active_only:
activeExtns=[]
for extn in extns:
if self.GetOption('extensions', extn, 'enable', default=True,
type='bool'):
#the extension is enabled
if editor_only or shell_only:
if editor_only:
option = "enable_editor"
else:
option = "enable_shell"
if self.GetOption('extensions', extn,option,
default=True, type='bool',
warn_on_default=False):
activeExtns.append(extn)
else:
activeExtns.append(extn)
return activeExtns
else:
return extns
def RemoveKeyBindNames(self,extnNameList):
#get rid of keybinding section names
names=extnNameList
kbNameIndicies=[]
for name in names:
if name.endswith('_bindings') or name.endswith('_cfgBindings'):
kbNameIndicies.append(names.index(name))
kbNameIndicies.sort()
kbNameIndicies.reverse()
for index in kbNameIndicies: #delete each keybinding section name
del(names[index])
return names
def GetExtnNameForEvent(self,virtualEvent):
"""
Returns the name of the extension that virtualEvent is bound in, or
None if not bound in any extension.
virtualEvent - string, name of the virtual event to test for, without
the enclosing '<< >>'
"""
extName=None
vEvent='<<'+virtualEvent+'>>'
for extn in self.GetExtensions(active_only=0):
for event in self.GetExtensionKeys(extn).keys():
if event == vEvent:
extName=extn
return extName
def GetExtensionKeys(self,extensionName):
"""
returns a dictionary of the configurable keybindings for a particular
extension,as they exist in the dictionary returned by GetCurrentKeySet;
that is, where previously used bindings are disabled.
"""
keysName=extensionName+'_cfgBindings'
activeKeys=self.GetCurrentKeySet()
extKeys={}
if self.defaultCfg['extensions'].has_section(keysName):
eventNames=self.defaultCfg['extensions'].GetOptionList(keysName)
for eventName in eventNames:
event='<<'+eventName+'>>'
binding=activeKeys[event]
extKeys[event]=binding
return extKeys
def __GetRawExtensionKeys(self,extensionName):
"""
returns a dictionary of the configurable keybindings for a particular
extension, as defined in the configuration files, or an empty dictionary
if no bindings are found
"""
keysName=extensionName+'_cfgBindings'
extKeys={}
if self.defaultCfg['extensions'].has_section(keysName):
eventNames=self.defaultCfg['extensions'].GetOptionList(keysName)
for eventName in eventNames:
binding=self.GetOption('extensions',keysName,
eventName,default='').split()
event='<<'+eventName+'>>'
extKeys[event]=binding
return extKeys
def GetExtensionBindings(self,extensionName):
"""
Returns a dictionary of all the event bindings for a particular
extension. The configurable keybindings are returned as they exist in
the dictionary returned by GetCurrentKeySet; that is, where re-used
keybindings are disabled.
"""
bindsName=extensionName+'_bindings'
extBinds=self.GetExtensionKeys(extensionName)
#add the non-configurable bindings
if self.defaultCfg['extensions'].has_section(bindsName):
eventNames=self.defaultCfg['extensions'].GetOptionList(bindsName)
for eventName in eventNames:
binding=self.GetOption('extensions',bindsName,
eventName,default='').split()
event='<<'+eventName+'>>'
extBinds[event]=binding
return extBinds
def GetKeyBinding(self, keySetName, eventStr):
"""
returns the keybinding for a specific event.
keySetName - string, name of key binding set
eventStr - string, the virtual event we want the binding for,
represented as a string, eg. '<<event>>'
"""
eventName=eventStr[2:-2] #trim off the angle brackets
binding=self.GetOption('keys',keySetName,eventName,default='').split()
return binding
def GetCurrentKeySet(self):
return self.GetKeySet(self.CurrentKeys())
def GetKeySet(self,keySetName):
"""
Returns a dictionary of: all requested core keybindings, plus the
keybindings for all currently active extensions. If a binding defined
in an extension is already in use, that binding is disabled.
"""
keySet=self.GetCoreKeys(keySetName)
activeExtns=self.GetExtensions(active_only=1)
for extn in activeExtns:
extKeys=self.__GetRawExtensionKeys(extn)
if extKeys: #the extension defines keybindings
for event in extKeys.keys():
if extKeys[event] in keySet.values():
#the binding is already in use
extKeys[event]='' #disable this binding
keySet[event]=extKeys[event] #add binding
return keySet
def IsCoreBinding(self,virtualEvent):
"""
returns true if the virtual event is bound in the core idle keybindings.
virtualEvent - string, name of the virtual event to test for, without
the enclosing '<< >>'
"""
return ('<<'+virtualEvent+'>>') in self.GetCoreKeys().keys()
def GetCoreKeys(self, keySetName=None):
"""
returns the requested set of core keybindings, with fallbacks if
required.
Keybindings loaded from the config file(s) are loaded _over_ these
defaults, so if there is a problem getting any core binding there will
be an 'ultimate last resort fallback' to the CUA-ish bindings
defined here.
"""
keyBindings={
'<<copy>>': ['<Control-c>', '<Control-C>'],
'<<cut>>': ['<Control-x>', '<Control-X>'],
'<<paste>>': ['<Control-v>', '<Control-V>'],
'<<beginning-of-line>>': ['<Control-a>', '<Home>'],
'<<center-insert>>': ['<Control-l>'],
'<<close-all-windows>>': ['<Control-q>'],
'<<close-window>>': ['<Alt-F4>'],
'<<do-nothing>>': ['<Control-x>'],
'<<end-of-file>>': ['<Control-d>'],
'<<python-docs>>': ['<F1>'],
'<<python-context-help>>': ['<Shift-F1>'],
'<<history-next>>': ['<Alt-n>'],
'<<history-previous>>': ['<Alt-p>'],
'<<interrupt-execution>>': ['<Control-c>'],
'<<view-restart>>': ['<F6>'],
'<<restart-shell>>': ['<Control-F6>'],
'<<open-class-browser>>': ['<Alt-c>'],
'<<open-module>>': ['<Alt-m>'],
'<<open-new-window>>': ['<Control-n>'],
'<<open-window-from-file>>': ['<Control-o>'],
'<<plain-newline-and-indent>>': ['<Control-j>'],
'<<print-window>>': ['<Control-p>'],
'<<redo>>': ['<Control-y>'],
'<<remove-selection>>': ['<Escape>'],
'<<save-copy-of-window-as-file>>': ['<Alt-Shift-S>'],
'<<save-window-as-file>>': ['<Alt-s>'],
'<<save-window>>': ['<Control-s>'],
'<<select-all>>': ['<Alt-a>'],
'<<toggle-auto-coloring>>': ['<Control-slash>'],
'<<undo>>': ['<Control-z>'],
'<<find-again>>': ['<Control-g>', '<F3>'],
'<<find-in-files>>': ['<Alt-F3>'],
'<<find-selection>>': ['<Control-F3>'],
'<<find>>': ['<Control-f>'],
'<<replace>>': ['<Control-h>'],
'<<goto-line>>': ['<Alt-g>'],
'<<smart-backspace>>': ['<Key-BackSpace>'],
'<<newline-and-indent>>': ['<Key-Return> <Key-KP_Enter>'],
'<<smart-indent>>': ['<Key-Tab>'],
'<<indent-region>>': ['<Control-Key-bracketright>'],
'<<dedent-region>>': ['<Control-Key-bracketleft>'],
'<<comment-region>>': ['<Alt-Key-3>'],
'<<uncomment-region>>': ['<Alt-Key-4>'],
'<<tabify-region>>': ['<Alt-Key-5>'],
'<<untabify-region>>': ['<Alt-Key-6>'],
'<<toggle-tabs>>': ['<Alt-Key-t>'],
'<<change-indentwidth>>': ['<Alt-Key-u>']
}
if keySetName:
for event in keyBindings.keys():
binding=self.GetKeyBinding(keySetName,event)
if binding:
keyBindings[event]=binding
else: #we are going to return a default, print warning
warning=('\n Warning: configHandler.py - IdleConf.GetCoreKeys'
' -\n problem retrieving key binding for event %r'
'\n from key set %r.\n'
' returning default value: %r\n' %
(event, keySetName, keyBindings[event]))
sys.stderr.write(warning)
return keyBindings
def GetExtraHelpSourceList(self,configSet):
"""Fetch list of extra help sources from a given configSet.
Valid configSets are 'user' or 'default'. Return a list of tuples of
the form (menu_item , path_to_help_file , option), or return the empty
list. 'option' is the sequence number of the help resource. 'option'
values determine the position of the menu items on the Help menu,
therefore the returned list must be sorted by 'option'.
"""
helpSources=[]
if configSet=='user':
cfgParser=self.userCfg['main']
elif configSet=='default':
cfgParser=self.defaultCfg['main']
else:
raise InvalidConfigSet, 'Invalid configSet specified'
options=cfgParser.GetOptionList('HelpFiles')
for option in options:
value=cfgParser.Get('HelpFiles',option,default=';')
if value.find(';')==-1: #malformed config entry with no ';'
menuItem='' #make these empty
helpPath='' #so value won't be added to list
else: #config entry contains ';' as expected
value=string.split(value,';')
menuItem=value[0].strip()
helpPath=value[1].strip()
if menuItem and helpPath: #neither are empty strings
helpSources.append( (menuItem,helpPath,option) )
helpSources.sort(self.__helpsort)
return helpSources
def __helpsort(self, h1, h2):
if int(h1[2]) < int(h2[2]):
return -1
elif int(h1[2]) > int(h2[2]):
return 1
else:
return 0
def GetAllExtraHelpSourcesList(self):
"""
Returns a list of tuples containing the details of all additional help
sources configured, or an empty list if there are none. Tuples are of
the format returned by GetExtraHelpSourceList.
"""
allHelpSources=( self.GetExtraHelpSourceList('default')+
self.GetExtraHelpSourceList('user') )
return allHelpSources
def LoadCfgFiles(self):
"""
load all configuration files.
"""
for key in self.defaultCfg.keys():
self.defaultCfg[key].Load()
self.userCfg[key].Load() #same keys
def SaveUserCfgFiles(self):
"""
write all loaded user configuration files back to disk
"""
for key in self.userCfg.keys():
self.userCfg[key].Save()
idleConf=IdleConf()
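# Illustrative sketch (not part of the original module): typical lookups
# through the shared idleConf instance created above. The option names below
# follow config-main.def; the helper itself is made up for demonstration.
def _example_config_lookups():
    theme_name = idleConf.CurrentTheme()
    font_size = idleConf.GetOption('main', 'EditorWindow', 'font-size',
                                   default='10', type='int')
    return theme_name, font_size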
### module test
if __name__ == '__main__':
def dumpCfg(cfg):
print '\n',cfg,'\n'
for key in cfg.keys():
sections=cfg[key].sections()
print key
print sections
for section in sections:
options=cfg[key].options(section)
print section
print options
for option in options:
print option, '=', cfg[key].Get(section,option)
dumpCfg(idleConf.defaultCfg)
dumpCfg(idleConf.userCfg)
print idleConf.userCfg['main'].Get('Theme','name')
#print idleConf.userCfg['highlight'].GetDefHighlight('Foo','normal')
| |
# Copyright 2014 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.ec2 import ec2utils
from nova import db
from nova import exception
from nova.objects import base
from nova.objects import fields
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class EC2InstanceMapping(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'id': fields.IntegerField(),
'uuid': fields.UUIDField(),
}
@staticmethod
def _from_db_object(context, imap, db_imap):
for field in imap.fields:
imap[field] = db_imap[field]
imap._context = context
imap.obj_reset_changes()
return imap
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
db_imap = db.ec2_instance_create(self._context, self.uuid)
self._from_db_object(self._context, self, db_imap)
@base.remotable_classmethod
def get_by_uuid(cls, context, instance_uuid):
db_imap = db.ec2_instance_get_by_uuid(context, instance_uuid)
if db_imap:
return cls._from_db_object(context, cls(), db_imap)
@base.remotable_classmethod
def get_by_id(cls, context, ec2_id):
db_imap = db.ec2_instance_get_by_id(context, ec2_id)
if db_imap:
return cls._from_db_object(context, cls(), db_imap)
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class EC2VolumeMapping(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'id': fields.IntegerField(),
'uuid': fields.UUIDField(),
}
@staticmethod
def _from_db_object(context, vmap, db_vmap):
for field in vmap.fields:
vmap[field] = db_vmap[field]
vmap._context = context
vmap.obj_reset_changes()
return vmap
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
db_vmap = db.ec2_volume_create(self._context, self.uuid)
self._from_db_object(self._context, self, db_vmap)
@base.remotable_classmethod
def get_by_uuid(cls, context, volume_uuid):
db_vmap = db.ec2_volume_get_by_uuid(context, volume_uuid)
if db_vmap:
return cls._from_db_object(context, cls(context), db_vmap)
@base.remotable_classmethod
def get_by_id(cls, context, ec2_id):
db_vmap = db.ec2_volume_get_by_id(context, ec2_id)
if db_vmap:
return cls._from_db_object(context, cls(context), db_vmap)
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class EC2SnapshotMapping(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'id': fields.IntegerField(read_only=True),
'uuid': fields.UUIDField(),
}
@staticmethod
def _from_db_object(context, smap, db_smap):
for field in smap.fields:
smap[field] = db_smap[field]
smap._context = context
smap.obj_reset_changes()
return smap
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
db_smap = db.ec2_snapshot_create(self._context, self.uuid)
self._from_db_object(self._context, self, db_smap)
@base.remotable_classmethod
def get_by_uuid(cls, context, snapshot_uuid):
db_smap = db.ec2_snapshot_get_by_uuid(context, snapshot_uuid)
if db_smap:
return cls._from_db_object(context, cls(context), db_smap)
@base.remotable_classmethod
def get_by_id(cls, context, ec2_id):
db_smap = db.ec2_snapshot_get_by_ec2_id(context, ec2_id)
if db_smap:
return cls._from_db_object(context, cls(context), db_smap)
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class S3ImageMapping(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'id': fields.IntegerField(read_only=True),
'uuid': fields.UUIDField(),
}
@staticmethod
def _from_db_object(context, s3imap, db_s3imap):
for field in s3imap.fields:
s3imap[field] = db_s3imap[field]
s3imap._context = context
s3imap.obj_reset_changes()
return s3imap
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
db_s3imap = db.s3_image_create(self._context, self.uuid)
self._from_db_object(self._context, self, db_s3imap)
@base.remotable_classmethod
def get_by_uuid(cls, context, s3_image_uuid):
db_s3imap = db.s3_image_get_by_uuid(context, s3_image_uuid)
if db_s3imap:
return cls._from_db_object(context, cls(context), db_s3imap)
@base.remotable_classmethod
def get_by_id(cls, context, s3_id):
db_s3imap = db.s3_image_get(context, s3_id)
if db_s3imap:
return cls._from_db_object(context, cls(context), db_s3imap)
@base.NovaObjectRegistry.register
class EC2Ids(base.NovaObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'instance_id': fields.StringField(read_only=True),
'ami_id': fields.StringField(nullable=True, read_only=True),
'kernel_id': fields.StringField(nullable=True, read_only=True),
'ramdisk_id': fields.StringField(nullable=True, read_only=True),
}
@staticmethod
def _from_dict(ec2ids, dict_ec2ids):
for field in ec2ids.fields:
setattr(ec2ids, field, dict_ec2ids[field])
return ec2ids
@staticmethod
def _get_ec2_ids(context, instance):
ec2_ids = {}
ec2_ids['instance_id'] = ec2utils.id_to_ec2_inst_id(instance.uuid)
ec2_ids['ami_id'] = ec2utils.glance_id_to_ec2_id(context,
instance.image_ref)
for image_type in ['kernel', 'ramdisk']:
image_id = getattr(instance, '%s_id' % image_type)
ec2_id = None
if image_id is not None:
ec2_image_type = ec2utils.image_type(image_type)
ec2_id = ec2utils.glance_id_to_ec2_id(context, image_id,
ec2_image_type)
ec2_ids['%s_id' % image_type] = ec2_id
return ec2_ids
@base.remotable_classmethod
def get_by_instance(cls, context, instance):
ec2_ids = cls._get_ec2_ids(context, instance)
return cls._from_dict(cls(context), ec2_ids)
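    # Illustrative usage sketch (hypothetical context/instance; not executed
    # here): EC2Ids.get_by_instance(context, instance) returns an EC2Ids object
    # whose instance_id is the EC2-style id derived from the instance UUID and
    # whose ami_id/kernel_id/ramdisk_id hold the EC2-style image ids, or None
    # where the corresponding image reference is not set.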
| |
"""
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Classes related to transfection worklist generation
(robot worklist generations).
AAB, Sept 2011
"""
from thelma.tools.semiconstants import PIPETTING_SPECS_NAMES
from thelma.tools.semiconstants import get_min_transfer_volume
from thelma.tools.semiconstants import get_positions_for_shape
from thelma.tools.semiconstants import get_reservoir_specs_standard_96
from thelma.tools.worklists.base import get_dynamic_dead_volume
from thelma.tools.utils.base import VOLUME_CONVERSION_FACTOR
from thelma.tools.utils.base import add_list_map_element
from thelma.tools.utils.base import get_converted_number
from thelma.tools.utils.base import get_trimmed_string
from thelma.tools.utils.base import is_valid_number
from thelma.tools.utils.base import round_up
from thelma.tools.utils.iso import IsoRequestAssociationData
from thelma.tools.utils.iso import IsoRequestLayout
from thelma.tools.utils.iso import IsoRequestLayoutConverter
from thelma.tools.utils.iso import IsoRequestParameters
from thelma.tools.utils.iso import IsoRequestPosition
from thelma.tools.utils.iso import IsoRequestSectorAssociator
from thelma.tools.utils.iso import IsoRequestValueDeterminer
from thelma.tools.utils.layouts import LIBRARY_POSITION_TYPE
from thelma.tools.utils.layouts import MOCK_POSITION_TYPE
from thelma.entities.moleculetype import MOLECULE_TYPE_IDS
from thelma.entities.moleculetype import MoleculeType
from thelma.entities.racklayout import RackLayout
from thelma.entities.tagging import TaggedRackPositionSet
__docformat__ = "reStructuredText en"
__all__ = ['TransfectionParameters',
'TransfectionPosition',
'TransfectionLayout',
'TransfectionLayoutConverter',
'TransfectionSectorAssociator',
'TransfectionAssociationData']
class TransfectionParameters(IsoRequestParameters):
"""
    This is a list of parameters required to generate a BioMek transfection
worklist when translating an ISO plate into a cell plate.
"""
#: The domain for transfer-related tags.
DOMAIN = 'transfection'
#: The final RNAi concentration in the assay.
FINAL_CONCENTRATION = 'final_concentration'
#: The molecule design pool (tag value: molecule design pool id).
MOLECULE_DESIGN_POOL = IsoRequestParameters.MOLECULE_DESIGN_POOL
#: The volume requested in the ISO in ul.
ISO_VOLUME = IsoRequestParameters.ISO_VOLUME
#: The concentration requested in the ISO in nM.
ISO_CONCENTRATION = IsoRequestParameters.ISO_CONCENTRATION
#: The position type (fixed, floating or empty).
POS_TYPE = IsoRequestParameters.POS_TYPE
#: The name of the RNAi reagent.
REAGENT_NAME = 'reagent_name'
#: The final dilution factor of the RNAi reagent in the cell plate.
REAGENT_DIL_FACTOR = 'reagent_dilution_factor'
#: The OptiMem dilution factor usually depends on the molecule type.
#: In library screenings however the factor is variable depending on the
#: final concentration.
OPTIMEM_DIL_FACTOR = 'optimem_dilution_factor'
ALL = IsoRequestParameters.ALL + [REAGENT_NAME, REAGENT_DIL_FACTOR,
FINAL_CONCENTRATION, OPTIMEM_DIL_FACTOR]
#: A map storing alias predicates for each parameter.
ALIAS_MAP = dict(IsoRequestParameters.ALIAS_MAP, **{
FINAL_CONCENTRATION : [],
REAGENT_NAME : [],
REAGENT_DIL_FACTOR : ['reagent_concentration'],
OPTIMEM_DIL_FACTOR : []})
#: Maps tag predicates on domains.
DOMAIN_MAP = dict(IsoRequestParameters.DOMAIN_MAP, **{
FINAL_CONCENTRATION : DOMAIN,
REAGENT_NAME : DOMAIN,
REAGENT_DIL_FACTOR : DOMAIN,
OPTIMEM_DIL_FACTOR : DOMAIN})
# Constants for calculations
#: The volume transferred into each cell (experiment) plate well (in ul).
TRANSFER_VOLUME = 5
    #: The minimum volume that can be requested by the stock management.
MINIMUM_ISO_VOLUME = IsoRequestParameters.MINIMUM_ISO_VOLUME
#: The dilution factor for the transfection reagent dilution (mastermix
#: step - as opposed to the final dilution factor of the reagent itself
#: which is specified as part of the experiment metadata).
REAGENT_MM_DILUTION_FACTOR = 2
#: The dilution factor for the dilution with cell suspension.
CELL_DILUTION_FACTOR = 7
#: The optimem dilution factor for miRNA molecule types.
MIRNA_OPTIMEM_DILUTION_FACTOR = 3
#: The optimem dilution factor for other molecule types.
STANDARD_OPTIMEM_DILUTION_FACTOR = 4
#: The default molecule type for mock positions.
DEFAULT_MOLECULE_TYPE = MOLECULE_TYPE_IDS.SIRNA
MOCK_NON_PARAMETERS = IsoRequestParameters.MOCK_NON_PARAMETERS \
+ [FINAL_CONCENTRATION]
@classmethod
def is_valid_mock_value(cls, value, parameter):
if not super(TransfectionParameters, cls).is_valid_mock_value(value,
parameter):
return False
if parameter in {cls.REAGENT_DIL_FACTOR, cls.OPTIMEM_DIL_FACTOR}:
if value is None: return True
return is_valid_number(value)
elif parameter == cls.REAGENT_NAME:
if value is None: return True
if not isinstance(value, basestring) or not len(value) > 2:
return False
return True
@classmethod
def calculate_iso_volume(cls, number_target_wells, number_replicates,
iso_reservoir_spec, optimem_dil_factor,
pipetting_specs):
"""
Calculates the ISO volume required to fill the given number
of target wells (assuming the given number of interplate replicates).
:param number_target_wells: The number of target wells in all
design racks of an experiment design.
:type number_target_wells: :class:`int`
:param number_replicates: The number of replicates.
:type number_replicates: :class:`int`
:param optimem_dil_factor: The optimem dilution factor depends on
molecule type or final concentration.
:type optimem_dil_factor: positive number
:param iso_reservoir_spec: The reservoir specs to be assumed.
:type iso_reservoir_spec:
:class:`thelma.entities.liquidtransfer.ReservoirSpecs`
        :param pipetting_specs: Defines whether to use a static dead volume
            or a dynamic one (represents Biomek transfer).
        :type pipetting_specs: :class:`PipettingSpecs`
        :return: The ISO volume that should be ordered in the ISO to generate
            a sufficient amount of mastermix solution.
"""
required_volume = cls.\
calculate_mastermix_volume_from_target_well_number(
number_target_wells, number_replicates, iso_reservoir_spec,
pipetting_specs)
iso_volume = required_volume / (cls.REAGENT_MM_DILUTION_FACTOR \
* optimem_dil_factor)
min_volume = get_min_transfer_volume(pipetting_specs)
if iso_volume < min_volume: iso_volume = min_volume
return round_up(iso_volume)
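    # Worked example (hypothetical numbers): if the mastermix volume required
    # for all target wells is 56 ul and the OptiMem dilution factor is 4, the
    # ISO volume is 56 / (REAGENT_MM_DILUTION_FACTOR * 4) = 56 / 8 = 7 ul,
    # raised to the minimum transfer volume of the pipetting specs if needed
    # and rounded up.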
@classmethod
def get_optimem_dilution_factor_from_molecule_type(cls, molecule_type):
"""
Returns the optimem dilution factor for a molecule type or molecule
type name.
:param molecule_type: The molecule types for the molecule design pool.
:type molecule_type: :class:`thelma.entities.moleculetype.MoleculeType`
or :class:`str` (molecule type ID)
:raises TypeError: For molecule types of the wrong class.
:raises ValueError: If the molecule type is unknown.
:return: The OptiMem dilution factor for this molecule type.
"""
if isinstance(molecule_type, MoleculeType):
mt_id = molecule_type.id
elif isinstance(molecule_type, basestring):
if MOLECULE_TYPE_IDS.is_known_type(molecule_type):
mt_id = molecule_type
else:
msg = 'Unknown molecule type name "%s".' % (molecule_type)
raise ValueError(msg)
else:
            msg = 'The molecule type must be a %s object or a string ' \
                  '(obtained: %s).' % (MoleculeType.__name__,
molecule_type.__class__.__name__)
raise TypeError(msg)
if mt_id == MOLECULE_TYPE_IDS.MIRNA_INHI or \
mt_id == MOLECULE_TYPE_IDS.MIRNA_MIMI:
return cls.MIRNA_OPTIMEM_DILUTION_FACTOR
else:
return cls.STANDARD_OPTIMEM_DILUTION_FACTOR
@classmethod
def get_total_dilution_factor(cls, optimem_dilution_factor):
"""
The total dilution factor for the transfection preparation
(comprised of transfection reagent, OptiMem and cell suspension
dilution).
:param optimem_dilution_factor: The optimem dilution factor depends on
molecule type or final concentrations.
:type optimem_dilution_factor: positive number
:return: The total dilution factor.
"""
return cls.REAGENT_MM_DILUTION_FACTOR * cls.CELL_DILUTION_FACTOR \
* optimem_dilution_factor
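    # Worked example: with the standard OptiMem dilution factor of 4 the total
    # dilution factor is 2 (reagent mastermix) * 7 (cell suspension) * 4 = 56.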
@classmethod
def calculate_mastermix_volume_from_target_well_number(cls,
number_target_wells, number_replicates, iso_reservoir_spec,
pipetting_specs=None):
"""
        Calculates the mastermix volume including transfection reagent
        (assuming the given number of target wells and interplate replicates).
        :param number_target_wells: The number of target wells in all
            design racks of an experiment design.
:type number_target_wells: :class:`int`
:param number_replicates: The number of replicates.
:type number_replicates: :class:`int`
:param iso_reservoir_spec: The reservoir specs to be assumed.
:type iso_reservoir_spec:
:class:`thelma.entities.liquidtransfer.ReservoirSpecs`
:param pipetting_specs: Defines whether to use a static dead volume
or a dynamic dead volume correction (e.g. for Biomek).
:type pipetting_specs: :class:`PipettingSpecs`
:default pipetting_specs: *None* (with correction)
:return: The determined volume required to fill these wells.
"""
well_number = number_target_wells * number_replicates
if pipetting_specs is None or pipetting_specs.has_dynamic_dead_volume:
dead_volume = get_dynamic_dead_volume(
target_well_number=well_number,
reservoir_specs=iso_reservoir_spec)
else:
dead_volume = iso_reservoir_spec.min_dead_volume \
* VOLUME_CONVERSION_FACTOR
required_volume = well_number * cls.TRANSFER_VOLUME + dead_volume
return required_volume
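    # Worked example (hypothetical dead volume): 96 target wells at
    # 2 replicates require 192 * TRANSFER_VOLUME = 960 ul plus the static or
    # dynamic dead volume of the ISO reservoir, e.g. 960 + 10 = 970 ul.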
@classmethod
def calculate_mastermix_volume_from_iso_volume(cls, iso_volume,
optimem_dil_factor):
"""
Returns the maximum volume of a mastermix (the volume of the complete
complex with all its ingredients).
:param iso_volume: The ISO volume.
:type iso_volume: positive number
:param optimem_dil_factor: The optimem dilution factor depends on
molecule type or final concentration.
:type optimem_dil_factor: positive number
:rtype: :class:`float`
"""
reagent_dilution_volume = cls.calculate_reagent_dilution_volume(
iso_volume=iso_volume, optimem_dil_factor=optimem_dil_factor)
return reagent_dilution_volume * 2
@classmethod
def calculate_reagent_dilution_volume(cls, iso_volume, optimem_dil_factor):
"""
        Returns the reagent volume that is needed for a 1:2 dilution
with RNAi reagent.
:param iso_volume: An ISO volume.
:type iso_volume: numeric, positive
:param optimem_dil_factor: The optimem dilution factor depends on
molecule type or final concentration.
:type optimem_dil_factor: positive number
:return: The required volume.
"""
return float(iso_volume) * optimem_dil_factor
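    # Worked example: an ISO volume of 5 ul and an OptiMem dilution factor of 4
    # yield a reagent dilution volume of 5 * 4 = 20 ul, i.e. the volume that is
    # later combined 1:2 with the diluted RNAi reagent.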
@classmethod
def calculate_initial_reagent_dilution(cls, reagent_dil_factor):
"""
        Returns the initial reagent dilution (the dilution that has to be
provided for the 1:2 dilution step).
:param reagent_dil_factor: The final reagent dilution factor.
:type reagent_dil_factor: :class:`int`
:return: The initial dilution factor.
"""
return reagent_dil_factor / \
(cls.REAGENT_MM_DILUTION_FACTOR * cls.CELL_DILUTION_FACTOR)
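    # Worked example: a final reagent dilution factor of 1400 corresponds to an
    # initial dilution of 1400 / (REAGENT_MM_DILUTION_FACTOR
    # * CELL_DILUTION_FACTOR) = 1400 / 14 = 100.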
# TODO: review and replace
@classmethod
def get_critical_iso_concentration(cls, stock_concentration):
"""
        Returns the critical ISO concentration in nM.
ISO concentrations that are larger than this value might cause slight
inaccuracies in the concentration when using the Biomek (due to the
transfer volume step width of 0.1 ul).
:param stock_concentration: The stock concentration for molecule design
pool in nM.
:type stock_concentration: positive number
        :return: critical ISO concentration in nM.
"""
rs = get_reservoir_specs_standard_96()
std_96_min_dead_vol = rs.min_dead_volume * VOLUME_CONVERSION_FACTOR
min_biomek_transfer_vol = get_min_transfer_volume(
PIPETTING_SPECS_NAMES.BIOMEK)
crit_iso_conc = stock_concentration \
/ ((std_96_min_dead_vol + cls.MINIMUM_ISO_VOLUME) \
/ (std_96_min_dead_vol + cls.MINIMUM_ISO_VOLUME \
- min_biomek_transfer_vol))
return crit_iso_conc
# TODO: review and replace
@classmethod
def get_critical_final_concentration(cls, stock_concentration,
optimem_dil_factor=None):
"""
        Returns the critical final concentration in nM.
Final concentrations that are larger than this value might cause slight
inaccuracies in the concentration when using the Biomek (due to the
transfer volume step width of 0.1 ul).
If you do not provide an OptiMem dilution factor, the dilution factor
is determined via the molecule type.
:param stock_concentration: The stock concentration for the molecule
design pool in nM.
:type stock_concentration: positive number
:param optimem_dil_factor: The optimem dilution factor depends on
molecule type or final concentration.
:type optimem_dil_factor: positive number
:default optimem_dil_factor: *None*
        :return: critical final concentration in nM.
"""
crit_iso_conc = cls.get_critical_iso_concentration(stock_concentration)
        # The ODF differs only in library experiments and these do not
# need ISO concentration checking.
total_df = cls.get_total_dilution_factor(optimem_dil_factor)
crit_final_conc = crit_iso_conc / total_df
return crit_final_conc
@classmethod
def get_layout_mock_optimem_molecule_type(cls, working_layout):
"""
        Returns the OptiMem dilution factor to be used for mock positions in a
        layout. If the fixed positions of the layout share a single OptiMem
        dilution factor, that factor is returned. Otherwise the function
        returns the OptiMem dilution factor for the
        :attr:`DEFAULT_MOLECULE_TYPE`.
        Make sure the OptiMem dilution factors of the layout are set.
"""
fixed_optimem_dfs = set()
for wp in working_layout.working_positions():
if wp.is_fixed:
optimem_df = wp.optimem_dil_factor
fixed_optimem_dfs.add(optimem_df)
if len(fixed_optimem_dfs) == 1:
return list(fixed_optimem_dfs)[0]
else:
mock_mt = cls.DEFAULT_MOLECULE_TYPE
optimem_df = cls.get_optimem_dilution_factor_from_molecule_type(
mock_mt)
return optimem_df
@classmethod
def get_floating_placeholder(cls, num):
"""
        Returns a valid floating placeholder (suitable for recognition as
floating via :func:`get_position_type`).
:param num: a number for the placeholder
:type num: :class:`int`
"""
return '%s%03i' % (cls.FLOATING_INDICATOR, num)
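    # Illustrative call (the FLOATING_INDICATOR prefix is inherited from the
    # parent parameter set): get_floating_placeholder(7) returns the indicator
    # followed by the zero-padded number, e.g. '<indicator>007'.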
class TransfectionPosition(IsoRequestPosition):
"""
This class represents a source position in an ISO layout. The target
    positions are the corresponding wells in the final cell (experiment) plate.
"""
#: The parameter set this working position is associated with.
PARAMETER_SET = TransfectionParameters
#: The delimiter for the different target infos.
POSITION_DELIMITER = '-'
def __init__(self, rack_position, molecule_design_pool=None,
position_type=None, reagent_name=None, reagent_dil_factor=None,
iso_volume=None, iso_concentration=None,
final_concentration=None, optimem_dil_factor=None):
"""
:param rack_position: The rack position.
:type rack_position: :class:`thelma.entities.rack.RackPosition`.
:param molecule_design_pool: The molecule design pool or placeholder for
the RNAi reagent.
        :type molecule_design_pool: :class:`int` (ID), :class:`str` (placeholder)
or :class:`thelma.entities.moleculedesign.StockSampleMoleculeDesign`
        :param position_type: Influences valid values for other parameters.
        :type position_type: :class:`str`
:param reagent_name: The name of the transfection reagent.
:type reagent_name: :class:`str`
:param reagent_dil_factor: The final dilution factor of the
transfection reagent in the cell plate.
:type reagent_dil_factor: positive number, no unit
:param iso_volume: The volume requested by the stock management.
:type iso_volume: positive number, unit ul
:param iso_concentration: The concentration requested by the stock
management.
:type iso_concentration: positive number, unit nM
:param final_concentration: The final concentration of the RNAi
reagent in the cell plate.
:type final_concentration: positive number, unit nM
:param optimem_dil_factor: The dilution factor for the OptiMem dilution
(use only if you do not want to use the default factor).
:type optimem_dil_factor: positive number
"""
IsoRequestPosition.__init__(self, rack_position=rack_position,
molecule_design_pool=molecule_design_pool,
position_type=position_type, iso_volume=iso_volume,
iso_concentration=iso_concentration)
        #: Stores the positions in the cell plate that are filled by this
        #: source position (:class:`set` of
        #: :class:`thelma.entities.rack.RackPosition` objects).
self.cell_plate_positions = set()
#: The name of the RNAi reagent.
self.reagent_name = reagent_name
        #: The final dilution factor of the RNAi reagent in the cell plate.
self.reagent_dil_factor = get_converted_number(reagent_dil_factor)
#: The final concentration in the cell plate (experiment plate).
self.final_concentration = get_converted_number(final_concentration)
        #: The optimem dilution factor set in library screenings (because
        #: in this case it depends on the final concentration instead of
        #: on the molecule type).
self._optimem_dil_factor = optimem_dil_factor
tf_attrs = [('reagent name', self.reagent_name),
('reagent dilution factor', self.reagent_dil_factor),
('final concentration', self.final_concentration),
('optimem dilution factor', self._optimem_dil_factor)]
if self.is_untreated_type:
self._check_untreated_values(tf_attrs)
elif self.is_empty:
self._check_none_value(tf_attrs)
else:
if self.reagent_name is not None and \
(not isinstance(self.reagent_name, basestring) or \
len(self.reagent_name) < 2):
msg = 'The reagent name must be at least 2 characters long ' \
'if there is one (obtained: "%s")!' % (self.reagent_name)
raise ValueError(msg)
numericals = [tf_attrs[1], tf_attrs[3]]
if self.is_mock:
self._check_mock_values([tf_attrs[2]])
else:
numericals.append(tf_attrs[2])
self._check_numbers(numericals, allow_none=True)
@property
def hash_full(self):
"""
A string that can be used as hash value for comparison. This hash
covers all four parameters (molecule design pool, reagent name, reagent
concentration and final concentration) needed to make well
associations.
"""
if self.is_empty: return '%s' % (self.rack_position)
fc = self.final_concentration
if self.is_mock: fc = None
return '%s%s%s%s' % (self.molecule_design_pool_id,
self.reagent_name,
get_trimmed_string(self.reagent_dil_factor),
get_trimmed_string(fc))
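    # Illustrative hash (hypothetical values, assuming get_trimmed_string
    # merely strips trailing zeros): pool 1056, reagent name 'RNAiMax',
    # dilution factor 1400 and final concentration 10 nM would yield the
    # string '1056RNAiMax140010'.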
@property
def hash_partial(self):
"""
        A string that can be used as hash value. This hash covers only three
        of the four parameters (molecule design pool, reagent name
        and reagent concentration). It is meant to enable comparison of
        manual ISO definitions, in which the final concentration is not known.
"""
if self.is_empty: return '%s' % (self.rack_position)
return '%s%s%s' % (self.molecule_design_pool_id,
self.reagent_name,
get_trimmed_string(self.reagent_dil_factor))
@property
def optimem_dil_factor(self):
"""
The dilution factor depends on the molecule type (for most experiment
scenarios) or on the final concentration (for library screenings).
"""
return self._optimem_dil_factor
def store_optimem_dilution_factor(self):
"""
        This method is meant for fixed positions that can derive the OptiMem
        dilution factor from the pool.
"""
self._optimem_dil_factor = TransfectionParameters.\
get_optimem_dilution_factor_from_molecule_type(
self.molecule_design_pool.molecule_type)
def set_optimem_dilution_factor(self, optimem_df):
"""
        The OptiMem dilution factor must be a positive number.
        The factor may only be set once, except for fixed positions.
:raises AttributeError: If the factor has been set before.
:raises ValueError: If the factor is not a positive number.
"""
if not is_valid_number(optimem_df):
msg = 'The OptiMem dilution factor must be a positive number ' \
'(obtained: %s).' % (optimem_df)
raise ValueError(msg)
if not self._optimem_dil_factor is None and not self.is_fixed:
raise AttributeError('The OptiMem dilution factor has already ' \
'been set!')
self._optimem_dil_factor = optimem_df
def get_total_dilution_factor(self):
"""
        Returns the total transfection dilution factor for this molecule type.
"""
if self._optimem_dil_factor is None: return None
return TransfectionParameters.get_total_dilution_factor(
optimem_dilution_factor=self._optimem_dil_factor)
def calculate_reagent_dilution_volume(self):
"""
        Returns the reagent volume that has to be added to this well.
        Make sure the OptiMem dilution factor of the position is set.
"""
if self.is_empty or self.iso_volume is None: return None
return TransfectionParameters.calculate_reagent_dilution_volume(
self.iso_volume, self.optimem_dil_factor)
@classmethod
def create_library_position(cls, rack_position):
"""
        Creates a transfection position representing a library well.
:return: A transfection position.
"""
kw = dict(molecule_design_pool=LIBRARY_POSITION_TYPE,
rack_position=rack_position)
return TransfectionPosition(**kw)
@classmethod
def create_mock_position(cls, rack_position):
"""
Creates a transfection position representing a mock well.
:return: A transfection position.
"""
kw = dict(molecule_design_pool=MOCK_POSITION_TYPE,
rack_position=rack_position)
return TransfectionPosition(**kw)
@classmethod
def create_untreated_position(cls, rack_position, position_type):
"""
Creates a transfection position representing an untreated (empty) well.
:return: A transfection position.
"""
return TransfectionPosition(rack_position=rack_position,
molecule_design_pool=position_type,
reagent_name=position_type,
reagent_dil_factor=position_type,
final_concentration=position_type)
def copy(self):
"""
Returns a copy of this transfection position.
"""
tf_pos = TransfectionPosition(rack_position=self.rack_position,
molecule_design_pool=self.molecule_design_pool,
position_type=self.position_type,
reagent_name=self.reagent_name,
reagent_dil_factor=self.reagent_dil_factor,
iso_volume=self.iso_volume,
iso_concentration=self.iso_concentration,
final_concentration=self.final_concentration,
optimem_dil_factor=self._optimem_dil_factor)
return tf_pos
def _get_parameter_values_map(self):
"""
Returns a map containing the value for each parameter.
"""
parameters = dict()
parameters[self.PARAMETER_SET.MOLECULE_DESIGN_POOL] = \
self.molecule_design_pool
parameters[self.PARAMETER_SET.ISO_VOLUME] = self.iso_volume
parameters[self.PARAMETER_SET.ISO_CONCENTRATION] = \
self.iso_concentration
parameters[self.PARAMETER_SET.POS_TYPE] = self.position_type
parameters[self.PARAMETER_SET.REAGENT_NAME] = self.reagent_name
parameters[self.PARAMETER_SET.REAGENT_DIL_FACTOR] = \
self.reagent_dil_factor
parameters[self.PARAMETER_SET.FINAL_CONCENTRATION] = \
self.final_concentration
parameters[self.PARAMETER_SET.OPTIMEM_DIL_FACTOR] = \
self._optimem_dil_factor
return parameters
def __eq__(self, other):
if not IsoRequestPosition.__eq__(self, other): return False
if not self.is_empty:
if not self.reagent_name == other.reagent_name:
return False
if not self.reagent_dil_factor == other.reagent_dil_factor:
return False
if not (self.is_mock or self.is_empty):
if not self.final_concentration == other.final_concentration:
return False
return True
def __repr__(self):
str_format = '<%s rack position: %s, molecule design pool: %s ' \
'ISO volume: %s, ISO concentration: %s, reagent name %s, ' \
'reagent dilution factor: %s>'
params = (self.__class__.__name__, self.rack_position,
self.molecule_design_pool, self.iso_volume,
self.iso_concentration, self.reagent_name,
self.reagent_dil_factor)
return str_format % params
class TransfectionLayout(IsoRequestLayout):
"""
A working container for transfection positions. Transfection positions
contain data about the ISO, the mastermix and transfer parameters.
"""
POSITION_CLS = TransfectionPosition
def copy(self):
"""
Returns a copy of this layout.
"""
copied = TransfectionLayout(shape=self.shape)
for tf_pos in self._position_map.values():
copied_pos = tf_pos.copy()
copied.add_position(copied_pos)
return copied
def has_iso_concentrations(self):
"""
Returns *True* if there are ISO concentrations specified in this
transfection layout.
"""
for working_pos in self._position_map.values():
if working_pos.is_empty or working_pos.is_mock: continue
if not working_pos.iso_concentration is None: return True
return False
def has_iso_volumes(self):
"""
Returns *True* if there are ISO volumes specified in this
transfection layout.
"""
for working_position in self._position_map.values():
if working_position.is_empty: continue
if not working_position.iso_volume is None: return True
return False
def has_final_concentrations(self):
"""
        Returns *True* if there are final concentrations specified in
        this transfection layout.
"""
for working_pos in self._position_map.values():
if working_pos.is_empty or working_pos.is_mock: continue
if not working_pos.final_concentration is None: return True
return False
def create_merged_rack_layout(self, additional_trps, user):
"""
Returns a rack layout that contains the passed tags and all tags
of this layout.
        :param additional_trps: non-transfection tags (as
            :class:`TaggedRackPositionSet`) mapped onto their
            rack position set hash values
        :type additional_trps: :class:`dict`
        :param user: The user creating the tag, usually the requester of
            the ISO request.
:type user: :class:`thelma.entities.user.User`
:return: the completed :class:`thelma.entities.racklayout.RackLayout`
"""
if len(additional_trps) < 1: return self.create_rack_layout()
# get transfection layout data
self.close()
trp_sets = self.create_tagged_rack_position_sets()
# get tagged rack positions sets, map tags onto hash values
trps_map = dict()
for trps in trp_sets:
trps_map[trps.rack_position_set.hash_value] = trps
# add tags
for hash_value, trps in additional_trps.iteritems():
if trps_map.has_key(hash_value):
tf_trps = trps_map[hash_value]
for tag in trps.tags:
tf_trps.add_tag(tag, user)
else:
trps_map[hash_value] = trps
return RackLayout(shape=self.shape,
tagged_rack_position_sets=trps_map.values())
@classmethod
def complete_rack_layout_with_screening_tags(cls, exp_rack_layout,
iso_request_rack_layout, user):
"""
Returns a rack layout with the transfection data of the layout
added to it. This method is used to create the experiment design rack
layouts for screening and library cases.
ISO data (ISO volume and ISO concentration) is excluded.
:param exp_rack_layout: The rack layout to complete.
:type exp_rack_layout: :class:`thelma.entities.racklayout.RackLayout`
:param iso_request_rack_layout: The rack layout of the ISO request
(which might contain customized tags).
:type iso_request_rack_layout:
:class:`thelma.entities.racklayout.RackLayout`
"""
trps_map = \
dict([(trps.rack_position_set.hash_value, trps)
for trps in exp_rack_layout.tagged_rack_position_sets])
excluded_parameters = [IsoRequestParameters.ISO_VOLUME,
IsoRequestParameters.ISO_CONCENTRATION]
for trps in iso_request_rack_layout.tagged_rack_position_sets:
            hash_value = trps.rack_position_set.hash_value
tags = []
for tag in trps.tags:
predicate = tag.predicate
if (predicate in excluded_parameters): continue
tags.append(tag)
if trps_map.has_key(hash_value):
trps = trps_map[hash_value]
for tag in tags:
trps.add_tag(tag, user)
elif len(tags) < 1:
# might be the case if there are only ISO volume or ISO
# concentration tags in a new tagged rack position set
continue
else:
trps = TaggedRackPositionSet(set(tags), trps.rack_position_set,
user)
trps_map[hash_value] = trps
return RackLayout(shape=exp_rack_layout.shape,
tagged_rack_position_sets=trps_map.values())
@staticmethod
def compare_ignoring_untreated_types(layout1, layout2):
"""
Compares two transfection layouts ignoring potential untreated
positions.
"""
if layout1.shape != layout2.shape: return False
for rack_pos in get_positions_for_shape(layout1.shape):
tf1 = layout1.get_working_position(rack_pos)
tf2 = layout2.get_working_position(rack_pos)
if tf1 is None and tf2 is None: continue
if tf1 is not None and (tf1.is_untreated_type or tf1.is_empty):
tf1 = None
if tf2 is not None and (tf2.is_untreated_type or tf2.is_empty):
tf2 = None
if tf1 is None and tf2 is None:
continue
elif not tf1 == tf2:
return False
return True
class TransfectionLayoutConverter(IsoRequestLayoutConverter):
"""
    Converts a rack layout into a :class:`TransfectionLayout`.
"""
NAME = 'Transfection Layout Converter'
PARAMETER_SET = TransfectionParameters
LAYOUT_CLS = TransfectionLayout
POSITION_CLS = TransfectionPosition
def __init__(self, rack_layout, is_iso_request_layout=True,
is_mastermix_template=False, parent=None):
"""
Constructor.
:param is_iso_request_layout: Defines if certain parameters are allowed
            to be missing (final concentration, reagent name,
reagent dil factor).
:type is_iso_request_layout: :class:`boolean`
:default is_iso_request_layout: True
        :param bool is_mastermix_template: Defines if certain parameters are
            allowed to be missing (ISO volume, ISO concentration, position
            type). If *True*, \'is_iso_request_layout\' must be *False*.
        :default is_mastermix_template: False
"""
IsoRequestLayoutConverter.__init__(self, rack_layout, parent=parent)
#: Defines if certain parameters are allowed to be missing (final
        #: concentration, reagent name, reagent dil factor).
self.__is_iso_request_layout = is_iso_request_layout
#: Defines if certain parameters are allowed to be missing (ISO
#: volume, ISO concentration).
self.__is_mastermix_template = is_mastermix_template
# intermediate storage of invalid rack positions.
self.__invalid_dil_factor = None
self.__invalid_name = None
self.__invalid_final_concentration = None
self.__invalid_optimem_factor = None
self.__missing_reagent_name = None
self.__missing_reagent_df = None
self.__missing_final_conc = None
def reset(self):
"""
Resets all attributes except for the :attr:`rack_layout`.
"""
IsoRequestLayoutConverter.reset(self)
self.__invalid_dil_factor = []
self.__invalid_name = []
self.__invalid_final_concentration = []
self.__invalid_optimem_factor = []
self.__missing_reagent_name = []
self.__missing_reagent_df = []
self.__missing_final_conc = []
def _check_input(self):
IsoRequestLayoutConverter._check_input(self)
self._check_input_class('"is ISO request layout" flag',
self.__is_iso_request_layout, bool)
self._check_input_class('"is mastermix template" flag',
self.__is_mastermix_template, bool)
if not self.has_errors() and \
self.__is_mastermix_template and self.__is_iso_request_layout:
msg = 'The layout cannot be a mastermix layout and an ISO ' \
'request layout at the same time!'
self.add_error(msg)
def _initialize_parameter_validators(self):
"""
Initializes all parameter validators for the tools
:attr:`PARAMETER_SET`. Overwrite this method if you want to have
other validators.
"""
self._optional_parameters = set([self.PARAMETER_SET.OPTIMEM_DIL_FACTOR])
params = [self.PARAMETER_SET.MOLECULE_DESIGN_POOL,
self.PARAMETER_SET.REAGENT_NAME,
self.PARAMETER_SET.REAGENT_DIL_FACTOR,
self.PARAMETER_SET.POS_TYPE,
self.PARAMETER_SET.FINAL_CONCENTRATION,
self.PARAMETER_SET.ISO_VOLUME,
self.PARAMETER_SET.ISO_CONCENTRATION,
self.PARAMETER_SET.OPTIMEM_DIL_FACTOR]
if self.__is_iso_request_layout:
self._optional_parameters.add(
self.PARAMETER_SET.FINAL_CONCENTRATION)
else:
self._expect_iso_values = False
self._optional_parameters.add(self.PARAMETER_SET.POS_TYPE)
self._optional_parameters.add(self.PARAMETER_SET.ISO_VOLUME)
self._optional_parameters.add(self.PARAMETER_SET.ISO_CONCENTRATION)
if not self.__is_mastermix_template:
self._optional_parameters.add(self.PARAMETER_SET.REAGENT_NAME)
self._optional_parameters.add(
self.PARAMETER_SET.REAGENT_DIL_FACTOR)
self._parameter_validators = dict()
for parameter in params:
validator = self.PARAMETER_SET.create_validator_from_parameter(
parameter)
self._parameter_validators[parameter] = validator
def _get_position_init_values(self, parameter_map, rack_pos):
kw = IsoRequestLayoutConverter._get_position_init_values(self,
parameter_map, rack_pos)
if kw is None: return None # includes empty and untreated type pos
pos_type = kw['position_type']
pos_label = rack_pos.label
reagent_name = parameter_map[self.PARAMETER_SET.REAGENT_NAME]
reagent_df = parameter_map[self.PARAMETER_SET.REAGENT_DIL_FACTOR]
final_conc = parameter_map[self.PARAMETER_SET.FINAL_CONCENTRATION]
optimem_dil_factor = None
invalid = False
optimem_dil_factor = parameter_map[
self.PARAMETER_SET.OPTIMEM_DIL_FACTOR]
if optimem_dil_factor is not None and \
not is_valid_number(optimem_dil_factor):
info = '%s (%s)' % (pos_label, optimem_dil_factor)
self.__invalid_optimem_factor.append(info)
invalid = True
if reagent_name is None:
if self.__is_mastermix_template:
self.__missing_reagent_name.append(pos_label)
invalid = True
elif not isinstance(reagent_name, basestring) \
or len(reagent_name) < 2:
self.__invalid_name.append(pos_label)
invalid = True
if reagent_df is None:
if self.__is_mastermix_template:
self.__missing_reagent_df.append(pos_label)
invalid = True
elif not is_valid_number(reagent_df):
self.__invalid_dil_factor.append(pos_label)
invalid = True
if not self.__is_iso_request_layout and final_conc is None and \
not pos_type == IsoRequestParameters.MOCK_TYPE_VALUE:
self.__missing_final_conc.append(pos_label)
invalid = True
if not final_conc is None:
if pos_type == MOCK_POSITION_TYPE:
if not TransfectionPosition.is_valid_mock_value(final_conc,
self.PARAMETER_SET.FINAL_CONCENTRATION):
info = '%s (%s)' % (pos_label, final_conc)
self.__invalid_final_concentration.append(info)
invalid = True
elif not is_valid_number(final_conc):
info = '%s (%s)' % (pos_label, final_conc)
self.__invalid_final_concentration.append(info)
invalid = True
if invalid:
return None
else:
kw['reagent_name'] = reagent_name
kw['reagent_dil_factor'] = reagent_df
kw['final_concentration'] = final_conc
kw['optimem_dil_factor'] = optimem_dil_factor
return kw
def _record_errors(self):
"""
        Records errors that have been collected for rack positions.
"""
IsoRequestLayoutConverter._record_errors(self)
if len(self.__invalid_name) > 0:
msg = 'The following rack positions have invalid reagent names: ' \
'%s. A valid reagent name must be a string of at least 2 ' \
'characters length.' \
% (self._get_joined_str(self.__invalid_name))
self.add_error(msg)
if len(self.__invalid_dil_factor) > 0:
msg = 'The following rack positions have invalid reagent ' \
'dilution factors: %s. The reagent dilution factor must be ' \
'a positive number.' \
% (self._get_joined_str(self.__invalid_dil_factor))
self.add_error(msg)
if len(self.__invalid_final_concentration) > 0:
msg = 'The following rack positions have invalid final ' \
'concentrations: %s. The final concentration must be ' \
'a positive number.' \
% (self._get_joined_str(self.__invalid_final_concentration))
self.add_error(msg)
if len(self.__invalid_optimem_factor) > 0:
msg = 'The following rack positions have invalid OptiMem ' \
'dilution factors: %s. The OptiMem dilution factor must be ' \
'a positive number.' \
% (self._get_joined_str(self.__invalid_optimem_factor))
self.add_error(msg)
if len(self.__missing_reagent_name) > 0:
msg = 'The following rack positions do not have a reagent name: %s.' \
% (self._get_joined_str(self.__missing_reagent_name))
self.add_error(msg)
if len(self.__missing_reagent_df) > 0:
msg = 'The following rack positions do not have a reagent ' \
'dilution factor: %s.' \
% (self._get_joined_str(self.__missing_reagent_df))
self.add_error(msg)
if len(self.__missing_final_conc) > 0:
msg = 'The following rack positions do not have a final ' \
'concentration: %s.' \
% (self._get_joined_str(self.__missing_final_conc))
self.add_error(msg)
def _perform_layout_validity_checks(self, working_layout):
"""
Use this method to check the validity of the generated layout.
"""
IsoRequestLayoutConverter._perform_layout_validity_checks(self,
working_layout)
self.__check_optimem_factor(working_layout)
def __check_optimem_factor(self, working_layout):
"""
If there is an OptiMem dilution factor defined for one position, it
must be defined for all non-empty positions.
"""
optimem_factors = set()
for tf_pos in working_layout.working_positions():
if tf_pos.is_empty: continue
optimem_factors.add(tf_pos.optimem_dil_factor)
if len(optimem_factors) > 1 and None in optimem_factors:
msg = 'Some positions do not have an OptiMem dilution factor ' \
'although there are OptiMem dilution factors in this layout!'
self.add_error(msg)
class TransfectionSectorAssociator(IsoRequestSectorAssociator):
"""
This is a special rack sector determiner. It sorts the transfection
positions by final concentration.
It is assumed that the sorting of floating positions has not taken place
    yet. Hence, pools for floating positions are replaced by an
unspecific marker.
**Return Value:** A map containing the values for the different sectors.
"""
NAME = 'Transfection Rack Sector Associator'
SECTOR_ATTR_NAME = 'final_concentration'
LAYOUT_CLS = TransfectionLayout
def _get_molecule_design_pool_id(self, layout_pos):
"""
        Floating pool placeholders are replaced by an unspecific marker.
"""
if not layout_pos is None and layout_pos.is_floating:
return TransfectionParameters.FLOATING_TYPE_VALUE
else:
return IsoRequestSectorAssociator._get_molecule_design_pool_id(self,
layout_pos)
def _check_associated_sectors(self):
"""
Since all floating positions are regarded as the same pool, the default
        sector association will not work unless we have controls to distinguish
sectors.
"""
if not self.regard_controls and self.number_sectors > 1:
self.__find_floating_only_associations()
else:
IsoRequestSectorAssociator._check_associated_sectors(self)
def __find_floating_only_associations(self):
"""
        Since all floatings are treated in the same way, we exclude the
        possibility of equal concentrations in different rack sectors if
        controls are not regarded, because in this case there are two ways to
        interpret the findings (remember that the floating placeholders have
        not been assigned yet).
        Example: 4 floatings with the same concentration can either be regarded
        as 4 independent pools or as 1 pool in 4-fold replicate. Since the first
        case is more likely in our company, we choose this case for
        interpretation. If scientists want a different interpretation,
        we have to adjust this manually.
"""
concentrations = dict()
present_sectors = []
for sector_index, conc in self._sector_concentrations.iteritems():
if conc is not None:
add_list_map_element(concentrations, conc, sector_index)
present_sectors.append(sector_index)
if len(self._associated_sectors) > 1:
msg = 'Unable to adjust floating position association ' \
'because basic assumptions are not met. This is ' \
'a programming error. Talk to IT, please.'
raise AssertionError(msg)
if len(present_sectors) > 1:
current_sets = []
if len(concentrations) == 1:
for sector_index in present_sectors:
current_sets.append([sector_index])
elif len(concentrations) > 1:
while len(concentrations) > 0:
current_set = []
del_conc = []
for conc, sectors in concentrations.iteritems():
sectors.sort()
si = sectors.pop(0)
current_set.append(si)
if len(sectors) < 1: del_conc.append(conc)
current_sets.append(current_set)
for conc in del_conc: del concentrations[conc]
else:
current_sets = [[present_sectors[0]]]
if self._are_valid_sets(current_sets):
self._associated_sectors = current_sets
class TransfectionAssociationData(IsoRequestAssociationData):
"""
A helper class determining and storing associated rack sectors, parent
    sectors and sector concentrations (ISO and final) for a transfection
layout.
:Note: All attributes are immutable.
"""
ASSOCIATOR_CLS = TransfectionSectorAssociator
def __init__(self, layout, tool, regard_controls):
IsoRequestAssociationData.__init__(self, layout, tool,
regard_controls)
self.__iso_concentrations = None
self.__find_iso_concentrations(layout, regard_controls)
@property
def iso_concentrations(self):
"""
The ISO concentrations for the different rack sectors.
"""
return self.__iso_concentrations
def __find_iso_concentrations(self, transfection_layout, regard_controls):
# Finds the ISO concentration for each rack sector.
determiner = IsoRequestValueDeterminer(transfection_layout,
'iso_concentration',
self.number_sectors,
regard_controls)
self.__iso_concentrations = determiner.get_result()
if self.__iso_concentrations is None:
msg = ', '.join(determiner.get_messages())
raise ValueError(msg)
else:
self._remove_none_sectors(self.__iso_concentrations)
| |
'''
Created on Nov 17, 2015
@author: cphurley
'''
import logging
import os
import re
import random
import json
import time
import webapp2
import jinja2
from google.appengine.ext import ndb
from lib.mtg.setutil import SetUtil
from lib.db.user import User
from lib.db.draft import Draft
from lib.db.drafter import Drafter
from lib.db.pack import Pack
from lib.utils import make_secure_val, check_secure_val
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir),
autoescape = True)
class MainHandler(webapp2.RequestHandler):
def write(self, *a, **kw):
self.response.out.write(*a, **kw)
def render_str(self, template, **params):
params['user'] = self.user
t = jinja_env.get_template(template)
return t.render(params)
def render(self, template, **kw):
self.write(self.render_str(template, **kw))
def render_json(self, d):
json_txt = json.dumps(d)
self.response.headers['Content-Type'] = 'application/json; charset=UTF-8'
self.write(json_txt)
def set_secure_cookie(self, name, val):
cookie_val = make_secure_val(val)
self.response.headers.add_header(
'Set-Cookie',
'%s=%s; Path=/' % (name, cookie_val))
def read_secure_cookie(self, name):
cookie_val = self.request.cookies.get(name)
return cookie_val and check_secure_val(cookie_val)
def login(self, user):
self.set_secure_cookie('user_id', str(user.key.integer_id()))
def logout(self):
self.response.headers.add_header('Set-Cookie', 'user_id=; Path=/')
def initialize(self, *a, **kw):
webapp2.RequestHandler.initialize(self, *a, **kw)
uid = self.read_secure_cookie('user_id')
self.user = uid and User.by_id(int(uid))
if self.request.url.endswith('.json'):
self.format = 'json'
else:
self.format = 'html'
class MainPage(MainHandler):
def get(self):
setup_drafts = Draft.query(Draft.in_setup == True).fetch()
setup_ids = []
for draft in setup_drafts:
setup_ids.append(draft.key.integer_id())
# logging.error(setup_ids)
progress_drafts = Draft.query(Draft.in_progress == True).fetch()
progress_ids = []
for draft in progress_drafts:
progress_ids.append(draft.key.integer_id())
# logging.error(progress_ids)
done_drafts = Draft.query(Draft.is_done == True).fetch()
done_ids = []
for draft in done_drafts:
done_ids.append(draft.key.integer_id())
# logging.error(done_ids)
self.render('front.html',
setup_ids = setup_ids,
progress_ids = progress_ids,
done_ids = done_ids)
USER_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
def valid_username(username):
return username and USER_RE.match(username)
PASS_RE = re.compile(r"^.{3,20}$")
def valid_password(password):
return password and PASS_RE.match(password)
EMAIL_RE = re.compile(r'^[\S]+@[\S]+\.[\S]+$')
def valid_email(email):
return not email or EMAIL_RE.match(email)
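# Illustrative checks (not executed here): valid_username('abc_123') matches,
# valid_username('ab') is rejected as too short; valid_email('') is accepted
# because the email field is optional, while valid_email('foo') is rejected.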
class Signup(MainHandler):
def get(self):
self.render("signup-form.html")
def post(self):
have_error = False
self.username = self.request.get('username')
self.password = self.request.get('password')
self.verify = self.request.get('verify')
self.email = self.request.get('email')
params = dict(username = self.username,
email = self.email)
if not valid_username(self.username):
params['error_username'] = "That's not a valid username."
have_error = True
if not valid_password(self.password):
params['error_password'] = "That wasn't a valid password."
have_error = True
elif self.password != self.verify:
params['error_verify'] = "Your passwords didn't match."
have_error = True
if not valid_email(self.email):
params['error_email'] = "That's not a valid email."
have_error = True
if have_error:
self.render('signup-form.html', **params)
else:
self.done()
def done(self):
#make sure the user doesn't already exist
u = User.by_name(self.username)
if u:
msg = 'That user already exists.'
self.render('signup-form.html', error_username = msg)
else:
u = User.register(self.username, self.password, self.email)
u.put()
self.login(u)
self.redirect('/')
class Login(MainHandler):
def get(self):
self.render('login-form.html')
def post(self):
username = self.request.get('username')
password = self.request.get('password')
u = User.login(username, password)
if u:
self.login(u)
self.redirect('/')
else:
msg = 'Invalid login'
self.render('login-form.html', error = msg)
class Logout(MainHandler):
def get(self):
self.logout()
self.redirect('/')
class NewDraft(MainHandler):
def get(self):
if self.user:
self.render('new-draft.html', sets = SetUtil().data())
else:
self.redirect('/login')
def post(self):
name = self.request.get('name')
pack1 = self.request.get('pack1')
pack2 = self.request.get('pack2')
pack3 = self.request.get('pack3')
draft = Draft.create(coordinator_key=self.user.key, name=name,
pack_codes=[pack1, pack2, pack3])
draft.put()
self.redirect('/draft/%s' % str(draft.key.integer_id()))
class DraftPage(MainHandler):
def lookup_drafter(self, user_key, draft_key):
drafters_found = Drafter.query(Drafter.user_key == user_key,
Drafter.draft_key == draft_key).fetch(1)
if len(drafters_found) > 0:
return drafters_found[0]
def get(self, draft_id):
draft_key = ndb.Key('Draft', int(draft_id))
draft = draft_key.get()
if not draft:
self.error(404)
return
can_join = False
joined = False
is_coordinator = False
status = 'None'
direction = 'None'
# has_pack = False
pack = None
# pool = None
drafter = None
set_code = None
if self.user:
drafter = self.lookup_drafter(user_key=self.user.key,
draft_key=draft_key)
# if drafter:
# pool = drafter.picked_cards
if draft.in_progress:
status = 'in_progress'
# can_join = False
set_code = draft.get_current_set_code()
if draft.passing_right:
direction = 'Right'
else:
direction = 'Left'
#figure out if the logged in user is also a drafter and give them
#their stuff, using a query or something else?
# logging.error('drafter************************************')
# logging.error(drafter)
if drafter:
if len(drafter.pack_keys) > 0:
pack = drafter.pack_keys[0].get()
elif draft.pack_num == 0:
status = 'waiting_to_start'
if self.user:
can_join = True
if self.user.key in draft.user_keys:
joined = True
can_join = False
if self.user.key == draft.coordinator_key:
is_coordinator = True
else:
status = 'completed'
time_fmt = '%b %d, %Y'
draft_info = {'name':draft.name,
'created':draft.created.strftime(time_fmt),
'modified':draft.modified.strftime(time_fmt),
'packs':[],
'pack_num':draft.pack_num,
# 'current_pack_code':draft.pack_codes[draft.pack_num],
'status':status,
'users':[],
'drafters':[],
'can_join':can_join,
'joined':joined,
'is_coordinator':is_coordinator,
}
if direction:
draft_info['direction'] = direction
card_details = {}
if pack:
draft_info['pack'] = []
for card in pack.cards:
# logging.error(card)
# logging.error(set_code)
card_details[card] = SetUtil().get_card_details(
set_code=set_code, card_name=card)
draft_info['pack'].append(
SetUtil().get_card_details(
set_code=set_code, card_name=card))
if len(pack.cards) < 1:
#this should never happen but there seems to be a bug so
# I'm deleting the empty pack TODO: create a better way to
# fix this.
logging.error("pack should have at least one card but is empty")
logging.error("removing pack_key:")
logging.error(pack)
logging.error("from drafter:")
logging.error(drafter)
                    drafter.pack_keys.remove(pack.key)
                    drafter.put()
for pack_code in draft.pack_codes:
draft_info['packs'].append(
{'code':pack_code,
'name':SetUtil().data()[pack_code]['name']} )
for drafter_key in draft.drafter_keys:
draft_info['drafters'].append(
{'name':drafter_key.get().user_key.get().name,
'num_packs':
self.lookup_drafter(drafter_key.get().user_key,
draft_key).get_num_packs_queued()
}
)
for user_key in draft.user_keys:
draft_info['users'].append(user_key.get().name)
if drafter:
draft_info['pool'] = drafter.picked_cards
# logging.error(draft.drafters)
        if self.format == 'json':
self.render_json(draft_info)
else:
self.render('draft.html', draft_info=draft_info)
def post(self, draft_id):
draft_key = ndb.Key('Draft', int(draft_id))
draft = draft_key.get()
# logging.error('join_or_leave='+self.request.get('join_or_leave'))
# logging.error('start='+self.request.get('start'))
if self.user:
if self.request.get('join_or_leave') == 'join':
# logging.error('appending')
if draft.in_setup:
draft.user_keys.append(self.user.key)
draft.put()
else:
logging.error('cannot join draft when draft not in_setup')
if self.request.get('join_or_leave') == 'leave':
# logging.error('removing')
if draft.in_setup:
draft.user_keys.remove(self.user.key)
draft.put()
else:
logging.error('cannot leave draft when draft not in_setup')
if self.request.get('start') == 'start':
if draft.in_setup:
self.start_draft(draft)
# time.sleep(1)
else:
logging.error('cannot start draft when draft not in_setup')
if self.request.get('pick'):
if draft.in_progress:
picked_card = self.request.get('pick')
self.make_pick(draft=draft,
drafter=self.lookup_drafter(user_key=self.user.key,
draft_key=draft_key),
picked_card=picked_card)
if draft.num_picks_queued == 0:
draft_done = not self.next_pack(draft)
if draft_done:
draft.in_progress = False
draft.is_done = True
draft.put()
else:
logging.error('cannot make pick when draft not in_progress')
# time.sleep(1)
self.redirect('/draft/%s' % str(draft.key.integer_id()))
def start_draft(self, draft):
draft.in_setup = False
draft.in_progress = True
#shuffle user_keys to create random order
random.shuffle(draft.user_keys)
#create the individual drafter items
position = 0
drafter_entities = []
for user_key in draft.user_keys:
drafter_entities.append(Drafter.create(user_key=user_key,
draft_key=draft.key, position=position))
position += 1
draft.drafter_keys = ndb.put_multi(drafter_entities)
# logging.error('424: draft')
# logging.error(draft)
pack_entities = []
#get boosters for all the packs
for pack_code in draft.pack_codes:
packs = SetUtil().generate_boosters(
num=draft.num_drafters,
set_code=pack_code)
random.shuffle(packs)
for pack in packs:
pack_entities.append(Pack.create(draft_key=draft.key,
cards=pack))
draft.unopened_pack_keys = ndb.put_multi(pack_entities)
draft.put()
# time.sleep(5)
# logging.error('440: draft')
# logging.error(draft)
self.next_pack(draft)
#returns True if there is another pack to go to, False if not
def next_pack(self, draft):
#if there are enough unopened packs
if draft.num_unopened_packs >= draft.num_drafters:
draft.pack_num += 1
draft.passing_right = not draft.passing_right
# time.sleep(5) #wait before getting the drafter keys due to eventual consistency problems
# logging.error('448: draft')
# logging.error(draft)
# logging.error('454: draft.drafter_keys')
# logging.error(draft.drafter_keys)
            drafter_keys = list(draft.drafter_keys) #make a copy because ndb does something weird and converts the key to _BaseValue sometimes
# logging.error('457: drafter_keys')
# logging.error(drafter_keys)
for drafter_key in drafter_keys:
# logging.error('451: drafter_key=')
# logging.error(drafter_key)
drafter = drafter_key.get()
#give drafter a pack and remove it from the unopened pack list
new_pack = draft.unopened_pack_keys[0]
drafter.pack_keys.append(new_pack)
draft.unopened_pack_keys.remove(new_pack)
# logging.error('drafter.pack_keys[0].get()=')
# logging.error(drafter.pack_keys[0].get())
drafter.put()
draft.put()
# time.sleep(1)
return True
return False
def make_pick(self, draft, drafter, picked_card):
if len(drafter.pack_keys) > 0:
pack_key = drafter.pack_keys[0]
pack = pack_key.get()
if picked_card in pack.cards:
#drafter puts the card in pool
pack.cards.remove(picked_card)
drafter.picked_cards.append(picked_card)
#drafter passes the pack to the next player if there are cards
#left to pass, otherwise pack just gets removed
drafter.pack_keys.remove(pack_key)
if len(pack.cards) > 0:
                    receiving_drafter = None
                    if draft.passing_right:
                        receiving_position = drafter.position + 1
                        if receiving_position >= draft.num_drafters:
                            receiving_position = 0
                    else:
                        receiving_position = drafter.position - 1
                        if receiving_position < 0:
                            receiving_position = draft.num_drafters - 1
                    receiving_drafter = draft.drafter_keys[receiving_position].get()
                    receiving_drafter.pack_keys.append(pack_key)
                    receiving_drafter.put()
pack.put()
drafter.put()
draft.put()
return True #since we have successfully picked a card
else:
logging.error('picked card is not in current pack')
else:
logging.error('no pack to pick from')
return False #since we did not successfully pick a card
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A tf.learn implementation of online extremely random forests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.tensor_forest.client import eval_metrics
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
KEYS_NAME = 'keys'
LOSS_NAME = 'rf_training_loss'
TREE_PATHS_PREDICTION_KEY = 'tree_paths'
VARIANCE_PREDICTION_KEY = 'regression_variance'
EPSILON = 0.000001
class TensorForestRunOpAtEndHook(session_run_hook.SessionRunHook):
def __init__(self, op_dict):
"""Ops is a dict of {name: op} to run before the session is destroyed."""
self._ops = op_dict
def end(self, session):
for name in sorted(self._ops.keys()):
logging.info('{0}: {1}'.format(name, session.run(self._ops[name])))
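# Illustrative usage sketch (hypothetical op; a sketch only): passing
# TensorForestRunOpAtEndHook({'feature_importances': importances_op}) in an
# estimator's hook list logs each evaluated op once when the session ends.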
class TensorForestLossHook(session_run_hook.SessionRunHook):
"""Monitor to request stop when loss stops decreasing."""
def __init__(self,
early_stopping_rounds,
early_stopping_loss_threshold=None,
loss_op=None):
self.early_stopping_rounds = early_stopping_rounds
self.early_stopping_loss_threshold = early_stopping_loss_threshold
self.loss_op = loss_op
self.min_loss = None
self.last_step = -1
# self.steps records the number of steps for which the loss has been
# non-decreasing
self.steps = 0
def before_run(self, run_context):
loss = (self.loss_op if self.loss_op is not None else
run_context.session.graph.get_operation_by_name(
LOSS_NAME).outputs[0])
return session_run_hook.SessionRunArgs(
{'global_step': training_util.get_global_step(),
'current_loss': loss})
def after_run(self, run_context, run_values):
current_loss = run_values.results['current_loss']
current_step = run_values.results['global_step']
self.steps += 1
# Guard against the global step going backwards, which might happen
# if we recover from something.
if self.last_step == -1 or self.last_step > current_step:
logging.info('TensorForestLossHook resetting last_step.')
self.last_step = current_step
self.steps = 0
self.min_loss = None
return
self.last_step = current_step
if (self.min_loss is None or current_loss <
(self.min_loss - self.min_loss * self.early_stopping_loss_threshold)):
self.min_loss = current_loss
self.steps = 0
if self.steps > self.early_stopping_rounds:
logging.info('TensorForestLossHook requesting stop.')
run_context.request_stop()
def get_default_head(params, weights_name, name=None):
if params.regression:
return head_lib.regression_head(
weight_column_name=weights_name,
label_dimension=params.num_outputs,
enable_centered_bias=False,
head_name=name)
else:
return head_lib.multi_class_head(
params.num_classes,
weight_column_name=weights_name,
enable_centered_bias=False,
head_name=name)
def get_model_fn(params,
graph_builder_class,
device_assigner,
feature_columns=None,
weights_name=None,
model_head=None,
keys_name=None,
early_stopping_rounds=100,
early_stopping_loss_threshold=0.001,
num_trainers=1,
trainer_id=0,
report_feature_importances=False,
local_eval=False,
head_scope=None):
"""Return a model function given a way to construct a graph builder."""
if model_head is None:
model_head = get_default_head(params, weights_name)
def _model_fn(features, labels, mode):
"""Function that returns predictions, training loss, and training op."""
if (isinstance(features, ops.Tensor) or
isinstance(features, sparse_tensor.SparseTensor)):
features = {'features': features}
if feature_columns:
features = features.copy()
features.update(layers.transform_features(features, feature_columns))
weights = None
if weights_name and weights_name in features:
weights = features.pop(weights_name)
keys = None
if keys_name and keys_name in features:
keys = features.pop(keys_name)
# If we're doing eval, optionally ignore device_assigner.
# Also ignore device assigner if we're exporting (mode == INFER)
dev_assn = device_assigner
if (mode == model_fn_lib.ModeKeys.INFER or
(local_eval and mode == model_fn_lib.ModeKeys.EVAL)):
dev_assn = None
graph_builder = graph_builder_class(params,
device_assigner=dev_assn)
logits, tree_paths, regression_variance = graph_builder.inference_graph(
features)
summary.scalar('average_tree_size', graph_builder.average_size())
# For binary classification problems, convert probabilities to logits.
# Includes hack to get around the fact that a probability might be 0 or 1.
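# i.e. logit = log(p1 / (1 - p1)); both the denominator (1 - p1) and the ratio
# are clipped below at EPSILON so neither division by zero nor log(0) can occur.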
if not params.regression and params.num_classes == 2:
class_1_probs = array_ops.slice(logits, [0, 1], [-1, 1])
logits = math_ops.log(
math_ops.maximum(class_1_probs / math_ops.maximum(
1.0 - class_1_probs, EPSILON), EPSILON))
# labels might be None if we're doing prediction (which brings up the
# question of why we force everything to adhere to a single model_fn).
training_graph = None
training_hooks = []
if labels is not None and mode == model_fn_lib.ModeKeys.TRAIN:
with ops.control_dependencies([logits.op]):
training_graph = control_flow_ops.group(
graph_builder.training_graph(
features, labels, input_weights=weights,
num_trainers=num_trainers,
trainer_id=trainer_id),
state_ops.assign_add(contrib_framework.get_global_step(), 1))
# Put weights back in
if weights is not None:
features[weights_name] = weights
# TensorForest's training graph isn't calculated directly from the loss
# like many other models.
def _train_fn(unused_loss):
return training_graph
model_ops = model_head.create_model_fn_ops(
features=features,
labels=labels,
mode=mode,
train_op_fn=_train_fn,
logits=logits,
scope=head_scope)
# Ops are run in lexicographical order of their keys. Run the resource
# clean-up op last.
all_handles = graph_builder.get_all_resource_handles()
ops_at_end = {
'9: clean up resources': control_flow_ops.group(
*[resource_variable_ops.destroy_resource_op(handle)
for handle in all_handles])}
if report_feature_importances:
ops_at_end['1: feature_importances'] = (
graph_builder.feature_importances())
training_hooks.append(TensorForestRunOpAtEndHook(ops_at_end))
if early_stopping_rounds:
training_hooks.append(
TensorForestLossHook(
early_stopping_rounds,
early_stopping_loss_threshold=early_stopping_loss_threshold,
loss_op=model_ops.loss))
model_ops.training_hooks.extend(training_hooks)
if keys is not None:
model_ops.predictions[keys_name] = keys
if params.inference_tree_paths:
model_ops.predictions[TREE_PATHS_PREDICTION_KEY] = tree_paths
if params.regression:
model_ops.predictions[VARIANCE_PREDICTION_KEY] = regression_variance
return model_ops
return _model_fn
class TensorForestEstimator(estimator.Estimator):
"""An estimator that can train and evaluate a random forest.
Example:
```python
params = tf.contrib.tensor_forest.python.tensor_forest.ForestHParams(
num_classes=2, num_features=40, num_trees=10, max_nodes=1000)
# Estimator using the default graph builder.
estimator = TensorForestEstimator(params, model_dir=model_dir)
# Or estimator using TrainingLossForest as the graph builder.
estimator = TensorForestEstimator(
params, graph_builder_class=tensor_forest.TrainingLossForest,
model_dir=model_dir)
# Input builders
def input_fn_train():  # returns x, y
...
def input_fn_eval():  # returns x, y
...
estimator.fit(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
# Predict returns an iterable of dicts.
results = list(estimator.predict(x=x))
prob0 = results[0][eval_metrics.INFERENCE_PROB_NAME]
prediction0 = results[0][eval_metrics.INFERENCE_PRED_NAME]
```
"""
def __init__(self,
params,
device_assigner=None,
model_dir=None,
feature_columns=None,
graph_builder_class=tensor_forest.RandomForestGraphs,
config=None,
weight_column=None,
keys_column=None,
feature_engineering_fn=None,
early_stopping_rounds=100,
early_stopping_loss_threshold=0.001,
num_trainers=1,
trainer_id=0,
report_feature_importances=False,
local_eval=False,
version=None,
head=None):
"""Initializes a TensorForestEstimator instance.
Args:
params: ForestHParams object that holds random forest hyperparameters.
These parameters will be passed into `model_fn`.
device_assigner: An `object` instance that controls how trees get
assigned to devices. If `None`, will use
`tensor_forest.RandomForestDeviceAssigner`.
model_dir: Directory to save model parameters, graph, etc. To continue
training a previously saved model, load checkpoints saved to this
directory into an estimator.
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `_FeatureColumn`.
graph_builder_class: An `object` instance that defines how TF graphs for
random forest training and inference are built. By default will use
`tensor_forest.RandomForestGraphs`. Can be overridden by version
kwarg.
config: `RunConfig` object to configure the runtime settings.
weight_column: A string defining feature column name representing
weights. Will be multiplied by the loss of the example. Used to
downweight or boost examples during training.
keys_column: A string naming one of the features to strip out and
pass through into the inference/eval results dict. Useful for
associating specific examples with their prediction.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and returns features and
labels which will be fed into the model.
early_stopping_rounds: Allows training to terminate early if the forest is
no longer growing. 100 by default. Set to a Falsy value to disable
the default training hook.
early_stopping_loss_threshold: Percentage (as fraction) that loss must
improve by within early_stopping_rounds steps, otherwise training will
terminate.
num_trainers: Number of training jobs, which will partition trees
among them.
trainer_id: Which trainer this instance is.
report_feature_importances: If True, print out feature importances
during evaluation.
local_eval: If True, don't use a device assigner for eval. This is to
support some common setups where eval is done on a single machine, even
though training might be distributed.
version: Unused.
head: A heads_lib.Head object that calculates losses and such. If None,
one will be automatically created based on params.
Returns:
A `TensorForestEstimator` instance.
"""
super(TensorForestEstimator, self).__init__(
model_fn=get_model_fn(
params.fill(),
graph_builder_class,
device_assigner,
feature_columns=feature_columns,
model_head=head,
weights_name=weight_column,
keys_name=keys_column,
early_stopping_rounds=early_stopping_rounds,
early_stopping_loss_threshold=early_stopping_loss_threshold,
num_trainers=num_trainers,
trainer_id=trainer_id,
report_feature_importances=report_feature_importances,
local_eval=local_eval),
model_dir=model_dir,
config=config,
feature_engineering_fn=feature_engineering_fn)
def get_combined_model_fn(model_fns):
"""Get a combined model function given a list of other model fns.
The model function returned will call the individual model functions and
combine them appropriately. For:
training ops: tf.group them.
loss: average them.
predictions: concat probabilities such that predictions[*][0-C1] are the
probabilities for output 1 (where C1 is the number of classes in output 1),
predictions[*][C1-(C1+C2)] are the probabilities for output 2 (where C2
is the number of classes in output 2), etc. Also stack predictions such
that predictions[i][j] is the class prediction for example i and output j.
This assumes that labels are 2-dimensional, with labels[i][j] being the
label for example i and output j, where forest j is trained using only
output j.
Args:
model_fns: A list of model functions obtained from get_model_fn.
Returns:
A model function that, when called with (features, labels, mode), returns a
`ModelFnOps` instance combining the outputs of the given model functions.
"""
def _model_fn(features, labels, mode):
"""Function that returns predictions, training loss, and training op."""
model_fn_ops = []
for i in range(len(model_fns)):
with variable_scope.variable_scope('label_{0}'.format(i)):
sliced_labels = array_ops.slice(labels, [0, i], [-1, 1])
model_fn_ops.append(
model_fns[i](features, sliced_labels, mode))
training_hooks = []
for mops in model_fn_ops:
training_hooks += mops.training_hooks
predictions = {}
if (mode == model_fn_lib.ModeKeys.EVAL or
mode == model_fn_lib.ModeKeys.INFER):
# Flatten the probabilities into one dimension.
predictions[eval_metrics.INFERENCE_PROB_NAME] = array_ops.concat(
[mops.predictions[eval_metrics.INFERENCE_PROB_NAME]
for mops in model_fn_ops], axis=1)
predictions[eval_metrics.INFERENCE_PRED_NAME] = array_ops.stack(
[mops.predictions[eval_metrics.INFERENCE_PRED_NAME]
for mops in model_fn_ops], axis=1)
loss = None
if (mode == model_fn_lib.ModeKeys.EVAL or
mode == model_fn_lib.ModeKeys.TRAIN):
loss = math_ops.reduce_sum(
array_ops.stack(
[mops.loss for mops in model_fn_ops])) / len(model_fn_ops)
train_op = None
if mode == model_fn_lib.ModeKeys.TRAIN:
train_op = control_flow_ops.group(
*[mops.train_op for mops in model_fn_ops])
return model_fn_lib.ModelFnOps(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
training_hooks=training_hooks,
scaffold=None,
output_alternatives=None)
return _model_fn
class MultiForestMultiHeadEstimator(estimator.Estimator):
"""An estimator that can train a forest for a multi-headed problems.
This class essentially trains separate forests (each with their own
ForestHParams) for each output.
For multi-headed regression, a single-headed TensorForestEstimator can
be used to train one model that predicts all outputs. This class instead
trains a separate forest (each with its own ForestHParams) for each output.
"""
def __init__(self,
params_list,
device_assigner=None,
model_dir=None,
feature_columns=None,
graph_builder_class=tensor_forest.RandomForestGraphs,
config=None,
weight_column=None,
keys_column=None,
feature_engineering_fn=None,
early_stopping_rounds=100,
num_trainers=1,
trainer_id=0,
report_feature_importances=False,
local_eval=False):
"""See TensorForestEstimator.__init__."""
model_fns = []
for i in range(len(params_list)):
params = params_list[i].fill()
model_fns.append(
get_model_fn(
params,
graph_builder_class,
device_assigner,
model_head=get_default_head(
params, weight_column, name='head{0}'.format(i)),
weights_name=weight_column,
keys_name=keys_column,
early_stopping_rounds=early_stopping_rounds,
num_trainers=num_trainers,
trainer_id=trainer_id,
report_feature_importances=report_feature_importances,
local_eval=local_eval,
head_scope='output{0}'.format(i)))
super(MultiForestMultiHeadEstimator, self).__init__(
model_fn=get_combined_model_fn(model_fns),
model_dir=model_dir,
config=config,
feature_engineering_fn=feature_engineering_fn)
| |
## A script for extracting info about the patients used in the analysis
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This path lookup will only work if you are running the script from the command line.
##If you are not running from the command line, manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'tcga_data','LUAD','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_luad.txt'))
##get the column indexes needed
columns=f.readline().strip().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file) contains the most recent
## follow-up data. This code checks whether the patient has already been loaded into the list and, if so, keeps the more recent data.
## This requires an empty entry in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
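## e.g. a single entry might look like ['TCGA-05-4244', 1523, 'Alive'] (illustrative values only).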
clinical1=[['','','']]
for i in data:
if clinical1[-1][0]==i[patient_column]:
if re.search('^[0-9]+$',i[death_column]):
clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
elif re.search('^[0-9]+$',i[alive_column]):
clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
else:
pass
else:
if re.search('^[0-9]+$',i[death_column]):
clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
elif re.search('^[0-9]+$',i[alive_column]):
clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
else:
pass
## Removing the empty value.
clinical=clinical1[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','LUAD','clinical','nationwidechildrens.org_clinical_patient_luad.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
try:
more_clinical[i[patient_column]]=[0,sex_dict[i[sex_column]],int(i[age_column])]
if re.search('^[0-9]+$',i[death_column]):
clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
elif re.search('^[0-9]+$',i[alive_column]):
clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
else:
pass
except (KeyError, ValueError, IndexError):
## skip patients with missing or malformed sex/age data
pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files.
##All the clinical data is merged, keeping whichever entry is the most up to date.
for i in clinical4:
if i[0] not in [j[0] for j in clinical]:
new_clinical.append(i)
else:
if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
else:
new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
if i[0] not in [j[0] for j in new_clinical]:
new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
if i[0] in more_clinical:
final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','LUAD','FILE_SAMPLE_MAP_mrna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
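## Illustrative example (hypothetical barcode): for 'TCGA-05-4244-01A-01R-1107-07',
## the fourth field '01A' starts with '01' (primary tumor), and the patient ID is
## the first three fields joined with '-', i.e. 'TCGA-05-4244'.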
TCGA_to_mrna={}
for i in data:
## The normalized data files are used
if 'genes.normalized_results' in i[0]:
if i[1].split('-')[3][:-1]=='01':
x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that have complete clinical information
for i in final_clinical:
if i[0] in TCGA_to_mrna:
## The mRNA files are added to the clinical list
## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[mRNA files]],...]
clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
else:
pass
##average age at diagnosis (written to patient_info.txt below)
age=np.mean([i[5] for i in clinical_and_files])
##number of males
males=len([i for i in clinical_and_files if i[4]==0])
##number of females
females=len([i for i in clinical_and_files if i[4]==1])
##to get the median survival we need to call survfit from r
##prepare variables for R
ro.globalenv['times']=ro.IntVector([i[1] for i in clinical_and_files])
##need to create a dummy variable group
ro.globalenv['group']=ro.IntVector([0 for i in clinical_and_files])
##need a vector for deaths
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
ro.globalenv['died']=ro.IntVector([death_dic[i[2]] for i in clinical_and_files])
res=ro.r('survfit(Surv(times,died) ~ as.factor(group))')
#the number of events(deaths) is the fourth column of the output
deaths=str(res).split('\n')[-2].strip().split()[3]
#the median survival time is the fifth column of the output
median=str(res).split('\n')[-2].strip().split()[4]
##write data to a file
f=open('patient_info.txt','w')
f.write('Average Age')
f.write('\t')
f.write('Males')
f.write('\t')
f.write('Females')
f.write('\t')
f.write('Deaths')
f.write('\t')
f.write('Median Survival')
f.write('\n')
f.write(str(age))
f.write('\t')
f.write(str(males))
f.write('\t')
f.write(str(females))
f.write('\t')
f.write(deaths)
f.write('\t')
f.write(median)
f.close()
| |
# -*- coding: utf-8 -*-
import os
import pytest
import pandas as pd
import numpy as np
import scipy
from lifelines import (
NelsonAalenFitter,
KaplanMeierFitter,
CoxPHFitter,
CoxTimeVaryingFitter,
AalenAdditiveFitter,
WeibullFitter,
LogNormalFitter,
LogLogisticFitter,
WeibullAFTFitter,
ExponentialFitter,
AalenJohansenFitter,
BreslowFlemingHarringtonFitter,
)
from lifelines.tests.test_estimation import known_parametric_univariate_fitters
from lifelines.generate_datasets import generate_random_lifetimes, generate_hazard_rates
from lifelines.plotting import plot_lifetimes, cdf_plot, qq_plot, rmst_plot, add_at_risk_counts
from lifelines.datasets import (
load_waltons,
load_regression_dataset,
load_lcd,
load_panel_test,
load_stanford_heart_transplants,
load_rossi,
load_multicenter_aids_cohort_study,
load_nh4,
load_diabetes,
)
from lifelines.generate_datasets import cumulative_integral
from lifelines.calibration import survival_probability_calibration
@pytest.fixture()
def waltons():
return load_waltons()[["T", "E"]].iloc[:50]
@pytest.mark.skipif("DISPLAY" not in os.environ, reason="requires display")
class TestPlotting:
@pytest.fixture
def kmf(self):
return KaplanMeierFitter()
def setup_method(self, method):
pytest.importorskip("matplotlib")
from matplotlib import pyplot as plt
self.plt = plt
def test_parametric_univariate_fitters_has_hazard_plotting_methods(self, block, known_parametric_univariate_fitters):
positive_sample_lifetimes = np.arange(1, 100)
for fitter in known_parametric_univariate_fitters:
f = fitter().fit(positive_sample_lifetimes)
assert f.plot_hazard() is not None
self.plt.title("test_parametric_univariate_fitters_has_hazard_plotting_methods")
self.plt.show(block=block)
def test_parametric_univariate_fitters_has_cumhazard_plotting_methods(self, block, known_parametric_univariate_fitters):
positive_sample_lifetimes = np.arange(1, 100)
for fitter in known_parametric_univariate_fitters:
f = fitter().fit(positive_sample_lifetimes)
assert f.plot_cumulative_hazard() is not None
self.plt.title("test_parametric_univaraite_fitters_has_cumhazard_plotting_methods")
self.plt.show(block=block)
def test_parametric_univariate_fitters_has_survival_plotting_methods(self, block, known_parametric_univariate_fitters):
positive_sample_lifetimes = np.arange(1, 100)
for fitter in known_parametric_univariate_fitters:
f = fitter().fit(positive_sample_lifetimes)
assert f.plot_survival_function() is not None
self.plt.title("test_parametric_univariate_fitters_has_survival_plotting_methods")
self.plt.show(block=block)
def test_negative_times_still_plots(self, block, kmf):
n = 40
T = np.linspace(-2, 3, n)
C = np.random.randint(2, size=n)
kmf.fit(T, C)
ax = kmf.plot()
self.plt.title("test_negative_times_still_plots")
self.plt.show(block=block)
return
def test_kmf_plotting(self, block, kmf):
data1 = np.random.exponential(10, size=(100))
data2 = np.random.exponential(2, size=(200, 1))
data3 = np.random.exponential(4, size=(500, 1))
kmf.fit(data1, label="test label 1")
ax = kmf.plot()
kmf.fit(data2, label="test label 2")
kmf.plot(ax=ax)
kmf.fit(data3, label="test label 3")
kmf.plot(ax=ax)
self.plt.title("test_kmf_plotting")
self.plt.show(block=block)
return
def test_kmf_with_risk_counts(self, block, kmf):
data1 = np.random.exponential(10, size=(100))
kmf.fit(data1)
kmf.plot(at_risk_counts=True)
self.plt.title("test_kmf_with_risk_counts")
self.plt.show(block=block)
def test_kmf_add_at_risk_counts_with_subplot(self, block, kmf):
T = np.random.exponential(10, size=(100))
E = np.random.binomial(1, 0.8, size=(100))
kmf.fit(T, E)
fig = self.plt.figure()
axes = fig.subplots(1, 2)
kmf.plot(ax=axes[0])
add_at_risk_counts(kmf, ax=axes[0])
kmf.plot(ax=axes[1])
self.plt.title("test_kmf_add_at_risk_counts_with_subplot")
self.plt.show(block=block)
def test_kmf_add_at_risk_counts_with_specific_rows(self, block, kmf):
T = np.random.exponential(10, size=(100))
E = np.random.binomial(1, 0.8, size=(100))
kmf.fit(T, E)
fig = self.plt.figure()
ax = fig.subplots(1, 1)
kmf.plot(ax=ax)
add_at_risk_counts(kmf, ax=ax, rows_to_show=["Censored", "At risk"])
self.plt.tight_layout()
self.plt.title("test_kmf_add_at_risk_counts_with_specific_rows")
self.plt.show(block=block)
def test_kmf_add_at_risk_counts_with_single_row_multi_groups(self, block, kmf):
T = np.random.exponential(10, size=(100))
E = np.random.binomial(1, 0.8, size=(100))
kmf_test = KaplanMeierFitter().fit(T, E, label="test")
T = np.random.exponential(15, size=(1000))
E = np.random.binomial(1, 0.6, size=(1000))
kmf_con = KaplanMeierFitter().fit(T, E, label="con")
fig = self.plt.figure()
ax = fig.subplots(1, 1)
kmf_test.plot(ax=ax)
kmf_con.plot(ax=ax)
ax.set_ylim([0.0, 1.1])
ax.set_xlim([0.0, 100])
ax.set_xlabel("Days")
ax.set_ylabel("Survival probability")
add_at_risk_counts(kmf_test, kmf_con, ax=ax, rows_to_show=["At risk"], ypos=-0.4)
self.plt.title("test_kmf_add_at_risk_counts_with_single_row_multi_groups")
self.plt.tight_layout()
self.plt.show(block=block)
def test_kmf_add_at_risk_counts_with_custom_subplot(self, block, kmf):
# https://github.com/CamDavidsonPilon/lifelines/issues/991#issuecomment-614427882
import lifelines
import matplotlib as mpl
from lifelines.datasets import load_waltons
plt = self.plt
waltons = load_waltons()
ix = waltons["group"] == "control"
img_no = 3
height = 4 * img_no
half_inch = 0.5 / height # in percent height
_fig = plt.figure(figsize=(6, height), dpi=100)
gs = mpl.gridspec.GridSpec(img_no, 1)
# plt.subplots_adjust(left=0.08, right=0.98, bottom=half_inch, top=1 - half_inch)
for i in range(img_no):
ax = plt.subplot(gs[i, 0])
kmf_control = lifelines.KaplanMeierFitter()
ax = kmf_control.fit(waltons.loc[ix]["T"], waltons.loc[ix]["E"], label="control").plot(ax=ax)
kmf_exp = lifelines.KaplanMeierFitter()
ax = kmf_exp.fit(waltons.loc[~ix]["T"], waltons.loc[~ix]["E"], label="exp").plot(ax=ax)
ax = lifelines.plotting.add_at_risk_counts(kmf_exp, kmf_control, ax=ax)
plt.subplots_adjust(hspace=0.6)
plt.title("test_kmf_add_at_risk_counts_with_custom_subplot")
plt.show(block=block)
def test_naf_plotting_with_custom_colours(self, block):
data1 = np.random.exponential(5, size=(200, 1))
data2 = np.random.exponential(1, size=(500))
naf = NelsonAalenFitter()
naf.fit(data1)
ax = naf.plot(color="r")
naf.fit(data2)
naf.plot(ax=ax, color="k")
self.plt.title("test_naf_plotting_with_custom_coloirs")
self.plt.show(block=block)
return
def test_ajf_plotting(self, block):
E = [0, 1, 1, 2, 2, 0]
T = [1, 2, 3, 4, 5, 6]
ajf = AalenJohansenFitter().fit(T, E, event_of_interest=1)
ajf.plot()
self.plt.title("test_ajf_plotting")
self.plt.show(block=block)
return
def test_ajf_plotting_no_confidence_intervals(self, block):
E = [0, 1, 1, 2, 2, 0]
T = [1, 2, 3, 4, 5, 6]
ajf = AalenJohansenFitter(calculate_variance=False).fit(T, E, event_of_interest=1)
ajf.plot(ci_show=False)
self.plt.title("test_ajf_plotting_no_confidence_intervals")
self.plt.show(block=block)
return
def test_ajf_plotting_with_add_count_at_risk(self, block):
E = [0, 1, 1, 2, 2, 0]
T = [1, 2, 3, 4, 5, 6]
ajf = AalenJohansenFitter().fit(T, E, event_of_interest=1)
ajf.plot(at_risk_counts=True)
self.plt.title("test_ajf_plotting_with_add_count_at_risk")
self.plt.show(block=block)
return
def test_aalen_additive_plot(self, block):
# this is a visual test of fitting the cumulative
# hazards.
n = 2500
d = 3
timeline = np.linspace(0, 70, 10000)
hz, coef, X = generate_hazard_rates(n, d, timeline)
T = generate_random_lifetimes(hz, timeline)
T[np.isinf(T)] = 10
C = np.random.binomial(1, 1.0, size=n)
X["T"] = T
X["E"] = C
# fit the aaf, no intercept as it is already built into X, X[2] is ones
aaf = AalenAdditiveFitter(coef_penalizer=0.1, fit_intercept=False)
aaf.fit(X, "T", "E")
ax = aaf.plot(iloc=slice(0, aaf.cumulative_hazards_.shape[0] - 100))
ax.set_xlabel("time")
ax.set_title("test_aalen_additive_plot")
self.plt.show(block=block)
return
def test_kmf_with_interval_censoring_plotting(self, block):
kmf = KaplanMeierFitter()
left, right = load_diabetes()["left"], load_diabetes()["right"]
kmf.fit_interval_censoring(left, right)
kmf.plot(color="r")
self.plt.show(block=block)
return
def test_aalen_additive_smoothed_plot(self, block):
# this is a visual test of fitting the cumulative
# hazards.
n = 2500
d = 3
timeline = np.linspace(0, 150, 5000)
hz, coef, X = generate_hazard_rates(n, d, timeline)
T = generate_random_lifetimes(hz, timeline) + 0.1 * np.random.uniform(size=(n, 1))
C = np.random.binomial(1, 0.8, size=n)
X["T"] = T
X["E"] = C
# fit the aaf, no intercept as it is already built into X, X[2] is ones
aaf = AalenAdditiveFitter(coef_penalizer=0.1, fit_intercept=False)
aaf.fit(X, "T", "E")
ax = aaf.smoothed_hazards_(1).iloc[0 : aaf.cumulative_hazards_.shape[0] - 500].plot()
ax.set_xlabel("time")
ax.set_title("test_aalen_additive_smoothed_plot")
self.plt.show(block=block)
return
def test_naf_plotting_slice(self, block):
data1 = np.random.exponential(5, size=(200, 1))
data2 = np.random.exponential(1, size=(200, 1))
naf = NelsonAalenFitter()
naf.fit(data1)
ax = naf.plot(loc=slice(0, None))
naf.fit(data2)
naf.plot(ax=ax, ci_force_lines=True, iloc=slice(100, 180))
self.plt.title("test_naf_plotting_slice")
self.plt.show(block=block)
return
def test_plot_lifetimes_calendar(self, block, waltons):
T, E = waltons["T"], waltons["E"]
current = 10
birthtimes = current * np.random.uniform(size=(T.shape[0],))
ax = plot_lifetimes(T, event_observed=E, entry=birthtimes)
assert ax is not None
self.plt.title("test_plot_lifetimes_calendar")
self.plt.show(block=block)
def test_plot_lifetimes_left_truncation(self, block, waltons):
T, E = waltons["T"], waltons["E"]
N = 20
current = 10
birthtimes = current * np.random.uniform(size=(T.shape[0],))
ax = plot_lifetimes(T, event_observed=E, entry=birthtimes, left_truncated=True)
assert ax is not None
self.plt.title("test_plot_lifetimes_left_truncation")
self.plt.show(block=block)
def test_MACS_data_with_plot_lifetimes(self, block):
df = load_multicenter_aids_cohort_study()
plot_lifetimes(
df["T"] - df["W"],
event_observed=df["D"],
entry=df["W"],
event_observed_color="#383838",
event_censored_color="#383838",
left_truncated=True,
)
self.plt.ylabel("Patient Number")
self.plt.xlabel("Years from AIDS diagnosis")
self.plt.title("test_MACS_data_with_plot_lifetimes")
self.plt.show(block=block)
def test_plot_lifetimes_relative(self, block, waltons):
T, E = waltons["T"], waltons["E"]
ax = plot_lifetimes(T, event_observed=E)
assert ax is not None
self.plt.title("test_plot_lifetimes_relative")
self.plt.show(block=block)
def test_naf_plot_cumulative_hazard(self, block):
data1 = np.random.exponential(5, size=(200, 1))
naf = NelsonAalenFitter()
naf.fit(data1)
ax = naf.plot()
naf.plot_cumulative_hazard(ax=ax, ci_force_lines=True)
self.plt.title("I should have plotted the same thing, but different styles + color!")
self.plt.show(block=block)
return
def test_naf_plot_cumulative_hazard_bandwidth_2(self, block):
data1 = np.random.exponential(5, size=(2000, 1))
naf = NelsonAalenFitter()
naf.fit(data1)
naf.plot_hazard(bandwidth=1.0, loc=slice(0, 7.0))
self.plt.title("test_naf_plot_cumulative_hazard_bandwidth_2")
self.plt.show(block=block)
return
def test_naf_plot_cumulative_hazard_bandwidth_1(self, block):
data1 = np.random.exponential(5, size=(2000, 1)) ** 2
naf = NelsonAalenFitter()
naf.fit(data1)
naf.plot_hazard(bandwidth=5.0, iloc=slice(0, 1700))
self.plt.title("test_naf_plot_cumulative_hazard_bandwith_1")
self.plt.show(block=block)
return
def test_breslow_fleming_harrington_plotting(self, block):
T = 50 * np.random.exponential(1, size=(200, 1)) ** 2
bf = BreslowFlemingHarringtonFitter().fit(T)
bf.plot()
self.plt.title("test_breslow_fleming_harrington_plotting")
self.plt.show(block=block)
return
def test_weibull_plotting(self, block):
T = 50 * np.random.exponential(1, size=(200, 1)) ** 2
wf = WeibullFitter().fit(T, timeline=np.linspace(0, 5, 100))
wf.plot_hazard()
self.plt.title("test_weibull_plotting:hazard")
self.plt.show(block=block)
wf.plot_cumulative_hazard()
self.plt.title("test_weibull_plotting:cumulative_hazard")
self.plt.show(block=block)
return
def test_parametric_plotting_with_show_censors(self, block):
n = 200
T = (np.sqrt(50) * np.random.exponential(1, size=n)) ** 2
E = T < 100
T = np.minimum(T, 100)
wf = WeibullFitter().fit(T, E)
wf.plot_density(show_censors=True)
wf.plot_cumulative_density(show_censors=True)
self.plt.title("test_parametric_plotting_with_show_censors:cumulative_density")
self.plt.show(block=block)
wf.plot_survival_function(show_censors=True)
self.plt.title("test_parametric_plotting_with_show_censors:survival_function")
self.plt.show(block=block)
wf.plot_cumulative_hazard(show_censors=True)
self.plt.title("test_parametric_plotting_with_show_censors:cumulative_hazard")
self.plt.show(block=block)
wf.plot_density(show_censors=True)
self.plt.title("test_parametric_plotting_with_show_censors:density")
self.plt.show(block=block)
return
def test_label_can_be_changed_on_univariate_fitters(self, block):
T = np.random.exponential(5, size=(2000, 1)) ** 2
wf = WeibullFitter().fit(T, timeline=np.linspace(0, 5))
ax = wf.plot_hazard(label="abc")
wf.plot_cumulative_hazard(ax=ax, label="123")
self.plt.title("test_label_can_be_changed_on_univariate_fitters")
self.plt.show(block=block)
return
def test_show_censor_with_discrete_date(self, block, kmf):
T = np.random.binomial(20, 0.1, size=100)
C = np.random.binomial(1, 0.8, size=100)
kmf.fit(T, C).plot(show_censors=True)
self.plt.title("test_show_censor_with_discrete_date")
self.plt.show(block=block)
return
def test_show_censor_with_index_0(self, block, kmf):
T = np.random.binomial(20, 0.9, size=100) # lifelines should auto put a 0 in.
C = np.random.binomial(1, 0.8, size=100)
kmf.fit(T, C).plot(show_censors=True)
self.plt.title("test_show_censor_with_index_0")
self.plt.show(block=block)
return
def test_flat_style_with_custom_censor_styles(self, block, kmf):
data1 = np.random.exponential(10, size=200)
E = np.random.rand(200) < 0.8
kmf.fit(data1, E, label="test label 1")
kmf.plot(ci_force_lines=True, show_censors=True, censor_styles={"marker": "|", "mew": 1, "ms": 10})
self.plt.title("test_flat_style_no_censor")
self.plt.show(block=block)
return
def test_loglogs_plot(self, block, kmf):
data1 = np.random.exponential(10, size=200)
data2 = np.random.exponential(5, size=200)
kmf.fit(data1, label="test label 1")
ax = kmf.plot_loglogs()
kmf.fit(data2, label="test label 2")
ax = kmf.plot_loglogs(ax=ax)
self.plt.title("test_loglogs_plot")
self.plt.show(block=block)
return
def test_seaborn_doesnt_cause_kmf_plot_error(self, block, kmf, capsys):
import seaborn as sns
df = load_waltons()
T = df["T"]
E = df["E"]
kmf = KaplanMeierFitter()
kmf.fit(T, event_observed=E)
kmf.plot()
self.plt.title("test_seaborn_doesnt_cause_kmf_plot_error")
self.plt.show(block=block)
_, err = capsys.readouterr()
assert err == ""
def test_coxph_plotting(self, block):
df = load_regression_dataset()
cp = CoxPHFitter()
cp.fit(df, "T", "E")
cp.plot()
self.plt.title("test_coxph_plotting")
self.plt.show(block=block)
def test_coxph_plotting_with_hazards_ratios(self, block):
df = load_regression_dataset()
cp = CoxPHFitter()
cp.fit(df, "T", "E")
cp.plot(hazard_ratios=True)
self.plt.title("test_coxph_plotting")
self.plt.show(block=block)
def test_coxph_plotting_with_subset_of_columns(self, block):
df = load_regression_dataset()
cp = CoxPHFitter()
cp.fit(df, "T", "E")
cp.plot(columns=["var1", "var2"])
self.plt.title("test_coxph_plotting_with_subset_of_columns")
self.plt.show(block=block)
def test_coxph_plot_partial_effects_on_outcome(self, block):
df = load_rossi()
cp = CoxPHFitter()
cp.fit(df, "week", "arrest")
cp.plot_partial_effects_on_outcome("age", [10, 50, 80])
self.plt.title("test_coxph_plot_partial_effects_on_outcome")
self.plt.show(block=block)
def test_coxph_plot_partial_effects_on_outcome_with_cumulative_hazard(self, block):
df = load_rossi()
cp = CoxPHFitter()
cp.fit(df, "week", "arrest")
cp.plot_partial_effects_on_outcome("age", [10, 50, 80], y="cumulative_hazard")
self.plt.title("test_coxph_plot_partial_effects_on_outcome")
self.plt.show(block=block)
def test_coxph_plot_partial_effects_on_outcome_with_strata(self, block):
df = load_rossi()
cp = CoxPHFitter()
cp.fit(df, "week", "arrest", strata=["wexp"])
cp.plot_partial_effects_on_outcome("age", [10, 50, 80])
self.plt.title("test_coxph_plot_partial_effects_on_outcome_with_strata")
self.plt.show(block=block)
def test_aft_plot_partial_effects_on_outcome_with_categorical(self, block):
df = load_rossi()
df["cat"] = np.random.choice(["a", "b", "c"], size=df.shape[0])
aft = WeibullAFTFitter()
aft.fit(df, "week", "arrest", formula="cat + age + fin")
aft.plot_partial_effects_on_outcome("cat", values=["a", "b", "c"])
self.plt.title("test_aft_plot_partial_effects_on_outcome_with_categorical")
self.plt.show(block=block)
def test_coxph_plot_partial_effects_on_outcome_with_strata_and_complicated_dtypes(self, block):
# from https://github.com/CamDavidsonPilon/lifelines/blob/master/examples/Customer%20Churn.ipynb
churn_data = pd.read_csv(
"https://raw.githubusercontent.com/"
"treselle-systems/customer_churn_analysis/"
"master/WA_Fn-UseC_-Telco-Customer-Churn.csv"
)
churn_data = churn_data.set_index("customerID")
churn_data = churn_data.drop(["TotalCharges"], axis=1)
churn_data = churn_data.applymap(lambda x: "No" if str(x).startswith("No ") else x)
churn_data["Churn"] = churn_data["Churn"] == "Yes"
strata_cols = ["InternetService"]
cph = CoxPHFitter().fit(
churn_data,
"tenure",
"Churn",
formula="gender + SeniorCitizen + Partner + Dependents + MultipleLines + OnlineSecurity + OnlineBackup + DeviceProtection + TechSupport + Contract + PaperlessBilling + PaymentMethod + MonthlyCharges",
strata=strata_cols,
)
cph.plot_partial_effects_on_outcome("Contract", values=["Month-to-month", "One year", "Two year"], plot_baseline=False)
self.plt.title("test_coxph_plot_partial_effects_on_outcome_with_strata_and_complicated_dtypes")
self.plt.show(block=block)
def test_spline_coxph_plot_partial_effects_on_outcome_with_strata(self, block):
df = load_rossi()
cp = CoxPHFitter(baseline_estimation_method="spline", n_baseline_knots=2)
cp.fit(df, "week", "arrest", strata=["wexp"])
cp.plot_partial_effects_on_outcome("age", [10, 50, 80])
self.plt.title("test_spline_coxph_plot_partial_effects_on_outcome_with_strata")
self.plt.show(block=block)
def test_coxph_plot_partial_effects_on_outcome_with_single_strata(self, block):
df = load_rossi()
cp = CoxPHFitter()
cp.fit(df, "week", "arrest", strata="paro")
cp.plot_partial_effects_on_outcome("age", [10, 50, 80])
self.plt.title("test_coxph_plot_partial_effects_on_outcome_with_strata")
self.plt.show(block=block)
def test_coxph_plot_partial_effects_on_outcome_with_nonnumeric_strata(self, block):
df = load_rossi()
df["strata"] = np.random.choice(["A", "B"], size=df.shape[0])
cp = CoxPHFitter()
cp.fit(df, "week", "arrest", strata="strata")
cp.plot_partial_effects_on_outcome("age", [10, 50, 80])
self.plt.title("test_coxph_plot_partial_effects_on_outcome_with_single_strata")
self.plt.show(block=block)
def test_coxph_plot_partial_effects_on_outcome_with_multiple_variables(self, block):
df = load_rossi()
cp = CoxPHFitter()
cp.fit(df, "week", "arrest")
cp.plot_partial_effects_on_outcome(["age", "prio"], [[10, 0], [50, 10], [80, 90]])
self.plt.title("test_coxph_plot_partial_effects_on_outcome_with_multiple_variables")
self.plt.show(block=block)
def test_coxph_plot_partial_effects_on_outcome_with_multiple_variables_and_strata(self, block):
df = load_rossi()
df["strata"] = np.random.choice(["A", "B"], size=df.shape[0])
cp = CoxPHFitter()
cp.fit(df, "week", "arrest", strata="strata")
cp.plot_partial_effects_on_outcome(["age", "prio"], [[10, 0], [50, 10], [80, 90]])
self.plt.title("test_coxph_plot_partial_effects_on_outcome_with_multiple_variables_and_strata")
self.plt.show(block=block)
def test_coxtv_plotting_with_subset_of_columns(self, block):
df = load_stanford_heart_transplants()
ctv = CoxTimeVaryingFitter()
ctv.fit(df, id_col="id", event_col="event")
ctv.plot(columns=["age", "year"])
self.plt.title("test_coxtv_plotting_with_subset_of_columns")
self.plt.show(block=block)
def test_coxtv_plotting(self, block):
df = load_stanford_heart_transplants()
ctv = CoxTimeVaryingFitter()
ctv.fit(df, id_col="id", event_col="event")
ctv.plot(fmt="o")
self.plt.title("test_coxtv_plotting")
self.plt.show(block=block)
def test_kmf_left_censorship_plots(self, block):
kmf = KaplanMeierFitter()
lcd_dataset = load_lcd()
alluvial_fan = lcd_dataset.loc[lcd_dataset["group"] == "alluvial_fan"]
basin_trough = lcd_dataset.loc[lcd_dataset["group"] == "basin_trough"]
kmf.fit_left_censoring(alluvial_fan["T"], alluvial_fan["E"], label="alluvial_fan")
ax = kmf.plot()
kmf.fit_left_censoring(basin_trough["T"], basin_trough["E"], label="basin_trough")
ax = kmf.plot(ax=ax)
self.plt.title("test_kmf_left_censorship_plots")
self.plt.show(block=block)
return
def test_aalen_additive_fit_no_censor(self, block):
n = 2500
d = 6
timeline = np.linspace(0, 70, 10000)
hz, coef, X = generate_hazard_rates(n, d, timeline)
X.columns = coef.columns
cumulative_hazards = pd.DataFrame(cumulative_integral(coef.values, timeline), index=timeline, columns=coef.columns)
T = generate_random_lifetimes(hz, timeline)
X["T"] = T
X["E"] = np.random.binomial(1, 1, n)
X[np.isinf(X)] = 10
aaf = AalenAdditiveFitter()
aaf.fit(X, "T", "E")
for i in range(d + 1):
ax = self.plt.subplot(d + 1, 1, i + 1)
col = cumulative_hazards.columns[i]
ax = cumulative_hazards[col].loc[:15].plot(ax=ax)
ax = aaf.plot(loc=slice(0, 15), ax=ax, columns=[col])
self.plt.title("test_aalen_additive_fit_no_censor")
self.plt.show(block=block)
return
def test_aalen_additive_fit_with_censor(self, block):
n = 2500
d = 6
timeline = np.linspace(0, 70, 10000)
hz, coef, X = generate_hazard_rates(n, d, timeline)
X.columns = coef.columns
cumulative_hazards = pd.DataFrame(cumulative_integral(coef.values, timeline), index=timeline, columns=coef.columns)
T = generate_random_lifetimes(hz, timeline)
T[np.isinf(T)] = 10
X["T"] = T
X["E"] = np.random.binomial(1, 0.99, n)
aaf = AalenAdditiveFitter()
aaf.fit(X, "T", "E")
for i in range(d + 1):
ax = self.plt.subplot(d + 1, 1, i + 1)
col = cumulative_hazards.columns[i]
ax = cumulative_hazards[col].loc[:15].plot(ax=ax)
ax = aaf.plot(loc=slice(0, 15), ax=ax, columns=[col])
self.plt.title("test_aalen_additive_fit_with_censor")
self.plt.show(block=block)
return
def test_weibull_aft_plotting(self, block):
df = load_regression_dataset()
aft = WeibullAFTFitter()
aft.fit(df, "T", "E")
aft.plot()
self.plt.tight_layout()
self.plt.title("test_weibull_aft_plotting")
self.plt.show(block=block)
def test_weibull_aft_plotting_with_subset_of_columns(self, block):
df = load_regression_dataset()
aft = WeibullAFTFitter()
aft.fit(df, "T", "E")
aft.plot(columns=["var1", "var2"])
self.plt.tight_layout()
self.plt.title("test_weibull_aft_plotting_with_subset_of_columns")
self.plt.show(block=block)
def test_weibull_aft_plot_partial_effects_on_outcome(self, block):
df = load_rossi()
aft = WeibullAFTFitter()
aft.fit(df, "week", "arrest")
aft.plot_partial_effects_on_outcome("age", [10, 50, 80])
self.plt.tight_layout()
self.plt.title("test_weibull_aft_plot_partial_effects_on_outcome")
self.plt.show(block=block)
def test_weibull_aft_plot_partial_effects_on_outcome_with_multiple_columns(self, block):
df = load_rossi()
aft = WeibullAFTFitter()
aft.fit(df, "week", "arrest")
aft.plot_partial_effects_on_outcome(["age", "prio"], [[10, 0], [50, 10], [80, 50]])
self.plt.tight_layout()
self.plt.title("test_weibull_aft_plot_partial_effects_on_outcome_with_multiple_columns")
self.plt.show(block=block)
def test_left_censorship_cdf_plots(self, block):
df = load_nh4()
fig, axes = self.plt.subplots(2, 2, figsize=(9, 5))
axes = axes.reshape(4)
for i, model in enumerate([WeibullFitter(), LogNormalFitter(), LogLogisticFitter(), ExponentialFitter()]):
model.fit_left_censoring(df["NH4.mg.per.L"], ~df["Censored"])
ax = cdf_plot(model, ax=axes[i])
assert ax is not None
self.plt.suptitle("test_left_censorship_cdf_plots")
self.plt.show(block=block)
def test_right_censorship_cdf_plots(self, block):
df = load_rossi()
fig, axes = self.plt.subplots(2, 2, figsize=(9, 5))
axes = axes.reshape(4)
for i, model in enumerate([WeibullFitter(), LogNormalFitter(), LogLogisticFitter(), ExponentialFitter()]):
model.fit(df["week"], df["arrest"])
ax = cdf_plot(model, ax=axes[i])
assert ax is not None
self.plt.suptitle("test_right_censorship_cdf_plots")
self.plt.show(block=block)
def test_qq_plot_left_censoring(self, block):
df = load_nh4()
fig, axes = self.plt.subplots(2, 2, figsize=(9, 5))
axes = axes.reshape(4)
for i, model in enumerate([WeibullFitter(), LogNormalFitter(), LogLogisticFitter(), ExponentialFitter()]):
model.fit_left_censoring(df["NH4.mg.per.L"], ~df["Censored"])
ax = qq_plot(model, ax=axes[i])
assert ax is not None
self.plt.suptitle("test_qq_plot_left_censoring")
self.plt.show(block=block)
def test_qq_plot_left_censoring2(self, block):
df = load_lcd()
fig, axes = self.plt.subplots(2, 2, figsize=(9, 5))
axes = axes.reshape(4)
for i, model in enumerate([WeibullFitter(), LogNormalFitter(), LogLogisticFitter(), ExponentialFitter()]):
model.fit_left_censoring(df["T"], df["E"])
ax = qq_plot(model, ax=axes[i])
assert ax is not None
self.plt.suptitle("test_qq_plot_left_censoring2")
self.plt.show(block=block)
def test_qq_plot_left_censoring_with_known_distribution(self, block):
N = 300
T_actual = scipy.stats.fisk(8, 0, 1).rvs(N)
MIN_0 = np.percentile(T_actual, 5)
MIN_1 = np.percentile(T_actual, 10)
T = T_actual.copy()
ix = np.random.randint(3, size=N)
T = np.where(ix == 0, np.maximum(T, MIN_0), T)
T = np.where(ix == 1, np.maximum(T, MIN_1), T)
E = T_actual == T
fig, axes = self.plt.subplots(2, 2, figsize=(9, 5))
axes = axes.reshape(4)
for i, model in enumerate([WeibullFitter(), LogNormalFitter(), LogLogisticFitter(), ExponentialFitter()]):
model.fit_left_censoring(T, E)
ax = qq_plot(model, ax=axes[i])
assert ax is not None
self.plt.suptitle("test_qq_plot_left_censoring_with_known_distribution")
self.plt.show(block=block)
def test_qq_plot_with_weights_and_entry(self, block):
from lifelines.utils import survival_events_from_table
df = pd.DataFrame(index=[60, 171, 263, 427, 505, 639])
df["death"] = [1, 1, 1, 0, 1, 0]
df["censored"] = [0, 0, 0, 3, 0, 330]
T, E, W = survival_events_from_table(df, observed_deaths_col="death", censored_col="censored")
wf = WeibullFitter().fit(T, E, weights=W, entry=0.0001 * np.ones_like(T))
ax = qq_plot(wf)
self.plt.suptitle("test_qq_plot_with_weights_and_entry")
self.plt.show(block=block)
def test_qq_plot_right_censoring_with_known_distribution(self, block):
N = 3000
T_actual = scipy.stats.fisk(8, 0, 1).rvs(N)
C = scipy.stats.fisk(8, 0, 1).rvs(N)
E = T_actual < C
T = np.minimum(T_actual, C)
fig, axes = self.plt.subplots(2, 2, figsize=(9, 5))
axes = axes.reshape(4)
for i, model in enumerate([WeibullFitter(), LogNormalFitter(), LogLogisticFitter(), ExponentialFitter()]):
model.fit(T, E)
ax = qq_plot(model, ax=axes[i])
assert ax is not None
self.plt.suptitle("test_qq_plot_right_censoring_with_known_distribution")
self.plt.show(block=block)
def test_rmst_plot_with_single_model(self, block):
waltons = load_waltons()
kmf = KaplanMeierFitter().fit(waltons["T"], waltons["E"])
rmst_plot(kmf, t=40.0)
self.plt.title("test_rmst_plot_with_single_model")
self.plt.show(block=block)
def test_rmst_plot_with_two_model(self, block):
waltons = load_waltons()
ix = waltons["group"] == "control"
kmf_con = KaplanMeierFitter().fit(waltons.loc[ix]["T"], waltons.loc[ix]["E"], label="control")
kmf_exp = KaplanMeierFitter().fit(waltons.loc[~ix]["T"], waltons.loc[~ix]["E"], label="exp")
rmst_plot(kmf_con, model2=kmf_exp, t=40.0)
self.plt.title("test_rmst_plot_with_two_model")
self.plt.show(block=block)
def test_hide_ci_from_legend(self, block):
waltons = load_waltons()
kmf = KaplanMeierFitter().fit(waltons["T"], waltons["E"])
ax = kmf.plot(ci_show=True, ci_only_lines=True, ci_legend=False)
ax.legend(title="Legend title")
self.plt.title("test_hide_ci_from_legend")
self.plt.show(block=block)
def test_logx_plotting(self, block):
waltons = load_waltons()
kmf = KaplanMeierFitter().fit(np.exp(waltons["T"]), waltons["E"], timeline=np.logspace(0, 40))
ax = kmf.plot(logx=True)
wf = WeibullFitter().fit(np.exp(waltons["T"]), waltons["E"], timeline=np.logspace(0, 40))
wf.plot_survival_function(logx=True, ax=ax)
self.plt.title("test_logx_plotting")
self.plt.show(block=block)
def test_survival_probability_calibration(self, block):
rossi = load_rossi()
cph = CoxPHFitter().fit(rossi, "week", "arrest")
survival_probability_calibration(cph, rossi, 25)
self.plt.title("test_survival_probability_calibration")
self.plt.show(block=block)
def test_survival_probability_calibration_on_out_of_sample_data(self, block):
rossi = load_rossi()
rossi = rossi.sample(frac=1.0)
cph = CoxPHFitter().fit(rossi.loc[:300], "week", "arrest")
survival_probability_calibration(cph, rossi.loc[300:], 25)
self.plt.title("test_survival_probability_calibration_on_out_of_sample_data")
self.plt.show(block=block)
def test_at_risk_looks_right_when_scales_are_magnitudes_of_order_larger(self, block):
T1 = list(map(lambda v: v.right, pd.cut(np.arange(32000), 100, retbins=False)))
T2 = list(map(lambda v: v.right, pd.cut(np.arange(9000), 100, retbins=False)))
T3 = list(map(lambda v: v.right, pd.cut(np.arange(900), 100, retbins=False)))
T4 = list(map(lambda v: v.right, pd.cut(np.arange(90), 100, retbins=False)))
T5 = list(map(lambda v: v.right, pd.cut(np.arange(9), 100, retbins=False)))
kmf1 = KaplanMeierFitter().fit(T1, label="Category A")
kmf2 = KaplanMeierFitter().fit(T2, label="Category")
kmf3 = KaplanMeierFitter().fit(T3, label="CatB")
kmf4 = KaplanMeierFitter().fit(T4, label="Categ")
kmf5 = KaplanMeierFitter().fit(T5, label="Categowdary B")
ax = kmf1.plot()
ax = kmf2.plot(ax=ax)
ax = kmf3.plot(ax=ax)
ax = kmf4.plot(ax=ax)
ax = kmf5.plot(ax=ax)
add_at_risk_counts(kmf1, kmf2, kmf3, kmf5, ax=ax)
self.plt.title("test_at_risk_looks_right_when_scales_are_magnitudes_of_order_larger")
self.plt.tight_layout()
self.plt.show(block=block)
def test_at_risk_looks_right_when_scales_are_magnitudes_of_order_larger_single_attribute(self, block):
T1 = list(map(lambda v: v.right, pd.cut(np.arange(32000), 100, retbins=False)))
T2 = list(map(lambda v: v.right, pd.cut(np.arange(9000), 100, retbins=False)))
T3 = list(map(lambda v: v.right, pd.cut(np.arange(900), 100, retbins=False)))
T4 = list(map(lambda v: v.right, pd.cut(np.arange(90), 100, retbins=False)))
T5 = list(map(lambda v: v.right, pd.cut(np.arange(9), 100, retbins=False)))
kmf1 = KaplanMeierFitter().fit(T1, label="Category A")
kmf2 = KaplanMeierFitter().fit(T2, label="Category")
kmf3 = KaplanMeierFitter().fit(T3, label="CatB")
kmf4 = KaplanMeierFitter().fit(T4, label="Categ")
kmf5 = KaplanMeierFitter().fit(T5, label="Categowdary B")
ax = kmf1.plot()
ax = kmf2.plot(ax=ax)
ax = kmf3.plot(ax=ax)
ax = kmf4.plot(ax=ax)
ax = kmf5.plot(ax=ax)
add_at_risk_counts(kmf1, kmf2, kmf3, kmf4, kmf5, ax=ax, rows_to_show=["At risk"])
self.plt.title("test_at_risk_looks_right_when_scales_are_magnitudes_of_order_larger")
self.plt.tight_layout()
self.plt.show(block=block)
| |
import numpy as np
import os
import pickle
import pandas as pd
from scipy.io import loadmat
from pylearn2.format.target_format import OneHotFormatter
from scipy.signal import butter, filtfilt
from sklearn import preprocessing
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix, DefaultViewConverter
class CHBMIT(DenseDesignMatrix):
# These are representative channel MATLAB indices (subtract 1 before using them as Python indices)
rep_channel_matlab_idx = {
1: np.asarray([7,8,11,13,14,21,22]),
3: np.asarray([3,4,6,16,19]),
5: np.asarray([4,5,7,8,11,12,17,18]),
8: np.asarray([6,7,8,10,11,17,18]),
10: np.asarray([2,3,19,20,21]),
20: np.asarray([1,2,3,19,20,21,24,25,26,27,28])
}
def __init__(self, patient_id, which_set, preprocessor_path, data_dir, transform, window_size, batch_size,
specified_files=None, leave_one_out_file=None, axes=('b', 0, 1, 'c'), default_seed=0):
"""
The CHBMIT dataset customized for leave-one-file-out cross validation.
Parameters
----------
patient_id : int
Patient ID.
which_set : string
Name used to specify which partition of the dataset is loaded; must be one of 'train', 'valid', or 'test'.
preprocessor_path : string
File path to store the scaler for pre-processing the EEG data.
data_dir : string
Directory that store the source EEG data.
transform : string
Specify how to transform the data. ('multiple_channels' | 'single_channel')
window_size : int
Size of each sample.
batch_size : int
Size of the batch, used for zero-padding to make the number of samples divisible by the batch size.
specified_files : dictionary
Dictionary to specified which files are used for training, validation and testing.
leave_one_out_file : int
Index of the withheld file.
axes : tuple
axes of the DenseDesignMatrix.
default_seed : int, optional
Seed for random.
For preprocessing, see more in
https://github.com/lisa-lab/pylearn2/blob/master/pylearn2/datasets/preprocessing.py
For customizing dataset, see more in
https://github.com/lisa-lab/pylearn2/blob/master/pylearn2/scripts/icml_2013_wrepl/emotions/emotions_dataset.py
"""
self.patient_id = patient_id
self.data_dir = data_dir
self.preprocessor_path = preprocessor_path
self.window_size = window_size
self.n_classes = 2
self.default_seed = default_seed
self.transform = transform
self.specified_files = specified_files
self.leave_one_out_file = leave_one_out_file
self.batch_size = batch_size
raw_X, raw_y = self._load_data(which_set=which_set)
self.raw_X = raw_X
self.raw_y = raw_y
# Filter representative channels
if self.rep_channel_matlab_idx.get(patient_id) is not None:
# Map the representative MATLAB indices to Python indices.
# The raw data read from the .mat file has already had inactive channels removed,
# so we need to find where each representative MATLAB index appears in the
# used-channel list and convert that position to a Python index.
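# e.g. (illustrative): if used_channel_matlab_idx == [3, 4, 6, 16, 19] and the
# representative MATLAB channel is 6, its Python index is 2.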
self.rep_channel_python_idx = np.empty(0, dtype=int)
for ch in self.rep_channel_matlab_idx[patient_id]:
if ch in self.used_channel_matlab_idx:
ch_python_idx = np.where(ch == self.used_channel_matlab_idx)[0]
self.rep_channel_python_idx = np.append(self.rep_channel_python_idx, ch_python_idx)
else:
raise Exception('There is no representative channel ' + str(ch) + ' in the input data.')
assert np.all(self.used_channel_matlab_idx[self.rep_channel_python_idx] ==
self.rep_channel_matlab_idx[patient_id])
raw_X = raw_X[self.rep_channel_python_idx, :]
self.n_channels = self.rep_channel_python_idx.size
print('Used channel MATLAB index:', self.used_channel_matlab_idx)
print('Representative channel MATLAB index:', self.rep_channel_matlab_idx[patient_id])
print('Representative channel Python index:', self.rep_channel_python_idx)
self.sample_shape = [self.window_size, 1, self.n_channels]
self.sample_size = np.prod(self.sample_shape)
# Preprocessing
if which_set == 'train':
scaler = preprocessing.StandardScaler()
scaler = scaler.fit(raw_X.transpose())
with open(self.preprocessor_path, 'wb') as f:
pickle.dump(scaler, f)
scaled_X = scaler.transform(raw_X.transpose()).transpose()
else:
with open(self.preprocessor_path, 'rb') as f:
scaler = pickle.load(f)
scaled_X = scaler.transform(raw_X.transpose()).transpose()
# Transform data into format usable by the network
if self.transform == 'multiple_channels':
X, y, view_converter = self._transform_multi_channel_data(X=scaled_X, y=raw_y)
elif self.transform == 'single_channel':
X, y, view_converter = self._transform_single_channel_data(X=scaled_X, y=raw_y)
else:
raise Exception('Invalid transform mode.')
# Zero-padding if the batch size is not compatible
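# e.g. (illustrative): with 103 samples and batch_size == 20, extra == 17, so
# 17 all-zero rows are appended to reach 120 samples.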
extra = (batch_size - X.shape[0]) % batch_size
assert (X.shape[0] + extra) % batch_size == 0
if extra > 0:
X = np.concatenate((X, np.zeros((extra, X.shape[1]),
dtype=float)),
axis=0)
y = np.concatenate((y, np.zeros((extra, y.shape[1]),
dtype=int)),
axis=0)
assert X.shape[0] % batch_size == 0
assert y.size % batch_size == 0
# Initialize DenseDesignMatrix
DenseDesignMatrix.__init__(self, X=X, y=y,
view_converter=view_converter,
axes=('b', 0, 1, 'c'))
def _load_data(self, which_set):
# Get seizure files
seizure_files_df = pd.read_table(os.path.join(self.data_dir, 'RECORDS-WITH-SEIZURES.txt'),
sep=' ', names=['filename', 'period'], header=None)
if self.patient_id <10:
search_str = 'chb0' + str(self.patient_id)
else:
search_str = 'chb' + str(self.patient_id)
seizure_files = seizure_files_df['filename'][seizure_files_df['filename'].str.contains(search_str)]
seizure_files = seizure_files.str.replace('.edf', '_mod.mat', case=False).values
print('Seizure files\n', seizure_files)
# Train, cv and test files
if self.specified_files is not None:
train_files = seizure_files[self.specified_files['train_files']]
cv_files = seizure_files[self.specified_files['cv_files']]
test_files = seizure_files[self.specified_files['test_files']]
elif self.leave_one_out_file is not None:
train_files = np.setdiff1d(seizure_files, seizure_files[self.leave_one_out_file])
cv_files = seizure_files[self.leave_one_out_file:self.leave_one_out_file+1]
test_files = seizure_files[self.leave_one_out_file:self.leave_one_out_file+1]
else:
np.random.seed(self.default_seed)
permute_files = np.random.permutation(seizure_files)
train_files = permute_files[:-2]
cv_files = permute_files[-2:-1]
test_files = permute_files[-1:]
print('Train files\n{}'.format(train_files))
print('CV files\n{}'.format(cv_files))
print('Test files\n{}'.format(test_files))
print('')
if which_set == 'train':
print("Loading training data...")
files = train_files
elif which_set == 'valid':
print("Loading validation data...")
files = cv_files
elif which_set == 'test':
print("Loading test data...")
files = test_files
else:
raise ("Invalid set")
print(files)
sampling_rate = -1
n_channels = -1
X = None
y = np.empty(0, dtype=int)
seizure_seconds = np.empty(0, dtype=int)
total_seconds = 0
channel_labels = None
used_channel_matlab_idx = None
for f in files:
mat = loadmat(self.data_dir + '/' + f)
# Number of channels
if n_channels == -1:
n_channels = mat['X'].shape[0]
assert n_channels == mat['X'].shape[0]
# Channel labels
if channel_labels is None:
channel_labels = np.asarray([lb[0][0] for lb in mat['labels']])
assert np.all(channel_labels == np.asarray([lb[0][0] for lb in mat['labels']]))
# Channel index (MATLAB index, start from 1, not 0) used to filter active channels from the source files
if used_channel_matlab_idx is None:
used_channel_matlab_idx = mat['used_channel_idx'][0]
assert np.all(used_channel_matlab_idx == mat['used_channel_idx'][0])
# Sampling rate
if sampling_rate == -1:
sampling_rate = mat['sampling_rate'][0, 0]
assert sampling_rate == mat['sampling_rate'][0, 0]
# EEG data
if X is None:
X = mat['X']
else:
X = np.concatenate((X, mat['X']), axis=1)
# Seizure labels
y = np.append(y, mat['y'][0, :])
# Store index of seizure seconds
seizure_seconds = np.append(seizure_seconds, mat['seizure_second'][0, :] + total_seconds)
# Collect total seconds
total_seconds = total_seconds + (mat['X'].shape[1] / (sampling_rate * 1.0))
assert total_seconds == X.shape[1] / sampling_rate
# Zero-padding if the window size is not compatible
extra = (self.window_size - X.shape[1]) % self.window_size
assert (X.shape[1] + extra) % self.window_size == 0
if extra > 0:
X = np.concatenate((X, np.zeros((X.shape[0], extra),
dtype=float)),
axis=1)
y = np.append(y, np.zeros(extra, dtype=int))
assert X.shape[1] % self.window_size == 0
assert y.size % self.window_size == 0
# Store metadata
self.sampling_rate = sampling_rate
self.n_channels = n_channels
self.seizure_seconds = seizure_seconds
self.total_seconds = total_seconds
self.channel_labels = channel_labels
self.used_channel_matlab_idx = used_channel_matlab_idx
print('Seizure seconds: {}'.format(self.seizure_seconds))
return X, y
def _partition_data(self, X, y, partition_size):
partition_size = max(1, partition_size)
X_parts = np.asarray([X[:, i:i + partition_size] for i in range(0, X.shape[1], partition_size)])
y_parts = np.asarray([y[i:i + partition_size] for i in range(0, y.size, partition_size)])
return X_parts, y_parts
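# Shape sketch for _partition_data above (illustrative, assuming the input has
# already been zero-padded so that n_samples is divisible by the window size):
# with X of shape (n_channels, n_samples) and partition_size == window_size,
# X_parts has shape (n_samples // window_size, n_channels, window_size) and
# y_parts has shape (n_samples // window_size, window_size).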
def _transform_multi_channel_data(self, X, y):
# Data partitioning
parted_X, parted_y = self._partition_data(X=X, y=y, partition_size=self.window_size)
transposed_X = np.transpose(parted_X, [0, 2, 1])
converted_X = np.reshape(transposed_X, (transposed_X.shape[0],
transposed_X.shape[1],
1,
transposed_X.shape[2]))
# Create view converter
view_converter = DefaultViewConverter(shape=self.sample_shape,
axes=('b', 0, 1, 'c'))
# Convert data into a design matrix
view_converted_X = view_converter.topo_view_to_design_mat(converted_X)
assert np.all(converted_X == view_converter.design_mat_to_topo_view(view_converted_X))
# Format the target into proper format
sum_y = np.sum(parted_y, axis=1)
sum_y[sum_y > 0] = 1
one_hot_formatter = OneHotFormatter(max_labels=self.n_classes)
hot_y = one_hot_formatter.format(sum_y)
return view_converted_X, hot_y, view_converter
def _transform_single_channel_data(self, X, y):
windowed_X = np.reshape(X, (-1, self.window_size))
windowed_y = np.reshape(y, (-1, self.window_size))
# Format the target into proper format
sum_y = np.sum(windowed_y, axis=1)
sum_y[sum_y > 0] = 1
# Duplicate the labels for all channels
dup_y = np.tile(sum_y, self.n_channels)
one_hot_formatter = OneHotFormatter(max_labels=self.n_classes)
hot_y = one_hot_formatter.format(dup_y)
return windowed_X, hot_y, None
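# Shape sketch for _transform_single_channel_data above (illustrative):
# reshaping X of shape (n_channels, n_samples) into (-1, window_size) stacks
# the windows channel by channel, giving n_channels * (n_samples // window_size)
# rows; the per-window labels are therefore tiled n_channels times so that every
# channel's copy of a window carries the same seizure/non-seizure label.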
if __name__ == '__main__':
dataset = CHBMIT(patient_id=1,
which_set='train',
preprocessor_path='../models/scaler.pkl',
data_dir='/Users/akara/Workspace/data/chbmit',
transform='single_channel',
window_size=256,
batch_size=20)
# dataset = CHBMIT(patient_id=1,
# which_set='train',
# preprocessor_path='../models/scaler.pkl',
# data_dir='/Users/akara/Workspace/data/chbmit',
# transform='single_channel',
# specified_files={
# 'train_files': np.asarray([0,1,2,3,4,5]),
# 'cv_files': np.asarray([6]),
# 'test_files': np.asarray([6])
# },
# window_size=256,
# batch_size=20)
# dataset = CHBMIT(patient_id=1,
# which_set='train',
# preprocessor_path='../models/scaler.pkl',
# data_dir='/Users/akara/Workspace/data/chbmit',
# transform='single_channel',
# leave_one_out_file=4,
# window_size=256,
# batch_size=20)
# from pylearn2ext.chbmit_eeg_dataset import ChbMitDatasetSDAE
# dataset2 = ChbMitDatasetSDAE(patient_id=1,
# which_set='train',
# scaler_path='../models/scaler.pkl',
# data_dir='/Users/akara/Workspace/data/chbmit',
# sample_size_second=1,
# batch_size=20)
#
# assert np.all(dataset.X == dataset2.X)
| |
import calendar
import math
import re
from datetime import datetime, timedelta, date
import pandas as pd
import pytz
from catalyst.exchange.exchange_errors import InvalidHistoryFrequencyError, \
InvalidHistoryFrequencyAlias
def get_date_from_ms(ms):
"""
The date from the number of milliseconds since the epoch.
Parameters
----------
ms: int
Returns
-------
datetime
"""
return datetime.fromtimestamp(ms / 1000.0)
def get_seconds_from_date(date):
"""
The number of seconds from the epoch.
Parameters
----------
date: datetime
Returns
-------
int
"""
epoch = datetime.utcfromtimestamp(0)
epoch = epoch.replace(tzinfo=pytz.UTC)
return int((date - epoch).total_seconds())
def get_delta(periods, data_frequency):
"""
Get a time delta based on the specified data frequency.
Parameters
----------
periods: int
data_frequency: str
Returns
-------
timedelta
"""
return timedelta(minutes=periods) \
if data_frequency == 'minute' else timedelta(days=periods)
def get_periods_range(freq, start_dt=None, end_dt=None, periods=None):
"""
Get a date range for the specified parameters.
Parameters
----------
start_dt: datetime
end_dt: datetime
freq: str
Returns
-------
DateTimeIndex
"""
if freq == 'minute':
freq = 'T'
elif freq == 'daily':
freq = 'D'
if start_dt is not None and end_dt is not None and periods is None:
return pd.date_range(start_dt, end_dt, freq=freq)
elif periods is not None and (start_dt is not None or end_dt is not None):
_, unit_periods, unit, _ = get_frequency(freq)
adj_periods = periods * unit_periods
# TODO: standardize time aliases to avoid any mapping
unit = 'd' if unit == 'D' else 'h' if unit == 'H' else 'm'
delta = pd.Timedelta(adj_periods, unit)
if start_dt is not None:
return pd.date_range(
start=start_dt,
end=start_dt + delta,
freq=freq,
closed='left',
)
else:
return pd.date_range(
start=end_dt - delta,
end=end_dt,
freq=freq,
)
else:
raise ValueError(
'Choose only two parameters between start_dt, end_dt '
'and periods.'
)
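# Illustrative sketch (not part of the original module; the helper name and
# timestamps are arbitrary examples): the two supported ways of calling
# get_periods_range(). The function is never invoked at import time.
def _get_periods_range_examples():
    start = pd.to_datetime('2018-01-01', utc=True)
    end = pd.to_datetime('2018-01-02', utc=True)
    # Explicit start and end: one entry per minute across the whole range.
    by_range = get_periods_range(freq='minute', start_dt=start, end_dt=end)
    # Start plus a bar count: 60 one-minute bars beginning at `start`
    # (half-open on the right, hence the closed='left' above).
    by_count = get_periods_range(freq='minute', start_dt=start, periods=60)
    return by_range, by_count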
def get_periods(start_dt, end_dt, freq):
"""
The number of periods in the specified range.
Parameters
----------
start_dt: datetime
end_dt: datetime
freq: str
Returns
-------
int
"""
return len(get_periods_range(start_dt=start_dt, end_dt=end_dt, freq=freq))
def get_start_dt(end_dt, bar_count, data_frequency, include_first=True):
"""
The start date based on specified end date and data frequency.
Parameters
----------
end_dt: datetime
bar_count: int
data_frequency: str
include_first
Returns
-------
datetime
"""
periods = bar_count
if periods > 1:
delta = get_delta(periods, data_frequency)
start_dt = end_dt - delta
if not include_first:
start_dt += get_delta(1, data_frequency)
else:
start_dt = end_dt
return start_dt
def get_period_label(dt, data_frequency):
"""
The period label for the specified date and frequency.
Parameters
----------
dt: datetime
data_frequency: str
Returns
-------
str
"""
if data_frequency == 'minute':
return '{}-{:02d}'.format(dt.year, dt.month)
else:
return '{}'.format(dt.year)
def get_month_start_end(dt, first_day=None, last_day=None):
"""
The first and last day of the month for the specified date.
Parameters
----------
dt: datetime
first_day: datetime
last_day: datetime
Returns
-------
datetime, datetime
"""
month_range = calendar.monthrange(dt.year, dt.month)
if first_day:
month_start = first_day
else:
month_start = pd.to_datetime(datetime(
dt.year, dt.month, 1, 0, 0, 0, 0
), utc=True)
if last_day:
month_end = last_day
else:
month_end = pd.to_datetime(datetime(
dt.year, dt.month, month_range[1], 23, 59, 0, 0
), utc=True)
if month_end > pd.Timestamp.utcnow():
month_end = pd.Timestamp.utcnow().floor('1D')
return month_start, month_end
def get_year_start_end(dt, first_day=None, last_day=None):
"""
The first and last day of the year for the specified date.
Parameters
----------
dt: datetime
first_day: datetime
last_day: datetime
Returns
-------
datetime, datetime
"""
year_start = first_day if first_day \
else pd.to_datetime(date(dt.year, 1, 1), utc=True)
year_end = last_day if last_day \
else pd.to_datetime(date(dt.year, 12, 31), utc=True)
if year_end > pd.Timestamp.utcnow():
year_end = pd.Timestamp.utcnow().floor('1D')
return year_start, year_end
def get_frequency(freq, data_frequency=None, supported_freqs=['D', 'H', 'T']):
"""
Takes an arbitrary candle size (e.g. 15T) and converts to the lowest
common denominator supported by the data bundles (e.g. 1T). The data
bundles only support 1T and 1D frequencies. If another frequency
is requested, Catalyst must request the underlying data and resample.
Notes
-----
We're trying to use Pandas convention for frequency aliases.
Parameters
----------
freq: str
data_frequency: str
Returns
-------
str, int, str, str
"""
if data_frequency is None:
data_frequency = 'daily' if freq.upper().endswith('D') else 'minute'
if freq == 'minute':
unit = 'T'
candle_size = 1
elif freq == 'daily':
unit = 'D'
candle_size = 1
else:
freq_match = re.match(r'([0-9].*)?(m|M|d|D|h|H|T)', freq, re.M | re.I)
if freq_match:
candle_size = int(freq_match.group(1)) if freq_match.group(1) \
else 1
unit = freq_match.group(2)
else:
raise InvalidHistoryFrequencyError(frequency=freq)
# TODO: some exchanges support H and W frequencies but not bundles
# Find a way to pass-through these parameters to exchanges
# but resample from minute or daily in backtest mode
# see catalyst/exchange/ccxt/ccxt_exchange.py:242 for mapping between
# Pandas offset aliases (used by Catalyst) and the CCXT timeframes
if unit.lower() == 'd':
unit = 'D'
alias = '{}D'.format(candle_size)
if data_frequency == 'minute':
data_frequency = 'daily'
elif unit.lower() == 'm' or unit == 'T':
unit = 'T'
alias = '{}T'.format(candle_size)
data_frequency = 'minute'
elif unit.lower() == 'h':
data_frequency = 'minute'
if 'H' in supported_freqs:
unit = 'H'
alias = '{}H'.format(candle_size)
else:
candle_size = candle_size * 60
alias = '{}T'.format(candle_size)
else:
raise InvalidHistoryFrequencyAlias(freq=freq)
return alias, candle_size, unit, data_frequency
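# Illustrative sketch (not part of the original module; the helper name and the
# sample frequency strings are arbitrary): a few worked examples of the alias
# normalization performed by get_frequency(). Never called at import time.
def _get_frequency_examples():
    # A 15-minute candle keeps its alias and resolves to minute data.
    assert get_frequency('15T') == ('15T', 15, 'T', 'minute')
    # '4h' is normalized to the Pandas 'H' alias (H is in the default
    # supported_freqs list) and still resolves to minute data.
    assert get_frequency('4h') == ('4H', 4, 'H', 'minute')
    # The 'daily' shorthand maps to a single one-day candle.
    assert get_frequency('daily') == ('1D', 1, 'D', 'daily')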
def from_ms_timestamp(ms):
return pd.to_datetime(ms, unit='ms', utc=True)
def get_epoch():
return pd.to_datetime('1970-1-1', utc=True)
def get_candles_number_from_minutes(unit, candle_size, minutes):
"""
Get the number of bars needed for the given time interval
in minutes.
Notes
-----
Supports only "T", "D" and "H" units
Parameters
----------
unit: str
candle_size : int
minutes: int
Returns
-------
int
"""
if unit == "T":
res = (float(minutes) / candle_size)
elif unit == "H":
res = (minutes / 60.0) / candle_size
else: # unit == "D"
res = (minutes / 1440.0) / candle_size
return int(math.ceil(res))
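# Worked examples (illustrative values): 90 minutes of 15-minute candles needs
# ceil(90 / 15) = 6 bars; 90 minutes of hourly candles needs ceil(1.5) = 2 bars;
# 90 minutes of daily candles still needs 1 bar.
#   get_candles_number_from_minutes('T', 15, 90) -> 6
#   get_candles_number_from_minutes('H', 1, 90)  -> 2
#   get_candles_number_from_minutes('D', 1, 90)  -> 1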
| |
"""
Support for Nest thermostats.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.nest/
"""
import logging
import voluptuous as vol
from homeassistant.components.nest import (
DATA_NEST, SIGNAL_NEST_UPDATE, DOMAIN as NEST_DOMAIN)
from homeassistant.components.climate import (
STATE_AUTO, STATE_COOL, STATE_HEAT, STATE_ECO, ClimateDevice,
PLATFORM_SCHEMA, ATTR_TARGET_TEMP_HIGH, ATTR_TARGET_TEMP_LOW,
ATTR_TEMPERATURE, SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_HIGH, SUPPORT_TARGET_TEMPERATURE_LOW,
SUPPORT_OPERATION_MODE, SUPPORT_AWAY_MODE, SUPPORT_FAN_MODE)
from homeassistant.const import (
TEMP_CELSIUS, TEMP_FAHRENHEIT,
CONF_SCAN_INTERVAL, STATE_ON, STATE_OFF, STATE_UNKNOWN)
from homeassistant.helpers.dispatcher import async_dispatcher_connect
DEPENDENCIES = ['nest']
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_SCAN_INTERVAL):
vol.All(vol.Coerce(int), vol.Range(min=1)),
})
NEST_MODE_HEAT_COOL = 'heat-cool'
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Nest thermostat.
No longer in use.
"""
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up the Nest climate device based on a config entry."""
temp_unit = hass.config.units.temperature_unit
thermostats = await hass.async_add_job(hass.data[DATA_NEST].thermostats)
all_devices = [NestThermostat(structure, device, temp_unit)
for structure, device in thermostats]
async_add_entities(all_devices, True)
class NestThermostat(ClimateDevice):
"""Representation of a Nest thermostat."""
def __init__(self, structure, device, temp_unit):
"""Initialize the thermostat."""
self._unit = temp_unit
self.structure = structure
self.device = device
self._fan_list = [STATE_ON, STATE_AUTO]
# Set the default supported features
self._support_flags = (SUPPORT_TARGET_TEMPERATURE |
SUPPORT_OPERATION_MODE | SUPPORT_AWAY_MODE)
# Not all Nest devices support both cooling and heating, so start with off
# only and add the modes this device actually supports below
self._operation_list = [STATE_OFF]
# Add supported nest thermostat features
if self.device.can_heat:
self._operation_list.append(STATE_HEAT)
if self.device.can_cool:
self._operation_list.append(STATE_COOL)
if self.device.can_heat and self.device.can_cool:
self._operation_list.append(STATE_AUTO)
self._support_flags = (self._support_flags |
SUPPORT_TARGET_TEMPERATURE_HIGH |
SUPPORT_TARGET_TEMPERATURE_LOW)
self._operation_list.append(STATE_ECO)
# feature of device
self._has_fan = self.device.has_fan
if self._has_fan:
self._support_flags = (self._support_flags | SUPPORT_FAN_MODE)
# data attributes
self._away = None
self._location = None
self._name = None
self._humidity = None
self._target_temperature = None
self._temperature = None
self._temperature_scale = None
self._mode = None
self._fan = None
self._eco_temperature = None
self._is_locked = None
self._locked_temperature = None
self._min_temperature = None
self._max_temperature = None
@property
def should_poll(self):
"""Do not need poll thanks using Nest streaming API."""
return False
async def async_added_to_hass(self):
"""Register update signal handler."""
async def async_update_state():
"""Update device state."""
await self.async_update_ha_state(True)
async_dispatcher_connect(self.hass, SIGNAL_NEST_UPDATE,
async_update_state)
@property
def supported_features(self):
"""Return the list of supported features."""
return self._support_flags
@property
def unique_id(self):
"""Return unique ID for this device."""
return self.device.serial
@property
def device_info(self):
"""Return information about the device."""
return {
'identifiers': {
(NEST_DOMAIN, self.device.device_id),
},
'name': self.device.name_long,
'manufacturer': 'Nest Labs',
'model': "Thermostat",
'sw_version': self.device.software_version,
}
@property
def name(self):
"""Return the name of the nest, if any."""
return self._name
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return self._temperature_scale
@property
def current_temperature(self):
"""Return the current temperature."""
return self._temperature
@property
def current_operation(self):
"""Return current operation ie. heat, cool, idle."""
if self._mode in [STATE_HEAT, STATE_COOL, STATE_OFF, STATE_ECO]:
return self._mode
if self._mode == NEST_MODE_HEAT_COOL:
return STATE_AUTO
return STATE_UNKNOWN
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
if self._mode not in (NEST_MODE_HEAT_COOL, STATE_ECO):
return self._target_temperature
return None
@property
def target_temperature_low(self):
"""Return the lower bound temperature we try to reach."""
if self._mode == STATE_ECO:
return self._eco_temperature[0]
if self._mode == NEST_MODE_HEAT_COOL:
return self._target_temperature[0]
return None
@property
def target_temperature_high(self):
"""Return the upper bound temperature we try to reach."""
if self._mode == STATE_ECO:
return self._eco_temperature[1]
if self._mode == NEST_MODE_HEAT_COOL:
return self._target_temperature[1]
return None
@property
def is_away_mode_on(self):
"""Return if away mode is on."""
return self._away
def set_temperature(self, **kwargs):
"""Set new target temperature."""
import nest
temp = None
target_temp_low = kwargs.get(ATTR_TARGET_TEMP_LOW)
target_temp_high = kwargs.get(ATTR_TARGET_TEMP_HIGH)
if self._mode == NEST_MODE_HEAT_COOL:
if target_temp_low is not None and target_temp_high is not None:
temp = (target_temp_low, target_temp_high)
_LOGGER.debug("Nest set_temperature-output-value=%s", temp)
else:
temp = kwargs.get(ATTR_TEMPERATURE)
_LOGGER.debug("Nest set_temperature-output-value=%s", temp)
try:
if temp is not None:
self.device.target = temp
except nest.nest.APIError as api_error:
_LOGGER.error("An error occurred while setting temperature: %s",
api_error)
# restore target temperature
self.schedule_update_ha_state(True)
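# Illustrative service data (example values only, not defaults): in heat-cool
# mode Home Assistant passes both bounds, which are forwarded to python-nest
# as a (low, high) tuple; in every other mode a single temperature is used.
#   climate.set_temperature with {"target_temp_low": 18, "target_temp_high": 24}
#   climate.set_temperature with {"temperature": 21}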
def set_operation_mode(self, operation_mode):
"""Set operation mode."""
if operation_mode in [STATE_HEAT, STATE_COOL, STATE_OFF, STATE_ECO]:
device_mode = operation_mode
elif operation_mode == STATE_AUTO:
device_mode = NEST_MODE_HEAT_COOL
else:
device_mode = STATE_OFF
_LOGGER.error(
"An error occurred while setting device mode. "
"Invalid operation mode: %s", operation_mode)
self.device.mode = device_mode
@property
def operation_list(self):
"""List of available operation modes."""
return self._operation_list
def turn_away_mode_on(self):
"""Turn away on."""
self.structure.away = True
def turn_away_mode_off(self):
"""Turn away off."""
self.structure.away = False
@property
def current_fan_mode(self):
"""Return whether the fan is on."""
if self._has_fan:
# Return whether the fan is on
return STATE_ON if self._fan else STATE_AUTO
# No Fan available so disable slider
return None
@property
def fan_list(self):
"""List of available fan modes."""
if self._has_fan:
return self._fan_list
return None
def set_fan_mode(self, fan_mode):
"""Turn fan on/off."""
if self._has_fan:
self.device.fan = fan_mode.lower()
@property
def min_temp(self):
"""Identify min_temp in Nest API or defaults if not available."""
return self._min_temperature
@property
def max_temp(self):
"""Identify max_temp in Nest API or defaults if not available."""
return self._max_temperature
def update(self):
"""Cache value from Python-nest."""
self._location = self.device.where
self._name = self.device.name
self._humidity = self.device.humidity
self._temperature = self.device.temperature
self._mode = self.device.mode
self._target_temperature = self.device.target
self._fan = self.device.fan
self._away = self.structure.away == 'away'
self._eco_temperature = self.device.eco_temperature
self._locked_temperature = self.device.locked_temperature
self._min_temperature = self.device.min_temperature
self._max_temperature = self.device.max_temperature
self._is_locked = self.device.is_locked
if self.device.temperature_scale == 'C':
self._temperature_scale = TEMP_CELSIUS
else:
self._temperature_scale = TEMP_FAHRENHEIT
| |
# Copyright 2014 Citrix Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
from neutron.common import exceptions
from neutron import context
from neutron import manager
from neutron.plugins.common import constants
from neutron_lbaas.db.loadbalancer import loadbalancer_db
from neutron_lbaas.services.loadbalancer.drivers.netscaler import ncc_client
from neutron_lbaas.services.loadbalancer.drivers.netscaler \
import netscaler_driver
from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancer
LBAAS_DRIVER_CLASS = ('neutron_lbaas.services.loadbalancer.drivers'
'.netscaler.netscaler_driver'
'.NetScalerPluginDriver')
NCC_CLIENT_CLASS = ('neutron_lbaas.services.loadbalancer.drivers'
'.netscaler.ncc_client'
'.NSClient')
LBAAS_PROVIDER_NAME = 'netscaler'
LBAAS_PROVIDER = ('LOADBALANCER:%s:%s:default' %
(LBAAS_PROVIDER_NAME, LBAAS_DRIVER_CLASS))
#Test data
TESTVIP_ID = '52ab5d71-6bb2-457f-8414-22a4ba55efec'
TESTPOOL_ID = 'da477c13-24cd-4c9f-8c19-757a61ef3b9d'
TESTMEMBER_ID = '84dea8bc-3416-4fb0-83f9-2ca6e7173bee'
TESTMONITOR_ID = '9b9245a2-0413-4f15-87ef-9a41ef66048c'
TESTVIP_PORT_ID = '327d9662-ade9-4c74-aaf6-c76f145c1180'
TESTPOOL_PORT_ID = '132c1dbb-d3d8-45aa-96e3-71f2ea51651e'
TESTPOOL_SNATIP_ADDRESS = '10.0.0.50'
TESTPOOL_SNAT_PORT = {
'id': TESTPOOL_PORT_ID,
'fixed_ips': [{'ip_address': TESTPOOL_SNATIP_ADDRESS}]
}
TESTVIP_IP = '10.0.1.100'
TESTMEMBER_IP = '10.0.0.5'
class TestLoadBalancerPluginBase(test_db_loadbalancer
.LoadBalancerPluginDbTestCase):
def setUp(self):
# mock the NSClient class (REST client)
client_mock_cls = mock.patch(NCC_CLIENT_CLASS).start()
#mock the REST methods of the NSClient class
self.client_mock_instance = client_mock_cls.return_value
self.create_resource_mock = self.client_mock_instance.create_resource
self.create_resource_mock.side_effect = mock_create_resource_func
self.update_resource_mock = self.client_mock_instance.update_resource
self.update_resource_mock.side_effect = mock_update_resource_func
self.retrieve_resource_mock = (self.client_mock_instance
.retrieve_resource)
self.retrieve_resource_mock.side_effect = mock_retrieve_resource_func
self.remove_resource_mock = self.client_mock_instance.remove_resource
self.remove_resource_mock.side_effect = mock_remove_resource_func
super(TestLoadBalancerPluginBase, self).setUp(
lbaas_provider=LBAAS_PROVIDER)
loaded_plugins = manager.NeutronManager().get_service_plugins()
self.plugin_instance = loaded_plugins[constants.LOADBALANCER]
class TestNetScalerPluginDriver(TestLoadBalancerPluginBase):
"""Unit tests for the NetScaler LBaaS driver module."""
def setUp(self):
mock.patch.object(netscaler_driver, 'LOG').start()
super(TestNetScalerPluginDriver, self).setUp()
self.plugin_instance.drivers[LBAAS_PROVIDER_NAME] = (
netscaler_driver.NetScalerPluginDriver(self.plugin_instance))
self.driver = self.plugin_instance.drivers[LBAAS_PROVIDER_NAME]
self.context = context.get_admin_context()
def test_create_vip(self):
with contextlib.nested(
self.subnet(),
mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
) as (subnet, mock_get_subnet):
mock_get_subnet.return_value = subnet['subnet']
with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
testvip = self._build_testvip_contents(subnet['subnet'],
pool['pool'])
expectedvip = self._build_expectedvip_contents(
testvip,
subnet['subnet'])
# mock the LBaaS plugin update_status().
self._mock_update_status()
# reset the create_resource() mock
self.create_resource_mock.reset_mock()
# execute the method under test
self.driver.create_vip(self.context, testvip)
# First, assert that create_resource was called once
# with expected params.
self.create_resource_mock.assert_called_once_with(
None,
netscaler_driver.VIPS_RESOURCE,
netscaler_driver.VIP_RESOURCE,
expectedvip)
#Finally, assert that the vip object is now ACTIVE
self.mock_update_status_obj.assert_called_once_with(
mock.ANY,
loadbalancer_db.Vip,
expectedvip['id'],
constants.ACTIVE)
def test_create_vip_without_connection(self):
with contextlib.nested(
self.subnet(),
mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
) as (subnet, mock_get_subnet):
mock_get_subnet.return_value = subnet['subnet']
with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
testvip = self._build_testvip_contents(subnet['subnet'],
pool['pool'])
expectedvip = self._build_expectedvip_contents(
testvip,
subnet['subnet'])
errorcode = ncc_client.NCCException.CONNECTION_ERROR
self.create_resource_mock.side_effect = (
ncc_client.NCCException(errorcode))
# mock the plugin's update_status()
self._mock_update_status()
# reset the create_resource() mock
self.create_resource_mock.reset_mock()
# execute the method under test.
self.driver.create_vip(self.context, testvip)
# First, assert that update_resource was called once
# with expected params.
self.create_resource_mock.assert_called_once_with(
None,
netscaler_driver.VIPS_RESOURCE,
netscaler_driver.VIP_RESOURCE,
expectedvip)
#Finally, assert that the vip object is in ERROR state
self.mock_update_status_obj.assert_called_once_with(
mock.ANY,
loadbalancer_db.Vip,
testvip['id'],
constants.ERROR)
def test_update_vip(self):
with contextlib.nested(
self.subnet(),
mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
) as (subnet, mock_get_subnet):
mock_get_subnet.return_value = subnet['subnet']
with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
with self.vip(pool=pool, subnet=subnet) as vip:
updated_vip = self._build_updated_testvip_contents(
vip['vip'],
subnet['subnet'],
pool['pool'])
expectedvip = self._build_updated_expectedvip_contents(
updated_vip,
subnet['subnet'],
pool['pool'])
# mock the plugin's update_status()
self._mock_update_status()
# reset the update_resource() mock
self.update_resource_mock.reset_mock()
# execute the method under test
self.driver.update_vip(self.context, updated_vip,
updated_vip)
vip_resource_path = "%s/%s" % (
(netscaler_driver.VIPS_RESOURCE,
vip['vip']['id']))
# First, assert that update_resource was called once
# with expected params.
(self.update_resource_mock
.assert_called_once_with(
None,
vip_resource_path,
netscaler_driver.VIP_RESOURCE,
expectedvip))
#Finally, assert that the vip object is now ACTIVE
self.mock_update_status_obj.assert_called_once_with(
mock.ANY,
loadbalancer_db.Vip,
vip['vip']['id'],
constants.ACTIVE)
def test_delete_vip(self):
with contextlib.nested(
self.subnet(),
mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
) as (subnet, mock_get_subnet):
mock_get_subnet.return_value = subnet['subnet']
with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
with contextlib.nested(
self.vip(pool=pool, subnet=subnet),
mock.patch.object(self.driver.plugin, '_delete_db_vip')
) as (vip, mock_delete_db_vip):
mock_delete_db_vip.return_value = None
#reset the remove_resource() mock
self.remove_resource_mock.reset_mock()
# execute the method under test
self.driver.delete_vip(self.context, vip['vip'])
vip_resource_path = "%s/%s" % (
(netscaler_driver.VIPS_RESOURCE,
vip['vip']['id']))
# Assert that remove_resource() was called once
# with expected params.
(self.remove_resource_mock
.assert_called_once_with(None, vip_resource_path))
def test_create_pool(self):
with contextlib.nested(
self.subnet(),
mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet'),
mock.patch.object(self.driver.plugin._core_plugin, 'get_ports'),
mock.patch.object(self.driver.plugin._core_plugin, 'create_port')
) as (subnet, mock_get_subnet, mock_get_ports, mock_create_port):
mock_get_subnet.return_value = subnet['subnet']
mock_get_ports.return_value = None
mock_create_port.return_value = TESTPOOL_SNAT_PORT
testpool = self._build_testpool_contents(subnet['subnet'])
expectedpool = self._build_expectedpool_contents(testpool,
subnet['subnet'])
#reset the create_resource() mock
self.create_resource_mock.reset_mock()
# mock the plugin's update_status()
self._mock_update_status()
# execute the method under test
self.driver.create_pool(self.context, testpool)
# First, assert that create_resource was called once
# with expected params.
(self.create_resource_mock
.assert_called_once_with(None,
netscaler_driver.POOLS_RESOURCE,
netscaler_driver.POOL_RESOURCE,
expectedpool))
#Finally, assert that the pool object is now ACTIVE
self.mock_update_status_obj.assert_called_once_with(
mock.ANY,
loadbalancer_db.Pool,
expectedpool['id'],
constants.ACTIVE)
def test_create_pool_with_error(self):
with contextlib.nested(
self.subnet(),
mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet'),
mock.patch.object(self.driver.plugin._core_plugin, 'get_ports'),
mock.patch.object(self.driver.plugin._core_plugin, 'create_port')
) as (subnet, mock_get_subnet, mock_get_ports, mock_create_port):
mock_get_subnet.return_value = subnet['subnet']
mock_get_ports.return_value = None
mock_create_port.return_value = TESTPOOL_SNAT_PORT
errorcode = ncc_client.NCCException.CONNECTION_ERROR
self.create_resource_mock.side_effect = (ncc_client
.NCCException(errorcode))
testpool = self._build_testpool_contents(subnet['subnet'])
expectedpool = self._build_expectedpool_contents(testpool,
subnet['subnet'])
# mock the plugin's update_status()
self._mock_update_status()
#reset the create_resource() mock
self.create_resource_mock.reset_mock()
# execute the method under test.
self.driver.create_pool(self.context, testpool)
# Also assert that create_resource was called once
# with expected params.
(self.create_resource_mock
.assert_called_once_with(None,
netscaler_driver.POOLS_RESOURCE,
netscaler_driver.POOL_RESOURCE,
expectedpool))
#Finally, assert that the pool object is in ERROR state
self.mock_update_status_obj.assert_called_once_with(
mock.ANY,
loadbalancer_db.Pool,
expectedpool['id'],
constants.ERROR)
def test_create_pool_with_snatportcreate_failure(self):
with contextlib.nested(
self.subnet(),
mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet'),
mock.patch.object(self.driver.plugin._core_plugin, 'get_ports'),
mock.patch.object(self.driver.plugin._core_plugin, 'create_port')
) as (subnet, mock_get_subnet, mock_get_ports, mock_create_port):
mock_get_subnet.return_value = subnet['subnet']
mock_get_ports.return_value = None
mock_create_port.side_effect = exceptions.NeutronException()
testpool = self._build_testpool_contents(subnet['subnet'])
#reset the create_resource() mock
self.create_resource_mock.reset_mock()
# execute the method under test.
self.assertRaises(exceptions.NeutronException,
self.driver.create_pool,
self.context, testpool)
def test_update_pool(self):
with contextlib.nested(
self.subnet(),
mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
) as (subnet, mock_get_subnet):
mock_get_subnet.return_value = subnet['subnet']
with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
updated_pool = self._build_updated_testpool_contents(
pool['pool'],
subnet['subnet'])
expectedpool = self._build_updated_expectedpool_contents(
updated_pool,
subnet['subnet'])
# mock the plugin's update_status()
self._mock_update_status()
# reset the update_resource() mock
self.update_resource_mock.reset_mock()
# execute the method under test.
self.driver.update_pool(self.context, pool['pool'],
updated_pool)
pool_resource_path = "%s/%s" % (
(netscaler_driver.POOLS_RESOURCE,
pool['pool']['id']))
# First, assert that update_resource was called once
# with expected params.
(self.update_resource_mock
.assert_called_once_with(None,
pool_resource_path,
netscaler_driver.POOL_RESOURCE,
expectedpool))
#Finally, assert that the pool object is now ACTIVE
self.mock_update_status_obj.assert_called_once_with(
mock.ANY,
loadbalancer_db.Pool,
pool['pool']['id'],
constants.ACTIVE)
def test_delete_pool(self):
with contextlib.nested(
self.subnet(),
mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
) as (subnet, mock_get_subnet):
mock_get_subnet.return_value = subnet['subnet']
with contextlib.nested(
self.pool(provider=LBAAS_PROVIDER_NAME),
mock.patch.object(self.driver.plugin._core_plugin,
'delete_port'),
mock.patch.object(self.driver.plugin._core_plugin,
'get_ports'),
mock.patch.object(self.driver.plugin,
'get_pools'),
mock.patch.object(self.driver.plugin,
'_delete_db_pool')
) as (pool, mock_delete_port, mock_get_ports, mock_get_pools,
mock_delete_db_pool):
mock_delete_port.return_value = None
mock_get_ports.return_value = [{'id': TESTPOOL_PORT_ID}]
mock_get_pools.return_value = []
mock_delete_db_pool.return_value = None
#reset the remove_resource() mock
self.remove_resource_mock.reset_mock()
# execute the method under test.
self.driver.delete_pool(self.context, pool['pool'])
pool_resource_path = "%s/%s" % (
(netscaler_driver.POOLS_RESOURCE,
pool['pool']['id']))
# Assert that delete_resource was called
# once with expected params.
(self.remove_resource_mock
.assert_called_once_with(None, pool_resource_path))
def test_create_member(self):
with contextlib.nested(
self.subnet(),
mock.patch.object(self.driver.plugin._core_plugin,
'get_subnet')
) as (subnet, mock_get_subnet):
mock_get_subnet.return_value = subnet['subnet']
with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
testmember = self._build_testmember_contents(pool['pool'])
expectedmember = self._build_expectedmember_contents(
testmember)
# mock the plugin's update_status()
self._mock_update_status()
#reset the create_resource() mock
self.create_resource_mock.reset_mock()
# execute the method under test.
self.driver.create_member(self.context, testmember)
# First, assert that create_resource was called once
# with expected params.
(self.create_resource_mock
.assert_called_once_with(
None,
netscaler_driver.POOLMEMBERS_RESOURCE,
netscaler_driver.POOLMEMBER_RESOURCE,
expectedmember))
#Finally, assert that the member object is now ACTIVE
self.mock_update_status_obj.assert_called_once_with(
mock.ANY,
loadbalancer_db.Member,
expectedmember['id'],
constants.ACTIVE)
def test_update_member(self):
with contextlib.nested(
self.subnet(),
mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
) as (subnet, mock_get_subnet):
mock_get_subnet.return_value = subnet['subnet']
with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
with self.member(pool_id=pool['pool']['id']) as member:
updatedmember = (self._build_updated_testmember_contents(
member['member']))
expectedmember = (self
._build_updated_expectedmember_contents(
updatedmember))
# mock the plugin's update_status()
self._mock_update_status()
# reset the update_resource() mock
self.update_resource_mock.reset_mock()
# execute the method under test
self.driver.update_member(self.context,
member['member'],
updatedmember)
member_resource_path = "%s/%s" % (
(netscaler_driver.POOLMEMBERS_RESOURCE,
member['member']['id']))
# First, assert that update_resource was called once
# with expected params.
(self.update_resource_mock
.assert_called_once_with(
None,
member_resource_path,
netscaler_driver.POOLMEMBER_RESOURCE,
expectedmember))
#Finally, assert that the member object is now ACTIVE
self.mock_update_status_obj.assert_called_once_with(
mock.ANY,
loadbalancer_db.Member,
member['member']['id'],
constants.ACTIVE)
def test_delete_member(self):
with contextlib.nested(
self.subnet(),
mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
) as (subnet, mock_get_subnet):
mock_get_subnet.return_value = subnet['subnet']
with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
with contextlib.nested(
self.member(pool_id=pool['pool']['id']),
mock.patch.object(self.driver.plugin, '_delete_db_member')
) as (member, mock_delete_db_member):
mock_delete_db_member.return_value = None
# reset the remove_resource() mock
self.remove_resource_mock.reset_mock()
# execute the method under test
self.driver.delete_member(self.context,
member['member'])
member_resource_path = "%s/%s" % (
(netscaler_driver.POOLMEMBERS_RESOURCE,
member['member']['id']))
# Assert that delete_resource was called once
# with expected params.
(self.remove_resource_mock
.assert_called_once_with(None, member_resource_path))
def test_create_pool_health_monitor(self):
with contextlib.nested(
self.subnet(),
mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
) as (subnet, mock_get_subnet):
mock_get_subnet.return_value = subnet['subnet']
with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
testhealthmonitor = self._build_testhealthmonitor_contents(
pool['pool'])
expectedhealthmonitor = (
self._build_expectedhealthmonitor_contents(
testhealthmonitor))
with mock.patch.object(self.driver.plugin,
'update_pool_health_monitor') as mhm:
# reset the create_resource() mock
self.create_resource_mock.reset_mock()
# execute the method under test.
self.driver.create_pool_health_monitor(self.context,
testhealthmonitor,
pool['pool']['id'])
# First, assert that create_resource was called once
# with expected params.
resource_path = "%s/%s/%s" % (
netscaler_driver.POOLS_RESOURCE,
pool['pool']['id'],
netscaler_driver.MONITORS_RESOURCE)
(self.create_resource_mock
.assert_called_once_with(
None,
resource_path,
netscaler_driver.MONITOR_RESOURCE,
expectedhealthmonitor))
# Finally, assert that the healthmonitor object is
# now ACTIVE.
(mhm.assert_called_once_with(
mock.ANY,
expectedhealthmonitor['id'],
pool['pool']['id'],
constants.ACTIVE, ""))
def test_update_pool_health_monitor(self):
with contextlib.nested(
self.subnet(),
mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
) as (subnet, mock_get_subnet):
mock_get_subnet.return_value = subnet['subnet']
with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
with self.health_monitor(
pool_id=pool['pool']['id']
) as (health_monitor):
updatedhealthmonitor = (
self._build_updated_testhealthmonitor_contents(
health_monitor['health_monitor']))
expectedhealthmonitor = (
self._build_updated_expectedhealthmonitor_contents(
updatedhealthmonitor))
with mock.patch.object(self.driver.plugin,
'update_pool_health_monitor')as mhm:
# reset the update_resource() mock
self.update_resource_mock.reset_mock()
# execute the method under test.
self.driver.update_pool_health_monitor(
self.context,
health_monitor['health_monitor'],
updatedhealthmonitor,
pool['pool']['id'])
monitor_resource_path = "%s/%s" % (
(netscaler_driver.MONITORS_RESOURCE,
health_monitor['health_monitor']['id']))
# First, assert that update_resource was called once
# with expected params.
self.update_resource_mock.assert_called_once_with(
None,
monitor_resource_path,
netscaler_driver.MONITOR_RESOURCE,
expectedhealthmonitor)
#Finally, assert that the member object is now ACTIVE
(mhm.assert_called_once_with(
mock.ANY,
health_monitor['health_monitor']['id'],
pool['pool']['id'],
constants.ACTIVE, ""))
def test_delete_pool_health_monitor(self):
with contextlib.nested(
self.subnet(),
mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet')
) as (subnet, mock_get_subnet):
mock_get_subnet.return_value = subnet['subnet']
with self.pool(provider=LBAAS_PROVIDER_NAME) as pool:
with contextlib.nested(
self.health_monitor(pool_id=pool['pool']['id']),
mock.patch.object(self.driver.plugin,
'_delete_db_pool_health_monitor')
) as (health_monitor, mock_delete_db_monitor):
mock_delete_db_monitor.return_value = None
# reset the remove_resource() mock
self.remove_resource_mock.reset_mock()
# execute the method under test.
self.driver.delete_pool_health_monitor(
self.context,
health_monitor['health_monitor'],
pool['pool']['id'])
monitor_resource_path = "%s/%s/%s/%s" % (
netscaler_driver.POOLS_RESOURCE,
pool['pool']['id'],
netscaler_driver.MONITORS_RESOURCE,
health_monitor['health_monitor']['id'])
# Assert that delete_resource was called once
# with expected params.
self.remove_resource_mock.assert_called_once_with(
None,
monitor_resource_path)
def _build_testvip_contents(self, subnet, pool):
vip_obj = dict(id=TESTVIP_ID,
name='testvip',
description='a test vip',
tenant_id=self._tenant_id,
subnet_id=subnet['id'],
address=TESTVIP_IP,
port_id=TESTVIP_PORT_ID,
pool_id=pool['id'],
protocol='HTTP',
protocol_port=80,
connection_limit=1000,
admin_state_up=True,
status='PENDING_CREATE',
status_description='')
return vip_obj
def _build_expectedvip_contents(self, testvip, subnet):
expectedvip = dict(id=testvip['id'],
name=testvip['name'],
description=testvip['description'],
tenant_id=testvip['tenant_id'],
subnet_id=testvip['subnet_id'],
address=testvip['address'],
network_id=subnet['network_id'],
port_id=testvip['port_id'],
pool_id=testvip['pool_id'],
protocol=testvip['protocol'],
protocol_port=testvip['protocol_port'],
connection_limit=testvip['connection_limit'],
admin_state_up=testvip['admin_state_up'])
return expectedvip
def _build_updated_testvip_contents(self, testvip, subnet, pool):
#update some updateable fields of the vip
testvip['name'] = 'updated testvip'
testvip['description'] = 'An updated version of test vip'
testvip['connection_limit'] = 2000
return testvip
def _build_updated_expectedvip_contents(self, testvip, subnet, pool):
expectedvip = dict(name=testvip['name'],
description=testvip['description'],
connection_limit=testvip['connection_limit'],
admin_state_up=testvip['admin_state_up'],
pool_id=testvip['pool_id'])
return expectedvip
def _build_testpool_contents(self, subnet):
pool_obj = dict(id=TESTPOOL_ID,
name='testpool',
description='a test pool',
tenant_id=self._tenant_id,
subnet_id=subnet['id'],
protocol='HTTP',
vip_id=None,
admin_state_up=True,
lb_method='ROUND_ROBIN',
status='PENDING_CREATE',
status_description='',
members=[],
health_monitors=[],
health_monitors_status=None,
provider=LBAAS_PROVIDER_NAME)
return pool_obj
def _build_expectedpool_contents(self, testpool, subnet):
expectedpool = dict(id=testpool['id'],
name=testpool['name'],
description=testpool['description'],
tenant_id=testpool['tenant_id'],
subnet_id=testpool['subnet_id'],
network_id=subnet['network_id'],
protocol=testpool['protocol'],
vip_id=testpool['vip_id'],
lb_method=testpool['lb_method'],
snat_ip=TESTPOOL_SNATIP_ADDRESS,
port_id=TESTPOOL_PORT_ID,
admin_state_up=testpool['admin_state_up'])
return expectedpool
def _build_updated_testpool_contents(self, testpool, subnet):
updated_pool = dict(testpool.items())
updated_pool['name'] = 'updated testpool'
updated_pool['description'] = 'An updated version of test pool'
updated_pool['lb_method'] = 'LEAST_CONNECTIONS'
updated_pool['admin_state_up'] = True
updated_pool['provider'] = LBAAS_PROVIDER_NAME
updated_pool['status'] = 'PENDING_UPDATE'
updated_pool['status_description'] = ''
updated_pool['members'] = []
updated_pool["health_monitors"] = []
updated_pool["health_monitors_status"] = None
return updated_pool
def _build_updated_expectedpool_contents(self, testpool, subnet):
expectedpool = dict(name=testpool['name'],
description=testpool['description'],
lb_method=testpool['lb_method'],
admin_state_up=testpool['admin_state_up'])
return expectedpool
def _build_testmember_contents(self, pool):
member_obj = dict(
id=TESTMEMBER_ID,
tenant_id=self._tenant_id,
pool_id=pool['id'],
address=TESTMEMBER_IP,
protocol_port=8080,
weight=2,
admin_state_up=True,
status='PENDING_CREATE',
status_description='')
return member_obj
def _build_expectedmember_contents(self, testmember):
expectedmember = dict(
id=testmember['id'],
tenant_id=testmember['tenant_id'],
pool_id=testmember['pool_id'],
address=testmember['address'],
protocol_port=testmember['protocol_port'],
weight=testmember['weight'],
admin_state_up=testmember['admin_state_up'])
return expectedmember
def _build_updated_testmember_contents(self, testmember):
updated_member = dict(testmember.items())
updated_member.update(
weight=3,
admin_state_up=True,
status='PENDING_CREATE',
status_description=''
)
return updated_member
def _build_updated_expectedmember_contents(self, testmember):
expectedmember = dict(weight=testmember['weight'],
pool_id=testmember['pool_id'],
admin_state_up=testmember['admin_state_up'])
return expectedmember
def _build_testhealthmonitor_contents(self, pool):
monitor_obj = dict(
id=TESTMONITOR_ID,
tenant_id=self._tenant_id,
type='TCP',
delay=10,
timeout=5,
max_retries=3,
admin_state_up=True,
pools=[])
pool_obj = dict(status='PENDING_CREATE',
status_description=None,
pool_id=pool['id'])
monitor_obj['pools'].append(pool_obj)
return monitor_obj
def _build_expectedhealthmonitor_contents(self, testhealthmonitor):
expectedmonitor = dict(id=testhealthmonitor['id'],
tenant_id=testhealthmonitor['tenant_id'],
type=testhealthmonitor['type'],
delay=testhealthmonitor['delay'],
timeout=testhealthmonitor['timeout'],
max_retries=testhealthmonitor['max_retries'],
admin_state_up=(
testhealthmonitor['admin_state_up']))
return expectedmonitor
def _build_updated_testhealthmonitor_contents(self, testmonitor):
updated_monitor = dict(testmonitor.items())
updated_monitor.update(
delay=30,
timeout=3,
max_retries=5,
admin_state_up=True
)
return updated_monitor
def _build_updated_expectedhealthmonitor_contents(self, testmonitor):
expectedmonitor = dict(delay=testmonitor['delay'],
timeout=testmonitor['timeout'],
max_retries=testmonitor['max_retries'],
admin_state_up=testmonitor['admin_state_up'])
return expectedmonitor
def _mock_update_status(self):
#patch the plugin's update_status() method with a mock object
self.mock_update_status_patcher = mock.patch.object(
self.driver.plugin,
'update_status')
self.mock_update_status_obj = self.mock_update_status_patcher.start()
def mock_create_resource_func(*args, **kwargs):
return 201, {}
def mock_update_resource_func(*args, **kwargs):
return 202, {}
def mock_retrieve_resource_func(*args, **kwargs):
return 200, {}
def mock_remove_resource_func(*args, **kwargs):
return 200, {}
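# The stub functions above stand in for the NSClient REST helpers; each returns
# a (status code, body) pair (201 Created, 202 Accepted, 200 OK), presumably
# matching the shape the real client returns, so the driver under test never
# needs a live NetScaler Control Center connection.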
| |
#!/usr/bin/env python3
import argparse
import os
import subprocess
import sys
def setup():
global args, workdir
programs = ['ruby', 'git', 'apt-cacher-ng', 'make', 'wget']
if args.kvm:
programs += ['python-vm-builder', 'qemu-kvm', 'qemu-utils']
elif args.docker:
dockers = ['docker.io', 'docker-ce']
for i in dockers:
return_code = subprocess.call(['sudo', 'apt-get', 'install', '-qq', i])
if return_code == 0:
break
if return_code != 0:
print('Cannot find any way to install docker', file=sys.stderr)
exit(1)
else:
programs += ['lxc', 'debootstrap']
subprocess.check_call(['sudo', 'apt-get', 'install', '-qq'] + programs)
if not os.path.isdir('gitian.sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/bitcoin-core/gitian.sigs.git'])
if not os.path.isdir('bitsend-detached-sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/bitcoin-core/bitsend-detached-sigs.git'])
if not os.path.isdir('gitian-builder'):
subprocess.check_call(['git', 'clone', 'https://github.com/devrandom/gitian-builder.git'])
if not os.path.isdir('bitsend'):
subprocess.check_call(['git', 'clone', 'https://github.com/LIMXTEC/BitSend.git'])
os.chdir('gitian-builder')
make_image_prog = ['bin/make-base-vm', '--suite', 'bionic', '--arch', 'amd64']
if args.docker:
make_image_prog += ['--docker']
elif not args.kvm:
make_image_prog += ['--lxc']
subprocess.check_call(make_image_prog)
os.chdir(workdir)
if args.is_bionic and not args.kvm and not args.docker:
subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net'])
print('Reboot is required')
exit(0)
def build():
global args, workdir
os.makedirs('bitsend-binaries/' + args.version, exist_ok=True)
print('\nBuilding Dependencies\n')
os.chdir('gitian-builder')
os.makedirs('inputs', exist_ok=True)
subprocess.check_call(['wget', '-N', '-P', 'inputs', 'http://downloads.sourceforge.net/project/osslsigncode/osslsigncode/osslsigncode-1.7.1.tar.gz'])
subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://bitcoincore.org/cfields/osslsigncode-Backports-to-1.7.1.patch'])
subprocess.check_call(['make', '-C', '../bitsend/depends', 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common'])
if args.linux:
print('\nCompiling ' + args.version + ' Linux')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'bitsend='+args.commit, '--url', 'bitsend='+args.url, '../bitsend/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-linux', '--destination', '../gitian.sigs/', '../bitsend/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call('mv build/out/bitsend-*.tar.gz build/out/src/bitsend-*.tar.gz ../bitsend-binaries/'+args.version, shell=True)
if args.windows:
print('\nCompiling ' + args.version + ' Windows')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'bitsend='+args.commit, '--url', 'bitsend='+args.url, '../bitsend/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-unsigned', '--destination', '../gitian.sigs/', '../bitsend/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call('mv build/out/bitsend-*-win-unsigned.tar.gz inputs/bitsend-win-unsigned.tar.gz', shell=True)
subprocess.check_call('mv build/out/bitsend-*.zip build/out/bitsend-*.exe ../bitsend-binaries/'+args.version, shell=True)
if args.macos:
print('\nCompiling ' + args.version + ' MacOS')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'bitsend='+args.commit, '--url', 'bitsend='+args.url, '../bitsend/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-unsigned', '--destination', '../gitian.sigs/', '../bitsend/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call('mv build/out/bitsend-*-osx-unsigned.tar.gz inputs/bitsend-osx-unsigned.tar.gz', shell=True)
subprocess.check_call('mv build/out/bitsend-*.tar.gz build/out/bitsend-*.dmg ../bitsend-binaries/'+args.version, shell=True)
os.chdir(workdir)
if args.commit_files:
print('\nCommitting '+args.version+' Unsigned Sigs\n')
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'add', args.version+'-linux/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-win-unsigned/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-osx-unsigned/'+args.signer])
subprocess.check_call(['git', 'commit', '-m', 'Add '+args.version+' unsigned sigs for '+args.signer])
os.chdir(workdir)
def sign():
global args, workdir
os.chdir('gitian-builder')
if args.windows:
print('\nSigning ' + args.version + ' Windows')
subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature='+args.commit, '../bitsend/contrib/gitian-descriptors/gitian-win-signer.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-signed', '--destination', '../gitian.sigs/', '../bitsend/contrib/gitian-descriptors/gitian-win-signer.yml'])
subprocess.check_call('mv build/out/bitsend-*win64-setup.exe ../bitsend-binaries/'+args.version, shell=True)
subprocess.check_call('mv build/out/bitsend-*win32-setup.exe ../bitsend-binaries/'+args.version, shell=True)
if args.macos:
print('\nSigning ' + args.version + ' MacOS')
subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature='+args.commit, '../bitsend/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-signed', '--destination', '../gitian.sigs/', '../bitsend/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call('mv build/out/bitsend-osx-signed.dmg ../bitsend-binaries/'+args.version+'/bitsend-'+args.version+'-osx.dmg', shell=True)
os.chdir(workdir)
if args.commit_files:
print('\nCommitting '+args.version+' Signed Sigs\n')
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'add', args.version+'-win-signed/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-osx-signed/'+args.signer])
subprocess.check_call(['git', 'commit', '-a', '-m', 'Add '+args.version+' signed binary sigs for '+args.signer])
os.chdir(workdir)
def verify():
global args, workdir
os.chdir('gitian-builder')
print('\nVerifying v'+args.version+' Linux\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-linux', '../bitsend/contrib/gitian-descriptors/gitian-linux.yml'])
print('\nVerifying v'+args.version+' Windows\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-unsigned', '../bitsend/contrib/gitian-descriptors/gitian-win.yml'])
print('\nVerifying v'+args.version+' MacOS\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-unsigned', '../bitsend/contrib/gitian-descriptors/gitian-osx.yml'])
print('\nVerifying v'+args.version+' Signed Windows\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-signed', '../bitsend/contrib/gitian-descriptors/gitian-win-signer.yml'])
print('\nVerifying v'+args.version+' Signed MacOS\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-signed', '../bitsend/contrib/gitian-descriptors/gitian-osx-signer.yml'])
os.chdir(workdir)
def main():
global args, workdir
parser = argparse.ArgumentParser(usage='%(prog)s [options] signer version')
parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Indicate that the version argument is for a commit or branch')
parser.add_argument('-u', '--url', dest='url', default='https://github.com/LIMXTEC/BitSend', help='Specify the URL of the repository. Default is %(default)s')
parser.add_argument('-v', '--verify', action='store_true', dest='verify', help='Verify the Gitian build')
parser.add_argument('-b', '--build', action='store_true', dest='build', help='Do a Gitian build')
parser.add_argument('-s', '--sign', action='store_true', dest='sign', help='Make signed binaries for Windows and MacOS')
parser.add_argument('-B', '--buildsign', action='store_true', dest='buildsign', help='Build both signed and unsigned binaries')
parser.add_argument('-o', '--os', dest='os', default='lwm', help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, w for Windows, m for MacOS')
parser.add_argument('-j', '--jobs', dest='jobs', default='2', help='Number of processes to use. Default %(default)s')
parser.add_argument('-m', '--memory', dest='memory', default='2000', help='Memory to allocate in MiB. Default %(default)s')
parser.add_argument('-k', '--kvm', action='store_true', dest='kvm', help='Use KVM instead of LXC')
parser.add_argument('-d', '--docker', action='store_true', dest='docker', help='Use Docker instead of LXC')
parser.add_argument('-S', '--setup', action='store_true', dest='setup', help='Set up the Gitian building environment. Uses LXC. If you want to use KVM, use the --kvm option. Only works on Debian-based systems (Ubuntu, Debian)')
parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', help='Create the assert file for detached signing. Will not commit anything.')
parser.add_argument('-n', '--no-commit', action='store_false', dest='commit_files', help='Do not commit anything to git')
parser.add_argument('signer', help='GPG signer to sign each build assert file')
parser.add_argument('version', help='Version number, commit, or branch to build. If building a commit or branch, the -c option must be specified')
args = parser.parse_args()
workdir = os.getcwd()
args.linux = 'l' in args.os
args.windows = 'w' in args.os
args.macos = 'm' in args.os
args.is_bionic = b'bionic' in subprocess.check_output(['lsb_release', '-cs'])
if args.buildsign:
args.build=True
args.sign=True
if args.kvm and args.docker:
raise Exception('Error: cannot have both kvm and docker')
args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign'
# Set environment variable USE_LXC or USE_DOCKER so gitian-builder knows whether to use LXC or Docker
if args.docker:
os.environ['USE_DOCKER'] = '1'
elif not args.kvm:
os.environ['USE_LXC'] = '1'
if 'GITIAN_HOST_IP' not in os.environ:
os.environ['GITIAN_HOST_IP'] = '10.0.3.1'
if 'LXC_GUEST_IP' not in os.environ:
os.environ['LXC_GUEST_IP'] = '10.0.3.5'
# Disable for MacOS if no SDK found
if args.macos and not os.path.isfile('gitian-builder/inputs/MacOSX10.11.sdk.tar.gz'):
print('Cannot build for MacOS, SDK does not exist. Will build for other OSes')
args.macos = False
script_name = os.path.basename(sys.argv[0])
# Signer and version shouldn't be empty
if args.signer == '':
print(script_name+': Missing signer.')
print('Try '+script_name+' --help for more information')
exit(1)
if args.version == '':
print(script_name+': Missing version.')
print('Try '+script_name+' --help for more information')
exit(1)
# Add leading 'v' for tags
args.commit = ('' if args.commit else 'v') + args.version
print(args.commit)
if args.setup:
setup()
os.chdir('bitsend')
subprocess.check_call(['git', 'fetch'])
subprocess.check_call(['git', 'checkout', args.commit])
os.chdir(workdir)
if args.build:
build()
if args.sign:
sign()
if args.verify:
verify()
if __name__ == '__main__':
main()
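# Example invocation (a minimal sketch; the script name, signer and version below are
# hypothetical, and the Gitian/LXC environment is assumed to have been prepared via --setup):
#   ./gitian-build.py --setup
#   ./gitian-build.py --build --jobs 4 --memory 4000 my-gpg-key 1.0.0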
| |
import logging
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import rcParams
import scipy.stats
from scipy.special import gamma
import datetime
from pathlib import Path
rcParams.update({'figure.autolayout': True})
# Set a larger default size for plots
rcParams['figure.figsize'] = [12, 8]
rcParams['savefig.format'] = 'png'
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.WARN)
class ParameterError(Exception):
def __init__(self, *args):
default_str = 'Values for "beta" and "eta" not found; Run the "fit" method or assign values explicitly.'
super().__init__(default_str, *args)
# convenience functions
def _weibull_ticks(y, _):
# Round to 6 decimal places to deal w/roundoff error from exp function
ycoord = round(100 * (1 - np.exp(-np.exp(y))),6)
# Format to only as many digits past the decimal as needed
for i in range(0, 6):
if ycoord == round(ycoord, i):
tick_fmt = '{:.' + '{0}'.format(i) + 'f}%'
return tick_fmt.format(100 * (1 - np.exp(-np.exp(y))))
return '{:.6f}%'.format(100 * (1 - np.exp(-np.exp(y))))
def _ftolnln(f):
return np.log(-np.log(1.0 - np.asarray(f)))
class Analysis:
r"""
Calculates and plots data points and curves for a standard 2-parameter Weibull for analyzing life data.
:param data: A list or numpy array of life data, i.e. ``[127, 234, 329, 444]``
:param suspended: A list or numpy array of suspensions as boolean values, i.e. ``[False, False, True, True]``. A value of ``True`` means that the test was stopped - or that the item was removed from the test - before the item failed.
:param unit: The unit ('hour', 'minute', 'cycle', etc.). This is used to add some useful information to the visualizations. For instance, if the unit is ``hour``, then the x-axis will be labeled in hours.
:ivar beta: The current value of the shape parameter, :math:`\beta`. This value is initially set to ``None``. The proper value for ``beta`` will be calculated on call to the ``fit()`` method. The user may also set this value directly.
:ivar eta: The current value of the scale parameter, :math:`\eta`. This value is initially set to ``None``. The proper value for ``eta`` will be calculated on call to the ``fit()`` method. The user may also set this value directly.
:ivar _fit_test: Basic statistics regarding the results of ``fit()``, such as :math:`R^2` and P-value.
"""
def __init__(self, data: list, suspended: list=None, unit: str='cycle'):
self.x_unit = unit
self._fit_test = None
self.beta, self.eta, self.tzero = None, None, None
self.analyst, self.company = None, None
self.plot_title = ''
dat = pd.DataFrame({'data': data})
dat.index = np.arange(1, len(dat) + 1)
# a suspension is when a unit is removed from test before it has failed
if not suspended:
dat['susp'] = [False if x else True for x in data]
dat['data'].fillna(dat['data'].max(), inplace=True)
else:
dat['susp'] = suspended
if dat['susp'].all():
raise ValueError('Data must contain at least one observed event')
dat.sort_values('data', inplace=True)
dat['rank'] = np.arange(1, len(dat) + 1)
dat['f_rank'] = np.nan
dat.loc[dat['susp'] == False, 'f_rank'] = np.arange(1,
len(dat[dat['susp'] == False]) + 1)
di = dat['susp'] == False
dat.loc[di, 'med_rank'] = self._med_ra(dat.loc[di, 'f_rank'])
dat['reverse_rank'] = dat['rank'].values[::-1]
self.data = dat
logger.debug('\n{}'.format(self.data))
self._calc_adjrank()
def _calc_adjrank(self):
dat = self.data
dat['adj_rank'] = np.nan
fdat = dat[dat['susp'] == False]
N = len(fdat)
padj = [0]
for i in range(N):
n = fdat.index[i]
pn = (fdat.loc[n, 'reverse_rank'] * padj[-1] +
(len(dat) + 1.)) / (fdat.loc[n, 'reverse_rank'] + 1)
padj.append(pn)
dat.loc[n, 'adj_rank'] = pn
dat['adjm_rank'] = self._med_ra(dat['adj_rank'])
def _med_ra(self, i):
"""Calculate median rank using Benard's approximation."""
i = np.asarray(i)
med_rank = (i - 0.3) / (len(i) + 0.4)
return med_rank
def _linear_regression(self):
r"""
Calculate :math:`\beta` and :math:`\eta` using a curve fit of the supplied data.
:return: None
"""
x0 = np.log(self.data.dropna()['data'].values)
y = _ftolnln(self.data.dropna()['adjm_rank'])
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(y, x0)
beta = 1.0/slope
x_intercept = - intercept / beta
eta = np.exp(-x_intercept/slope)
self.beta = beta
self.eta = eta
logger.debug('beta: {:.2f}, eta: {:.2f}'.format(self.beta, self.eta))
self._fit_test = pd.Series({'r_squared': r_value ** 2, 'p_value': p_value, 'fit method': 'linear regression'})
def _maximum_likelihood_estimation(self):
r"""
Calculate :math:`\beta` and :math:`\eta` using the maximum likelihood estimation method.
:return: None
"""
data = self.data[['data', 'susp']].copy()
df_failed = data[data.susp == False].copy()
dtf_failed = df_failed["data"].values
df_failed["ln_x_div_r"] = df_failed.apply(lambda s: np.log(s['data'])/len(df_failed), axis=1)
dtf_all = self.data['data'].values
# use the Newton-Raphson method for estimating the shape parameter
# give an initial value for the shape parameter:
shape = (((6.0 / np.pi ** 2)
* (np.sum(np.log(dtf_all) ** 2)
- ((np.sum(np.log(dtf_all))) ** 2) / dtf_all.size))
/ (dtf_all.size - 1)) ** -0.5
# 10 iterations of the Newton-Raphson method
for i in range(1, 11):
a = np.sum(np.log(dtf_failed) * 1.0) / dtf_failed.size
b = np.sum(dtf_all ** shape)
c = np.sum((dtf_all ** shape) * np.log(dtf_all))
h = np.sum((dtf_all ** shape) * (np.log(dtf_all)) ** 2)
shape = shape + (a + (1.0 / shape) - (c / b)) / ((1.0 / shape ** 2) + ((b * h) - c ** 2) / b ** 2)
shape = max(shape, 0.005)
scale = (np.sum((dtf_all ** shape) / len(df_failed))) ** (1 / shape)
self.beta = shape
self.eta = scale
self._fit_test = pd.Series({'fit method': 'maximum likelihood estimation'})
def _confidence(self, confidence=0.95):
r"""
Calculate confidence intervals for :math:`\beta` and :math:`\eta` using the Fisher Matrix method.
:return: None
"""
# following the procedure as shown on page 54 of Weibull Analysis by Brian Dodson
data = self.data[['data', 'susp']].copy().sort_values('susp')
uncensored = data[data['susp'] == False]
censored = data[data['susp'] == True]
# step 3
def calc(t):
first_term = self.beta / self.eta ** 2
second_term = ((t/self.eta) ** self.beta) * (self.beta / self.eta ** 2) * (self.beta + 1)
return first_term - second_term
data['step3'] = uncensored['data'].apply(func=calc)
def calc(t):
first_term = -1.0 / (self.beta ** 2)
second_term = ((t / self.eta) ** self.beta) * (np.log(t / self.eta) ** 2)
return first_term - second_term
data['step4'] = uncensored['data'].apply(func=calc)
def calc(t):
first_term = -1.0 / self.eta
second_term = ((t / self.eta) ** self.beta) * (1.0 / self.eta) * (self.beta * np.log(t / self.eta) + 1.0)
return first_term + second_term
data['step5'] = uncensored['data'].apply(func=calc)
def calc(t):
return -((t / self.eta) ** self.beta) * (self.beta / (self.eta ** 2)) * (self.beta + 1.0)
data['step6'] = censored['data'].apply(func=calc)
def calc(t):
return -((t / self.eta) ** self.beta) * (np.log(t / self.eta) ** 2)
data['step7'] = censored['data'].apply(func=calc)
def calc(t):
return ((t / self.eta) ** self.beta) * (1.0 / self.eta) * ((self.beta * np.log(t / self.eta)) + 1.0)
data['step8'] = censored['data'].apply(func=calc)
f11 = -np.sum(data['step3']) - np.sum(data['step6'].replace(np.nan, 0))
f12 = -np.sum(data['step5']) - np.sum(data['step8'].replace(np.nan, 0))
f22 = -np.sum(data['step4']) - np.sum(data['step7'].replace(np.nan, 0))
f = np.ndarray(shape=(2, 2), buffer=np.array([f11, f12, f12, f22]))
fprime = np.linalg.inv(f)
nd = scipy.stats.norm
k_index = (1.0 - confidence)/2 + confidence
k = nd.ppf(k_index)
beta_lower = self.beta / (np.e ** (k * np.sqrt(fprime[1, 1]) / self.beta))
beta_upper = self.beta * np.e ** (k * np.sqrt(fprime[1, 1]) / self.beta)
eta_lower = self.eta / (np.e ** (k * np.sqrt(fprime[0, 0]) / self.eta))
eta_upper = self.eta * np.e ** (k * np.sqrt(fprime[0, 0]) / self.eta)
self._fit_test['confidence'] = confidence
self._fit_test['beta lower limit'] = beta_lower
self._fit_test['beta nominal'] = self.beta
self._fit_test['beta upper limit'] = beta_upper
self._fit_test['eta lower limit'] = eta_lower
self._fit_test['eta nominal'] = self.eta
self._fit_test['eta upper limit'] = eta_upper
def fit(self, method: str='lr', confidence_level: float=0.9):
r"""
Calculate :math:`\beta` and :math:`\eta` using a linear regression
or using the maximum likelihood method, depending on the 'method' value.
:param method: 'lr' for linear estimation or 'mle' for maximum likelihood estimation
:param confidence_level: A number between 0.001 and 0.999 which expresses the confidence levels desired. This confidence level is reflected in all subsequent actions, especially in plots, and can also affect several internal variables which are shown in ``stats``.
:return: None
"""
if method not in ['lr', 'mle']:
raise ValueError('The method specified must be '
'linear regression "lr" or maximum '
'likelihood estimation "mle"')
if method == 'lr':
if len(self.data) >= 15:
logger.warning('the maximum likelihood method is likely '
'to yield better results with {} data points'.format(len(self.data)))
self._linear_regression()
elif method == 'mle':
if len(self.data) < 15:
logger.warning('the linear regression method is likely '
'to yield better results with {} data points'.format(len(self.data)))
self._maximum_likelihood_estimation()
self._confidence(confidence_level)
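# A minimal usage sketch (hypothetical life data; assumes matplotlib can render a figure):
#   analysis = Analysis([127, 234, 329, 444], unit='hour')
#   analysis.fit(method='lr', confidence_level=0.95)
#   print(analysis.stats)
#   analysis.probplot()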
def probplot(self, show: bool=True, file_name: str=None,
watermark_text=None, **kwargs):
r"""
Generate a probability plot. Use this to show the data points plotted with
the beta and eta values.
:param show: True if the plot is to be shown, false if otherwise
:param file_name: the file name to be passed to ``matplotlib.pyplot.savefig``
:param watermark_text: the text to include on the plot as a watermark
:param kwargs: valid matplotlib options
:return: None
"""
if not self.eta or not self.beta:
raise ParameterError
# Check if tzero has been defined & use it if it has [future placeholder]
tzero = 0
# if self.tzero != None: tzero = self.tzero
susp = any(self.data['susp'])
# The user can define their own title w/the 'plot_title' property
# If it's a zero-length string, one wasn't specified, so use the Default
if self.plot_title == '': self.plot_title = 'Weibull Probability Plot'
# If the title is defined as 'None' then don't display a title at all
if self.plot_title is not None: plt.title(self.plot_title)
ax = plt.gca()
plt.xlabel('{}s'.format(self.x_unit))
plt.ylabel('Unreliability, F(t) = 1 - R(t) (%)')
# Apply formatted tick marks to the y-axis
formatter = mpl.ticker.FuncFormatter(_weibull_ticks)
ax.yaxis.set_major_formatter(formatter)
# Gray Grid Lines
major_grid_color = '#BFBFBF'
minor_grid_color = '#DFDFDF'
# Define values for y-axis ticks & major grid lines & plot them
yt_F = np.array([0.000001, 0.000005, 0.00001, 0.00005, 0.0001,
0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.2, 0.3,
0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.999,
0.9999, 0.99999, 0.999999])
yt_lnF = np.log(-np.log(1 - yt_F))
plt.yticks(yt_lnF)
ax.yaxis.grid(color=major_grid_color, linewidth=0.5)
ax.xaxis.grid(which='both', color=major_grid_color, linewidth=0.5)
# Define values for y-axis minor grid lines & plot them
y_minor_grid_lines = [0.000002, 0.000003, 0.000004, 0.000006,
0.000007, 0.000008, 0.000009, 0.00002, 0.00003,
0.00004, 0.00006, 0.00007, 0.00008, 0.00009,
0.0002, 0.0003, 0.0004, 0.0006, 0.0007, 0.0008,
0.0009, 0.002, 0.003, 0.004, 0.006, 0.007,
0.008, 0.009, 0.02, 0.03, 0.04, 0.06, 0.07,
0.08, 0.09, 0.12, 0.14, 0.16, 0.18, 0.25, 0.35,
0.45, 0.55, 0.75, 0.85, 0.97]
for p in y_minor_grid_lines:
pt = np.log(-np.log(1 - p))
plt.semilogx(ax.get_xlim(), [pt, pt], color=minor_grid_color,
linewidth=0.5)
# Highlight the 'Characteristic Life' (Eta) line at 63.2%
plt.semilogx(ax.get_xlim(), [0,0], color='#FF3F3F', linestyle='dashed',
label = u'Characteristic Life (\u03B7) @ 63.2%')
plt.semilogx([self.eta, self.eta], [-15, 0], color='#FF3F3F',
linestyle='dashed')
# Initially set y-axis lower limit based on the min data rank value
if susp:
plotymin = np.nanmin(_ftolnln(self.data['adjm_rank']))
else:
plotymin = np.nanmin(_ftolnln(self.data['med_rank']))
# Adjust y-axis lower limit using 'yt_F' values as breakpoints
plotymin_tmp = plotymin
for yt_Fval in yt_F:
if np.log(-np.log(1 - yt_Fval)) < plotymin:
plotymin_tmp = np.log(-np.log(1 - yt_Fval))
else:
plotymin = plotymin_tmp
break
# Initially set y-axis upper limit based on the max data rank value
if susp:
plotymax = np.nanmax(_ftolnln(self.data['adjm_rank']))
else:
plotymax = np.nanmax(_ftolnln(self.data['med_rank']))
# Adjust y-axis upper limit using 'yt_F' values as breakpoints
for yt_Fval in yt_F:
if np.log(-np.log(1 - yt_Fval)) > plotymax:
plotymax = np.log(-np.log(1 - yt_Fval))
break
# Ensure the y-axis shows values from 1% to 99% at least
plotymin = min([plotymin, np.log(-np.log(1 - 0.01))])
plotymax = max([plotymax, np.log(-np.log(1 - 0.99))])
# Determine the min & max values for the x-axis
# - May need to change tzero to self.tzero once it is implemented
plotxmin = 10 ** np.floor(np.log10(min(np.nanmin(self.data['data']),
self.eta * (np.exp(plotymin) ** (1/self.beta)) + tzero)))
plotxmax = 10 ** np.ceil(np.log10(max(np.nanmax(self.data['data']),
self.eta * (np.exp(plotymax) ** (1/self.beta)) + tzero)))
# Set the x & y axis limits
plt.ylim(plotymin, plotymax)
plt.xlim(plotxmin, plotxmax)
if susp:
plt.semilogx(self.data['data'],
_ftolnln(self.data['adjm_rank']), 'o')
else:
plt.semilogx(self.data['data'],
_ftolnln(self.data['med_rank']), 'o')
#----------------------------------------------------------------------
# Need to account for 'scaled' 3-parameter Weibulls (when implemented)
if tzero == 0:
# Calculate the y value endpoints for the line fit
y_ideal = [np.log(-np.log(1 - yt_F[0])),
np.log(-np.log(1 - yt_F[-1]))]
else:
# Calculate points for the unscaled 3-parameter line fit (curve)
# - NEED TO CHECK THIS WHEN 3-PARAMETER WEIBULL CODE IS ADDED
p0 = np.log(-np.log(1 - yt_F[0]))
dp = (np.log(-np.log(1 - yt_F[-1])) - p0) / 1000
y_ideal = [(p * dp + p0) for p in range(0, 1001)]
# Calculate the x values for the specified y values
x_ideal = self.eta * (np.exp(y_ideal) ** (1/self.beta)) + tzero
#----------------------------------------------------------------------
# Define other values to display in Legend
rsquared = self.r_squared
numfail = self.failures
numsusp = self.suspensions
# Date & Time stamps when the plot was generated
timestamp = datetime.datetime.today().strftime("%b %d, %Y\n%H:%M:%S")
# Construct the Annotation with all pertinent information
# Start with Beta & Eta values
annotation = u'\u03B2 = {:.03f}\n\u03B7 = {:.01f}'.format(
self.beta, self.eta)
# Only show the t0 term if it is non-zero
if tzero != 0 and tzero is not None:
annotation += u'\nt\u2080 = {:.01f}'.format(tzero)
# Append R squared value (if defined), number of failures & suspensions
if rsquared is not None:
annotation += u'\nR\u00b2 = {:.02f}%'.format(rsquared * 100)
annotation += u'\n{0} Failures\n{1} Suspensions\n'.format(
numfail, numsusp)
# Append names of the analyst & company, w/date & time stamp @ bottom
# - Need a property so analyst & company names can be defined
analyst_name = self.analyst
company_name = self.company
if analyst_name is not None:
annotation += u'\n{0}'.format(analyst_name)
if company_name is not None:
annotation += u'\n{0}'.format(company_name)
annotation += u'\n{0}'.format(timestamp)
# Define a string for the type of Weibull fit & append the 'fit method'
if tzero == 0:
weibull_type = '2-Parameter Weibull'
else:
weibull_type = '3-Parameter Weibull'
weibull_type += ',\n' + self._fit_test['fit method'].title()
plt.semilogx(x_ideal, y_ideal, label = weibull_type)
# Create the legend & apply formatting
leg = plt.legend(frameon=True, loc='upper left', framealpha=1.0)
leg.get_frame().set_edgecolor('black')
leg.get_frame().set_linewidth(0.5)
leg.get_frame().set_facecolor('white')
leg.get_frame().set_boxstyle("round, pad=0., rounding_size=0.")
# Create an Annotation text box & apply formatting
annotation_box = mpl.offsetbox.AnchoredText(annotation,
prop=dict(size=10), frameon=True,
loc='lower right')
annotation_box.patch.set_boxstyle("round, pad=0., rounding_size=0.")
annotation_box.patch.set_linewidth(0.5)
ax.add_artist(annotation_box)
if watermark_text:
ymin, _ = ax.get_ylim()
xmin, _ = ax.get_xlim()
plt.annotate(watermark_text, xy=(xmin, ymin), alpha=0.15,
rotation=0, fontsize=50)
if file_name:
# Check the filename extension
file_suff = Path(file_name).suffix
if file_suff != '.png':
if len(file_suff) > 0 and len(file_suff) <= 5:
# Replace original filename extension w/the correct one
file_name = file_name.replace(file_suff, '.png')
else:
# Assume longer "suffixes" occur if there are periods in
# the filename, so append the suffix, don't replace it
# - Also handles cases where no suffix is present
file_name += '.png'
plt.savefig(file_name, format='png')
if show:
plt.show()
def pdf(self, show: bool=True, file_name: str=None,
watermark_text=None):
r"""
Plot the probability density function
:param show: True if the plot is to be shown, false if otherwise
:param file_name: the file name to be passed to ``matplotlib.pyplot.savefig``
:param watermark_text: the text to include as a watermark
:return: None
"""
if not self.eta or not self.beta:
raise ParameterError
x = np.linspace(0.01, self.eta*5, 1000)
y = scipy.stats.weibull_min.pdf(x, self.beta, 0, self.eta)
self._plot_prob(x, y,
show=show, file_name=file_name,
title='Probability Density Function',
y_label='probability/{}'.format(self.x_unit),
watermark_text=watermark_text)
def sf(self, show: bool=True, file_name: str=None,
watermark_text=None):
r"""
Plot the survival function
:param show: True if the plot is to be shown, false if otherwise
:param file_name: the file name to be passed to ``matplotlib.pyplot.savefig``
:param watermark_text: the text to include as a watermark
:return: None
"""
if not self.eta or not self.beta:
raise ParameterError
x = np.linspace(0.01, self.eta * 5, 1000)
y = scipy.stats.weibull_min.sf(x, self.beta, 0, self.eta)
y = y[y > 0.00001]
x = x[: len(y)]
if self._fit_test is not None:
betas = np.linspace(self._fit_test['beta lower limit'],
self._fit_test['beta upper limit'],
10)
etas = np.linspace(self._fit_test['eta lower limit'],
self._fit_test['eta upper limit'],
10)
min_y = y
max_y = y
for beta in betas:
for eta in etas:
values = scipy.stats.weibull_min.sf(x,
beta,
0,
eta)
min_y = np.minimum(min_y, values)
max_y = np.maximum(max_y, values)
else:
min_y = None
max_y = None
self._plot_prob(x, y, min_y, max_y,
show=show, file_name=file_name,
title='Survival Function',
y_label='probability of survival',
watermark_text=watermark_text)
def hazard(self, show: bool=True, file_name: str=None,
watermark_text=None):
r"""
Plot the hazard (CDF) function
:param show: True if the plot is to be shown, false if otherwise
:param file_name: the file name to be passed to ``matplotlib.pyplot.savefig``
:param watermark_text: the text to include as a watermark
:return: None
"""
self.cdf(show, file_name,
watermark_text=watermark_text)
def cdf(self, show: bool=True, file_name: str=None,
watermark_text=None):
r"""
Plot the cumulative distribution function
:param show: True if the plot is to be shown, false if otherwise
:param file_name: the file name to be passed to ``matplotlib.pyplot.savefig``
:param watermark_text: the text to include as a watermark
:return: None
"""
if not self.eta or not self.beta:
raise ParameterError
x = np.linspace(0.01, self.eta * 5, 1000)
y = scipy.stats.weibull_min.cdf(x, self.beta, 0, self.eta)
y = y[y < 0.9999]
x = x[: len(y)]
if self._fit_test is not None:
betas = np.linspace(self._fit_test['beta lower limit'],
self._fit_test['beta upper limit'],
10)
etas = np.linspace(self._fit_test['eta lower limit'],
self._fit_test['eta upper limit'],
10)
min_y = y
max_y = y
for beta in betas:
for eta in etas:
values = scipy.stats.weibull_min.cdf(x,
beta,
0,
eta)
min_y = np.minimum(min_y, values)
max_y = np.maximum(max_y, values)
else:
min_y = None
max_y = None
self._plot_prob(x, y, min_y, max_y,
show, file_name,
title='Hazard Function',
y_label='probability of failure',
watermark_text=watermark_text)
def fr(self, show: bool=True, file_name: str=None,
watermark_text=None):
r"""
Plot failure rate as a function of cycles
:param show: True if the item is to be shown now, False if other elements to be added later
:param file_name: if file_name is stated, then the probplot will be saved as a PNG
:param watermark_text: the text to include as a watermark
:return: None
"""
if not self.eta or not self.beta:
raise ParameterError
x = np.linspace(0.01, self.eta * 2, 1000)
y = (self.beta / self.eta) * (x / self.eta) ** (self.beta - 1)
if self._fit_test is not None:
betas = np.linspace(self._fit_test['beta lower limit'],
self._fit_test['beta upper limit'],
10)
etas = np.linspace(self._fit_test['eta lower limit'],
self._fit_test['eta upper limit'],
10)
min_y = y
max_y = y
for beta in betas:
for eta in etas:
values = (beta / eta) * (x / eta) ** (beta - 1)
min_y = np.minimum(min_y, values)
max_y = np.maximum(max_y, values)
else:
min_y = None
max_y = None
self._plot_prob(x, y, min_y, max_y,
show=show, file_name=file_name,
title='Failure Rate',
y_label='failures/{}'.format(self.x_unit),
watermark_text=watermark_text)
def _plot_prob(self, x: list, y: list,
min_y: list=None, max_y: list=None,
show: bool=True, file_name: str=None,
title: str=None, y_label: str='probability',
watermark_text=None):
r"""
Base plot function used for the density function plotting
:param x: the x values
:param y: the y values
:param min_y: the minimum y values (used to shade confidence limits)
:param max_y: the maximum y values (used to shade confidence limits)
:param show: True if the plot is to be shown, false if otherwise
:param file_name: the file name to be passed to ``matplotlib.pyplot.savefig``
:param title: the plot title
:param y_label: the y-axis label
:param watermark_text: the text to include as a watermark
:return: None
"""
if min_y is not None and max_y is not None:
if len(min_y) > 0 and len(max_y) > 0:
plt.fill_between(x, min_y, max_y, alpha=0.25)
plt.plot(x, y, label='beta: {:.02f}\neta: {:.01f}'.format(self.beta,
self.eta))
plt.legend()
plt.xlim(0)
plt.ylim(0)
plt.xlabel('{}s'.format(self.x_unit))
plt.ylabel(y_label)
ax = plt.gca()
ax.grid(True, which='both')
if title:
plt.title(title)
if watermark_text:
ymin, _ = ax.get_ylim()
xmin, _ = ax.get_xlim()
plt.annotate(watermark_text, xy=(xmin, ymin), alpha=0.15, rotation=0, fontsize=50)
if file_name:
plt.savefig(file_name)
if show:
plt.show()
def b(self, percent_failed: (float, str)=10.0):
r"""
Calculate the B-life value
:param percent_failed: the number of elements that have failed as a percent (i.e. 10)
:return: the life in cycles/hours/etc.
"""
if not self.eta or not self.beta:
raise ParameterError
pf = float(percent_failed)
if not 0.1 <= pf <= 99.0:
raise ValueError('percent_failed must be between 0.1 and 99.0 (inclusive)')
return scipy.stats.weibull_min.ppf(pf / 100, self.beta, 0, self.eta)
@property
def mean(self):
r"""
Calculates and returns the mean life (aka the MTTF), which is the integral of the reliability function between 0 and inf,
.. math::
MTTF = \eta \Gamma(\frac{1}{\beta} + 1)
where the gamma function, :math:`\Gamma`, is evaluated at :math:`\frac{1}{\beta} + 1`
:return: the mean life of the product
"""
if not self.eta or not self.beta:
raise ParameterError
return self.eta * gamma(1.0/self.beta + 1)
@property
def mttf(self):
r"""
Calculates and returns the mean time to failure (MTTF)
:return: the mean time to failure
"""
if not self.eta or not self.beta:
raise ParameterError
return self.mean
@property
def median(self):
r"""
Calculates and returns median life of the product
:return: The median life
"""
if not self.eta or not self.beta:
raise ParameterError
return scipy.stats.weibull_min.ppf(0.5, self.beta, 0, self.eta)
@property
def characteristic_life(self):
r"""
Returns the current characteristic life of the product, aka :math:`\eta`
:return: the characteristic life of the product
"""
if not self.eta or not self.beta:
raise ParameterError
return self.eta
@property
def r_squared(self):
"""
Returns the r squared value of the fit test
:return: the r squared value
"""
return self._fit_test.get('r_squared')
@property
def failures(self):
"""
Returns the number of failures in the data set.
:return: the number of failures in the data set
"""
return sum([1 for susp in self.data['susp'] if not susp])
@property
def suspensions(self):
"""
Returns the number of suspensions in the data set.
:return: the number of suspensions in the data set
"""
return sum([1 for susp in self.data['susp'] if susp])
@property
def stats(self):
r"""
Returns the fit statistics, confidence limits, etc
:return: a pandas series containing the fit statistics
"""
data = self._fit_test
data['mean life'] = self.mean
data['median life'] = self.median
data['b10 life'] = self.b(10)
return data
@property
def analyst(self):
return self.__analyst
@analyst.setter
def analyst(self, analyst_name):
self.__analyst = analyst_name
@property
def company(self):
return self.__company
@company.setter
def company(self, company_name):
self.__company = company_name
@property
def plot_title(self):
return self.__plot_title
@plot_title.setter
def plot_title(self, plot_title):
self.__plot_title = plot_title
class Design:
"""
Determines the required test time given the number of units
under test and the target cycles, OR determines the number of units
given the test time and the target cycles.
:param target_cycles: The target number of cycles/minutes/hours
:param reliability: The fraction of units still running after target_cycles, 0.001 to 0.999
:param confidence_level: The fractional level of confidence, 0.001 to 0.999
:param expected_beta: The anticipated level of beta - often worse-case - based on historical data or other assumptions
"""
def __init__(self, target_cycles: (int, float),
reliability: float=0.9, confidence_level: float=0.9,
expected_beta: float=2.0):
if not 0.001 <= reliability <= 0.999:
raise ValueError('The reliability must be between 0.001 and 0.999')
if not 0.001 <= confidence_level <= 0.999:
raise ValueError('The confidence level must be between 0.001 and 0.999')
self.target_cycles = target_cycles
self.reliability = reliability
self.confidence_level = confidence_level
self.beta = expected_beta
def num_of_units(self, test_cycles: (int, float)):
"""
Design a test, calculating the number of units required to run for the test duration / cycles in order to prove the reliability at target_cycles.
:return: The number of units required
"""
b = -np.log(self.reliability)
c = b ** (1.0 / self.beta)
ee = self.target_cycles / c
units = np.log(1.0 - self.confidence_level) / (-(test_cycles / ee) ** self.beta)
return units
def num_of_cycles(self, number_of_units: int):
"""
Design a test, calculating the test duration/cycles to prove the required reliability at target_cycles.
:return: the required duration or cycles
"""
b = -np.log(self.reliability)
c = b ** (1.0 / self.beta)
ee = self.target_cycles / c
cycles = (-np.log((1.0 - self.confidence_level) ** (1.0 / number_of_units))) ** (1.0 / self.beta) * ee
return cycles
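# A minimal usage sketch (hypothetical targets and worst-case beta):
#   designer = Design(target_cycles=10000, reliability=0.9, confidence_level=0.9, expected_beta=1.5)
#   units_needed = designer.num_of_units(test_cycles=12000)    # units required for a 12000-cycle test
#   cycles_needed = designer.num_of_cycles(number_of_units=20) # cycles required with 20 units on test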
class Weibayes:
"""
Weibayes-style analysis of the data with a confidence level and beta.
:param data: The data for each unit
:param confidence_level: The fractional level of confidence, 0.001 to 0.999
:param beta: The shape parameter
"""
def __init__(self, data: list, confidence_level: float=None, beta: float=2.0):
if not 0.001 < confidence_level < 0.999:
raise ValueError('confidence level must be between 0.001 and 0.999')
self.data = np.asarray(data)
self.beta = float(beta)
self.confidence_level, self.r = None, None
self.blife = None
self._set_confidence_level(confidence_level)
def _set_confidence_level(self, confidence_level):
cl = float(confidence_level)
alpha = 1.0 - cl
r = -np.log(alpha)
self.confidence_level = cl
self.r = r
self._calc()
self._calc_icdf()
self._calc_cdf()
def _calc(self):
etaseries = np.empty((1, len(self.data)))
etaseries[0, :] = ((self.data ** self.beta) / self.r)
self.etaseries = etaseries
self.eta = etaseries.sum(1) ** (1 / self.beta)
def _calc_cdf(self):
"""
calculates the cumulative distribution function, saves within self.cdf
:return: None
"""
tmin = 10 ** (np.floor(np.log10(self.icdf.min())) - 1)
tmax = 10 ** (np.floor(np.log10(self.icdf.max())) + 1)
self.cdf_x = np.linspace(tmin, tmax, 1000)
self.cdf = np.empty((len(self.eta), len(self.cdf_x)))
for n, eta in enumerate(self.eta):
self.cdf[n, :] = 1 - np.exp(- (self.cdf_x / eta) ** self.beta)
def _calc_icdf(self):
"""
Calculates the inverse cumulative distribution function
:return: None
"""
self.icdf_x = np.arange(.0001, .99, .0001)
self.icdf = np.empty((len(self.eta), len(self.icdf_x)))
tmp = pd.DataFrame(index=self.icdf_x)
self.icdf[0, :] = self.eta * np.log(1.0 / (1.0 - self.icdf_x)) ** (1.0 / self.beta)
tmp[self.confidence_level] = self.icdf[0]
self.blife = tmp.T # transpose
self.blife.index.name = 'B'
def plot(self, confidence_level: float=None, show: bool=True, file_name: str=None):
"""
Plot the linear plot line.
:confidence_level: the desired confidence level
:show: True if the plot is to be shown
:file_name: Save the plot as "file_name"
"""
if confidence_level:
self._set_confidence_level(confidence_level)
plt.semilogx(self.cdf_x, _ftolnln(self.cdf[0]))
axis = plt.gca()
axis.grid(True, which='both')
formatter = mpl.ticker.FuncFormatter(_weibull_ticks)
axis.yaxis.set_major_formatter(formatter)
yt_F = np.array([0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5,
0.6, 0.7, 0.8, 0.9, 0.95, 0.99])
yt_lnF = _ftolnln(yt_F)
plt.yticks(yt_lnF)
plt.ylim(yt_lnF[1], yt_lnF[-1])
plt.xlim(self.cdf_x.min(), self.cdf_x.max())
self._plot_annotate()
plt.ylabel('failure rate')
plt.xlabel('cycles')
if file_name:
plt.savefig(file_name)
if show:
plt.show()
def _plot_annotate(self):
ax = plt.gca()
ax.text(0.02, 0.95, 'beta: {:.0f}'.format(self.beta), transform=ax.transAxes)
ax.text(.02, .90,
'eta: {:.03g}'.format(self.eta[0]),
transform=ax.transAxes)
ax.text(.02, .85,
'confidence level: {}'.format(self.confidence_level),
transform=ax.transAxes)
def b(self, b_spec: int=10, confidence_level: float=None):
"""
Calculates the B-life
:param b_spec: the B-specification (for instance, '10')
:param confidence_level: the confidence level (usually between 0.01 and 0.99)
:return: the B life
"""
if not 1 <= b_spec <= 99:
raise ValueError('b_spec must be between 1 and 99 (inclusive)')
if confidence_level and not 0.001 < confidence_level < 0.999:
raise ValueError('confidence level must be between 0.001 and 0.999')
if confidence_level:
self._set_confidence_level(confidence_level)
b_spec_decimal = b_spec / 100.0
return float(self.blife[b_spec_decimal].T)
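# A minimal usage sketch (hypothetical, all-suspended run times; beta assumed from prior history):
#   wb = Weibayes([810, 820, 815, 805], confidence_level=0.9, beta=2.0)
#   print(wb.b(10))   # B10 life at the 90% confidence level
#   wb.plot()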
| |
#!/usr/bin/env python
#
# Copyright 2013, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""Tests the robustness and resiliency of vtworkers."""
from collections import namedtuple
import urllib
import urllib2
import logging
import unittest
from vtdb import keyrange_constants
import environment
import tablet
import utils
KEYSPACE_ID_TYPE = keyrange_constants.KIT_UINT64
class ShardTablets(namedtuple('ShardTablets', 'master replicas rdonlys')):
"""ShardTablets is a container for all the tablet.Tablets of a shard.
`master` should be a single Tablet, while `replicas` and `rdonlys` should be
lists of Tablets of the appropriate types.
"""
@property
def all_tablets(self):
"""Returns a list of all the tablets of the shard.
Does not guarantee any ordering on the returned tablets.
"""
return [self.master] + self.replicas + self.rdonlys
@property
def replica(self):
"""Returns the first replica Tablet instance for the shard, or None."""
if self.replicas:
return self.replicas[0]
else:
return None
@property
def rdonly(self):
"""Returns the first replica Tablet instance for the shard, or None."""
if self.rdonlys:
return self.rdonlys[0]
else:
return None
# initial shard, covers everything
shard_master = tablet.Tablet()
shard_replica = tablet.Tablet()
shard_rdonly1 = tablet.Tablet()
# split shards
# range '' - 80
shard_0_master = tablet.Tablet()
shard_0_replica = tablet.Tablet()
shard_0_rdonly1 = tablet.Tablet()
# range 80 - ''
shard_1_master = tablet.Tablet()
shard_1_replica = tablet.Tablet()
shard_1_rdonly1 = tablet.Tablet()
all_shard_tablets = ShardTablets(shard_master, [shard_replica], [shard_rdonly1])
shard_0_tablets = ShardTablets(
shard_0_master, [shard_0_replica], [shard_0_rdonly1])
shard_1_tablets = ShardTablets(
shard_1_master, [shard_1_replica], [shard_1_rdonly1])
def init_keyspace():
"""Creates a `test_keyspace` keyspace with a sharding key."""
utils.run_vtctl(
['CreateKeyspace', '-sharding_column_name', 'keyspace_id',
'-sharding_column_type', KEYSPACE_ID_TYPE, 'test_keyspace'])
def setUpModule():
try:
environment.topo_server().setup()
setup_procs = [
shard_master.init_mysql(),
shard_replica.init_mysql(),
shard_rdonly1.init_mysql(),
shard_0_master.init_mysql(),
shard_0_replica.init_mysql(),
shard_0_rdonly1.init_mysql(),
shard_1_master.init_mysql(),
shard_1_replica.init_mysql(),
shard_1_rdonly1.init_mysql(),
]
utils.wait_procs(setup_procs)
init_keyspace()
except:
tearDownModule()
raise
def tearDownModule():
if utils.options.skip_teardown:
return
teardown_procs = [
shard_master.teardown_mysql(),
shard_replica.teardown_mysql(),
shard_rdonly1.teardown_mysql(),
shard_0_master.teardown_mysql(),
shard_0_replica.teardown_mysql(),
shard_0_rdonly1.teardown_mysql(),
shard_1_master.teardown_mysql(),
shard_1_replica.teardown_mysql(),
shard_1_rdonly1.teardown_mysql(),
]
utils.wait_procs(teardown_procs, raise_on_error=False)
environment.topo_server().teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
shard_master.remove_tree()
shard_replica.remove_tree()
shard_rdonly1.remove_tree()
shard_0_master.remove_tree()
shard_0_replica.remove_tree()
shard_0_rdonly1.remove_tree()
shard_1_master.remove_tree()
shard_1_replica.remove_tree()
shard_1_rdonly1.remove_tree()
class TestBaseSplitClone(unittest.TestCase):
"""Abstract test base class for testing the SplitClone worker."""
def run_shard_tablets(
self, shard_name, shard_tablets, create_table=True):
"""Handles all the necessary work for initially running a shard's tablets.
This encompasses the following steps:
1. (optional) Create db
2. Starting vttablets and letting them initialize themselves
3. Waiting for the appropriate vttablet state
4. Force reparent to the master tablet
5. RebuildKeyspaceGraph
6. (optional) Running initial schema setup
Args:
shard_name: the name of the shard to start tablets in
shard_tablets: an instance of ShardTablets for the given shard
create_table: boolean, True iff we should create a table on the tablets
"""
# Start tablets.
#
# Specifying 'target_tablet_type' enables the health check
# i.e. tablets will be automatically returned to the serving graph
# after a SplitClone or SplitDiff.
#
# NOTE: The future master has to be started with type 'replica'.
shard_tablets.master.start_vttablet(
wait_for_state=None, target_tablet_type='replica',
init_keyspace='test_keyspace', init_shard=shard_name)
for tablet in shard_tablets.replicas:
tablet.start_vttablet(
wait_for_state=None, target_tablet_type='replica',
init_keyspace='test_keyspace', init_shard=shard_name)
for tablet in shard_tablets.rdonlys:
tablet.start_vttablet(
wait_for_state=None, target_tablet_type='rdonly',
init_keyspace='test_keyspace', init_shard=shard_name)
# Block until tablets are up and we can enable replication.
# All tables should be NOT_SERVING until we run InitShardMaster.
for tablet in shard_tablets.all_tablets:
tablet.wait_for_vttablet_state('NOT_SERVING')
# Reparent to choose an initial master and enable replication.
utils.run_vtctl(
['InitShardMaster', '-force', 'test_keyspace/%s' % shard_name,
shard_tablets.master.tablet_alias], auto_log=True)
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
# Enforce a health check instead of waiting for the next periodic one.
# (saves up to 1 second execution time on average)
for tablet in shard_tablets.replicas:
utils.run_vtctl(['RunHealthCheck', tablet.tablet_alias, 'replica'])
for tablet in shard_tablets.rdonlys:
utils.run_vtctl(['RunHealthCheck', tablet.tablet_alias, 'rdonly'])
# Wait for tablet state to change after starting all tablets. This allows
# us to start all tablets at once, instead of sequentially waiting.
# NOTE: Replication has to be enabled first or the health check will
# set a replica or rdonly tablet back to NOT_SERVING.
for tablet in shard_tablets.all_tablets:
tablet.wait_for_vttablet_state('SERVING')
create_table_sql = (
'create table worker_test('
'id bigint unsigned,'
'msg varchar(64),'
'keyspace_id bigint(20) unsigned not null,'
'primary key (id),'
'index by_msg (msg)'
') Engine=InnoDB'
)
if create_table:
utils.run_vtctl(['ApplySchema',
'-sql=' + create_table_sql,
'test_keyspace'],
auto_log=True)
def copy_schema_to_destination_shards(self):
for keyspace_shard in ('test_keyspace/-80', 'test_keyspace/80-'):
utils.run_vtctl(['CopySchemaShard',
'--exclude_tables', 'unrelated',
shard_rdonly1.tablet_alias,
keyspace_shard],
auto_log=True)
def _insert_values(self, tablet, id_offset, msg, keyspace_id, num_values):
"""Inserts values into MySQL along with the required routing comments.
Args:
tablet: the Tablet instance to modify.
id_offset: offset added to each generated value of the `id` column.
msg: the value of the `msg` column.
keyspace_id: the value of the `keyspace_id` column.
num_values: the number of rows to insert.
"""
# For maximum performance, multiple values are inserted in one statement.
# However, when the statements are too long, queries will timeout and
# vttablet will kill them. Therefore, we chunk it into multiple statements.
def chunks(full_list, n):
"""Yield successive n-sized chunks from full_list."""
for i in xrange(0, len(full_list), n):
yield full_list[i:i+n]
max_chunk_size = 100*1000
k = utils.uint64_to_hex(keyspace_id)
for chunk in chunks(range(1, num_values+1), max_chunk_size):
logging.debug('Inserting values for range [%d, %d].', chunk[0], chunk[-1])
values_str = ''
for i in chunk:
if i != chunk[0]:
values_str += ','
values_str += "(%d, '%s', 0x%x)" % (id_offset + i, msg, keyspace_id)
tablet.mquery(
'vt_test_keyspace', [
'begin',
'insert into worker_test(id, msg, keyspace_id) values%s '
'/* vtgate:: keyspace_id:%s */' % (values_str, k),
'commit'],
write=True)
def insert_values(
self, tablet, num_values, num_shards, offset=0, keyspace_id_range=2**64):
"""Inserts simple values, one for each potential shard.
Each row is given a message that contains the shard number, so we can easily
verify that the source and destination shards have the same data.
Args:
tablet: the Tablet instance to modify.
num_values: The number of values to insert.
num_shards: the number of shards that we expect to have.
offset: amount that we should offset the `id`s by. This is useful for
inserting values multiple times.
keyspace_id_range: the number of distinct values that the keyspace id
can have.
"""
shard_width = keyspace_id_range / num_shards
shard_offsets = [i * shard_width for i in xrange(num_shards)]
for shard_num in xrange(num_shards):
self._insert_values(
tablet,
shard_offsets[shard_num] + offset,
'msg-shard-%d' % shard_num,
shard_offsets[shard_num],
num_values)
def assert_shard_data_equal(
self, shard_num, source_tablet, destination_tablet):
"""Asserts source and destination tablets have identical shard data.
Args:
shard_num: The shard number of the shard that we want to verify.
source_tablet: Tablet instance of the source shard.
destination_tablet: Tablet instance of the destination shard.
"""
select_query = (
'select * from worker_test where msg="msg-shard-%s" order by id asc' %
shard_num)
# Make sure all the right rows made it from the source to the destination
source_rows = source_tablet.mquery('vt_test_keyspace', select_query)
destination_rows = destination_tablet.mquery(
'vt_test_keyspace', select_query)
self.assertEqual(source_rows, destination_rows)
# Make sure that there are no extra rows on the destination
count_query = 'select count(*) from worker_test'
destination_count = destination_tablet.mquery(
'vt_test_keyspace', count_query)[0][0]
self.assertEqual(destination_count, len(destination_rows))
def run_split_diff(self, keyspace_shard, source_tablets, destination_tablets):
"""Runs a vtworker SplitDiff on the given keyspace/shard.
Sets all former rdonly slaves back to rdonly.
Args:
keyspace_shard: keyspace/shard to run SplitDiff on (string)
source_tablets: ShardTablets instance for the source shard
destination_tablets: ShardTablets instance for the destination shard
"""
_ = source_tablets, destination_tablets
logging.debug('Running vtworker SplitDiff for %s', keyspace_shard)
_, _ = utils.run_vtworker(
['-cell', 'test_nj', 'SplitDiff',
keyspace_shard], auto_log=True)
def setUp(self):
"""Creates shards, starts the tablets, and inserts some data."""
try:
self.run_shard_tablets('0', all_shard_tablets)
# create the split shards
self.run_shard_tablets(
'-80', shard_0_tablets, create_table=False)
self.run_shard_tablets(
'80-', shard_1_tablets, create_table=False)
logging.debug(
'Start inserting initial data: %s rows', utils.options.num_insert_rows)
self.insert_values(shard_master, utils.options.num_insert_rows, 2)
logging.debug(
'Done inserting initial data, waiting for replication to catch up')
utils.wait_for_replication_pos(shard_master, shard_rdonly1)
logging.debug('Replication on source rdonly tablet is caught up')
except:
self.tearDown()
def tearDown(self):
"""Does the minimum to reset topology and tablets to their initial states.
When benchmarked, this seemed to take around 30% of the time of
(setupModule + tearDownModule).
FIXME(aaijazi): doing this in parallel greatly reduces the time it takes.
See the kill_tablets method in tablet.py.
"""
utils.run_vtctl(['ListAllTablets', 'test_nj'])
for shard_tablet in [all_shard_tablets, shard_0_tablets, shard_1_tablets]:
for tablet in shard_tablet.all_tablets:
tablet.reset_replication()
tablet.clean_dbs()
tablet.kill_vttablet()
# we allow failures here as some tablets will be gone sometimes
# (the master tablets after an emergency reparent)
utils.run_vtctl(['DeleteTablet', '-allow_master', tablet.tablet_alias],
auto_log=True, raise_on_error=False)
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
for shard in ['0', '-80', '80-']:
utils.run_vtctl(
['DeleteShard', 'test_keyspace/%s' % shard], auto_log=True)
class TestBaseSplitCloneResiliency(TestBaseSplitClone):
"""Tests that the SplitClone worker is resilient to particular failures."""
def setUp(self):
try:
super(TestBaseSplitCloneResiliency, self).setUp()
self.copy_schema_to_destination_shards()
except:
self.tearDown()
def verify_successful_worker_copy_with_reparent(self, mysql_down=False):
"""Verifies that vtworker can successfully copy data for a SplitClone.
Order of operations:
1. Run a background vtworker
2. Wait until the worker successfully resolves the destination masters.
3. Reparent the destination tablets
4. Wait until the vtworker copy is finished
5. Verify that the worker was forced to reresolve topology and retry writes
due to the reparent.
6. Verify that the data was copied successfully to both new shards
Args:
mysql_down: boolean, True iff we expect the MySQL instances on the
destination masters to be down.
Raises:
AssertionError if things didn't go as expected.
"""
worker_proc, worker_port, _ = utils.run_vtworker_bg(
['--cell', 'test_nj',
'SplitClone',
'--source_reader_count', '1',
'--destination_pack_count', '1',
'--destination_writer_count', '1',
'--strategy=-populate_blp_checkpoint',
'test_keyspace/0'],
auto_log=True)
if mysql_down:
# If MySQL is down, we wait until resolving at least twice (to verify that
# we do reresolve and retry due to MySQL being down).
worker_vars = utils.poll_for_vars(
'vtworker', worker_port,
'WorkerDestinationActualResolves >= 2',
condition_fn=lambda v: v.get('WorkerDestinationActualResolves') >= 2)
self.assertNotEqual(
worker_vars['WorkerRetryCount'], {},
"expected vtworker to retry, but it didn't")
logging.debug('Worker has resolved at least twice, starting reparent now')
# Original masters have no running MySQL, so need to force the reparent
utils.run_vtctl(
['EmergencyReparentShard', 'test_keyspace/-80',
shard_0_replica.tablet_alias], auto_log=True)
utils.run_vtctl(
['EmergencyReparentShard', 'test_keyspace/80-',
shard_1_replica.tablet_alias], auto_log=True)
else:
utils.poll_for_vars(
'vtworker', worker_port,
'WorkerDestinationActualResolves >= 1',
condition_fn=lambda v: v.get('WorkerDestinationActualResolves') >= 1)
logging.debug('Worker has resolved at least once, starting reparent now')
utils.run_vtctl(
['PlannedReparentShard', 'test_keyspace/-80',
shard_0_replica.tablet_alias], auto_log=True)
utils.run_vtctl(
['PlannedReparentShard', 'test_keyspace/80-',
shard_1_replica.tablet_alias], auto_log=True)
logging.debug('Polling for worker state')
# There are a couple of race conditions around this, that we need
# to be careful of:
#
# 1. It's possible for the reparent step to take so long that the
# worker will actually finish before we get to the polling
# step. To workaround this, the test takes a parameter to
# increase the number of rows that the worker has to copy (with
# the idea being to slow the worker down).
#
# 2. If the worker has a huge number of rows to copy, it's
# possible for the polling to timeout before the worker has
# finished copying the data.
#
# You should choose a value for num_insert_rows, such that this test passes
# for your environment (trial-and-error...)
worker_vars = utils.poll_for_vars(
'vtworker', worker_port,
'WorkerState == cleaning up',
condition_fn=lambda v: v.get('WorkerState') == 'cleaning up',
# We know that vars should already be ready, since we read them earlier
require_vars=True,
# We're willing to let the test run for longer to make it less flaky.
# This should still fail fast if something goes wrong with vtworker,
# because of the require_vars flag above.
timeout=5*60)
# Verify that we were forced to reresolve and retry.
self.assertGreater(worker_vars['WorkerDestinationActualResolves'], 1)
self.assertGreater(worker_vars['WorkerDestinationAttemptedResolves'], 1)
self.assertNotEqual(
worker_vars['WorkerRetryCount'], {},
"expected vtworker to retry, but it didn't")
utils.wait_procs([worker_proc])
# Make sure that everything is caught up to the same replication point
self.run_split_diff('test_keyspace/-80', all_shard_tablets, shard_0_tablets)
self.run_split_diff('test_keyspace/80-', all_shard_tablets, shard_1_tablets)
self.assert_shard_data_equal(0, shard_master, shard_0_tablets.replica)
self.assert_shard_data_equal(1, shard_master, shard_1_tablets.replica)
class TestReparentDuringWorkerCopy(TestBaseSplitCloneResiliency):
def test_reparent_during_worker_copy(self):
"""Simulates a destination reparent during a worker SplitClone copy.
The SplitClone command should be able to gracefully handle the reparent and
end up with the correct data on the destination.
Note: this test has a small possibility of flaking, due to the timing issues
involved. It's possible for the worker to finish the copy step before the
reparent succeeds, in which case there are assertions that will fail. This
seems better than having the test silently pass.
"""
self.verify_successful_worker_copy_with_reparent()
class TestMysqlDownDuringWorkerCopy(TestBaseSplitCloneResiliency):
def setUp(self):
"""Shuts down MySQL on the destination masters.
Also runs base setup.
"""
try:
logging.debug('Starting base setup for MysqlDownDuringWorkerCopy')
super(TestMysqlDownDuringWorkerCopy, self).setUp()
logging.debug('Starting MysqlDownDuringWorkerCopy-specific setup')
utils.wait_procs(
[shard_0_master.shutdown_mysql(),
shard_1_master.shutdown_mysql()])
logging.debug('Finished MysqlDownDuringWorkerCopy-specific setup')
except:
self.tearDown()
def tearDown(self):
"""Restarts the MySQL processes that were killed during the setup."""
logging.debug('Starting MysqlDownDuringWorkerCopy-specific tearDown')
utils.wait_procs(
[shard_0_master.start_mysql(),
shard_1_master.start_mysql()])
logging.debug('Finished MysqlDownDuringWorkerCopy-specific tearDown')
super(TestMysqlDownDuringWorkerCopy, self).tearDown()
logging.debug('Finished base tearDown for MysqlDownDuringWorkerCopy')
def test_mysql_down_during_worker_copy(self):
"""This test simulates MySQL being down on the destination masters."""
self.verify_successful_worker_copy_with_reparent(mysql_down=True)
class TestVtworkerWebinterface(unittest.TestCase):
def setUp(self):
# Run vtworker without any optional arguments to start in interactive mode.
self.worker_proc, self.worker_port, _ = utils.run_vtworker_bg([])
def tearDown(self):
utils.kill_sub_process(self.worker_proc)
def test_webinterface(self):
worker_base_url = 'http://localhost:%d' % int(self.worker_port)
# Wait for /status to become available.
timeout = 10
while True:
done = False
try:
urllib2.urlopen(worker_base_url + '/status').read()
done = True
except:
pass
if done:
break
timeout = utils.wait_step(
'worker /status webpage must be available', timeout)
# Run the command twice to make sure it's idempotent.
for _ in range(2):
# Run Ping command.
try:
urllib2.urlopen(
worker_base_url + '/Debugging/Ping',
data=urllib.urlencode({'message': 'pong'})).read()
raise Exception('Should have thrown an HTTPError for the redirect.')
except urllib2.HTTPError as e:
self.assertEqual(e.code, 307)
# Verify that the command logged something and its available at /status.
status = urllib2.urlopen(worker_base_url + '/status').read()
self.assertIn(
"Ping command was called with message: 'pong'", status,
'Command did not log output to /status')
# Reset the job.
urllib2.urlopen(worker_base_url + '/reset').read()
status_after_reset = urllib2.urlopen(worker_base_url + '/status').read()
self.assertIn(
'This worker is idle.', status_after_reset,
'/status does not indicate that the reset was successful')
def add_test_options(parser):
parser.add_option(
'--num_insert_rows', type='int', default=3000,
help='The number of rows, per shard, that we should insert before '
'resharding for this test.')
if __name__ == '__main__':
utils.main(test_options=add_test_options)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ClustersOperations:
"""ClustersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.connectedvmware.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_initial(
self,
resource_group_name: str,
cluster_name: str,
body: Optional["_models.Cluster"] = None,
**kwargs: Any
) -> "_models.Cluster":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Cluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if body is not None:
body_content = self._serialize.body(body, 'Cluster')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Cluster', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Cluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/clusters/{clusterName}'} # type: ignore
async def begin_create(
self,
resource_group_name: str,
cluster_name: str,
body: Optional["_models.Cluster"] = None,
**kwargs: Any
) -> AsyncLROPoller["_models.Cluster"]:
"""Implements cluster PUT method.
Create Or Update cluster.
:param resource_group_name: The Resource Group Name.
:type resource_group_name: str
:param cluster_name: Name of the cluster.
:type cluster_name: str
:param body: Request payload.
:type body: ~azure.mgmt.connectedvmware.models.Cluster
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Cluster or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.connectedvmware.models.Cluster]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Cluster"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
body=body,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Cluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/clusters/{clusterName}'} # type: ignore
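# Illustrative (non-generated) usage sketch for the long-running create above.
# The management-client construction and the ``clusters`` operations-group
# attribute name are assumptions for the example only:
#
#     poller = await client.clusters.begin_create(
#         resource_group_name="my-rg",
#         cluster_name="my-cluster",
#         body=cluster_payload,          # optional _models.Cluster
#     )
#     cluster = await poller.result()    # waits until the operation completes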
async def get(
self,
resource_group_name: str,
cluster_name: str,
**kwargs: Any
) -> "_models.Cluster":
"""Gets a cluster.
Implements cluster GET method.
:param resource_group_name: The Resource Group Name.
:type resource_group_name: str
:param cluster_name: Name of the cluster.
:type cluster_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Cluster, or the result of cls(response)
:rtype: ~azure.mgmt.connectedvmware.models.Cluster
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Cluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Cluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/clusters/{clusterName}'} # type: ignore
async def update(
self,
resource_group_name: str,
cluster_name: str,
body: Optional["_models.ResourcePatch"] = None,
**kwargs: Any
) -> "_models.Cluster":
"""Updates a cluster.
API to update certain properties of the cluster resource.
:param resource_group_name: The Resource Group Name.
:type resource_group_name: str
:param cluster_name: Name of the cluster.
:type cluster_name: str
:param body: Resource properties to update.
:type body: ~azure.mgmt.connectedvmware.models.ResourcePatch
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Cluster, or the result of cls(response)
:rtype: ~azure.mgmt.connectedvmware.models.Cluster
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Cluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if body is not None:
body_content = self._serialize.body(body, 'ResourcePatch')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Cluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/clusters/{clusterName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
cluster_name: str,
force: Optional[bool] = None,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01-preview"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if force is not None:
query_parameters['force'] = self._serialize.query("force", force, 'bool')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/clusters/{clusterName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
cluster_name: str,
force: Optional[bool] = None,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes an cluster.
Implements cluster DELETE method.
:param resource_group_name: The Resource Group Name.
:type resource_group_name: str
:param cluster_name: Name of the cluster.
:type cluster_name: str
:param force: Whether force delete was specified.
:type force: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
force=force,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/clusters/{clusterName}'} # type: ignore
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.ClustersList"]:
"""Implements GET clusters in a subscription.
List of clusters in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ClustersList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.connectedvmware.models.ClustersList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ClustersList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ClustersList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.ConnectedVMwarevSphere/clusters'} # type: ignore
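# Illustrative consumption of the paged result above (the ``clusters`` client
# attribute name is an assumption; AsyncItemPaged supports ``async for``):
#
#     async for cluster in client.clusters.list():
#         print(cluster.name)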
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ClustersList"]:
"""Implements GET clusters in a resource group.
List of clusters in a resource group.
:param resource_group_name: The Resource Group Name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ClustersList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.connectedvmware.models.ClustersList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ClustersList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ClustersList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/clusters'} # type: ignore
| |
from sys import argv
import random
import re
import nltk
import operator
from nltk.corpus import cmudict
from nltk.probability import LidstoneProbDist
script, book = argv
e = cmudict.entries()
d = cmudict.dict()
banned_end_words = ['the', 'a', 'an', 'at', 'been', 'in', 'of', 'to', 'by', 'my',
'too', 'not', 'and', 'but', 'or', 'than', 'then', 'no', 'o',
'for', 'so', 'which', 'their', 'on', 'your', 'as', 'has',
'what', 'is', 'nor']
print "importing source text..."
f = open(book)
print "reading source text..."
t = f.read()
print "tokenizing words..."
w = nltk.word_tokenize(t)
def make_word_list():
print "making word list..."
word_list = []
for i in w:
try:
d[i.lower()]
except KeyError:
pass
else:
if i.lower() == "'s":
pass
elif i[-1] == ".":
pass
else:
word_list.append((i.lower(), d[i.lower()][0]))
return word_list
word_list = make_word_list()
def valid_words():
print "extracting words from word list..."
vw = []
for (x, y) in word_list:
vw.append(x)
return vw
vw = valid_words()
def unique(s):
print "making unique word list..."
u = []
for x in s:
if x not in u:
u.append(x)
else:
pass
return u
word_list_u = unique(word_list)
def sylcount(s):
try:
d[s]
except KeyError:
return None
else:
if len(d[s]) <= 1:
sj = ''.join(d[s][0])
sl = re.split('0|1|2', sj)
return len(sl) - 1
else:
sj0 = ''.join(d[s][0])
sl0 = re.split('0|1|2', sj0)
sj1 = ''.join(d[s][1])
sl1 = re.split('0|1|2', sj1)
if len(sl1) < len(sl0):
return len(sl1) - 1
else:
return len(sl0) - 1
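# Worked example, assuming the usual CMU dict entries for 'hello'
# (vowel phones carry the stress digits 0/1/2):
#   ''.join(['HH', 'AH0', 'L', 'OW1']) -> 'HHAH0LOW1'
#   re.split('0|1|2', 'HHAH0LOW1')     -> ['HHAH', 'LOW', '']   (3 pieces)
# so sylcount('hello') returns 3 - 1 = 2; when a word has two pronunciations
# the function keeps the smaller syllable count (both are 2 here).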
def line_sylcount(line):
count = 0
for word in line:
count += sylcount(word)
return count
def meter(word):
pron = d[word]
m1 = []
m2 = []
mx = []
if len(pron) == 1:
for i in pron[0]:
if '0' in i:
m1.append(0)
elif '1' in i:
m1.append(1)
elif '2' in i:
m1.append(2)
else:
pass
mx = [m1]
elif len(pron) >= 2:
for i in pron[0]:
if '0' in i:
m1.append(0)
elif '1' in i:
m1.append(1)
elif '2' in i:
m1.append(2)
else:
pass
for i in pron[1]:
if '0' in i:
m2.append(0)
elif '1' in i:
m2.append(1)
elif '2' in i:
m2.append(2)
else:
pass
mx = [m1, m2]
m = []
if len(mx) == 1:
w0 = reduce(operator.mul, mx[0], 1)
if w0 >= 2:
for i in mx[0]:
if i == 1:
m.append('u')
elif i == 2:
m.append('s')
elif w0 == 1:
for i in mx[0]:
m.append('s')
elif w0 == 0:
for i in mx[0]:
if i == 0:
m.append('u')
elif i == 1 or i == 2:
m.append('s')
elif len(mx) == 2:
w0 = reduce(operator.mul, mx[0], 1)
w1 = reduce(operator.mul, mx[1], 1)
if w0 >= 2 and w1 >= 2:
for (i, j) in zip(mx[0], mx[1]):
if i * j == 1:
m.append('u')
elif i * j == 4:
m.append('s')
elif i * j == 2:
m.append('x')
elif w0 == 1 and w1 == 1:
for (i, j) in zip(mx[0], mx[1]):
m.append('s')
elif w0 == 0 and w1 == 0:
for (i, j) in zip(mx[0], mx[1]):
if i == j and i * j >= 1:
m.append('s')
elif i != j and i * j == 0:
m.append('x')
elif i == j and i * j == 0:
m.append('u')
elif w0 >= 2 and w1 == 0:
for (i, j) in zip(mx[0], mx[1]):
if i == 1 and j == 0:
m.append('u')
elif i == 2 and j == 0:
m.append('x')
elif i == 1 and j == 1:
m.append('x')
elif i == 1 and j == 2:
m.append('x')
elif i == 2 and j == 1:
m.append('s')
elif i == 2 and j == 2:
m.append('s')
elif w0 == 0 and w1 >= 2:
for (i, j) in zip(mx[0], mx[1]):
if i == 0 and j == 1:
m.append('u')
elif i == 0 and j == 2:
m.append('x')
elif i == 1 and j == 1:
m.append('x')
elif i == 2 and j == 1:
m.append('x')
elif i == 1 and j == 2:
m.append('s')
elif i == 2 and j == 2:
m.append('s')
elif w0 == 1 and w1 >= 2:
for (i, j) in zip(mx[0], mx[1]):
if j == 1:
m.append('x')
elif j == 2:
m.append('s')
elif w0 >= 2 and w1 == 1:
for (i, j) in zip(mx[0], mx[1]):
if i == 1:
m.append('x')
elif i == 2:
m.append('s')
elif w0 == 1 and w1 == 0:
for (i, j) in zip(mx[0], mx[1]):
if j == 0:
m.append('x')
elif j == 1:
m.append('s')
elif j == 2:
m.append('s')
elif w0 == 0 and w1 == 1:
for (i, j) in zip(mx[0], mx[1]):
if i == 0:
m.append('x')
if i == 1:
m.append('s')
if i == 2:
m.append('s')
return m
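# Sketch of the meter encoding: each vowel maps to 'u' (unstressed),
# 's' (stressed) or 'x' (ambiguous, when the two CMU pronunciations disagree).
# Assuming 'about' has the single CMU entry AH0 B AW1 T, its stress digits are
# [0, 1], so meter('about') yields ['u', 's'] -- an iamb.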
def strip_numbers(x):
xj = '.'.join(x)
xl = re.split('0|1|2', xj)
xjx = ''.join(xl)
xlx = xjx.split('.')
return xlx
def last_stressed_vowel(word):
if len(d[word]) <= 1:
pron = d[word][0]
else:
p0 = d[word][0]
p1 = d[word][1]
sj0 = ''.join(p0)
sl0 = re.split('0|1|2', sj0)
sj1 = ''.join(p1)
sl1 = re.split('0|1|2', sj1)
if len(sl1) < len(sl0):
pron = p1
else:
pron = p0
mtr = meter(word)
vowel_index = []
if len(mtr) == 1:
lsv = -1
elif mtr[-1] == 's' or mtr[-1] == 'x':
lsv = -1
elif mtr[-2] == 's' or mtr[-2] == 'x':
lsv = -2
elif mtr[-3] == 's' or mtr[-3] == 'x':
lsv = -3
elif mtr[-4] == 's' or mtr[-4] == 'x':
lsv = -4
elif mtr[-5] == 's' or mtr[-5] == 'x':
lsv = -5
elif mtr[-6] == 's' or mtr[-6] == 'x':
lsv = -6
elif mtr[-7] == 's' or mtr[-7] == 'x':
lsv = -7
elif mtr[-8] == 's' or mtr[-8] == 'x':
lsv = -8
elif mtr[-9] == 's' or mtr[-9] == 'x':
lsv = -9
elif mtr[-10] == 's' or mtr[-10] == 'x':
lsv = -10
else:
lsv = -1
for i in pron:
if '0' in i or '1' in i or '2' in i:
vowel_index.append(pron.index(i))
else:
continue
return vowel_index[lsv]
def rhyme_finder(word):
rhyming_words = []
if len(d[word]) <= 1:
pron = d[word][0]
else:
p0 = d[word][0]
p1 = d[word][1]
sj0 = ''.join(p0)
sl0 = re.split('0|1|2', sj0)
sj1 = ''.join(p1)
sl1 = re.split('0|1|2', sj1)
if len(sl1) < len(sl0):
pron = p1
else:
pron = p0
pron = strip_numbers(pron)
lsv = last_stressed_vowel(word)
rhyme_part = pron[lsv:]
lrp = len(rhyme_part) * -1
for (x, y) in word_list_u:
ps = strip_numbers(y)
if ps[lrp:] == rhyme_part and ps[lrp-1:] != pron[lsv-1:]:
rhyming_words.append(x)
else:
pass
rw = [i for i in rhyming_words if i != word]
rw2 = [j for j in rw if j not in banned_end_words]
return rw2
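# NOTE: nltk.NgramModel (used below) ships with NLTK 2.x only; it was removed
# in NLTK 3, so this Python 2 script assumes an older NLTK install.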
print "building content model..."
estimator = lambda fdist, bins: LidstoneProbDist(fdist, 0.2)
content_model = nltk.NgramModel(5, vw, estimator=estimator)
def generate():
sw1 = random.randint(0, len(vw) - 10)
sw2 = sw1 + 10
starting_words = vw[sw1:sw2]
line_1 = content_model.generate(10, starting_words)
line_1 = line_1[-10:]
line_2 = content_model.generate(10, line_1)
line_2 = line_2[-10:]
line_3 = content_model.generate(9, line_2)
line_3 = line_3[-9:]
line_4 = content_model.generate(9, line_3)
line_4 = line_4[-9:]
line_5 = content_model.generate(10, line_4)
line_5 = line_5[-10:]
line_6 = content_model.generate(10, line_5)
line_6 = line_6[-10:]
line_7 = content_model.generate(9, line_6)
line_7 = line_7[-9:]
line_8 = content_model.generate(9, line_7)
line_8 = line_8[-9:]
line_9 = content_model.generate(10, line_8)
line_9 = line_9[-10:]
line_10 = content_model.generate(10, line_9)
line_10 = line_10[-10:]
line_11 = content_model.generate(9, line_10)
line_11 = line_11[-9:]
line_12 = content_model.generate(9, line_11)
line_12 = line_12[-9:]
line_13 = content_model.generate(10, line_12)
line_13 = line_13[-10:]
line_14 = content_model.generate(9, line_13)
line_14 = line_14[-9:]
lines = [line_1, line_2, line_3, line_4, line_5, line_6,
line_7, line_8, line_9, line_10, line_11, line_12,
line_13, line_14]
return lines
def couplet(x, y, lines):
line_1 = lines[x]
line_2 = lines[y]
end_word_1 = line_1.pop()
while end_word_1 in banned_end_words:
end_word_1 = random.choice(vw)
rhyming_words = rhyme_finder(end_word_1)
while rhyming_words == []:
end_word_1 = random.choice(vw)
while end_word_1 in banned_end_words:
end_word_1 = random.choice(vw)
rhyming_words = rhyme_finder(end_word_1)
end_word_2 = random.choice(rhyming_words)
for _ in range(9):
if line_sylcount(line_1) + sylcount(end_word_1) == 10:
break
else:
over = line_sylcount(line_1) + sylcount(end_word_1) - 10
for i in reversed(line_1):
if sylcount(i) <= over:
line_1.remove(i)
break
else:
continue
for _ in range(9):
if line_sylcount(line_2) + sylcount(end_word_2) == 10:
break
else:
over = line_sylcount(line_2) + sylcount(end_word_2) - 10
for i in reversed(line_2):
if sylcount(i) <= over:
line_2.remove(i)
break
else:
continue
line_1.append(end_word_1)
line_2.append(end_word_2)
return [line_1, line_2]
def couplet_checker():
lines = generate()
c1 = couplet(0, 2, lines)
c2 = couplet(1, 3, lines)
c3 = couplet(4, 6, lines)
c4 = couplet(5, 7, lines)
c5 = couplet(8, 10, lines)
c6 = couplet(9, 11, lines)
c7 = couplet(12, 13, lines)
while line_sylcount(c1[0]) != 10 or line_sylcount(c1[1]) != 10 or \
line_sylcount(c2[0]) != 10 or line_sylcount(c2[1]) != 10 or \
line_sylcount(c3[0]) != 10 or line_sylcount(c3[1]) != 10 or \
line_sylcount(c4[0]) != 10 or line_sylcount(c4[1]) != 10 or \
line_sylcount(c5[0]) != 10 or line_sylcount(c5[1]) != 10 or \
line_sylcount(c6[0]) != 10 or line_sylcount(c6[1]) != 10 or \
line_sylcount(c7[0]) != 10 or line_sylcount(c7[1]) != 10:
lines1 = generate()
c1 = couplet(0, 2, lines1)
c2 = couplet(1, 3, lines1)
c3 = couplet(4, 6, lines1)
c4 = couplet(5, 7, lines1)
c5 = couplet(8, 10, lines1)
c6 = couplet(9, 11, lines1)
c7 = couplet(12, 13, lines1)
return [c1[0], c2[0], c1[1], c2[1], c3[0], c4[0], c3[1], c4[1],
c5[0], c6[0], c5[1], c6[1], c7[0], c7[1]]
def sonnetizer():
s = couplet_checker()
l1 = ' '.join(s[0])
l2 = ' '.join(s[1])
l3 = ' '.join(s[2])
l4 = ' '.join(s[3])
l5 = ' '.join(s[4])
l6 = ' '.join(s[5])
l7 = ' '.join(s[6])
l8 = ' '.join(s[7])
l9 = ' '.join(s[8])
l10 = ' '.join(s[9])
l11 = ' '.join(s[10])
l12 = ' '.join(s[11])
l13 = ' '.join(s[12])
l14 = ' '.join(s[13])
sonnet = [l1, l2, l3, l4, l5, l6, l7, l8,
l9, l10, l11, l12, l13, l14]
return '\n'.join(sonnet) + '\n' + '\n'
print "assembling sonnets...\n\n"
for i in range(10):
print str(i + 1) + '.'
print sonnetizer()
| |
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
# Predefined provider network types.
# You can add or override these entries by extra_provider_types
# in the settings.
PROVIDER_TYPES = {
'local': {
'display_name': _('Local'),
'require_physical_network': False,
'require_segmentation_id': False,
},
'flat': {
'display_name': _('Flat'),
'require_physical_network': True,
'require_segmentation_id': False,
},
'vlan': {
'display_name': _('VLAN'),
'require_physical_network': True,
'require_segmentation_id': True,
},
'gre': {
'display_name': _('GRE'),
'require_physical_network': False,
'require_segmentation_id': True,
},
'vxlan': {
'display_name': _('VXLAN'),
'require_physical_network': False,
'require_segmentation_id': True,
},
'geneve': {
'display_name': _('Geneve'),
'require_physical_network': False,
'require_segmentation_id': True,
},
'midonet': {
'display_name': _('MidoNet'),
'require_physical_network': False,
'require_segmentation_id': False,
},
'uplink': {
'display_name': _('MidoNet Uplink'),
'require_physical_network': False,
'require_segmentation_id': False,
},
}
# Predefined valid segmentation ID range per network type.
# You can add or override these entries by segmentation_id_range
# in the settings.
SEGMENTATION_ID_RANGE = {
'vlan': (1, 4094),
'gre': (1, (2 ** 32) - 1),
'vxlan': (1, (2 ** 24) - 1),
'geneve': (1, (2 ** 24) - 1),
}
# DEFAULT_PROVIDER_TYPES is used when ['*'] is specified
# in supported_provider_types. This list contains network types
# supported by Neutron ML2 plugin reference implementation.
# You can control enabled network types by
# supported_provider_types setting.
DEFAULT_PROVIDER_TYPES = ['local', 'flat', 'vlan', 'gre', 'vxlan', 'geneve']
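# A hedged example of the OPENSTACK_NEUTRON_NETWORK setting consumed below;
# every key is optional and the 'awesome_type' entry is purely illustrative:
#
# OPENSTACK_NEUTRON_NETWORK = {
#     'supported_provider_types': ['flat', 'vlan', 'vxlan'],
#     'extra_provider_types': {
#         'awesome_type': {
#             'display_name': 'Awesome Type',
#             'require_physical_network': False,
#             'require_segmentation_id': True,
#         },
#     },
#     'segmentation_id_range': {
#         'vlan': (1, 4094),
#         'awesome_type': (1, 100000),
#     },
# }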
class CreateNetwork(forms.SelfHandlingForm):
name = forms.CharField(max_length=255,
label=_("Name"),
required=False)
tenant_id = forms.ThemableChoiceField(label=_("Project"))
if api.neutron.is_port_profiles_supported():
widget = None
else:
widget = forms.HiddenInput()
net_profile_id = forms.ChoiceField(label=_("Network Profile"),
required=False,
widget=widget)
network_type = forms.ChoiceField(
label=_("Provider Network Type"),
help_text=_("The physical mechanism by which the virtual "
"network is implemented."),
widget=forms.ThemableSelectWidget(attrs={
'class': 'switchable',
'data-slug': 'network_type'
}))
physical_network = forms.CharField(
max_length=255,
label=_("Physical Network"),
help_text=_("The name of the physical network over which the "
"virtual network is implemented."),
initial='default',
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'network_type',
}))
segmentation_id = forms.IntegerField(
label=_("Segmentation ID"),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'network_type',
}))
admin_state = forms.ThemableChoiceField(
choices=[(True, _('UP')),
(False, _('DOWN'))],
label=_("Admin State"))
shared = forms.BooleanField(label=_("Shared"),
initial=False, required=False)
external = forms.BooleanField(label=_("External Network"),
initial=False, required=False)
@classmethod
def _instantiate(cls, request, *args, **kwargs):
return cls(request, *args, **kwargs)
def __init__(self, request, *args, **kwargs):
super(CreateNetwork, self).__init__(request, *args, **kwargs)
tenant_choices = [('', _("Select a project"))]
tenants, has_more = api.keystone.tenant_list(request)
for tenant in tenants:
if tenant.enabled:
tenant_choices.append((tenant.id, tenant.name))
self.fields['tenant_id'].choices = tenant_choices
if api.neutron.is_port_profiles_supported():
self.fields['net_profile_id'].choices = (
self.get_network_profile_choices(request))
if api.neutron.is_extension_supported(request, 'provider'):
neutron_settings = getattr(settings,
'OPENSTACK_NEUTRON_NETWORK', {})
self.seg_id_range = SEGMENTATION_ID_RANGE.copy()
seg_id_range = neutron_settings.get('segmentation_id_range')
if seg_id_range:
self.seg_id_range.update(seg_id_range)
self.provider_types = PROVIDER_TYPES.copy()
extra_provider_types = neutron_settings.get('extra_provider_types')
if extra_provider_types:
self.provider_types.update(extra_provider_types)
self.nettypes_with_seg_id = [
net_type for net_type in self.provider_types
if self.provider_types[net_type]['require_segmentation_id']]
self.nettypes_with_physnet = [
net_type for net_type in self.provider_types
if self.provider_types[net_type]['require_physical_network']]
supported_provider_types = neutron_settings.get(
'supported_provider_types', DEFAULT_PROVIDER_TYPES)
if supported_provider_types == ['*']:
supported_provider_types = DEFAULT_PROVIDER_TYPES
undefined_provider_types = [
net_type for net_type in supported_provider_types
if net_type not in self.provider_types]
if undefined_provider_types:
LOG.error('Undefined provider network types are found: %s',
undefined_provider_types)
seg_id_help = [
_("For %(type)s networks, valid IDs are %(min)s to %(max)s.")
% {'type': net_type,
'min': self.seg_id_range[net_type][0],
'max': self.seg_id_range[net_type][1]}
for net_type in self.nettypes_with_seg_id]
self.fields['segmentation_id'].help_text = ' '.join(seg_id_help)
# Register network types which require segmentation ID
attrs = dict(('data-network_type-%s' % network_type,
_('Segmentation ID'))
for network_type in self.nettypes_with_seg_id)
self.fields['segmentation_id'].widget.attrs.update(attrs)
# Register network types which require physical network
attrs = dict(('data-network_type-%s' % network_type,
_('Physical Network'))
for network_type in self.nettypes_with_physnet)
self.fields['physical_network'].widget.attrs.update(attrs)
network_type_choices = [
(net_type, self.provider_types[net_type]['display_name'])
for net_type in supported_provider_types]
if len(network_type_choices) == 0:
self._hide_provider_network_type()
else:
self.fields['network_type'].choices = network_type_choices
else:
self._hide_provider_network_type()
def get_network_profile_choices(self, request):
profile_choices = [('', _("Select a profile"))]
for profile in self._get_profiles(request, 'network'):
profile_choices.append((profile.id, profile.name))
return profile_choices
def _get_profiles(self, request, type_p):
profiles = []
try:
profiles = api.neutron.profile_list(request, type_p)
except Exception:
msg = _('Network Profiles could not be retrieved.')
exceptions.handle(request, msg)
return profiles
def _hide_provider_network_type(self):
self.fields['network_type'].widget = forms.HiddenInput()
self.fields['physical_network'].widget = forms.HiddenInput()
self.fields['segmentation_id'].widget = forms.HiddenInput()
self.fields['network_type'].required = False
self.fields['physical_network'].required = False
self.fields['segmentation_id'].required = False
def handle(self, request, data):
try:
params = {'name': data['name'],
'tenant_id': data['tenant_id'],
'admin_state_up': (data['admin_state'] == 'True'),
'shared': data['shared'],
'router:external': data['external']}
if api.neutron.is_port_profiles_supported():
params['net_profile_id'] = data['net_profile_id']
if api.neutron.is_extension_supported(request, 'provider'):
network_type = data['network_type']
params['provider:network_type'] = network_type
if network_type in self.nettypes_with_physnet:
params['provider:physical_network'] = (
data['physical_network'])
if network_type in self.nettypes_with_seg_id:
params['provider:segmentation_id'] = (
data['segmentation_id'])
network = api.neutron.network_create(request, **params)
msg = _('Network %s was successfully created.') % data['name']
LOG.debug(msg)
messages.success(request, msg)
return network
except Exception:
redirect = reverse('horizon:admin:networks:index')
msg = _('Failed to create network %s') % data['name']
exceptions.handle(request, msg, redirect=redirect)
def clean(self):
cleaned_data = super(CreateNetwork, self).clean()
if api.neutron.is_extension_supported(self.request, 'provider'):
self._clean_physical_network(cleaned_data)
self._clean_segmentation_id(cleaned_data)
return cleaned_data
def _clean_physical_network(self, data):
network_type = data.get('network_type')
if ('physical_network' in self._errors and
network_type not in self.nettypes_with_physnet):
# In this case the physical network is not required, so we can
# ignore any errors.
del self._errors['physical_network']
def _clean_segmentation_id(self, data):
network_type = data.get('network_type')
if 'segmentation_id' in self._errors:
if network_type not in self.nettypes_with_seg_id:
# In this case the segmentation ID is not required, so we can
# ignore any errors.
del self._errors['segmentation_id']
elif network_type in self.nettypes_with_seg_id:
seg_id = data.get('segmentation_id')
seg_id_range = {'min': self.seg_id_range[network_type][0],
'max': self.seg_id_range[network_type][1]}
if seg_id < seg_id_range['min'] or seg_id > seg_id_range['max']:
msg = (_('For a %(network_type)s network, valid segmentation '
'IDs are %(min)s through %(max)s.')
% {'network_type': network_type,
'min': seg_id_range['min'],
'max': seg_id_range['max']})
self._errors['segmentation_id'] = self.error_class([msg])
class UpdateNetwork(forms.SelfHandlingForm):
name = forms.CharField(label=_("Name"), required=False)
tenant_id = forms.CharField(widget=forms.HiddenInput)
network_id = forms.CharField(label=_("ID"),
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
admin_state = forms.ThemableChoiceField(choices=[(True, _('UP')),
(False, _('DOWN'))],
label=_("Admin State"))
shared = forms.BooleanField(label=_("Shared"), required=False)
external = forms.BooleanField(label=_("External Network"), required=False)
failure_url = 'horizon:admin:networks:index'
def handle(self, request, data):
try:
params = {'name': data['name'],
'admin_state_up': (data['admin_state'] == 'True'),
'shared': data['shared'],
'router:external': data['external']}
network = api.neutron.network_update(request,
self.initial['network_id'],
**params)
msg = _('Network %s was successfully updated.') % data['name']
LOG.debug(msg)
messages.success(request, msg)
return network
except Exception:
msg = _('Failed to update network %s') % data['name']
LOG.info(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
| |
import unittest
from django.test import TestCase
from django.http import HttpRequest
from django.core.urlresolvers import reverse
import settings
try:
import json
except ImportError:
import simplejson as json
def print_resp(resp):
if not resp.content:
return
try:
deserialized = json.loads(resp.content)
if 'error_message' in deserialized.keys():
print "ERROR: ", deserialized.get('error_message', '')
print "TRACEBACK: ", deserialized.get('traceback', '')
print json.dumps(deserialized, indent=4)
except:
print "resp is not json: ", resp
############################################
# LISTS
############################################
class ListFieldTest(TestCase):
# fixtures = ['list_field_test.json', 'dict_field_test.json']
def setUp(self):
from django.conf import settings; settings.DEBUG = True
from models import ListFieldTest, DictFieldTest
l = ListFieldTest.objects.create(
list=[1,2,3],
intlist=[1,2,3],
)
l = ListFieldTest.objects.create(
list=[1.0,2.0,3.0],
intlist=[1.0,2.0,3.0],
)
l = ListFieldTest.objects.create(
list=[1,2,3],
intlist=['1','2','3'],
)
def test_get(self):
resp = self.client.get('/api/v1/listfieldtest/',
content_type='application/json')
self.assertEqual(resp.status_code, 200)
deserialized = json.loads(resp.content)
os = deserialized['objects']
self.assertEqual(len(os), 3)
self.assertEqual(os[0]['intlist'], [1,2,3])
self.assertEqual(os[0]['list'], ['1','2','3'])
# Objects get transformed to the underlying type of the list
self.assertEqual(os[1]['intlist'], [1,2,3])
def test_post(self):
post_data = '{"list":["1", "2"], "intlist":[1,2]}'
resp = self.client.post('/api/v1/listfieldtest/',
data = post_data,
content_type = 'application/json'
)
self.assertEqual(resp.status_code, 201)
resp = self.client.get('/api/v1/listfieldtest/',
content_type='application/json')
self.assertEqual(resp.status_code, 200)
deserialized = json.loads(resp.content)
os = deserialized['objects']
self.assertEqual(len(os), 4)
self.assertEqual(os[3]['intlist'], [1,2])
self.assertEqual(os[3]['list'], ['1','2'])
def test_put(self):
resp = self.client.get('/api/v1/listfieldtest/',
content_type='application/json')
self.assertEqual(resp.status_code, 200)
deserialized = json.loads(resp.content)
os = deserialized['objects']
l = os[0]
l['list'] = [4,5]
location = l['resource_uri']
put_data = json.dumps(l)
resp = self.client.put(location,
data=put_data,
content_type='application/json')
self.assertEqual(resp.status_code, 204)
# make sure the update happened
resp = self.client.get('/api/v1/listfieldtest/',
content_type='application/json')
self.assertEqual(resp.status_code, 200)
deserialized = json.loads(resp.content)
l = deserialized['objects'][0]
# the list is of Charfield
self.assertEquals(l['list'], ['4','5'])
resp = self.client.get(location,
content_type='application/json')
deserialized = json.loads(resp.content)
self.assertEqual(deserialized['list'], ['4', '5'])
def test_delete(self):
resp = self.client.get('/api/v1/listfieldtest/',
content_type='application/json')
self.assertEqual(resp.status_code, 200)
deserialized = json.loads(resp.content)
os = deserialized['objects']
old_len = len(os)
location = os[0]['resource_uri']
resp = self.client.delete(location,
content_type='application/json')
self.assertEquals(resp.status_code, 204)
# make sure it's gone
resp = self.client.get('/api/v1/listfieldtest/',
content_type='application/json')
self.assertEqual(resp.status_code, 200)
deserialized = json.loads(resp.content)
os = deserialized['objects']
self.assertEquals(len(os), old_len - 1)
############################################
# EMBEDDED LISTS
############################################
class EmbeddedListFieldTest(TestCase):
# fixtures = ['list_field_test.json', 'dict_field_test.json']
def setUp(self):
from django.conf import settings; settings.DEBUG = True
from models import EmbeddedListFieldTest, PersonTest
p = PersonTest(name="andres")
p1 = PersonTest(name="arman")
l = EmbeddedListFieldTest.objects.create()
l.list.append(p)
l.save()
l.list.append(p1)
l.save()
def test_get(self):
resp = self.client.get('/api/v1/embeddedlistfieldtest/',
content_type='application/json')
self.assertEqual(resp.status_code, 200)
deserialized = json.loads(resp.content)
os = deserialized['objects']
self.assertEqual(len(os), 1)
self.assertEqual(os[0]['list'][0]['name'], 'andres')
def test_post(self):
post_data = '{"list":[{"name":"evan"}, {"name":"ethan"}]}'
resp = self.client.post('/api/v1/embeddedlistfieldtest/',
data = post_data,
content_type = 'application/json'
)
self.assertEqual(resp.status_code, 201)
location = resp['location']
resp = self.client.get('/api/v1/embeddedlistfieldtest/',
content_type='application/json')
self.assertEqual(resp.status_code, 200)
deserialized = json.loads(resp.content)
os = deserialized['objects']
self.assertEqual(len(os), 2)
self.assertEqual(os[1]['list'][0]['name'], 'evan')
def test_put(self):
resp = self.client.get('/api/v1/embeddedlistfieldtest/',
content_type='application/json',
)
deserialized = json.loads(resp.content)
p = deserialized['objects'][0]
p['list'][0]['name'] = "philip"
location = p['resource_uri']
# submit completely new data
put_data = '{"list":[{"name":"evan"}, {"name":"ethan"}]}'
resp = self.client.put(location,
data=put_data,
content_type='application/json',
)
self.assertEquals(resp.status_code, 204)
resp = self.client.get(location,
content_type='application/json',
)
deserialized = json.loads(resp.content)
self.assertEqual(len(deserialized['list']), 2)
self.assertEqual(deserialized['list'][0]['name'], 'evan')
self.assertEqual(deserialized['list'][1]['name'], 'ethan')
def test_delete(self):
resp = self.client.get('/api/v1/embeddedlistfieldtest/',
content_type='application/json',
)
deserialized = json.loads(resp.content)
location = deserialized['objects'][0]['resource_uri']
resp = self.client.delete(location,
content_type='application/json')
self.assertEqual(resp.status_code, 204)
# make sure it's actually gone
resp = self.client.get('/api/v1/embeddedlistfieldtest/',
content_type='application/json',
)
deserialized = json.loads(resp.content)
# boom
self.assertEqual(len(deserialized['objects']), 0)
############################################
# DICTS
############################################
class DictFieldTest(TestCase):
def setUp(self):
from django.conf import settings; settings.DEBUG = True
from models import ListFieldTest, DictFieldTest
self.location = '/api/v1/dictfieldtest/'
l = DictFieldTest.objects.create(
dict={"1":1, '2':'2',})
l = DictFieldTest.objects.create(
dict={"1":1, '2':'2', '3':[1,2,3]})
l = DictFieldTest.objects.create(
dict={"1":1,
'2':'2',
'latlon':[1.234, 2.3443],
'3':[1,2,3],
'4':{'1':1},
},
)
l = DictFieldTest.objects.create(
dict={"1":1,
'2':'2',
})
def test_get(self):
resp = self.client.get(self.location,
content_type='application/json')
self.assertEqual(resp.status_code, 200)
def test_post(self):
post_data = '{"dict":{"1":1, "2":"2", "3":[1,2,3], "4":{"1":1}}}'
resp = self.client.post(self.location,
data = post_data,
content_type = 'application/json'
)
self.assertEqual(resp.status_code, 201)
resp = self.client.get(self.location,
content_type='application/json')
self.assertEqual(resp.status_code, 200)
def test_put(self):
resp = self.client.get(self.location,
content_type='application/json')
self.assertEqual(resp.status_code, 200)
deserialized = json.loads(resp.content)
l = deserialized['objects'][0]
l['dict'] = {'1':'one', 'two':2}
location = l['resource_uri']
put_data = json.dumps(l)
resp = self.client.put(location,
data=put_data,
content_type='application/json')
self.assertEqual(resp.status_code, 204)
# make sure the update happened
resp = self.client.get(self.location,
content_type='application/json')
self.assertEqual(resp.status_code, 200)
deserialized = json.loads(resp.content)
# after the update the object appears last in the listing, so fetch index 3
l = deserialized['objects'][3]
self.assertEquals(l['dict'], {'1':'one', 'two':2})
def test_delete(self):
resp = self.client.get(self.location,
content_type='application/json',
)
deserialized = json.loads(resp.content)
os = deserialized['objects']
location = os[0]['resource_uri']
num_os = len(os)
resp = self.client.delete(location,
content_type='application/json')
self.assertEqual(resp.status_code, 204)
# make sure it's actually gone
resp = self.client.get(self.location,
content_type='application/json',
)
deserialized = json.loads(resp.content)
# boom
self.assertEqual(len(deserialized['objects']), num_os-1)
############################################
# EMBEDDED
############################################
class EmbededModelFieldTest(TestCase):
def setUp(self):
from django.conf import settings; settings.DEBUG = True
from models import PersonTest, EmbeddedModelFieldTest
m = EmbeddedModelFieldTest.objects.create(
customer=PersonTest(name="andres"),
)
ms = EmbeddedModelFieldTest.objects.all()
def test_get(self):
request = HttpRequest()
resp = self.client.get('/api/v1/embeddedmodelfieldtest/',
content_type='application/json',
)
self.assertEqual(resp.status_code, 200)
rj = json.loads(resp.content)
self.assertEqual(rj['objects'][0]['customer']['name'], 'andres')
def test_post(self):
request = HttpRequest()
post_data = '{"customer":{"name":"san"}}'
resp = self.client.post('/api/v1/embeddedmodelfieldtest/',
data=post_data,
content_type='application/json',
)
self.assertEqual(resp.status_code, 201)
# make sure it's there
resp = self.client.get('/api/v1/embeddedmodelfieldtest/',
content_type='application/json',
)
self.assertEqual(resp.status_code, 200)
deserialized = json.loads(resp.content)
self.assertEqual(len(deserialized['objects']), 2)
self.assertEqual(deserialized['objects'][1]['customer']['name'], 'san')
def test_put(self):
resp = self.client.get('/api/v1/embeddedmodelfieldtest/',
content_type='application/json',
)
deserialized = json.loads(resp.content)
p = deserialized['objects'][0]
p['customer']['name'] = "philip"
put_data = json.dumps(p)
location = p['resource_uri']
resp = self.client.put(location,
data=put_data,
content_type='application/json',
)
self.assertEquals(resp.status_code, 204)
resp = self.client.get(location,
content_type='application/json',
)
deserialized = json.loads(resp.content)
self.assertEqual(deserialized,
p)
resp = self.client.get('/api/v1/embeddedmodelfieldtest/',
content_type='application/json',
)
deserialized = json.loads(resp.content)
self.assertEquals(len(deserialized['objects']), 1)
p = deserialized['objects'][0]
self.assertEquals(p['customer']['name'], "philip")
def test_delete(self):
resp = self.client.get('/api/v1/embeddedmodelfieldtest/',
content_type='application/json',
)
deserialized = json.loads(resp.content)
location = deserialized['objects'][0]['resource_uri']
resp = self.client.delete(location,
content_type='application/json')
self.assertEqual(resp.status_code, 204)
# make sure it's actually gone
resp = self.client.get('/api/v1/embeddedmodelfieldtest/',
content_type='application/json',
)
deserialized = json.loads(resp.content)
# boom
self.assertEqual(len(deserialized['objects']), 0)
############################
# EmbeddedCollections
############################
class EmbeddedCollectionFieldTestCase(TestCase):
def setUp(self):
from django.conf import settings; settings.DEBUG = True
from models import PersonTest, EmbeddedCollectionFieldTest
self.m = EmbeddedCollectionFieldTest.objects.create(
list=[PersonTest(name="andres"),PersonTest(name="josh")]
)
ms = EmbeddedCollectionFieldTest.objects.all()
@property
def url(self):
r = lambda name, *args, **kwargs: reverse(name, args=args, kwargs=kwargs)
return r('api_dispatch_subresource_list',
api_name='v1',
resource_name='embeddedcollectionfieldtest',
pk=self.m.id,
subresource_name='list')
def test_get(self):
request = HttpRequest()
resp = self.client.get(self.url,
content_type='application/json',
)
self.assertEqual(resp.status_code, 200)
rj = json.loads(resp.content)
self.assertEqual(len(rj['objects']), 2)
self.assertEqual(rj['objects'][0]['name'], 'andres')
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# A notifier leech invaders internet.
# Copyright (c) 2011, Nycholas de Oliveira e Oliveira <nycholas@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the Nycholas de Oliveira e Oliveira nor the names of
# its contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import glob
import locale
import fnmatch
from distutils.core import setup
from setuptools import find_packages
if sys.platform == 'win32':
import py2exe
# If run without args, build executables, in quiet mode.
if len(sys.argv) == 1:
sys.argv.append('py2exe')
sys.argv.append('-q')
Executable = lambda x, *y, **z: x
setup_requires = ['py2exe']
elif sys.platform == 'linux2':
import cx_Freeze
from cx_Freeze import setup, Executable
setup_requires = ['cx_Freeze']
elif sys.platform == 'darwin':
import py2app
Executable = lambda x, *y, **z: x
setup_requires = ['py2app']
else:
print('Error in build: unsupported platform {0}!'.format(sys.platform))
sys.exit()
locale.setlocale(locale.LC_ALL, '')
try:
sys.setdefaultencoding('utf-8')
except AttributeError:
try:
reload(sys)
sys.setdefaultencoding('utf-8')
except LookupError:
pass
if sys.version_info[0] == 3:
extra_args = dict(use_2to3=True)
else:
extra_args = dict()
long_description = '''\
Leech Invaders
==============
A notifier leech invaders internet.
References
***********
* http://en.wikipedia.org/wiki/Leech
* http://www.voidspace.org.uk/python/articles/authentication.shtml
Installation
************
::
$ python setup.py install
Usage
*****
::
$ leech_invaders.py
Dependencies
************
* Python 2.6 or later (http://www.python.org)
Project Information
*******************
:Author: Nycholas de Oliveira e Oliveira
:License: New BSD License
'''
classifiers = [
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Environment :: X11 Applications :: Qt',
'Intended Audience :: Education',
'License :: OSI Approved :: New BSD License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Programming Language :: Python :: 2.7',
'Topic :: Education',
'Topic :: Scientific/Engineering :: Mathematics',
]
manifest = '''\
<?xml version='1.0' encoding='UTF-8' standalone='yes'?>
<assembly xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>
<assemblyIdentity version='0.64.1.0' processorArchitecture='x86'
name='Controls' type='win32' />
<description>Leech Invaders</description>
<dependency>
<dependentAssembly>
<assemblyIdentity type='win32'
name='Microsoft.Windows.Common-Controls' version='6.0.0.0'
processorArchitecture='X86' publicKeyToken='6595b64144ccf1df'
language='*' />
</dependentAssembly>
</dependency>
</assembly>
'''
def get_packages():
return find_packages()
def get_all_packages():
fls = []
for dirpath, dirnames, filenames in os.walk(os.path.join(os.curdir,
'leech_invaders')):
for filename in fnmatch.filter(filenames, '*.py'):
fileui = os.path.join(dirpath, filename)
if not os.access(fileui, os.R_OK):
raise IOError('Can not access the file {0}' \
.format(fileui))
fileui = fileui.replace('%s%s' % (os.curdir, os.sep), '')
fileui = fileui.replace('%s' % os.sep, '.')
fileui = fileui.replace('.py', '')
fileui = fileui.replace('.__init__', '')
fls.append(fileui)
return fls
def get_include_modules():
inc = [
'encodings.utf_8',
]
return inc
def get_package_modules():
return [
'encodings',
]
def get_scripts():
if os.name == 'posix':
return [os.path.join('resources', 'scripts', 'leech_invaders')]
return [os.path.join('resources', 'scripts', 'leech_invaders.bat')]
def get_data_files():
data_files = []
data_path_src = os.curdir
data_path_dst = os.curdir
data_files.append((data_path_src,
['AUTHORS', 'ChangeLog', 'CONTRIBUTORS', 'COPYING',
'FAQ', 'INSTALL', 'README', 'THANKS', 'TODO',]))
'''locale = os.path.join('resources', 'locale')
try:
langs = [i for i in os.listdir(locale) \
if os.path.isdir(os.path.join(locale, i))]
except OSError:
langs = []
for lang in langs:
listFiles = []
diretory = os.path.join('resources', 'locale', lang, 'LC_MESSAGES')
mo = os.path.join('resources', 'locale', lang,
'LC_MESSAGES', 'leech_invaders.mo')
if os.path.isfile(mo):
listFiles.append(mo)
qm = os.path.join('resources', 'locale', lang,
'LC_MESSAGES', 'leech_invaders.qm')
if os.path.isfile(qm):
listFiles.append(qm)
data_files.append((diretory, listFiles))
'''
return data_files
def get_include_files():
include_files = []
data_path_src = os.curdir
data_path_dst = os.curdir
filelist = ['AUTHORS', 'ChangeLog', 'CONTRIBUTORS', 'COPYING',
'FAQ', 'INSTALL', 'README', 'THANKS', 'TODO',]
for fl in filelist:
include_files.append((os.path.join(data_path_src, fl),
os.path.join(data_path_dst, fl)))
'''locale = os.path.join('resources', 'locale')
try:
langs = [i for i in os.listdir(locale) \
if os.path.isdir(os.path.join(locale, i))]
except OSError:
langs = []
for lang in langs:
listFiles = []
data_path_src = os.path.join('resources', 'locale', lang, 'LC_MESSAGES')
data_path_dst = os.path.join('resources', 'locale', lang, 'LC_MESSAGES')
mo = os.path.join('resources', 'locale', lang,
'LC_MESSAGES', 'leech_invaders.mo')
if os.path.isfile(mo):
include_files.append((mo, mo))
qm = os.path.join('resources', 'locale', lang,
'LC_MESSAGES', 'leech_invaders.qm')
if os.path.isfile(qm):
include_files.append((qm, qm))
'''
return include_files
def run():
setup(name='Leech Invaders',
version='0.1',
url='https://github.com/nycholas/leech-invaders/',
download_url='https://github.com/nycholas/leech-invaders/',
license='New BSD License',
description='''A notifier leech invaders internet.''',
long_description=long_description,
classifiers=classifiers,
platforms=['Many'],
packages=get_packages(),
scripts=get_scripts(),
options={
'py2exe': {
'compressed': 1,
'optimize': 2,
'ascii': 1,
'excludes': [
'pywin',
'pywin.debugger',
'pywin.debugger.dbgcon',
'pywin.dialogs',
'pywin.dialogs.list',
],
'includes': get_include_modules(),
'packages': get_package_modules(),
},
'build_exe': {
'compressed': 1,
'optimize': 2,
'includes': get_include_modules(),
'packages': get_package_modules(),
'include_files': get_include_files(),
'create_shared_zip': 1,
'include_in_shared_zip': get_include_files(),
'icon': os.path.join(os.curdir, 'resources', 'static',
'leech_invaders.png'),
},
'py2app': {
'compressed': 1,
'optimize': 2,
'argv_emulation': 0,
'includes': get_include_modules() + get_all_packages(),
'packages': get_package_modules(),
'resources': ['AUTHORS', 'ChangeLog', 'CONTRIBUTORS', 'COPYING',
'FAQ', 'INSTALL', 'README', 'THANKS', 'TODO',],
'iconfile': os.path.join(os.curdir, 'resources', 'static',
'leech_invaders.icns'),
'plist': {
'CFBundleName': 'Leech Invaders',
'CFBundleShortVersionString': '0.1.0', # must be in X.X.X format
'CFBundleGetInfoString': 'Leech Invaders 0.1',
'CFBundleExecutable': 'Leech Invaders',
'CFBundleIdentifier': 'org.cenobites.leech_invaders',
},
},
},
zipfile=None,
windows=[
{
'script': 'leech_invaders.pyw',
'icon_resources': [
(1, os.path.join(os.curdir, 'resources', 'static',
'leech_invaders.ico'))
],
},
],
data_files=get_data_files(),
executables=[
Executable(
'leech_invaders.py',
copyDependentFiles=1,
icon=os.path.join(os.curdir, 'resources', 'static',
'leech_invaders.png'),
)
],
app=['leech_invaders.py'],
package_data={
'py2app.apptemplate': [
'prebuilt/main-i386',
'prebuilt/main-ppc',
'prebuilt/main-x86_64',
'prebuilt/main-ppc64',
'prebuilt/main-fat',
'prebuilt/main-fat3',
'prebuilt/main-intel',
'prebuilt/main-universal',
'lib/__error__.sh',
'lib/site.py',
'src/main.c',
],
'py2app.bundletemplate': [
'prebuilt/main-i386',
'prebuilt/main-ppc',
'prebuilt/main-x86_64',
'prebuilt/main-ppc64',
'prebuilt/main-fat',
'prebuilt/main-fat3',
'prebuilt/main-intel',
'prebuilt/main-universal',
'lib/__error__.sh',
'lib/site.py',
'src/main.m',
],
},
entry_points={
'distutils.commands': [
'py2app = py2app.build_app:py2app',
],
'distutils.setup_keywords': [
'app = py2app.build_app:validate_target',
'plugin = py2app.build_app:validate_target',
],
'console_scripts': [
'py2applet = py2app.script_py2applet:main',
],
'py2app.converter': [
'xib = py2app.converters.nibfile:convert_xib',
'datamodel = py2app.converters.coredata:convert_datamodel',
'mappingmodel = py2app.converters.coredata:convert_mappingmodel',
],
'py2app.recipe': [
]
},
setup_requires=setup_requires,
**extra_args
)
# Commands:
# ./setup.py clean -a
# ./setup.py build
# ./setup.py py2exe
# ./setup.py install -c -O2
# ./setup.py sdist --formats=bztar,gztar,tar,zip,ztar
# ./setup.py bdist --formats=rpm,gztar,bztar,ztar,tar,wininst,zip
if __name__ == '__main__':
run()
| |
"""JSON implementations of repository searches."""
# pylint: disable=no-init
# Numerous classes don't require __init__.
# pylint: disable=too-many-public-methods,too-few-public-methods
# Number of methods are defined in specification
# pylint: disable=protected-access
# Access to protected methods allowed in package json package scope
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
from . import objects
from . import queries
from .. import utilities
from ..osid import searches as osid_searches
from ..primitives import Id
from ..utilities import get_registry
from dlkit.abstract_osid.osid import errors
from dlkit.abstract_osid.repository import searches as abc_repository_searches
class AssetSearch(abc_repository_searches.AssetSearch, osid_searches.OsidSearch):
"""The search interface for governing asset searches."""
def __init__(self, runtime):
self._namespace = 'repository.Asset'
self._runtime = runtime
record_type_data_sets = get_registry('RESOURCE_RECORD_TYPES', runtime)
self._record_type_data_sets = record_type_data_sets
self._all_supported_record_type_data_sets = record_type_data_sets
self._all_supported_record_type_ids = []
self._id_list = None
for data_set in record_type_data_sets:
self._all_supported_record_type_ids.append(str(Id(**record_type_data_sets[data_set])))
osid_searches.OsidSearch.__init__(self, runtime)
@utilities.arguments_not_none
def search_among_assets(self, asset_ids):
"""Execute this search among the given list of assets.
arg: asset_ids (osid.id.IdList): list of asset ``Ids``
raise: NullArgument - ``asset_ids`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
self._id_list = asset_ids
@utilities.arguments_not_none
def order_asset_results(self, asset_search_order):
"""Specify an ordering to the search results.
arg: asset_search_order (osid.repository.AssetSearchOrder):
asset search order
raise: NullArgument - ``asset_search_order`` is ``null``
raise: Unsupported - ``asset_search_order`` is not of this
service
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
@utilities.arguments_not_none
def get_asset_search_record(self, asset_search_record_type):
"""Gets the asset search record corresponding to the given asset search record ``Type``.
This method is used to retrieve an object implementing the
requested record.
arg: asset_search_record_type (osid.type.Type): an asset
search record type
return: (osid.repository.records.AssetSearchRecord) - the asset
search record
raise: NullArgument - ``asset_search_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unsupported -
``has_record_type(asset_search_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
class AssetSearchResults(abc_repository_searches.AssetSearchResults, osid_searches.OsidSearchResults):
"""This interface provides a means to capture results of a search."""
def __init__(self, results, query_terms, runtime):
# if you don't iterate, then .count() on the cursor is an inaccurate representation of limit / skip
# self._results = [r for r in results]
self._namespace = 'repository.Asset'
self._results = results
self._query_terms = query_terms
self._runtime = runtime
self.retrieved = False
def get_assets(self):
"""Gets the asset list resulting from a search.
return: (osid.repository.AssetList) - the asset list
raise: IllegalState - the list has already been retrieved
*compliance: mandatory -- This method must be implemented.*
"""
if self.retrieved:
raise errors.IllegalState('List has already been retrieved.')
self.retrieved = True
return objects.AssetList(self._results, runtime=self._runtime)
assets = property(fget=get_assets)
def get_asset_query_inspector(self):
"""Gets the inspector for the query to examine the terms used in the search.
return: (osid.repository.AssetQueryInspector) - the query
inspector
*compliance: mandatory -- This method must be implemented.*
"""
return queries.AssetQueryInspector(self._query_terms, runtime=self._runtime)
asset_query_inspector = property(fget=get_asset_query_inspector)
@utilities.arguments_not_none
def get_asset_search_results_record(self, asset_search_record_type):
"""Gets the asset search results record corresponding to the given asset search record ``Type``.
This method is used to retrieve an object implementing the
requested record.
arg: asset_search_record_type (osid.type.Type): an asset
search record type
return: (osid.repository.records.AssetSearchResultsRecord) - the
asset search results record
raise: NullArgument - ``asset_search_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unsupported -
``has_record_type(asset_search_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
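# Illustrative usage sketch (comment only, not part of this module; the
# session and method names follow the OSID spec and are assumed here): an
# AssetSearchSession would typically build an AssetQuery and an AssetSearch,
# optionally call search.search_among_assets(asset_ids) to restrict the
# scope, run the search (e.g. get_assets_by_search(query, search)), and then
# read the results once via AssetSearchResults.get_assets(); a second call
# raises IllegalState('List has already been retrieved.').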
class CompositionSearch(abc_repository_searches.CompositionSearch, osid_searches.OsidSearch):
"""The interface for governing composition searches."""
def __init__(self, runtime):
self._namespace = 'repository.Composition'
self._runtime = runtime
record_type_data_sets = get_registry('RESOURCE_RECORD_TYPES', runtime)
self._record_type_data_sets = record_type_data_sets
self._all_supported_record_type_data_sets = record_type_data_sets
self._all_supported_record_type_ids = []
self._id_list = None
for data_set in record_type_data_sets:
self._all_supported_record_type_ids.append(str(Id(**record_type_data_sets[data_set])))
osid_searches.OsidSearch.__init__(self, runtime)
@utilities.arguments_not_none
def search_among_compositions(self, composition_ids):
"""Execute this search among the given list of compositions.
arg: composition_ids (osid.id.IdList): list of compositions
raise: NullArgument - ``composition_ids`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
self._id_list = composition_ids
@utilities.arguments_not_none
def order_composition_results(self, composition_search_order):
"""Specify an ordering to the search results.
arg: composition_search_order
(osid.repository.CompositionSearchOrder): composition
search order
raise: NullArgument - ``composition_search_order`` is ``null``
raise: Unsupported - ``composition_search_order`` is not of
this service
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
@utilities.arguments_not_none
def get_composition_search_record(self, composition_search_record_type):
"""Gets the composition search record corresponding to the given composition search record ``Type``.
This method is used to retrieve an object implementing the
requested record.
arg: composition_search_record_type (osid.type.Type): a
composition search record type
return: (osid.repository.records.CompositionSearchRecord) - the
composition search record
raise: NullArgument - ``composition_search_record_type`` is
``null``
raise: OperationFailed - unable to complete request
raise: Unsupported -
``has_record_type(composition_search_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
class CompositionSearchResults(abc_repository_searches.CompositionSearchResults, osid_searches.OsidSearchResults):
"""This interface provides a means to capture results of a search."""
def __init__(self, results, query_terms, runtime):
# if you don't iterate, then .count() on the cursor is an inaccurate representation of limit / skip
# self._results = [r for r in results]
self._namespace = 'repository.Composition'
self._results = results
self._query_terms = query_terms
self._runtime = runtime
self.retrieved = False
def get_compositions(self):
"""Gets the composition list resulting from a search.
return: (osid.repository.CompositionList) - the composition list
raise: IllegalState - the list has already been retrieved
*compliance: mandatory -- This method must be implemented.*
"""
if self.retrieved:
raise errors.IllegalState('List has already been retrieved.')
self.retrieved = True
return objects.CompositionList(self._results, runtime=self._runtime)
compositions = property(fget=get_compositions)
def get_composition_query_inspector(self):
"""Gets the inspector for the query to examine the terms used in the search.
return: (osid.repository.CompositionQueryInspector) - the query
inspector
*compliance: mandatory -- This method must be implemented.*
"""
return queries.CompositionQueryInspector(self._query_terms, runtime=self._runtime)
composition_query_inspector = property(fget=get_composition_query_inspector)
@utilities.arguments_not_none
def get_composition_search_results_record(self, composition_search_record_type):
"""Gets the composition search results record corresponding to the given composition search record ``Type``.
This method is used to retrieve an object implementing the
requested record.
arg: composition_search_record_type (osid.type.Type): a
composition search record type
return: (osid.repository.records.CompositionSearchResultsRecord)
- the composition search results record
raise: NullArgument - ``composition_search_record_type`` is
``null``
raise: OperationFailed - unable to complete request
raise: Unsupported -
``has_record_type(composition_search_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
class RepositorySearch(abc_repository_searches.RepositorySearch, osid_searches.OsidSearch):
"""The interface for governing repository searches."""
def __init__(self, runtime):
self._namespace = 'repository.Repository'
self._runtime = runtime
record_type_data_sets = get_registry('RESOURCE_RECORD_TYPES', runtime)
self._record_type_data_sets = record_type_data_sets
self._all_supported_record_type_data_sets = record_type_data_sets
self._all_supported_record_type_ids = []
self._id_list = None
for data_set in record_type_data_sets:
self._all_supported_record_type_ids.append(str(Id(**record_type_data_sets[data_set])))
osid_searches.OsidSearch.__init__(self, runtime)
@utilities.arguments_not_none
def search_among_repositories(self, repository_ids):
"""Execute this search among the given list of repositories.
arg: repository_ids (osid.id.IdList): list of repositories
raise: NullArgument - ``repository_ids`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
self._id_list = repository_ids
@utilities.arguments_not_none
def order_repository_results(self, repository_search_order):
"""Specify an ordering to the search results.
arg: repository_search_order
(osid.repository.RepositorySearchOrder): repository
search order
raise: NullArgument - ``repository_search_order`` is ``null``
raise: Unsupported - ``repository_search_order`` is not of this
service
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
@utilities.arguments_not_none
def get_repository_search_record(self, repository_search_record_type):
"""Gets the repository search record corresponding to the given repository search record ``Type``.
This method is used to retrieve an object implementing the
requested record.
arg: repository_search_record_type (osid.type.Type): a
repository search record type
return: (osid.repository.records.RepositorySearchRecord) - the
repository search record
raise: NullArgument - ``repository_search_record_type`` is
``null``
raise: OperationFailed - unable to complete request
raise: Unsupported -
``has_record_type(repository_search_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
class RepositorySearchResults(abc_repository_searches.RepositorySearchResults, osid_searches.OsidSearchResults):
"""This interface provides a means to capture results of a search."""
def __init__(self, results, query_terms, runtime):
# if you don't iterate, then .count() on the cursor is an inaccurate representation of limit / skip
# self._results = [r for r in results]
self._namespace = 'repository.Repository'
self._results = results
self._query_terms = query_terms
self._runtime = runtime
self.retrieved = False
def get_repositories(self):
"""Gets the repository list resulting from the search.
return: (osid.repository.RepositoryList) - the repository list
raise: IllegalState - the list has already been retrieved
*compliance: mandatory -- This method must be implemented.*
"""
if self.retrieved:
raise errors.IllegalState('List has already been retrieved.')
self.retrieved = True
return objects.RepositoryList(self._results, runtime=self._runtime)
repositories = property(fget=get_repositories)
def get_repository_query_inspector(self):
"""Gets the inspector for the query to examine the terms used in the search.
return: (osid.repository.RepositoryQueryInspector) - the query
inspector
*compliance: mandatory -- This method must be implemented.*
"""
return queries.RepositoryQueryInspector(self._query_terms, runtime=self._runtime)
repository_query_inspector = property(fget=get_repository_query_inspector)
@utilities.arguments_not_none
def get_repository_search_results_record(self, repository_search_record_type):
"""Gets the repository search results record corresponding to the given repository search record ``Type``.
This method is used to retrieve an object implementing the
requested record.
arg: repository_search_record_type (osid.type.Type): a
repository search record type
return: (osid.repository.records.RepositorySearchResultsRecord)
- the repository search results record
raise: NullArgument - ``repository_search_record_type`` is
``null``
raise: OperationFailed - unable to complete request
raise: Unsupported -
``has_record_type(repository_search_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
| |
#!/usr/bin/env python
import codecs
import fnmatch
import os
import subprocess
import sys
import tarfile
import unicodedata
import pandas
import progressbar
from sox import Transformer
from tensorflow.python.platform import gfile
from deepspeech_training.util.downloader import maybe_download
SAMPLE_RATE = 16000
def _download_and_preprocess_data(data_dir):
# Conditionally download data to data_dir
print(
"Downloading Librivox data set (55GB) into {} if not already present...".format(
data_dir
)
)
with progressbar.ProgressBar(max_value=7, widget=progressbar.AdaptiveETA) as bar:
TRAIN_CLEAN_100_URL = (
"http://www.openslr.org/resources/12/train-clean-100.tar.gz"
)
TRAIN_CLEAN_360_URL = (
"http://www.openslr.org/resources/12/train-clean-360.tar.gz"
)
TRAIN_OTHER_500_URL = (
"http://www.openslr.org/resources/12/train-other-500.tar.gz"
)
DEV_CLEAN_URL = "http://www.openslr.org/resources/12/dev-clean.tar.gz"
DEV_OTHER_URL = "http://www.openslr.org/resources/12/dev-other.tar.gz"
TEST_CLEAN_URL = "http://www.openslr.org/resources/12/test-clean.tar.gz"
TEST_OTHER_URL = "http://www.openslr.org/resources/12/test-other.tar.gz"
def filename_of(x):
return os.path.split(x)[1]
train_clean_100 = maybe_download(
filename_of(TRAIN_CLEAN_100_URL), data_dir, TRAIN_CLEAN_100_URL
)
bar.update(0)
train_clean_360 = maybe_download(
filename_of(TRAIN_CLEAN_360_URL), data_dir, TRAIN_CLEAN_360_URL
)
bar.update(1)
train_other_500 = maybe_download(
filename_of(TRAIN_OTHER_500_URL), data_dir, TRAIN_OTHER_500_URL
)
bar.update(2)
dev_clean = maybe_download(filename_of(DEV_CLEAN_URL), data_dir, DEV_CLEAN_URL)
bar.update(3)
dev_other = maybe_download(filename_of(DEV_OTHER_URL), data_dir, DEV_OTHER_URL)
bar.update(4)
test_clean = maybe_download(
filename_of(TEST_CLEAN_URL), data_dir, TEST_CLEAN_URL
)
bar.update(5)
test_other = maybe_download(
filename_of(TEST_OTHER_URL), data_dir, TEST_OTHER_URL
)
bar.update(6)
# Conditionally extract LibriSpeech data
# We extract each archive into data_dir, but test for existence in
# data_dir/LibriSpeech because the archives share that root.
print("Extracting librivox data if not already extracted...")
with progressbar.ProgressBar(max_value=7, widget=progressbar.AdaptiveETA) as bar:
LIBRIVOX_DIR = "LibriSpeech"
work_dir = os.path.join(data_dir, LIBRIVOX_DIR)
_maybe_extract(
data_dir, os.path.join(LIBRIVOX_DIR, "train-clean-100"), train_clean_100
)
bar.update(0)
_maybe_extract(
data_dir, os.path.join(LIBRIVOX_DIR, "train-clean-360"), train_clean_360
)
bar.update(1)
_maybe_extract(
data_dir, os.path.join(LIBRIVOX_DIR, "train-other-500"), train_other_500
)
bar.update(2)
_maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "dev-clean"), dev_clean)
bar.update(3)
_maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "dev-other"), dev_other)
bar.update(4)
_maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "test-clean"), test_clean)
bar.update(5)
_maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "test-other"), test_other)
bar.update(6)
# Convert FLAC data to wav, from:
# data_dir/LibriSpeech/split/1/2/1-2-3.flac
# to:
# data_dir/LibriSpeech/split-wav/1-2-3.wav
#
# And split LibriSpeech transcriptions, from:
# data_dir/LibriSpeech/split/1/2/1-2.trans.txt
# to:
# data_dir/LibriSpeech/split-wav/1-2-0.txt
# data_dir/LibriSpeech/split-wav/1-2-1.txt
# data_dir/LibriSpeech/split-wav/1-2-2.txt
# ...
print("Converting FLAC to WAV and splitting transcriptions...")
with progressbar.ProgressBar(max_value=7, widget=progressbar.AdaptiveETA) as bar:
train_100 = _convert_audio_and_split_sentences(
work_dir, "train-clean-100", "train-clean-100-wav"
)
bar.update(0)
train_360 = _convert_audio_and_split_sentences(
work_dir, "train-clean-360", "train-clean-360-wav"
)
bar.update(1)
train_500 = _convert_audio_and_split_sentences(
work_dir, "train-other-500", "train-other-500-wav"
)
bar.update(2)
dev_clean = _convert_audio_and_split_sentences(
work_dir, "dev-clean", "dev-clean-wav"
)
bar.update(3)
dev_other = _convert_audio_and_split_sentences(
work_dir, "dev-other", "dev-other-wav"
)
bar.update(4)
test_clean = _convert_audio_and_split_sentences(
work_dir, "test-clean", "test-clean-wav"
)
bar.update(5)
test_other = _convert_audio_and_split_sentences(
work_dir, "test-other", "test-other-wav"
)
bar.update(6)
# Write sets to disk as CSV files
train_100.to_csv(
os.path.join(data_dir, "librivox-train-clean-100.csv"), index=False
)
train_360.to_csv(
os.path.join(data_dir, "librivox-train-clean-360.csv"), index=False
)
train_500.to_csv(
os.path.join(data_dir, "librivox-train-other-500.csv"), index=False
)
dev_clean.to_csv(os.path.join(data_dir, "librivox-dev-clean.csv"), index=False)
dev_other.to_csv(os.path.join(data_dir, "librivox-dev-other.csv"), index=False)
test_clean.to_csv(os.path.join(data_dir, "librivox-test-clean.csv"), index=False)
test_other.to_csv(os.path.join(data_dir, "librivox-test-other.csv"), index=False)
def _maybe_extract(data_dir, extracted_data, archive):
# If data_dir/extracted_data does not exist, extract archive in data_dir
if not gfile.Exists(os.path.join(data_dir, extracted_data)):
tar = tarfile.open(archive)
tar.extractall(data_dir)
tar.close()
def _convert_audio_and_split_sentences(extracted_dir, data_set, dest_dir):
source_dir = os.path.join(extracted_dir, data_set)
target_dir = os.path.join(extracted_dir, dest_dir)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
# Loop over transcription files and split each one
#
# The format for each file 1-2.trans.txt is:
# 1-2-0 transcription of 1-2-0.flac
# 1-2-1 transcription of 1-2-1.flac
# ...
#
# Each file is then split into several files:
# 1-2-0.txt (contains transcription of 1-2-0.flac)
# 1-2-1.txt (contains transcription of 1-2-1.flac)
# ...
#
# We also convert the corresponding FLACs to WAV in the same pass
files = []
for root, dirnames, filenames in os.walk(source_dir):
for filename in fnmatch.filter(filenames, "*.trans.txt"):
trans_filename = os.path.join(root, filename)
with codecs.open(trans_filename, "r", "utf-8") as fin:
for line in fin:
# Parse each segment line
first_space = line.find(" ")
seqid, transcript = line[:first_space], line[first_space + 1 :]
# We need to do the encode-decode dance here because encode
# returns a bytes() object on Python 3, and text_to_char_array
# expects a string.
transcript = (
unicodedata.normalize("NFKD", transcript)
.encode("ascii", "ignore")
.decode("ascii", "ignore")
)
transcript = transcript.lower().strip()
# Convert corresponding FLAC to a WAV
flac_file = os.path.join(root, seqid + ".flac")
wav_file = os.path.join(target_dir, seqid + ".wav")
if not os.path.exists(wav_file):
tfm = Transformer()
tfm.set_output_format(rate=SAMPLE_RATE)
tfm.build(flac_file, wav_file)
wav_filesize = os.path.getsize(wav_file)
files.append((os.path.abspath(wav_file), wav_filesize, transcript))
return pandas.DataFrame(
data=files, columns=["wav_filename", "wav_filesize", "transcript"]
)
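# Example invocation (illustrative; the data directory path is assumed):
#   python <this script> /data/librispeech
# downloads the archives into /data/librispeech, extracts them under
# LibriSpeech/, converts the FLAC files to 16 kHz WAV and writes the
# librivox-*.csv index files next to the data.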
if __name__ == "__main__":
_download_and_preprocess_data(sys.argv[1])
| |
import asyncio
import time
from typing import Any, Dict, Optional
from . import constants as c
from .exceptions import EventError
from .utils import http_post
def _get_options(data):
"""
Return a dict of parsed message options.
"""
raw_options = data.get('options', {})
options: Dict[str, Any] = {
'order': None,
'order_key': None,
'throttle': None,
'throttle_key': None,
}
if 'order' in raw_options:
try:
options['order'] = float(raw_options['order'])
except (TypeError, ValueError):
pass
else:
options['order_key'] = raw_options.get('order_key')
if 'throttle' in raw_options:
try:
options['throttle'] = float(raw_options['throttle'])
except (TypeError, ValueError):
pass
else:
options['throttle_key'] = raw_options.get('throttle_key')
return options
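# Example (illustrative, not in the original module):
#   _get_options({'options': {'order': '2', 'order_key': 'user-1'}})
#   -> {'order': 2.0, 'order_key': 'user-1', 'throttle': None, 'throttle_key': None}
# Note that 'order_key' / 'throttle_key' are only picked up when the
# corresponding numeric value parses successfully (the try/except/else above).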
class Subscription:
"""
A subscription of a session to a service and topic.
"""
def __init__(self, config, session, data):
self.config = config
self.session = session
self.shark = session.shark
self.name = data.get('subscription') or ''
self.service: Optional[str] = None
self.topic: Optional[str] = None
if '.' in self.name:
self.service, self.topic = self.name.split('.', 1)
if self.service in config['SERVICES']:
self.service_config = config['SERVICES'][self.service]
self.extra_fields = self.service_config.get('extra_fields', [])
self.extra_data = {
field: data[field]
for field in self.extra_fields
if field in data
}
self.authorizer_fields = self.service_config.get(
'authorizer_fields', []
)
else:
self.service_config = None
self.extra_data = {}
self.authorizer_fields = []
self.authorizer_data = None
# order key -> numeric order (the default order key is None)
self.order_state = {}
# throttle key -> (last message sent timestamp, latest message, task)
# If this is set, the possible states are:
# (ts, None, None) -- No messages are pending.
# (ts, msg, task) -- The given message is scheduled to be sent by the
# given asyncio task.
# (ts, None, task) -- A message is currently being sent by the given
# asyncio task.
self.throttle_state = {}
self._periodic_authorizer_task = None
def validate(self):
if not self.service or not self.topic:
raise EventError(c.ERR_INVALID_SUBSCRIPTION_FORMAT)
if self.service_config is None:
raise EventError(c.ERR_INVALID_SERVICE)
def prepare_service_data(self):
"""
Return a data dict to be sent to the service handler.
"""
data = {'subscription': self.name}
data.update(self.extra_data)
if self.authorizer_data:
data.update(self.authorizer_data)
data.update(self.session.auth_info)
return data
async def perform_service_request(
self,
service_event,
extra_data=None,
error_message=None,
raise_error=True,
):
if service_event in self.service_config:
url = self.service_config[service_event]
data = self.prepare_service_data()
if extra_data is not None:
data.update(extra_data)
result = await http_post(self.shark, url, data)
if raise_error and result.get('status') != 'ok':
raise EventError(
result.get(
'error', error_message or c.ERR_UNHANDLED_EXCEPTION
),
data=result.get('data'),
)
return result
return {'status': 'ok'}
async def authorize_subscription(self):
data = await self.perform_service_request(
'authorizer', error_message=c.ERR_UNAUTHORIZED
)
authorizer_data = {
field: data[field]
for field in self.authorizer_fields
if field in data
}
# If authorizer fields changed during periodic authorization, invoke
# a special callback.
fields_changed = (
self.authorizer_data is not None
and authorizer_data != self.authorizer_data
)
self.authorizer_data = authorizer_data
if fields_changed:
await self.perform_service_request('on_authorization_change')
async def periodic_authorizer(self):
period = self.service_config['authorization_renewal_period']
self.session.trace_log.debug(
'initializing periodic authorizer',
subscription=self.name,
period=period,
)
while True:
await asyncio.sleep(period)
try:
self.session.log.debug(
'verifying authorization', subscription=self.name
)
await self.authorize_subscription()
self.session.log.debug(
'authorization verified', subscription=self.name
)
except EventError as e:
self.session.log.info(
'authorization expired',
subscription=self.name,
error=e.error,
)
await self.self_unsubscribe(e.error)
async def before_subscribe(self):
return await self.perform_service_request('before_subscribe')
async def on_subscribe(self):
return await self.perform_service_request(
'on_subscribe', raise_error=False
)
async def on_message(self, message_data):
return await self.perform_service_request(
'on_message',
extra_data={
'data': message_data,
},
)
async def before_unsubscribe(self, raise_error=True):
return await self.perform_service_request(
'before_unsubscribe', raise_error=raise_error
)
async def on_unsubscribe(self):
return await self.perform_service_request(
'on_unsubscribe', raise_error=False
)
def _should_deliver_message_filter_fields(self, data):
"""
Return whether to deliver the given message based on filter fields.
"""
# Check whether the message is filtered by comparing any defined
# filter_fields to auth_info and extra_fields.
filter_fields = self.service_config.get('filter_fields', [])
for field in filter_fields:
if field in data:
if field in self.extra_fields:
if self.extra_data.get(field) != data[field]:
# Message doesn't match extra fields.
return False
elif self.session.auth_info.get(field) != data[field]:
# Message doesn't match auth fields.
return False
return True
def _should_deliver_message_order(self, data, options):
"""
Return whether to deliver the given message based on order.
"""
order = options['order']
if order is None:
return True
# Check whether the message is out-of-order.
key = options['order_key']
last_order = self.order_state.get(key)
if last_order is not None and order <= last_order:
return False # Message out-of-order.
self.order_state[key] = order
return True
def _should_deliver_message_throttle(self, data, options):
"""
Return whether to deliver the given message based on throttling.
"""
throttle = options['throttle']
if throttle is None:
return True
key = options['throttle_key']
last_throttle = self.throttle_state.get(key)
now = time.time()
if last_throttle:
ts_last_msg_sent, pending_msg, task = last_throttle
if task: # We'll update the message and let the task send it.
self.throttle_state[key] = (ts_last_msg_sent, data, task)
return False
elif now - ts_last_msg_sent < throttle:
# Schedule a task to send the message.
self._schedule_throttled_message_task(
key, ts_last_msg_sent, data
)
return False
# Send current message and store time.
self.throttle_state[key] = (now, None, None)
return True
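# Illustrative timeline (comment only, not original code), assuming each
# message carries throttle=5 seconds and the same throttle key:
#   t=0  message A arrives -> no prior state, A is delivered, state (0, None, None).
#   t=2  message B arrives -> inside the window, a task is scheduled for t=5
#        and the state becomes (0, B, task); B is not sent yet.
#   t=3  message C arrives -> a task already exists, so the state is simply
#        updated to (0, C, task); only C will be sent when the task fires.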
def _schedule_throttled_message_task(self, key, ts_last_msg_sent, data):
options = _get_options(data)
# This should succeed since we parsed it previously
when = ts_last_msg_sent + options['throttle']
task = asyncio.ensure_future(
self._schedule_throttled_message(when, key)
)
self.throttle_state[key] = (ts_last_msg_sent, data, task)
def should_deliver_message(self, data):
"""
Return whether to deliver the given message.
"""
options = _get_options(data)
if not self._should_deliver_message_filter_fields(data):
self.session.trace_log.debug(
'message filtered', data=data, reason='fields'
)
return False
if not self._should_deliver_message_order(data, options):
self.session.trace_log.debug(
'message filtered', data=data, reason='order'
)
return False
if not self._should_deliver_message_throttle(data, options):
self.session.trace_log.debug(
'message filtered', data=data, reason='throttle'
)
return False
return True
async def _schedule_throttled_message(self, when, throttle_key):
delay = when - time.time()
self.session.trace_log.debug(
'throttled message scheduled',
throttle_key=throttle_key,
delay=delay,
)
try:
await asyncio.sleep(delay)
await self._send_throttled_message(throttle_key)
except asyncio.CancelledError: # Cancelled by unsubscribe
self.session.trace_log.debug(
'throttled message canceled', throttle_key=throttle_key
)
except Exception:
self.session.log.exception(
'unhandled exception when sending throttled message'
)
async def _send_throttled_message(self, throttle_key):
# We've unsubscribed meanwhile.
if self.name not in self.session.subscriptions:
self.session.trace_log.debug(
'throttled message subscription invalid',
throttle_key=throttle_key,
)
return
last_throttle = self.throttle_state[throttle_key]
ts_last_msg_sent, pending_msg, task = last_throttle
now = time.time()
self.throttle_state[throttle_key] = (now, None, task)
self.session.trace_log.debug(
'sending throttled message', throttle_key=throttle_key
)
await self.session.send_message(self, pending_msg['data'])
ts_last_msg_sent, pending_msg, task = self.throttle_state[throttle_key]
# A throttled message was submitted while we were sending.
# Schedule another task.
if pending_msg:
self.session.trace_log.debug(
'throttled message submitted while sending',
throttle_key=throttle_key,
)
self._schedule_throttled_message_task(
throttle_key, ts_last_msg_sent, pending_msg
)
else:
self.throttle_state[throttle_key] = (now, None, None)
async def subscribe(self, event):
"""
Subscribe to the subscription.
"""
require_authentication = self.service_config.get(
'require_authentication', True
)
if require_authentication and not self.session.auth_info:
raise EventError(c.ERR_AUTH_REQUIRED)
if self.name in self.session.subscriptions:
raise EventError(c.ERR_ALREADY_SUBSCRIBED)
await self.authorize_subscription()
await self.shark.service_receiver.add_provisional_subscription(
self.session, self.name
)
result = await self.before_subscribe()
self.session.subscriptions[self.name] = self
if self.should_deliver_message(result):
await event.send_ok(result.get('data'))
await self.shark.service_receiver.confirm_subscription(
self.session, self.name
)
if 'authorization_renewal_period' in self.service_config:
self._periodic_authorizer_task = asyncio.ensure_future(
self.periodic_authorizer()
)
await self.on_subscribe()
async def message(self, event):
"""
Send a message to the subscription.
"""
if self.name not in self.session.subscriptions:
raise EventError(c.ERR_SUBSCRIPTION_NOT_FOUND)
message_data = event.data.get('data')
result = await self.on_message(message_data)
if 'data' in result:
if event:
await event.send_ok(result['data'])
async def cleanup_subscription(self):
await self.shark.service_receiver.delete_subscription(
self.session, self.name
)
for throttle in self.throttle_state.values():
ts_last_msg_sent, pending_msg, task = throttle
if task:
task.cancel()
if self._periodic_authorizer_task:
self._periodic_authorizer_task.cancel()
async def unsubscribe(self, event):
"""
Unsubscribe from the subscription.
"""
if self.name not in self.session.subscriptions:
raise EventError(c.ERR_SUBSCRIPTION_NOT_FOUND)
result = await self.before_unsubscribe()
del self.session.subscriptions[self.name]
await self.cleanup_subscription()
await event.send_ok(result.get('data'))
await self.on_unsubscribe()
async def self_unsubscribe(self, error):
"""
Unsubscribe from the subscription (not triggered by the user).
"""
del self.session.subscriptions[self.name]
await self.cleanup_subscription()
result = await self.before_unsubscribe(raise_error=False)
await self.on_unsubscribe()
await self.session.send_unsubscribe(self, result.get('data'), error)
async def force_unsubscribe(self):
"""
Force-unsubscribes from the subscription.
Caller is responsible for deleting the subscription from the session's
subscriptions array. This method is called when a session is
disconnected.
"""
await self.cleanup_subscription()
await self.before_unsubscribe(raise_error=False)
await self.on_unsubscribe()
| |
"""
Field classes.
"""
import datetime
import os
import re
import time
import urlparse
import warnings
from decimal import Decimal, DecimalException
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.core.exceptions import ValidationError
from django.core import validators
import django.utils.copycompat as copy
from django.utils import formats
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode, smart_str
from django.utils.functional import lazy
# Provide this import for backwards compatibility.
from django.core.validators import EMPTY_VALUES
from util import ErrorList
from widgets import TextInput, PasswordInput, HiddenInput, MultipleHiddenInput, \
FileInput, CheckboxInput, Select, NullBooleanSelect, SelectMultiple, \
DateInput, DateTimeInput, TimeInput, SplitDateTimeWidget, SplitHiddenDateTimeWidget
__all__ = (
'Field', 'CharField', 'IntegerField',
'DEFAULT_DATE_INPUT_FORMATS', 'DateField',
'DEFAULT_TIME_INPUT_FORMATS', 'TimeField',
'DEFAULT_DATETIME_INPUT_FORMATS', 'DateTimeField',
'RegexField', 'EmailField', 'FileField', 'ImageField', 'URLField',
'BooleanField', 'NullBooleanField', 'ChoiceField', 'MultipleChoiceField',
'ComboField', 'MultiValueField', 'FloatField', 'DecimalField',
'SplitDateTimeField', 'IPAddressField', 'FilePathField', 'SlugField',
'TypedChoiceField'
)
def en_format(name):
"""
Helper function to stay backward compatible.
"""
from django.conf.locale.en import formats
warnings.warn(
"`django.forms.fields.DEFAULT_%s` is deprecated; use `django.utils.formats.get_format('%s')` instead." % (name, name),
PendingDeprecationWarning
)
return getattr(formats, name)
DEFAULT_DATE_INPUT_FORMATS = lazy(lambda: en_format('DATE_INPUT_FORMATS'), tuple, list)()
DEFAULT_TIME_INPUT_FORMATS = lazy(lambda: en_format('TIME_INPUT_FORMATS'), tuple, list)()
DEFAULT_DATETIME_INPUT_FORMATS = lazy(lambda: en_format('DATETIME_INPUT_FORMATS'), tuple, list)()
class Field(object):
widget = TextInput # Default widget to use when rendering this type of Field.
hidden_widget = HiddenInput # Default widget to use when rendering this as "hidden".
default_validators = [] # Default set of validators
default_error_messages = {
'required': _(u'This field is required.'),
'invalid': _(u'Enter a valid value.'),
}
# Tracks each time a Field instance is created. Used to retain order.
creation_counter = 0
def __init__(self, required=True, widget=None, label=None, initial=None,
help_text=None, error_messages=None, show_hidden_initial=False,
validators=[], localize=False):
# required -- Boolean that specifies whether the field is required.
# True by default.
# widget -- A Widget class, or instance of a Widget class, that should
# be used for this Field when displaying it. Each Field has a
# default Widget that it'll use if you don't specify this. In
# most cases, the default widget is TextInput.
# label -- A verbose name for this field, for use in displaying this
# field in a form. By default, Django will use a "pretty"
# version of the form field name, if the Field is part of a
# Form.
# initial -- A value to use in this Field's initial display. This value
# is *not* used as a fallback if data isn't given.
# help_text -- An optional string to use as "help text" for this Field.
# error_messages -- An optional dictionary to override the default
# messages that the field will raise.
# show_hidden_initial -- Boolean that specifies if it is needed to render a
# hidden widget with initial value after widget.
# validators -- List of additional validators to use
# localize -- Boolean that specifies if the field should be localized.
if label is not None:
label = smart_unicode(label)
self.required, self.label, self.initial = required, label, initial
self.show_hidden_initial = show_hidden_initial
if help_text is None:
self.help_text = u''
else:
self.help_text = smart_unicode(help_text)
widget = widget or self.widget
if isinstance(widget, type):
widget = widget()
# Trigger the localization machinery if needed.
self.localize = localize
# Hook into self.widget_attrs() for any Field-specific HTML attributes.
extra_attrs = self.widget_attrs(widget)
if extra_attrs:
widget.attrs.update(extra_attrs)
self.widget = widget
# Increase the creation counter, and save our local copy.
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self.error_messages = messages
self.validators = self.default_validators + validators
def localize_value(self, value):
return formats.localize_input(value)
def to_python(self, value):
return value
def validate(self, value):
if value in validators.EMPTY_VALUES and self.required:
raise ValidationError(self.error_messages['required'])
def run_validators(self, value):
if value in validators.EMPTY_VALUES:
return
errors = []
for v in self.validators:
try:
v(value)
except ValidationError, e:
if hasattr(e, 'code') and e.code in self.error_messages:
message = self.error_messages[e.code]
if e.params:
message = message % e.params
errors.append(message)
else:
errors.extend(e.messages)
if errors:
raise ValidationError(errors)
def clean(self, value):
"""
Validates the given value and returns its "cleaned" value as an
appropriate Python object.
Raises ValidationError for any errors.
"""
value = self.to_python(value)
self.validate(value)
self.run_validators(value)
return value
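# Worked example (comment only, not part of the original source):
# IntegerField().clean('3') runs to_python('3') -> 3, then validate(3) and
# run_validators(3), and returns 3. IntegerField().clean('') converts to
# None and, because the field is required by default, validate() raises
# ValidationError(u'This field is required.').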
def widget_attrs(self, widget):
"""
Given a Widget instance (*not* a Widget class), returns a dictionary of
any HTML attributes that should be added to the Widget, based on this
Field.
"""
return {}
def __deepcopy__(self, memo):
result = copy.copy(self)
memo[id(self)] = result
result.widget = copy.deepcopy(self.widget, memo)
return result
class CharField(Field):
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
self.max_length, self.min_length = max_length, min_length
super(CharField, self).__init__(*args, **kwargs)
if min_length is not None:
self.validators.append(validators.MinLengthValidator(min_length))
if max_length is not None:
self.validators.append(validators.MaxLengthValidator(max_length))
def to_python(self, value):
"Returns a Unicode object."
if value in validators.EMPTY_VALUES:
return u''
return smart_unicode(value)
def widget_attrs(self, widget):
if self.max_length is not None and isinstance(widget, (TextInput, PasswordInput)):
# The HTML attribute is maxlength, not max_length.
return {'maxlength': str(self.max_length)}
class IntegerField(Field):
default_error_messages = {
'invalid': _(u'Enter a whole number.'),
'max_value': _(u'Ensure this value is less than or equal to %(limit_value)s.'),
'min_value': _(u'Ensure this value is greater than or equal to %(limit_value)s.'),
}
def __init__(self, max_value=None, min_value=None, *args, **kwargs):
super(IntegerField, self).__init__(*args, **kwargs)
if max_value is not None:
self.validators.append(validators.MaxValueValidator(max_value))
if min_value is not None:
self.validators.append(validators.MinValueValidator(min_value))
def to_python(self, value):
"""
Validates that int() can be called on the input. Returns the result
of int(). Returns None for empty values.
"""
value = super(IntegerField, self).to_python(value)
if value in validators.EMPTY_VALUES:
return None
if self.localize:
value = formats.sanitize_separators(value)
try:
value = int(str(value))
except (ValueError, TypeError):
raise ValidationError(self.error_messages['invalid'])
return value
class FloatField(IntegerField):
default_error_messages = {
'invalid': _(u'Enter a number.'),
}
def to_python(self, value):
"""
Validates that float() can be called on the input. Returns the result
of float(). Returns None for empty values.
"""
value = super(IntegerField, self).to_python(value)
if value in validators.EMPTY_VALUES:
return None
if self.localize:
value = formats.sanitize_separators(value)
try:
value = float(value)
except (ValueError, TypeError):
raise ValidationError(self.error_messages['invalid'])
return value
class DecimalField(Field):
default_error_messages = {
'invalid': _(u'Enter a number.'),
'max_value': _(u'Ensure this value is less than or equal to %(limit_value)s.'),
'min_value': _(u'Ensure this value is greater than or equal to %(limit_value)s.'),
'max_digits': _('Ensure that there are no more than %s digits in total.'),
'max_decimal_places': _('Ensure that there are no more than %s decimal places.'),
'max_whole_digits': _('Ensure that there are no more than %s digits before the decimal point.')
}
def __init__(self, max_value=None, min_value=None, max_digits=None, decimal_places=None, *args, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
Field.__init__(self, *args, **kwargs)
if max_value is not None:
self.validators.append(validators.MaxValueValidator(max_value))
if min_value is not None:
self.validators.append(validators.MinValueValidator(min_value))
def to_python(self, value):
"""
Validates that the input is a decimal number. Returns a Decimal
instance. Returns None for empty values. Ensures that there are no more
than max_digits in the number, and no more than decimal_places digits
after the decimal point.
"""
if value in validators.EMPTY_VALUES:
return None
if self.localize:
value = formats.sanitize_separators(value)
value = smart_str(value).strip()
try:
value = Decimal(value)
except DecimalException:
raise ValidationError(self.error_messages['invalid'])
return value
def validate(self, value):
super(DecimalField, self).validate(value)
if value in validators.EMPTY_VALUES:
return
# Check for NaN, Inf and -Inf values. We can't compare directly for NaN,
# since it is never equal to itself. However, NaN is the only value that
# isn't equal to itself, so we can use this to identify NaN
if value != value or value == Decimal("Inf") or value == Decimal("-Inf"):
raise ValidationError(self.error_messages['invalid'])
sign, digittuple, exponent = value.as_tuple()
decimals = abs(exponent)
# digittuple doesn't include any leading zeros.
digits = len(digittuple)
if decimals > digits:
# We have leading zeros up to or past the decimal point. Count
# everything past the decimal point as a digit. We do not count
# 0 before the decimal point as a digit since that would mean
# we would not allow max_digits = decimal_places.
digits = decimals
whole_digits = digits - decimals
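# Worked example (comment only): Decimal('0.0045').as_tuple() gives
# digits=(4, 5) and exponent=-4, so decimals=4 and digits=2; since
# decimals > digits, digits is bumped to 4 and whole_digits ends up 0,
# which is what allows max_digits == decimal_places to validate.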
if self.max_digits is not None and digits > self.max_digits:
raise ValidationError(self.error_messages['max_digits'] % self.max_digits)
if self.decimal_places is not None and decimals > self.decimal_places:
raise ValidationError(self.error_messages['max_decimal_places'] % self.decimal_places)
if self.max_digits is not None and self.decimal_places is not None and whole_digits > (self.max_digits - self.decimal_places):
raise ValidationError(self.error_messages['max_whole_digits'] % (self.max_digits - self.decimal_places))
return value
class DateField(Field):
widget = DateInput
default_error_messages = {
'invalid': _(u'Enter a valid date.'),
}
def __init__(self, input_formats=None, *args, **kwargs):
super(DateField, self).__init__(*args, **kwargs)
self.input_formats = input_formats
def to_python(self, value):
"""
Validates that the input can be converted to a date. Returns a Python
datetime.date object.
"""
if value in validators.EMPTY_VALUES:
return None
if isinstance(value, datetime.datetime):
return value.date()
if isinstance(value, datetime.date):
return value
for format in self.input_formats or formats.get_format('DATE_INPUT_FORMATS'):
try:
return datetime.date(*time.strptime(value, format)[:3])
except ValueError:
continue
raise ValidationError(self.error_messages['invalid'])
class TimeField(Field):
widget = TimeInput
default_error_messages = {
'invalid': _(u'Enter a valid time.')
}
def __init__(self, input_formats=None, *args, **kwargs):
super(TimeField, self).__init__(*args, **kwargs)
self.input_formats = input_formats
def to_python(self, value):
"""
Validates that the input can be converted to a time. Returns a Python
datetime.time object.
"""
if value in validators.EMPTY_VALUES:
return None
if isinstance(value, datetime.time):
return value
for format in self.input_formats or formats.get_format('TIME_INPUT_FORMATS'):
try:
return datetime.time(*time.strptime(value, format)[3:6])
except ValueError:
continue
raise ValidationError(self.error_messages['invalid'])
class DateTimeField(Field):
widget = DateTimeInput
default_error_messages = {
'invalid': _(u'Enter a valid date/time.'),
}
def __init__(self, input_formats=None, *args, **kwargs):
super(DateTimeField, self).__init__(*args, **kwargs)
self.input_formats = input_formats
def to_python(self, value):
"""
Validates that the input can be converted to a datetime. Returns a
Python datetime.datetime object.
"""
if value in validators.EMPTY_VALUES:
return None
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
return datetime.datetime(value.year, value.month, value.day)
if isinstance(value, list):
# Input comes from a SplitDateTimeWidget, for example. So, it's two
# components: date and time.
if len(value) != 2:
raise ValidationError(self.error_messages['invalid'])
value = '%s %s' % tuple(value)
for format in self.input_formats or formats.get_format('DATETIME_INPUT_FORMATS'):
try:
return datetime.datetime(*time.strptime(value, format)[:6])
except ValueError:
continue
raise ValidationError(self.error_messages['invalid'])
class RegexField(CharField):
def __init__(self, regex, max_length=None, min_length=None, error_message=None, *args, **kwargs):
"""
regex can be either a string or a compiled regular expression object.
error_message is an optional error message to use, if
'Enter a valid value' is too generic for you.
"""
# error_message is just kept for backwards compatibility:
if error_message:
error_messages = kwargs.get('error_messages') or {}
error_messages['invalid'] = error_message
kwargs['error_messages'] = error_messages
super(RegexField, self).__init__(max_length, min_length, *args, **kwargs)
if isinstance(regex, basestring):
regex = re.compile(regex)
self.regex = regex
self.validators.append(validators.RegexValidator(regex=regex))
class EmailField(CharField):
default_error_messages = {
'invalid': _(u'Enter a valid e-mail address.'),
}
default_validators = [validators.validate_email]
class FileField(Field):
widget = FileInput
default_error_messages = {
'invalid': _(u"No file was submitted. Check the encoding type on the form."),
'missing': _(u"No file was submitted."),
'empty': _(u"The submitted file is empty."),
'max_length': _(u'Ensure this filename has at most %(max)d characters (it has %(length)d).'),
}
def __init__(self, *args, **kwargs):
self.max_length = kwargs.pop('max_length', None)
super(FileField, self).__init__(*args, **kwargs)
def to_python(self, data):
if data in validators.EMPTY_VALUES:
return None
# UploadedFile objects should have name and size attributes.
try:
file_name = data.name
file_size = data.size
except AttributeError:
raise ValidationError(self.error_messages['invalid'])
if self.max_length is not None and len(file_name) > self.max_length:
error_values = {'max': self.max_length, 'length': len(file_name)}
raise ValidationError(self.error_messages['max_length'] % error_values)
if not file_name:
raise ValidationError(self.error_messages['invalid'])
if not file_size:
raise ValidationError(self.error_messages['empty'])
return data
def clean(self, data, initial=None):
if not data and initial:
return initial
return super(FileField, self).clean(data)
class ImageField(FileField):
default_error_messages = {
'invalid_image': _(u"Upload a valid image. The file you uploaded was either not an image or a corrupted image."),
}
def to_python(self, data):
"""
Checks that the file-upload field data contains a valid image (GIF, JPG,
PNG, possibly others -- whatever the Python Imaging Library supports).
"""
f = super(ImageField, self).to_python(data)
if f is None:
return None
# Try to import PIL in either of the two ways it can end up installed.
try:
from PIL import Image
except ImportError:
import Image
# We need to get a file object for PIL. We might have a path or we might
# have to read the data into memory.
if hasattr(data, 'temporary_file_path'):
file = data.temporary_file_path()
else:
if hasattr(data, 'read'):
file = StringIO(data.read())
else:
file = StringIO(data['content'])
try:
# load() is the only method that can spot a truncated JPEG,
# but it cannot be called sanely after verify()
trial_image = Image.open(file)
trial_image.load()
# Since we're about to use the file again we have to reset the
# file object if possible.
if hasattr(file, 'reset'):
file.reset()
# verify() is the only method that can spot a corrupt PNG,
# but it must be called immediately after the constructor
trial_image = Image.open(file)
trial_image.verify()
except ImportError:
# Under PyPy, it is possible to import PIL. However, the underlying
# _imaging C module isn't available, so an ImportError will be
# raised. Catch and re-raise.
raise
except Exception: # Python Imaging Library doesn't recognize it as an image
raise ValidationError(self.error_messages['invalid_image'])
if hasattr(f, 'seek') and callable(f.seek):
f.seek(0)
return f
class URLField(CharField):
default_error_messages = {
'invalid': _(u'Enter a valid URL.'),
'invalid_link': _(u'This URL appears to be a broken link.'),
}
def __init__(self, max_length=None, min_length=None, verify_exists=False,
validator_user_agent=validators.URL_VALIDATOR_USER_AGENT, *args, **kwargs):
super(URLField, self).__init__(max_length, min_length, *args,
**kwargs)
self.validators.append(validators.URLValidator(verify_exists=verify_exists, validator_user_agent=validator_user_agent))
def to_python(self, value):
if value:
if '://' not in value:
# If no URL scheme given, assume http://
value = u'http://%s' % value
url_fields = list(urlparse.urlsplit(value))
if not url_fields[2]:
# the path portion may need to be added before query params
url_fields[2] = '/'
value = urlparse.urlunsplit(url_fields)
return super(URLField, self).to_python(value)
class BooleanField(Field):
widget = CheckboxInput
def to_python(self, value):
"""Returns a Python boolean object."""
# Explicitly check for the string 'False', which is what a hidden field
# will submit for False. Also check for '0', since this is what
# RadioSelect will provide. Because bool("True") == bool('1') == True,
# we don't need to handle that explicitly.
if value in ('False', '0'):
value = False
else:
value = bool(value)
value = super(BooleanField, self).to_python(value)
if not value and self.required:
raise ValidationError(self.error_messages['required'])
return value
class NullBooleanField(BooleanField):
"""
A field whose valid values are None, True and False. Invalid values are
cleaned to None.
"""
widget = NullBooleanSelect
def to_python(self, value):
"""
Explicitly checks for the strings 'True' and 'False', which is what a
hidden field will submit for True and False, and for '1' and '0', which
is what a RadioSelect will submit. Unlike BooleanField, we need to
explicitly check for True because we are not using the bool() function.
"""
if value in (True, 'True', '1'):
return True
elif value in (False, 'False', '0'):
return False
else:
return None
def validate(self, value):
pass
class ChoiceField(Field):
widget = Select
default_error_messages = {
'invalid_choice': _(u'Select a valid choice. %(value)s is not one of the available choices.'),
}
def __init__(self, choices=(), required=True, widget=None, label=None,
initial=None, help_text=None, *args, **kwargs):
super(ChoiceField, self).__init__(required=required, widget=widget, label=label,
initial=initial, help_text=help_text, *args, **kwargs)
self.choices = choices
def _get_choices(self):
return self._choices
def _set_choices(self, value):
# Setting choices also sets the choices on the widget.
# choices can be any iterable, but we call list() on it because
# it will be consumed more than once.
self._choices = self.widget.choices = list(value)
choices = property(_get_choices, _set_choices)
def to_python(self, value):
"Returns a Unicode object."
if value in validators.EMPTY_VALUES:
return u''
return smart_unicode(value)
def validate(self, value):
"""
Validates that the input is in self.choices.
"""
super(ChoiceField, self).validate(value)
if value and not self.valid_value(value):
raise ValidationError(self.error_messages['invalid_choice'] % {'value': value})
def valid_value(self, value):
"Check to see if the provided value is a valid choice"
for k, v in self.choices:
if isinstance(v, (list, tuple)):
# This is an optgroup, so look inside the group for options
for k2, v2 in v:
if value == smart_unicode(k2):
return True
else:
if value == smart_unicode(k):
return True
return False
class TypedChoiceField(ChoiceField):
def __init__(self, *args, **kwargs):
self.coerce = kwargs.pop('coerce', lambda val: val)
self.empty_value = kwargs.pop('empty_value', '')
super(TypedChoiceField, self).__init__(*args, **kwargs)
def to_python(self, value):
"""
Validate that the value is in self.choices and can be coerced to the
right type.
"""
value = super(TypedChoiceField, self).to_python(value)
super(TypedChoiceField, self).validate(value)
if value == self.empty_value or value in validators.EMPTY_VALUES:
return self.empty_value
try:
value = self.coerce(value)
except (ValueError, TypeError, ValidationError):
raise ValidationError(self.error_messages['invalid_choice'] % {'value': value})
return value
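# Illustrative usage (not original code):
#   TypedChoiceField(choices=[(1, 'One'), (2, 'Two')], coerce=int).clean('1')
# returns the integer 1, while an unlisted value such as '3' raises
# ValidationError with the 'invalid_choice' message.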
def validate(self, value):
pass
class MultipleChoiceField(ChoiceField):
hidden_widget = MultipleHiddenInput
widget = SelectMultiple
default_error_messages = {
'invalid_choice': _(u'Select a valid choice. %(value)s is not one of the available choices.'),
'invalid_list': _(u'Enter a list of values.'),
}
def to_python(self, value):
if not value:
return []
elif not isinstance(value, (list, tuple)):
raise ValidationError(self.error_messages['invalid_list'])
return [smart_unicode(val) for val in value]
def validate(self, value):
"""
Validates that the input is a list or tuple.
"""
if self.required and not value:
raise ValidationError(self.error_messages['required'])
# Validate that each value in the value list is in self.choices.
for val in value:
if not self.valid_value(val):
raise ValidationError(self.error_messages['invalid_choice'] % {'value': val})
class ComboField(Field):
"""
A Field whose clean() method calls multiple Field clean() methods.
"""
def __init__(self, fields=(), *args, **kwargs):
super(ComboField, self).__init__(*args, **kwargs)
# Set 'required' to False on the individual fields, because the
# required validation will be handled by ComboField, not by those
# individual fields.
for f in fields:
f.required = False
self.fields = fields
def clean(self, value):
"""
Validates the given value against all of self.fields, which is a
list of Field instances.
"""
super(ComboField, self).clean(value)
for field in self.fields:
value = field.clean(value)
return value
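# Illustrative usage sketch (not part of the original module): a ComboField
# whose value must satisfy both a length limit and e-mail validation.
# CharField and EmailField are assumed to be defined earlier in this module.
def _example_combo_field():
    field = ComboField(fields=[CharField(max_length=20), EmailField()])
    return field.clean('bob@example.com')  # cleaned by both fields in turn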
class MultiValueField(Field):
"""
A Field that aggregates the logic of multiple Fields.
Its clean() method takes a "decompressed" list of values, which are then
cleaned into a single value according to self.fields. Each value in
this list is cleaned by the corresponding field -- the first value is
cleaned by the first field, the second value is cleaned by the second
field, etc. Once all fields are cleaned, the list of clean values is
"compressed" into a single value.
Subclasses should not have to implement clean(). Instead, they must
implement compress(), which takes a list of valid values and returns a
"compressed" version of those values -- a single value.
You'll probably want to use this with MultiWidget.
"""
default_error_messages = {
'invalid': _(u'Enter a list of values.'),
}
def __init__(self, fields=(), *args, **kwargs):
super(MultiValueField, self).__init__(*args, **kwargs)
# Set 'required' to False on the individual fields, because the
# required validation will be handled by MultiValueField, not by those
# individual fields.
for f in fields:
f.required = False
self.fields = fields
def validate(self, value):
pass
def clean(self, value):
"""
Validates every value in the given list. A value is validated against
the corresponding Field in self.fields.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), clean() would call
DateField.clean(value[0]) and TimeField.clean(value[1]).
"""
clean_data = []
errors = ErrorList()
if not value or isinstance(value, (list, tuple)):
if not value or not [v for v in value if v not in validators.EMPTY_VALUES]:
if self.required:
raise ValidationError(self.error_messages['required'])
else:
return self.compress([])
else:
raise ValidationError(self.error_messages['invalid'])
for i, field in enumerate(self.fields):
try:
field_value = value[i]
except IndexError:
field_value = None
if self.required and field_value in validators.EMPTY_VALUES:
raise ValidationError(self.error_messages['required'])
try:
clean_data.append(field.clean(field_value))
except ValidationError, e:
# Collect all validation errors in a single list, which we'll
# raise at the end of clean(), rather than raising a single
# exception for the first error we encounter.
errors.extend(e.messages)
if errors:
raise ValidationError(errors)
out = self.compress(clean_data)
self.validate(out)
return out
def compress(self, data_list):
"""
Returns a single value for the given list of values. The values can be
assumed to be valid.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), this might return a datetime
object created by combining the date and time in data_list.
"""
raise NotImplementedError('Subclasses must implement this method.')
class FilePathField(ChoiceField):
def __init__(self, path, match=None, recursive=False, required=True,
widget=None, label=None, initial=None, help_text=None,
*args, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
super(FilePathField, self).__init__(choices=(), required=required,
widget=widget, label=label, initial=initial, help_text=help_text,
*args, **kwargs)
if self.required:
self.choices = []
else:
self.choices = [("", "---------")]
if self.match is not None:
self.match_re = re.compile(self.match)
if recursive:
for root, dirs, files in os.walk(self.path):
for f in files:
if self.match is None or self.match_re.search(f):
f = os.path.join(root, f)
self.choices.append((f, f.replace(path, "", 1)))
else:
try:
for f in os.listdir(self.path):
full_file = os.path.join(self.path, f)
if os.path.isfile(full_file) and (self.match is None or self.match_re.search(f)):
self.choices.append((full_file, f))
except OSError:
pass
self.widget.choices = self.choices
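# Illustrative usage sketch (not part of the original module): list the .conf
# files directly under /etc as (full path, file name) choices. The path and
# pattern below are hypothetical.
def _example_file_path_field():
    field = FilePathField(path='/etc', match=r'.*\.conf$', recursive=False,
                          required=False)
    # e.g. [('', '---------'), ('/etc/foo.conf', 'foo.conf'), ...]
    return field.choices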
class SplitDateTimeField(MultiValueField):
widget = SplitDateTimeWidget
hidden_widget = SplitHiddenDateTimeWidget
default_error_messages = {
'invalid_date': _(u'Enter a valid date.'),
'invalid_time': _(u'Enter a valid time.'),
}
def __init__(self, input_date_formats=None, input_time_formats=None, *args, **kwargs):
errors = self.default_error_messages.copy()
if 'error_messages' in kwargs:
errors.update(kwargs['error_messages'])
fields = (
DateField(input_formats=input_date_formats, error_messages={'invalid': errors['invalid_date']}),
TimeField(input_formats=input_time_formats, error_messages={'invalid': errors['invalid_time']}),
)
super(SplitDateTimeField, self).__init__(fields, *args, **kwargs)
def compress(self, data_list):
if data_list:
# Raise a validation error if time or date is empty
# (possible if SplitDateTimeField has required=False).
if data_list[0] in validators.EMPTY_VALUES:
raise ValidationError(self.error_messages['invalid_date'])
if data_list[1] in validators.EMPTY_VALUES:
raise ValidationError(self.error_messages['invalid_time'])
return datetime.datetime.combine(*data_list)
return None
class IPAddressField(CharField):
default_error_messages = {
'invalid': _(u'Enter a valid IPv4 address.'),
}
default_validators = [validators.validate_ipv4_address]
class SlugField(CharField):
default_error_messages = {
'invalid': _(u"Enter a valid 'slug' consisting of letters, numbers,"
u" underscores or hyphens."),
}
default_validators = [validators.validate_slug]
| |
"""Standalone Authenticator."""
import argparse
import collections
import logging
import socket
import threading
import OpenSSL
import six
import zope.component
import zope.interface
from acme import challenges
from acme import standalone as acme_standalone
from letsencrypt import errors
from letsencrypt import interfaces
from letsencrypt.plugins import common
from letsencrypt.plugins import util
logger = logging.getLogger(__name__)
class ServerManager(object):
"""Standalone servers manager.
Manager for `ACMEServer` and `ACMETLSServer` instances.
`certs` and `http_01_resources` correspond to
`acme.crypto_util.SSLSocket.certs` and
`acme.crypto_util.SSLSocket.http_01_resources` respectively. All
created servers share the same certificates and resources, so if
you're running both TLS and non-TLS instances, HTTP01 handlers
will serve the same URLs!
"""
_Instance = collections.namedtuple("_Instance", "server thread")
def __init__(self, certs, http_01_resources):
self._instances = {}
self.certs = certs
self.http_01_resources = http_01_resources
def run(self, port, challenge_type):
"""Run ACME server on specified ``port``.
This method is idempotent, i.e. all calls with the same pair of
``(port, challenge_type)`` will reuse the same server.
:param int port: Port to run the server on.
:param challenge_type: Subclass of `acme.challenges.Challenge`,
        either `acme.challenges.HTTP01` or `acme.challenges.TLSSNI01`.
:returns: Server instance.
:rtype: ACMEServerMixin
"""
assert challenge_type in (challenges.TLSSNI01, challenges.HTTP01)
if port in self._instances:
return self._instances[port].server
address = ("", port)
try:
if challenge_type is challenges.TLSSNI01:
server = acme_standalone.TLSSNI01Server(address, self.certs)
else: # challenges.HTTP01
server = acme_standalone.HTTP01Server(
address, self.http_01_resources)
except socket.error as error:
raise errors.StandaloneBindError(error, port)
thread = threading.Thread(
# pylint: disable=no-member
target=server.serve_forever)
thread.start()
# if port == 0, then random free port on OS is taken
# pylint: disable=no-member
real_port = server.socket.getsockname()[1]
self._instances[real_port] = self._Instance(server, thread)
return server
def stop(self, port):
"""Stop ACME server running on the specified ``port``.
:param int port:
"""
instance = self._instances[port]
logger.debug("Stopping server at %s:%d...",
*instance.server.socket.getsockname()[:2])
instance.server.shutdown()
instance.thread.join()
del self._instances[port]
def running(self):
"""Return all running instances.
Once the server is stopped using `stop`, it will not be
returned.
:returns: Mapping from ``port`` to ``server``.
:rtype: tuple
"""
return dict((port, instance.server) for port, instance
in six.iteritems(self._instances))
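# Illustrative sketch (not part of the original module): minimal lifecycle of a
# ServerManager. Note that run() really binds a socket; port 0 asks the OS for
# any free port. The empty certs/resources containers are placeholders.
def _example_server_manager():
    manager = ServerManager(certs={}, http_01_resources=set())
    server = manager.run(port=0, challenge_type=challenges.HTTP01)
    port = server.socket.getsockname()[1]
    assert manager.running()[port] is server
    manager.stop(port)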
SUPPORTED_CHALLENGES = [challenges.TLSSNI01, challenges.HTTP01]
def supported_challenges_validator(data):
"""Supported challenges validator for the `argparse`.
It should be passed as `type` argument to `add_argument`.
"""
challs = data.split(",")
unrecognized = [name for name in challs
if name not in challenges.Challenge.TYPES]
if unrecognized:
raise argparse.ArgumentTypeError(
"Unrecognized challenges: {0}".format(", ".join(unrecognized)))
choices = set(chall.typ for chall in SUPPORTED_CHALLENGES)
if not set(challs).issubset(choices):
raise argparse.ArgumentTypeError(
"Plugin does not support the following (valid) "
"challenges: {0}".format(", ".join(set(challs) - choices)))
return data
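# Illustrative sketch (not part of the original module): wiring the validator
# into an argparse parser. The flag name below is hypothetical; the plugin's
# real flag is registered through add_parser_arguments() further down.
def _example_supported_challenges_flag():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--standalone-supported-challenges",
        type=supported_challenges_validator,
        default=",".join(chall.typ for chall in SUPPORTED_CHALLENGES))
    # Unknown or unsupported challenge names raise argparse.ArgumentTypeError.
    return parser.parse_args(["--standalone-supported-challenges", "http-01"])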
class Authenticator(common.Plugin):
"""Standalone Authenticator.
This authenticator creates its own ephemeral TCP listener on the
necessary port in order to respond to incoming tls-sni-01 and http-01
challenges from the certificate authority. Therefore, it does not
rely on any existing server program.
"""
zope.interface.implements(interfaces.IAuthenticator)
zope.interface.classProvides(interfaces.IPluginFactory)
description = "Automatically use a temporary webserver"
def __init__(self, *args, **kwargs):
super(Authenticator, self).__init__(*args, **kwargs)
# one self-signed key for all tls-sni-01 certificates
self.key = OpenSSL.crypto.PKey()
self.key.generate_key(OpenSSL.crypto.TYPE_RSA, bits=2048)
self.served = collections.defaultdict(set)
# Stuff below is shared across threads (i.e. servers read
# values, main thread writes). Due to the nature of CPython's
# GIL, the operations are safe, c.f.
# https://docs.python.org/2/faq/library.html#what-kinds-of-global-value-mutation-are-thread-safe
self.certs = {}
self.http_01_resources = set()
self.servers = ServerManager(self.certs, self.http_01_resources)
@classmethod
def add_parser_arguments(cls, add):
add("supported-challenges",
help="Supported challenges. Preferred in the order they are listed.",
type=supported_challenges_validator,
default=",".join(chall.typ for chall in SUPPORTED_CHALLENGES))
@property
def supported_challenges(self):
"""Challenges supported by this plugin."""
return [challenges.Challenge.TYPES[name] for name in
self.conf("supported-challenges").split(",")]
@property
def _necessary_ports(self):
necessary_ports = set()
if challenges.HTTP01 in self.supported_challenges:
necessary_ports.add(self.config.http01_port)
if challenges.TLSSNI01 in self.supported_challenges:
necessary_ports.add(self.config.tls_sni_01_port)
return necessary_ports
def more_info(self): # pylint: disable=missing-docstring
return("This authenticator creates its own ephemeral TCP listener "
"on the necessary port in order to respond to incoming "
"tls-sni-01 and http-01 challenges from the certificate "
"authority. Therefore, it does not rely on any existing "
"server program.")
def prepare(self): # pylint: disable=missing-docstring
pass
def get_chall_pref(self, domain):
# pylint: disable=unused-argument,missing-docstring
return self.supported_challenges
def perform(self, achalls): # pylint: disable=missing-docstring
if any(util.already_listening(port) for port in self._necessary_ports):
raise errors.MisconfigurationError(
"At least one of the (possibly) required ports is "
"already taken.")
try:
return self.perform2(achalls)
except errors.StandaloneBindError as error:
display = zope.component.getUtility(interfaces.IDisplay)
if error.socket_error.errno == socket.errno.EACCES:
display.notification(
"Could not bind TCP port {0} because you don't have "
"the appropriate permissions (for example, you "
"aren't running this program as "
"root).".format(error.port))
elif error.socket_error.errno == socket.errno.EADDRINUSE:
display.notification(
"Could not bind TCP port {0} because it is already in "
"use by another process on this system (such as a web "
"server). Please stop the program in question and then "
"try again.".format(error.port))
else:
raise # XXX: How to handle unknown errors in binding?
def perform2(self, achalls):
"""Perform achallenges without IDisplay interaction."""
responses = []
for achall in achalls:
if isinstance(achall.chall, challenges.HTTP01):
server = self.servers.run(
self.config.http01_port, challenges.HTTP01)
response, validation = achall.response_and_validation()
self.http_01_resources.add(
acme_standalone.HTTP01RequestHandler.HTTP01Resource(
chall=achall.chall, response=response,
validation=validation))
else: # tls-sni-01
server = self.servers.run(
self.config.tls_sni_01_port, challenges.TLSSNI01)
response, (cert, _) = achall.response_and_validation(
cert_key=self.key)
self.certs[response.z_domain] = (self.key, cert)
self.served[server].add(achall)
responses.append(response)
return responses
def cleanup(self, achalls): # pylint: disable=missing-docstring
        # Reduce self.served and close servers once no challenges remain to be served
for server, server_achalls in self.served.items():
for achall in achalls:
if achall in server_achalls:
server_achalls.remove(achall)
for port, server in six.iteritems(self.servers.running()):
if not self.served[server]:
self.servers.stop(port)
| |
"""User related views."""
from django.contrib.auth.decorators import login_required
from django.views.generic import View, ListView, DetailView, CreateView, RedirectView
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponseRedirect, JsonResponse
from core.forms import RegisterUserForm, EditUserForm
from django.shortcuts import render
from django.contrib.auth import authenticate, login
from django.core.urlresolvers import reverse
from core.models import Group, User, Game, GroupInvitation
from django.shortcuts import get_object_or_404
from . import LoginRequiredMixin
def register(request):
"""Register a user account."""
if request.method == 'POST':
form = RegisterUserForm(request.POST)
if form.is_valid():
form.save()
username = request.POST['email']
password = request.POST['password1']
user = authenticate(username=username,
password=password)
login(request, user)
return HttpResponseRedirect(reverse('core:home'))
else:
form = RegisterUserForm()
return render(request, "registration/register.html", {
'form': form,
})
@login_required
def edit(request):
"""Edit a user account."""
if request.method == 'POST':
form = EditUserForm(request.POST, instance=request.user)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('core:profile:base'))
else:
form = EditUserForm(instance=request.user)
return render(request, "user/edit_profile.html", {
'form': form,
})
class ProfileRedirectView(LoginRequiredMixin, RedirectView):
"""Redirect to the user profile page."""
permanent = False
pattern_name = 'core:profile:user-profile'
def get_redirect_url(self, *args, **kwargs):
"""Redirect to the user's page."""
kwargs['pk'] = self.request.user.pk
return super(ProfileRedirectView, self).get_redirect_url(*args, **kwargs)
class ProfileView(DetailView):
"""Display the user profile page."""
template_name = "user/profile.html"
model = User
def get_context_data(self, **kwargs):
context = super(ProfileView, self).get_context_data(**kwargs)
user = User.objects.get(id=self.kwargs.get('pk'))
groups = Group.objects.filter(members=user)
context['games_list'] = Game.objects.filter(group__in=groups)
        context['show_edit'] = user.pk == self.request.user.pk
if self.request.user and self.request.user != user and hasattr(self.request.user, 'group_set'):
invite_groups = set(self.request.user.group_set.all())
invite_groups -= set(groups)
pending_invites = set(i.group for i in GroupInvitation.objects.filter(user=user))
invite_groups -= pending_invites
context['invite_groups'] = invite_groups
return context
class UserGroupsView(LoginRequiredMixin, ListView):
"""Display a list of user's groups."""
template_name = "user/groups.html"
def get_queryset(self):
"""Get all of the user's groups."""
return self.request.user.group_set.all()
def get_context_data(self, **kwargs):
"""Set the page title."""
context = super(UserGroupsView, self).get_context_data(**kwargs)
context["page_title"] = "My Groups"
return context
class GroupsView(ListView):
"""Display a list of ALL groups."""
template_name = "user/groups.html"
def get_queryset(self):
"""Get ALL groups."""
return Group.objects.all()
def get_context_data(self, **kwargs):
"""Set the page title."""
context = super(GroupsView, self).get_context_data(**kwargs)
context["page_title"] = "Groups"
return context
class GroupDetailView(DetailView):
"""Display a single group."""
template_name = "user/group.html"
model = Group
def get_object(self, **kwargs):
"""Get the specific group.
:param **kwargs:
"""
pk = self.kwargs.get('pk')
# TODO: 404 when object not found
return Group.objects.get(id=pk)
def get_context_data(self, **kwargs):
"""Set the page title."""
context = super(GroupDetailView, self).get_context_data(**kwargs)
context["in_group"] = self.request.user in self.object.members.all()
context["request_sent"] = GroupInvitation.objects.filter(user=self.request.user, group=self.object).exists()
context["games"] = self.object.game_set.all()
return context
class GroupInvitationView(LoginRequiredMixin, DetailView):
"""View a group invitation."""
model = GroupInvitation
template_name = "user/invitation.html"
def get_object(self, **kwargs):
"""Get the specific group.
"""
pk = self.kwargs.get('pk')
invitation = get_object_or_404(GroupInvitation, id=pk)
return invitation
# We need this to override django's defaults for posting.
@csrf_exempt
def dispatch(self, *args, **kwargs):
return super(GroupInvitationView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
"""Set the page title."""
context = super(GroupInvitationView, self).get_context_data(**kwargs)
invite = self.object
group = invite.group
user = invite.user
inviting = invite.inviting
# Simplify our checks here
in_group = self.request.user in group.members.all()
is_user = self.request.user == user
        # not inviting means this is a request to join the group
viewable = (inviting and is_user) or (not inviting and in_group)
context["viewable"] = viewable
context["in_group"] = in_group
context["is_user"] = is_user
context["invite"] = invite
return context
def post(self, request, *args, **kwargs):
# Don't do anything about invalid posts
invite = self.get_object()
group = invite.group
if invite.valid_user(request.user):
accept = request.POST.get('accept')
# Convert from a string here
accept = {'True': True, 'False': False}.get(accept)
if accept:
invite.accept()
elif accept is False:
invite.decline()
return JsonResponse({'url': reverse('core:groups-detail',
kwargs={'pk': group.id})})
class GroupJoinView(LoginRequiredMixin, View):
"""Allow a user to join a given group."""
model = Group
# We need this to override django's defaults for posting.
@csrf_exempt
def dispatch(self, *args, **kwargs):
return super(GroupJoinView, self).dispatch(*args, **kwargs)
def get(self, request, *args, **kwargs):
"""Add the logged in user to this group."""
pk = self.kwargs.get('pk')
group = Group.objects.get(id=pk)
user = self.request.user
GroupInvitation.create(user=user, group=group, inviting=False)
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
def post(self, request, *args, **kwargs):
pk = self.kwargs.get('pk')
group = Group.objects.get(id=pk)
user_pk = self.request.POST.get('user')
if user_pk:
user = User.objects.get(id=user_pk)
GroupInvitation.create(user=user, group=group, inviting=True)
return JsonResponse({'success': True})
else:
return JsonResponse({'success': False})
class GroupLeaveView(LoginRequiredMixin, View):
"""Allow a user to leave a given group."""
model = Group
def get(self, request, *args, **kwargs):
"""Remove the logged in user to this group."""
pk = self.kwargs.get('pk')
group = Group.objects.get(id=pk)
group.members.remove(self.request.user)
group.save()
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
class GroupCreateView(LoginRequiredMixin, CreateView):
"""Create a group."""
model = Group
fields = ['name']
template_name = "user/new_group.html"
def form_valid(self, form):
response = super(GroupCreateView, self).form_valid(form)
self.object.members.add(self.request.user)
self.object.save()
return response
| |
"""Utilities for building response surface approximations."""
import numpy as np
import logging
from scipy.optimize import fminbound
from scipy.misc import comb
from misc import process_inputs, process_inputs_outputs
import pdb
class ResponseSurface():
"""
An abstract class for response surfaces.
:cvar int N: Maximum degree of global polynomial in the response surface.
:cvar float Rsqr: The R-squared coefficient for the response surface.
:cvar ndarray X: An ndarray of training points for the response surface. The
shape is M-by-m, where m is the number of dimensions.
:cvar ndarray f: An ndarray of function values used to train the response
surface. The shape of `f` is M-by-1.
**See Also**
utils.response_surfaces.PolynomialApproximation
utils.response_surfaces.RadialBasisApproximation
"""
N = None
Rsqr = None
X, f = None, None
def __init__(self, N=2):
self.N = N
def train(self, X, f):
raise NotImplementedError()
def predict(self, X, compgrad=False):
raise NotImplementedError()
def gradient(self, X):
return self.predict(X, compgrad=True)[1]
def __call__(self, X):
return self.predict(X)[0]
class PolynomialApproximation(ResponseSurface):
"""
A class for least-squares-fit, global, multivariate polynomial
approximation.
:cvar ndarray poly_weights: An ndarray of coefficients for the polynomial
approximation in the monomial basis.
:cvar ndarray g: Contains the m coefficients corresponding to the degree 1
monomials in the polynomial approximation
:cvar ndarray H: An ndarray of shape m-by-m that contains the coefficients
of the degree 2 monomials in the approximation.
**See Also**
utils.response_surfaces.RadialBasisApproximation
**Notes**
All attributes besides the degree `N` are set when the class's `train`
method is called.
"""
poly_weights = None
g, H = None, None
def train(self, X, f):
"""
Train the least-squares-fit polynomial approximation.
:param ndarray X: An ndarray of training points for the polynomial
approximation. The shape is M-by-m, where m is the number of
dimensions.
:param ndarray f: An ndarray of function values used to train the
polynomial approximation. The shape of `f` is M-by-1.
**Notes**
This method sets all the attributes of the class for use in the
`predict` method.
"""
X, f, M, m = process_inputs_outputs(X, f)
# check that there are enough points to train the polynomial
if M < comb(self.N + m, m):
raise Exception('Not enough points to fit response surface of order {:d}'.format(self.N))
logging.getLogger(__name__).debug('Training a degree {:d} polynomial in {:d} dims with {:d} points.'.format(self.N, m, M))
B, indices = polynomial_bases(X, self.N)
p = B.shape[1]
Q, R = np.linalg.qr(B)
Qf = np.dot(Q.T, f)
poly_weights = np.linalg.solve(R, Qf)
Rsqr = 1.0 - ( np.linalg.norm(np.dot(R, poly_weights) - Qf)**2 / np.var(f) )
# store data
self.X, self.f = X, f
self.poly_weights = poly_weights.reshape((p,1))
self.Rsqr = Rsqr
# organize linear and quadratic coefficients
self.g = poly_weights[1:m+1].copy().reshape((m,1))
if self.N > 1:
H = np.zeros((m, m))
for i in range(m+1, indices.shape[0]):
ind = indices[i,:]
loc = np.nonzero(ind!=0)[0]
if loc.size==1:
H[loc,loc] = 2.0*poly_weights[i]
elif loc.size==2:
H[loc[0],loc[1]] = poly_weights[i]
H[loc[1],loc[0]] = poly_weights[i]
else:
raise Exception('Error creating quadratic coefficients.')
self.H = H
def predict(self, X, compgrad=False):
"""
Evaluate the least-squares-fit polynomial approximation at new points.
:param ndarray X: An ndarray of points to evaluate the polynomial
approximation. The shape is M-by-m, where m is the number of
dimensions.
:param bool compgrad: A flag to decide whether or not to compute the
gradient of the polynomial approximation at the points `X`.
:return: f, An ndarray of predictions from the polynomial approximation.
The shape of `f` is M-by-1.
:rtype: ndarray
:return: df, An ndarray of gradient predictions from the polynomial
approximation. The shape of `df` is M-by-m.
:rtype: ndarray
"""
X, M, m = process_inputs(X)
B = polynomial_bases(X, self.N)[0]
f = np.dot(B, self.poly_weights).reshape((M, 1))
if compgrad:
dB = grad_polynomial_bases(X, self.N)
df = np.zeros((M, m))
for i in range(m):
df[:,i] = np.dot(dB[:,:,i], self.poly_weights).reshape((M))
df = df.reshape((M, m))
else:
df = None
return f, df
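# Illustrative usage sketch (not part of the original module): fit a quadratic
# response surface to noisy samples of a known function and evaluate it (and
# its gradient) at a new point. The sample sizes below are arbitrary.
def _example_polynomial_approximation():
    X = np.random.uniform(-1.0, 1.0, size=(50, 2))
    f = (np.sum(X**2, axis=1) + 0.01*np.random.normal(size=50)).reshape((50, 1))
    surface = PolynomialApproximation(N=2)
    surface.train(X, f)
    fpred, dfpred = surface.predict(np.zeros((1, 2)), compgrad=True)
    return surface.Rsqr, fpred, dfpred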
class RadialBasisApproximation(ResponseSurface):
"""
A class for global, multivariate radial basis approximation with anisotropic
squared-exponential radial basis and a weighted-least-squares-fit monomial
basis.
    :cvar ndarray radial_weights: An ndarray of coefficients for the radial basis
functions in the model.
:cvar ndarray poly_weights: An ndarray of coefficients for the polynomial
approximation in the monomial basis.
:cvar ndarray K: An ndarray of shape M-by-M that contains the matrix of
radial basis functions evaluated at the training points.
:cvar ndarray ell: An ndarray of shape m-by-1 that contains the
characteristic length scales along each of the inputs.
**See Also**
utils.response_surfaces.PolynomialApproximation
**Notes**
All attributes besides the degree `N` are set when the class's `train`
method is called.
"""
K, ell = None, None
radial_weights, poly_weights = None, None
def train(self, X, f, v=None, e=None):
"""
Train the radial basis approximation.
:param ndarray X: An ndarray of training points for the polynomial
approximation. The shape is M-by-m, where m is the number of
dimensions.
        :param ndarray f: An ndarray of function values used to train the
polynomial approximation. The shape of `f` is M-by-1.
:param ndarray v: Contains the regularization parameters that model
error in the function values.
:param ndarray e: An ndarray containing the eigenvalues from the active
subspace analysis. If present, the radial basis uses it to
determine the appropriate anisotropy in the length scales.
**Notes**
        The approximation uses a multivariate, squared-exponential radial
basis. If `e` is not None, then the radial basis is anisotropic with
length scales determined by `e`. Otherwise, the basis is isotropic.
The length scale parameters (i.e., the rbf shape parameters) are
determined with a maximum likelihood heuristic inspired by
techniques for fitting a Gaussian process model.
The approximation also includes a monomial basis with monomials of
total degree up to order `N`. These are fit with weighted least-squares,
where the weight matrix is the inverse of the matrix of radial basis
functions evaluated at the training points.
This method sets all the attributes of the class for use in the
`predict` method.
"""
X, f, M, m = process_inputs_outputs(X, f)
# check that there are enough points to train the polynomial
if M < comb(self.N + m, m):
raise Exception('Not enough points to fit response surface of order {:d}'.format(self.N))
logging.getLogger(__name__).debug('Training an RBF surface with degree {:d} polynomial in {:d} dims with {:d} points.'.format(self.N, m, M))
# use maximum likelihood to tune parameters
log10g = fminbound(rbf_objective, -10.0, 1.0, args=(X, f, v, self.N, e, ))
g = 10**(log10g)
if e is None:
ell = g*np.ones((m,1))
if v is None:
v = 1e-6*np.ones(f.shape)
else:
ell = g*np.sum(e)/e[:m]
if v is None:
v = g*np.sum(e[m:])*np.ones(f.shape)
# covariance matrix of observations
K = exponential_squared(X, X, 1.0, ell)
K += np.diag(v.reshape((M,)))
B = polynomial_bases(X, self.N)[0]
p = B.shape[1]
C = np.hstack(( np.vstack(( K, B.T )), np.vstack(( B, np.zeros((p, p)) )) ))
weights = np.linalg.solve(C, np.vstack(( f, np.zeros((p, 1)) )) )
radial_weights, poly_weights = weights[:M], weights[M:]
res = f - np.dot(B, poly_weights)
Rsqr = 1.0 - (np.dot( res.T, np.linalg.solve(K, res)) / np.dot( f.T, np.linalg.solve(K, f) ))
# store parameters
self.X, self.f = X, f
self.ell, self.K = ell, K
self.Rsqr = Rsqr[0,0]
self.radial_weights, self.poly_weights = radial_weights, poly_weights
def predict(self, X, compgrad=False):
"""
Evaluate the radial basis approximation at new points.
:param ndarray X: An ndarray of points to evaluate the polynomial
approximation. The shape is M-by-m, where m is the number of
dimensions.
:param bool compgrad: A flag to decide whether or not to compute the
gradient of the polynomial approximation at the points `X`.
:return: f, An ndarray of predictions from the polynomial approximation.
The shape of `f` is M-by-1.
:rtype: ndarray
:return: df, An ndarray of gradient predictions from the polynomial
approximation. The shape of `df` is M-by-m.
:rtype: ndarray
**Notes**
I'll tell you what. I just refactored this code to use terminology from
radial basis functions instead of Gaussian processes, and I feel so
much better about it. Now I don't have to compute that silly
prediction variance and try to pretend that it has anything to do with
the actual error in the approximation. Also, computing that variance
requires another system solve, which might be expensive. So it's both
expensive and of dubious value. So I got rid of it. Sorry, Gaussian
process people.
"""
X, M, m = process_inputs(X)
#
K = exponential_squared(X, self.X, 1.0, self.ell)
B = polynomial_bases(X, self.N)[0]
f = np.dot(K, self.radial_weights) + np.dot(B, self.poly_weights)
f = f.reshape((M, 1))
if compgrad:
dK = grad_exponential_squared(self.X, X, 1.0, self.ell)
dB = grad_polynomial_bases(X, self.N)
df = np.zeros((M, m))
for i in range(m):
df[:,i] = (np.dot(dK[:,:,i].T, self.radial_weights) + \
np.dot(dB[:,:,i], self.poly_weights)).reshape((M, ))
df = df.reshape((M, m))
else:
df = None
return f, df
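# Illustrative usage sketch (not part of the original module): train an
# isotropic RBF surface (e=None) with the default nugget regularization
# (v=None) and predict at a new point. Sample sizes are arbitrary.
def _example_radial_basis_approximation():
    X = np.random.uniform(-1.0, 1.0, size=(40, 2))
    f = np.exp(np.sum(X, axis=1)).reshape((40, 1))
    surface = RadialBasisApproximation(N=2)
    surface.train(X, f)
    return surface.predict(np.zeros((1, 2)), compgrad=True)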
def rbf_objective(log10g, X, f, v, N, e):
"""
Objective function for the maximum likelihood heuristic for choosing the
rbf shape parameters.
:param float log10g: The log of the scaling factor for the rbf shape
parameters.
:param ndarray X: The ndarray of training points.
:param ndarray f: The ndarray of training data.
:param ndarray v: Contains the regularization parameters for the training
data.
:param int N: The order of polynomial approximation.
:param ndarray e: Contains the eigenvalues from the active subspace
analysis.
:return: r, Objective function value. If you were training a Gaussian
process, it would be the negative log likelihood. In this context, it's
just a heuristic.
:rtype: float
"""
# TODO: I can probably make this implementation more efficient, but as of
# now, I don't need to.
g = 10**(log10g)
M, m = X.shape
if e is None:
ell = g*np.ones((m,1))
if v is None:
v = 1e-6*np.ones(f.shape)
else:
ell = g*np.sum(e)/e[:m]
if v is None:
v = g*np.sum(e[m:])*np.ones(f.shape)
# covariance matrix
K = exponential_squared(X, X, 1.0, ell)
K += np.diag(v.reshape((M,)))
L = np.linalg.cholesky(K)
# polynomial basis
B = polynomial_bases(X, N)[0]
A = np.dot(B.T, np.linalg.solve(K, B))
z = np.dot(B.T, np.linalg.solve(K, f))
beta = np.linalg.solve(A, z)
# residual
res = f - np.dot(B, beta)
# variance
sig2 = np.dot(res.T, np.linalg.solve(K, res))/M
r = np.sum(np.log(np.diag(L))) + M*np.log(sig2)
return r
def exponential_squared(X1, X2, sigma, ell):
"""
Compute the matrix of radial basis functions.
:param ndarray X1: Contains the centers of the radial functions.
:param ndarray X2: The evaluation points of the radial functions.
:param float sigma: Scales the radial functions.
:param ndarray ell: Contains the length scales of each dimension.
:return: C, The matrix of radial functions centered at `X1` and evaluated
at `X2`. The shape of `C` is `X1.shape[0]`-by-`X2.shape[0]`.
:rtype: ndarray
"""
m = X1.shape[0]
n = X2.shape[0]
c = -1.0 / ell.flatten()
C = np.zeros((m, n))
for i in range(n):
x2 = X2[i,:]
B = X1 - x2
C[:,i] = sigma*np.exp(np.dot(B*B, c))
return C
def grad_exponential_squared(X1, X2, sigma, ell):
"""
Compute the matrices of radial basis function gradients.
:param ndarray X1:
`X1` contains the centers of the radial functions.
:param ndarray X2:
`X2` is the evaluation points of the radial functions.
:param float sigma:
`sigma` scales the radial functions.
:param ndarray ell:
`ell` contains the length scales of each dimension.
:return: dC, The matrix of radial function gradients centered at `X1` and
evaluated at `X2`. The shape of `dC` is `X1.shape[0]`-by-`X2.shape[0]`-
by-m. `dC` is a three-dimensional ndarray. The third dimension indexes
the partial derivatives in each gradient.
:rtype: ndarray
"""
m, d = X1.shape
n = X2.shape[0]
c = -1.0 / ell.flatten()
C = np.zeros((m, n, d))
for k in range(d):
for i in range(n):
x2 = X2[i,:]
B = X1 - x2
C[:,i,k] = sigma*(-2.0*c[k]*B[:,k])*np.exp(np.dot(B*B, c))
return C
def polynomial_bases(X, N):
"""
Compute the monomial bases.
:param ndarray X: Contains the points to evaluate the monomials.
:param int N: The maximum degree of the monomial basis.
:return: B, Contains the monomial evaluations.
:rtype: ndarray
:return: I, Contains the multi-indices that tell the degree of each
univariate monomial term in the multivariate monomial.
:rtype: ndarray
"""
M, m = X.shape
I = index_set(N, m)
n = I.shape[0]
B = np.zeros((M, n))
for i in range(n):
ind = I[i,:]
B[:,i] = np.prod(np.power(X, ind), axis=1)
return B, I
def grad_polynomial_bases(X, N):
"""
Compute the gradients of the monomial bases.
:param ndarray X: Contains the points to evaluate the monomials.
:param int N: The maximum degree of the monomial basis.
    :return: dB, Contains the gradients of the monomials evaluated at `X`. `dB`
is a three-dimensional ndarray. The third dimension indexes the partial
derivatives in each gradient.
:rtype: ndarray
"""
M, m = X.shape
I = index_set(N, m)
n = I.shape[0]
B = np.zeros((M, n, m))
for k in range(m):
for i in range(n):
ind = I[i,:].copy()
indk = ind[k]
if indk==0:
B[:,i,k] = np.zeros(M)
else:
ind[k] -= 1
B[:,i,k] = indk*np.prod(np.power(X, ind), axis=1)
return B
def _full_index_set(n, d):
"""
A helper function for index_set.
"""
if d == 1:
I = np.array([[n]])
else:
II = _full_index_set(n, d-1)
m = II.shape[0]
I = np.hstack((np.zeros((m, 1)), II))
for i in range(1, n+1):
II = _full_index_set(n-i, d-1)
m = II.shape[0]
T = np.hstack((i*np.ones((m, 1)), II))
I = np.vstack((I, T))
return I
def index_set(n, d):
"""
Enumerate multi-indices for a total degree of order `n` in `d` variables.
:param int n:
:param int d:
:return: I
:rtype: ndarray
"""
I = np.zeros((1, d))
for i in range(1, n+1):
II = _full_index_set(i, d)
I = np.vstack((I, II))
return I[:,::-1]
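# Illustrative sketch (not part of the original module): for total degree 2 in
# 2 variables, index_set(2, 2) returns the 6 multi-indices (as a float ndarray)
#   [[0, 0], [1, 0], [0, 1], [2, 0], [1, 1], [0, 2]]
# i.e. the constant term, the two linear terms, and the three quadratic terms,
# in the column order used by polynomial_bases above.
def _example_index_set():
    return index_set(2, 2)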
| |
import os
import csv
import requests
from datetime import datetime
import simplejson as json
import platform
import base64
import ohmysportsfeedspy
from ohmysportsfeedspy.v1_0 import API_v1_0
# API class for dealing with v2.1 of the API
class API_v2_1(API_v1_0):
# Constructor
def __init__(self, verbose, store_type=None, store_location=None):
super().__init__(verbose, store_type, store_location)
self.base_url = "https://api.mysportsfeeds.com/v2.1/pull"
self.valid_feeds = [
'seasonal_games',
'daily_games',
'weekly_games',
'seasonal_dfs',
'daily_dfs',
'weekly_dfs',
'seasonal_player_gamelogs',
'daily_player_gamelogs',
'weekly_player_gamelogs',
'seasonal_player_stats_projections',
'daily_player_gamelogs_projections',
'weekly_player_gamelogs_projections',
'seasonal_team_gamelogs',
'daily_team_gamelogs',
'weekly_team_gamelogs',
'game_boxscore',
'game_playbyplay',
'game_lineup',
'current_season',
'player_injuries',
'latest_updates',
'seasonal_team_stats',
'seasonal_player_stats',
'seasonal_venues',
'players',
'seasonal_standings',
'seasonal_game_lines',
'daily_game_lines',
'daily_futures'
]
# Feed URL
def determine_url(self, league, season, feed, output_format, params):
if feed == "seasonal_games":
if season == "":
raise AssertionError("You must specify a season for this request.")
return "{base_url}/{league}/{season}/games.{output}".format(base_url=self.base_url, league=league, season=season, output=output_format)
elif feed == "daily_games":
if season == "":
raise AssertionError("You must specify a season for this request.")
if not "date" in params:
raise AssertionError("You must specify a 'date' param for this request.")
return "{base_url}/{league}/{season}/date/{date}/games.{output}".format(base_url=self.base_url, league=league, season=season, date=params["date"], output=output_format)
elif feed == "weekly_games":
if season == "":
raise AssertionError("You must specify a season for this request.")
if not "week" in params:
raise AssertionError("You must specify a 'week' param for this request.")
return "{base_url}/{league}/{season}/week/{week}/games.{output}".format(base_url=self.base_url, league=league, season=season, week=params["week"], output=output_format)
elif feed == "seasonal_dfs":
if season == "":
raise AssertionError("You must specify a season for this request.")
return "{base_url}/{league}/{season}/dfs.{output}".format(base_url=self.base_url, league=league, season=season, output=output_format)
elif feed == "daily_dfs":
if season == "":
raise AssertionError("You must specify a season for this request.")
if not "date" in params:
raise AssertionError("You must specify a 'date' param for this request.")
return "{base_url}/{league}/{season}/date/{date}/dfs.{output}".format(base_url=self.base_url, league=league, season=season, date=params["date"], output=output_format)
elif feed == "weekly_dfs":
if season == "":
raise AssertionError("You must specify a season for this request.")
if not "week" in params:
raise AssertionError("You must specify a 'week' param for this request.")
return "{base_url}/{league}/{season}/week/{week}/dfs.{output}".format(base_url=self.base_url, league=league, season=season, week=params["week"], output=output_format)
elif feed == "seasonal_player_gamelogs":
if season == "":
raise AssertionError("You must specify a season for this request.")
return "{base_url}/{league}/{season}/player_gamelogs.{output}".format(base_url=self.base_url, league=league, season=season, output=output_format)
elif feed == "daily_player_gamelogs":
if season == "":
raise AssertionError("You must specify a season for this request.")
if not "date" in params:
raise AssertionError("You must specify a 'date' param for this request.")
return "{base_url}/{league}/{season}/date/{date}/player_gamelogs.{output}".format(base_url=self.base_url, league=league, season=season, date=params["date"], output=output_format)
elif feed == "weekly_player_gamelogs":
if season == "":
raise AssertionError("You must specify a season for this request.")
if not "week" in params:
raise AssertionError("You must specify a 'week' param for this request.")
return "{base_url}/{league}/{season}/week/{week}/player_gamelogs.{output}".format(base_url=self.base_url, league=league, season=season, week=params["week"], output=output_format)
elif feed == "seasonal_player_stats_projections":
if season == "":
raise AssertionError("You must specify a season for this request.")
return "{base_url}/{league}/{season}/player_stats_totals_projections.{output}".format(base_url=self.base_url, league=league, season=season, output=output_format)
elif feed == "daily_player_gamelogs_projections":
if season == "":
raise AssertionError("You must specify a season for this request.")
if not "date" in params:
raise AssertionError("You must specify a 'date' param for this request.")
return "{base_url}/{league}/{season}/date/{date}/player_gamelogs_projections.{output}".format(base_url=self.base_url, league=league, season=season, date=params["date"], output=output_format)
elif feed == "weekly_player_gamelogs_projections":
if season == "":
raise AssertionError("You must specify a season for this request.")
if not "week" in params:
raise AssertionError("You must specify a 'week' param for this request.")
return "{base_url}/{league}/{season}/week/{week}/player_gamelogs_projections.{output}".format(base_url=self.base_url, league=league, season=season, week=params["week"], output=output_format)
elif feed == "seasonal_team_gamelogs":
if season == "":
raise AssertionError("You must specify a season for this request.")
return "{base_url}/{league}/{season}/team_gamelogs.{output}".format(base_url=self.base_url, league=league, season=season, output=output_format)
elif feed == "daily_team_gamelogs":
if season == "":
raise AssertionError("You must specify a season for this request.")
if not "date" in params:
raise AssertionError("You must specify a 'date' param for this request.")
return "{base_url}/{league}/{season}/date/{date}/team_gamelogs.{output}".format(base_url=self.base_url, league=league, season=season, date=params["date"], output=output_format)
elif feed == "weekly_team_gamelogs":
if season == "":
raise AssertionError("You must specify a season for this request.")
if not "week" in params:
raise AssertionError("You must specify a 'week' param for this request.")
return "{base_url}/{league}/{season}/week/{week}/team_gamelogs.{output}".format(base_url=self.base_url, league=league, season=season, week=params["week"], output=output_format)
elif feed == "game_boxscore":
if season == "":
raise AssertionError("You must specify a season for this request.")
if not "game" in params:
raise AssertionError("You must specify a 'game' param for this request.")
return "{base_url}/{league}/{season}/games/{game}/boxscore.{output}".format(base_url=self.base_url, league=league, season=season, game=params["game"], output=output_format)
elif feed == "game_playbyplay":
if season == "":
raise AssertionError("You must specify a season for this request.")
if not "game" in params:
raise AssertionError("You must specify a 'game' param for this request.")
return "{base_url}/{league}/{season}/games/{game}/playbyplay.{output}".format(base_url=self.base_url, league=league, season=season, game=params["game"], output=output_format)
elif feed == "game_lineup":
if season == "":
raise AssertionError("You must specify a season for this request.")
if not "game" in params:
raise AssertionError("You must specify a 'game' param for this request.")
return "{base_url}/{league}/{season}/games/{game}/lineup.{output}".format(base_url=self.base_url, league=league, season=season, game=params["game"], output=output_format)
elif feed == "current_season":
return "{base_url}/{league}/current_season.{output}".format(base_url=self.base_url, league=league, output=output_format)
elif feed == "player_injuries":
return "{base_url}/{league}/injuries.{output}".format(base_url=self.base_url, league=league, output=output_format)
elif feed == "latest_updates":
if season == "":
raise AssertionError("You must specify a season for this request.")
return "{base_url}/{league}/{season}/latest_updates.{output}".format(base_url=self.base_url, league=league, season=season, output=output_format)
elif feed == "seasonal_team_stats":
if season == "":
raise AssertionError("You must specify a season for this request.")
return "{base_url}/{league}/{season}/team_stats_totals.{output}".format(base_url=self.base_url, league=league, season=season, output=output_format)
elif feed == "seasonal_player_stats":
if season == "":
raise AssertionError("You must specify a season for this request.")
return "{base_url}/{league}/{season}/player_stats_totals.{output}".format(base_url=self.base_url, league=league, season=season, output=output_format)
elif feed == "seasonal_venues":
if season == "":
raise AssertionError("You must specify a season for this request.")
return "{base_url}/{league}/{season}/venues.{output}".format(base_url=self.base_url, league=league, season=season, output=output_format)
elif feed == "players":
return "{base_url}/{league}/players.{output}".format(base_url=self.base_url, league=league, output=output_format)
elif feed == "seasonal_standings":
if season == "":
raise AssertionError("You must specify a season for this request.")
return "{base_url}/{league}/{season}/standings.{output}".format(base_url=self.base_url, league=league, season=season, output=output_format)
elif feed == "seasonal_game_lines":
if season == "":
raise AssertionError("You must specify a season for this request.")
return "{base_url}/{league}/{season}/odds_gamelines.{output}".format(base_url=self.base_url, league=league, season=season, output=output_format)
elif feed == "daily_game_lines":
if season == "":
raise AssertionError("You must specify a season for this request.")
if "date" not in params:
raise AssertionError("You must specify a 'date' param for this request.")
return "{base_url}/{league}/{season}/date/{date}/odds_gamelines.{output}".format(base_url=self.base_url, league=league,
season=season, date=params["date"], output=output_format)
elif feed == "daily_futures":
if season == "":
raise AssertionError("You must specify a season for this request.")
if "date" not in params:
raise AssertionError("You must specify a 'date' param for this request.")
return "{base_url}/{league}/{season}/date/{date}/odds_futures.{output}".format(base_url=self.base_url, league=league,
season=season, date=params["date"], output=output_format)
else:
return ""
| |
#!/usr/bin/env python
import sys
class DynamicMulticastFullTrialRecord(object):
def __init__(self, packet_loss, avg_num_flows, avg_max_link_mbps, avg_avg_link_mbps, avg_traffic_conc, avg_link_mbps_std_dev, avg_flow_tracker_response_time,
avg_flow_tracker_network_time, avg_flow_tracker_processing_time, avg_switch_load_mbps, avg_num_active_receivers):
self.packet_loss = packet_loss
self.avg_num_flows = avg_num_flows
self.avg_max_link_mbps = avg_max_link_mbps
self.avg_avg_link_mbps = avg_avg_link_mbps
self.avg_traffic_conc = avg_traffic_conc
self.avg_link_mbps_std_dev = avg_link_mbps_std_dev
self.avg_flow_tracker_response_time = avg_flow_tracker_response_time
self.avg_flow_tracker_network_time = avg_flow_tracker_network_time
self.avg_flow_tracker_processing_time = avg_flow_tracker_processing_time
self.avg_switch_load_mbps = avg_switch_load_mbps
self.avg_num_active_receivers = avg_num_active_receivers
class DynamicMulticastTimeSnapshotRecord(object):
def __init__(self, time_index, sim_time, num_flows, max_link_mbps, avg_link_mbps, traffic_conc, link_mbps_std_dev, flow_tracker_response_time,
flow_tracker_network_time, flow_tracker_processing_time, switch_load_mbps, num_active_receivers):
self.time_index = time_index
self.sim_time = sim_time
self.num_flows = num_flows
self.max_link_mbps = max_link_mbps
self.avg_link_mbps = avg_link_mbps
self.traffic_conc = traffic_conc
self.link_mbps_std_dev = link_mbps_std_dev
self.flow_tracker_response_time = flow_tracker_response_time
self.flow_tracker_network_time = flow_tracker_network_time
self.flow_tracker_processing_time = flow_tracker_processing_time
self.switch_load_mbps = switch_load_mbps
self.num_active_receivers = num_active_receivers
def mean_confidence_interval(data, confidence=0.95):
import scipy.stats
from numpy import mean, array, sqrt
a = 1.0 * array(data)
n = len(a)
m, se = mean(a), scipy.stats.sem(a)
    # ppf is the inverse CDF of the Student's t distribution
    h = se * scipy.stats.t.ppf((1 + confidence) / 2., n - 1)
    # Returned as (upper, lower) so the (ci_upper, ci_lower) unpacking used in
    # print_dynamic_trial_statistics prints the intervals as [lower, upper].
    return m + h, m - h
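# Illustrative sketch (not part of the original script): the half-width of the
# interval, abs(upper - lower) / 2, is what the statistics code below stores as
# the +/- error bar for each metric. The sample values are made up.
def _example_mean_confidence_interval():
    hi, lo = mean_confidence_interval([10.0, 12.0, 9.0, 11.0, 13.0])
    return abs(hi - lo) / 2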
def read_dynamic_log_set(filepath_prefix, num_logs):
snapshot_records = [] # list of lists -> snapshot_records[time_index] = list of DynamicMulticastTimeSnapshotRecord
full_trial_records = [] # List of DynamicMulticastFullTrialRecord
for log_index in range(0, num_logs):
filepath = filepath_prefix + str(log_index) + '.log'
log_file = open(filepath, 'r')
snapshots_cur_log = []
packet_loss = 0
valid_log = True
for line in log_file:
if 'AvgPacketLoss:' in line:
split_line = line.strip().split(' ')
packet_loss = float(split_line[2][len('AvgPacketLoss:'):])
if packet_loss > 20:
# This occurs when the controller's LLDP polling incorrectly detects a link as down
# due to congestion on the link. When this occurs, all multicast routes are recalculated,
# and sometimes the particular link that was detected as down bisects the network, preventing
# multicast delivery and resulting in high packet loss.
print 'WARNING: DISCARDING OUTLIER WITH EXCESSIVE PACKET LOSS'
valid_log = False
break
if 'TimeIndex:' in line:
split_line = line.strip().split(' ')
time_index = int(split_line[0][len('TimeIndex:'):])
sim_time = float(split_line[1][len('SimTime:'):])
num_flows = int(split_line[2][len('TotalNumFlows:'):])
max_link_mbps = float(split_line[3][len('MaxLinkUsageMbps:'):])
avg_link_mbps = float(split_line[4][len('AvgLinkUsageMbps:'):])
traffic_conc = float(split_line[5][len('TrafficConcentration:'):])
link_mbps_std_dev = float(split_line[6][len('LinkUsageStdDev:'):])
flow_tracker_response_time = float(split_line[7][len('ResponseTime:'):])
flow_tracker_network_time = float(split_line[8][len('NetworkTime:'):])
flow_tracker_processing_time = float(split_line[9][len('ProcessingTime:'):])
switch_load_mbps = float(split_line[10][len('SwitchAvgLoadMbps:'):])
num_active_receivers = int(split_line[11][len('NumActiveReceivers:'):])
snapshot_record = DynamicMulticastTimeSnapshotRecord(time_index, sim_time, num_flows, max_link_mbps, avg_link_mbps, traffic_conc,
link_mbps_std_dev, flow_tracker_response_time, flow_tracker_network_time, flow_tracker_processing_time, switch_load_mbps,
num_active_receivers)
if time_index < len(snapshot_records):
snapshot_records[time_index].append(snapshot_record)
else:
snapshot_records.append([snapshot_record])
snapshots_cur_log.append(snapshot_record)
if valid_log:
avg_num_flows = float(sum(snapshot_record.num_flows for snapshot_record in snapshots_cur_log)) / len(snapshots_cur_log)
avg_max_link_mbps = float(sum(snapshot_record.max_link_mbps for snapshot_record in snapshots_cur_log)) / len(snapshots_cur_log)
avg_avg_link_mbps = float(sum(snapshot_record.avg_link_mbps for snapshot_record in snapshots_cur_log)) / len(snapshots_cur_log)
avg_traffic_conc = float(sum(snapshot_record.traffic_conc for snapshot_record in snapshots_cur_log)) / len(snapshots_cur_log)
avg_link_mbps_std_dev = float(sum(snapshot_record.link_mbps_std_dev for snapshot_record in snapshots_cur_log)) / len(snapshots_cur_log)
avg_flow_tracker_response_time = float(sum(snapshot_record.flow_tracker_response_time for snapshot_record in snapshots_cur_log)) / len(snapshots_cur_log)
avg_flow_tracker_network_time = float(sum(snapshot_record.flow_tracker_network_time for snapshot_record in snapshots_cur_log)) / len(snapshots_cur_log)
avg_flow_tracker_processing_time = float(sum(snapshot_record.flow_tracker_processing_time for snapshot_record in snapshots_cur_log)) / len(snapshots_cur_log)
avg_switch_load_mbps = float(sum(snapshot_record.switch_load_mbps for snapshot_record in snapshots_cur_log)) / len(snapshots_cur_log)
avg_num_active_receivers = float(sum(snapshot_record.num_active_receivers for snapshot_record in snapshots_cur_log)) / len(snapshots_cur_log)
full_trial_record = DynamicMulticastFullTrialRecord(packet_loss, avg_num_flows, avg_max_link_mbps, avg_avg_link_mbps, avg_traffic_conc, avg_link_mbps_std_dev,
avg_flow_tracker_response_time, avg_flow_tracker_network_time, avg_flow_tracker_processing_time, avg_switch_load_mbps, avg_num_active_receivers)
full_trial_records.append(full_trial_record)
log_file.close()
print 'Processed log: ' + str(filepath)
return snapshot_records, full_trial_records
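# Illustrative sketch (hypothetical values): a snapshot line in the format the
# parser above expects -- space-separated "Key:value" tokens in a fixed order.
EXAMPLE_SNAPSHOT_LINE = (
    'TimeIndex:3 SimTime:45.0 TotalNumFlows:12 MaxLinkUsageMbps:9.3 '
    'AvgLinkUsageMbps:4.1 TrafficConcentration:2.27 LinkUsageStdDev:1.8 '
    'ResponseTime:0.021 NetworkTime:0.012 ProcessingTime:0.009 '
    'SwitchAvgLoadMbps:3.5 NumActiveReceivers:7')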
def print_dynamic_trial_statistics(snapshot_records, full_trial_records, output_prefix):
traffic_conc_avgs = []
traffic_conc_cis = []
link_std_dev_avgs = []
link_std_dev_cis = []
link_avg_mbps_avgs = []
link_avg_mbps_cis = []
link_max_mbps_avgs = []
link_max_mbps_cis = []
switch_load_mbps_avgs = []
switch_load_mbps_cis = []
num_flows_avgs = []
num_flows_cis = []
response_time_avgs = []
response_time_cis = []
network_time_avgs = []
network_time_cis = []
processing_time_avgs = []
processing_time_cis = []
active_receivers_avgs = []
active_receivers_cis = []
print ' '
for time_index in range(0, len(snapshot_records)):
if len(snapshot_records[time_index]) < 2:
# Ignore entries that do not produce valid confidence intervals
continue
print 'TimeIndex #' + str(time_index) + ' stats:'
print '# Trials:\t\t' + str(len(snapshot_records[time_index]))
# num_receivers_list = [float(r.num_receivers) for r in snapshot_records[time_index]]
# avg = sum(num_receivers_list) / len(num_receivers_list)
# ci_upper, ci_lower = mean_confidence_interval(num_receivers_list)
# print 'NumReceivers:\t\t' + str(avg) + '\t[' + str(ci_lower) + ', ' + str(ci_upper) + ']'
num_flows_list = [float(r.num_flows) for r in snapshot_records[time_index]]
avg = sum(num_flows_list) / len(num_flows_list)
num_flows_avgs.append(avg)
ci_upper, ci_lower = mean_confidence_interval(num_flows_list)
num_flows_cis.append(abs(ci_upper - ci_lower) / 2)
print 'TotalNumFlows:\t\t' + str(avg) + '\t[' + str(ci_lower) + ', ' + str(ci_upper) + ']'
max_link_mbps_list = [float(r.max_link_mbps) for r in snapshot_records[time_index]]
avg = sum(max_link_mbps_list) / len(max_link_mbps_list)
link_max_mbps_avgs.append(avg)
ci_upper, ci_lower = mean_confidence_interval(max_link_mbps_list)
link_max_mbps_cis.append(abs(ci_upper - ci_lower) / 2)
print 'MaxLinkUsageMbps:\t' + str(avg) + '\t[' + str(ci_lower) + ', ' + str(ci_upper) + ']'
avg_link_mbps_list = [float(r.avg_link_mbps) for r in snapshot_records[time_index]]
avg = sum(avg_link_mbps_list) / len(avg_link_mbps_list)
link_avg_mbps_avgs.append(avg)
ci_upper, ci_lower = mean_confidence_interval(avg_link_mbps_list)
link_avg_mbps_cis.append(abs(ci_upper - ci_lower) / 2)
print 'AvgLinkUsageMbps:\t' + str(avg) + '\t[' + str(ci_lower) + ', ' + str(ci_upper) + ']'
switch_load_mbps_list = [float(r.switch_load_mbps) for r in snapshot_records[time_index]]
avg = sum(switch_load_mbps_list) / len(switch_load_mbps_list)
switch_load_mbps_avgs.append(avg)
ci_upper, ci_lower = mean_confidence_interval(switch_load_mbps_list)
switch_load_mbps_cis.append(abs(ci_upper - ci_lower) / 2)
print 'SwitchLoadMbps:\t' + str(avg) + '\t[' + str(ci_lower) + ', ' + str(ci_upper) + ']'
traffic_conc_list = [float(r.traffic_conc) for r in snapshot_records[time_index]]
avg = sum(traffic_conc_list) / len(traffic_conc_list)
traffic_conc_avgs.append(avg)
ci_upper, ci_lower = mean_confidence_interval(traffic_conc_list)
traffic_conc_cis.append(abs(ci_upper - ci_lower) / 2)
print 'TrafficConcentration:\t' + str(avg) + '\t[' + str(ci_lower) + ', ' + str(ci_upper) + ']'
link_mbps_std_dev_list = [float(r.link_mbps_std_dev) for r in snapshot_records[time_index]]
avg = sum(link_mbps_std_dev_list) / len(link_mbps_std_dev_list)
link_std_dev_avgs.append(avg)
ci_upper, ci_lower = mean_confidence_interval(link_mbps_std_dev_list)
link_std_dev_cis.append(abs(ci_upper - ci_lower) / 2)
print 'LinkUsageStdDev:\t' + str(avg) + '\t[' + str(ci_lower) + ', ' + str(ci_upper) + ']'
response_time_list = [float(r.flow_tracker_response_time) for r in snapshot_records[time_index]]
avg = sum(response_time_list) / len(response_time_list)
response_time_avgs.append(avg)
ci_upper, ci_lower = mean_confidence_interval(response_time_list)
response_time_cis.append(abs(ci_upper - ci_lower) / 2)
print 'ResponseTime:\t\t' + str(avg) + '\t[' + str(ci_lower) + ', ' + str(ci_upper) + ']'
network_time_list = [float(r.flow_tracker_network_time) for r in snapshot_records[time_index]]
avg = sum(network_time_list) / len(network_time_list)
network_time_avgs.append(avg)
ci_upper, ci_lower = mean_confidence_interval(network_time_list)
network_time_cis.append(abs(ci_upper - ci_lower) / 2)
print 'NetworkTime:\t\t' + str(avg) + '\t[' + str(ci_lower) + ', ' + str(ci_upper) + ']'
processing_time_list = [float(r.flow_tracker_processing_time) for r in snapshot_records[time_index]]
avg = sum(processing_time_list) / len(processing_time_list)
processing_time_avgs.append(avg)
ci_upper, ci_lower = mean_confidence_interval(processing_time_list)
processing_time_cis.append(abs(ci_upper - ci_lower) / 2)
print 'ProcessingTime:\t\t' + str(avg) + '\t[' + str(ci_lower) + ', ' + str(ci_upper) + ']'
num_active_receivers_list = [float(r.num_active_receivers) for r in snapshot_records[time_index]]
avg = sum(num_active_receivers_list) / len(num_active_receivers_list)
active_receivers_avgs.append(avg)
ci_upper, ci_lower = mean_confidence_interval(num_active_receivers_list)
active_receivers_cis.append(abs(ci_upper - ci_lower) / 2)
print 'ActiveReceivers:\t\t' + str(avg) + '\t[' + str(ci_lower) + ', ' + str(ci_upper) + ']'
print ' '
# Print time indexed output in MATLAB matrix format
print str(output_prefix) + 'traffic_conc = [' + ', '.join([str(r) for r in traffic_conc_avgs]) + '];'
print str(output_prefix) + 'traffic_conc_ci = [' + ', '.join([str(r) for r in traffic_conc_cis]) + '];'
print str(output_prefix) + 'link_std_dev = [' + ', '.join([str(r) for r in link_std_dev_avgs]) + '];'
print str(output_prefix) + 'link_std_dev_ci = [' + ', '.join([str(r) for r in link_std_dev_cis]) + '];'
print str(output_prefix) + 'link_avg_mbps = [' + ', '.join([str(r) for r in link_avg_mbps_avgs]) + '];'
print str(output_prefix) + 'link_avg_mbps_ci = [' + ', '.join([str(r) for r in link_avg_mbps_cis]) + '];'
print str(output_prefix) + 'link_max_mbps = [' + ', '.join([str(r) for r in link_max_mbps_avgs]) + '];'
print str(output_prefix) + 'link_max_mbps_ci = [' + ', '.join([str(r) for r in link_max_mbps_cis]) + '];'
print str(output_prefix) + 'switch_load_mbps = [' + ', '.join([str(r) for r in switch_load_mbps_avgs]) + '];'
print str(output_prefix) + 'switch_load_mbps_ci = [' + ', '.join([str(r) for r in switch_load_mbps_cis]) + '];'
print str(output_prefix) + 'num_flows = [' + ', '.join([str(r) for r in num_flows_avgs]) + '];'
print str(output_prefix) + 'num_flows_ci = [' + ', '.join([str(r) for r in num_flows_cis]) + '];'
print str(output_prefix) + 'response_time = [' + ', '.join([str(r) for r in response_time_avgs]) + '];'
print str(output_prefix) + 'response_time_ci = [' + ', '.join([str(r) for r in response_time_cis]) + '];'
print str(output_prefix) + 'network_time = [' + ', '.join([str(r) for r in network_time_avgs]) + '];'
print str(output_prefix) + 'network_time_ci = [' + ', '.join([str(r) for r in network_time_cis]) + '];'
print str(output_prefix) + 'processing_time = [' + ', '.join([str(r) for r in processing_time_avgs]) + '];'
print str(output_prefix) + 'processing_time_ci = [' + ', '.join([str(r) for r in processing_time_cis]) + '];'
print str(output_prefix) + 'active_receivers = [' + ', '.join([str(r) for r in active_receivers_avgs]) + '];'
print str(output_prefix) + 'active_receivers_ci = [' + ', '.join([str(r) for r in active_receivers_cis]) + '];'
print ' '
# Print overall trial averages in MATLAB format
avg = float(sum(trial_record.packet_loss for trial_record in full_trial_records)) / len(full_trial_records)
ci_upper, ci_lower = mean_confidence_interval([trial_record.packet_loss for trial_record in full_trial_records])
ci = abs(ci_upper - ci_lower) / 2
print str(output_prefix) + 'fulltrial_packet_loss = ' + str(avg) + ';'
print str(output_prefix) + 'fulltrial_packet_loss_ci = ' + str(ci) + ';'
avg = float(sum(trial_record.avg_traffic_conc for trial_record in full_trial_records)) / len(full_trial_records)
ci_upper, ci_lower = mean_confidence_interval([trial_record.avg_traffic_conc for trial_record in full_trial_records])
ci = abs(ci_upper - ci_lower) / 2
print str(output_prefix) + 'fulltrial_traffic_conc = ' + str(avg) + ';'
print str(output_prefix) + 'fulltrial_traffic_conc_ci = ' + str(ci) + ';'
avg = float(sum(trial_record.avg_link_mbps_std_dev for trial_record in full_trial_records)) / len(full_trial_records)
ci_upper, ci_lower = mean_confidence_interval([trial_record.avg_link_mbps_std_dev for trial_record in full_trial_records])
ci = abs(ci_upper - ci_lower) / 2
print str(output_prefix) + 'fulltrial_link_std_dev = ' + str(avg) + ';'
print str(output_prefix) + 'fulltrial_link_std_dev_ci = ' + str(ci) + ';'
avg = float(sum(trial_record.avg_avg_link_mbps for trial_record in full_trial_records)) / len(full_trial_records)
ci_upper, ci_lower = mean_confidence_interval([trial_record.avg_avg_link_mbps for trial_record in full_trial_records])
ci = abs(ci_upper - ci_lower) / 2
print str(output_prefix) + 'fulltrial_link_avg_mbps = ' + str(avg) + ';'
print str(output_prefix) + 'fulltrial_link_avg_mbps_ci = ' + str(ci) + ';'
avg = float(sum(trial_record.avg_max_link_mbps for trial_record in full_trial_records)) / len(full_trial_records)
ci_upper, ci_lower = mean_confidence_interval([trial_record.avg_max_link_mbps for trial_record in full_trial_records])
ci = abs(ci_upper - ci_lower) / 2
print str(output_prefix) + 'fulltrial_link_max_mbps = ' + str(avg) + ';'
print str(output_prefix) + 'fulltrial_link_max_mbps_ci = ' + str(ci) + ';'
avg = float(sum(trial_record.avg_switch_load_mbps for trial_record in full_trial_records)) / len(full_trial_records)
ci_upper, ci_lower = mean_confidence_interval([trial_record.avg_switch_load_mbps for trial_record in full_trial_records])
ci = abs(ci_upper - ci_lower) / 2
print str(output_prefix) + 'fulltrial_switch_load_mbps = ' + str(avg) + ';'
print str(output_prefix) + 'fulltrial_switch_load_mbps_ci = ' + str(ci) + ';'
avg = float(sum(trial_record.avg_num_flows for trial_record in full_trial_records)) / len(full_trial_records)
ci_upper, ci_lower = mean_confidence_interval([trial_record.avg_num_flows for trial_record in full_trial_records])
ci = abs(ci_upper - ci_lower) / 2
print str(output_prefix) + 'fulltrial_num_flows = ' + str(avg) + ';'
print str(output_prefix) + 'fulltrial_num_flows_ci = ' + str(ci) + ';'
avg = float(sum(trial_record.avg_num_active_receivers for trial_record in full_trial_records)) / len(full_trial_records)
ci_upper, ci_lower = mean_confidence_interval([trial_record.avg_num_active_receivers for trial_record in full_trial_records])
ci = abs(ci_upper - ci_lower) / 2
print str(output_prefix) + 'fulltrial_num_active_receivers = ' + str(avg) + ';'
print str(output_prefix) + 'fulltrial_num_active_receivers_ci = ' + str(ci) + ';'
if __name__ == '__main__':
if len(sys.argv) >= 4:
filepath_prefix = sys.argv[1]
num_logs = int(sys.argv[2])
output_prefix = sys.argv[3]
snapshot_records, full_trial_records = read_dynamic_log_set(filepath_prefix, num_logs)
print_dynamic_trial_statistics(snapshot_records, full_trial_records, output_prefix)
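# For reference, `mean_confidence_interval` is defined earlier in this script; the
# calls above only rely on it returning an (upper, lower) bound pair for the sample
# mean. A minimal sketch of such a helper, assuming scipy is available (95%
# Student-t interval), would look roughly like:
#
#   import numpy as np
#   import scipy.stats
#
#   def mean_confidence_interval(data, confidence=0.95):
#       a = np.asarray(data, dtype=float)
#       m = a.mean()
#       h = scipy.stats.sem(a) * scipy.stats.t.ppf((1 + confidence) / 2.0, len(a) - 1)
#       return m + h, m - h   # (ci_upper, ci_lower), matching the unpacking above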
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Functions "ndtr" and "ndtri" are derived from calculations made in:
# https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html
# In the following email exchange, the author gives his consent to redistribute
# derived works under an Apache 2.0 license.
#
# From: Stephen Moshier <steve@moshier.net>
# Date: Sat, Jun 9, 2018 at 2:36 PM
# Subject: Re: Licensing cephes under Apache (BSD-like) license.
# To: rif <rif@google.com>
#
#
#
# Hello Rif,
#
# Yes, Google may distribute Cephes files under the Apache 2 license.
#
# If clarification is needed, I do not favor BSD over other free licenses.
# I would agree that Apache 2 seems to cover the concern you mentioned
# about sublicensees.
#
# Best wishes for good luck with your projects!
# Steve Moshier
#
#
#
# On Thu, 31 May 2018, rif wrote:
#
# > Hello Steve.
# > My name is Rif. I work on machine learning software at Google.
# >
# > Your cephes software continues to be incredibly useful and widely used. I
# > was wondering whether it would be permissible for us to use the Cephes code
# > under the Apache 2.0 license, which is extremely similar in permissions to
# > the BSD license (Wikipedia comparisons). This would be quite helpful to us
# > in terms of avoiding multiple licenses on software.
# >
# > I'm sorry to bother you with this (I can imagine you're sick of hearing
# > about this by now), but I want to be absolutely clear we're on the level and
# > not misusing your important software. In former conversation with Eugene
# > Brevdo (ebrevdo@google.com), you wrote "If your licensing is similar to BSD,
# > the formal way that has been handled is simply to add a statement to the
# > effect that you are incorporating the Cephes software by permission of the
# > author." I wanted to confirm that (a) we could use the Apache license, (b)
# > that we don't need to (and probably you don't want to) keep getting
# > contacted about individual uses, because your intent is generally to allow
# > this software to be reused under "BSD-like" license, and (c) you're OK
# > letting incorporators decide whether a license is sufficiently BSD-like?
# >
# > Best,
# >
# > rif
# >
# >
# >
"""Special Math Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
__all__ = [
"erfinv",
"ndtr",
"ndtri",
"log_ndtr",
"log_cdf_laplace",
]
# log_ndtr uses different functions over the ranges
# (-infty, lower], (lower, upper], (upper, infty)
# Lower bound values were chosen by examining where the support of ndtr
# appears to be zero, relative to scipy's (which is always 64bit). They were
# then made more conservative just to be safe. (Conservative means use the
# expansion more than we probably need to.) See `NdtrTest` in
# special_math_test.py.
LOGNDTR_FLOAT64_LOWER = np.array(-20, np.float64)
LOGNDTR_FLOAT32_LOWER = np.array(-10, np.float32)
# Upper bound values were chosen by examining for which values of 'x'
# Log[cdf(x)] is 0, after which point we need to use the approximation
# Log[cdf(x)] = Log[1 - cdf(-x)] approx -cdf(-x). We chose a slightly
# conservative value, meaning we use the approximation earlier than needed.
LOGNDTR_FLOAT64_UPPER = np.array(8, np.float64)
LOGNDTR_FLOAT32_UPPER = np.array(5, np.float32)
def ndtr(x, name="ndtr"):
"""Normal distribution function.
Returns the area under the Gaussian probability density function, integrated
from minus infinity to x:
```
ndtr(x) = (1 / sqrt(2 pi)) * integral from -inf to x of exp(-0.5 t**2) dt
= 0.5 (1 + erf(x / sqrt(2)))
= 0.5 erfc(-x / sqrt(2))
```
Args:
x: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="ndtr").
Returns:
ndtr: `Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x` is not floating-type.
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
if x.dtype.as_numpy_dtype not in [np.float32, np.float64]:
raise TypeError(
"x.dtype=%s is not handled, see docstring for supported types."
% x.dtype)
return _ndtr(x)
def _ndtr(x):
"""Implements ndtr core logic."""
half_sqrt_2 = constant_op.constant(
0.5 * np.sqrt(2.), dtype=x.dtype, name="half_sqrt_2")
w = x * half_sqrt_2
z = math_ops.abs(w)
y = array_ops.where_v2(
math_ops.less(z, half_sqrt_2), 1. + math_ops.erf(w),
array_ops.where_v2(
math_ops.greater(w, 0.), 2. - math_ops.erfc(z), math_ops.erfc(z)))
return 0.5 * y
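# A quick numeric sanity check for _ndtr / ndtr (a sketch, assuming eager-style
# evaluation of the returned tensors):
#
#   x = constant_op.constant([-1.0, 0.0, 1.0], dtype=np.float64)
#   ndtr(x)  # approximately [0.15866, 0.5, 0.84134], i.e. Phi(-1), Phi(0), Phi(1)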
def ndtri(p, name="ndtri"):
"""The inverse of the CDF of the Normal distribution function.
Returns x such that the area under the pdf from minus infinity to x is equal
to p.
A piece-wise rational approximation is done for the function.
This is a port of the implementation in netlib.
Args:
p: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="ndtri").
Returns:
x: `Tensor` with `dtype=p.dtype`.
Raises:
TypeError: if `p` is not floating-type.
"""
with ops.name_scope(name, values=[p]):
p = ops.convert_to_tensor(p, name="p")
if p.dtype.as_numpy_dtype not in [np.float32, np.float64]:
raise TypeError(
"p.dtype=%s is not handled, see docstring for supported types."
% p.dtype)
return _ndtri(p)
def _ndtri(p):
"""Implements ndtri core logic."""
# Constants used in piece-wise rational approximations. Taken from the cephes
# library:
# https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html
p0 = [
-1.23916583867381258016E0, 1.39312609387279679503E1,
-5.66762857469070293439E1, 9.80010754185999661536E1,
-5.99633501014107895267E1
]
q0 = [
-1.18331621121330003142E0, 1.59056225126211695515E1,
-8.20372256168333339912E1, 2.00260212380060660359E2,
-2.25462687854119370527E2, 8.63602421390890590575E1,
4.67627912898881538453E0, 1.95448858338141759834E0, 1.0
]
p1 = [
-8.57456785154685413611E-4, -3.50424626827848203418E-2,
-1.40256079171354495875E-1, 2.18663306850790267539E0,
1.46849561928858024014E1, 4.40805073893200834700E1,
5.71628192246421288162E1, 3.15251094599893866154E1,
4.05544892305962419923E0
]
q1 = [
-9.33259480895457427372E-4, -3.80806407691578277194E-2,
-1.42182922854787788574E-1, 2.50464946208309415979E0,
1.50425385692907503408E1, 4.13172038254672030440E1,
4.53907635128879210584E1, 1.57799883256466749731E1, 1.0
]
p2 = [
6.23974539184983293730E-9, 2.65806974686737550832E-6,
3.01581553508235416007E-4, 1.23716634817820021358E-2,
2.01485389549179081538E-1, 1.33303460815807542389E0,
3.93881025292474443415E0, 6.91522889068984211695E0,
3.23774891776946035970E0
]
q2 = [
6.79019408009981274425E-9, 2.89247864745380683936E-6,
3.28014464682127739104E-4, 1.34204006088543189037E-2,
2.16236993594496635890E-1, 1.37702099489081330271E0,
3.67983563856160859403E0, 6.02427039364742014255E0, 1.0
]
def _create_polynomial(var, coeffs):
"""Compute n_th order polynomial via Horner's method."""
coeffs = np.array(coeffs, var.dtype.as_numpy_dtype)
if not coeffs.size:
return array_ops.zeros_like(var)
return coeffs[0] + _create_polynomial(var, coeffs[1:]) * var
maybe_complement_p = array_ops.where_v2(p > -np.expm1(-2.), 1. - p, p)
# Write in an arbitrary value in place of 0 for p since 0 will cause NaNs
# later on. The result from the computation when p == 0 is not used so any
# number that doesn't result in NaNs is fine.
sanitized_mcp = array_ops.where_v2(
maybe_complement_p <= 0.,
array_ops.fill(array_ops.shape(p), np.array(0.5, p.dtype.as_numpy_dtype)),
maybe_complement_p)
# Compute x for p > exp(-2): x/sqrt(2pi) = w + w**3 P0(w**2)/Q0(w**2).
w = sanitized_mcp - 0.5
ww = w ** 2
x_for_big_p = w + w * ww * (_create_polynomial(ww, p0)
/ _create_polynomial(ww, q0))
x_for_big_p *= -np.sqrt(2. * np.pi)
# Compute x for p <= exp(-2): x = z - log(z)/z - (1/z) P(1/z) / Q(1/z),
# where z = sqrt(-2. * log(p)), and P/Q are chosen between two different
# arrays based on whether p < exp(-32).
z = math_ops.sqrt(-2. * math_ops.log(sanitized_mcp))
first_term = z - math_ops.log(z) / z
second_term_small_p = (
_create_polynomial(1. / z, p2) /
_create_polynomial(1. / z, q2) / z)
second_term_otherwise = (
_create_polynomial(1. / z, p1) /
_create_polynomial(1. / z, q1) / z)
x_for_small_p = first_term - second_term_small_p
x_otherwise = first_term - second_term_otherwise
x = array_ops.where_v2(
sanitized_mcp > np.exp(-2.), x_for_big_p,
array_ops.where_v2(z >= 8.0, x_for_small_p, x_otherwise))
x = array_ops.where_v2(p > 1. - np.exp(-2.), x, -x)
infinity_scalar = constant_op.constant(np.inf, dtype=p.dtype)
infinity = array_ops.fill(array_ops.shape(p), infinity_scalar)
x_nan_replaced = array_ops.where_v2(p <= 0.0, -infinity,
array_ops.where_v2(p >= 1.0, infinity, x))
return x_nan_replaced
def log_ndtr(x, series_order=3, name="log_ndtr"):
"""Log Normal distribution function.
For details of the Normal distribution function see `ndtr`.
This function calculates `(log o ndtr)(x)` by either calling `log(ndtr(x))` or
using an asymptotic series. Specifically:
- For `x > upper_segment`, use the approximation `-ndtr(-x)` based on
`log(1-x) ~= -x, x << 1`.
- For `lower_segment < x <= upper_segment`, use the existing `ndtr` technique
and take a log.
- For `x <= lower_segment`, we use the series approximation of erf to compute
the log CDF directly.
The `lower_segment` is set based on the precision of the input:
```
lower_segment = { -20, x.dtype=float64
{ -10, x.dtype=float32
upper_segment = { 8, x.dtype=float64
{ 5, x.dtype=float32
```
When `x < lower_segment`, the `ndtr` asymptotic series approximation is:
```
ndtr(x) = scale * (1 + sum) + R_N
scale = exp(-0.5 x**2) / (-x sqrt(2 pi))
sum = Sum{(-1)^n (2n-1)!! / (x**2)^n, n=1:N}
R_N = O(exp(-0.5 x**2) (2N+1)!! / |x|^{2N+3})
```
where `(2n-1)!! = (2n-1) (2n-3) (2n-5) ... (3) (1)` is a
[double-factorial](https://en.wikipedia.org/wiki/Double_factorial).
Args:
x: `Tensor` of type `float32`, `float64`.
series_order: Positive Python `integer`. Maximum depth to
evaluate the asymptotic expansion. This is the `N` above.
name: Python string. A name for the operation (default="log_ndtr").
Returns:
log_ndtr: `Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x.dtype` is not handled.
TypeError: if `series_order` is not a Python `integer`.
ValueError: if `series_order` is not in `[0, 30]`.
"""
if not isinstance(series_order, int):
raise TypeError("series_order must be a Python integer.")
if series_order < 0:
raise ValueError("series_order must be non-negative.")
if series_order > 30:
raise ValueError("series_order must be <= 30.")
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
if x.dtype.as_numpy_dtype == np.float64:
lower_segment = LOGNDTR_FLOAT64_LOWER
upper_segment = LOGNDTR_FLOAT64_UPPER
elif x.dtype.as_numpy_dtype == np.float32:
lower_segment = LOGNDTR_FLOAT32_LOWER
upper_segment = LOGNDTR_FLOAT32_UPPER
else:
raise TypeError("x.dtype=%s is not supported." % x.dtype)
# The basic idea here was ported from:
# https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html
# We copy the main idea, with a few changes
# * For x >> 1, and X ~ Normal(0, 1),
# Log[P[X < x]] = Log[1 - P[X < -x]] approx -P[X < -x],
# which extends the range of validity of this function.
# * We use one fixed series_order for all of 'x', rather than adaptive.
# * Our docstring properly reflects that this is an asymptotic series, not a
# Taylor series. We also provided a correct bound on the remainder.
# * We need to use the max/min in the _log_ndtr_lower arg to avoid nan when
# x=0. This happens even though the branch is unchosen because when x=0
# the gradient of a select involves the calculation 1*dy+0*(-inf)=nan
# regardless of whether dy is finite. Note that the minimum is a NOP if
# the branch is chosen.
return array_ops.where_v2(
math_ops.greater(x, upper_segment),
-_ndtr(-x), # log(1-x) ~= -x, x << 1 # pylint: disable=invalid-unary-operand-type
array_ops.where_v2(
math_ops.greater(x, lower_segment),
math_ops.log(_ndtr(math_ops.maximum(x, lower_segment))),
_log_ndtr_lower(math_ops.minimum(x, lower_segment), series_order)))
def _log_ndtr_lower(x, series_order):
"""Asymptotic expansion version of `Log[cdf(x)]`, appropriate for `x<<-1`."""
x_2 = math_ops.square(x)
# Log of the term multiplying (1 + sum)
log_scale = -0.5 * x_2 - math_ops.log(-x) - 0.5 * np.log(2. * np.pi)
return log_scale + math_ops.log(_log_ndtr_asymptotic_series(x, series_order))
def _log_ndtr_asymptotic_series(x, series_order):
"""Calculates the asymptotic series used in log_ndtr."""
dtype = x.dtype.as_numpy_dtype
if series_order <= 0:
return np.array(1, dtype)
x_2 = math_ops.square(x)
even_sum = array_ops.zeros_like(x)
odd_sum = array_ops.zeros_like(x)
x_2n = x_2 # Start with x^{2*1} = x^{2*n} with n = 1.
for n in range(1, series_order + 1):
y = np.array(_double_factorial(2 * n - 1), dtype) / x_2n
if n % 2:
odd_sum += y
else:
even_sum += y
x_2n *= x_2
return 1. + even_sum - odd_sum
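# Worked instance of the asymptotic series for series_order=3 (double factorials
# 1!! = 1, 3!! = 3, 5!! = 15), matching the loop above:
#
#   _log_ndtr_asymptotic_series(x, 3) == 1 - 1/x**2 + 3/x**4 - 15/x**6
#
# so for large negative x, log_ndtr(x) is log_scale from _log_ndtr_lower plus the
# log of that series.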
def erfinv(x, name="erfinv"):
"""The inverse function for erf, the error function.
Args:
x: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="erfinv").
Returns:
x: `Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x` is not floating-type.
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
if x.dtype.as_numpy_dtype not in [np.float32, np.float64]:
raise TypeError(
"x.dtype=%s is not handled, see docstring for supported types."
% x.dtype)
return ndtri((x + 1.0) / 2.0) / np.sqrt(2)
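# The line above is the identity erfinv(x) = ndtri((x + 1) / 2) / sqrt(2).
# A quick spot check (values from standard tables, shown here as a sketch):
#
#   erfinv(constant_op.constant([0.0, 0.5], dtype=np.float64))
#   # approximately [0.0, 0.47694], since erf(0.47694) ~= 0.5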
def _double_factorial(n):
"""The double factorial function for small Python integer `n`."""
return np.prod(np.arange(n, 1, -2))
def log_cdf_laplace(x, name="log_cdf_laplace"):
"""Log Laplace distribution function.
This function calculates `Log[L(x)]`, where `L(x)` is the cumulative
distribution function of the Laplace distribution, i.e.
```L(x) := 0.5 * int_{-infty}^x e^{-|t|} dt```
For numerical accuracy, `L(x)` is computed in different ways depending on `x`,
```
x <= 0:
Log[L(x)] = Log[0.5] + x, which is exact
0 < x:
Log[L(x)] = Log[1 - 0.5 * e^{-x}], which is exact
```
Args:
x: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="log_ndtr").
Returns:
`Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x.dtype` is not handled.
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
# For x < 0, L(x) = 0.5 * exp{x} exactly, so Log[L(x)] = log(0.5) + x.
lower_solution = -np.log(2.) + x
# safe_exp_neg_x = exp{-x} for x > 0, but is
# bounded above by 1, which avoids
# log[1 - 1] = -inf for x = log(1/2), AND
# exp{-x} --> inf, for x << -1
safe_exp_neg_x = math_ops.exp(-math_ops.abs(x))
# log1p(z) = log(1 + z) approx z for |z| << 1. This approximation is used
# internally by log1p, rather than being done explicitly here.
upper_solution = math_ops.log1p(-0.5 * safe_exp_neg_x)
return array_ops.where_v2(x < 0., lower_solution, upper_solution)
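# Spot checks for the two branches above (a sketch):
#
#   log_cdf_laplace(constant_op.constant([0.0, 1.0], dtype=np.float64))
#   # x = 0: both branches agree at log(0.5)      ~= -0.6931
#   # x = 1: upper branch, log(1 - 0.5 * e**-1)   ~= -0.2033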
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
def test_vectorize_loop():
dtype = "int64"
n = te.var("n")
ib = tvm.tir.ir_builder.create()
A = ib.pointer("float32", name="A")
with ib.for_range(0, n) as i:
with ib.for_range(0, 4, kind="vectorize") as j:
A[j] = tvm.tir.const(1, A.dtype)
stmt = ib.get()
assert isinstance(stmt.body, tvm.tir.For)
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, n], stmt))
stmt = tvm.tir.transform.VectorizeLoop()(mod)["main"].body
assert isinstance(stmt, tvm.tir.For)
assert not isinstance(stmt.body, tvm.tir.For)
assert isinstance(stmt.body.index, tvm.tir.Ramp)
assert isinstance(stmt.body.value, tvm.tir.Broadcast)
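# Schematically, VectorizeLoop rewrites the inner vectorized loop above from
#
#   for (j, 0, 4) { A[j] = 1f }
#
# into a single vector store, which is what the Ramp/Broadcast asserts check:
#
#   A[ramp(0, 1, 4)] = broadcast(1f, 4)
#
# (shown as pseudo-TIR for illustration; the exact printed form may differ)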
def test_vectorize_vector():
dtype = "int64"
n = te.var("n")
ib = tvm.tir.ir_builder.create()
A = ib.pointer("float32x4", name="A")
with ib.for_range(0, n) as i:
with ib.for_range(0, 4, kind="vectorize") as j:
A[j] = tvm.tir.const(1, A.dtype)
stmt = ib.get()
assert isinstance(stmt.body, tvm.tir.For)
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, n], stmt))
stmt = tvm.tir.transform.VectorizeLoop()(mod)["main"].body
assert isinstance(stmt, tvm.tir.For)
assert not isinstance(stmt.body, tvm.tir.For)
assert isinstance(stmt.body.index, tvm.tir.Ramp)
assert isinstance(stmt.body.value, tvm.tir.Broadcast)
def test_vectorize_with_if():
n = te.var("n")
x = te.var("x")
ib = tvm.tir.ir_builder.create()
A = ib.pointer("float32", name="A")
with ib.for_range(0, 4, kind="vectorize") as i:
with ib.if_scope(x < n):
A[i] = A[i] + 1
with ib.else_scope():
with ib.if_scope(i < n):
A[i] = 2.0
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, n, x], stmt))
stmt = tvm.tir.transform.VectorizeLoop()(mod)["main"].body
assert isinstance(stmt, tvm.tir.IfThenElse)
assert isinstance(stmt.then_case.index, tvm.tir.Ramp)
assert isinstance(stmt.then_case.value, tvm.tir.Add)
assert stmt.then_case.value.dtype == "float32x4"
assert isinstance(stmt.else_case, tvm.tir.For)
def test_vectorize_let():
v = tvm.tir.Var("v", "float32")
ib = tvm.tir.ir_builder.create()
A = ib.pointer("float32", name="A")
with ib.for_range(0, 4, kind="vectorize") as i:
ib.emit(lambda body: tvm.tir.LetStmt(v, A[i] + 1, body))
A[i] = v + 2
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A], ib.get()))
stmt = tvm.tir.transform.VectorizeLoop()(mod)["main"].body
assert isinstance(stmt, tvm.tir.LetStmt)
assert stmt.value.dtype == "float32x4"
def test_vectorize_with_le_cond():
n = te.var("n")
ib = tvm.tir.ir_builder.create()
A = ib.pointer("float32", name="A")
with ib.for_range(0, 4, kind="vectorize") as i:
with ib.if_scope(i <= n):
A[i] = A[i] + 1
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, n], stmt))
stmt = tvm.tir.transform.VectorizeLoop()(mod)["main"].body
assert isinstance(stmt, tvm.tir.For)
def test_vectorize_with_ge_cond():
n = te.var("n")
ib = tvm.tir.ir_builder.create()
A = ib.pointer("float32", name="A")
with ib.for_range(0, 4, kind="vectorize") as i:
with ib.if_scope(i >= n):
A[i] = A[i] + 1
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, n], stmt))
stmt = tvm.tir.transform.VectorizeLoop()(mod)["main"].body
assert isinstance(stmt, tvm.tir.For)
def test_vectorize_if_then_else():
n = te.var("n")
x = te.var("x")
ib = tvm.tir.ir_builder.create()
A = ib.pointer("float32", name="A")
with ib.for_range(0, 4, kind="vectorize") as i:
A[i] = tvm.tir.call_intrin("float32", "tir.if_then_else", i > 0, A[i] + 1, A[i])
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, n, x], stmt))
stmt = tvm.tir.transform.VectorizeLoop()(mod)["main"].body
assert isinstance(stmt, tvm.tir.For)
ib = tvm.tir.ir_builder.create()
A = ib.pointer("float32", name="A")
with ib.for_range(0, n) as k:
with ib.for_range(0, 4, kind="vectorize") as i:
A[k * 4 + i] = tvm.tir.call_intrin(
"float32", "tir.if_then_else", k > 0, A[k * 4 + i], 0
)
stmt = ib.get()
assert isinstance(stmt.body, tvm.tir.For)
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, n], stmt))
stmt = tvm.tir.transform.VectorizeLoop()(mod)["main"].body
assert not isinstance(stmt.body, tvm.tir.For)
assert isinstance(stmt.body.value.args[2], tvm.tir.Broadcast)
def test_vectorize_while_fail():
"""A while loop inside a vectorized loop should fail."""
n = 64
num_iter = 10
def test_ir(A, B, C):
ib = tvm.tir.ir_builder.create()
n = C.shape[0]
A = ib.buffer_ptr(A)
B = ib.buffer_ptr(B)
C = ib.buffer_ptr(C)
i = ib.allocate("int32", (1,), name="i", scope="local")
i[0] = 0
with ib.for_range(0, n) as j:
C[j] = 0.0
with ib.for_range(0, n, kind="vectorize") as j:
with ib.while_loop(i[0] < num_iter):
C[j] += A[j] + B[j]
i[0] += 1
return ib.get()
dtype = "float32"
A = te.placeholder((n,), name="A", dtype=dtype)
B = te.placeholder((n,), name="B", dtype=dtype)
C = te.extern(
(n,),
[A, B],
lambda ins, outs: test_ir(ins[0], ins[1], outs[0]),
name="while_vectorize",
dtype=dtype,
)
s = te.create_schedule(C.op)
try:
tvm.lower(s, [A, B, C], "llvm")
assert False
except tvm.error.TVMError as e:
error_msg = str(e).split("\n")[-1]
expected = "A while loop inside a vectorized loop not supported"
assert expected in error_msg
if __name__ == "__main__":
test_vectorize_vector()
test_vectorize_with_if()
test_vectorize_loop()
test_vectorize_if_then_else()
test_vectorize_with_le_cond()
test_vectorize_with_ge_cond()
test_vectorize_let()
test_vectorize_while_fail()
| |
from abc import ABCMeta, abstractmethod
from HTMLParser import HTMLParser
import re
import urllib2
from RaceResults import RaceInfo, RaceResult, StructuredRaceResults, UnstructuredTextRaceResults
TEXT_RESULT_DIV_CLASS = "racetextresults"
MTEC_BASE_URL = "http://www.mtecresults.com"
MTEC_AJAX_URL = MTEC_BASE_URL + "/race/quickResults?raceid=%s&version=63&overall=yes&offset=0&perPage=10000" # expects the race id
REFERER_HEADER_FORMAT = MTEC_BASE_URL + "/race/show/%s'" # expects the race id
REQUESTED_WITH_HEADER = "XMLHttpRequest"
class SubdivisionParser(HTMLParser):
"""
find all the races for the event (race_info is a bit of a misnomer I guess)
"""
def __init__(self, race_info):
HTMLParser.__init__(self)
self.event_race_info = race_info
# since unstructured content includes a race on the event landing page, we need to include the landing page :O
self.race_infos = [race_info]
self.results_are_structured = True
self.in_other_race_select = False
self.in_race_option = False
self.current_name = u""
self.current_url = ""
def _generate_race_info(self):
return RaceInfo(self.event_race_info.season, self.event_race_info.division, self.event_race_info.date, self.current_url, u"{}, {}".format(self.event_race_info.name, self.current_name))
def handle_starttag(self, tag, attrs):
if tag == "div" and SubdivisionParser.extract_attr(attrs, "class") == TEXT_RESULT_DIV_CLASS:
self.results_are_structured = False
elif tag == "select" and SubdivisionParser.extract_attr(attrs, "id") == "otherracesselect":
self.in_other_race_select = True
elif tag == "option" and self.in_other_race_select:
# todo if we have structured results, we need to handle this differently. these links just take you to another top level page
# we need to parse out ids or do some post-processing in get_race_infos
# in any event, the url should look like: http://www.mtecresults.com/race/quickResults?raceid=2866&version=1&overall=yes&offset=0&perPage=10000
self.in_race_option = True
relative_path = self.extract_attr(attrs, "value")
if relative_path:
self.current_url = MTEC_BASE_URL + relative_path
else:
# todo logging & this will yield a shiesty raceinfo
print "Skipping mtec subdivision race due to no supplied url"
def handle_data(self, data):
if self.in_race_option:
self.current_name = data.decode('utf-8')
def handle_endtag(self, tag):
if tag == "select":
self.in_other_race_select = False
elif tag == "option" and self.in_race_option:
self.race_infos.append(self._generate_race_info())
self.in_race_option = False
# todo filter out garbage / repeated races
def get_race_infos(self):
return self.race_infos
@staticmethod
def extract_attr(attrs, attr_name):
for attr_pair in attrs:
if len(attr_pair) == 2:
if attr_pair[0] == attr_name:
return attr_pair[1]
return None
class ResultParser:
__metaclass__ = ABCMeta
@abstractmethod
def get_race_results(self):
"""
:return: get the parsed race results (RaceResults)
"""
class StructuredResultParser(HTMLParser, ResultParser):
"""
Parser for the case where we have a good race result summary
except it's not good because mtec does not see fit to close all its <tr> tags...
"""
def __init__(self, ri):
HTMLParser.__init__(self)
self.race_info = ri
self.race_results = []
self.current_data = [] # for handling multiline data...
self.current_race_result = RaceResult("", "", "")
self.in_thead = False
self.first_tr = True
self.td_count = 0
def handle_starttag(self, tag, attrs):
if tag == "thead":
self.in_thead = True
elif tag == "tr" and not self.in_thead:
self.in_tr = True
# since we have no closing tr tags, we must assume that a newly opened one indicates a prior closure
if self.first_tr:
self.first_tr = False
else:
self.race_results.append(self.current_race_result)
self.current_race_result = RaceResult("", "", "")
self.td_count = 0
elif tag == "td":
self.td_count += 1
def handle_data(self, data):
self.current_data.append(data.strip())
def handle_endtag(self, tag):
"""
note that there are no useful closing tr tags in this data
"""
if tag == "thead":
self.in_thead = False
elif tag == "td":
if self.td_count == 2:
self.current_race_result.name = self._get_current_data()
elif self.td_count == 7:
self.current_race_result.place = self._get_current_data()
elif self.td_count == 10:
self.current_race_result.time = self._get_current_data()
self.current_data = []
def _get_current_data(self):
return "".join(self.current_data)
def get_race_results(self):
return StructuredRaceResults(self.race_info, self.race_results)
class UnstructuredResultParser(HTMLParser, ResultParser):
"""
unfortunately, some results are in plain text, and I don't believe I can depend on consistent
headers across all mtec races (todo study this)
"""
def __init__(self, ri):
HTMLParser.__init__(self)
self.race_info = ri
# list for handling multiple
self.results = []
self.in_result_div = False
def handle_starttag(self, tag, attrs):
if tag == "div" and UnstructuredResultParser.extract_attr(attrs, "class") == TEXT_RESULT_DIV_CLASS:
self.in_result_div = True
def handle_data(self, data):
if self.in_result_div:
self.results.append(data)
def handle_endtag(self, tag):
if tag == "div":
self.in_result_div = False
def get_race_results(self):
return UnstructuredTextRaceResults(self.race_info, "\n".join(self.results))
@staticmethod
def extract_attr(attrs, attr_name):
for attr_pair in attrs:
if len(attr_pair) == 2:
if attr_pair[0] == attr_name:
return attr_pair[1]
return None
def _get_id_from_url(url):
"""
:param url: the race url to be parsed
:return: mtec id of the requested race (str, none if not present)
"""
KEY = "race/show/"
key_regex = re.compile(KEY + "[0-9]+")
match = key_regex.search(url)
if match:
return match.group(0)[len(KEY):]  # slice off the prefix (lstrip would treat KEY as a character set)
return None
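# Example (illustrative):
#
#   _get_id_from_url("http://www.mtecresults.com/race/show/3824/2016_City_of_Lakes_Loppet")
#   # -> "3824" (the regex matches "race/show/3824" and the prefix is stripped)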
def process_race(race_info, race_store):
"""
process results on the mtec site. this method may spawn the creation of new race_info types.
:param race_info: race metadata (RaceInfo)
:param race_store: collection of races existing in the race db
:return: void
"""
response = urllib2.urlopen(race_info.url)
if not response.getcode() == 200:
print("Unexpected response code from url (%s): %d. Unable to fetch mtec results." % (race_info.url, response.status_code))
return
event_parser = SubdivisionParser(race_info)
event_parser.feed(response.read())
for sub_race_info in event_parser.get_race_infos():
if sub_race_info in race_store:
# todo logging
print "Skipping the processing of mtec race (%s) because it already exists in the race store" % (sub_race_info,)
continue
event_id = _get_id_from_url(sub_race_info.url)
if event_id:
# todo I don't like "scoping" response and parser like this,
if event_parser.results_are_structured:
request_headers = {"Referrer" : REFERER_HEADER_FORMAT % (event_id, ),
"X-Requested-With" : REQUESTED_WITH_HEADER}
response = urllib2.urlopen(urllib2.Request(MTEC_AJAX_URL % (event_id, ), headers=request_headers))
race_parser = StructuredResultParser(sub_race_info)
else:
response = urllib2.urlopen(sub_race_info.url)
race_parser = UnstructuredResultParser(sub_race_info)
if not response.getcode() == 200:
print "Failed to fetch results at url %s" % (race_info.url, )
continue
race_parser.feed(response.read())
race_parser.get_race_results().serialize()
else:
# todo logging
print "Skipping mtec url due to no id: " + sub_race_info.url
if __name__ == "__main__":
r = RaceInfo("2015", "101", "2015-01-01","http://www.mtecresults.com/race/show/3824/2016_City_of_Lakes_Loppet_Festival-Columbia_Sportswear_Skate","COLL")
process_race(r, [])  # empty stand-in race store for this ad-hoc run
r = RaceInfo("2011", "101","2011-01-01", "http://www.mtecresults.com/race/show/250/2011_Mora_Vasaloppet-58K_Freestyle", "Mora Vasaloppet")
process_race(r, [])
| |
#!/usr/bin/env python
#===============================================================================
# Copyright 2017 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
'''
Algorithms from Geocentric Datum of Australia Technical Manual
http://www.anzlic.org.au/icsm/gdatum/chapter4.html
This page last updated 11 May 1999
Computations on the Ellipsoid
There are a number of formulae that are available
to calculate accurate geodetic positions,
azimuths and distances on the ellipsoid.
Vincenty's formulae (Vincenty, 1975) may be used
for lines ranging from a few cm to nearly 20,000 km,
with millimetre accuracy.
The formulae have been extensively tested
for the Australian region, by comparison with results
from other formulae (Rainsford, 1955 & Sodano, 1965).
* Inverse problem: azimuth and distance from known
latitudes and longitudes
* Direct problem: Latitude and longitude from known
position, azimuth and distance.
* Sample data
* Excel spreadsheet
Vincenty's Inverse formulae
Given: latitude and longitude of two points
(phi1, lembda1 and phi2, lembda2),
Calculate: the ellipsoidal distance (s) and
forward and reverse azimuths between the points (alpha12, alpha21).
'''
import numpy
import math
__version__ = '1.0.1'
class GreatCircle(object):
"""
formula for perfect sphere from Ed Williams' 'Aviation Formulary'
(http://williams.best.vwh.net/avform.htm)
code for ellipsoid posted to GMT mailing list by Jim Leven in Dec 1999
Contact: Jeff Whitaker <jeffrey.s.whitaker@noaa.gov>
"""
def __init__(self,rmajor,rminor,lon1,lat1,lon2,lat2):
"""
Define a great circle by specifying:
rmajor - radius of major axis of ellipsoid
rminor - radius of minor axis of ellipsoid.
lon1 - starting longitude of great circle
lat1 - starting latitude
lon2 - ending longitude
lat2 - ending latitude
All must be given in degrees.
Instance variables:
distance - distance along the geodesic in metres (gcarclen holds the great circle arc length in radians).
lon1,lat1,lon2,lat2 - start and end points (in radians).
"""
# convert to radians from degrees.
lat1 = math.radians(lat1)
lon1 = math.radians(lon1)
lat2 = math.radians(lat2)
lon2 = math.radians(lon2)
self.a = rmajor
self.f = (rmajor-rminor)/rmajor
self.lat1 = lat1
self.lat2 = lat2
self.lon1 = lon1
self.lon2 = lon2
# distance along geodesic in meters.
d,a12,a21 = vinc_dist(self.f, self.a, lat1, lon1, lat2, lon2 )
self.distance = d
self.azimuth12 = a12
self.azimuth21 = a21
# great circle arc-length distance (in radians).
self.gcarclen = 2.*math.asin(math.sqrt((math.sin((lat1-lat2)/2))**2+\
math.cos(lat1)*math.cos(lat2)*(math.sin((lon1-lon2)/2))**2))
# check to see if points are antipodal (if so, route is undefined).
if self.gcarclen == math.pi:
self.antipodal = True
else:
self.antipodal = False
def points(self,npoints):
"""
compute arrays of npoints equally spaced
intermediate points along the great circle.
input parameter npoints is the number of points
to compute.
Returns lons, lats (lists with longitudes and latitudes
of intermediate points in degrees).
For example npoints=10 will return arrays lons,lats of 10
equally spaced points along the great circle.
"""
# must ask for at least 2 points.
if npoints <= 1:
raise ValueError('npoints must be greater than 1')
elif npoints == 2:
return [math.degrees(self.lon1),math.degrees(self.lon2)],[math.degrees(self.lat1),math.degrees(self.lat2)]
# can't do it if endpoints are antipodal, since
# route is undefined.
if self.antipodal:
raise ValueError('cannot compute intermediate points on a great circle whose endpoints are antipodal')
d = self.gcarclen
delta = 1.0/(npoints-1)
f = delta*numpy.arange(npoints) # f=0 is point 1, f=1 is point 2.
incdist = self.distance/(npoints-1)
lat1 = self.lat1
lat2 = self.lat2
lon1 = self.lon1
lon2 = self.lon2
# perfect sphere, use great circle formula
if self.f == 0.:
A = numpy.sin((1-f)*d)/math.sin(d)
B = numpy.sin(f*d)/math.sin(d)
x = A*math.cos(lat1)*math.cos(lon1)+B*math.cos(lat2)*math.cos(lon2)
y = A*math.cos(lat1)*math.sin(lon1)+B*math.cos(lat2)*math.sin(lon2)
z = A*math.sin(lat1) +B*math.sin(lat2)
lats=numpy.arctan2(z,numpy.sqrt(x**2+y**2))
lons=numpy.arctan2(y,x)
lons = map(math.degrees,lons.tolist())
lats = map(math.degrees,lats.tolist())
# use ellipsoid formulas
else:
latpt = self.lat1
lonpt = self.lon1
azimuth = self.azimuth12
lons = [math.degrees(lonpt)]
lats = [math.degrees(latpt)]
for _n in range(npoints-2):
latptnew, lonptnew, _alpha21=vinc_pt(self.f,self.a,latpt,lonpt,azimuth,incdist)
d,azimuth, _a21=vinc_dist(self.f,self.a,latptnew,lonptnew,lat2,lon2)
lats.append(math.degrees(latptnew))
lons.append(math.degrees(lonptnew))
latpt = latptnew; lonpt = lonptnew
lons.append(math.degrees(self.lon2))
lats.append(math.degrees(self.lat2))
return lons,lats
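# Usage sketch (assumption: WGS84 semi-axes in metres; the coordinates are the
# approximate decimal-degree versions of the sample points listed further below):
#
#   gc = GreatCircle(6378137.0, 6356752.3142, 144.4249, -37.9510, 143.9266, -37.6528)
#   gc.distance                # ellipsoidal distance in metres (~55 km here)
#   lons, lats = gc.points(5)  # 5 evenly spaced points along the geodesic, in degrees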
# ---------------------------------------------------------------------
# | |
# | geodetic.py - a collection of geodetic functions |
# | |
# ---------------------------------------------------------------------
#
#
# ----------------------------------------------------------------------
# | Algorithms from Geocentric Datum of Australia Technical Manual |
# | |
# | http://www.anzlic.org.au/icsm/gdatum/chapter4.html |
# | |
# | This page last updated 11 May 1999 |
# | |
# | Computations on the Ellipsoid |
# | |
# | There are a number of formulae that are available |
# | to calculate accurate geodetic positions, |
# | azimuths and distances on the ellipsoid. |
# | |
# | Vincenty's formulae (Vincenty, 1975) may be used |
# | for lines ranging from a few cm to nearly 20,000 km, |
# | with millimetre accuracy. |
# | The formulae have been extensively tested |
# | for the Australian region, by comparison with results |
# | from other formulae (Rainsford, 1955 & Sodano, 1965). |
# | |
# | * Inverse problem: azimuth and distance from known |
# | latitudes and longitudes |
# | * Direct problem: Latitude and longitude from known |
# | position, azimuth and distance. |
# | * Sample data |
# | * Excel spreadsheet |
# | |
# | Vincenty's Inverse formulae |
# | Given: latitude and longitude of two points |
# | (phi1, lembda1 and phi2, lembda2), |
# | Calculate: the ellipsoidal distance (s) and |
# | forward and reverse azimuths between the points (alpha12, alpha21). |
# | |
# ----------------------------------------------------------------------
def vinc_dist( f, a, phi1, lembda1, phi2, lembda2 ) :
"""
Returns the distance between two geographic points on the ellipsoid
and the forward and reverse azimuths between these points.
lats, longs and azimuths are in radians, distance in metres
Arguments:
f: flattening
a: equatorial radius (metres)
phi1: latitude of first point
lembda1: longitude of first point
phi2: latitude of second point
lembda2: longitude of second point
Returns ( s, alpha12, alpha21 ) as a tuple
"""
if (abs( phi2 - phi1 ) < 1e-8) and ( abs( lembda2 - lembda1) < 1e-8 ) :
return 0.0, 0.0, 0.0
two_pi = 2.0*math.pi
b = a * (1.0 - f)
TanU1 = (1-f) * math.tan( phi1 )
TanU2 = (1-f) * math.tan( phi2 )
U1 = math.atan(TanU1)
U2 = math.atan(TanU2)
lembda = lembda2 - lembda1
last_lembda = -4000000.0 # an impossible value
omega = lembda
# Iterate the following equations,
# until there is no significant change in lembda
while ( last_lembda < -3000000.0 or lembda != 0 and abs( (last_lembda - lembda)/lembda) > 1.0e-9 ) :
sqr_sin_sigma = pow( math.cos(U2) * math.sin(lembda), 2) + \
pow( (math.cos(U1) * math.sin(U2) - \
math.sin(U1) * math.cos(U2) * math.cos(lembda) ), 2 )
Sin_sigma = math.sqrt( sqr_sin_sigma )
Cos_sigma = math.sin(U1) * math.sin(U2) + math.cos(U1) * math.cos(U2) * math.cos(lembda)
sigma = math.atan2( Sin_sigma, Cos_sigma )
Sin_alpha = math.cos(U1) * math.cos(U2) * math.sin(lembda) / math.sin(sigma)
alpha = math.asin( Sin_alpha )
Cos2sigma_m = math.cos(sigma) - (2 * math.sin(U1) * math.sin(U2) / pow(math.cos(alpha), 2) )
C = (f/16) * pow(math.cos(alpha), 2) * (4 + f * (4 - 3 * pow(math.cos(alpha), 2)))
last_lembda = lembda
lembda = omega + (1-C) * f * math.sin(alpha) * (sigma + C * math.sin(sigma) * \
(Cos2sigma_m + C * math.cos(sigma) * (-1 + 2 * pow(Cos2sigma_m, 2) )))
u2 = pow(math.cos(alpha),2) * (a*a-b*b) / (b*b)
A = 1 + (u2/16384) * (4096 + u2 * (-768 + u2 * (320 - 175 * u2)))
B = (u2/1024) * (256 + u2 * (-128+ u2 * (74 - 47 * u2)))
delta_sigma = B * Sin_sigma * (Cos2sigma_m + (B/4) * \
(Cos_sigma * (-1 + 2 * pow(Cos2sigma_m, 2) ) - \
(B/6) * Cos2sigma_m * (-3 + 4 * sqr_sin_sigma) * \
(-3 + 4 * pow(Cos2sigma_m,2 ) )))
s = b * A * (sigma - delta_sigma)
alpha12 = math.atan2( (math.cos(U2) * math.sin(lembda)), \
(math.cos(U1) * math.sin(U2) - math.sin(U1) * math.cos(U2) * math.cos(lembda)))
alpha21 = math.atan2( (math.cos(U1) * math.sin(lembda)), \
(-math.sin(U1) * math.cos(U2) + math.cos(U1) * math.sin(U2) * math.cos(lembda)))
if ( alpha12 < 0.0 ) :
alpha12 = alpha12 + two_pi
if ( alpha12 > two_pi ) :
alpha12 = alpha12 - two_pi
alpha21 = alpha21 + two_pi / 2.0
if ( alpha21 < 0.0 ) :
alpha21 = alpha21 + two_pi
if ( alpha21 > two_pi ) :
alpha21 = alpha21 - two_pi
return s, alpha12, alpha21
# END of Vincenty's Inverse formulae
#----------------------------------------------------------------------------
# Vincenty's Direct formulae |
# Given: latitude and longitude of a point (phi1, lembda1) and |
# the geodetic azimuth (alpha12) |
# and ellipsoidal distance in metres (s) to a second point, |
# |
# Calculate: the latitude and longitude of the second point (phi2, lembda2) |
# and the reverse azimuth (alpha21). |
# |
#----------------------------------------------------------------------------
def vinc_pt( f, a, phi1, lembda1, alpha12, s ) :
"""
Returns the lat and long of projected point and reverse azimuth
given a reference point and a distance and azimuth to project.
lats, longs and azimuths are in radians, distance in metres
Returns ( phi2, lambda2, alpha21 ) as a tuple
"""
two_pi = 2.0*math.pi
if ( alpha12 < 0.0 ) :
alpha12 = alpha12 + two_pi
if ( alpha12 > two_pi ) :
alpha12 = alpha12 - two_pi
b = a * (1.0 - f)
TanU1 = (1-f) * math.tan(phi1)
U1 = math.atan( TanU1 )
sigma1 = math.atan2( TanU1, math.cos(alpha12) )
Sinalpha = math.cos(U1) * math.sin(alpha12)
cosalpha_sq = 1.0 - Sinalpha * Sinalpha
u2 = cosalpha_sq * (a * a - b * b ) / (b * b)
A = 1.0 + (u2 / 16384) * (4096 + u2 * (-768 + u2 * \
(320 - 175 * u2) ) )
B = (u2 / 1024) * (256 + u2 * (-128 + u2 * (74 - 47 * u2) ) )
# Starting with the approximation
sigma = (s / (b * A))
last_sigma = 2.0 * sigma + 2.0 # something impossible
# Iterate the following three equations
# until there is no significant change in sigma
# two_sigma_m , delta_sigma
while ( abs( (last_sigma - sigma) / sigma) > 1.0e-9 ) :
two_sigma_m = 2 * sigma1 + sigma
delta_sigma = B * math.sin(sigma) * ( math.cos(two_sigma_m) \
+ (B/4) * (math.cos(sigma) * \
(-1 + 2 * math.pow( math.cos(two_sigma_m), 2 ) - \
(B/6) * math.cos(two_sigma_m) * \
(-3 + 4 * math.pow(math.sin(sigma), 2 )) * \
(-3 + 4 * math.pow( math.cos (two_sigma_m), 2 )))))
last_sigma = sigma
sigma = (s / (b * A)) + delta_sigma
phi2 = math.atan2 ( (math.sin(U1) * math.cos(sigma) + math.cos(U1) * math.sin(sigma) * math.cos(alpha12) ), \
((1-f) * math.sqrt( math.pow(Sinalpha, 2) + \
pow(math.sin(U1) * math.sin(sigma) - math.cos(U1) * math.cos(sigma) * math.cos(alpha12), 2))))
lembda = math.atan2( (math.sin(sigma) * math.sin(alpha12 )), (math.cos(U1) * math.cos(sigma) - \
math.sin(U1) * math.sin(sigma) * math.cos(alpha12)))
C = (f/16) * cosalpha_sq * (4 + f * (4 - 3 * cosalpha_sq ))
omega = lembda - (1-C) * f * Sinalpha * \
(sigma + C * math.sin(sigma) * (math.cos(two_sigma_m) + \
C * math.cos(sigma) * (-1 + 2 * math.pow(math.cos(two_sigma_m),2) )))
lembda2 = lembda1 + omega
alpha21 = math.atan2 ( Sinalpha, (-math.sin(U1) * math.sin(sigma) + \
math.cos(U1) * math.cos(sigma) * math.cos(alpha12)))
alpha21 = alpha21 + two_pi / 2.0
if ( alpha21 < 0.0 ) :
alpha21 = alpha21 + two_pi
if ( alpha21 > two_pi ) :
alpha21 = alpha21 - two_pi
return phi2, lembda2, alpha21
# END of Vincenty's Direct formulae
##---------------------------------------------------------------------------
# Notes:
#
# * "The inverse formulae may give no solution over a line
# between two nearly antipodal points. This will occur when
# lembda ... is greater than pi in absolute value". (Vincenty, 1975)
#
# * In Vincenty (1975) L is used for the difference in longitude,
# however for consistency with other formulae in this Manual,
# omega is used here.
#
# * Variables specific to Vincenty's formulae are shown below,
# others common throughout the manual are shown in the Glossary.
#
#
# alpha = Azimuth of the geodesic at the equator
# U = Reduced latitude
# lembda = Difference in longitude on an auxiliary sphere (lembda1 & lembda2
# are the geodetic longitudes of points 1 & 2)
# sigma = Angular distance on a sphere, from point 1 to point 2
# sigma1 = Angular distance on a sphere, from the equator to point 1
# sigma2 = Angular distance on a sphere, from the equator to point 2
# sigma_m = Angular distance on a sphere, from the equator to the
# midpoint of the line from point 1 to point 2
# u, A, B, C = Internal variables
#
#
# Sample Data
#
# Flinders Peak
# -37o57'03.72030"
# 144o25'29.52440"
# Buninyong
# -37o39'10.15610"
# 143o55'35.38390"
# Ellipsoidal Distance
# 54,972.271 m
#
# Forward Azimuth
# 306o52'05.37"
#
# Reverse Azimuth
# 127o10'25.07"
#
#
##*******************************************************************
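# A quick check against the sample data above (assumption: GRS80 ellipsoid as
# used by the GDA technical manual, a = 6378137.0 m, 1/f = 298.257222101):
#
#   def _dms2rad(d, m, s):
#       sign = -1.0 if d < 0 else 1.0
#       return math.radians(sign * (abs(d) + m / 60.0 + s / 3600.0))
#
#   phi1, lembda1 = _dms2rad(-37, 57, 3.72030), _dms2rad(144, 25, 29.52440)
#   phi2, lembda2 = _dms2rad(-37, 39, 10.15610), _dms2rad(143, 55, 35.38390)
#   s, a12, a21 = vinc_dist(1.0 / 298.257222101, 6378137.0, phi1, lembda1, phi2, lembda2)
#   # s should come out close to 54972.271 m, matching the Ellipsoidal Distance above.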
| |
import collections
import logging
from abc import ABCMeta, abstractmethod
import six
from couchdbkit import ResourceNotFound
from dimagi.utils.decorators.memoized import memoized
from couchforms import const
DEFAULT_PARENT_IDENTIFIER = 'parent'
class AbstractXFormInstance(object):
# @property
# def form_id(self):
# raise NotImplementedError()
user_id = None
@property
def attachments(self):
"""
Get the extra attachments for this form. This will not include
the form itself
"""
raise NotImplementedError
@property
def form_data(self):
raise NotImplementedError()
@property
def metadata(self):
raise NotImplementedError()
@property
def is_normal(self):
raise NotImplementedError()
@property
def is_archived(self):
raise NotImplementedError()
@property
def is_deprecated(self):
raise NotImplementedError()
@property
def is_duplicate(self):
raise NotImplementedError()
@property
def is_error(self):
raise NotImplementedError()
@property
def is_submission_error_log(self):
raise NotImplementedError()
@property
def is_deleted(self):
raise NotImplementedError()
# @property
# def deletion_id(self):
# raise NotImplementedError
def auth_context(self):
raise NotImplementedError()
def get_data(self, xpath):
raise NotImplementedError()
def get_attachment(self, attachment_name):
raise NotImplementedError()
def archive(self, user_id=None):
raise NotImplementedError()
def unarchive(self, user_id=None):
raise NotImplementedError()
def get_xml_element(self):
raise NotImplementedError()
def get_xml(self):
raise NotImplementedError()
def save(self, *args, **kwargs):
raise NotImplementedError()
def set_submission_properties(self, submission_post):
raise NotImplementedError()
def soft_delete(self):
raise NotImplementedError()
def to_json(self):
raise NotImplementedError()
@classmethod
def get(cls, xform_id):
raise NotImplementedError()
@property
def xml_md5(self):
raise NotImplementedError()
@property
def type(self):
return self.form_data.get(const.TAG_TYPE, "")
@property
def name(self):
return self.form_data.get(const.TAG_NAME, "")
@memoized
def get_sync_token(self):
from casexml.apps.phone.models import get_properly_wrapped_sync_log
if self.last_sync_token:
try:
return get_properly_wrapped_sync_log(self.last_sync_token)
except ResourceNotFound:
logging.exception('No sync token with ID {} found. Form is {} in domain {}'.format(
self.last_sync_token, self.form_id, self.domain,
))
raise
return None
def get_index_map(indices):
return dict([
(index.identifier, {
"case_type": index.referenced_type,
"case_id": index.referenced_id,
"relationship": index.relationship,
}) for index in indices
])
class CaseToXMLMixin(object):
def to_xml(self, version, include_case_on_closed=False):
from xml.etree import ElementTree
from casexml.apps.phone.xml import get_case_element
if self.closed:
if include_case_on_closed:
elem = get_case_element(self, ('create', 'update', 'close'), version)
else:
elem = get_case_element(self, ('close',), version)
else:
elem = get_case_element(self, ('create', 'update'), version)
return ElementTree.tostring(elem)
class AbstractCommCareCase(CaseToXMLMixin):
# @property
# def case_id(self):
# raise NotImplementedError()
@property
def case_name(self):
raise NotImplementedError()
@property
def parent(self):
raise NotImplementedError()
def soft_delete(self):
raise NotImplementedError()
def get_attachment(self, attachment_name):
raise NotImplementedError()
def is_deleted(self):
raise NotImplementedError()
# @property
# def deletion_id(self):
# raise NotImplementedError
def dynamic_case_properties(self):
raise NotImplementedError()
def get_actions_for_form(self, xform):
raise NotImplementedError
def modified_since_sync(self, sync_log):
raise NotImplementedError
def get_subcases(self, index_identifier=None):
raise NotImplementedError
def get_parent(self, identifier=None, relationship=None):
raise NotImplementedError
def get_case_property(self, property):
raise NotImplementedError
def get_closing_transactions(self):
raise NotImplementedError
def get_opening_transactions(self):
raise NotImplementedError
def to_json(self):
raise NotImplementedError()
def to_api_json(self):
raise NotImplementedError()
def set_case_id(self, case_id):
raise NotImplementedError()
def _resolve_case_property(self, property_name, result):
CasePropertyResult = collections.namedtuple('CasePropertyResult', 'case value')
if property_name.lower().startswith('parent/'):
parents = self.get_parent(identifier=DEFAULT_PARENT_IDENTIFIER)
for parent in parents:
parent._resolve_case_property(property_name[7:], result)
return
result.append(CasePropertyResult(
self,
self.to_json().get(property_name)
))
def resolve_case_property(self, property_name):
"""
Takes a case property expression and resolves the necessary references
to get the case property value(s).
property_name - The case property expression. Examples: name, parent/name,
parent/parent/name
Returns a list of named tuples of (case, value), where value is the
resolved case property value and case is the case that yielded that value.
There can be more than one tuple in the returned result if a case has more
than one parent or grandparent.
"""
result = []
self._resolve_case_property(property_name, result)
return result
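# Usage sketch (assumption: `case` is a concrete subclass instance whose parent
# index uses the default 'parent' identifier and whose parent has a `name`):
#
#   results = case.resolve_case_property("parent/name")
#   # -> [CasePropertyResult(case=<parent case>, value=<parent's name>), ...]
#   # one tuple per matching parent; "parent/parent/name" recurses one level further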
@memoized
def get_index_map(self, reversed=False):
indices = self.indices if not reversed else self.reverse_indices
return get_index_map(indices)
def get_properties_in_api_format(self):
return dict(self.dynamic_case_properties().items() + {
"external_id": self.external_id,
"owner_id": self.owner_id,
# renamed
"case_name": self.name,
# renamed
"case_type": self.type,
# renamed
"date_opened": self.opened_on,
# all custom properties go here
}.items())
@memoized
def get_attachment_map(self):
return dict([
(name, {
'url': self.get_attachment_server_url(att.identifier),
'mime': att.attachment_from
}) for name, att in self.case_attachments.items()
])
def to_xml(self, version, include_case_on_closed=False):
from xml.etree import ElementTree
from casexml.apps.phone.xml import get_case_element
if self.closed:
if include_case_on_closed:
elem = get_case_element(self, ('create', 'update', 'close'), version)
else:
elem = get_case_element(self, ('close',), version)
else:
elem = get_case_element(self, ('create', 'update'), version)
return ElementTree.tostring(elem)
def get_attachment_server_url(self, identifier):
"""
A server specific URL for remote clients to access case attachment resources async.
"""
if identifier in self.case_attachments:
from dimagi.utils import web
from django.core.urlresolvers import reverse
return "%s%s" % (web.get_url_base(),
reverse("api_case_attachment", kwargs={
"domain": self.domain,
"case_id": self.case_id,
"attachment_id": identifier,
})
)
else:
return None
class AbstractSupplyInterface(six.with_metaclass(ABCMeta)):
@classmethod
@abstractmethod
def get_by_location(cls, location):
raise NotImplementedError
@classmethod
@abstractmethod
def get_or_create_by_location(cls, location):
raise NotImplementedError
class IsImageMixin(object):
@property
def is_image(self):
if self.content_type is None:
return None
return True if self.content_type.startswith('image/') else False
class CaseAttachmentMixin(IsImageMixin):
@property
def is_present(self):
"""
Helper method to see if this is a delete vs. update
"""
if self.identifier and (self.attachment_src == self.attachment_from is None):
return False
else:
return True
| |
#
# Widgets.py -- wrapped Qt widgets and convenience functions
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga.qtw.QtHelp import QtGui, QtCore, QTextCursor, \
QIcon, QPixmap, QImage
from ginga.qtw import QtHelp
from ginga.misc import Callback, Bunch
from functools import reduce
class WidgetBase(Callback.Callbacks):
def __init__(self):
super(WidgetBase, self).__init__()
self.widget = None
self.changed = False
def get_widget(self):
return self.widget
def set_tooltip(self, text):
self.widget.setToolTip(text)
def set_enabled(self, tf):
self.widget.setEnabled(tf)
# BASIC WIDGETS
class TextEntry(WidgetBase):
def __init__(self, text=''):
super(TextEntry, self).__init__()
self.widget = QtGui.QLineEdit()
self.widget.setText(text)
self.widget.returnPressed.connect(self._cb_redirect)
self.enable_callback('activated')
def _cb_redirect(self, *args):
self.make_callback('activated')
def get_text(self):
return self.widget.text()
def set_text(self, text):
self.widget.setText(text)
def set_length(self, numchars):
# this is only supposed to set the visible length (but Qt doesn't
# really have a good way to do that)
#self.widget.setMaxLength(numchars)
pass
class GrowingTextEdit(QtGui.QTextEdit):
def __init__(self, *args, **kwargs):
super(GrowingTextEdit, self).__init__(*args, **kwargs)
self.document().documentLayout().documentSizeChanged.connect(
self.sizeChange)
self.heightMin = 0
self.heightMax = 65000
def sizeChange(self):
docHeight = self.document().size().height()
# add some margin to prevent auto scrollbars
docHeight += 20
if self.heightMin <= docHeight <= self.heightMax:
self.setMaximumHeight(docHeight)
class TextArea(WidgetBase):
def __init__(self, wrap=False, editable=False):
super(TextArea, self).__init__()
#tw = QtGui.QTextEdit()
tw = GrowingTextEdit()
tw.setReadOnly(not editable)
if wrap:
tw.setLineWrapMode(QtGui.QTextEdit.WidgetWidth)
else:
tw.setLineWrapMode(QtGui.QTextEdit.NoWrap)
self.widget = tw
def append_text(self, text, autoscroll=True):
if text.endswith('\n'):
text = text[:-1]
self.widget.append(text)
if not autoscroll:
return
self.widget.moveCursor(QTextCursor.End)
self.widget.moveCursor(QTextCursor.StartOfLine)
self.widget.ensureCursorVisible()
def get_text(self):
return self.widget.document().toPlainText()
def clear(self):
self.widget.clear()
def set_text(self, text):
self.clear()
self.append_text(text)
def set_limit(self, numlines):
#self.widget.setMaximumBlockCount(numlines)
pass
def set_font(self, font):
self.widget.setCurrentFont(font)
def set_wrap(self, tf):
if tf:
self.widget.setLineWrapMode(QtGui.QTextEdit.WidgetWidth)
else:
self.widget.setLineWrapMode(QtGui.QTextEdit.NoWrap)
class Label(WidgetBase):
def __init__(self, text=''):
super(Label, self).__init__()
self.widget = QtGui.QLabel(text)
def get_text(self):
return self.widget.text()
def set_text(self, text):
self.widget.setText(text)
class Button(WidgetBase):
def __init__(self, text=''):
super(Button, self).__init__()
self.widget = QtGui.QPushButton(text)
self.widget.clicked.connect(self._cb_redirect)
self.enable_callback('activated')
def _cb_redirect(self, *args):
self.make_callback('activated')
class ComboBox(WidgetBase):
def __init__(self, editable=False):
super(ComboBox, self).__init__()
self.widget = QtHelp.ComboBox()
self.widget.setEditable(editable)
self.widget.activated.connect(self._cb_redirect)
self.enable_callback('activated')
def _cb_redirect(self):
idx = self.widget.currentIndex()
self.make_callback('activated', idx)
def insert_alpha(self, text):
index = 0
while True:
itemText = self.widget.itemText(index)
if len(itemText) == 0:
break
if itemText > text:
self.widget.insertItem(index, text)
return
index += 1
self.widget.addItem(text)
def delete_alpha(self, text):
index = self.widget.findText(text)
self.widget.removeItem(index)
def clear(self):
self.widget.clear()
def show_text(self, text):
index = self.widget.findText(text)
self.set_index(index)
def append_text(self, text):
self.widget.addItem(text)
def set_index(self, index):
self.widget.setCurrentIndex(index)
def get_index(self):
return self.widget.currentIndex()
class SpinBox(WidgetBase):
def __init__(self, dtype=int):
super(SpinBox, self).__init__()
if dtype == float:
w = QtGui.QDoubleSpinBox()
else:
w = QtGui.QSpinBox()
w.valueChanged.connect(self._cb_redirect)
# should values wrap around
w.setWrapping(False)
self.widget = w
self.enable_callback('value-changed')
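    # The `changed` flag (set by set_value) suppresses the valueChanged signal
    # that Qt emits for programmatic updates, so the 'value-changed' callback
    # below only fires for user-driven changes.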
def _cb_redirect(self, val):
if self.changed:
self.changed = False
return
self.make_callback('value-changed', val)
def get_value(self):
return self.widget.value()
def set_value(self, val):
self.changed = True
self.widget.setValue(val)
def set_decimals(self, num):
self.widget.setDecimals(num)
def set_limits(self, minval, maxval, incr_value=1):
adj = self.widget
adj.setRange(minval, maxval)
adj.setSingleStep(incr_value)
class Slider(WidgetBase):
def __init__(self, orientation='horizontal', track=False):
super(Slider, self).__init__()
if orientation == 'horizontal':
w = QtGui.QSlider(QtCore.Qt.Horizontal)
else:
w = QtGui.QSlider(QtCore.Qt.Vertical)
# this controls whether the callbacks are made *as the user
# moves the slider* or afterwards
w.setTracking(track)
w.setTickPosition(QtGui.QSlider.TicksBelow)
self.widget = w
w.valueChanged.connect(self._cb_redirect)
self.enable_callback('value-changed')
def _cb_redirect(self, val):
if self.changed:
self.changed = False
return
self.make_callback('value-changed', val)
def get_value(self):
return self.widget.value()
def set_value(self, val):
self.changed = True
self.widget.setValue(val)
def set_tracking(self, tf):
self.widget.setTracking(tf)
def set_limits(self, minval, maxval, incr_value=1):
adj = self.widget
adj.setRange(minval, maxval)
adj.setSingleStep(incr_value)
class ScrollBar(WidgetBase):
def __init__(self, orientation='horizontal'):
super(ScrollBar, self).__init__()
if orientation == 'horizontal':
self.widget = QtGui.QScrollBar(QtCore.Qt.Horizontal)
else:
self.widget = QtGui.QScrollBar(QtCore.Qt.Vertical)
self.widget.valueChanged.connect(self._cb_redirect)
self.enable_callback('activated')
def _cb_redirect(self):
val = self.widget.value()
self.make_callback('activated', val)
class CheckBox(WidgetBase):
def __init__(self, text=''):
super(CheckBox, self).__init__()
self.widget = QtGui.QCheckBox(text)
self.widget.stateChanged.connect(self._cb_redirect)
self.enable_callback('activated')
def _cb_redirect(self, *args):
val = self.get_state()
self.make_callback('activated', val)
def set_state(self, tf):
self.widget.setChecked(tf)
def get_state(self):
val = self.widget.checkState()
# returns 0 (unchecked) or 2 (checked)
return (val != 0)
class ToggleButton(WidgetBase):
def __init__(self, text=''):
super(ToggleButton, self).__init__()
self.widget = QtGui.QPushButton(text)
self.widget.setCheckable(True)
self.widget.clicked.connect(self._cb_redirect)
self.enable_callback('activated')
def _cb_redirect(self, val):
self.make_callback('activated', val)
def set_state(self, tf):
self.widget.setChecked(tf)
def get_state(self):
return self.widget.isChecked()
class RadioButton(WidgetBase):
def __init__(self, text='', group=None):
super(RadioButton, self).__init__()
self.widget = QtGui.QRadioButton(text)
self.widget.toggled.connect(self._cb_redirect)
self.enable_callback('activated')
def _cb_redirect(self, val):
self.make_callback('activated', val)
def set_state(self, tf):
self.widget.setChecked(tf)
def get_state(self):
return self.widget.isChecked()
class ProgressBar(WidgetBase):
def __init__(self):
super(ProgressBar, self).__init__()
w = QtGui.QProgressBar()
w.setRange(0, 100)
w.setTextVisible(True)
self.widget = w
def set_value(self, pct):
self.widget.setValue(int(pct * 100.0))
# CONTAINERS
class ContainerBase(WidgetBase):
def __init__(self):
super(ContainerBase, self).__init__()
self.children = []
def add_ref(self, ref):
# TODO: should this be a weakref?
self.children.append(ref)
def _remove(self, childw, delete=False):
self.widget.layout().removeWidget(childw)
childw.setParent(None)
if delete:
childw.deleteLater()
def remove(self, w, delete=False):
if not w in self.children:
raise KeyError("Widget is not a child of this container")
self.children.remove(w)
self._remove(w.get_widget(), delete=delete)
def remove_all(self):
for w in list(self.children):
self.remove(w)
def get_children(self):
return self.children
class Box(ContainerBase):
def __init__(self, orientation='horizontal'):
super(Box, self).__init__()
self.orientation = orientation
if orientation == 'horizontal':
self.widget = QtHelp.HBox()
else:
self.widget = QtHelp.VBox()
def add_widget(self, child, stretch=0.0):
self.add_ref(child)
child_w = child.get_widget()
self.widget.layout().addWidget(child_w, stretch=stretch)
def set_spacing(self, val):
self.widget.layout().setSpacing(val)
    def set_margins(self, left, right, top, bottom):
        # note: Qt's setContentsMargins() takes (left, top, right, bottom)
        self.widget.layout().setContentsMargins(left, top, right, bottom)
def set_border_width(self, pix):
self.widget.layout().setContentsMargins(pix, pix, pix, pix)
class HBox(Box):
def __init__(self):
super(HBox, self).__init__(orientation='horizontal')
class VBox(Box):
def __init__(self):
super(VBox, self).__init__(orientation='vertical')
class Frame(ContainerBase):
def __init__(self, title=None):
super(Frame, self).__init__()
self.widget = QtGui.QFrame()
self.widget.setFrameStyle(QtGui.QFrame.Box | QtGui.QFrame.Raised)
vbox = QtGui.QVBoxLayout()
# because of ridiculous defaults
vbox.setContentsMargins(2, 2, 2, 2)
self.widget.setLayout(vbox)
if title:
lbl = QtGui.QLabel(title)
lbl.setAlignment(QtCore.Qt.AlignHCenter)
vbox.addWidget(lbl, stretch=0)
self.label = lbl
else:
self.label = None
def set_widget(self, child, stretch=1):
self.remove_all()
self.add_ref(child)
self.widget.layout().addWidget(child.get_widget(), stretch=stretch)
class TabWidget(ContainerBase):
def __init__(self, tabpos='top'):
super(TabWidget, self).__init__()
nb = QtGui.QTabWidget()
if tabpos == 'top':
nb.setTabPosition(QtGui.QTabWidget.North)
elif tabpos == 'bottom':
nb.setTabPosition(QtGui.QTabWidget.South)
elif tabpos == 'left':
nb.setTabPosition(QtGui.QTabWidget.West)
elif tabpos == 'right':
nb.setTabPosition(QtGui.QTabWidget.East)
nb.currentChanged.connect(self._cb_redirect)
self.widget = nb
def _cb_redirect(self, index):
self.make_callback('activated', index)
def add_widget(self, child, title=''):
self.add_ref(child)
child_w = child.get_widget()
self.widget.addTab(child_w, title)
    def get_index(self):
        return self.widget.currentIndex()
def set_index(self, idx):
self.widget.setCurrentIndex(idx)
def index_of(self, child):
return self.widget.indexOf(child.get_widget())
class StackWidget(ContainerBase):
def __init__(self):
super(StackWidget, self).__init__()
self.widget = QtHelp.StackedWidget()
def add_widget(self, child, title=''):
self.add_ref(child)
child_w = child.get_widget()
self.widget.addTab(child_w, title)
def get_index(self):
return self.widget.getCurrentIndex()
def set_index(self, idx):
self.widget.setCurrentIndex(idx)
def index_of(self, child):
return self.widget.indexOf(child.get_widget())
class ScrollArea(ContainerBase):
def __init__(self):
super(ScrollArea, self).__init__()
self.widget = QtGui.QScrollArea()
self.widget.setWidgetResizable(True)
def set_widget(self, child):
self.add_ref(child)
self.widget.setWidget(child.get_widget())
class Splitter(ContainerBase):
def __init__(self, orientation='horizontal'):
super(Splitter, self).__init__()
w = QtGui.QSplitter()
self.orientation = orientation
if orientation == 'horizontal':
w.setOrientation(QtCore.Qt.Horizontal)
else:
w.setOrientation(QtCore.Qt.Vertical)
self.widget = w
        # setStretchFactor() takes integer factors; weight both halves equally
        w.setStretchFactor(0, 1)
        w.setStretchFactor(1, 1)
def add_widget(self, child):
self.add_ref(child)
child_w = child.get_widget()
self.widget.addWidget(child_w)
class GridBox(ContainerBase):
def __init__(self, rows=1, columns=1):
super(GridBox, self).__init__()
w = QtGui.QWidget()
layout = QtGui.QGridLayout()
w.setLayout(layout)
self.widget = w
def set_row_spacing(self, val):
self.widget.layout().setVerticalSpacing(val)
def set_column_spacing(self, val):
self.widget.layout().setHorizontalSpacing(val)
def add_widget(self, child, row, col, stretch=0):
self.add_ref(child)
w = child.get_widget()
self.widget.layout().addWidget(w, row, col)
class ToolbarAction(WidgetBase):
def __init__(self):
super(ToolbarAction, self).__init__()
self.widget = None
self.enable_callback('activated')
def _cb_redirect(self, *args):
if self.widget.isCheckable():
tf = self.widget.isChecked()
self.make_callback('activated', tf)
else:
self.make_callback('activated')
def set_state(self, tf):
self.widget.setChecked(tf)
def get_state(self):
return self.widget.isChecked()
class Toolbar(ContainerBase):
def __init__(self, orientation='horizontal'):
super(Toolbar, self).__init__()
w = QtGui.QToolBar()
if orientation == 'horizontal':
w.setOrientation(QtCore.Qt.Horizontal)
else:
w.setOrientation(QtCore.Qt.Vertical)
self.widget = w
def add_action(self, text, toggle=False, iconpath=None):
child = ToolbarAction()
if iconpath:
image = QImage(iconpath)
qsize = QtCore.QSize(24, 24)
image = image.scaled(qsize)
pixmap = QPixmap.fromImage(image)
iconw = QIcon(pixmap)
action = self.widget.addAction(iconw, text,
child._cb_redirect)
else:
action = self.widget.addAction(text, child._cb_redirect)
action.setCheckable(toggle)
child.widget = action
self.add_ref(child)
return child
def add_widget(self, child):
self.add_ref(child)
w = child.get_widget()
self.widget.addWidget(w)
def add_separator(self):
self.widget.addSeparator()
class MenuAction(WidgetBase):
def __init__(self, text=None):
super(MenuAction, self).__init__()
self.widget = None
self.text = text
self.enable_callback('activated')
def _cb_redirect(self, *args):
if self.widget.isCheckable():
tf = self.widget.isChecked()
self.make_callback('activated', tf)
else:
self.make_callback('activated')
class Menu(ContainerBase):
def __init__(self):
super(Menu, self).__init__()
# this ends up being a reference to the Qt menubar or toolbar
self.widget = None
def add_widget(self, child):
child.widget = self.widget.addAction(child.text,
lambda: child._cb_redirect())
self.add_ref(child)
def add_name(self, name):
child = MenuAction(text=name)
self.add_widget(child)
return child
def add_separator(self):
self.widget.addSeparator()
class Menubar(ContainerBase):
def __init__(self):
super(Menubar, self).__init__()
self.widget = QtGui.QMenuBar()
def add_widget(self, child):
menu_w = child.get_widget()
self.widget.addMenu(menu_w)
self.add_ref(child)
def add_name(self, name):
menu_w = self.widget.addMenu(name)
child = Menu()
child.widget = menu_w
self.add_ref(child)
return child
# MODULE FUNCTIONS
def name_mangle(name, pfx=''):
newname = []
for c in name.lower():
if not (c.isalpha() or c.isdigit() or (c == '_')):
newname.append('_')
else:
newname.append(c)
return pfx + ''.join(newname)
def make_widget(title, wtype):
if wtype == 'label':
w = Label(title)
w.widget.setAlignment(QtCore.Qt.AlignRight)
elif wtype == 'llabel':
w = Label(title)
w.widget.setAlignment(QtCore.Qt.AlignLeft)
elif wtype == 'entry':
w = TextEntry()
#w.widget.setMaxLength(12)
elif wtype == 'combobox':
w = ComboBox()
elif wtype == 'spinbutton':
w = SpinBox(dtype=int)
elif wtype == 'spinfloat':
w = SpinBox(dtype=float)
elif wtype == 'vbox':
w = VBox()
elif wtype == 'hbox':
w = HBox()
elif wtype == 'hscale':
w = Slider(orientation='horizontal')
elif wtype == 'vscale':
w = Slider(orientation='vertical')
elif wtype == 'checkbutton':
w = CheckBox(title)
elif wtype == 'radiobutton':
w = RadioButton(title)
elif wtype == 'togglebutton':
w = ToggleButton(title)
elif wtype == 'button':
w = Button(title)
elif wtype == 'spacer':
w = Label('')
elif wtype == 'textarea':
w = TextArea(editable=True)
elif wtype == 'toolbar':
w = Toolbar()
elif wtype == 'menubar':
w = Menubar()
else:
raise ValueError("Bad wtype=%s" % wtype)
return w
def hadjust(w, orientation):
if orientation != 'horizontal':
return w
vbox = VBox()
vbox.add_widget(w)
vbox.add_widget(Label(''), stretch=1)
return vbox
def build_info(captions, orientation='vertical'):
numrows = len(captions)
numcols = reduce(lambda acc, tup: max(acc, len(tup)), captions, 0)
if (numcols % 2) != 0:
raise ValueError("Column spec is not an even number")
    numcols //= 2
widget = QtGui.QWidget()
table = QtGui.QGridLayout()
widget.setLayout(table)
table.setVerticalSpacing(2)
table.setHorizontalSpacing(4)
table.setContentsMargins(2, 2, 2, 2)
wb = Bunch.Bunch()
row = 0
for tup in captions:
col = 0
while col < numcols:
idx = col * 2
if idx < len(tup):
title, wtype = tup[idx:idx+2]
if not title.endswith(':'):
name = name_mangle(title)
else:
name = name_mangle('lbl_'+title[:-1])
w = make_widget(title, wtype)
table.addWidget(w.widget, row, col)
wb[name] = w
col += 1
row += 1
w = wrap(widget)
w = hadjust(w, orientation=orientation)
return w, wb
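# Illustrative usage sketch (hypothetical captions; assumes a running Qt app).
# Each caption row is a flat sequence of (title, wtype) pairs; build_info()
# returns a wrapped container plus a Bunch keyed by the mangled titles:
#
#   captions = (('Name:', 'label', 'name', 'entry'),
#               ('Count:', 'label', 'count', 'spinbutton'),
#               ('Apply', 'button'))
#   w, b = build_info(captions)
#   b.name.set_text('M31')
#   b.apply.add_callback('activated', lambda w: print(b.name.get_text()))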
def wrap(native_widget):
wrapper = WidgetBase()
wrapper.widget = native_widget
return wrapper
def get_orientation(container):
if not hasattr(container, 'size'):
return 'vertical'
(wd, ht) = container.size
if wd < ht:
return 'vertical'
else:
return 'horizontal'
def get_oriented_box(container, scrolled=True, fill=False):
orientation = get_orientation(container)
if orientation == 'vertical':
box1 = VBox()
box2 = VBox()
else:
box1 = HBox()
box2 = VBox()
box2.add_widget(box1)
if scrolled:
box2.add_widget(Label(''), stretch=1)
sw = ScrollArea()
sw.set_widget(box2)
else:
sw = box2
return box1, sw, orientation
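# Illustrative note: get_oriented_box() picks the box orientation from the
# container's aspect ratio (vertical if taller than wide) and, when
# scrolled=True, wraps the boxes in a ScrollArea; callers pack widgets into
# the returned inner box and add the returned wrapper to their GUI.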
#END
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Cluster Resolvers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.cluster_resolver.python.training.cluster_resolver import SimpleClusterResolver
from tensorflow.contrib.cluster_resolver.python.training.cluster_resolver import UnionClusterResolver
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
class UnionClusterResolverTest(test.TestCase):
# TODO(frankchn): Transform to parameterized test after it is included in the
# TF open source codebase.
def _verifyClusterSpecEquality(self, cluster_spec, expected_proto):
self.assertProtoEquals(expected_proto, cluster_spec.as_cluster_def())
self.assertProtoEquals(
expected_proto, server_lib.ClusterSpec(cluster_spec).as_cluster_def())
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec.as_cluster_def()).as_cluster_def())
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec.as_dict()).as_cluster_def())
def testSingleClusterResolver(self):
base_cluster_spec = server_lib.ClusterSpec({
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
})
simple_resolver = SimpleClusterResolver(base_cluster_spec)
union_resolver = UnionClusterResolver(simple_resolver)
expected_proto = """
job { name: 'ps' tasks { key: 0 value: 'ps0:2222' }
tasks { key: 1 value: 'ps1:2222' } }
job { name: 'worker' tasks { key: 0 value: 'worker0:2222' }
tasks { key: 1 value: 'worker1:2222' }
tasks { key: 2 value: 'worker2:2222' } }
"""
actual_cluster_spec = union_resolver.cluster_spec()
self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
def testTwoNonOverlappingJobMergedClusterResolver(self):
cluster_spec_1 = server_lib.ClusterSpec({
"ps": [
"ps0:2222",
"ps1:2222"
]
})
cluster_spec_2 = server_lib.ClusterSpec({
"worker": [
"worker0:2222",
"worker1:2222",
"worker2:2222"
]
})
cluster_resolver_1 = SimpleClusterResolver(cluster_spec_1)
cluster_resolver_2 = SimpleClusterResolver(cluster_spec_2)
union_cluster = UnionClusterResolver(cluster_resolver_1, cluster_resolver_2)
cluster_spec = union_cluster.cluster_spec()
expected_proto = """
job { name: 'ps' tasks { key: 0 value: 'ps0:2222' }
tasks { key: 1 value: 'ps1:2222' } }
job { name: 'worker' tasks { key: 0 value: 'worker0:2222' }
tasks { key: 1 value: 'worker1:2222' }
tasks { key: 2 value: 'worker2:2222' } }
"""
self._verifyClusterSpecEquality(cluster_spec, expected_proto)
def testOverlappingJobMergedClusterResolver(self):
cluster_spec_1 = server_lib.ClusterSpec({
"worker": [
"worker4:2222",
"worker5:2222"
]
})
cluster_spec_2 = server_lib.ClusterSpec({
"worker": [
"worker0:2222",
"worker1:2222",
"worker2:2222"
]
})
cluster_resolver_1 = SimpleClusterResolver(cluster_spec_1)
cluster_resolver_2 = SimpleClusterResolver(cluster_spec_2)
union_cluster = UnionClusterResolver(cluster_resolver_1, cluster_resolver_2)
cluster_spec = union_cluster.cluster_spec()
expected_proto = """
job { name: 'worker' tasks { key: 0 value: 'worker4:2222' }
tasks { key: 1 value: 'worker5:2222' }
tasks { key: 2 value: 'worker0:2222' }
tasks { key: 3 value: 'worker1:2222' }
tasks { key: 4 value: 'worker2:2222' } }
"""
self._verifyClusterSpecEquality(cluster_spec, expected_proto)
def testOverlappingSparseJobMergedClusterResolverThrowError(self):
cluster_spec_1 = server_lib.ClusterSpec({
"worker": {
7: "worker4:2222",
9: "worker5:2222"
}
})
cluster_spec_2 = server_lib.ClusterSpec({
"worker": {
3: "worker0:2222",
6: "worker1:2222",
7: "worker2:2222"
}
})
cluster_resolver_1 = SimpleClusterResolver(cluster_spec_1)
cluster_resolver_2 = SimpleClusterResolver(cluster_spec_2)
union_cluster = UnionClusterResolver(cluster_resolver_1, cluster_resolver_2)
self.assertRaises(KeyError, union_cluster.cluster_spec)
def testOverlappingDictAndListThrowError(self):
cluster_spec_1 = server_lib.ClusterSpec({
"worker": [
"worker4:2222",
"worker5:2222"
]
})
cluster_spec_2 = server_lib.ClusterSpec({
"worker": {
1: "worker0:2222",
2: "worker1:2222",
3: "worker2:2222"
}
})
cluster_resolver_1 = SimpleClusterResolver(cluster_spec_1)
cluster_resolver_2 = SimpleClusterResolver(cluster_spec_2)
union_cluster = UnionClusterResolver(cluster_resolver_1, cluster_resolver_2)
self.assertRaises(KeyError, union_cluster.cluster_spec)
def testOverlappingJobNonOverlappingKey(self):
cluster_spec_1 = server_lib.ClusterSpec({
"worker": {
5: "worker4:2222",
9: "worker5:2222"
}
})
cluster_spec_2 = server_lib.ClusterSpec({
"worker": {
3: "worker0:2222",
6: "worker1:2222",
7: "worker2:2222"
}
})
cluster_resolver_1 = SimpleClusterResolver(cluster_spec_1)
cluster_resolver_2 = SimpleClusterResolver(cluster_spec_2)
union_cluster = UnionClusterResolver(cluster_resolver_1, cluster_resolver_2)
cluster_spec = union_cluster.cluster_spec()
expected_proto = """
job { name: 'worker' tasks { key: 3 value: 'worker0:2222' }
tasks { key: 5 value: 'worker4:2222' }
tasks { key: 6 value: 'worker1:2222' }
tasks { key: 7 value: 'worker2:2222' }
tasks { key: 9 value: 'worker5:2222' }}
"""
self._verifyClusterSpecEquality(cluster_spec, expected_proto)
def testMixedModeNonOverlappingKey(self):
cluster_spec_1 = server_lib.ClusterSpec({
"worker": [
"worker4:2222",
"worker5:2222"
]
})
cluster_spec_2 = server_lib.ClusterSpec({
"worker": {
3: "worker0:2222",
6: "worker1:2222",
7: "worker2:2222"
}
})
cluster_resolver_1 = SimpleClusterResolver(cluster_spec_1)
cluster_resolver_2 = SimpleClusterResolver(cluster_spec_2)
union_cluster = UnionClusterResolver(cluster_resolver_1, cluster_resolver_2)
cluster_spec = union_cluster.cluster_spec()
expected_proto = """
job { name: 'worker' tasks { key: 0 value: 'worker4:2222' }
tasks { key: 1 value: 'worker5:2222' }
tasks { key: 3 value: 'worker0:2222' }
tasks { key: 6 value: 'worker1:2222' }
tasks { key: 7 value: 'worker2:2222' }}
"""
self._verifyClusterSpecEquality(cluster_spec, expected_proto)
def testRetainSparseJobWithNoMerging(self):
base_cluster_spec = server_lib.ClusterSpec({
"worker": {
1: "worker0:2222",
3: "worker1:2222",
5: "worker2:2222"
}
})
base_cluster_resolver = SimpleClusterResolver(base_cluster_spec)
union_cluster = UnionClusterResolver(base_cluster_resolver)
cluster_spec = union_cluster.cluster_spec()
expected_proto = """
job { name: 'worker' tasks { key: 1 value: 'worker0:2222' }
tasks { key: 3 value: 'worker1:2222' }
tasks { key: 5 value: 'worker2:2222' } }
"""
self._verifyClusterSpecEquality(cluster_spec, expected_proto)
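# Summary of the merge behaviour exercised by the tests above: jobs specified
# as lists are concatenated in resolver order and re-numbered from 0; jobs
# specified as dicts keep their task indices; a task index that appears in more
# than one resolver for the same job raises KeyError.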
if __name__ == "__main__":
test.main()
| |
from ..errors import InvalidVersion
from ..utils import check_resource, minimum_version
from ..utils import version_lt
from .. import utils
class NetworkApiMixin(object):
@minimum_version('1.21')
def networks(self, names=None, ids=None, filters=None):
"""
        List networks. Similar to the ``docker network ls`` command.
Args:
names (:py:class:`list`): List of names to filter by
ids (:py:class:`list`): List of ids to filter by
filters (dict): Filters to be processed on the network list.
Available filters:
- ``driver=[<driver-name>]`` Matches a network's driver.
- ``label=[<key>]`` or ``label=[<key>=<value>]``.
- ``type=["custom"|"builtin"]`` Filters networks by type.
Returns:
(dict): List of network objects.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if filters is None:
filters = {}
if names:
filters['name'] = names
if ids:
filters['id'] = ids
params = {'filters': utils.convert_filters(filters)}
url = self._url("/networks")
res = self._get(url, params=params)
return self._result(res, json=True)
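    # Hypothetical usage sketch (assumes `client` is an APIClient speaking API
    # version >= 1.21); parameter names mirror the signature above:
    #   bridge_nets = client.networks(filters={'driver': 'bridge'})
    #   by_name = client.networks(names=['my-network'])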
@minimum_version('1.21')
def create_network(self, name, driver=None, options=None, ipam=None,
check_duplicate=None, internal=False, labels=None,
enable_ipv6=False, attachable=None, scope=None):
"""
Create a network. Similar to the ``docker network create``.
Args:
name (str): Name of the network
driver (str): Name of the driver used to create the network
options (dict): Driver options as a key-value dictionary
ipam (IPAMConfig): Optional custom IP scheme for the network.
check_duplicate (bool): Request daemon to check for networks with
same name. Default: ``True``.
internal (bool): Restrict external access to the network. Default
``False``.
labels (dict): Map of labels to set on the network. Default
``None``.
enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.
attachable (bool): If enabled, and the network is in the global
scope, non-service containers on worker nodes will be able to
connect to the network.
Returns:
(dict): The created network reference object
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
A network using the bridge driver:
>>> client.create_network("network1", driver="bridge")
You can also create more advanced networks with custom IPAM
configurations. For example, setting the subnet to
``192.168.52.0/24`` and gateway address to ``192.168.52.254``.
.. code-block:: python
>>> ipam_pool = docker.types.IPAMPool(
subnet='192.168.52.0/24',
gateway='192.168.52.254'
)
>>> ipam_config = docker.types.IPAMConfig(
pool_configs=[ipam_pool]
)
>>> docker_client.create_network("network1", driver="bridge",
ipam=ipam_config)
"""
if options is not None and not isinstance(options, dict):
raise TypeError('options must be a dictionary')
data = {
'Name': name,
'Driver': driver,
'Options': options,
'IPAM': ipam,
'CheckDuplicate': check_duplicate,
}
if labels is not None:
if version_lt(self._version, '1.23'):
raise InvalidVersion(
'network labels were introduced in API 1.23'
)
if not isinstance(labels, dict):
raise TypeError('labels must be a dictionary')
data["Labels"] = labels
if enable_ipv6:
if version_lt(self._version, '1.23'):
raise InvalidVersion(
'enable_ipv6 was introduced in API 1.23'
)
data['EnableIPv6'] = True
if internal:
if version_lt(self._version, '1.22'):
raise InvalidVersion('Internal networks are not '
'supported in API version < 1.22')
data['Internal'] = True
if attachable is not None:
if version_lt(self._version, '1.24'):
raise InvalidVersion(
'attachable is not supported in API version < 1.24'
)
data['Attachable'] = attachable
url = self._url("/networks/create")
res = self._post_json(url, data=data)
return self._result(res, json=True)
@minimum_version('1.21')
def remove_network(self, net_id):
"""
Remove a network. Similar to the ``docker network rm`` command.
Args:
net_id (str): The network's id
"""
url = self._url("/networks/{0}", net_id)
res = self._delete(url)
self._raise_for_status(res)
@minimum_version('1.21')
def inspect_network(self, net_id):
"""
Get detailed information about a network.
Args:
net_id (str): ID of network
"""
url = self._url("/networks/{0}", net_id)
res = self._get(url)
return self._result(res, json=True)
@check_resource
@minimum_version('1.21')
def connect_container_to_network(self, container, net_id,
ipv4_address=None, ipv6_address=None,
aliases=None, links=None,
link_local_ips=None):
"""
Connect a container to a network.
Args:
container (str): container-id/name to be connected to the network
net_id (str): network id
aliases (:py:class:`list`): A list of aliases for this endpoint.
Names in that list can be used within the network to reach the
container. Defaults to ``None``.
links (:py:class:`list`): A list of links for this endpoint.
Containers declared in this list will be linked to this
container. Defaults to ``None``.
ipv4_address (str): The IP address of this container on the
network, using the IPv4 protocol. Defaults to ``None``.
ipv6_address (str): The IP address of this container on the
network, using the IPv6 protocol. Defaults to ``None``.
link_local_ips (:py:class:`list`): A list of link-local
(IPv4/IPv6) addresses.
"""
data = {
"Container": container,
"EndpointConfig": self.create_endpoint_config(
aliases=aliases, links=links, ipv4_address=ipv4_address,
ipv6_address=ipv6_address, link_local_ips=link_local_ips
),
}
url = self._url("/networks/{0}/connect", net_id)
res = self._post_json(url, data=data)
self._raise_for_status(res)
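    # Hypothetical usage sketch: attach a running container to a network with
    # a fixed IPv4 address and an alias (argument names as documented above):
    #   client.connect_container_to_network(
    #       'my-container', net_id,
    #       ipv4_address='192.168.52.10', aliases=['db'])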
@check_resource
@minimum_version('1.21')
def disconnect_container_from_network(self, container, net_id,
force=False):
"""
Disconnect a container from a network.
Args:
container (str): container ID or name to be disconnected from the
network
net_id (str): network ID
force (bool): Force the container to disconnect from a network.
Default: ``False``
"""
data = {"Container": container}
if force:
if version_lt(self._version, '1.22'):
raise InvalidVersion(
'Forced disconnect was introduced in API 1.22'
)
data['Force'] = force
url = self._url("/networks/{0}/disconnect", net_id)
res = self._post_json(url, data=data)
self._raise_for_status(res)
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import json
import re
import unittest
from contextlib import redirect_stdout
from unittest import mock
import pytest
from parameterized import parameterized
from airflow.cli import cli_parser
from airflow.cli.commands import connection_command
from airflow.exceptions import AirflowException
from airflow.models import Connection
from airflow.utils.db import merge_conn
from airflow.utils.session import create_session, provide_session
from tests.test_utils.db import clear_db_connections
class TestCliGetConnection(unittest.TestCase):
def setUp(self):
self.parser = cli_parser.get_parser()
clear_db_connections()
def tearDown(self):
clear_db_connections()
def test_cli_connection_get(self):
with redirect_stdout(io.StringIO()) as stdout:
connection_command.connections_get(
self.parser.parse_args(["connections", "get", "google_cloud_default", "--output", "json"])
)
stdout = stdout.getvalue()
assert "google-cloud-platform:///default" in stdout
def test_cli_connection_get_invalid(self):
with pytest.raises(SystemExit, match=re.escape("Connection not found.")):
connection_command.connections_get(self.parser.parse_args(["connections", "get", "INVALID"]))
class TestCliListConnections(unittest.TestCase):
EXPECTED_CONS = [
(
'airflow_db',
'mysql',
),
(
'google_cloud_default',
'google_cloud_platform',
),
(
'http_default',
'http',
),
(
'local_mysql',
'mysql',
),
(
'mongo_default',
'mongo',
),
(
'mssql_default',
'mssql',
),
(
'mysql_default',
'mysql',
),
(
'pinot_broker_default',
'pinot',
),
(
'postgres_default',
'postgres',
),
(
'presto_default',
'presto',
),
(
'sqlite_default',
'sqlite',
),
(
'trino_default',
'trino',
),
(
'vertica_default',
'vertica',
),
]
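    # The (conn_id, conn_type) pairs above are the default connections that
    # clear_db_connections() re-creates between tests; presumably they need to
    # stay in sync with Airflow's default connection set.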
def setUp(self):
self.parser = cli_parser.get_parser()
clear_db_connections()
def tearDown(self):
clear_db_connections()
def test_cli_connections_list_as_json(self):
args = self.parser.parse_args(["connections", "list", "--output", "json"])
with redirect_stdout(io.StringIO()) as stdout:
connection_command.connections_list(args)
print(stdout.getvalue())
stdout = stdout.getvalue()
for conn_id, conn_type in self.EXPECTED_CONS:
assert conn_type in stdout
assert conn_id in stdout
def test_cli_connections_filter_conn_id(self):
args = self.parser.parse_args(
["connections", "list", "--output", "json", '--conn-id', 'http_default']
)
with redirect_stdout(io.StringIO()) as stdout:
connection_command.connections_list(args)
stdout = stdout.getvalue()
assert "http_default" in stdout
class TestCliExportConnections(unittest.TestCase):
@provide_session
def setUp(self, session=None):
clear_db_connections(add_default_connections_back=False)
merge_conn(
Connection(
conn_id="airflow_db",
conn_type="mysql",
description="mysql conn description",
host="mysql",
login="root",
password="plainpassword",
schema="airflow",
),
session,
)
merge_conn(
Connection(
conn_id="druid_broker_default",
conn_type="druid",
description="druid-broker conn description",
host="druid-broker",
port=8082,
extra='{"endpoint": "druid/v2/sql"}',
),
session,
)
self.parser = cli_parser.get_parser()
def tearDown(self):
clear_db_connections()
def test_cli_connections_export_should_return_error_for_invalid_command(self):
with pytest.raises(SystemExit):
self.parser.parse_args(
[
"connections",
"export",
]
)
def test_cli_connections_export_should_return_error_for_invalid_format(self):
with pytest.raises(SystemExit):
self.parser.parse_args(["connections", "export", "--format", "invalid", "/path/to/file"])
@mock.patch('os.path.splitext')
@mock.patch('builtins.open', new_callable=mock.mock_open())
def test_cli_connections_export_should_return_error_for_invalid_export_format(
self, mock_file_open, mock_splittext
):
output_filepath = '/tmp/connections.invalid'
mock_splittext.return_value = (None, '.invalid')
args = self.parser.parse_args(
[
"connections",
"export",
output_filepath,
]
)
with pytest.raises(
SystemExit, match=r"Unsupported file format. The file must have the extension .yaml, .json, .env"
):
connection_command.connections_export(args)
mock_splittext.assert_called_once()
mock_file_open.assert_called_once_with(output_filepath, 'w', -1, 'UTF-8', None)
mock_file_open.return_value.write.assert_not_called()
@mock.patch('os.path.splitext')
@mock.patch('builtins.open', new_callable=mock.mock_open())
@mock.patch.object(connection_command, 'create_session')
def test_cli_connections_export_should_return_error_if_create_session_fails(
self, mock_session, mock_file_open, mock_splittext
):
output_filepath = '/tmp/connections.json'
def my_side_effect():
raise Exception("dummy exception")
mock_session.side_effect = my_side_effect
mock_splittext.return_value = (None, '.json')
args = self.parser.parse_args(
[
"connections",
"export",
output_filepath,
]
)
with pytest.raises(Exception, match=r"dummy exception"):
connection_command.connections_export(args)
mock_splittext.assert_not_called()
mock_file_open.assert_called_once_with(output_filepath, 'w', -1, 'UTF-8', None)
mock_file_open.return_value.write.assert_not_called()
@mock.patch('os.path.splitext')
@mock.patch('builtins.open', new_callable=mock.mock_open())
@mock.patch.object(connection_command, 'create_session')
def test_cli_connections_export_should_return_error_if_fetching_connections_fails(
self, mock_session, mock_file_open, mock_splittext
):
output_filepath = '/tmp/connections.json'
def my_side_effect(_):
raise Exception("dummy exception")
mock_session.return_value.__enter__.return_value.query.return_value.order_by.side_effect = (
my_side_effect
)
mock_splittext.return_value = (None, '.json')
args = self.parser.parse_args(
[
"connections",
"export",
output_filepath,
]
)
with pytest.raises(Exception, match=r"dummy exception"):
connection_command.connections_export(args)
mock_splittext.assert_called_once()
mock_file_open.assert_called_once_with(output_filepath, 'w', -1, 'UTF-8', None)
mock_file_open.return_value.write.assert_not_called()
@mock.patch('os.path.splitext')
@mock.patch('builtins.open', new_callable=mock.mock_open())
@mock.patch.object(connection_command, 'create_session')
def test_cli_connections_export_should_not_return_error_if_connections_is_empty(
self, mock_session, mock_file_open, mock_splittext
):
output_filepath = '/tmp/connections.json'
mock_session.return_value.__enter__.return_value.query.return_value.all.return_value = []
mock_splittext.return_value = (None, '.json')
args = self.parser.parse_args(
[
"connections",
"export",
output_filepath,
]
)
connection_command.connections_export(args)
mock_splittext.assert_called_once()
mock_file_open.assert_called_once_with(output_filepath, 'w', -1, 'UTF-8', None)
mock_file_open.return_value.write.assert_called_once_with('{}')
@mock.patch('os.path.splitext')
@mock.patch('builtins.open', new_callable=mock.mock_open())
def test_cli_connections_export_should_export_as_json(self, mock_file_open, mock_splittext):
output_filepath = '/tmp/connections.json'
mock_splittext.return_value = (None, '.json')
args = self.parser.parse_args(
[
"connections",
"export",
output_filepath,
]
)
connection_command.connections_export(args)
expected_connections = json.dumps(
{
"airflow_db": {
"conn_type": "mysql",
"description": "mysql conn description",
"host": "mysql",
"login": "root",
"password": "plainpassword",
"schema": "airflow",
"port": None,
"extra": None,
},
"druid_broker_default": {
"conn_type": "druid",
"description": "druid-broker conn description",
"host": "druid-broker",
"login": None,
"password": None,
"schema": None,
"port": 8082,
"extra": "{\"endpoint\": \"druid/v2/sql\"}",
},
},
indent=2,
)
mock_splittext.assert_called_once()
mock_file_open.assert_called_once_with(output_filepath, 'w', -1, 'UTF-8', None)
mock_file_open.return_value.write.assert_called_once_with(expected_connections)
@mock.patch('os.path.splitext')
@mock.patch('builtins.open', new_callable=mock.mock_open())
def test_cli_connections_export_should_export_as_yaml(self, mock_file_open, mock_splittext):
output_filepath = '/tmp/connections.yaml'
mock_splittext.return_value = (None, '.yaml')
args = self.parser.parse_args(
[
"connections",
"export",
output_filepath,
]
)
connection_command.connections_export(args)
expected_connections = (
"airflow_db:\n"
" conn_type: mysql\n"
" description: mysql conn description\n"
" extra: null\n"
" host: mysql\n"
" login: root\n"
" password: plainpassword\n"
" port: null\n"
" schema: airflow\n"
"druid_broker_default:\n"
" conn_type: druid\n"
" description: druid-broker conn description\n"
" extra: \'{\"endpoint\": \"druid/v2/sql\"}\'\n"
" host: druid-broker\n"
" login: null\n"
" password: null\n"
" port: 8082\n"
" schema: null\n"
)
mock_splittext.assert_called_once()
mock_file_open.assert_called_once_with(output_filepath, 'w', -1, 'UTF-8', None)
mock_file_open.return_value.write.assert_called_once_with(expected_connections)
@mock.patch('os.path.splitext')
@mock.patch('builtins.open', new_callable=mock.mock_open())
def test_cli_connections_export_should_export_as_env(self, mock_file_open, mock_splittext):
output_filepath = '/tmp/connections.env'
mock_splittext.return_value = (None, '.env')
args = self.parser.parse_args(
[
"connections",
"export",
output_filepath,
]
)
connection_command.connections_export(args)
expected_connections = [
"airflow_db=mysql://root:plainpassword@mysql/airflow\n"
"druid_broker_default=druid://druid-broker:8082?endpoint=druid%2Fv2%2Fsql\n",
"druid_broker_default=druid://druid-broker:8082?endpoint=druid%2Fv2%2Fsql\n"
"airflow_db=mysql://root:plainpassword@mysql/airflow\n",
]
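        # either ordering of the two lines is accepted, since the export does
        # not guarantee the order in which connections are written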
mock_splittext.assert_called_once()
mock_file_open.assert_called_once_with(output_filepath, 'w', -1, 'UTF-8', None)
mock_file_open.return_value.write.assert_called_once_with(mock.ANY)
assert mock_file_open.return_value.write.call_args_list[0][0][0] in expected_connections
@mock.patch('os.path.splitext')
@mock.patch('builtins.open', new_callable=mock.mock_open())
def test_cli_connections_export_should_export_as_env_for_uppercase_file_extension(
self, mock_file_open, mock_splittext
):
output_filepath = '/tmp/connections.ENV'
mock_splittext.return_value = (None, '.ENV')
args = self.parser.parse_args(
[
"connections",
"export",
output_filepath,
]
)
connection_command.connections_export(args)
expected_connections = [
"airflow_db=mysql://root:plainpassword@mysql/airflow\n"
"druid_broker_default=druid://druid-broker:8082?endpoint=druid%2Fv2%2Fsql\n",
"druid_broker_default=druid://druid-broker:8082?endpoint=druid%2Fv2%2Fsql\n"
"airflow_db=mysql://root:plainpassword@mysql/airflow\n",
]
mock_splittext.assert_called_once()
mock_file_open.assert_called_once_with(output_filepath, 'w', -1, 'UTF-8', None)
mock_file_open.return_value.write.assert_called_once_with(mock.ANY)
assert mock_file_open.return_value.write.call_args_list[0][0][0] in expected_connections
@mock.patch('os.path.splitext')
@mock.patch('builtins.open', new_callable=mock.mock_open())
def test_cli_connections_export_should_force_export_as_specified_format(
self, mock_file_open, mock_splittext
):
output_filepath = '/tmp/connections.yaml'
args = self.parser.parse_args(
[
"connections",
"export",
output_filepath,
"--format",
"json",
]
)
connection_command.connections_export(args)
expected_connections = json.dumps(
{
"airflow_db": {
"conn_type": "mysql",
"description": "mysql conn description",
"host": "mysql",
"login": "root",
"password": "plainpassword",
"schema": "airflow",
"port": None,
"extra": None,
},
"druid_broker_default": {
"conn_type": "druid",
"description": "druid-broker conn description",
"host": "druid-broker",
"login": None,
"password": None,
"schema": None,
"port": 8082,
"extra": "{\"endpoint\": \"druid/v2/sql\"}",
},
},
indent=2,
)
mock_splittext.assert_not_called()
mock_file_open.assert_called_once_with(output_filepath, 'w', -1, 'UTF-8', None)
mock_file_open.return_value.write.assert_called_once_with(expected_connections)
TEST_URL = "postgresql://airflow:airflow@host:5432/airflow"
class TestCliAddConnections(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.parser = cli_parser.get_parser()
clear_db_connections()
@classmethod
def tearDownClass(cls):
clear_db_connections()
@parameterized.expand(
[
(
[
"connections",
"add",
"new0",
f"--conn-uri={TEST_URL}",
"--conn-description=new0 description",
],
"Successfully added `conn_id`=new0 : postgresql://airflow:airflow@host:5432/airflow",
{
"conn_type": "postgres",
"description": "new0 description",
"host": "host",
"is_encrypted": True,
"is_extra_encrypted": False,
"login": "airflow",
"port": 5432,
"schema": "airflow",
},
),
(
[
"connections",
"add",
"new1",
f"--conn-uri={TEST_URL}",
"--conn-description=new1 description",
],
"Successfully added `conn_id`=new1 : postgresql://airflow:airflow@host:5432/airflow",
{
"conn_type": "postgres",
"description": "new1 description",
"host": "host",
"is_encrypted": True,
"is_extra_encrypted": False,
"login": "airflow",
"port": 5432,
"schema": "airflow",
},
),
(
[
"connections",
"add",
"new2",
f"--conn-uri={TEST_URL}",
"--conn-extra",
"{'extra': 'yes'}",
],
"Successfully added `conn_id`=new2 : postgresql://airflow:airflow@host:5432/airflow",
{
"conn_type": "postgres",
"description": None,
"host": "host",
"is_encrypted": True,
"is_extra_encrypted": True,
"login": "airflow",
"port": 5432,
"schema": "airflow",
},
),
(
[
"connections",
"add",
"new3",
f"--conn-uri={TEST_URL}",
"--conn-extra",
"{'extra': 'yes'}",
"--conn-description",
"new3 description",
],
"Successfully added `conn_id`=new3 : postgresql://airflow:airflow@host:5432/airflow",
{
"conn_type": "postgres",
"description": "new3 description",
"host": "host",
"is_encrypted": True,
"is_extra_encrypted": True,
"login": "airflow",
"port": 5432,
"schema": "airflow",
},
),
(
[
"connections",
"add",
"new4",
"--conn-type=hive_metastore",
"--conn-login=airflow",
"--conn-password=airflow",
"--conn-host=host",
"--conn-port=9083",
"--conn-schema=airflow",
"--conn-description= new4 description ",
],
"Successfully added `conn_id`=new4 : hive_metastore://airflow:******@host:9083/airflow",
{
"conn_type": "hive_metastore",
"description": " new4 description ",
"host": "host",
"is_encrypted": True,
"is_extra_encrypted": False,
"login": "airflow",
"port": 9083,
"schema": "airflow",
},
),
(
[
"connections",
"add",
"new5",
"--conn-uri",
"",
"--conn-type=google_cloud_platform",
"--conn-extra",
"{'extra': 'yes'}",
"--conn-description=new5 description",
],
"Successfully added `conn_id`=new5 : google_cloud_platform://:@:",
{
"conn_type": "google_cloud_platform",
"description": "new5 description",
"host": None,
"is_encrypted": False,
"is_extra_encrypted": True,
"login": None,
"port": None,
"schema": None,
},
),
]
)
def test_cli_connection_add(self, cmd, expected_output, expected_conn):
with redirect_stdout(io.StringIO()) as stdout:
connection_command.connections_add(self.parser.parse_args(cmd))
stdout = stdout.getvalue()
assert expected_output in stdout
conn_id = cmd[2]
with create_session() as session:
comparable_attrs = [
"conn_type",
"description",
"host",
"is_encrypted",
"is_extra_encrypted",
"login",
"port",
"schema",
]
current_conn = session.query(Connection).filter(Connection.conn_id == conn_id).first()
assert expected_conn == {attr: getattr(current_conn, attr) for attr in comparable_attrs}
def test_cli_connections_add_duplicate(self):
conn_id = "to_be_duplicated"
connection_command.connections_add(
self.parser.parse_args(["connections", "add", conn_id, f"--conn-uri={TEST_URL}"])
)
# Check for addition attempt
with pytest.raises(SystemExit, match=rf"A connection with `conn_id`={conn_id} already exists"):
connection_command.connections_add(
self.parser.parse_args(["connections", "add", conn_id, f"--conn-uri={TEST_URL}"])
)
def test_cli_connections_add_delete_with_missing_parameters(self):
# Attempt to add without providing conn_uri
with pytest.raises(
SystemExit,
match=r"The following args are required to add a connection: \['conn-uri or conn-type'\]",
):
connection_command.connections_add(self.parser.parse_args(["connections", "add", "new1"]))
def test_cli_connections_add_invalid_uri(self):
# Attempt to add with invalid uri
with pytest.raises(SystemExit, match=r"The URI provided to --conn-uri is invalid: nonsense_uri"):
connection_command.connections_add(
self.parser.parse_args(["connections", "add", "new1", f"--conn-uri={'nonsense_uri'}"])
)
class TestCliDeleteConnections(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.parser = cli_parser.get_parser()
clear_db_connections()
@classmethod
def tearDownClass(cls):
clear_db_connections()
@provide_session
def test_cli_delete_connections(self, session=None):
merge_conn(
Connection(
conn_id="new1",
conn_type="mysql",
description="mysql description",
host="mysql",
login="root",
password="",
schema="airflow",
),
session=session,
)
# Delete connections
with redirect_stdout(io.StringIO()) as stdout:
connection_command.connections_delete(self.parser.parse_args(["connections", "delete", "new1"]))
stdout = stdout.getvalue()
# Check deletion stdout
assert "Successfully deleted connection with `conn_id`=new1" in stdout
# Check deletions
result = session.query(Connection).filter(Connection.conn_id == "new1").first()
assert result is None
def test_cli_delete_invalid_connection(self):
# Attempt to delete a non-existing connection
with pytest.raises(SystemExit, match=r"Did not find a connection with `conn_id`=fake"):
connection_command.connections_delete(self.parser.parse_args(["connections", "delete", "fake"]))
class TestCliImportConnections(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.parser = cli_parser.get_parser()
clear_db_connections(add_default_connections_back=False)
@classmethod
def tearDownClass(cls):
clear_db_connections()
@mock.patch('os.path.exists')
def test_cli_connections_import_should_return_error_if_file_does_not_exist(self, mock_exists):
mock_exists.return_value = False
filepath = '/does/not/exist.json'
with pytest.raises(SystemExit, match=r"Missing connections file."):
connection_command.connections_import(self.parser.parse_args(["connections", "import", filepath]))
@parameterized.expand(
[
("sample.jso",),
("sample.yml",),
("sample.environ",),
]
)
@mock.patch('os.path.exists')
def test_cli_connections_import_should_return_error_if_file_format_is_invalid(
self, filepath, mock_exists
):
mock_exists.return_value = True
with pytest.raises(
AirflowException,
match=r"Unsupported file format. The file must have the extension .env or .json or .yaml",
):
connection_command.connections_import(self.parser.parse_args(["connections", "import", filepath]))
@mock.patch('airflow.secrets.local_filesystem._parse_secret_file')
@mock.patch('os.path.exists')
def test_cli_connections_import_should_load_connections(self, mock_exists, mock_parse_secret_file):
mock_exists.return_value = True
# Sample connections to import
expected_connections = {
"new0": {
"conn_type": "postgres",
"description": "new0 description",
"host": "host",
"login": "airflow",
"password": "password",
"port": 5432,
"schema": "airflow",
"extra": "test",
},
"new1": {
"conn_type": "mysql",
"description": "new1 description",
"host": "host",
"login": "airflow",
"password": "password",
"port": 3306,
"schema": "airflow",
"extra": "test",
},
}
# We're not testing the behavior of _parse_secret_file, assume it successfully reads JSON, YAML or env
mock_parse_secret_file.return_value = expected_connections
connection_command.connections_import(
self.parser.parse_args(["connections", "import", 'sample.json'])
)
# Verify that the imported connections match the expected, sample connections
with create_session() as session:
current_conns = session.query(Connection).all()
comparable_attrs = [
"conn_id",
"conn_type",
"description",
"host",
"login",
"password",
"port",
"schema",
"extra",
]
current_conns_as_dicts = {
current_conn.conn_id: {attr: getattr(current_conn, attr) for attr in comparable_attrs}
for current_conn in current_conns
}
assert expected_connections == current_conns_as_dicts
@provide_session
@mock.patch('airflow.secrets.local_filesystem._parse_secret_file')
@mock.patch('os.path.exists')
def test_cli_connections_import_should_not_overwrite_existing_connections(
self, mock_exists, mock_parse_secret_file, session=None
):
mock_exists.return_value = True
# Add a pre-existing connection "new3"
merge_conn(
Connection(
conn_id="new3",
conn_type="mysql",
description="original description",
host="mysql",
login="root",
password="password",
schema="airflow",
),
session=session,
)
# Sample connections to import, including a collision with "new3"
expected_connections = {
"new2": {
"conn_type": "postgres",
"description": "new2 description",
"host": "host",
"login": "airflow",
"password": "password",
"port": 5432,
"schema": "airflow",
"extra": "test",
},
"new3": {
"conn_type": "mysql",
"description": "updated description",
"host": "host",
"login": "airflow",
"password": "new password",
"port": 3306,
"schema": "airflow",
"extra": "test",
},
}
# We're not testing the behavior of _parse_secret_file, assume it successfully reads JSON, YAML or env
mock_parse_secret_file.return_value = expected_connections
with redirect_stdout(io.StringIO()) as stdout:
connection_command.connections_import(
self.parser.parse_args(["connections", "import", 'sample.json'])
)
assert 'Could not import connection new3: connection already exists.' in stdout.getvalue()
# Verify that the imported connections match the expected, sample connections
current_conns = session.query(Connection).all()
comparable_attrs = [
"conn_id",
"conn_type",
"description",
"host",
"login",
"password",
"port",
"schema",
"extra",
]
current_conns_as_dicts = {
current_conn.conn_id: {attr: getattr(current_conn, attr) for attr in comparable_attrs}
for current_conn in current_conns
}
assert current_conns_as_dicts['new2'] == expected_connections['new2']
# The existing connection's description should not have changed
assert current_conns_as_dicts['new3']['description'] == 'original description'
| |
import numpy as np
def _make_index(prob,size):
"""
Returns a boolean index for given probabilities.
Notes
---------
prob = [.75,.25] means that there is a 75% chance of the first column
being True and a 25% chance of the second column being True. The
columns are mutually exclusive.
"""
rv = np.random.uniform(size=(size,1))
cumprob = np.cumsum(prob)
return np.logical_and(np.r_[0,cumprob[:-1]] <= rv, rv < cumprob)
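# Worked example (illustrative): with prob = [.75, .25] and a single draw
# rv = 0.6, cumprob = [0.75, 1.0] and np.r_[0, cumprob[:-1]] = [0, 0.75], so the
# row is [0 <= 0.6 < 0.75, 0.75 <= 0.6 < 1.0] = [True, False]; that observation
# is drawn from the first distribution.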
def mixture_rvs(prob, size, dist, kwargs=None):
"""
Sample from a mixture of distributions.
Parameters
----------
prob : array-like
Probability of sampling from each distribution in dist
size : int
The length of the returned sample.
dist : array-like
An iterable of distributions objects from scipy.stats.
kwargs : tuple of dicts, optional
A tuple of dicts. Each dict in kwargs can have keys loc, scale, and
args to be passed to the respective distribution in dist. If not
provided, the distribution defaults are used.
Examples
--------
Say we want 5000 random variables from mixture of normals with two
distributions norm(-1,.5) and norm(1,.5) and we want to sample from the
first with probability .75 and the second with probability .25.
>>> from scipy import stats
>>> prob = [.75,.25]
>>> Y = mixture_rvs(prob, 5000, dist=[stats.norm, stats.norm], kwargs =
(dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
"""
if len(prob) != len(dist):
raise ValueError("You must provide as many probabilities as distributions")
if not np.allclose(np.sum(prob), 1):
raise ValueError("prob does not sum to 1")
if kwargs is None:
kwargs = ({},)*len(prob)
idx = _make_index(prob,size)
sample = np.empty(size)
for i in range(len(prob)):
sample_idx = idx[...,i]
sample_size = sample_idx.sum()
loc = kwargs[i].get('loc',0)
scale = kwargs[i].get('scale',1)
args = kwargs[i].get('args',())
sample[sample_idx] = dist[i].rvs(*args, **dict(loc=loc,scale=scale,
size=sample_size))
return sample
class MixtureDistribution(object):
'''univariate mixture distribution
for simple case for now (unbound support)
does not yet inherit from scipy.stats.distributions
adding pdf to mixture_rvs, some restrictions on broadcasting
Currently it does not hold any state, all arguments included in each method.
'''
#def __init__(self, prob, size, dist, kwargs=None):
def rvs(self, prob, size, dist, kwargs=None):
return mixture_rvs(prob, size, dist, kwargs=kwargs)
def pdf(self, x, prob, dist, kwargs=None):
"""
pdf a mixture of distributions.
Parameters
----------
prob : array-like
Probability of sampling from each distribution in dist
dist : array-like
An iterable of distributions objects from scipy.stats.
kwargs : tuple of dicts, optional
A tuple of dicts. Each dict in kwargs can have keys loc, scale, and
args to be passed to the respective distribution in dist. If not
provided, the distribution defaults are used.
Examples
--------
        Say we want the pdf of a mixture of two normals, norm(-1,.5) with
        weight .75 and norm(1,.5) with weight .25, evaluated at the points
        in ``x``.
>>> from scipy import stats
>>> prob = [.75,.25]
>>> Y = mixture.pdf(x, prob, dist=[stats.norm, stats.norm], kwargs =
(dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
"""
if len(prob) != len(dist):
raise ValueError("You must provide as many probabilities as distributions")
if not np.allclose(np.sum(prob), 1):
raise ValueError("prob does not sum to 1")
if kwargs is None:
kwargs = ({},)*len(prob)
for i in range(len(prob)):
loc = kwargs[i].get('loc',0)
scale = kwargs[i].get('scale',1)
args = kwargs[i].get('args',())
if i == 0: #assume all broadcast the same as the first dist
pdf_ = prob[i] * dist[i].pdf(x, *args, loc=loc, scale=scale)
else:
pdf_ += prob[i] * dist[i].pdf(x, *args, loc=loc, scale=scale)
return pdf_
def cdf(self, x, prob, dist, kwargs=None):
"""
cdf of a mixture of distributions.
Parameters
----------
prob : array-like
Probability of sampling from each distribution in dist
        x : array-like
            The values at which the mixture cdf is evaluated.
dist : array-like
An iterable of distributions objects from scipy.stats.
kwargs : tuple of dicts, optional
A tuple of dicts. Each dict in kwargs can have keys loc, scale, and
args to be passed to the respective distribution in dist. If not
provided, the distribution defaults are used.
Examples
--------
        Say we want the cdf of a mixture of two normals, norm(-1,.5) with
        weight .75 and norm(1,.5) with weight .25, evaluated at the points
        in ``x``.
        >>> from scipy import stats
        >>> prob = [.75,.25]
        >>> Y = mixture.cdf(x, prob, dist=[stats.norm, stats.norm], kwargs =
                (dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
"""
if len(prob) != len(dist):
raise ValueError("You must provide as many probabilities as distributions")
if not np.allclose(np.sum(prob), 1):
raise ValueError("prob does not sum to 1")
if kwargs is None:
kwargs = ({},)*len(prob)
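# mixture cdf: F(x) = sum_i prob[i] * cdf_i(x; loc_i, scale_i),
# accumulated term by term in the loop below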
for i in range(len(prob)):
loc = kwargs[i].get('loc',0)
scale = kwargs[i].get('scale',1)
args = kwargs[i].get('args',())
if i == 0: #assume all broadcast the same as the first dist
cdf_ = prob[i] * dist[i].cdf(x, *args, loc=loc, scale=scale)
else:
cdf_ += prob[i] * dist[i].cdf(x, *args, loc=loc, scale=scale)
return cdf_
def mv_mixture_rvs(prob, size, dist, nvars, **kwargs):
"""
Sample from a mixture of multivariate distributions.
Parameters
----------
prob : array-like
Probability of sampling from each distribution in dist
size : int
The length of the returned sample.
dist : array-like
An iterable of distributions instances with callable method rvs.
nvars : int
Dimension of the multivariate distribution; could be inferred instead.
kwargs : tuple of dicts, optional
ignored
Examples
--------
Say we want 2000 random variables from a mixture of two
multivariate normal distributions, and we want to sample from the
first with probability .4 and the second with probability .6.
import statsmodels.sandbox.distributions.mv_normal as mvd
cov3 = np.array([[ 1. , 0.5 , 0.75],
[ 0.5 , 1.5 , 0.6 ],
[ 0.75, 0.6 , 2. ]])
mu = np.array([-1, 0.0, 2.0])
mu2 = np.array([4, 2.0, 2.0])
mvn3 = mvd.MVNormal(mu, cov3)
mvn32 = mvd.MVNormal(mu2, cov3/2., 4)
rvs = mix.mv_mixture_rvs([0.4, 0.6], 2000, [mvn3, mvn32], 3)
"""
if len(prob) != len(dist):
raise ValueError("You must provide as many probabilities as distributions")
if not np.allclose(np.sum(prob), 1):
raise ValueError("prob does not sum to 1")
if kwargs is None:
kwargs = ({},)*len(prob)
idx = _make_index(prob,size)
sample = np.empty((size, nvars))
for i in range(len(prob)):
sample_idx = idx[...,i]
sample_size = sample_idx.sum()
#loc = kwargs[i].get('loc',0)
#scale = kwargs[i].get('scale',1)
#args = kwargs[i].get('args',())
# use int to avoid numpy bug with np.random.multivariate_normal
sample[sample_idx] = dist[i].rvs(size=int(sample_size))
return sample
if __name__ == '__main__':
from scipy import stats
obs_dist = mixture_rvs([.25,.75], size=10000, dist=[stats.norm, stats.beta],
kwargs=(dict(loc=-1,scale=.5),dict(loc=1,scale=1,args=(1,.5))))
nobs = 10000
mix = MixtureDistribution()
## mrvs = mixture_rvs([1/3.,2/3.], size=nobs, dist=[stats.norm, stats.norm],
## kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.75)))
mix_kwds = (dict(loc=-1,scale=.25),dict(loc=1,scale=.75))
mrvs = mix.rvs([1/3.,2/3.], size=nobs, dist=[stats.norm, stats.norm],
kwargs=mix_kwds)
grid = np.linspace(-4,4, 100)
mpdf = mix.pdf(grid, [1/3.,2/3.], dist=[stats.norm, stats.norm],
kwargs=mix_kwds)
mcdf = mix.cdf(grid, [1/3.,2/3.], dist=[stats.norm, stats.norm],
kwargs=mix_kwds)
doplot = 1
if doplot:
import matplotlib.pyplot as plt
plt.figure()
plt.hist(mrvs, bins=50, normed=True, color='red')
plt.title('histogram of sample and pdf')
plt.plot(grid, mpdf, lw=2, color='black')
plt.figure()
plt.hist(mrvs, bins=50, normed=True, cumulative=True, color='red')
plt.title('histogram of sample and cdf')
plt.plot(grid, mcdf, lw=2, color='black')
plt.show()
| |
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import weakref
import IECore
import Gaffer
import GafferUI
## The Bookmarks class provides a registry of named locations for use
# in Path UIs. To allow different gaffer applications to coexist in the
# same process, separate bookmarks are maintained per application.
class Bookmarks( object ) :
## Use acquire() in preference to this constructor.
def __init__( self, applicationRoot, pathType, category ) :
self.__applicationRoot = weakref.ref( applicationRoot )
self.__pathType = pathType
self.__category = category
## Acquires a set of bookmarks for the specified target. Bookmarks are
# grouped according to the type of path they can be applied to and to an
# arbitrary category. The None category is special - bookmarks added to this
# category are available in all categories with the same path type.
#
# Bookmarks are stored on a per-application basis but target can be any
# of the following :
#
# - An instance of Gaffer.Application
# - An instance of Gaffer.ApplicationRoot
# - An instance of Gaffer.GraphComponent. In this case, the bookmarks
# of the ApplicationRoot ancestor of target are returned, with None
# being returned in the absence of such an ancestor.
# - An instance of GafferUI.Widget. In this case, an instance
# of EditorWidget or ScriptWindow will be sought, and the application
# determined using the attached script. This too may return None if
# no application can be found.
# - A tuple or list, containing potential targets in the above form. Each
# is tried in turn until bookmarks are found - this is useful when both
# a GraphComponent and a Widget are available, but it is not known that
# they each have a suitable ancestor for bookmark acquisition.
#
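# A minimal usage sketch (the category and paths here are hypothetical,
# not taken from this file) :
#
#   bookmarks = GafferUI.Bookmarks.acquire( application, Gaffer.FileSystemPath, category = "image" )
#   bookmarks.add( "Home", os.path.expanduser( "~" ) )
#   startPath = bookmarks.getDefault()
#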
@classmethod
def acquire( cls, target, pathType=Gaffer.FileSystemPath, category=None ) :
if isinstance( target, ( tuple, list ) ) :
for t in target :
result = cls.acquire( t, pathType, category )
if result is not None :
return result
return None
if isinstance( target, Gaffer.Application ) :
applicationRoot = target.root()
elif isinstance( target, Gaffer.ApplicationRoot ) :
applicationRoot = target
elif isinstance( target, Gaffer.GraphComponent ) :
applicationRoot = target.ancestor( Gaffer.ApplicationRoot )
else :
assert( isinstance( target, GafferUI.Widget ) )
scriptWidget = None
if isinstance( target, ( GafferUI.EditorWidget, GafferUI.ScriptWindow ) ) :
scriptWidget = target
else :
scriptWidget = target.ancestor( GafferUI.EditorWidget )
if scriptWidget is None :
scriptWidget = target.ancestor( GafferUI.ScriptWindow )
if scriptWidget is None :
# needed to find bookmarks for floating op windows
# in the browser app. ideally we'd have a more general
# mechanism for finding scriptWidget in the closest
# descendant-of-an-ancestor.
window = target
while window is not None :
window = window.ancestor( GafferUI.Window )
if window is not None and isinstance( window.getChild(), GafferUI.EditorWidget ) :
scriptWidget = window.getChild()
break
if scriptWidget is not None :
applicationRoot = scriptWidget.scriptNode().ancestor( Gaffer.ApplicationRoot )
else :
applicationRoot = None
if applicationRoot is None :
return None
return Bookmarks( applicationRoot, pathType, category )
## Adds a bookmark. If persistent is True, then the bookmark
# will be saved in the application preferences and restored
# when the application next runs. The path passed may either
# be a string or a callable which takes the optional forWidget
# argument passed to get() and returns a string - this latter
# option makes it possible to define context sensitive bookmarks.
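# For example (hypothetical path, not from this file) :
#
#   bookmarks.add( "Desktop", lambda forWidget : os.path.expanduser( "~/Desktop" ) )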
def add( self, name, path, persistent=False ) :
assert( isinstance( path, basestring ) or ( callable( path ) and not persistent ) )
# backwards compatibility with old mechanism for storing recents -
# convert to new form.
if name.startswith( "Recent/" ) :
self.addRecent( path )
return
s = self.__storage( self.__category )
try :
# find existing bookmark
b = next( x for x in s if x.name == name )
except StopIteration :
# add new one if none exists
b = IECore.Struct()
s.append( b )
# update bookmark
b.name = name
b.path = path
b.persistent = persistent
if persistent :
self.__save()
## Removes a bookmark previously stored with add().
def remove( self, name ) :
for s in [
self.__storage( self.__category ),
self.__storage( None )
] :
for i, b in enumerate( s ) :
if b.name == name :
del s[i]
if b.persistent :
self.__save()
return
raise KeyError( name )
## Returns a list of the names of currently defined bookmarks.
def names( self, persistent=None ) :
u = set()
result = []
for s in [
self.__storage( None ),
self.__storage( self.__category ),
] :
for b in s :
if persistent is not None and b.persistent != persistent :
continue
if b.name.startswith( "__" ) :
continue
if b.name not in u :
result.append( b.name )
u.add( b.name )
return result
## Returns the named bookmark as a string. The optional
# forWidget argument may be specified to provide a context
# in which dynamic (callable) bookmarks may compute their
# result.
def get( self, name, forWidget=None ) :
for s in [
self.__storage( self.__category ),
self.__storage( None ),
] :
for b in s :
if b.name == name :
if callable( b.path ) :
return b.path( forWidget )
else :
return b.path
raise KeyError( name )
## Adds a recently visited location to the bookmarks.
# Recent locations are always persistent, and are recycled
# so only the latest few are available. Recent bookmarks are
# not returned by names() or get(), but are instead accessed
# with recents().
def addRecent( self, path ) :
assert( isinstance( path, basestring ) )
name = "__recent:" + path
# first remove any recent items that match this one,
# and remove old items to make room for the new one
# if necessary. we only do this for the category storage
# because we don't want to flush recents from the general
# storage if a particular category is used heavily.
names = [ x.name for x in self.__storage( self.__category ) if x.name.startswith( "__recent:" ) ]
if name in names :
self.remove( name )
names.remove( name )
while len( names ) > 5 :
self.remove( names[0] )
del names[0]
# now add on the new bookmark
self.add( name, path, persistent=True )
## Removes a recently visited location from the bookmarks.
def removeRecent( self, path ) :
self.remove( "__recent:" + path )
## Returns a list of strings specifying the location of
# the bookmarks added with addRecent().
def recents( self ) :
u = set()
result = []
for s in [
self.__storage( None ),
self.__storage( self.__category ),
] :
for b in s :
if b.name.startswith( "__recent:" ) :
if b.path not in u :
result.append( b.path )
u.add( b.path )
return result
## Sets a default location which can be used when no
# information has been provided as to where to start
# browsing. Default locations are not persistent.
def setDefault( self, path ) :
self.add( "__default", path )
def getDefault( self, forWidget=None ) :
try :
return self.get( "__default", forWidget )
except KeyError :
return "/"
def __storage( self, category ) :
a = self.__applicationRoot()
try :
b = a.__bookmarks
except AttributeError :
a.__bookmarks = {}
b = a.__bookmarks
return b.setdefault( ( self.__pathType, category ), [] )
def __save( self ) :
f = open( os.path.join( self.__applicationRoot().preferencesLocation(), "bookmarks.py" ), "w" )
f.write( "# This file was automatically generated by Gaffer.\n" )
f.write( "# Do not edit this file - it will be overwritten.\n\n" )
f.write( "import Gaffer\n" )
f.write( "import GafferUI\n" )
f.write( "\n" )
for key, value in self.__applicationRoot().__bookmarks.items() :
acquired = False
for b in value :
if not b.persistent :
continue
if not acquired :
f.write(
"bookmarks = GafferUI.Bookmarks.acquire( application, %s, %s )\n" %
( Gaffer.Serialisation.classPath( key[0] ), repr( key[1] ) )
)
acquired = True
if b.name.startswith( "__recent:" ) :
f.write( "bookmarks.addRecent( %s )\n" % repr( b.path ) )
else :
f.write( "bookmarks.add( %s, %s, persistent=True )\n" % ( repr( b.name ), repr( b.path ) ) )
| |
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import functools
import errno
import os
import resource
import signal
import time
import subprocess
import re
from swift.common.utils import search_tree, remove_file, write_file
SWIFT_DIR = '/etc/swift'
RUN_DIR = '/var/run/swift'
# auth-server has been removed from ALL_SERVERS, start it explicitly
ALL_SERVERS = ['account-auditor', 'account-server', 'container-auditor',
'container-replicator', 'container-server', 'container-sync',
'container-updater', 'object-auditor', 'object-server',
'object-expirer', 'object-replicator', 'object-updater',
'proxy-server', 'account-replicator', 'account-reaper']
MAIN_SERVERS = ['proxy-server', 'account-server', 'container-server',
'object-server']
REST_SERVERS = [s for s in ALL_SERVERS if s not in MAIN_SERVERS]
GRACEFUL_SHUTDOWN_SERVERS = MAIN_SERVERS + ['auth-server']
START_ONCE_SERVERS = REST_SERVERS
# These are servers that match a type (account-*, container-*, object-*) but
# don't use that type-server.conf file and instead use their own.
STANDALONE_SERVERS = ['object-expirer']
KILL_WAIT = 15 # seconds to wait for servers to die (by default)
WARNING_WAIT = 3 # seconds to wait after message that may just be a warning
MAX_DESCRIPTORS = 32768
MAX_MEMORY = (1024 * 1024 * 1024) * 2 # 2 GB
def setup_env():
"""Try to increase resource limits of the OS. Move PYTHON_EGG_CACHE to /tmp
"""
try:
resource.setrlimit(resource.RLIMIT_NOFILE,
(MAX_DESCRIPTORS, MAX_DESCRIPTORS))
resource.setrlimit(resource.RLIMIT_DATA,
(MAX_MEMORY, MAX_MEMORY))
except ValueError:
print _("WARNING: Unable to increase file descriptor limit. "
"Running as non-root?")
os.environ['PYTHON_EGG_CACHE'] = '/tmp'
def command(func):
"""
Decorator to declare which methods are accessible as commands. Commands
always return 1 or 0, where 0 indicates success.
:param func: function to make public
"""
func.publicly_accessible = True
@functools.wraps(func)
def wrapped(*a, **kw):
rv = func(*a, **kw)
return 1 if rv else 0
return wrapped
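# For example (sketch), a Manager method declared as
#
#   @command
#   def start(self, **kwargs):
#       ...
#
# becomes reachable through run_command('start'); any truthy return value is
# normalized to 1 (failure) and any falsey one to 0 (success).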
def watch_server_pids(server_pids, interval=1, **kwargs):
"""Monitor a collection of server pids yeilding back those pids that
aren't responding to signals.
:param server_pids: a dict, lists of pids [int,...] keyed on
Server objects
"""
status = {}
start = time.time()
end = start + interval
server_pids = dict(server_pids) # make a copy
while True:
for server, pids in server_pids.items():
for pid in pids:
try:
# let pid stop if it wants to
os.waitpid(pid, os.WNOHANG)
except OSError, e:
if e.errno not in (errno.ECHILD, errno.ESRCH):
raise # else no such child/process
# check running pids for server
status[server] = server.get_running_pids(**kwargs)
for pid in pids:
# original pids no longer in running pids!
if pid not in status[server]:
yield server, pid
# update active pids list using running_pids
server_pids[server] = status[server]
if not [p for server, pids in status.items() for p in pids]:
# no more running pids
break
if time.time() > end:
break
else:
time.sleep(0.1)
class UnknownCommandError(Exception):
pass
class Manager():
"""Main class for performing commands on groups of servers.
:param servers: list of server names as strings
"""
def __init__(self, servers, run_dir=RUN_DIR):
server_names = set()
for server in servers:
if server == 'all':
server_names.update(ALL_SERVERS)
elif server == 'main':
server_names.update(MAIN_SERVERS)
elif server == 'rest':
server_names.update(REST_SERVERS)
elif '*' in server:
# convert glob to regex
server_names.update([s for s in ALL_SERVERS if
re.match(server.replace('*', '.*'), s)])
else:
server_names.add(server)
self.servers = set()
for name in server_names:
self.servers.add(Server(name, run_dir))
@command
def status(self, **kwargs):
"""display status of tracked pids for server
"""
status = 0
for server in self.servers:
status += server.status(**kwargs)
return status
@command
def start(self, **kwargs):
"""starts a server
"""
setup_env()
status = 0
for server in self.servers:
server.launch(**kwargs)
if not kwargs.get('daemon', True):
for server in self.servers:
try:
status += server.interact(**kwargs)
except KeyboardInterrupt:
print _('\nuser quit')
self.stop(**kwargs)
break
elif kwargs.get('wait', True):
for server in self.servers:
status += server.wait(**kwargs)
return status
@command
def no_wait(self, **kwargs):
"""spawn server and return immediately
"""
kwargs['wait'] = False
return self.start(**kwargs)
@command
def no_daemon(self, **kwargs):
"""start a server interactively
"""
kwargs['daemon'] = False
return self.start(**kwargs)
@command
def once(self, **kwargs):
"""start server and run one pass on supporting daemons
"""
kwargs['once'] = True
return self.start(**kwargs)
@command
def stop(self, **kwargs):
"""stops a server
"""
server_pids = {}
for server in self.servers:
signaled_pids = server.stop(**kwargs)
if not signaled_pids:
print _('No %s running') % server
else:
server_pids[server] = signaled_pids
# all signaled_pids, i.e. list(itertools.chain(*server_pids.values()))
signaled_pids = [p for server, pids in server_pids.items()
for p in pids]
# keep track of the pids yielded back as killed for all servers
killed_pids = set()
kill_wait = kwargs.get('kill_wait', KILL_WAIT)
for server, killed_pid in watch_server_pids(server_pids,
interval=kill_wait,
**kwargs):
print _("%s (%s) appears to have stopped") % (server, killed_pid)
killed_pids.add(killed_pid)
if not killed_pids.symmetric_difference(signaled_pids):
# all processes have been stopped
return 0
# reached the kill_wait interval in watch_server_pids without killing all servers
for server, pids in server_pids.items():
if not killed_pids.issuperset(pids):
# some pids of this server were not killed
print _('Waited %s seconds for %s to die; giving up') % (
kill_wait, server)
return 1
@command
def shutdown(self, **kwargs):
"""allow current requests to finish on supporting servers
"""
kwargs['graceful'] = True
status = 0
status += self.stop(**kwargs)
return status
@command
def restart(self, **kwargs):
"""stops then restarts server
"""
status = 0
status += self.stop(**kwargs)
status += self.start(**kwargs)
return status
@command
def reload(self, **kwargs):
"""graceful shutdown then restart on supporting servers
"""
kwargs['graceful'] = True
status = 0
for server in self.servers:
m = Manager([server.server])
status += m.stop(**kwargs)
status += m.start(**kwargs)
return status
@command
def force_reload(self, **kwargs):
"""alias for reload
"""
return self.reload(**kwargs)
def get_command(self, cmd):
"""Find and return the decorated method named like cmd
:param cmd: the command to get, a string, if not found raises
UnknownCommandError
"""
cmd = cmd.lower().replace('-', '_')
try:
f = getattr(self, cmd)
except AttributeError:
raise UnknownCommandError(cmd)
if not hasattr(f, 'publicly_accessible'):
raise UnknownCommandError(cmd)
return f
@classmethod
def list_commands(cls):
"""Get all publicly accessible commands
:returns: a list of string tuples (cmd, help), the method names who are
decorated as commands
"""
get_method = lambda cmd: getattr(cls, cmd)
return sorted([(x.replace('_', '-'), get_method(x).__doc__.strip())
for x in dir(cls) if
getattr(get_method(x), 'publicly_accessible', False)])
def run_command(self, cmd, **kwargs):
"""Find the named command and run it
:param cmd: the command name to run
"""
f = self.get_command(cmd)
return f(**kwargs)
class Server():
"""Manage operations on a server or group of servers of similar type
:param server: name of server
"""
def __init__(self, server, run_dir=RUN_DIR):
if '-' not in server:
server = '%s-server' % server
self.server = server.lower()
self.type = server.rsplit('-', 1)[0]
self.cmd = 'swift-%s' % server
self.procs = []
self.run_dir = run_dir
def __str__(self):
return self.server
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(str(self)))
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
try:
return self.server == other.server
except AttributeError:
return False
def get_pid_file_name(self, conf_file):
"""Translate conf_file to a corresponding pid_file
:param conf_file: a conf_file for this server, a string
:returns: the pid_file for this conf_file
"""
return conf_file.replace(
os.path.normpath(SWIFT_DIR), self.run_dir, 1).replace(
'%s-server' % self.type, self.server, 1).rsplit(
'.conf', 1)[0] + '.pid'
def get_conf_file_name(self, pid_file):
"""Translate pid_file to a corresponding conf_file
:param pid_file: a pid_file for this server, a string
:returns: the conf_file for this pid_file
"""
if self.server in STANDALONE_SERVERS:
return pid_file.replace(
os.path.normpath(self.run_dir), SWIFT_DIR, 1)\
.rsplit('.pid', 1)[0] + '.conf'
else:
return pid_file.replace(
os.path.normpath(self.run_dir), SWIFT_DIR, 1).replace(
self.server, '%s-server' % self.type, 1).rsplit(
'.pid', 1)[0] + '.conf'
def conf_files(self, **kwargs):
"""Get conf files for this server
:param: number, if supplied will only lookup the nth server
:returns: list of conf files
"""
if self.server in STANDALONE_SERVERS:
found_conf_files = search_tree(SWIFT_DIR, self.server + '*',
'.conf')
else:
found_conf_files = search_tree(SWIFT_DIR, '%s-server*' % self.type,
'.conf')
number = kwargs.get('number')
if number:
try:
conf_files = [found_conf_files[number - 1]]
except IndexError:
conf_files = []
else:
conf_files = found_conf_files
if not conf_files:
# maybe there's a config file(s) out there, but I couldn't find it!
if not kwargs.get('quiet'):
print _('Unable to locate config %sfor %s') % (
('number %s ' % number if number else ''), self.server)
if kwargs.get('verbose') and not kwargs.get('quiet'):
if found_conf_files:
print _('Found configs:')
for i, conf_file in enumerate(found_conf_files):
print ' %d) %s' % (i + 1, conf_file)
return conf_files
def pid_files(self, **kwargs):
"""Get pid files for this server
:param: number, if supplied will only lookup the nth server
:returns: list of pid files
"""
pid_files = search_tree(self.run_dir, '%s*' % self.server, '.pid')
if kwargs.get('number', 0):
conf_files = self.conf_files(**kwargs)
# filter pid_files to match the index of numbered conf_file
pid_files = [pid_file for pid_file in pid_files if
self.get_conf_file_name(pid_file) in conf_files]
return pid_files
def iter_pid_files(self, **kwargs):
"""Generator, yields (pid_file, pids)
"""
for pid_file in self.pid_files(**kwargs):
yield pid_file, int(open(pid_file).read().strip())
def signal_pids(self, sig, **kwargs):
"""Send a signal to pids for this server
:param sig: signal to send
:returns: a dict mapping pids (ints) to pid_files (paths)
"""
pids = {}
for pid_file, pid in self.iter_pid_files(**kwargs):
try:
if sig != signal.SIG_DFL:
print _('Signal %s pid: %s signal: %s') % (self.server,
pid, sig)
os.kill(pid, sig)
except OSError, e:
if e.errno == errno.ESRCH:
# pid does not exist
if kwargs.get('verbose'):
print _("Removing stale pid file %s") % pid_file
remove_file(pid_file)
elif e.errno == errno.EPERM:
print _("No permission to signal PID %d") % pid
else:
# process exists
pids[pid] = pid_file
return pids
def get_running_pids(self, **kwargs):
"""Get running pids
:returns: a dict mapping pids (ints) to pid_files (paths)
"""
return self.signal_pids(signal.SIG_DFL, **kwargs) # send noop
def kill_running_pids(self, **kwargs):
"""Kill running pids
:param graceful: if True, attempt SIGHUP on supporting servers
:returns: a dict mapping pids (ints) to pid_files (paths)
"""
graceful = kwargs.get('graceful')
if graceful and self.server in GRACEFUL_SHUTDOWN_SERVERS:
sig = signal.SIGHUP
else:
sig = signal.SIGTERM
return self.signal_pids(sig, **kwargs)
def status(self, pids=None, **kwargs):
"""Display status of server
:param: pids, if not supplied pids will be populated automatically
:param: number, if supplied will only lookup the nth server
:returns: 1 if server is not running, 0 otherwise
"""
if pids is None:
pids = self.get_running_pids(**kwargs)
if not pids:
number = kwargs.get('number', 0)
if number:
kwargs['quiet'] = True
conf_files = self.conf_files(**kwargs)
if conf_files:
print _("%s #%d not running (%s)") % (self.server, number,
conf_files[0])
else:
print _("No %s running") % self.server
return 1
for pid, pid_file in pids.items():
conf_file = self.get_conf_file_name(pid_file)
print _("%s running (%s - %s)") % (self.server, pid, conf_file)
return 0
def spawn(self, conf_file, once=False, wait=True, daemon=True, **kwargs):
"""Launch a subprocess for this server.
:param conf_file: path to conf_file to use as first arg
:param once: boolean, add once argument to command
:param wait: boolean, if true capture stdout with a pipe
:param daemon: boolean, if true ask server to log to console
:returns: the pid of the spawned process
"""
args = [self.cmd, conf_file]
if once:
args.append('once')
if not daemon:
# ask the server to log to console
args.append('verbose')
# figure out what we're going to do with stdio
if not daemon:
# do nothing, this process is open until the spawns close anyway
re_out = None
re_err = None
else:
re_err = subprocess.STDOUT
if wait:
# we're going to need to block on this...
re_out = subprocess.PIPE
else:
re_out = open(os.devnull, 'w+b')
proc = subprocess.Popen(args, stdout=re_out, stderr=re_err)
pid_file = self.get_pid_file_name(conf_file)
write_file(pid_file, proc.pid)
self.procs.append(proc)
return proc.pid
def wait(self, **kwargs):
"""
wait on spawned procs to start
"""
status = 0
for proc in self.procs:
# wait for process to close its stdout
output = proc.stdout.read()
if output:
print output
start = time.time()
# wait for process to die (output may just be a warning)
while time.time() - start < WARNING_WAIT:
time.sleep(0.1)
if proc.poll() is not None:
status += proc.returncode
break
return status
def interact(self, **kwargs):
"""
wait on spawned procs to terminate
"""
status = 0
for proc in self.procs:
# wait for process to terminate
proc.communicate()
if proc.returncode:
status += 1
return status
def launch(self, **kwargs):
"""
Collect conf files and attempt to spawn the processes for this server
"""
conf_files = self.conf_files(**kwargs)
if not conf_files:
return []
pids = self.get_running_pids(**kwargs)
already_started = False
for pid, pid_file in pids.items():
conf_file = self.get_conf_file_name(pid_file)
# for legacy compat you can't start other servers if one server is
# already running (unless -n specifies which one you want), this
# restriction could potentially be lifted, and launch could start
# any unstarted instances
if conf_file in conf_files:
already_started = True
print _("%s running (%s - %s)") % (self.server, pid, conf_file)
elif not kwargs.get('number', 0):
already_started = True
print _("%s running (%s - %s)") % (self.server, pid, pid_file)
if already_started:
print _("%s already started...") % self.server
return []
if self.server not in START_ONCE_SERVERS:
kwargs['once'] = False
pids = {}
for conf_file in conf_files:
if kwargs.get('once'):
msg = _('Running %s once') % self.server
else:
msg = _('Starting %s') % self.server
print '%s...(%s)' % (msg, conf_file)
try:
pid = self.spawn(conf_file, **kwargs)
except OSError, e:
if e.errno == errno.ENOENT:
# TODO: should I check if self.cmd exists earlier?
print _("%s does not exist") % self.cmd
break
pids[pid] = conf_file
return pids
def stop(self, **kwargs):
"""Send stop signals to pids for this server
:returns: a dict mapping pids (ints) to pid_files (paths)
"""
return self.kill_running_pids(**kwargs)
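# A usage sketch (not part of the original module): a swift-init style entry
# point could drive Manager like this, assuming conf and pid files live in
# SWIFT_DIR and RUN_DIR:
#
#   manager = Manager(['proxy-server', 'object-server'])
#   status = manager.run_command('restart')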
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import random
import string
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from glanceclient import exc as glance_exceptions
from horizon.utils import memoized
from openstack_dashboard.api import base
from openstack_dashboard.api import glance
from openstack_dashboard.api import neutron
from os_cloud_config import keystone_pki
from tuskarclient import client as tuskar_client
from tuskar_ui.api import flavor
from tuskar_ui.cached_property import cached_property # noqa
from tuskar_ui.handle_errors import handle_errors # noqa
LOG = logging.getLogger(__name__)
MASTER_TEMPLATE_NAME = 'plan.yaml'
ENVIRONMENT_NAME = 'environment.yaml'
TUSKAR_SERVICE = 'management'
SSL_HIDDEN_PARAMS = ('SSLCertificate', 'SSLKey')
KEYSTONE_CERTIFICATE_PARAMS = (
'KeystoneSigningCertificate', 'KeystoneCACertificate',
'KeystoneSigningKey')
def tuskarclient(request, password=None):
api_version = "2"
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
ca_file = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
endpoint = base.url_for(request, TUSKAR_SERVICE)
LOG.debug('tuskarclient connection created using token "%s" and url "%s"' %
(request.user.token.id, endpoint))
client = tuskar_client.get_client(api_version,
tuskar_url=endpoint,
insecure=insecure,
ca_file=ca_file,
username=request.user.username,
password=password,
os_auth_token=request.user.token.id)
return client
def password_generator(size=40, chars=(string.ascii_uppercase +
string.ascii_lowercase +
string.digits)):
return ''.join(random.choice(chars) for _ in range(size))
def strip_prefix(parameter_name):
return parameter_name.split('::', 1)[-1]
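# e.g. strip_prefix('Compute-1::Flavor') -> 'Flavor'; names without '::' are returned unchanged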
def _is_blank(parameter):
return not parameter['value'] or parameter['value'] == 'unset'
def _should_generate_password(parameter):
# TODO(lsmola) Filter out SSL params for now. Once they are generated
# in TripleO, add them here too. Note: this will also affect how endpoints are
# created
key = parameter['name']
return all([
parameter['hidden'],
_is_blank(parameter),
strip_prefix(key) not in SSL_HIDDEN_PARAMS,
strip_prefix(key) not in KEYSTONE_CERTIFICATE_PARAMS,
key != 'SnmpdReadonlyUserPassword',
])
def _should_generate_keystone_cert(parameter):
return all([
strip_prefix(parameter['name']) in KEYSTONE_CERTIFICATE_PARAMS,
_is_blank(parameter),
])
def _should_generate_neutron_control_plane(parameter):
return all([
strip_prefix(parameter['name']) == 'NeutronControlPlaneID',
_is_blank(parameter),
])
class Plan(base.APIResourceWrapper):
_attrs = ('uuid', 'name', 'description', 'created_at', 'modified_at',
'roles', 'parameters')
def __init__(self, apiresource, request=None):
super(Plan, self).__init__(apiresource)
self._request = request
@classmethod
def create(cls, request, name, description):
"""Create a Plan in Tuskar
:param request: request object
:type request: django.http.HttpRequest
:param name: plan name
:type name: string
:param description: plan description
:type description: string
:return: the created Plan object
:rtype: tuskar_ui.api.tuskar.Plan
"""
plan = tuskarclient(request).plans.create(name=name,
description=description)
return cls(plan, request=request)
@classmethod
def patch(cls, request, plan_id, parameters):
"""Update a Plan in Tuskar
:param request: request object
:type request: django.http.HttpRequest
:param plan_id: id of the plan we want to update
:type plan_id: string
:param parameters: new values for the plan's parameters
:type parameters: dict
:return: the updated Plan object
:rtype: tuskar_ui.api.tuskar.Plan
"""
parameter_list = [{
'name': unicode(name),
'value': unicode(value),
} for (name, value) in parameters.items()]
plan = tuskarclient(request).plans.patch(plan_id, parameter_list)
return cls(plan, request=request)
@classmethod
def list(cls, request):
"""Return a list of Plans in Tuskar
:param request: request object
:type request: django.http.HttpRequest
:return: list of Plans, or an empty list if there are none
:rtype: list of tuskar_ui.api.tuskar.Plan
"""
plans = tuskarclient(request).plans.list()
return [cls(plan, request=request) for plan in plans]
@classmethod
@handle_errors(_("Unable to retrieve plan"))
def get(cls, request, plan_id):
"""Return the Plan that matches the ID
:param request: request object
:type request: django.http.HttpRequest
:param plan_id: id of Plan to be retrieved
:type plan_id: int
:return: matching Plan, or None if no Plan matches
the ID
:rtype: tuskar_ui.api.tuskar.Plan
"""
plan = tuskarclient(request).plans.get(plan_uuid=plan_id)
return cls(plan, request=request)
# TODO(lsmola) until we support multiple overclouds, we
# can work only with the overcloud named 'overcloud'. Delete
# this once we have more overclouds. Till then, this is the overcloud
# that rules them all.
# This is how API supports it now, so we have to have it this way.
# Also till Overcloud workflow is done properly, we have to work
# with situations that overcloud is deleted, but stack is still
# there. So overcloud will pretend to exist when stack exist.
@classmethod
def get_the_plan(cls, request):
plan_list = cls.list(request)
for plan in plan_list:
return plan
# if plan doesn't exist, create it
plan = cls.create(request, 'overcloud', 'overcloud')
return plan
@classmethod
def delete(cls, request, plan_id):
"""Delete a Plan
:param request: request object
:type request: django.http.HttpRequest
:param plan_id: plan id
:type plan_id: int
"""
tuskarclient(request).plans.delete(plan_uuid=plan_id)
@cached_property
def role_list(self):
return [Role.get(self._request, role.uuid)
for role in self.roles]
@cached_property
def _roles_by_name(self):
return dict((role.name, role) for role in self.role_list)
def get_role_by_name(self, role_name):
"""Get the role with the given name."""
return self._roles_by_name[role_name]
def get_role_node_count(self, role):
"""Get the node count for the given role."""
return int(self.parameter_value(role.node_count_parameter_name,
0) or 0)
@cached_property
def templates(self):
return tuskarclient(self._request).plans.templates(self.uuid)
@cached_property
def master_template(self):
return self.templates.get(MASTER_TEMPLATE_NAME, '')
@cached_property
def environment(self):
return self.templates.get(ENVIRONMENT_NAME, '')
@cached_property
def provider_resource_templates(self):
template_dict = dict(self.templates)
del template_dict[MASTER_TEMPLATE_NAME]
del template_dict[ENVIRONMENT_NAME]
return template_dict
def parameter_list(self, include_key_parameters=True):
params = self.parameters
if not include_key_parameters:
key_params = []
for role in self.role_list:
key_params.extend([role.node_count_parameter_name,
role.image_id_parameter_name,
role.flavor_parameter_name])
params = [p for p in params if p['name'] not in key_params]
return [Parameter(p, plan=self) for p in params]
def parameter(self, param_name):
for parameter in self.parameters:
if parameter['name'] == param_name:
return Parameter(parameter, plan=self)
def parameter_value(self, param_name, default=None):
parameter = self.parameter(param_name)
if parameter is not None:
return parameter.value
return default
def list_generated_parameters(self, with_prefix=True):
if with_prefix:
key_format = lambda key: key
else:
key_format = strip_prefix
# Get all password like parameters
return dict(
(key_format(parameter['name']), parameter)
for parameter in self.parameter_list()
if any([
_should_generate_password(parameter),
_should_generate_keystone_cert(parameter),
_should_generate_neutron_control_plane(parameter),
])
)
def _make_keystone_certificates(self, wanted_generated_params):
generated_params = {}
for cert_param in KEYSTONE_CERTIFICATE_PARAMS:
if cert_param in wanted_generated_params.keys():
# If one of the keystone certificates is not set, we have
# to generate all of them.
generate_certificates = True
break
else:
generate_certificates = False
# Generate keystone certificates
if generate_certificates:
ca_key_pem, ca_cert_pem = keystone_pki.create_ca_pair()
signing_key_pem, signing_cert_pem = (
keystone_pki.create_signing_pair(ca_key_pem, ca_cert_pem))
generated_params['KeystoneSigningCertificate'] = (
signing_cert_pem)
generated_params['KeystoneCACertificate'] = ca_cert_pem
generated_params['KeystoneSigningKey'] = signing_key_pem
return generated_params
def make_generated_parameters(self):
wanted_generated_params = self.list_generated_parameters(
with_prefix=False)
# Generate keystone certificates
generated_params = self._make_keystone_certificates(
wanted_generated_params)
# Generate passwords and control plane id
for (key, param) in wanted_generated_params.items():
if _should_generate_password(param):
generated_params[key] = password_generator()
elif _should_generate_neutron_control_plane(param):
generated_params[key] = neutron.network_list(
self._request, name='ctlplane')[0].id
# Fill all the Tuskar parameters with generated content. There are
# parameters that differ only by prefix; such parameters should
# have the same values.
wanted_prefixed_params = self.list_generated_parameters(
with_prefix=True)
tuskar_params = {}
for (key, param) in wanted_prefixed_params.items():
tuskar_params[key] = generated_params[strip_prefix(key)]
return tuskar_params
@property
def id(self):
return self.uuid
class Role(base.APIResourceWrapper):
_attrs = ('uuid', 'name', 'version', 'description', 'created')
def __init__(self, apiresource, request=None):
super(Role, self).__init__(apiresource)
self._request = request
@classmethod
@handle_errors(_("Unable to retrieve overcloud roles"), [])
def list(cls, request):
"""Return a list of Overcloud Roles in Tuskar
:param request: request object
:type request: django.http.HttpRequest
:return: list of Overcloud Roles, or an empty list if there
are none
:rtype: list of tuskar_ui.api.tuskar.Role
"""
roles = tuskarclient(request).roles.list()
return [cls(role, request=request) for role in roles]
@classmethod
@handle_errors(_("Unable to retrieve overcloud role"))
def get(cls, request, role_id):
"""Return the Tuskar Role that matches the ID
:param request: request object
:type request: django.http.HttpRequest
:param role_id: ID of Role to be retrieved
:type role_id: int
:return: matching Role, or None if no matching
Role can be found
:rtype: tuskar_ui.api.tuskar.Role
"""
for role in Role.list(request):
if role.uuid == role_id:
return role
@classmethod
@memoized.memoized
def _roles_by_image_id(cls, request, plan):
return {plan.parameter_value(role.image_id_parameter_name): role
for role in Role.list(request)}
@classmethod
@handle_errors(_("Unable to retrieve overcloud role"))
def get_by_image(cls, request, plan, image):
"""Return the Role whose ImageID parameter matches the image.
:param request: request object
:type request: django.http.HttpRequest
:param plan: associated plan to check against
:type plan: Plan
:param image: image to be matched
:type image: Image
:return: matching Role, or None if no matching
Role can be found
:rtype: tuskar_ui.api.tuskar.Role
"""
roles = cls._roles_by_image_id(request, plan)
try:
return roles[image.id]
except KeyError:
return None
@classmethod
@memoized.memoized
def _roles_by_resource_type(cls, request):
return {role.provider_resource_type: role
for role in Role.list(request)}
@classmethod
@handle_errors(_("Unable to retrieve overcloud role"))
def get_by_resource_type(cls, request, resource_type):
roles = cls._roles_by_resource_type(request)
try:
return roles[resource_type]
except KeyError:
return None
@property
def provider_resource_type(self):
return "Tuskar::{0}-{1}".format(self.name, self.version)
@property
def parameter_prefix(self):
return "{0}-{1}::".format(self.name, self.version)
@property
def node_count_parameter_name(self):
return self.parameter_prefix + 'count'
@property
def image_id_parameter_name(self):
return self.parameter_prefix + 'Image'
@property
def flavor_parameter_name(self):
return self.parameter_prefix + 'Flavor'
def image(self, plan):
image_id = plan.parameter_value(self.image_id_parameter_name)
if image_id:
try:
return glance.image_get(self._request, image_id)
except glance_exceptions.HTTPNotFound:
LOG.error("Couldn't obtain image with id %s" % image_id)
return None
def flavor(self, plan):
flavor_name = plan.parameter_value(
self.flavor_parameter_name)
if flavor_name:
return flavor.Flavor.get_by_name(self._request, flavor_name)
def parameter_list(self, plan):
return [p for p in plan.parameter_list() if self == p.role]
def is_valid_for_deployment(self, plan):
node_count = plan.get_role_node_count(self)
pending_required_params = list(Parameter.pending_parameters(
Parameter.required_parameters(self.parameter_list(plan))))
return not (
self.image(plan) is None or
(node_count and self.flavor(plan) is None) or
pending_required_params
)
@property
def id(self):
return self.uuid
class Parameter(base.APIDictWrapper):
_attrs = ['name', 'value', 'default', 'description', 'hidden', 'label',
'parameter_type', 'constraints']
def __init__(self, apidict, plan=None):
super(Parameter, self).__init__(apidict)
self._plan = plan
@property
def stripped_name(self):
return strip_prefix(self.name)
@property
def plan(self):
return self._plan
@property
def role(self):
if self.plan:
for role in self.plan.role_list:
if self.name.startswith(role.parameter_prefix):
return role
def is_required(self):
"""Boolean: True if parameter is required, False otherwise."""
return self.default is None
def get_constraint_by_type(self, constraint_type):
"""Returns parameter constraint by it's type.
For available constraint types see HOT Spec:
http://docs.openstack.org/developer/heat/template_guide/hot_spec.html
"""
constraints_of_type = [c for c in self.constraints
if c['constraint_type'] == constraint_type]
if constraints_of_type:
return constraints_of_type[0]
else:
return None
@staticmethod
def required_parameters(parameters):
"""Yields parameters which are required."""
for parameter in parameters:
if parameter.is_required():
yield parameter
@staticmethod
def pending_parameters(parameters):
"""Yields parameters which don't have value set."""
for parameter in parameters:
if not parameter.value:
yield parameter
@staticmethod
def global_parameters(parameters):
"""Yields parameters with name without role prefix."""
for parameter in parameters:
if '::' not in parameter.name:
yield parameter
| |
#!/usr/bin/env python
from __future__ import print_function
import os
import sys
import logging
import platform
import subprocess
os.environ["PYTHONUNBUFFERED"] = "y"
PY2 = sys.version_info[0] == 2
ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(ZULIP_PATH)
from scripts.lib.zulip_tools import run, subprocess_text_output, OKBLUE, ENDC, WARNING
from scripts.lib.setup_venv import setup_virtualenv, VENV_DEPENDENCIES
from scripts.lib.node_cache import setup_node_modules
SUPPORTED_PLATFORMS = {
"Ubuntu": [
"trusty",
"xenial",
],
}
PY2_VENV_PATH = "/srv/zulip-venv"
PY3_VENV_PATH = "/srv/zulip-py3-venv"
VAR_DIR_PATH = os.path.join(ZULIP_PATH, 'var')
LOG_DIR_PATH = os.path.join(VAR_DIR_PATH, 'log')
UPLOAD_DIR_PATH = os.path.join(VAR_DIR_PATH, 'uploads')
TEST_UPLOAD_DIR_PATH = os.path.join(VAR_DIR_PATH, 'test_uploads')
COVERAGE_DIR_PATH = os.path.join(VAR_DIR_PATH, 'coverage')
LINECOVERAGE_DIR_PATH = os.path.join(VAR_DIR_PATH, 'linecoverage-report')
NODE_TEST_COVERAGE_DIR_PATH = os.path.join(VAR_DIR_PATH, 'node-coverage')
if PY2:
VENV_PATH = PY2_VENV_PATH
else:
VENV_PATH = PY3_VENV_PATH
TRAVIS = "--travis" in sys.argv
PRODUCTION_TRAVIS = "--production-travis" in sys.argv
if not os.path.exists(os.path.join(ZULIP_PATH, ".git")):
print("Error: No Zulip git repository present!")
print("To setup the Zulip development environment, you should clone the code")
print("from GitHub, rather than using a Zulip production release tarball.")
sys.exit(1)
if platform.architecture()[0] == '64bit':
arch = 'amd64'
elif platform.architecture()[0] == '32bit':
arch = "i386"
else:
logging.critical("Only x86 is supported; ping zulip-devel@googlegroups.com if you want another architecture.")
sys.exit(1)
# Ideally we wouldn't need to install a dependency here, before we
# know the codename.
subprocess.check_call(["sudo", "apt-get", "update"])
subprocess.check_call(["sudo", "apt-get", "install", "-y", "lsb-release"])
vendor = subprocess_text_output(["lsb_release", "-is"])
codename = subprocess_text_output(["lsb_release", "-cs"])
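# e.g. on Ubuntu 14.04 this gives vendor == "Ubuntu" and codename == "trusty"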
if not (vendor in SUPPORTED_PLATFORMS and codename in SUPPORTED_PLATFORMS[vendor]):
logging.critical("Unsupported platform: {} {}".format(vendor, codename))
sys.exit(1)
POSTGRES_VERSION_MAP = {
"trusty": "9.3",
"xenial": "9.5",
}
POSTGRES_VERSION = POSTGRES_VERSION_MAP[codename]
UBUNTU_COMMON_APT_DEPENDENCIES = [
"closure-compiler",
"memcached",
"rabbitmq-server",
"redis-server",
"hunspell-en-us",
"supervisor",
"git",
"libssl-dev",
"yui-compressor",
"wget",
"ca-certificates", # Explicit dependency in case e.g. wget is already installed
"puppet", # Used by lint-all
"gettext", # Used by makemessages i18n
"curl", # Used for fetching PhantomJS as wget occasionally fails on redirects
"netcat", # Used for flushing memcached
] + VENV_DEPENDENCIES
APT_DEPENDENCIES = {
"trusty": UBUNTU_COMMON_APT_DEPENDENCIES + [
"postgresql-9.3",
"postgresql-9.3-tsearch-extras",
"postgresql-9.3-pgroonga",
],
"xenial": UBUNTU_COMMON_APT_DEPENDENCIES + [
"postgresql-9.5",
"postgresql-9.5-tsearch-extras",
"postgresql-9.5-pgroonga",
],
}
TSEARCH_STOPWORDS_PATH = "/usr/share/postgresql/%s/tsearch_data/" % (POSTGRES_VERSION,)
REPO_STOPWORDS_PATH = os.path.join(
ZULIP_PATH,
"puppet",
"zulip",
"files",
"postgresql",
"zulip_english.stop",
)
LOUD = dict(_out=sys.stdout, _err=sys.stderr)
def main():
# type: () -> int
# npm install and management commands expect to be run from the root of the
# project.
os.chdir(ZULIP_PATH)
run(["sudo", "./scripts/lib/setup-apt-repo"])
run(["sudo", "apt-get", "update"])
run(["sudo", "apt-get", "-y", "install", "--no-install-recommends"] + APT_DEPENDENCIES[codename])
if TRAVIS:
if PY2:
MYPY_REQS_FILE = os.path.join(ZULIP_PATH, "requirements", "mypy.txt")
setup_virtualenv(PY3_VENV_PATH, MYPY_REQS_FILE, patch_activate_script=True,
virtualenv_args=['-p', 'python3'])
DEV_REQS_FILE = os.path.join(ZULIP_PATH, "requirements", "py2_dev.txt")
setup_virtualenv(PY2_VENV_PATH, DEV_REQS_FILE, patch_activate_script=True)
else:
DEV_REQS_FILE = os.path.join(ZULIP_PATH, "requirements", "py3_dev.txt")
setup_virtualenv(VENV_PATH, DEV_REQS_FILE, patch_activate_script=True,
virtualenv_args=['-p', 'python3'])
else:
# Import tools/setup_venv.py instead of running it so that we get an
# activated virtualenv for the rest of the provisioning process.
from tools.setup import setup_venvs
setup_venvs.main()
# Put Python2 virtualenv activation in our .bash_profile.
with open(os.path.expanduser('~/.bash_profile'), 'w+') as bash_profile:
bash_profile.writelines([
"source .bashrc\n",
"source %s\n" % (os.path.join(VENV_PATH, "bin", "activate"),),
])
run(["sudo", "cp", REPO_STOPWORDS_PATH, TSEARCH_STOPWORDS_PATH])
# create log directory `zulip/var/log`
run(["mkdir", "-p", LOG_DIR_PATH])
# create upload directory `var/uploads`
run(["mkdir", "-p", UPLOAD_DIR_PATH])
# create test upload directory `var/test_uploads`
run(["mkdir", "-p", TEST_UPLOAD_DIR_PATH])
# create coverage directory `var/coverage`
run(["mkdir", "-p", COVERAGE_DIR_PATH])
# create linecoverage directory `var/linecoverage-report`
run(["mkdir", "-p", LINECOVERAGE_DIR_PATH])
# create node coverage directory `var/node-coverage`
run(["mkdir", "-p", NODE_TEST_COVERAGE_DIR_PATH])
run(["tools/setup/download-zxcvbn"])
run(["tools/setup/emoji_dump/build_emoji"])
run(["scripts/setup/generate_secrets.py", "--development"])
if TRAVIS and not PRODUCTION_TRAVIS:
run(["sudo", "service", "rabbitmq-server", "restart"])
run(["sudo", "service", "redis-server", "restart"])
run(["sudo", "service", "memcached", "restart"])
elif "--docker" in sys.argv:
run(["sudo", "service", "rabbitmq-server", "restart"])
run(["sudo", "pg_dropcluster", "--stop", POSTGRES_VERSION, "main"])
run(["sudo", "pg_createcluster", "-e", "utf8", "--start", POSTGRES_VERSION, "main"])
run(["sudo", "service", "redis-server", "restart"])
run(["sudo", "service", "memcached", "restart"])
if not PRODUCTION_TRAVIS:
# These won't be used anyway
run(["scripts/setup/configure-rabbitmq"])
run(["tools/setup/postgres-init-dev-db"])
run(["tools/do-destroy-rebuild-database"])
run(["tools/setup/postgres-init-test-db"])
run(["tools/do-destroy-rebuild-test-database"])
run(["python", "./manage.py", "compilemessages"])
# Here we install nvm, node, and npm.
run(["sudo", "tools/setup/install-node"])
# This is a wrapper around `npm install`, which we run last since
# it can often fail due to network issues beyond our control.
try:
# Hack: We remove `node_modules` as root to work around an
# issue with the symlinks being improperly owned by root.
if os.path.islink("node_modules"):
run(["sudo", "rm", "-f", "node_modules"])
setup_node_modules()
except subprocess.CalledProcessError:
print(WARNING + "`npm install` failed; retrying..." + ENDC)
setup_node_modules()
print()
print(OKBLUE + "Zulip development environment setup succeeded!" + ENDC)
return 0
if __name__ == "__main__":
sys.exit(main())
| |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import code
import cpp_util
from model import Platforms
from schema_util import CapitalizeFirstLetter
from schema_util import JsFunctionNameToClassName
import json
import os
import re
def _RemoveDescriptions(node):
"""Returns a copy of |schema| with "description" fields removed.
"""
if isinstance(node, dict):
result = {}
for key, value in node.items():
# Some schemas actually have properties called "description", so only
# remove descriptions that have string values.
if key == 'description' and isinstance(value, basestring):
continue
result[key] = _RemoveDescriptions(value)
return result
if isinstance(node, list):
return [_RemoveDescriptions(v) for v in node]
return node
class CppBundleGenerator(object):
"""This class contains methods to generate code based on multiple schemas.
"""
def __init__(self,
root,
model,
api_defs,
cpp_type_generator,
cpp_namespace,
source_file_dir,
impl_dir):
self._root = root
self._model = model
self._api_defs = api_defs
self._cpp_type_generator = cpp_type_generator
self._cpp_namespace = cpp_namespace
self._source_file_dir = source_file_dir
self._impl_dir = impl_dir
self.api_cc_generator = _APICCGenerator(self)
self.api_h_generator = _APIHGenerator(self)
self.schemas_cc_generator = _SchemasCCGenerator(self)
self.schemas_h_generator = _SchemasHGenerator(self)
def _GenerateHeader(self, file_base, body_code):
"""Generates a code.Code object for a header file
Parameters:
- |file_base| - the base of the filename, e.g. 'foo' (for 'foo.h')
- |body_code| - the code to put in between the multiple inclusion guards"""
c = code.Code()
c.Append(cpp_util.CHROMIUM_LICENSE)
c.Append()
c.Append(cpp_util.GENERATED_BUNDLE_FILE_MESSAGE % self._source_file_dir)
ifndef_name = cpp_util.GenerateIfndefName(self._source_file_dir, file_base)
c.Append()
c.Append('#ifndef %s' % ifndef_name)
c.Append('#define %s' % ifndef_name)
c.Append()
c.Concat(body_code)
c.Append()
c.Append('#endif // %s' % ifndef_name)
c.Append()
return c
def _GetPlatformIfdefs(self, model_object):
"""Generates the "defined" conditional for an #if check if |model_object|
has platform restrictions. Returns None if there are no restrictions.
"""
if model_object.platforms is None:
return None
ifdefs = []
for platform in model_object.platforms:
if platform == Platforms.CHROMEOS:
ifdefs.append('defined(OS_CHROMEOS)')
elif platform == Platforms.LINUX:
ifdefs.append('defined(OS_LINUX)')
elif platform == Platforms.MAC:
ifdefs.append('defined(OS_MACOSX)')
elif platform == Platforms.WIN:
ifdefs.append('defined(OS_WIN)')
else:
raise ValueError("Unsupported platform ifdef: %s" % platform.name)
return ' || '.join(ifdefs)
def _GenerateRegisterFunctions(self, namespace_name, function):
c = code.Code()
function_ifdefs = self._GetPlatformIfdefs(function)
if function_ifdefs is not None:
c.Append("#if %s" % function_ifdefs, indent_level=0)
function_name = JsFunctionNameToClassName(namespace_name, function.name)
c.Append("registry->RegisterFunction<%sFunction>();" % (
function_name))
if function_ifdefs is not None:
c.Append("#endif // %s" % function_ifdefs, indent_level=0)
return c
def _GenerateFunctionRegistryRegisterAll(self):
c = code.Code()
c.Append('// static')
c.Sblock('void GeneratedFunctionRegistry::RegisterAll('
'ExtensionFunctionRegistry* registry) {')
for namespace in self._model.namespaces.values():
namespace_ifdefs = self._GetPlatformIfdefs(namespace)
if namespace_ifdefs is not None:
c.Append("#if %s" % namespace_ifdefs, indent_level=0)
namespace_name = CapitalizeFirstLetter(namespace.name.replace(
"experimental.", ""))
for function in namespace.functions.values():
if function.nocompile:
continue
c.Concat(self._GenerateRegisterFunctions(namespace.name, function))
for type_ in namespace.types.values():
for function in type_.functions.values():
if function.nocompile:
continue
namespace_types_name = JsFunctionNameToClassName(
namespace.name, type_.name)
c.Concat(self._GenerateRegisterFunctions(namespace_types_name,
function))
if namespace_ifdefs is not None:
c.Append("#endif // %s" % namespace_ifdefs, indent_level=0)
c.Eblock("}")
return c
class _APIHGenerator(object):
"""Generates the header for API registration / declaration"""
def __init__(self, cpp_bundle):
self._bundle = cpp_bundle
def Generate(self, namespace):
c = code.Code()
c.Append('#include <string>')
c.Append()
c.Append('#include "base/basictypes.h"')
c.Append()
c.Append("class ExtensionFunctionRegistry;")
c.Append()
c.Concat(cpp_util.OpenNamespace(self._bundle._cpp_namespace))
c.Append()
c.Append('class GeneratedFunctionRegistry {')
c.Sblock(' public:')
c.Append('static void RegisterAll('
'ExtensionFunctionRegistry* registry);')
c.Eblock('};')
c.Append()
c.Concat(cpp_util.CloseNamespace(self._bundle._cpp_namespace))
return self._bundle._GenerateHeader('generated_api', c)
class _APICCGenerator(object):
"""Generates a code.Code object for the generated API .cc file"""
def __init__(self, cpp_bundle):
self._bundle = cpp_bundle
def Generate(self, namespace):
c = code.Code()
c.Append(cpp_util.CHROMIUM_LICENSE)
c.Append()
c.Append('#include "%s"' % (os.path.join(self._bundle._source_file_dir,
'generated_api.h')))
c.Append()
for namespace in self._bundle._model.namespaces.values():
namespace_name = namespace.unix_name.replace("experimental_", "")
implementation_header = namespace.compiler_options.get(
"implemented_in",
"%s/%s/%s_api.h" % (self._bundle._impl_dir,
namespace_name,
namespace_name))
if not os.path.exists(
os.path.join(self._bundle._root,
os.path.normpath(implementation_header))):
if "implemented_in" in namespace.compiler_options:
raise ValueError('Header file for namespace "%s" specified in '
'compiler_options not found: %s' %
(namespace.unix_name, implementation_header))
continue
ifdefs = self._bundle._GetPlatformIfdefs(namespace)
if ifdefs is not None:
c.Append("#if %s" % ifdefs, indent_level=0)
c.Append('#include "%s"' % implementation_header)
if ifdefs is not None:
c.Append("#endif // %s" % ifdefs, indent_level=0)
c.Append()
c.Append('#include '
'"extensions/browser/extension_function_registry.h"')
c.Append()
c.Concat(cpp_util.OpenNamespace(self._bundle._cpp_namespace))
c.Append()
c.Concat(self._bundle._GenerateFunctionRegistryRegisterAll())
c.Append()
c.Concat(cpp_util.CloseNamespace(self._bundle._cpp_namespace))
c.Append()
return c
class _SchemasHGenerator(object):
"""Generates a code.Code object for the generated schemas .h file"""
def __init__(self, cpp_bundle):
self._bundle = cpp_bundle
def Generate(self, namespace):
c = code.Code()
c.Append('#include <map>')
c.Append('#include <string>')
c.Append()
c.Append('#include "base/strings/string_piece.h"')
c.Append()
c.Concat(cpp_util.OpenNamespace(self._bundle._cpp_namespace))
c.Append()
c.Append('class GeneratedSchemas {')
c.Sblock(' public:')
c.Append('// Determines if schema named |name| is generated.')
c.Append('static bool IsGenerated(std::string name);')
c.Append()
c.Append('// Gets the API schema named |name|.')
c.Append('static base::StringPiece Get(const std::string& name);')
c.Eblock('};')
c.Append()
c.Concat(cpp_util.CloseNamespace(self._bundle._cpp_namespace))
return self._bundle._GenerateHeader('generated_schemas', c)
def _FormatNameAsConstant(name):
"""Formats a name to be a C++ constant of the form kConstantName"""
name = '%s%s' % (name[0].upper(), name[1:])
return 'k%s' % re.sub('_[a-z]',
lambda m: m.group(0)[1].upper(),
name.replace('.', '_'))
class _SchemasCCGenerator(object):
"""Generates a code.Code object for the generated schemas .cc file"""
def __init__(self, cpp_bundle):
self._bundle = cpp_bundle
def Generate(self, namespace):
c = code.Code()
c.Append(cpp_util.CHROMIUM_LICENSE)
c.Append()
c.Append('#include "%s"' % (os.path.join(self._bundle._source_file_dir,
'generated_schemas.h')))
c.Append()
c.Append('#include "base/lazy_instance.h"')
c.Append()
c.Append('namespace {')
for api in self._bundle._api_defs:
namespace = self._bundle._model.namespaces[api.get('namespace')]
# JSON parsing code expects lists of schemas, so dump a singleton list.
json_content = json.dumps([_RemoveDescriptions(api)],
separators=(',', ':'))
# Escape all double-quotes and backslashes. For this to output a valid
# JSON C string, we need to escape \ and ".
json_content = json_content.replace('\\', '\\\\').replace('"', '\\"')
c.Append('const char %s[] = "%s";' %
(_FormatNameAsConstant(namespace.name), json_content))
c.Append('}')
c.Concat(cpp_util.OpenNamespace(self._bundle._cpp_namespace))
c.Append()
c.Sblock('struct Static {')
c.Sblock('Static() {')
for api in self._bundle._api_defs:
namespace = self._bundle._model.namespaces[api.get('namespace')]
c.Append('schemas["%s"] = %s;' % (namespace.name,
_FormatNameAsConstant(namespace.name)))
c.Eblock('}')
c.Append()
c.Append('std::map<std::string, const char*> schemas;')
c.Eblock('};')
c.Append()
c.Append('base::LazyInstance<Static> g_lazy_instance;')
c.Append()
c.Append('// static')
c.Sblock('base::StringPiece GeneratedSchemas::Get('
'const std::string& name) {')
c.Append('return IsGenerated(name) ? '
'g_lazy_instance.Get().schemas[name] : "";')
c.Eblock('}')
c.Append()
c.Append('// static')
c.Sblock('bool GeneratedSchemas::IsGenerated(std::string name) {')
c.Append('return g_lazy_instance.Get().schemas.count(name) > 0;')
c.Eblock('}')
c.Append()
c.Concat(cpp_util.CloseNamespace(self._bundle._cpp_namespace))
c.Append()
return c
| |
"""The tests for the Async Media player helper functions."""
import asyncio
import unittest
import homeassistant.components.media_player as mp
from homeassistant.const import (
STATE_IDLE,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
STATE_PLAYING,
)
from tests.common import get_test_home_assistant
class AsyncMediaPlayer(mp.MediaPlayerDevice):
"""Async media player test class."""
def __init__(self, hass):
"""Initialize the test media player."""
self.hass = hass
self._volume = 0
self._state = STATE_OFF
@property
def state(self):
"""State of the player."""
return self._state
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume
@property
def supported_features(self):
"""Flag media player features that are supported."""
return (
mp.const.SUPPORT_VOLUME_SET
| mp.const.SUPPORT_PLAY
| mp.const.SUPPORT_PAUSE
| mp.const.SUPPORT_TURN_OFF
| mp.const.SUPPORT_TURN_ON
)
@asyncio.coroutine
def async_set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self._volume = volume
@asyncio.coroutine
def async_media_play(self):
"""Send play command."""
self._state = STATE_PLAYING
@asyncio.coroutine
def async_media_pause(self):
"""Send pause command."""
self._state = STATE_PAUSED
@asyncio.coroutine
def async_turn_on(self):
"""Turn the media player on."""
self._state = STATE_ON
@asyncio.coroutine
def async_turn_off(self):
"""Turn the media player off."""
self._state = STATE_OFF
class SyncMediaPlayer(mp.MediaPlayerDevice):
"""Sync media player test class."""
def __init__(self, hass):
"""Initialize the test media player."""
self.hass = hass
self._volume = 0
self._state = STATE_OFF
@property
def state(self):
"""State of the player."""
return self._state
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume
@property
def supported_features(self):
"""Flag media player features that are supported."""
return (
mp.const.SUPPORT_VOLUME_SET
| mp.const.SUPPORT_VOLUME_STEP
| mp.const.SUPPORT_PLAY
| mp.const.SUPPORT_PAUSE
| mp.const.SUPPORT_TURN_OFF
| mp.const.SUPPORT_TURN_ON
)
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self._volume = volume
def volume_up(self):
"""Turn volume up for media player."""
if self.volume_level < 1:
self.set_volume_level(min(1, self.volume_level + 0.2))
def volume_down(self):
"""Turn volume down for media player."""
if self.volume_level > 0:
self.set_volume_level(max(0, self.volume_level - 0.2))
def media_play_pause(self):
"""Play or pause the media player."""
if self._state == STATE_PLAYING:
self._state = STATE_PAUSED
else:
self._state = STATE_PLAYING
def toggle(self):
"""Toggle the power on the media player."""
if self._state in [STATE_OFF, STATE_IDLE]:
self._state = STATE_ON
else:
self._state = STATE_OFF
@asyncio.coroutine
def async_media_play_pause(self):
"""Create a coroutine to wrap the future returned by ABC.
This allows the run_coroutine_threadsafe helper to be used.
"""
yield from super().async_media_play_pause()
@asyncio.coroutine
def async_toggle(self):
"""Create a coroutine to wrap the future returned by ABC.
This allows the run_coroutine_threadsafe helper to be used.
"""
yield from super().async_toggle()
class TestAsyncMediaPlayer(unittest.TestCase):
"""Test the media_player module."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.player = AsyncMediaPlayer(self.hass)
def tearDown(self):
"""Shut down test instance."""
self.hass.stop()
def test_volume_up(self):
"""Test the volume_up helper function."""
assert self.player.volume_level == 0
asyncio.run_coroutine_threadsafe(
self.player.async_set_volume_level(0.5), self.hass.loop
).result()
assert self.player.volume_level == 0.5
asyncio.run_coroutine_threadsafe(
self.player.async_volume_up(), self.hass.loop
).result()
assert self.player.volume_level == 0.6
def test_volume_down(self):
"""Test the volume_down helper function."""
assert self.player.volume_level == 0
asyncio.run_coroutine_threadsafe(
self.player.async_set_volume_level(0.5), self.hass.loop
).result()
assert self.player.volume_level == 0.5
asyncio.run_coroutine_threadsafe(
self.player.async_volume_down(), self.hass.loop
).result()
assert self.player.volume_level == 0.4
def test_media_play_pause(self):
"""Test the media_play_pause helper function."""
assert self.player.state == STATE_OFF
asyncio.run_coroutine_threadsafe(
self.player.async_media_play_pause(), self.hass.loop
).result()
assert self.player.state == STATE_PLAYING
asyncio.run_coroutine_threadsafe(
self.player.async_media_play_pause(), self.hass.loop
).result()
assert self.player.state == STATE_PAUSED
def test_toggle(self):
"""Test the toggle helper function."""
assert self.player.state == STATE_OFF
asyncio.run_coroutine_threadsafe(
self.player.async_toggle(), self.hass.loop
).result()
assert self.player.state == STATE_ON
asyncio.run_coroutine_threadsafe(
self.player.async_toggle(), self.hass.loop
).result()
assert self.player.state == STATE_OFF
class TestSyncMediaPlayer(unittest.TestCase):
"""Test the media_player module."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.player = SyncMediaPlayer(self.hass)
def tearDown(self):
"""Shut down test instance."""
self.hass.stop()
def test_volume_up(self):
"""Test the volume_up helper function."""
assert self.player.volume_level == 0
self.player.set_volume_level(0.5)
assert self.player.volume_level == 0.5
asyncio.run_coroutine_threadsafe(
self.player.async_volume_up(), self.hass.loop
).result()
assert self.player.volume_level == 0.7
def test_volume_down(self):
"""Test the volume_down helper function."""
assert self.player.volume_level == 0
self.player.set_volume_level(0.5)
assert self.player.volume_level == 0.5
asyncio.run_coroutine_threadsafe(
self.player.async_volume_down(), self.hass.loop
).result()
assert self.player.volume_level == 0.3
def test_media_play_pause(self):
"""Test the media_play_pause helper function."""
assert self.player.state == STATE_OFF
asyncio.run_coroutine_threadsafe(
self.player.async_media_play_pause(), self.hass.loop
).result()
assert self.player.state == STATE_PLAYING
asyncio.run_coroutine_threadsafe(
self.player.async_media_play_pause(), self.hass.loop
).result()
assert self.player.state == STATE_PAUSED
def test_toggle(self):
"""Test the toggle helper function."""
assert self.player.state == STATE_OFF
asyncio.run_coroutine_threadsafe(
self.player.async_toggle(), self.hass.loop
).result()
assert self.player.state == STATE_ON
asyncio.run_coroutine_threadsafe(
self.player.async_toggle(), self.hass.loop
).result()
assert self.player.state == STATE_OFF
| |
__author__ = 'sei'
import socket
import time
import sys
import multiprocessing
class Controller(object):
def __init__(self, ip=None, port=None, coordinate_mapping = None, z_correction_angle = None):
self.is_initialized = False
received = None
self._lock = multiprocessing.Lock()
self._x = multiprocessing.Value('d', 0.)
self._y = multiprocessing.Value('d', 0.)
self._z = multiprocessing.Value('d', 0.)
self._ID = None
self._sock = None
self._buffer_size = 1024
# examples for coordinate_mapping:
# coordinate_mapping = {"x":"x","y":"y","z":"z"} will map coordinates to themselves
# coordinate_mapping = {"x":"z","y":"y","z":"x"} will map x to z and z to x, y stays the same
self._coord_map = coordinate_mapping
        # z_correction_angle also moves the stage in the x direction when it is moved in z;
        # this compensates for x drift when the stage sits at an angle relative to the objective
if z_correction_angle is None:
self.z_correction_angle = 0
else:
self.z_correction_angle = z_correction_angle
if ip is None:
self._ip, self._port, self._ID = self._findcontroller()
else:
self._ip = ip
self._port = port
print('Trying to connect to Controller...')
self._lock.acquire()
        try:
            self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._sock.connect((self._ip, self._port))
            self._sock.send('POS?\n'.encode('UTF-8'))
            received = self._sock.recv(self._buffer_size)
            print(received)
        except Exception:
            self._sock.close()
            raise RuntimeError('Could not connect to Controller')
        finally:
            self._lock.release()
        if received is not None:
print('Successfully connected to Controller')
self.is_initialized = True
else:
print('Could not connect to Controller')
def __del__(self):
        if self._sock is not None:
            if not self._sock._closed:
self._lock.acquire()
self._sock.close()
self._lock.release()
def _findcontroller(self):
def recv_timeout(the_socket, timeout=2):
# make socket non blocking
the_socket.setblocking(0)
# total data partwise in an array
total_data = []
addr = None
# beginning time
begin = time.time()
while 1:
# if you got some data, then break after timeout
if total_data and time.time() - begin > timeout:
break
#if you got no data at all, wait a little longer, twice the timeout
elif time.time() - begin > timeout * 2:
break
#recv something
try:
data, addr = the_socket.recvfrom(8192)
if data:
total_data.append(data)
#change the beginning time for measurement
begin = time.time()
else:
#sleep for sometime to indicate a gap
time.sleep(0.01)
except:
pass
if addr is not None:
print(addr)
# join all parts to make final string
            return addr, str(b"".join(total_data), "UTF-8")
message = bytes('PI', 'UTF-8')
        multicast_group = ('<broadcast>', 50000)
self._lock.acquire()
# Create the datagram socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('', 0))
sock.settimeout(2)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
try:
# Send data to the multicast group
print('Searching for Controller')
sock.sendto(message, multicast_group)
except socket.error:
# Send failed
print('Send failed')
sys.exit()
# get reply and print
addr, data = recv_timeout(sock)
        print('Found Controller ' + data + ' at ' + addr[0])
# Close the socket
sock.close()
self._lock.release()
return addr[0], addr[1], data
def map_coordinates(self,x,y,z):
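        # Usage example (illustrative): with coordinate_mapping = {"x": "z", "y": "y", "z": "x"},
        # map_coordinates(1, 2, 3) returns (3, 2, 1), i.e. the x and z axes are swapped.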
if self._coord_map is not None:
if self._coord_map["x"] == "x":
nx = x
elif self._coord_map["x"] == "y":
nx = y
elif self._coord_map["x"] == "z":
nx = z
if self._coord_map["y"] == "x":
ny = x
elif self._coord_map["y"] == "y":
ny = y
elif self._coord_map["y"] == "z":
ny = z
if self._coord_map["z"] == "x":
nz = x
elif self._coord_map["z"] == "y":
nz = y
elif self._coord_map["z"] == "z":
nz = z
return nx, ny, nz
else:
return x,y,z
def query_pos(self):
pass
def moveabs(self, x=None, y=None, z=None):
pass
def moverel(self, dx=None, dy=None, dz=None):
pass
def home(self):
pass
def last_pos(self):
if self._coord_map is not None:
if self._coord_map["x"] == "x":
nx = self._x.value
elif self._coord_map["x"] == "y":
nx = self._y.value
elif self._coord_map["x"] == "z":
nx = self._z.value
if self._coord_map["y"] == "x":
ny = self._x.value
elif self._coord_map["y"] == "y":
ny = self._y.value
elif self._coord_map["y"] == "z":
ny = self._z.value
if self._coord_map["z"] == "x":
nz = self._x.value
elif self._coord_map["z"] == "y":
nz = self._y.value
elif self._coord_map["z"] == "z":
nz = self._z.value
return nx, ny, nz
else:
return self._x.value, self._y.value, self._z.value
#return self._x.value, self._y.value, self._z.value
def set_z_correction_angle(self,angle):
self.z_correction_angle = angle
| |
#!/usr/bin/env python
import os
import re
import sys
import json
import logging
import itertools
from optparse import OptionParser
__doc__ = """
Script to create LCA for contigs from separate protein and rRNA (feature) LCAs.
Features are mapped to contigs based on ID prefix:
feature ID = PREFIX_start_stop_strand
contig ID = PREFIX
LCA file format, TSV, same for inputs and outputs:
md5 list, feature id, identity list, length list, evalue list, lca string, depth of lca (1-8)"""
# logging
LOG_FORMAT = '[%(asctime)-15s] [%(levelname)-5s] %(message)s'
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
logger = logging.getLogger()
# regex
id_re = re.compile(r"^(\S+?)_\d+_\d+_[-|+]$") # .group(1) == contig name
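# Illustrative example: a feature id such as "contig42_100_400_+" matches id_re and
# id_re.match(...).group(1) yields the contig name "contig42"; ids without the
# "_start_stop_strand" suffix are skipped.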
def process_line(line):
parts = line.strip().split('\t')
if len(parts) < 7:
return "", None
(md5, frag, ident, length, e_val, lca, lvl) = parts[:7]
if not (frag and md5 and lca):
return "", None
id_match = id_re.match(frag)
if not id_match:
return "", None
return id_match.group(1), [ md5.split(';'), frag, ident.split(';'), length.split(';'), e_val.split(';'), lca.split(';'), lvl ]
def merge_rows(cid, rows, scgs=set()):
md5, ident, length, e_val = (set() for i in range(4))
lca = ["-"] * 8
lvl = 0
lca_scg = [] # [ [length, lca] ]
lcamatrix = [] # [ lcas ]
for row in rows:
# get scg lcas if any
if scgs and any(m in scgs for m in row[0]):
            lca_scg.append([max(float(l) for l in row[3]), row[5]])
md5.update(row[0])
ident.update(row[2])
length.update(row[3])
e_val.update(row[4])
if len(lca_scg) == 0:
# no SCGs, use all LCAs
lcamatrix = [row[5] for row in rows]
else:
# get LCA for best hit SCG
maxlen = max([x[0] for x in lca_scg])
lcamatrix = [x[1] for x in filter(lambda y: y[0] == maxlen, lca_scg)]
# find LCA of LCAs
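    # Worked example (illustrative): if lcamatrix holds
    #   ["Bacteria", "Proteobacteria", "Gammaproteobacteria"] and
    #   ["Bacteria", "Proteobacteria", "Alphaproteobacteria"],
    # the columns from zip(*lcamatrix) agree for the first two ranks only, so lca
    # becomes ["Bacteria", "Proteobacteria", "-", ...] and lvl is 2.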
lcarotate = zip(*lcamatrix)
for i, x in enumerate(lcarotate):
if all_equal(x):
lca[i] = x[0]
lvl = i + 1
else:
break
return [ list(md5), cid, list(ident), list(length), list(e_val), lca, lvl ]
def all_equal(iterable):
"Returns True if all the elements are equal to each other"
g = itertools.groupby(iterable)
return next(g, True) and not next(g, False)
def print_row(hdl, row):
for i, r in enumerate(row):
if isinstance(r, list):
row[i] = ";".join(r)
if isinstance(r, int):
row[i] = str(r)
hdl.write("\t".join(row)+"\n")
def load_scgs(fname):
md5s = set()
try:
data = json.load(open(fname, 'r'))
md5s = set(data.keys())
except:
pass
return md5s
usage = "usage: %prog [options]\n" + __doc__
def main(args):
parser = OptionParser(usage=usage)
parser.add_option("--rna", dest="rna", default=None, help="input file: expanded rna lca")
parser.add_option("--prot", dest="prot", default=None, help="input file: expanded protein lca")
parser.add_option("--scg", dest="scg", default=None, help="input file: json format map of md5s that are SCGs")
parser.add_option("-o", "--output", dest="output", default=None, help="output file: expanded contig lca")
parser.add_option("-v", "--verbose", dest="verbose", action="store_true", help="Print informational messages.")
(opts, args) = parser.parse_args()
if not (opts.rna and os.path.isfile(opts.rna)):
logger.error("missing required input rna lca file")
return 1
if not (opts.prot and os.path.isfile(opts.prot)):
logger.error("missing required input protein lca file")
return 1
if not opts.output:
logger.error("missing required output file")
return 1
rna_contigs = set()
ohdl = open(opts.output, 'w')
rhdl = open(opts.rna, 'rU')
    # create contig LCAs for RNA features first; these take precedence over protein features
if opts.verbose:
print "Reading file %s ... "%(opts.rna)
# get first parsable line
prev = ""
row = []
data = []
while prev == "":
firstline = rhdl.readline()
prev, row = process_line(firstline)
data.append(row)
rctg = 0
rrna = 1
# process remaining lines
for line in rhdl:
cid, row = process_line(line)
if cid == "":
continue
if cid != prev:
# new contig found, process old
mrow = merge_rows(prev, data)
rna_contigs.add(prev)
print_row(ohdl, mrow)
# reset
prev = cid
data = []
rctg += 1
data.append(row)
rrna += 1
# process last contig
if len(data) > 0:
mrow = merge_rows(prev, data)
rna_contigs.add(prev)
print_row(ohdl, mrow)
rctg += 1
rhdl.close()
if opts.verbose:
print "Done: %d contigs with %d rRNAs processed"%(rctg, rrna)
md5_scgs = load_scgs(opts.scg)
phdl = open(opts.prot, 'rU')
if opts.verbose:
"Reading file %s ... "%(opts.prot)
# get first parsable line
prev = ""
row = []
data = []
while prev == "":
firstline = phdl.readline()
prev, row = process_line(firstline)
data.append(row)
pctg = 0
prot = 1
# process remaining lines
for line in phdl:
cid, row = process_line(line)
if (cid == "") or (cid in rna_contigs):
# skip those found with rnas
continue
if cid != prev:
# new contig found, process old
mrow = merge_rows(prev, data, md5_scgs)
print_row(ohdl, mrow)
# reset
prev = cid
data = []
pctg += 1
data.append(row)
prot += 1
# process last contig
if len(data) > 0:
        mrow = merge_rows(prev, data, md5_scgs)
        print_row(ohdl, mrow)
        pctg += 1
phdl.close()
ohdl.close()
if opts.verbose:
print "Done: %d contigs with %d proteins processed"%(pctg, prot)
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
| |
import pygame
import math
import serial
import RPi.GPIO as GPIO
from numpy import matrix, nan_to_num
# Define some colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
SERIAL_MARKER = 128
VECTOR_MOTORS_IDNT = 0
UP_DOWN_MOTOR_IDNT = 2
LIGHT_TOGGLE_IDNT = 5
CLAW_OPEN_CLOSE_IDNT = 4
CLAW_SPIN_IDNT = 3
CLAW_OPEN_CLOSE_GPIO_PIN = 2
TRIPPLE_ROTATE_RIGHT_IDNT = 6
TRIPPLE_ROTATE_LEFT_IDNT = 7
def pmap( value, istart, istop, ostart, ostop):
return ostart + (ostop - ostart) * ((value - istart) / (istop - istart))
def sendSerial(serialPort , data):
serialPort.write(data)
##### CONFIGURE THESE AXES #####
X_AXIS = 0
Y_AXIS = 1
YAW_AXIS = 4
INVERT_X = False
INVERT_Y = False
INVERT_Yaw = False
################################
# Define some motor control matrices
Y_AXIS_MATRIX = matrix('-1, -1; 1, 1')
X_AXIS_MATRIX = matrix('1, -1; 1, -1')
YAW_MATRIX = matrix('1, -1; -1, 1')
# This is a simple class that will help us print to the screen
# It has nothing to do with the joysticks, just outputting the
# information.
class TextPrint:
def __init__(self):
self.reset()
self.font = pygame.font.Font(None, 20)
def printScreen(self, screen, textString):
textBitmap = self.font.render(textString, True, BLACK)
screen.blit(textBitmap, [self.x, self.y])
self.y += self.line_height
def print2DMatrix(self, screen, matrix):
array = matrix.getA()
for row in array:
self.printScreen(screen, "[{0:07.3f}, {1:07.3f}]".format(row[0].item(), row[1].item()))
def reset(self):
self.x = 10
self.y = 10
self.line_height = 15
def indent(self):
self.x += 10
def unindent(self):
self.x -= 10
pygame.init()
# Open the serial port on the Raspberry Pi 3
ser = serial.Serial('/dev/ttyS0', 19200, timeout=5)
# Set the width and height of the screen [width,height]
size = [200, 420]
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Four Motor Demo")
# Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
# Initialize the joysticks
pygame.joystick.init()
# Get ready to print
textPrint = TextPrint()
# -------- Main Program Loop -----------
while not done:
# EVENT PROCESSING STEP
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
done = True # Flag that we are done so we exit this loop
# DRAWING STEP
# First, clear the screen to white. Don't put other drawing commands
# above this, or they will be erased with this command.
screen.fill(WHITE)
textPrint.reset()
try:
# Get joystick to work with.
joystick = pygame.joystick.Joystick(0)
except pygame.error:
print('Please connect a controller')
exit(1)
joystick.init()
# Print axis values.
for i in range (joystick.get_numaxes()):
axis = joystick.get_axis(i)
axis = pmap(math.fabs(axis), math.sqrt(2 * math.pow(0.17, 2)), 1, 0, 1)
axis = max(0, axis)
axis = math.copysign(axis, joystick.get_axis(i))
textPrint.printScreen(screen, "Axis {0} value: {1:07.3f}".format(i, axis))
textPrint.printScreen(screen, "")
# Get axes to work with.
# Set tolerance
axisX = joystick.get_axis(X_AXIS)
axisX = pmap(math.fabs(axisX), math.sqrt(2 * math.pow(0.17, 2)), 1, 0, 1)
axisX = max(0, axisX)
axisX = math.copysign(axisX, joystick.get_axis(X_AXIS))
axisY = joystick.get_axis(Y_AXIS)
axisY = pmap(math.fabs(axisY), math.sqrt(2 * math.pow(0.17, 2)), 1, 0, 1)
axisY = max(0, axisY)
axisY = math.copysign(axisY, joystick.get_axis(Y_AXIS))
axisYaw = joystick.get_axis(YAW_AXIS)
axisYaw = pmap(math.fabs(axisYaw), math.sqrt(2 * math.pow(0.17, 2)), 1, 0, 1)
axisYaw = max(0, axisYaw)
axisYaw = math.copysign(axisYaw, joystick.get_axis(YAW_AXIS))
xAxis = axisX * (-1 if INVERT_X else 1)
yAxis = axisY * (-1 if INVERT_Y else 1)
yawAxis = axisYaw * (-1 if INVERT_Yaw else 1)
# Construct individual thrust matrices.
xMatrix = (X_AXIS_MATRIX * xAxis)
yMatrix = (Y_AXIS_MATRIX * yAxis)
yawMatrix = (YAW_MATRIX * yawAxis)
# Combine individual thrust matrices into complete motor thrust matrix.
motorMatrix = (xMatrix + yMatrix + yawMatrix)
# Calculate thrust matrix scaling factor.
maxInputMag = max(abs(xAxis), abs(yAxis), abs(yawAxis))
maxThrust = max(abs(motorMatrix.min()), motorMatrix.max())
motorScalar = nan_to_num(maxInputMag / maxThrust)
# Scale thrust matrix down to within motor thrust range.
motorMatrix = (motorMatrix * motorScalar) * 128
# Clip off thrust matrix values less than -127.
motorMatrix = motorMatrix.clip(min=-127)
# Cast matrix values to integers.
motorMatrix = motorMatrix.astype(int)
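    # Worked example (illustrative numbers): with xAxis = 1, yAxis = 1, yawAxis = 0 the
    # combined matrix is [[0, -2], [2, 0]]; maxInputMag = 1, maxThrust = 2, so
    # motorScalar = 0.5 and the scaled matrix is [[0, -128], [128, 0]] before values
    # below -127 are clipped and everything is cast to int.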
# Print matrices to screen.
textPrint.printScreen(screen, "xMatrix: ")
textPrint.print2DMatrix(screen, xMatrix)
textPrint.printScreen(screen, "")
textPrint.printScreen(screen, "yMatrix: ")
textPrint.print2DMatrix(screen, yMatrix)
textPrint.printScreen(screen, "")
textPrint.printScreen(screen, "yawMatrix: ")
textPrint.print2DMatrix(screen, yawMatrix)
textPrint.printScreen(screen, "")
textPrint.printScreen(screen, "motorMatrix: ")
textPrint.print2DMatrix(screen, motorMatrix)
textPrint.printScreen(screen, "")
# Print motor values.
textPrint.printScreen(screen, "Fore-Port Motor: {:03d}".format(motorMatrix.item(0)))
textPrint.printScreen(screen, "Fore-Starboard Motor: {:03d}".format(motorMatrix.item(1)))
textPrint.printScreen(screen, "Aft-Port Motor: {:03d}".format(motorMatrix.item(2)))
textPrint.printScreen(screen, "Aft-Starboard Motor: {:03d}".format(motorMatrix.item(3)))
textPrint.printScreen(screen, "")
FPM = motorMatrix.item(0) % 256
FSM = motorMatrix.item(1) % 256
APM = motorMatrix.item(2) % 256
ASM = motorMatrix.item(3) % 256
axisBytes = bytes([SERIAL_MARKER, VECTOR_MOTORS_IDNT , FPM, FSM, APM, ASM])
sendSerial(ser, axisBytes)
# ALL CODE TO DRAW SHOULD GO ABOVE THIS COMMENT
# Go ahead and update the screen with what we've drawn.
pygame.display.flip()
# Limit to 20 frames per second
clock.tick(20)
# Close the window and quit.
# If you forget this line, the program will 'hang'
# on exit if running from IDLE.
pygame.quit()
| |
'''
A suite of common exponential family distributions.
'''
import numpy as np
import scipy.sparse as sps
from scipy.special import psi, polygamma, gammaln, gammasgn
from copy import deepcopy
from utils import pretty_str, safe_exp, safe_sq
import csv
def get_node(name):
if name == 'bernoulli' or name == 'b':
return Bernoulli()
if name == 'gaussian' or name == 'normal' or name == 'n':
return Gaussian()
if name == 'gamma' or name == 'g':
return Gamma()
if name.startswith('dirichlet') or name.startswith('d') or name.startswith('dir'):
num_params = int(name.replace('dirichlet', '').replace('dir', '').replace('d',''))
return Dirichlet(num_params)
if name.startswith('zi') or name.startswith('zeroinflated'):
name = name[len('zi'):] if name.startswith('zi') else name[len('zeroinflated'):]
return ZeroInflated(get_node(name))
def get_node_from_file(target, filename):
with open(filename, 'rb') as f:
reader = csv.reader(f)
line = reader.next()
return get_node(line[target])
def load_nodes(filename):
with open(filename, 'rb') as f:
reader = csv.reader(f)
return [get_node(x) for x in reader.next()]
def save_nodes(nodes, filename):
with open(filename, 'wb') as f:
writer = csv.writer(f)
writer.writerow(nodes)
class ExponentialFamily:
def log_likelihood(self, eta, x):
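        # The generic exponential-family log-likelihood computed here is
        #     log p(x | eta) = eta . T(x) + log h(x) - A(eta),
        # where T(x) are the sufficient statistics (x is expected to already hold
        # them), h(x) is the base measure and A(eta) the log-partition function;
        # subclasses supply each of those pieces.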
if sps.issparse(x):
# CSC format
y = x.multiply(eta.T).sum()
else:
y = (eta.T * x).sum()
return y + self.log_base_measure(x).sum() - self.log_partition(eta).sum()
def log_base_measure(self, x):
pass
def sufficient_statistics(self, x):
pass
def log_partition(self, eta):
pass
def grad_log_partition(self, eta):
pass
def hessian_log_partition(self, eta):
pass
def diagonal_hessian_log_partition(self, eta):
pass
def sample(self, eta, count=1):
pass
def starting_eta(self):
pass
def starting_x(self):
pass
class Bernoulli(ExponentialFamily):
def __init__(self):
self.num_params = 1
self.domain_size = 1
def sufficient_statistics(self, x):
if type(x) is not np.ndarray or len(x.shape) == 1:
return np.array([x]).T
return np.copy(x)
def log_base_measure(self, x):
return np.zeros(x.shape)
def log_partition(self, eta):
return np.log(1 + safe_exp(eta))
def grad_log_partition(self, eta):
exp_eta = safe_exp(eta)
return exp_eta / (exp_eta + 1.0)
def hessian_log_partition(self, eta):
exp_eta = safe_exp(eta)
return -exp_eta / safe_sq(exp_eta + 1)
def diagonal_hessian_log_partition(self, eta):
return self.hessian_log_partition(eta)
def sample(self, eta, count=1):
exp_eta = safe_exp(eta)
p = exp_eta / (1 + exp_eta)
return np.random.random(size=count) < p
def eta_constraints(self, eta):
return np.array([[]])
def grad_eta_constraints(self, eta):
return np.array([[]])
def diagonal_hessian_eta_constraints(self, eta):
return np.array([[]])
def starting_x(self):
return np.zeros(1)
def __repr__(self):
return 'Bernoulli'
class Gamma(ExponentialFamily):
def __init__(self):
self.num_params = 2
self.domain_size = 1
def sufficient_statistics(self, x):
return np.array([np.log(x), x]).T
def log_base_measure(self, x):
return np.zeros(x.shape)
def log_partition(self, eta):
#assert np.all(gammasgn(eta[0]+1) == 1)
return gammaln(eta[0] + 1) - (eta[0] + 1) * np.log(-eta[1])
def grad_log_partition(self, eta):
return np.array([psi(eta[0] + 1) - np.log(-eta[1]), -(eta[0] + 1) / eta[1]])
def hessian_log_partition(self, eta):
return np.array([[polygamma(1, eta[0] + 1), -1.0 / eta[1]],
[-1.0 / eta[1], (eta[0] + 1) / safe_sq(eta[1])]])
def diagonal_hessian_log_partition(self, eta):
return np.array([polygamma(1, eta[0] + 1), (eta[0] + 1) / safe_sq(eta[1])])
def sample(self, eta, count=1):
return np.random.gamma(eta[0] + 1, -1.0 / eta[1], size=count)
def eta_constraints(self, eta):
if len(eta.shape) == 1:
return np.array([-1 - eta[0], eta[1]])
return np.array([-1 - eta[:,0], eta[:,1]])
def grad_eta_constraints(self, eta):
if len(eta.shape) == 1:
return np.array([-1., 1.])
return np.array([np.zeros(eta.shape[0]) - 1., np.ones(eta.shape[0])])
def diagonal_hessian_eta_constraints(self, eta):
if len(eta.shape) == 1:
return np.array([0., 0.])
return np.array([np.zeros(eta.shape[0]), np.zeros(eta.shape[0])])
def starting_x(self):
return np.ones(1)
def __repr__(self):
return 'Gamma'
class Gaussian(ExponentialFamily):
def __init__(self):
self.num_params = 2
self.domain_size = 1
def sufficient_statistics(self, x):
return np.array([x, safe_sq(x)]).T
def log_base_measure(self, x):
return np.repeat(np.log(1./np.sqrt(2*np.pi)), x.shape[0])
def log_partition(self, eta):
return -safe_sq(eta[0]) / (4*eta[1]) - 0.5 * np.log(-2 * eta[1])
def grad_log_partition(self, eta):
return np.array([-0.5 * eta[0] / eta[1], (safe_sq(eta[0]) - 2*eta[1]) / (4 * safe_sq(eta[1]))])
def hessian_log_partition(self, eta):
return np.array([[-0.5 / eta[1],0.5 * eta[0] / safe_sq(eta[1])],
[0.5 * eta[0] / safe_sq(eta[1]),(eta[1] - safe_sq(eta[0])) / (2*eta[1]**3)]])
def diagonal_hessian_log_partition(self, eta):
return np.array([-0.5 / eta[1], (eta[1] - safe_sq(eta[0])) / (2*eta[1]**3)])
def sample(self, eta, count=1):
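        # The natural parameters relate to (mu, sigma) via eta = (mu / sigma^2, -1 / (2 sigma^2)),
        # so variance = -1 / (2 * eta[1]) and mu = eta[0] * variance, as computed below.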
variance = -1. / (2. * eta[1])
mu = eta[0] * variance
sigma = np.sqrt(variance)
return np.random.normal(mu, sigma, size=count)
def eta_constraints(self, eta):
if len(eta.shape) == 1:
return np.array([eta[1]])
return np.array([np.zeros(0),eta[:,1]])
def grad_eta_constraints(self, eta):
if len(eta.shape) == 1:
return np.array([1.])
return np.array([np.zeros(0),np.ones(eta.shape[0])])
def diagonal_hessian_eta_constraints(self, eta):
if len(eta.shape) == 1:
return np.array([0., 0.])
return np.array([np.zeros(0),np.zeros(eta.shape[0])])
def starting_x(self):
return np.zeros(1)
def __repr__(self):
return 'Gaussian'
class Dirichlet(ExponentialFamily):
def __init__(self, num_params):
self.num_params = num_params
self.domain_size = num_params
def sufficient_statistics(self, x):
if len(x.shape) == 1:
return np.array([np.log(x)])
return np.log(x)
def log_base_measure(self, x):
return np.zeros(x.shape[0])
def log_partition(self, eta):
p = eta+1
np.log(p.min())
return gammaln(p).sum(axis=0) - gammaln(p.sum(axis=0))
def grad_log_partition(self, eta):
p = eta+1
np.log(p.min())
return psi(p) - psi(p.sum(axis=0))
def hessian_log_partition(self, eta):
pass
def diagonal_hessian_log_partition(self, eta):
p = eta+1
np.log(p.min())
return polygamma(1, p) - polygamma(1, p.sum(axis=0))
def sample(self, eta, count=1):
return np.random.dirichlet(eta+1, size=count)
def eta_constraints(self, eta):
return (-eta - 1.).T
def grad_eta_constraints(self, eta):
return np.zeros(eta.shape).T - 1.
def diagonal_hessian_eta_constraints(self, eta):
return np.zeros(eta.shape).T
def starting_x(self):
return np.ones(self.num_params) / float(self.num_params)
def __repr__(self):
return 'Dirichlet'
class ZeroInflated(ExponentialFamily):
def __init__(self, base_model):
self.base_model = base_model
self.num_params = 1 + base_model.num_params
self.domain_size = 1
# TODO: generalize to multivariate and arbitrary points that may be in the domain of the base model
def sufficient_statistics(self, x):
ss = np.zeros((x.shape[0], 3))
ss[x == 0, 0] = 1
ss[x != 0, 1:] = self.base_model.sufficient_statistics(x[x != 0])
return ss
def log_base_measure(self, x):
if sps.issparse(x):
# TODO: handle sparse data better
x = x.todense()
result = np.zeros(x.shape[0])
idx = np.where(x[:,0]==0)[0][0]
result[idx] = self.base_model.log_base_measure(x[:,1:][idx])
return result
def log_partition(self, eta):
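        # The zero-inflated partition function sums two components: exp(eta[0]) for the
        # spike at zero and exp(A_base(eta[1:])) for the base model, so the return value
        # below is a log-sum-exp of those two terms.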
return np.log(safe_exp(eta[0]) + safe_exp(self.base_model.log_partition(eta[1:])))
def grad_log_partition(self, eta):
exp_base_log_partition = safe_exp(self.base_model.log_partition(eta[1:]))
exp_x0 = safe_exp(eta[0])
denominator = exp_base_log_partition + exp_x0
w = (exp_base_log_partition / denominator)
return np.concatenate(((exp_x0 / denominator)[:,np.newaxis].T,
self.base_model.grad_log_partition(eta[1:]) * w), axis=0)
def hessian_log_partition(self, eta):
pass
def diagonal_hessian_log_partition(self, eta):
base_log_partition = self.base_model.log_partition(eta[1:])
exp_base_log_partition = safe_exp(base_log_partition)
exp_x0 = safe_exp(eta[0])
exp_sum = safe_exp(eta[0] + base_log_partition)
sum_exp = exp_base_log_partition + exp_x0
sq_sum_exp = safe_sq(sum_exp)
diag_hess_base = self.base_model.diagonal_hessian_log_partition(eta[1:])
sq_grad_base = safe_sq(self.base_model.grad_log_partition(eta[1:]))
numerator = np.zeros(diag_hess_base.shape)
numerator[:,sq_sum_exp != np.inf] = (sum_exp[sq_sum_exp != np.inf] * diag_hess_base[:, sq_sum_exp != np.inf] + exp_x0[sq_sum_exp != np.inf] * sq_grad_base[:, sq_sum_exp != np.inf]) / sq_sum_exp[sq_sum_exp != np.inf]
return np.concatenate(((exp_sum / sq_sum_exp)[:,np.newaxis].T,
exp_base_log_partition * numerator), axis=0)
def sample(self, eta, count=1):
exp_x0 = safe_exp(eta[0])
prob_x0 = exp_x0 / (exp_x0 + safe_exp(self.base_model.log_partition(eta[1:])))
results = np.zeros(count)
nonzero = np.random.random(size=count) > prob_x0
results[nonzero] = self.base_model.sample(eta[1:], count=nonzero.sum())
return results
def eta_constraints(self, eta):
base_constraints = self.base_model.eta_constraints(eta[:,1:]) if len(eta.shape) > 1 else self.base_model.eta_constraints(eta[1:])
return np.array([np.array([])] + [x for x in base_constraints])
def grad_eta_constraints(self, eta):
base_constraints = self.base_model.grad_eta_constraints(eta[:,1:]) if len(eta.shape) > 1 else self.base_model.grad_eta_constraints(eta[1:])
return np.array([np.array([])] + [x for x in base_constraints])
def diagonal_hessian_eta_constraints(self, eta):
base_constraints = self.base_model.diagonal_hessian_eta_constraints(eta[:,1:]) if len(eta.shape) > 1 else self.base_model.diagonal_hessian_eta_constraints(eta[1:])
return np.array([np.array([])] + [x for x in base_constraints])
def starting_x(self):
return np.zeros(1)
def __repr__(self):
return 'Zero-Inflated {0}'.format(self.base_model)
| |
import warnings
from collections import namedtuple
from MySQLdb.constants import FIELD_TYPE
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
from django.db.models.indexes import Index
from django.utils.datastructures import OrderedSet
from django.utils.deprecation import RemovedInDjango21Warning
FieldInfo = namedtuple('FieldInfo', FieldInfo._fields + ('extra', 'is_unsigned'))
InfoLine = namedtuple('InfoLine', 'col_name data_type max_len num_prec num_scale extra column_default is_unsigned')
class DatabaseIntrospection(BaseDatabaseIntrospection):
data_types_reverse = {
FIELD_TYPE.BLOB: 'TextField',
FIELD_TYPE.CHAR: 'CharField',
FIELD_TYPE.DECIMAL: 'DecimalField',
FIELD_TYPE.NEWDECIMAL: 'DecimalField',
FIELD_TYPE.DATE: 'DateField',
FIELD_TYPE.DATETIME: 'DateTimeField',
FIELD_TYPE.DOUBLE: 'FloatField',
FIELD_TYPE.FLOAT: 'FloatField',
FIELD_TYPE.INT24: 'IntegerField',
FIELD_TYPE.LONG: 'IntegerField',
FIELD_TYPE.LONGLONG: 'BigIntegerField',
FIELD_TYPE.SHORT: 'SmallIntegerField',
FIELD_TYPE.STRING: 'CharField',
FIELD_TYPE.TIME: 'TimeField',
FIELD_TYPE.TIMESTAMP: 'DateTimeField',
FIELD_TYPE.TINY: 'IntegerField',
FIELD_TYPE.TINY_BLOB: 'TextField',
FIELD_TYPE.MEDIUM_BLOB: 'TextField',
FIELD_TYPE.LONG_BLOB: 'TextField',
FIELD_TYPE.VAR_STRING: 'CharField',
}
def get_field_type(self, data_type, description):
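        # Example (illustrative): an `int unsigned auto_increment` column maps to
        # 'AutoField' (the auto_increment check below wins), while a plain
        # `int unsigned` column maps to 'PositiveIntegerField'.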
field_type = super().get_field_type(data_type, description)
if 'auto_increment' in description.extra:
if field_type == 'IntegerField':
return 'AutoField'
elif field_type == 'BigIntegerField':
return 'BigAutoField'
if description.is_unsigned:
if field_type == 'IntegerField':
return 'PositiveIntegerField'
elif field_type == 'SmallIntegerField':
return 'PositiveSmallIntegerField'
return field_type
def get_table_list(self, cursor):
"""Return a list of table and view names in the current database."""
cursor.execute("SHOW FULL TABLES")
return [TableInfo(row[0], {'BASE TABLE': 't', 'VIEW': 'v'}.get(row[1]))
for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"""
Return a description of the table with the DB-API cursor.description
        interface.
"""
# information_schema database gives more accurate results for some figures:
# - varchar length returned by cursor.description is an internal length,
# not visible length (#5725)
# - precision and scale (for decimal fields) (#5014)
# - auto_increment is not available in cursor.description
cursor.execute("""
SELECT
column_name, data_type, character_maximum_length,
numeric_precision, numeric_scale, extra, column_default,
CASE
WHEN column_type LIKE '%% unsigned' THEN 1
ELSE 0
END AS is_unsigned
FROM information_schema.columns
WHERE table_name = %s AND table_schema = DATABASE()""", [table_name])
field_info = {line[0]: InfoLine(*line) for line in cursor.fetchall()}
cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
def to_int(i):
return int(i) if i is not None else i
fields = []
for line in cursor.description:
col_name = line[0]
fields.append(
FieldInfo(*(
(col_name,) +
line[1:3] +
(
to_int(field_info[col_name].max_len) or line[3],
to_int(field_info[col_name].num_prec) or line[4],
to_int(field_info[col_name].num_scale) or line[5],
line[6],
field_info[col_name].column_default,
field_info[col_name].extra,
field_info[col_name].is_unsigned,
)
))
)
return fields
def get_relations(self, cursor, table_name):
"""
Return a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
constraints = self.get_key_columns(cursor, table_name)
relations = {}
for my_fieldname, other_table, other_field in constraints:
relations[my_fieldname] = (other_field, other_table)
return relations
def get_key_columns(self, cursor, table_name):
"""
Return a list of (column_name, referenced_table_name, referenced_column_name)
for all key columns in the given table.
"""
key_columns = []
cursor.execute("""
SELECT column_name, referenced_table_name, referenced_column_name
FROM information_schema.key_column_usage
WHERE table_name = %s
AND table_schema = DATABASE()
AND referenced_table_name IS NOT NULL
AND referenced_column_name IS NOT NULL""", [table_name])
key_columns.extend(cursor.fetchall())
return key_columns
def get_indexes(self, cursor, table_name):
warnings.warn(
"get_indexes() is deprecated in favor of get_constraints().",
RemovedInDjango21Warning, stacklevel=2
)
cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name))
# Do a two-pass search for indexes: on first pass check which indexes
# are multicolumn, on second pass check which single-column indexes
# are present.
rows = list(cursor.fetchall())
multicol_indexes = set()
for row in rows:
if row[3] > 1:
multicol_indexes.add(row[2])
indexes = {}
for row in rows:
if row[2] in multicol_indexes:
continue
if row[4] not in indexes:
indexes[row[4]] = {'primary_key': False, 'unique': False}
# It's possible to have the unique and PK constraints in separate indexes.
if row[2] == 'PRIMARY':
indexes[row[4]]['primary_key'] = True
if not row[1]:
indexes[row[4]]['unique'] = True
return indexes
def get_storage_engine(self, cursor, table_name):
"""
Retrieve the storage engine for a given table. Return the default
storage engine if the table doesn't exist.
"""
cursor.execute(
"SELECT engine "
"FROM information_schema.tables "
"WHERE table_name = %s", [table_name])
result = cursor.fetchone()
if not result:
return self.connection.features._mysql_storage_engine
return result[0]
def get_constraints(self, cursor, table_name):
"""
Retrieve any constraints or keys (unique, pk, fk, check, index) across
one or more columns.
"""
constraints = {}
# Get the actual constraint names and columns
name_query = """
SELECT kc.`constraint_name`, kc.`column_name`,
kc.`referenced_table_name`, kc.`referenced_column_name`
FROM information_schema.key_column_usage AS kc
WHERE
kc.table_schema = DATABASE() AND
kc.table_name = %s
"""
cursor.execute(name_query, [table_name])
for constraint, column, ref_table, ref_column in cursor.fetchall():
if constraint not in constraints:
constraints[constraint] = {
'columns': OrderedSet(),
'primary_key': False,
'unique': False,
'index': False,
'check': False,
'foreign_key': (ref_table, ref_column) if ref_column else None,
}
constraints[constraint]['columns'].add(column)
# Now get the constraint types
type_query = """
SELECT c.constraint_name, c.constraint_type
FROM information_schema.table_constraints AS c
WHERE
c.table_schema = DATABASE() AND
c.table_name = %s
"""
cursor.execute(type_query, [table_name])
for constraint, kind in cursor.fetchall():
if kind.lower() == "primary key":
constraints[constraint]['primary_key'] = True
constraints[constraint]['unique'] = True
elif kind.lower() == "unique":
constraints[constraint]['unique'] = True
# Now add in the indexes
cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name))
for table, non_unique, index, colseq, column, type_ in [x[:5] + (x[10],) for x in cursor.fetchall()]:
if index not in constraints:
constraints[index] = {
'columns': OrderedSet(),
'primary_key': False,
'unique': False,
'check': False,
'foreign_key': None,
}
constraints[index]['index'] = True
constraints[index]['type'] = Index.suffix if type_ == 'BTREE' else type_.lower()
constraints[index]['columns'].add(column)
# Convert the sorted sets to lists
for constraint in constraints.values():
constraint['columns'] = list(constraint['columns'])
return constraints
| |
import pygame
import time
import pyaudio
import analyse
import numpy
import wave
import sys
from RPi import GPIO
from array import array
from pygame.locals import *
pygame.mixer.pre_init(44100, -16, 1, 1024)
pygame.init()
recorded_note = []
KEY_C = 12
KEY_D = 16
KEY_E = 18
KEY_F = 19
KEY_G = 21
KEY_H = 22
KEY_I = 23
KEY_J = 24
KEY_K = 26
KEY_L = 29
KEY_M = 31
KEY_N = 32
KEY_O = 33
KEY_P = 35
KEY_Q = 38
KEY_R = 40
KEY_S = 27
KEY_T = 28
RECORD = 37
LED = 36
freq_C = 261.6
freq_D = 293.7
freq_E = 329.6
freq_F = 349.2
freq_G = 184.9
freq_H = 220.0
freq_I = 146.8
freq_J = 130.8
freq_K = 123.4
freq_L = 116.5
freq_M = 174.6
freq_N = 277.1
freq_O = 293.6
freq_P = 311.1
freq_Q = 207.6
freq_R = 87.3
freq_S = 82.4
freq_T = 110.0
GPIO.setmode(GPIO.BOARD)
GPIO.setup(KEY_C, GPIO.IN, GPIO.PUD_DOWN)
GPIO.setup(KEY_D, GPIO.IN, GPIO.PUD_DOWN)
GPIO.setup(KEY_E, GPIO.IN, GPIO.PUD_DOWN)
GPIO.setup(KEY_F, GPIO.IN, GPIO.PUD_DOWN)
GPIO.setup(KEY_G, GPIO.IN, GPIO.PUD_DOWN)
GPIO.setup(KEY_H, GPIO.IN, GPIO.PUD_DOWN)
GPIO.setup(KEY_I, GPIO.IN, GPIO.PUD_DOWN)
GPIO.setup(KEY_J, GPIO.IN, GPIO.PUD_DOWN)
GPIO.setup(KEY_K, GPIO.IN, GPIO.PUD_DOWN)
GPIO.setup(KEY_L, GPIO.IN, GPIO.PUD_DOWN)
GPIO.setup(KEY_M, GPIO.IN, GPIO.PUD_DOWN)
GPIO.setup(KEY_N, GPIO.IN, GPIO.PUD_DOWN)
GPIO.setup(KEY_O, GPIO.IN, GPIO.PUD_DOWN)
GPIO.setup(KEY_P, GPIO.IN, GPIO.PUD_DOWN)
GPIO.setup(KEY_Q, GPIO.IN, GPIO.PUD_DOWN)
GPIO.setup(KEY_R, GPIO.IN, GPIO.PUD_DOWN)
GPIO.setup(KEY_S, GPIO.IN, GPIO.PUD_DOWN)
GPIO.setup(KEY_T, GPIO.IN, GPIO.PUD_DOWN)
GPIO.setup(RECORD, GPIO.IN, GPIO.PUD_DOWN)
GPIO.setup(LED, GPIO.OUT)
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 18
RATE = 44100
RECORD_SECONDS = ()
WAVE_OUTPUT_FILENAME = "%s filename.wav"
class ToneSound(pygame.mixer.Sound):
def __init__(self, frequency, volume):
self.frequency = frequency
pygame.mixer.Sound.__init__(self, self.build_samples())
self.set_volume(volume)
def build_samples(self):
period = int(round(pygame.mixer.get_init()[0] / self.frequency))
samples = array("h", [0] * period)
amplitude = 2 ** (abs(pygame.mixer.get_init()[1]) - 1) - 1
        for t in xrange(period):
            if t < period / 2:
                samples[t] = amplitude
            else:
                samples[t] = -amplitude
return samples
note_C = ToneSound(frequency = freq_C, volume = 1)
note_D = ToneSound(frequency = freq_D, volume = 1)
note_E = ToneSound(frequency = freq_E, volume = 1)
note_F = ToneSound(frequency = freq_F, volume = 1)
note_G = ToneSound(frequency = freq_G, volume = 1)
note_H = ToneSound(frequency = freq_H, volume = 1)
note_I = ToneSound(frequency = freq_I, volume = 1)
note_J = ToneSound(frequency = freq_J, volume = 1)
note_K = ToneSound(frequency = freq_K, volume = 1)
note_L = ToneSound(frequency = freq_L, volume = 1)
note_M = ToneSound(frequency = freq_M, volume = 1)
note_N = ToneSound(frequency = freq_N, volume = 1)
note_O = ToneSound(frequency = freq_O, volume = 1)
note_P = ToneSound(frequency = freq_P, volume = 1)
note_Q = ToneSound(frequency = freq_Q, volume = 1)
note_R = ToneSound(frequency = freq_R, volume = 1)
note_S = ToneSound(frequency = freq_S, volume = 1)
note_T = ToneSound(frequency = freq_T, volume = 1)
def light_off():
GPIO.output(LED, False)
def wait_for_keydown():
    # Block until any piano key or the record button is pressed.
    keys = [KEY_C, KEY_D, KEY_E, KEY_F, KEY_G, KEY_H, KEY_I, KEY_J, KEY_K, KEY_L,
            KEY_M, KEY_N, KEY_O, KEY_P, KEY_Q, KEY_R, KEY_S, KEY_T, RECORD]
    while not any(GPIO.input(key) for key in keys):
        time.sleep(0.01)
def wait_for_keyup(pin):
while GPIO.input(pin):
time.sleep(0.1)
# Map each note label to its ToneSound; 'B' marks a rest (silence).
NOTES = {
    'C': note_C, 'D': note_D, 'E': note_E, 'F': note_F, 'G': note_G,
    'H': note_H, 'I': note_I, 'J': note_J, 'K': note_K, 'L': note_L,
    'M': note_M, 'N': note_N, 'O': note_O, 'P': note_P, 'Q': note_Q,
    'R': note_R, 'S': note_S, 'T': note_T,
}

def play_note():
    # Replay the recorded sequence; each entry is a (label, duration in seconds) pair.
    for label, duration in recorded_note:
        if label == 'B':
            print(label, duration)
            time.sleep(duration)
        elif label in NOTES:
            print(label, duration)
            note = NOTES[label]
            note.play(-1)
            time.sleep(duration)
            note.stop()
record_flag = True
p = pyaudio.PyAudio()
frames = []
while True:
start_time = time.time()
wait_for_keydown()
diff_time = time.time() - start_time
if record_flag:
recorded_note.append(('B', diff_time))
if GPIO.input(KEY_C):
start_time = time.time()
note_C.play(-1)
wait_for_keyup(KEY_C)
note_C.stop()
diff_time = time.time() - start_time
if record_flag:
recorded_note.append(('C', diff_time))
elif GPIO.input(KEY_D):
start_time = time.time()
note_D.play(-1)
wait_for_keyup(KEY_D)
note_D.stop()
diff_time = time.time() - start_time
if record_flag:
recorded_note.append(('D', diff_time))
elif GPIO.input(KEY_E):
start_time = time.time()
note_E.play(-1)
wait_for_keyup(KEY_E)
note_E.stop()
diff_time = time.time() - start_time
if record_flag:
recorded_note.append(('E', diff_time))
elif GPIO.input(KEY_F):
start_time = time.time()
note_F.play(-1)
wait_for_keyup(KEY_F)
note_F.stop()
diff_time = time.time() - start_time
if record_flag:
recorded_note.append(('F', diff_time))
elif GPIO.input(KEY_G):
start_time = time.time()
note_G.play(-1)
wait_for_keyup(KEY_G)
note_G.stop()
diff_time = time.time() - start_time
if record_flag:
recorded_note.append(('G', diff_time))
elif GPIO.input(KEY_H):
start_time = time.time()
note_H.play(-1)
wait_for_keyup(KEY_H)
note_H.stop()
diff_time = time.time() - start_time
if record_flag:
recorded_note.append(('H', diff_time))
elif GPIO.input(KEY_I):
start_time = time.time()
note_I.play(-1)
wait_for_keyup(KEY_I)
note_I.stop()
diff_time = time.time() - start_time
if record_flag:
recorded_note.append(('I', diff_time))
elif GPIO.input(KEY_J):
start_time = time.time()
note_J.play(-1)
wait_for_keyup(KEY_J)
note_J.stop()
diff_time = time.time() - start_time
if record_flag:
recorded_note.append(('J', diff_time))
elif GPIO.input(KEY_K):
start_time = time.time()
note_K.play(-1)
wait_for_keyup(KEY_K)
note_K.stop()
diff_time = time.time() - start_time
if record_flag:
recorded_note.append(('K', diff_time))
elif GPIO.input(KEY_L):
start_time = time.time()
note_L.play(-1)
wait_for_keyup(KEY_L)
note_L.stop()
diff_time = time.time() - start_time
if record_flag:
recorded_note.append(('L', diff_time))
elif GPIO.input(KEY_M):
start_time = time.time()
note_M.play(-1)
wait_for_keyup(KEY_M)
note_M.stop()
diff_time = time.time() - start_time
if record_flag:
recorded_note.append(('M', diff_time))
elif GPIO.input(KEY_N):
start_time = time.time()
note_N.play(-1)
wait_for_keyup(KEY_N)
note_N.stop()
diff_time = time.time() - start_time
if record_flag:
recorded_note.append(('N', diff_time))
elif GPIO.input(KEY_O):
start_time = time.time()
note_O.play(-1)
wait_for_keyup(KEY_O)
note_O.stop()
diff_time = time.time() - start_time
if record_flag:
recorded_note.append(('O', diff_time))
elif GPIO.input(KEY_P):
start_time = time.time()
note_P.play(-1)
wait_for_keyup(KEY_P)
note_P.stop()
diff_time = time.time() - start_time
if record_flag:
recorded_note.append(('P', diff_time))
elif GPIO.input(KEY_Q):
start_time = time.time()
note_Q.play(-1)
wait_for_keyup(KEY_Q)
note_Q.stop()
diff_time = time.time() - start_time
if record_flag:
recorded_note.append(('Q', diff_time))
elif GPIO.input(KEY_R):
start_time = time.time()
note_R.play(-1)
wait_for_keyup(KEY_R)
note_R.stop()
diff_time = time.time() - start_time
if record_flag:
recorded_note.append(('R', diff_time))
elif GPIO.input(KEY_S):
start_time = time.time()
note_S.play(-1)
wait_for_keyup(KEY_S)
note_S.stop()
diff_time = time.time() - start_time
if record_flag:
recorded_note.append(('S', diff_time))
elif GPIO.input(KEY_T):
start_time = time.time()
note_T.play(-1)
wait_for_keyup(KEY_T)
note_T.stop()
diff_time = time.time() - start_time
if record_flag:
recorded_note.append(('T', diff_time))
elif GPIO.input(RECORD):
if record_flag == False:
recorded_note = []
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
print('Recording started!')
else:
print('Recording stopped!')
record_flag = not record_flag
stream.stop_stream()
stream.close()
p.terminate()
            RECORD1 = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
RECORD1.setnchannels(CHANNELS)
RECORD1.setsampwidth(p.get_sample_size(FORMAT))
RECORD1.setframerate(RATE)
RECORD1.writeframes(B''.join(frames))
RECORD1.close()
time.sleep(.5)
GPIO.output(LED, record_flag)
light_off()
| |
###############################################################################
##
## Copyright (C) 2011-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
__all__ = (
'WebSocketAdapterProtocol',
'WebSocketServerProtocol',
'WebSocketClientProtocol',
'WebSocketAdapterFactory',
'WebSocketServerFactory',
'WebSocketClientFactory',
'WrappingWebSocketAdapter',
'WrappingWebSocketServerProtocol',
'WrappingWebSocketClientProtocol',
'WrappingWebSocketServerFactory',
'WrappingWebSocketClientFactory',
'listenWS',
'connectWS',
'WampWebSocketServerProtocol',
'WampWebSocketServerFactory',
'WampWebSocketClientProtocol',
'WampWebSocketClientFactory',
)
from base64 import b64encode, b64decode
from zope.interface import implementer
import twisted.internet.protocol
from twisted.internet.defer import maybeDeferred
from twisted.python import log
from twisted.internet.interfaces import ITransport
from autobahn.wamp import websocket
from autobahn.websocket import protocol
from autobahn.websocket import http
from autobahn.websocket.compress import PerMessageDeflateOffer, \
PerMessageDeflateOfferAccept, \
PerMessageDeflateResponse, \
PerMessageDeflateResponseAccept
class WebSocketAdapterProtocol(twisted.internet.protocol.Protocol):
"""
Adapter class for Twisted WebSocket client and server protocols.
"""
def connectionMade(self):
## the peer we are connected to
try:
peer = self.transport.getPeer()
except:
## ProcessProtocols lack getPeer()
self.peer = "?"
else:
try:
self.peer = "%s:%d" % (peer.host, peer.port)
except:
## eg Unix Domain sockets don't have host/port
self.peer = str(peer)
self._connectionMade()
        ## Configure TCP_NODELAY (Nagle's algorithm) according to self.tcpNoDelay
try:
self.transport.setTcpNoDelay(self.tcpNoDelay)
except:
## eg Unix Domain sockets throw Errno 22 on this
pass
def connectionLost(self, reason):
self._connectionLost(reason)
def dataReceived(self, data):
self._dataReceived(data)
def _closeConnection(self, abort = False):
if abort and hasattr(self.transport, 'abortConnection'):
## ProcessProtocol lacks abortConnection()
self.transport.abortConnection()
else:
self.transport.loseConnection()
def _onOpen(self):
self.onOpen()
def _onMessageBegin(self, isBinary):
self.onMessageBegin(isBinary)
def _onMessageFrameBegin(self, length):
self.onMessageFrameBegin(length)
def _onMessageFrameData(self, payload):
self.onMessageFrameData(payload)
def _onMessageFrameEnd(self):
self.onMessageFrameEnd()
def _onMessageFrame(self, payload):
self.onMessageFrame(payload)
def _onMessageEnd(self):
self.onMessageEnd()
def _onMessage(self, payload, isBinary):
self.onMessage(payload, isBinary)
def _onPing(self, payload):
self.onPing(payload)
def _onPong(self, payload):
self.onPong(payload)
def _onClose(self, wasClean, code, reason):
self.onClose(wasClean, code, reason)
def registerProducer(self, producer, streaming):
"""
Register a Twisted producer with this protocol.
Modes: Hybi, Hixie
:param producer: A Twisted push or pull producer.
:type producer: object
:param streaming: Producer type.
:type streaming: bool
"""
self.transport.registerProducer(producer, streaming)
class WebSocketServerProtocol(WebSocketAdapterProtocol, protocol.WebSocketServerProtocol):
"""
Base class for Twisted-based WebSocket server protocols.
"""
def _onConnect(self, request):
## onConnect() will return the selected subprotocol or None
## or a pair (protocol, headers) or raise an HttpException
##
res = maybeDeferred(self.onConnect, request)
res.addCallback(self.succeedHandshake)
def forwardError(failure):
if failure.check(http.HttpException):
return self.failHandshake(failure.value.reason, failure.value.code)
else:
if self.debug:
self.factory._log("Unexpected exception in onConnect ['%s']" % failure.value)
return self.failHandshake(http.INTERNAL_SERVER_ERROR[1], http.INTERNAL_SERVER_ERROR[0])
res.addErrback(forwardError)
class WebSocketClientProtocol(WebSocketAdapterProtocol, protocol.WebSocketClientProtocol):
"""
Base class for Twisted-based WebSocket client protocols.
"""
def _onConnect(self, response):
self.onConnect(response)
class WebSocketAdapterFactory:
"""
Adapter class for Twisted-based WebSocket client and server factories.
"""
def _log(self, msg):
log.msg(msg)
def _callLater(self, delay, fun):
return self.reactor.callLater(delay, fun)
class WebSocketServerFactory(WebSocketAdapterFactory, protocol.WebSocketServerFactory, twisted.internet.protocol.ServerFactory):
"""
Base class for Twisted-based WebSocket server factories.
.. seealso:: `twisted.internet.protocol.ServerFactory <http://twistedmatrix.com/documents/current/api/twisted.internet.protocol.ServerFactory.html>`_
"""
def __init__(self, *args, **kwargs):
"""
In addition to all arguments to the constructor of
:class:`autobahn.websocket.protocol.WebSocketServerFactory`,
you can supply a `reactor` keyword argument to specify the
Twisted reactor to be used.
"""
## lazy import to avoid reactor install upon module import
if 'reactor' in kwargs:
if kwargs['reactor']:
self.reactor = kwargs['reactor']
else:
from twisted.internet import reactor
self.reactor = reactor
del kwargs['reactor']
else:
from twisted.internet import reactor
self.reactor = reactor
protocol.WebSocketServerFactory.__init__(self, *args, **kwargs)
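## A minimal sketch of the `reactor` keyword argument described in the
## docstring above; "my_reactor" and the URL are placeholders, not part of
## this module:
##
##   factory = WebSocketServerFactory("ws://localhost:9000", reactor = my_reactor)
##
## Omitting `reactor` (or passing None) falls back to the default
## twisted.internet.reactor, as implemented in __init__ above.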
class WebSocketClientFactory(WebSocketAdapterFactory, protocol.WebSocketClientFactory, twisted.internet.protocol.ClientFactory):
"""
Base class for Twisted-based WebSocket client factories.
.. seealso:: `twisted.internet.protocol.ClientFactory <http://twistedmatrix.com/documents/current/api/twisted.internet.protocol.ClientFactory.html>`_
"""
def __init__(self, *args, **kwargs):
"""
In addition to all arguments to the constructor of
:class:`autobahn.websocket.protocol.WebSocketClientFactory`,
you can supply a `reactor` keyword argument to specify the
Twisted reactor to be used.
"""
## lazy import to avoid reactor install upon module import
if 'reactor' in kwargs:
if kwargs['reactor']:
self.reactor = kwargs['reactor']
else:
from twisted.internet import reactor
self.reactor = reactor
del kwargs['reactor']
else:
from twisted.internet import reactor
self.reactor = reactor
protocol.WebSocketClientFactory.__init__(self, *args, **kwargs)
@implementer(ITransport)
class WrappingWebSocketAdapter:
"""
An adapter for stream-based transport over WebSocket.
This follows `websockify <https://github.com/kanaka/websockify>`_
and should be compatible with that.
It uses WebSocket subprotocol negotiation and supports the
following WebSocket subprotocols:
- ``binary`` (or a compatible subprotocol)
- ``base64``
Octets are either transmitted as the payload of WebSocket binary
messages when using the ``binary`` subprotocol (or an alternative
binary compatible subprotocol), or encoded with Base64 and then
transmitted as the payload of WebSocket text messages when using
the ``base64`` subprotocol.
"""
def onConnect(self, requestOrResponse):
## Negotiate either the 'binary' or the 'base64' WebSocket subprotocol
##
if isinstance(requestOrResponse, protocol.ConnectionRequest):
request = requestOrResponse
for p in request.protocols:
if p in self.factory._subprotocols:
self._binaryMode = (p != 'base64')
return p
raise http.HttpException(http.NOT_ACCEPTABLE[0], "this server only speaks %s WebSocket subprotocols" % self.factory._subprotocols)
elif isinstance(requestOrResponse, protocol.ConnectionResponse):
response = requestOrResponse
if response.protocol not in self.factory._subprotocols:
self.failConnection(protocol.WebSocketProtocol.CLOSE_STATUS_CODE_PROTOCOL_ERROR, "this client only speaks %s WebSocket subprotocols" % self.factory._subprotocols)
self._binaryMode = (response.protocol != 'base64')
else:
## should not arrive here
raise Exception("logic error")
def onOpen(self):
self._proto.connectionMade()
def onMessage(self, payload, isBinary):
if isBinary != self._binaryMode:
self.failConnection(protocol.WebSocketProtocol.CLOSE_STATUS_CODE_UNSUPPORTED_DATA, "message payload type does not match the negotiated subprotocol")
else:
if not isBinary:
try:
payload = b64decode(payload)
except Exception as e:
self.failConnection(protocol.WebSocketProtocol.CLOSE_STATUS_CODE_INVALID_PAYLOAD, "message payload base64 decoding error: {}".format(e))
#print("forwarding payload: {}".format(binascii.hexlify(payload)))
self._proto.dataReceived(payload)
def onClose(self, wasClean, code, reason):
self._proto.connectionLost(None)
def write(self, data):
#print("sending payload: {}".format(binascii.hexlify(data)))
## part of ITransport
assert(type(data) == bytes)
if self._binaryMode:
self.sendMessage(data, isBinary = True)
else:
data = b64encode(data)
self.sendMessage(data, isBinary = False)
def writeSequence(self, data):
## part of ITransport
for d in data:
self.write(d)
def loseConnection(self):
## part of ITransport
self.sendClose()
def getPeer(self):
## part of ITransport
return self.transport.getPeer()
def getHost(self):
## part of ITransport
return self.transport.getHost()
class WrappingWebSocketServerProtocol(WrappingWebSocketAdapter, WebSocketServerProtocol):
"""
Server protocol for stream-based transport over WebSocket.
"""
class WrappingWebSocketClientProtocol(WrappingWebSocketAdapter, WebSocketClientProtocol):
"""
Client protocol for stream-based transport over WebSocket.
"""
class WrappingWebSocketServerFactory(WebSocketServerFactory):
"""
Wrapping server factory for stream-based transport over WebSocket.
"""
def __init__(self,
factory,
url,
reactor = None,
enableCompression = True,
autoFragmentSize = 0,
subprotocol = None,
debug = False):
"""
:param factory: Stream-based factory to be wrapped.
:type factory: A subclass of ``twisted.internet.protocol.Factory``
:param url: WebSocket URL of the server this server factory will work for.
:type url: unicode
"""
self._factory = factory
self._subprotocols = ['binary', 'base64']
if subprotocol:
self._subprotocols.append(subprotocol)
WebSocketServerFactory.__init__(self,
url = url,
reactor = reactor,
protocols = self._subprotocols,
debug = debug)
## automatically fragment outgoing traffic into WebSocket frames
## of this size
self.setProtocolOptions(autoFragmentSize = autoFragmentSize)
## play nice and perform WS closing handshake
self.setProtocolOptions(failByDrop = False)
if enableCompression:
## Enable WebSocket extension "permessage-deflate".
##
## Function to accept offers from the client ..
def accept(offers):
for offer in offers:
if isinstance(offer, PerMessageDeflateOffer):
return PerMessageDeflateOfferAccept(offer)
self.setProtocolOptions(perMessageCompressionAccept = accept)
def buildProtocol(self, addr):
proto = WrappingWebSocketServerProtocol()
proto.factory = self
proto._proto = self._factory.buildProtocol(addr)
proto._proto.transport = proto
return proto
def startFactory(self):
self._factory.startFactory()
WebSocketServerFactory.startFactory(self)
def stopFactory(self):
self._factory.stopFactory()
WebSocketServerFactory.stopFactory(self)
class WrappingWebSocketClientFactory(WebSocketClientFactory):
"""
Wrapping client factory for stream-based transport over WebSocket.
"""
def __init__(self,
factory,
url,
reactor = None,
enableCompression = True,
autoFragmentSize = 0,
subprotocol = None,
debug = False):
"""
:param factory: Stream-based factory to be wrapped.
:type factory: A subclass of ``twisted.internet.protocol.Factory``
:param url: WebSocket URL of the server this client factory will connect to.
:type url: unicode
"""
self._factory = factory
self._subprotocols = ['binary', 'base64']
if subprotocol:
self._subprotocols.append(subprotocol)
WebSocketClientFactory.__init__(self,
url = url,
reactor = reactor,
protocols = self._subprotocols,
debug = debug)
## automatically fragment outgoing traffic into WebSocket frames
## of this size
self.setProtocolOptions(autoFragmentSize = autoFragmentSize)
## play nice and perform WS closing handshake
self.setProtocolOptions(failByDrop = False)
if enableCompression:
## Enable WebSocket extension "permessage-deflate".
##
## The extensions offered to the server ..
offers = [PerMessageDeflateOffer()]
self.setProtocolOptions(perMessageCompressionOffers = offers)
## Function to accept responses from the server ..
def accept(response):
if isinstance(response, PerMessageDeflateResponse):
return PerMessageDeflateResponseAccept(response)
self.setProtocolOptions(perMessageCompressionAccept = accept)
def buildProtocol(self, addr):
proto = WrappingWebSocketClientProtocol()
proto.factory = self
proto._proto = self._factory.buildProtocol(addr)
proto._proto.transport = proto
return proto
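## A minimal sketch of wrapping an existing stream-based Twisted factory so it
## runs over WebSocket (websockify-style), as described for
## WrappingWebSocketAdapter above. The Echo protocol and the URL are
## hypothetical placeholders:
##
##   from twisted.internet import reactor
##   from twisted.internet.protocol import Factory, Protocol
##
##   class Echo(Protocol):
##       def dataReceived(self, data):
##           self.transport.write(data)
##
##   stream_factory = Factory()
##   stream_factory.protocol = Echo
##
##   ws_factory = WrappingWebSocketServerFactory(stream_factory, "ws://localhost:9000")
##   listenWS(ws_factory)
##   reactor.run()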
def connectWS(factory, contextFactory = None, timeout = 30, bindAddress = None):
"""
Establish WebSocket connection to a server. The connection parameters like target
host, port, resource and others are provided via the factory.
:param factory: The WebSocket protocol factory to be used for creating client protocol instances.
:type factory: An :class:`autobahn.websocket.WebSocketClientFactory` instance.
:param contextFactory: SSL context factory, required for secure WebSocket connections ("wss").
:type contextFactory: A `twisted.internet.ssl.ClientContextFactory <http://twistedmatrix.com/documents/current/api/twisted.internet.ssl.ClientContextFactory.html>`_ instance.
:param timeout: Number of seconds to wait before assuming the connection has failed.
:type timeout: int
:param bindAddress: A (host, port) tuple of local address to bind to, or None.
:type bindAddress: tuple
:returns: The connector.
:returns: The connector.
:rtype: An object which implements `twisted.internet.interfaces.IConnector <http://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IConnector.html>`_.
"""
## lazy import to avoid reactor install upon module import
if hasattr(factory, 'reactor'):
reactor = factory.reactor
else:
from twisted.internet import reactor
if factory.proxy is not None:
if factory.isSecure:
raise Exception("WSS over explicit proxies not implemented")
else:
conn = reactor.connectTCP(factory.proxy['host'], factory.proxy['port'], factory, timeout, bindAddress)
else:
if factory.isSecure:
if contextFactory is None:
# create default client SSL context factory when none given
from twisted.internet import ssl
contextFactory = ssl.ClientContextFactory()
conn = reactor.connectSSL(factory.host, factory.port, factory, contextFactory, timeout, bindAddress)
else:
conn = reactor.connectTCP(factory.host, factory.port, factory, timeout, bindAddress)
return conn
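## A minimal client sketch for connectWS(). "MyClientProtocol" stands for any
## WebSocketClientProtocol subclass of your own; the URL is a placeholder:
##
##   from twisted.internet import reactor
##
##   factory = WebSocketClientFactory("ws://localhost:9000", debug = False)
##   factory.protocol = MyClientProtocol
##   connectWS(factory)
##   reactor.run()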
def listenWS(factory, contextFactory = None, backlog = 50, interface = ''):
"""
Listen for incoming WebSocket connections from clients. The connection parameters like
listening port and others are provided via the factory.
:param factory: The WebSocket protocol factory to be used for creating server protocol instances.
:type factory: An :class:`autobahn.websocket.WebSocketServerFactory` instance.
:param contextFactory: SSL context factory, required for secure WebSocket connections ("wss").
:type contextFactory: A twisted.internet.ssl.ContextFactory.
:param backlog: Size of the listen queue.
:type backlog: int
:param interface: The interface (derived from hostname given) to bind to, defaults to '' (all).
:type interface: str
:returns: The listening port.
:returns: The listening port.
:rtype: An object that implements `twisted.internet.interfaces.IListeningPort <http://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IListeningPort.html>`_.
"""
## lazy import to avoid reactor install upon module import
if hasattr(factory, 'reactor'):
reactor = factory.reactor
else:
from twisted.internet import reactor
if factory.isSecure:
if contextFactory is None:
raise Exception("Secure WebSocket listen requested, but no SSL context factory given")
listener = reactor.listenSSL(factory.port, factory, contextFactory, backlog, interface)
else:
listener = reactor.listenTCP(factory.port, factory, backlog, interface)
return listener
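## A minimal server sketch for listenWS(). "MyServerProtocol" stands for any
## WebSocketServerProtocol subclass of your own; the URL is a placeholder:
##
##   from twisted.internet import reactor
##
##   factory = WebSocketServerFactory("ws://localhost:9000", debug = False)
##   factory.protocol = MyServerProtocol
##   listenWS(factory)
##   reactor.run()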
class WampWebSocketServerProtocol(websocket.WampWebSocketServerProtocol, WebSocketServerProtocol):
"""
Base class for Twisted-based WAMP-over-WebSocket server protocols.
"""
class WampWebSocketServerFactory(websocket.WampWebSocketServerFactory, WebSocketServerFactory):
"""
Base class for Twisted-based WAMP-over-WebSocket server factories.
"""
protocol = WampWebSocketServerProtocol
def __init__(self, factory, *args, **kwargs):
if 'serializers' in kwargs:
serializers = kwargs['serializers']
del kwargs['serializers']
else:
serializers = None
if 'debug_wamp' in kwargs:
debug_wamp = kwargs['debug_wamp']
del kwargs['debug_wamp']
else:
debug_wamp = False
websocket.WampWebSocketServerFactory.__init__(self, factory, serializers, debug_wamp = debug_wamp)
kwargs['protocols'] = self._protocols
# noinspection PyCallByClass
WebSocketServerFactory.__init__(self, *args, **kwargs)
class WampWebSocketClientProtocol(websocket.WampWebSocketClientProtocol, WebSocketClientProtocol):
"""
Base class for Twisted-based WAMP-over-WebSocket client protocols.
"""
class WampWebSocketClientFactory(websocket.WampWebSocketClientFactory, WebSocketClientFactory):
"""
Base class for Twisted-based WAMP-over-WebSocket client factories.
"""
protocol = WampWebSocketClientProtocol
def __init__(self, factory, *args, **kwargs):
if 'serializers' in kwargs:
serializers = kwargs['serializers']
del kwargs['serializers']
else:
serializers = None
if 'debug_wamp' in kwargs:
debug_wamp = kwargs['debug_wamp']
del kwargs['debug_wamp']
else:
debug_wamp = False
websocket.WampWebSocketClientFactory.__init__(self, factory, serializers, debug_wamp = debug_wamp)
kwargs['protocols'] = self._protocols
WebSocketClientFactory.__init__(self, *args, **kwargs)
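## A minimal sketch of serving WAMP over WebSocket with the factories above.
## "session_factory" is assumed to be a WAMP session factory created elsewhere
## (for example with the helpers in autobahn.twisted.wamp); it is not defined
## in this module, and the URL is a placeholder:
##
##   transport_factory = WampWebSocketServerFactory(session_factory,
##                                                  "ws://localhost:8080",
##                                                  debug = False)
##   listenWS(transport_factory)
##   reactor.run()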
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Smoke tests for gclient.py.
Shell out 'gclient' and run basic conformance tests.
This test assumes GClientSmokeBase.URL_BASE is valid.
"""
import logging
import os
import re
import subprocess
import sys
import unittest
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, ROOT_DIR)
from testing_support.fake_repos import join, write
from testing_support.fake_repos import FakeReposTestBase, FakeRepoTransitive
import gclient_utils
import subprocess2
GCLIENT_PATH = os.path.join(ROOT_DIR, 'gclient')
COVERAGE = False
class GClientSmokeBase(FakeReposTestBase):
def setUp(self):
super(GClientSmokeBase, self).setUp()
# Make sure it doesn't try to auto update when testing!
self.env = os.environ.copy()
self.env['DEPOT_TOOLS_UPDATE'] = '0'
def gclient(self, cmd, cwd=None):
if not cwd:
cwd = self.root_dir
if COVERAGE:
# Don't use the wrapper script.
cmd_base = ['coverage', 'run', '-a', GCLIENT_PATH + '.py']
else:
cmd_base = [GCLIENT_PATH]
cmd = cmd_base + cmd
process = subprocess.Popen(cmd, cwd=cwd, env=self.env,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=sys.platform.startswith('win'))
(stdout, stderr) = process.communicate()
logging.debug("XXX: %s\n%s\nXXX" % (' '.join(cmd), stdout))
logging.debug("YYY: %s\n%s\nYYY" % (' '.join(cmd), stderr))
# pylint: disable=E1103
return (stdout.replace('\r\n', '\n'), stderr.replace('\r\n', '\n'),
process.returncode)
def untangle(self, stdout):
tasks = {}
remaining = []
for line in stdout.splitlines(False):
m = re.match(r'^(\d)+>(.*)$', line)
if not m:
remaining.append(line)
else:
self.assertEquals([], remaining)
tasks.setdefault(int(m.group(1)), []).append(m.group(2))
out = []
for key in sorted(tasks.iterkeys()):
out.extend(tasks[key])
out.extend(remaining)
return '\n'.join(out)
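# Sketch of what untangle() does with parallel "--jobs" output: lines prefixed
# with a task number and '>' are grouped per task and re-emitted in task
# order, e.g. "1>foo\n2>bar\n1>baz" becomes "foo\nbaz\nbar".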
def parseGclient(self, cmd, items, expected_stderr='', untangle=False):
"""Parse gclient's output to make it easier to test.
If untangle is True, tries to sort out the output from parallel checkout."""
(stdout, stderr, returncode) = self.gclient(cmd)
if untangle:
stdout = self.untangle(stdout)
self.checkString(expected_stderr, stderr)
self.assertEquals(0, returncode)
return self.checkBlock(stdout, items)
def splitBlock(self, stdout):
"""Split gclient's output into logical execution blocks.
___ running 'foo' at '/bar'
(...)
___ running 'baz' at '/bar'
(...)
will result in 2 items of len((...).splitlines()) each.
"""
results = []
for line in stdout.splitlines(False):
# Intentionally skips empty lines.
if not line:
continue
if line.startswith('__'):
match = re.match(r'^________ ([a-z]+) \'(.*)\' in \'(.*)\'$', line)
if not match:
match = re.match(r'^_____ (.*) is missing, synching instead$', line)
if match:
# This happens when a dependency is deleted; we should probably not
# output this message.
results.append([line])
elif (
not re.match(
r'_____ [^ ]+ : Attempting rebase onto [0-9a-f]+...',
line) and
not re.match(r'_____ [^ ]+ at [^ ]+', line)):
# The two regexps above are a bit too broad; they are necessary only
# for git checkouts.
self.fail(line)
else:
results.append([[match.group(1), match.group(2), match.group(3)]])
else:
if not results:
# TODO(maruel): gclient's git stdout is inconsistent.
# This should fail the test instead!!
pass
else:
results[-1].append(line)
return results
def checkBlock(self, stdout, items):
results = self.splitBlock(stdout)
for i in xrange(min(len(results), len(items))):
if isinstance(items[i], (list, tuple)):
verb = items[i][0]
path = items[i][1]
else:
verb = items[i]
path = self.root_dir
self.checkString(results[i][0][0], verb, (i, results[i][0][0], verb))
if sys.platform == 'win32':
# Make path lower case since casing can change randomly.
self.checkString(
results[i][0][2].lower(),
path.lower(),
(i, results[i][0][2].lower(), path.lower()))
else:
self.checkString(results[i][0][2], path, (i, results[i][0][2], path))
self.assertEquals(len(results), len(items), (stdout, items, len(results)))
return results
@staticmethod
def svnBlockCleanup(out):
"""Work around svn status difference between svn 1.5 and svn 1.6
I don't know why but on Windows they are reversed. So sorts the items."""
for i in xrange(len(out)):
if len(out[i]) < 2:
continue
out[i] = [out[i][0]] + sorted([x[1:].strip() for x in out[i][1:]])
return out
class GClientSmoke(GClientSmokeBase):
"""Doesn't require either svnserve nor git-daemon."""
@property
def svn_base(self):
return 'svn://random.server/svn/'
@property
def git_base(self):
return 'git://random.server/git/'
def testHelp(self):
"""testHelp: make sure no new command was added."""
result = self.gclient(['help'])
# Roughly, not too short, not too long.
self.assertTrue(1000 < len(result[0]) and len(result[0]) < 2300,
'Too much written to stdout: %d bytes' % len(result[0]))
self.assertEquals(0, len(result[1]))
self.assertEquals(0, result[2])
def testUnknown(self):
result = self.gclient(['foo'])
# Roughly, not too short, not too long.
self.assertTrue(1000 < len(result[0]) and len(result[0]) < 2300,
'Too much written to stdout: %d bytes' % len(result[0]))
self.assertEquals(0, len(result[1]))
self.assertEquals(0, result[2])
def testNotConfigured(self):
res = ('', 'Error: client not configured; see \'gclient config\'\n', 1)
self.check(res, self.gclient(['cleanup']))
self.check(res, self.gclient(['diff']))
self.check(res, self.gclient(['pack']))
self.check(res, self.gclient(['revert']))
self.check(res, self.gclient(['revinfo']))
self.check(res, self.gclient(['runhooks']))
self.check(res, self.gclient(['status']))
self.check(res, self.gclient(['sync']))
self.check(res, self.gclient(['update']))
def testConfig(self):
p = join(self.root_dir, '.gclient')
def test(cmd, expected):
if os.path.exists(p):
os.remove(p)
results = self.gclient(cmd)
self.check(('', '', 0), results)
self.checkString(expected, open(p, 'rU').read())
test(['config', self.svn_base + 'trunk/src/'],
('solutions = [\n'
' { "name" : "src",\n'
' "url" : "%strunk/src",\n'
' "deps_file" : "DEPS",\n'
' "managed" : True,\n'
' "custom_deps" : {\n'
' },\n'
' "safesync_url": "",\n'
' },\n'
']\n'
'cache_dir = None\n') % self.svn_base)
test(['config', self.git_base + 'repo_1', '--name', 'src'],
('solutions = [\n'
' { "name" : "src",\n'
' "url" : "%srepo_1",\n'
' "deps_file" : "DEPS",\n'
' "managed" : True,\n'
' "custom_deps" : {\n'
' },\n'
' "safesync_url": "",\n'
' },\n'
']\n'
'cache_dir = None\n') % self.git_base)
test(['config', 'foo', 'faa'],
'solutions = [\n'
' { "name" : "foo",\n'
' "url" : "foo",\n'
' "deps_file" : "DEPS",\n'
' "managed" : True,\n'
' "custom_deps" : {\n'
' },\n'
' "safesync_url": "faa",\n'
' },\n'
']\n'
'cache_dir = None\n')
test(['config', 'foo', '--deps', 'blah'],
'solutions = [\n'
' { "name" : "foo",\n'
' "url" : "foo",\n'
' "deps_file" : "blah",\n'
' "managed" : True,\n'
' "custom_deps" : {\n'
' },\n'
' "safesync_url": "",\n'
' },\n'
']\n'
'cache_dir = None\n')
test(['config', '--spec', '["blah blah"]'], '["blah blah"]')
os.remove(p)
results = self.gclient(['config', 'foo', 'faa', 'fuu'])
err = ('Usage: gclient.py config [options] [url] [safesync url]\n\n'
'gclient.py: error: Inconsistent arguments. Use either --spec or one'
' or 2 args\n')
self.check(('', err, 2), results)
self.assertFalse(os.path.exists(join(self.root_dir, '.gclient')))
def testSolutionNone(self):
results = self.gclient(['config', '--spec',
'solutions=[{"name": "./", "url": None}]'])
self.check(('', '', 0), results)
results = self.gclient(['sync'])
self.check(('', '', 0), results)
self.assertTree({})
results = self.gclient(['revinfo'])
self.check(('./: None\n', '', 0), results)
self.check(('', '', 0), self.gclient(['cleanup']))
self.check(('', '', 0), self.gclient(['diff']))
self.assertTree({})
self.check(('', '', 0), self.gclient(['pack']))
self.check(('', '', 0), self.gclient(['revert']))
self.assertTree({})
self.check(('', '', 0), self.gclient(['runhooks']))
self.assertTree({})
self.check(('', '', 0), self.gclient(['status']))
def testDifferentTopLevelDirectory(self):
# Check that even if the .gclient file does not mention the directory src
# itself, but it is included via dependencies, the .gclient file is used.
self.gclient(['config', self.svn_base + 'trunk/src.DEPS'])
deps = join(self.root_dir, 'src.DEPS')
os.mkdir(deps)
write(join(deps, 'DEPS'),
'deps = { "src": "%strunk/src" }' % (self.svn_base))
src = join(self.root_dir, 'src')
os.mkdir(src)
res = self.gclient(['status', '--jobs', '1'], src)
self.checkBlock(res[0], [('running', deps), ('running', src)])
class GClientSmokeSVN(GClientSmokeBase):
def setUp(self):
super(GClientSmokeSVN, self).setUp()
self.enabled = self.FAKE_REPOS.set_up_svn()
def testSync(self):
# TODO(maruel): safesync.
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
# Test unversioned checkout.
self.parseGclient(['sync', '--deps', 'mac', '--jobs', '1'],
['running', 'running',
# This is due to the way svn update is called for a
# single file when File() is used in a DEPS file.
('running', os.path.join(self.root_dir, 'src', 'file', 'other')),
'running', 'running', 'running', 'running'])
tree = self.mangle_svn_tree(
('trunk/src@2', 'src'),
('trunk/third_party/foo@1', 'src/third_party/foo'),
('trunk/other@2', 'src/other'))
tree['src/file/other/DEPS'] = (
self.FAKE_REPOS.svn_revs[2]['trunk/other/DEPS'])
tree['src/svn_hooked1'] = 'svn_hooked1'
self.assertTree(tree)
# Manually remove svn_hooked1 before synching to make sure it's not
# recreated.
os.remove(join(self.root_dir, 'src', 'svn_hooked1'))
# Test incremental versioned sync: sync backward.
self.parseGclient(
['sync', '--revision', 'src@1', '--deps', 'mac',
'--delete_unversioned_trees', '--jobs', '1'],
['running', 'running', 'running', 'running', 'deleting'])
tree = self.mangle_svn_tree(
('trunk/src@1', 'src'),
('trunk/third_party/foo@2', 'src/third_party/fpp'),
('trunk/other@1', 'src/other'),
('trunk/third_party/foo@2', 'src/third_party/prout'))
tree['src/file/other/DEPS'] = (
self.FAKE_REPOS.svn_revs[2]['trunk/other/DEPS'])
self.assertTree(tree)
# Test incremental sync: delete-unversioned_trees isn't there.
self.parseGclient(['sync', '--deps', 'mac', '--jobs', '1'],
['running', 'running', 'running', 'running', 'running'])
tree = self.mangle_svn_tree(
('trunk/src@2', 'src'),
('trunk/third_party/foo@2', 'src/third_party/fpp'),
('trunk/third_party/foo@1', 'src/third_party/foo'),
('trunk/other@2', 'src/other'),
('trunk/third_party/foo@2', 'src/third_party/prout'))
tree['src/file/other/DEPS'] = (
self.FAKE_REPOS.svn_revs[2]['trunk/other/DEPS'])
tree['src/svn_hooked1'] = 'svn_hooked1'
self.assertTree(tree)
def testSyncIgnoredSolutionName(self):
"""TODO(maruel): This will become an error soon."""
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
results = self.gclient(
['sync', '--deps', 'mac', '-r', 'invalid@1', '--jobs', '1'])
self.checkBlock(results[0], [
'running', 'running',
# This is due to the way svn update is called for a single file when
# File() is used in a DEPS file.
('running', os.path.join(self.root_dir, 'src', 'file', 'other')),
'running', 'running', 'running', 'running'])
self.checkString('Please fix your script, having invalid --revision flags '
'will soon considered an error.\n', results[1])
self.assertEquals(0, results[2])
tree = self.mangle_svn_tree(
('trunk/src@2', 'src'),
('trunk/third_party/foo@1', 'src/third_party/foo'),
('trunk/other@2', 'src/other'))
tree['src/file/other/DEPS'] = (
self.FAKE_REPOS.svn_revs[2]['trunk/other/DEPS'])
tree['src/svn_hooked1'] = 'svn_hooked1'
self.assertTree(tree)
def testSyncNoSolutionName(self):
# When no solution name is provided, gclient uses the first solution listed.
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
self.parseGclient(['sync', '--deps', 'mac', '-r', '1', '--jobs', '1'],
['running', 'running', 'running', 'running'])
tree = self.mangle_svn_tree(
('trunk/src@1', 'src'),
('trunk/third_party/foo@2', 'src/third_party/fpp'),
('trunk/other@1', 'src/other'),
('trunk/third_party/foo@2', 'src/third_party/prout'))
self.assertTree(tree)
def testSyncJobs(self):
if not self.enabled:
return
# TODO(maruel): safesync.
self.gclient(['config', self.svn_base + 'trunk/src/'])
# Test unversioned checkout.
# Use --jobs 1 otherwise the order is not deterministic.
self.parseGclient(
['sync', '--deps', 'mac', '--jobs', '1'],
[
'running',
'running',
# This is due to the way svn update is called for a
# single file when File() is used in a DEPS file.
('running', os.path.join(self.root_dir, 'src', 'file', 'other')),
'running',
'running',
'running',
'running',
],
untangle=True)
tree = self.mangle_svn_tree(
('trunk/src@2', 'src'),
('trunk/third_party/foo@1', 'src/third_party/foo'),
('trunk/other@2', 'src/other'))
tree['src/file/other/DEPS'] = (
self.FAKE_REPOS.svn_revs[2]['trunk/other/DEPS'])
tree['src/svn_hooked1'] = 'svn_hooked1'
self.assertTree(tree)
# Manually remove svn_hooked1 before synching to make sure it's not
# recreated.
os.remove(join(self.root_dir, 'src', 'svn_hooked1'))
# Test incremental versioned sync: sync backward.
self.parseGclient(
['sync', '--revision', 'src@1', '--deps', 'mac',
'--delete_unversioned_trees', '--jobs', '8'],
['running', 'running', 'running', 'running', 'deleting'],
untangle=True)
tree = self.mangle_svn_tree(
('trunk/src@1', 'src'),
('trunk/third_party/foo@2', 'src/third_party/fpp'),
('trunk/other@1', 'src/other'),
('trunk/third_party/foo@2', 'src/third_party/prout'))
tree['src/file/other/DEPS'] = (
self.FAKE_REPOS.svn_revs[2]['trunk/other/DEPS'])
self.assertTree(tree)
# Test incremental sync: delete-unversioned_trees isn't there.
self.parseGclient(['sync', '--deps', 'mac', '--jobs', '8'],
['running', 'running', 'running', 'running', 'running'],
untangle=True)
tree = self.mangle_svn_tree(
('trunk/src@2', 'src'),
('trunk/third_party/foo@2', 'src/third_party/fpp'),
('trunk/third_party/foo@1', 'src/third_party/foo'),
('trunk/other@2', 'src/other'),
('trunk/third_party/foo@2', 'src/third_party/prout'))
tree['src/file/other/DEPS'] = (
self.FAKE_REPOS.svn_revs[2]['trunk/other/DEPS'])
tree['src/svn_hooked1'] = 'svn_hooked1'
self.assertTree(tree)
def testSyncCustomDeps(self):
if not self.enabled:
return
out = (
'solutions = [\n'
' { "name" : "src",\n'
' "url" : "%(base)s/src",\n'
' "custom_deps" : {\n'
# Remove 2 deps, change 1, add 1.
' "src/other": None,\n'
' "src/third_party/foo": \'%(base)s/third_party/prout\',\n'
' "src/file/other": None,\n'
' "new_deps": "/trunk/src/third_party",\n'
' },\n'
' "safesync_url": "",\n'
' },\n'
']\n\n' %
{ 'base': self.svn_base + 'trunk' })
fileobj = open(os.path.join(self.root_dir, '.gclient'), 'w')
fileobj.write(out)
fileobj.close()
self.parseGclient(
['sync', '--deps', 'mac', '--jobs', '1'],
['running', 'running', 'running', 'running'],
untangle=True)
tree = self.mangle_svn_tree(
('trunk/src@2', 'src'),
('trunk/third_party/prout@2', 'src/third_party/foo'),
('trunk/src/third_party@2', 'new_deps'))
tree['src/svn_hooked1'] = 'svn_hooked1'
self.assertTree(tree)
def testSyncCustomDepsNoDeps(self):
if not self.enabled:
return
out = (
'solutions = [\n'
# This directory has no DEPS file.
' { "name" : "src/third_party",\n'
' "url" : "%(base)s/src/third_party",\n'
' "custom_deps" : {\n'
# Add 1.
' "src/other": \'/trunk/other\',\n'
' },\n'
' "safesync_url": "",\n'
' },\n'
']\n\n' %
{ 'base': self.svn_base + 'trunk' })
fileobj = open(os.path.join(self.root_dir, '.gclient'), 'w')
fileobj.write(out)
fileobj.close()
self.parseGclient(
['sync', '--deps', 'mac', '--jobs', '1'],
['running', 'running'],
untangle=True)
tree = self.mangle_svn_tree(
('trunk/src/third_party@2', 'src/third_party'),
('trunk/other@2', 'src/other'))
self.assertTree(tree)
def testRevertAndStatus(self):
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
# Tested in testSync.
self.gclient(['sync', '--deps', 'mac'])
write(join(self.root_dir, 'src', 'other', 'hi'), 'Hey!')
out = self.parseGclient(['status', '--deps', 'mac', '--jobs', '1'],
[['running', join(self.root_dir, 'src')],
['running', join(self.root_dir, 'src', 'other')]])
out = self.svnBlockCleanup(out)
self.checkString('file', out[0][1])
self.checkString('other', out[0][2])
self.checkString('svn_hooked1', out[0][3])
self.checkString(join('third_party', 'foo'), out[0][4])
self.checkString('hi', out[1][1])
self.assertEquals(5, len(out[0]))
self.assertEquals(2, len(out[1]))
# Revert implies --force implies running hooks without looking at pattern
# matching.
results = self.gclient(['revert', '--deps', 'mac', '--jobs', '1'])
out = self.splitBlock(results[0])
# src, src/other is missing, src/other, src/third_party/foo is missing,
# src/third_party/foo, 2 svn hooks, 3 related to File().
self.assertEquals(10, len(out))
self.checkString('', results[1])
self.assertEquals(0, results[2])
tree = self.mangle_svn_tree(
('trunk/src@2', 'src'),
('trunk/third_party/foo@1', 'src/third_party/foo'),
('trunk/other@2', 'src/other'))
tree['src/file/other/DEPS'] = (
self.FAKE_REPOS.svn_revs[2]['trunk/other/DEPS'])
tree['src/svn_hooked1'] = 'svn_hooked1'
tree['src/svn_hooked2'] = 'svn_hooked2'
self.assertTree(tree)
out = self.parseGclient(['status', '--deps', 'mac', '--jobs', '1'],
[['running', join(self.root_dir, 'src')]])
out = self.svnBlockCleanup(out)
self.checkString('file', out[0][1])
self.checkString('other', out[0][2])
self.checkString('svn_hooked1', out[0][3])
self.checkString('svn_hooked2', out[0][4])
self.checkString(join('third_party', 'foo'), out[0][5])
self.assertEquals(6, len(out[0]))
self.assertEquals(1, len(out))
def testRevertAndStatusDepsOs(self):
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
# Tested in testSync.
self.gclient(['sync', '--deps', 'mac', '--revision', 'src@1'])
write(join(self.root_dir, 'src', 'other', 'hi'), 'Hey!')
# Without --verbose, gclient won't output the directories without
# modification.
out = self.parseGclient(['status', '--deps', 'mac', '--jobs', '1'],
[['running', join(self.root_dir, 'src')],
['running', join(self.root_dir, 'src', 'other')]])
out = self.svnBlockCleanup(out)
self.checkString('other', out[0][1])
self.checkString(join('third_party', 'fpp'), out[0][2])
self.checkString(join('third_party', 'prout'), out[0][3])
self.checkString('hi', out[1][1])
self.assertEquals(4, len(out[0]))
self.assertEquals(2, len(out[1]))
# So verify it works with --verbose.
out = self.parseGclient(
['status', '--deps', 'mac', '--verbose', '--jobs', '1'],
[['running', join(self.root_dir, 'src')],
['running', join(self.root_dir, 'src', 'other')],
['running', join(self.root_dir, 'src', 'third_party', 'fpp')],
['running', join(self.root_dir, 'src', 'third_party', 'prout')]])
out = self.svnBlockCleanup(out)
self.checkString('other', out[0][1])
self.checkString(join('third_party', 'fpp'), out[0][2])
self.checkString(join('third_party', 'prout'), out[0][3])
self.checkString('hi', out[1][1])
self.assertEquals(4, len(out[0]))
self.assertEquals(2, len(out[1]))
self.assertEquals(1, len(out[2]))
self.assertEquals(1, len(out[3]))
self.assertEquals(4, len(out))
# Revert implies --force implies running hooks without looking at pattern
# matching.
# TODO(maruel): In general, gclient revert output is wrong. It should output
# the file list after some ___ running 'svn status'
results = self.gclient(['revert', '--deps', 'mac', '--jobs', '1'])
out = self.splitBlock(results[0])
self.assertEquals(7, len(out))
self.checkString('', results[1])
self.assertEquals(0, results[2])
tree = self.mangle_svn_tree(
('trunk/src@1', 'src'),
('trunk/third_party/foo@2', 'src/third_party/fpp'),
('trunk/other@1', 'src/other'),
('trunk/third_party/prout@2', 'src/third_party/prout'))
self.assertTree(tree)
out = self.parseGclient(['status', '--deps', 'mac', '--jobs', '1'],
[['running', join(self.root_dir, 'src')]])
out = self.svnBlockCleanup(out)
self.checkString('other', out[0][1])
self.checkString(join('third_party', 'fpp'), out[0][2])
self.checkString(join('third_party', 'prout'), out[0][3])
self.assertEquals(4, len(out[0]))
def testRunHooks(self):
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
self.gclient(['sync', '--deps', 'mac'])
out = self.parseGclient(['runhooks', '--deps', 'mac'],
['running', 'running'])
self.checkString(1, len(out[0]))
self.checkString(1, len(out[1]))
def testRunHooksDepsOs(self):
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
self.gclient(['sync', '--deps', 'mac', '--revision', 'src@1'])
out = self.parseGclient(['runhooks', '--deps', 'mac'], [])
self.assertEquals([], out)
def testRevInfo(self):
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
self.gclient(['sync', '--deps', 'mac'])
results = self.gclient(['revinfo', '--deps', 'mac'])
out = ('src: %(base)s/src\n'
'src/file/other: File("%(base)s/other/DEPS")\n'
'src/other: %(base)s/other\n'
'src/third_party/foo: %(base)s/third_party/foo@1\n' %
{ 'base': self.svn_base + 'trunk' })
self.check((out, '', 0), results)
results = self.gclient(['revinfo', '--deps', 'mac', '--actual'])
out = ('src: %(base)s/src@2\n'
'src/file/other: %(base)s/other/DEPS@2\n'
'src/other: %(base)s/other@2\n'
'src/third_party/foo: %(base)s/third_party/foo@1\n' %
{ 'base': self.svn_base + 'trunk' })
self.check((out, '', 0), results)
results = self.gclient(['revinfo', '--deps', 'mac', '--snapshot'])
out = ('# Snapshot generated with gclient revinfo --snapshot\n'
'solutions = [\n'
' { "name" : "src",\n'
' "url" : "%(base)s/src",\n'
' "deps_file" : "DEPS",\n'
' "managed" : True,\n'
' "custom_deps" : {\n'
' "foo/bar": None,\n'
' "invalid": None,\n'
' "src/file/other": \'%(base)s/other/DEPS@2\',\n'
' "src/other": \'%(base)s/other@2\',\n'
' "src/third_party/foo": '
'\'%(base)s/third_party/foo@1\',\n'
' },\n'
' "safesync_url": "",\n'
' },\n'
']\n\n' %
{ 'base': self.svn_base + 'trunk' })
self.check((out, '', 0), results)
def testRevInfoAltDeps(self):
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/', '--deps-file',
'DEPS.alt'])
self.gclient(['sync'])
results = self.gclient(['revinfo', '--snapshot'])
out = ('# Snapshot generated with gclient revinfo --snapshot\n'
'solutions = [\n'
' { "name" : "src",\n'
' "url" : "%(base)s/src",\n'
' "deps_file" : "DEPS.alt",\n'
' "managed" : True,\n'
' "custom_deps" : {\n'
' "src/other2": \'%(base)s/other@2\',\n'
' },\n'
' "safesync_url": "",\n'
' },\n'
']\n\n' %
{ 'base': self.svn_base + 'trunk' })
self.check((out, '', 0), results)
def testWrongDirectory(self):
# Check that we're not using a .gclient configuration which only talks
# about a subdirectory src when we're in a different subdirectory src-other.
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
self.gclient(['sync'])
other_src = join(self.root_dir, 'src-other')
os.mkdir(other_src)
res = ('', 'Error: client not configured; see \'gclient config\'\n', 1)
self.check(res, self.gclient(['status'], other_src))
def testCorrectDirectory(self):
# Check that when we're in the subdirectory src, the .gclient configuration
# is used.
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
self.gclient(['sync'])
src = join(self.root_dir, 'src')
res = self.gclient(['status', '--jobs', '1'], src)
self.checkBlock(res[0], [('running', src)])
def testInitialCheckoutNotYetDone(self):
# Check that gclient can be executed when the initial checkout hasn't been
# done yet.
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
self.parseGclient(
['sync', '--jobs', '1'],
['running', 'running',
# This is due to the way svn update is called for a
# single file when File() is used in a DEPS file.
('running', os.path.join(self.root_dir, 'src', 'file', 'other')),
'running', 'running', 'running', 'running'])
def testInitialCheckoutFailed(self):
# Check that gclient can be executed from an arbitrary subdirectory if the
# initial checkout has failed.
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
self.gclient(['sync'])
# Cripple the checkout.
os.remove(join(self.root_dir, '.gclient_entries'))
src = join(self.root_dir, 'src')
res = self.gclient(['sync', '--jobs', '1'], src)
self.checkBlock(res[0],
['running', 'running', 'running'])
def testUnversionedRepository(self):
# Check that gclient automatically deletes crippled SVN repositories.
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
cmd = ['sync', '--jobs', '1', '--delete_unversioned_trees', '--reset']
self.assertEquals(0, self.gclient(cmd)[-1])
third_party = join(self.root_dir, 'src', 'third_party')
subprocess2.check_call(['svn', 'propset', '-q', 'svn:ignore', 'foo', '.'],
cwd=third_party)
# Cripple src/third_party/foo and make sure gclient still succeeds.
gclient_utils.rmtree(join(third_party, 'foo', '.svn'))
self.assertEquals(0, self.gclient(cmd)[-1])
class GClientSmokeSVNTransitive(GClientSmokeBase):
FAKE_REPOS_CLASS = FakeRepoTransitive
def setUp(self):
super(GClientSmokeSVNTransitive, self).setUp()
self.enabled = self.FAKE_REPOS.set_up_svn()
def testSyncTransitive(self):
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
def test_case(parent, timestamp, fixed, output):
# We check out revision 'parent' and expect the following:
# - src/ is checked out at r'parent'
# - src/same_repo is checked out at r'parent' (due to --transitive)
# - src/same_repo_fixed is checked out at r'fixed'
# - src/different_repo is checked out at r'timestamp'
# (due to --transitive)
# - src/different_repo_fixed is checked out at r'fixed'
revisions = self.FAKE_REPOS.svn_revs
self.parseGclient(
['sync', '--transitive', '--revision', 'src@%d' % parent,
'--jobs', '1'], output)
self.assertTree({
'src/origin': revisions[parent]['trunk/src/origin'],
'src/DEPS': revisions[parent]['trunk/src/DEPS'],
'src/same_repo/origin': revisions[parent]['trunk/third_party/origin'],
'src/same_repo_fixed/origin':
revisions[fixed]['trunk/third_party/origin'],
'src/different_repo/origin':
revisions[timestamp]['trunk/third_party/origin'],
'src/different_repo_fixed/origin':
revisions[fixed]['trunk/third_party/origin'],
})
# Here are the test cases for checking out 'trunk/src' at r1, r2 and r3
# r1: Everything is normal
test_case(parent=1, timestamp=1, fixed=1,
output=['running', 'running', 'running', 'running', 'running'])
# r2: Svn will scan from r1 upwards until it finds a revision matching the
# given timestamp or it takes the next smallest one (which is r2 in this
# case).
test_case(parent=2, timestamp=2, fixed=1,
output=['running', 'running', 'running'])
# r3: Svn will scan from r1 upwards until it finds a revision matching the
# given timestamp or it takes the next smallest one. Since
# timestamp(r3) < timestamp(r2) svn will checkout r1.
# This happens often on http://googlecode.com but is not expected to happen
# with svnserve (unless you manually change 'svn:date').
test_case(parent=3, timestamp=1, fixed=1,
output=['running', 'running', 'running'])
class GClientSmokeGIT(GClientSmokeBase):
def setUp(self):
super(GClientSmokeGIT, self).setUp()
self.enabled = self.FAKE_REPOS.set_up_git()
def testSync(self):
if not self.enabled:
return
# TODO(maruel): safesync.
self.gclient(['config', self.git_base + 'repo_1', '--name', 'src'])
# Test unversioned checkout.
self.parseGclient(
['sync', '--deps', 'mac', '--jobs', '1'],
['running', ('running', self.root_dir + '/src'),
'running', ('running', self.root_dir + '/src/repo2'),
'running', ('running', self.root_dir + '/src/repo2/repo_renamed'),
'running', 'running'])
# TODO(maruel): http://crosbug.com/3582 hooks run even if not matching, must
# add sync parsing to get the list of updated files.
tree = self.mangle_git_tree(('repo_1@2', 'src'),
('repo_2@1', 'src/repo2'),
('repo_3@2', 'src/repo2/repo_renamed'))
tree['src/git_hooked1'] = 'git_hooked1'
tree['src/git_hooked2'] = 'git_hooked2'
self.assertTree(tree)
# Manually remove git_hooked1 before synching to make sure it's not
# recreated.
os.remove(join(self.root_dir, 'src', 'git_hooked1'))
# Test incremental versioned sync: sync backward.
diffdir = os.path.join(self.root_dir, 'src', 'repo2', 'repo_renamed')
self.parseGclient(
['sync', '--jobs', '1', '--revision',
'src@' + self.githash('repo_1', 1),
'--deps', 'mac', '--delete_unversioned_trees'],
['running', ('running', self.root_dir + '/src/repo2/repo3'),
'running', ('running', self.root_dir + '/src/repo4'),
('running', diffdir), 'deleting'])
tree = self.mangle_git_tree(('repo_1@1', 'src'),
('repo_2@2', 'src/repo2'),
('repo_3@1', 'src/repo2/repo3'),
('repo_4@2', 'src/repo4'))
tree['src/git_hooked2'] = 'git_hooked2'
self.assertTree(tree)
# Test incremental sync: delete-unversioned_trees isn't there.
expect3 = ('running', os.path.join(self.root_dir, 'src', 'repo2', 'repo3'))
expect4 = ('running', os.path.join(self.root_dir, 'src', 'repo4'))
self.parseGclient(
['sync', '--deps', 'mac', '--jobs', '1'],
['running', ('running', self.root_dir + '/src/repo2/repo_renamed'),
'running', 'running', expect3, expect4])
tree = self.mangle_git_tree(('repo_1@2', 'src'),
('repo_2@1', 'src/repo2'),
('repo_3@1', 'src/repo2/repo3'),
('repo_3@2', 'src/repo2/repo_renamed'),
('repo_4@2', 'src/repo4'))
tree['src/git_hooked1'] = 'git_hooked1'
tree['src/git_hooked2'] = 'git_hooked2'
self.assertTree(tree)
def testSyncIgnoredSolutionName(self):
"""TODO(maruel): This will become an error soon."""
if not self.enabled:
return
self.gclient(['config', self.git_base + 'repo_1', '--name', 'src'])
self.parseGclient(
['sync', '--deps', 'mac', '--jobs', '1',
'--revision', 'invalid@' + self.githash('repo_1', 1)],
['running', ('running', self.root_dir + '/src'),
'running', ('running', self.root_dir + '/src/repo2'),
'running', ('running', self.root_dir + '/src/repo2/repo_renamed'),
'running', 'running'],
'Please fix your script, having invalid --revision flags '
'will soon considered an error.\n')
tree = self.mangle_git_tree(('repo_1@2', 'src'),
('repo_2@1', 'src/repo2'),
('repo_3@2', 'src/repo2/repo_renamed'))
tree['src/git_hooked1'] = 'git_hooked1'
tree['src/git_hooked2'] = 'git_hooked2'
self.assertTree(tree)
def testSyncNoSolutionName(self):
if not self.enabled:
return
# When no solution name is provided, gclient uses the first solution listed.
self.gclient(['config', self.git_base + 'repo_1', '--name', 'src'])
self.parseGclient(
['sync', '--deps', 'mac', '--jobs', '1',
'--revision', self.githash('repo_1', 1)],
['running', ('running', self.root_dir + '/src'),
'running', ('running', self.root_dir + '/src/repo2'),
'running', ('running', self.root_dir + '/src/repo2/repo3'),
'running', ('running', self.root_dir + '/src/repo4')])
tree = self.mangle_git_tree(('repo_1@1', 'src'),
('repo_2@2', 'src/repo2'),
('repo_3@1', 'src/repo2/repo3'),
('repo_4@2', 'src/repo4'))
self.assertTree(tree)
def testSyncJobs(self):
if not self.enabled:
return
# TODO(maruel): safesync.
self.gclient(['config', self.git_base + 'repo_1', '--name', 'src'])
# Test unversioned checkout.
self.parseGclient(
['sync', '--deps', 'mac', '--jobs', '8'],
['running', ('running', self.root_dir + '/src'),
'running', ('running', self.root_dir + '/src/repo2'),
'running', ('running', self.root_dir + '/src/repo2/repo_renamed'),
'running', 'running'],
untangle=True)
# TODO(maruel): http://crosbug.com/3582 hooks run even if not matching, must
# add sync parsing to get the list of updated files.
tree = self.mangle_git_tree(('repo_1@2', 'src'),
('repo_2@1', 'src/repo2'),
('repo_3@2', 'src/repo2/repo_renamed'))
tree['src/git_hooked1'] = 'git_hooked1'
tree['src/git_hooked2'] = 'git_hooked2'
self.assertTree(tree)
# Manually remove git_hooked1 before synching to make sure it's not
# recreated.
os.remove(join(self.root_dir, 'src', 'git_hooked1'))
# Test incremental versioned sync: sync backward.
expect3 = ('running',
os.path.join(self.root_dir, 'src', 'repo2', 'repo_renamed'))
# Use --jobs 1 otherwise the order is not deterministic.
self.parseGclient(
['sync', '--revision', 'src@' + self.githash('repo_1', 1),
'--deps', 'mac', '--delete_unversioned_trees', '--jobs', '1'],
[
'running',
('running', self.root_dir + '/src/repo2/repo3'),
'running',
('running', self.root_dir + '/src/repo4'),
expect3,
'deleting',
],
untangle=True)
tree = self.mangle_git_tree(('repo_1@1', 'src'),
('repo_2@2', 'src/repo2'),
('repo_3@1', 'src/repo2/repo3'),
('repo_4@2', 'src/repo4'))
tree['src/git_hooked2'] = 'git_hooked2'
self.assertTree(tree)
# Test incremental sync: delete-unversioned_trees isn't there.
expect4 = os.path.join(self.root_dir, 'src', 'repo2', 'repo3')
expect5 = os.path.join(self.root_dir, 'src', 'repo4')
self.parseGclient(
['sync', '--deps', 'mac', '--jobs', '8'],
['running', ('running', self.root_dir + '/src/repo2/repo_renamed'),
'running', 'running', ('running', expect4), ('running', expect5)],
untangle=True)
tree = self.mangle_git_tree(('repo_1@2', 'src'),
('repo_2@1', 'src/repo2'),
('repo_3@1', 'src/repo2/repo3'),
('repo_3@2', 'src/repo2/repo_renamed'),
('repo_4@2', 'src/repo4'))
tree['src/git_hooked1'] = 'git_hooked1'
tree['src/git_hooked2'] = 'git_hooked2'
self.assertTree(tree)
def testRevertAndStatus(self):
if not self.enabled:
return
self.gclient(['config', self.git_base + 'repo_1', '--name', 'src'])
# Tested in testSync.
self.gclient(['sync', '--deps', 'mac'])
write(join(self.root_dir, 'src', 'repo2', 'hi'), 'Hey!')
expected1 = ('running', os.path.join(self.root_dir, 'src'))
expected2 = ('running', os.path.join(expected1[1], 'repo2'))
expected3 = ('running', os.path.join(expected2[1], 'repo_renamed'))
out = self.parseGclient(['status', '--deps', 'mac', '--jobs', '1'],
[expected1, expected2, expected3])
# TODO(maruel): http://crosbug.com/3584 It should output the unversioned
# files.
self.assertEquals(3, len(out))
# Revert implies --force implies running hooks without looking at pattern
# matching. For each expected path, 'git reset' and 'git clean' are run, so
# there should be two results for each. The last two results should reflect
# writing git_hooked1 and git_hooked2.
expected4 = ('running', self.root_dir)
out = self.parseGclient(['revert', '--deps', 'mac', '--jobs', '1'],
[expected1, expected1,
expected2, expected2,
expected3, expected3,
expected4, expected4])
self.assertEquals(8, len(out))
tree = self.mangle_git_tree(('repo_1@2', 'src'),
('repo_2@1', 'src/repo2'),
('repo_3@2', 'src/repo2/repo_renamed'))
tree['src/git_hooked1'] = 'git_hooked1'
tree['src/git_hooked2'] = 'git_hooked2'
self.assertTree(tree)
results = self.gclient(['status', '--deps', 'mac', '--jobs', '1'])
out = results[0].splitlines(False)
# TODO(maruel): http://crosbug.com/3584 It should output the unversioned
# files.
self.assertEquals(6, len(out))
def testRunHooks(self):
if not self.enabled:
return
self.gclient(['config', self.git_base + 'repo_1', '--name', 'src'])
self.gclient(['sync', '--deps', 'mac'])
tree = self.mangle_git_tree(('repo_1@2', 'src'),
('repo_2@1', 'src/repo2'),
('repo_3@2', 'src/repo2/repo_renamed'))
tree['src/git_hooked1'] = 'git_hooked1'
tree['src/git_hooked2'] = 'git_hooked2'
self.assertTree(tree)
os.remove(join(self.root_dir, 'src', 'git_hooked1'))
os.remove(join(self.root_dir, 'src', 'git_hooked2'))
# runhooks runs all hooks even if not matching by design.
out = self.parseGclient(['runhooks', '--deps', 'mac'],
['running', 'running'])
self.assertEquals(1, len(out[0]))
self.assertEquals(1, len(out[1]))
tree = self.mangle_git_tree(('repo_1@2', 'src'),
('repo_2@1', 'src/repo2'),
('repo_3@2', 'src/repo2/repo_renamed'))
tree['src/git_hooked1'] = 'git_hooked1'
tree['src/git_hooked2'] = 'git_hooked2'
self.assertTree(tree)
def testPreDepsHooks(self):
if not self.enabled:
return
self.gclient(['config', self.git_base + 'repo_5', '--name', 'src'])
expectation = [
('running', self.root_dir), # git clone repo_5
('running', self.root_dir + '/src'), # git checkout src
('running', self.root_dir), # pre-deps hook
('running', self.root_dir), # git clone repo_1
('running', self.root_dir + '/src/repo1'), # git checkout repo1
('running', self.root_dir), # git clone repo_1
('running', self.root_dir + '/src/repo2'), # git checkout repo2
]
out = self.parseGclient(['sync', '--deps', 'mac', '--jobs=1',
'--revision', 'src@' + self.githash('repo_5', 2)],
expectation)
self.assertEquals(2, len(out[2]))
self.assertEquals('pre-deps hook', out[2][1])
tree = self.mangle_git_tree(('repo_5@2', 'src'),
('repo_1@2', 'src/repo1'),
('repo_2@1', 'src/repo2')
)
tree['src/git_pre_deps_hooked'] = 'git_pre_deps_hooked'
self.assertTree(tree)
os.remove(join(self.root_dir, 'src', 'git_pre_deps_hooked'))
# Pre-DEPS hooks don't run with runhooks.
self.gclient(['runhooks', '--deps', 'mac'])
tree = self.mangle_git_tree(('repo_5@2', 'src'),
('repo_1@2', 'src/repo1'),
('repo_2@1', 'src/repo2')
)
self.assertTree(tree)
# Pre-DEPS hooks run when syncing with --nohooks.
self.gclient(['sync', '--deps', 'mac', '--nohooks',
'--revision', 'src@' + self.githash('repo_5', 2)])
tree = self.mangle_git_tree(('repo_5@2', 'src'),
('repo_1@2', 'src/repo1'),
('repo_2@1', 'src/repo2')
)
tree['src/git_pre_deps_hooked'] = 'git_pre_deps_hooked'
self.assertTree(tree)
os.remove(join(self.root_dir, 'src', 'git_pre_deps_hooked'))
# Pre-DEPS hooks don't run with --noprehooks
self.gclient(['sync', '--deps', 'mac', '--noprehooks',
'--revision', 'src@' + self.githash('repo_5', 2)])
tree = self.mangle_git_tree(('repo_5@2', 'src'),
('repo_1@2', 'src/repo1'),
('repo_2@1', 'src/repo2')
)
self.assertTree(tree)
def testPreDepsHooksError(self):
if not self.enabled:
return
self.gclient(['config', self.git_base + 'repo_5', '--name', 'src'])
expected_stdout = [
('running', self.root_dir), # git clone repo_5
('running', self.root_dir + '/src'), # git checkout src
('running', self.root_dir), # pre-deps hook
('running', self.root_dir), # pre-deps hook (fails)
]
expected_stderr = ('Error: Command /usr/bin/python -c import sys; '
'sys.exit(1) returned non-zero exit status 1 in %s\n'
% self.root_dir)
stdout, stderr, retcode = self.gclient(['sync', '--deps', 'mac', '--jobs=1',
'--revision',
'src@' + self.githash('repo_5', 3)])
self.assertEquals(stderr, expected_stderr)
self.assertEquals(2, retcode)
self.checkBlock(stdout, expected_stdout)
def testRevInfo(self):
if not self.enabled:
return
self.gclient(['config', self.git_base + 'repo_1', '--name', 'src'])
self.gclient(['sync', '--deps', 'mac'])
results = self.gclient(['revinfo', '--deps', 'mac'])
out = ('src: %(base)srepo_1\n'
'src/repo2: %(base)srepo_2@%(hash2)s\n'
'src/repo2/repo_renamed: %(base)srepo_3\n' %
{
'base': self.git_base,
'hash2': self.githash('repo_2', 1)[:7],
})
self.check((out, '', 0), results)
results = self.gclient(['revinfo', '--deps', 'mac', '--actual'])
out = ('src: %(base)srepo_1@%(hash1)s\n'
'src/repo2: %(base)srepo_2@%(hash2)s\n'
'src/repo2/repo_renamed: %(base)srepo_3@%(hash3)s\n' %
{
'base': self.git_base,
'hash1': self.githash('repo_1', 2),
'hash2': self.githash('repo_2', 1),
'hash3': self.githash('repo_3', 2),
})
self.check((out, '', 0), results)
class GClientSmokeBoth(GClientSmokeBase):
def setUp(self):
super(GClientSmokeBoth, self).setUp()
self.enabled = self.FAKE_REPOS.set_up_svn() and self.FAKE_REPOS.set_up_git()
def testMultiSolutions(self):
if not self.enabled:
return
self.gclient(['config', '--spec',
'solutions=['
'{"name": "src",'
' "url": "' + self.svn_base + 'trunk/src/"},'
'{"name": "src-git",'
'"url": "' + self.git_base + 'repo_1"}]'])
self.parseGclient(['sync', '--deps', 'mac', '--jobs', '1'],
['running',
'running', ('running', self.root_dir + '/src-git'),
'running',
# This is due to the way svn update is called for a single
# file when File() is used in a DEPS file.
('running', self.root_dir + '/src/file/other'),
'running', 'running', 'running',
'running', ('running', self.root_dir + '/src/repo2'),
'running', ('running', self.root_dir + '/src/repo2/repo_renamed'),
'running', 'running', 'running'])
tree = self.mangle_git_tree(('repo_1@2', 'src-git'),
('repo_2@1', 'src/repo2'),
('repo_3@2', 'src/repo2/repo_renamed'))
tree.update(self.mangle_svn_tree(
('trunk/src@2', 'src'),
('trunk/third_party/foo@1', 'src/third_party/foo'),
('trunk/other@2', 'src/other')))
tree['src/file/other/DEPS'] = (
self.FAKE_REPOS.svn_revs[2]['trunk/other/DEPS'])
tree['src/git_hooked1'] = 'git_hooked1'
tree['src/git_hooked2'] = 'git_hooked2'
tree['src/svn_hooked1'] = 'svn_hooked1'
self.assertTree(tree)
def testMultiSolutionsJobs(self):
if not self.enabled:
return
self.gclient(['config', '--spec',
'solutions=['
'{"name": "src",'
' "url": "' + self.svn_base + 'trunk/src/"},'
'{"name": "src-git",'
'"url": "' + self.git_base + 'repo_1"}]'])
# There is no guarantee that the ordering will be consistent.
(stdout, stderr, returncode) = self.gclient(
['sync', '--deps', 'mac', '--jobs', '8'])
stdout = self.untangle(stdout)
self.checkString('', stderr)
self.assertEquals(0, returncode)
results = self.splitBlock(stdout)
self.assertEquals(15, len(results))
tree = self.mangle_git_tree(('repo_1@2', 'src-git'),
('repo_2@1', 'src/repo2'),
('repo_3@2', 'src/repo2/repo_renamed'))
tree.update(self.mangle_svn_tree(
('trunk/src@2', 'src'),
('trunk/third_party/foo@1', 'src/third_party/foo'),
('trunk/other@2', 'src/other')))
tree['src/file/other/DEPS'] = (
self.FAKE_REPOS.svn_revs[2]['trunk/other/DEPS'])
tree['src/git_hooked1'] = 'git_hooked1'
tree['src/git_hooked2'] = 'git_hooked2'
tree['src/svn_hooked1'] = 'svn_hooked1'
self.assertTree(tree)
def testMultiSolutionsMultiRev(self):
if not self.enabled:
return
self.gclient(['config', '--spec',
'solutions=['
'{"name": "src",'
' "url": "' + self.svn_base + 'trunk/src/"},'
'{"name": "src-git",'
'"url": "' + self.git_base + 'repo_1"}]'])
self.parseGclient(
['sync', '--deps', 'mac', '--jobs', '1', '--revision', '1',
'-r', 'src-git@' + self.githash('repo_1', 1)],
['running',
'running', ('running', self.root_dir + '/src-git'),
'running', 'running', 'running',
'running', ('running', self.root_dir + '/src/repo2'),
'running', ('running', self.root_dir + '/src/repo2/repo3'),
'running', ('running', self.root_dir + '/src/repo4')],
expected_stderr=
'You must specify the full solution name like --revision src@1\n'
'when you have multiple solutions setup in your .gclient file.\n'
'Other solutions present are: src-git.\n')
tree = self.mangle_git_tree(('repo_1@1', 'src-git'),
('repo_2@2', 'src/repo2'),
('repo_3@1', 'src/repo2/repo3'),
('repo_4@2', 'src/repo4'))
tree.update(self.mangle_svn_tree(
('trunk/src@1', 'src'),
('trunk/third_party/foo@2', 'src/third_party/fpp'),
('trunk/other@1', 'src/other'),
('trunk/third_party/foo@2', 'src/third_party/prout')))
self.assertTree(tree)
def testRevInfo(self):
if not self.enabled:
return
self.gclient(['config', '--spec',
'solutions=['
'{"name": "src",'
' "url": "' + self.svn_base + 'trunk/src/"},'
'{"name": "src-git",'
'"url": "' + self.git_base + 'repo_1"}]'])
self.gclient(['sync', '--deps', 'mac'])
results = self.gclient(['revinfo', '--deps', 'mac'])
out = ('src: %(svn_base)s/src/\n'
'src-git: %(git_base)srepo_1\n'
'src/file/other: File("%(svn_base)s/other/DEPS")\n'
'src/other: %(svn_base)s/other\n'
'src/repo2: %(git_base)srepo_2@%(hash2)s\n'
'src/repo2/repo_renamed: %(git_base)srepo_3\n'
'src/third_party/foo: %(svn_base)s/third_party/foo@1\n') % {
'svn_base': self.svn_base + 'trunk',
'git_base': self.git_base,
'hash2': self.githash('repo_2', 1)[:7],
}
self.check((out, '', 0), results)
results = self.gclient(['revinfo', '--deps', 'mac', '--actual'])
out = ('src: %(svn_base)s/src/@2\n'
'src-git: %(git_base)srepo_1@%(hash1)s\n'
'src/file/other: %(svn_base)s/other/DEPS@2\n'
'src/other: %(svn_base)s/other@2\n'
'src/repo2: %(git_base)srepo_2@%(hash2)s\n'
'src/repo2/repo_renamed: %(git_base)srepo_3@%(hash3)s\n'
'src/third_party/foo: %(svn_base)s/third_party/foo@1\n') % {
'svn_base': self.svn_base + 'trunk',
'git_base': self.git_base,
'hash1': self.githash('repo_1', 2),
'hash2': self.githash('repo_2', 1),
'hash3': self.githash('repo_3', 2),
}
self.check((out, '', 0), results)
def testRecurse(self):
if not self.enabled:
return
self.gclient(['config', '--spec',
'solutions=['
'{"name": "src",'
' "url": "' + self.svn_base + 'trunk/src/"},'
'{"name": "src-git",'
'"url": "' + self.git_base + 'repo_1"}]'])
self.gclient(['sync', '--deps', 'mac'])
results = self.gclient(['recurse', '-j1', 'sh', '-c',
'echo $GCLIENT_SCM,$GCLIENT_URL,`pwd`'])
entries = [tuple(line.split(','))
for line in results[0].strip().split('\n')]
logging.debug(entries)
bases = {'svn': self.svn_base, 'git': self.git_base}
expected_source = [
('svn', 'trunk/src/', 'src'),
('git', 'repo_1', 'src-git'),
('svn', 'trunk/other', 'src/other'),
('git', 'repo_2@' + self.githash('repo_2', 1)[:7], 'src/repo2'),
('git', 'repo_3', 'src/repo2/repo_renamed'),
('svn', 'trunk/third_party/foo@1', 'src/third_party/foo'),
]
expected = [(scm, bases[scm] + url, os.path.join(self.root_dir, path))
for (scm, url, path) in expected_source]
self.assertEquals(sorted(entries), sorted(expected))
class GClientSmokeFromCheckout(GClientSmokeBase):
# WebKit abuses this. It has a .gclient and a DEPS from a checkout.
def setUp(self):
super(GClientSmokeFromCheckout, self).setUp()
self.enabled = self.FAKE_REPOS.set_up_svn()
os.rmdir(self.root_dir)
if self.enabled:
usr, pwd = self.FAKE_REPOS.USERS[0]
subprocess2.check_call(
['svn', 'checkout', self.svn_base + '/trunk/webkit',
self.root_dir, '-q',
'--non-interactive', '--no-auth-cache',
'--username', usr, '--password', pwd])
def testSync(self):
if not self.enabled:
return
self.parseGclient(['sync', '--deps', 'mac', '--jobs', '1'],
['running', 'running'])
tree = self.mangle_svn_tree(
('trunk/webkit@2', ''),
('trunk/third_party/foo@1', 'foo/bar'))
self.assertTree(tree)
def testRevertAndStatus(self):
if not self.enabled:
return
self.gclient(['sync'])
# TODO(maruel): This is incorrect.
out = self.parseGclient(['status', '--deps', 'mac', '--jobs', '1'], [])
# Revert implies --force implies running hooks without looking at pattern
# matching.
results = self.gclient(['revert', '--deps', 'mac', '--jobs', '1'])
out = self.splitBlock(results[0])
self.assertEquals(2, len(out))
self.checkString(2, len(out[0]))
self.checkString(2, len(out[1]))
self.checkString('foo', out[1][1])
self.checkString('', results[1])
self.assertEquals(0, results[2])
tree = self.mangle_svn_tree(
('trunk/webkit@2', ''),
('trunk/third_party/foo@1', 'foo/bar'))
self.assertTree(tree)
# TODO(maruel): This is incorrect.
out = self.parseGclient(['status', '--deps', 'mac'], [])
def testRunHooks(self):
if not self.enabled:
return
# Hooks aren't really tested for now since there is no hook defined.
self.gclient(['sync', '--deps', 'mac'])
out = self.parseGclient(['runhooks', '--deps', 'mac'], ['running'])
self.assertEquals(1, len(out))
self.assertEquals(2, len(out[0]))
self.assertEquals(3, len(out[0][0]))
self.checkString('foo', out[0][1])
tree = self.mangle_svn_tree(
('trunk/webkit@2', ''),
('trunk/third_party/foo@1', 'foo/bar'))
self.assertTree(tree)
def testRevInfo(self):
if not self.enabled:
return
self.gclient(['sync', '--deps', 'mac'])
results = self.gclient(['revinfo', '--deps', 'mac'])
expected = (
'./: None\nfoo/bar: %strunk/third_party/foo@1\n' % self.svn_base,
'', 0)
self.check(expected, results)
# TODO(maruel): To be added after the refactor.
#results = self.gclient(['revinfo', '--snapshot'])
#expected = (
# './: None\nfoo/bar: %strunk/third_party/foo@1\n' % self.svn_base,
# '', 0)
#self.check(expected, results)
def testRest(self):
if not self.enabled:
return
self.gclient(['sync'])
# TODO(maruel): This is incorrect, it should run on ./ too.
self.parseGclient(
['cleanup', '--deps', 'mac', '--verbose', '--jobs', '1'],
[('running', join(self.root_dir, 'foo', 'bar'))])
self.parseGclient(
['diff', '--deps', 'mac', '--verbose', '--jobs', '1'],
[('running', join(self.root_dir, 'foo', 'bar'))])
if __name__ == '__main__':
if '-v' in sys.argv:
logging.basicConfig(level=logging.DEBUG)
if '-c' in sys.argv:
COVERAGE = True
sys.argv.remove('-c')
if os.path.exists('.coverage'):
os.remove('.coverage')
os.environ['COVERAGE_FILE'] = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
'.coverage')
unittest.main()
| |
"""
The code for generating the site.
"""
import os
from random import randrange
from twisted.logger import Logger
from twisted.web import resource, static
from twisted.web.util import redirectTo
from jinja2 import Environment, FileSystemLoader, select_autoescape
from .resources import Assets, render_file
from .session import get_session_state
log = Logger("etg.site")
COLORS = ["rgb(255, 255, 0)", # yellow
"rgb(0, 102, 255)", # blue
"rgb(255, 102, 0)", # orange
"rgb(0, 204, 0)", # green
"rgb(102, 255, 255)", # light blue
"rgb(255, 0, 0)", # red
"rgb(255, 102, 255)", # pink
"rgb(153, 255, 153)", # light green
"rgb(0, 186, 179)"] # Renes Moeder
# pylint: disable=too-many-instance-attributes
class ETGSite(resource.Resource):
"""
    The class responsible for grabbing the correct pages from a resource.
"""
def __init__(self, options, service):
super(ETGSite, self).__init__()
self.service = service
self.assets = Assets(path=options['assets'])
self.styles_dir = options['styles']
self.javascript_dir = options['javascript']
self.html_templates = options['templates']
self.image_path = options['images']
self.env = Environment(
loader=FileSystemLoader(self.html_templates),
autoescape=select_autoescape(['html', 'xml']))
self.html_info = options['html_info']
self._generate_assets()
self.putChild(b'', self)
def _generate_assets(self):
"""
A method for generating all the assets for the website. Used when creating the resource.
"""
self.putChild(b'styles', self.assets.get_styles_resource(self.styles_dir))
self.putChild(b'js', self.assets.get_javascript_resource(self.javascript_dir))
self.pages = self.assets.get_static_html(self.env, self.html_info)
self.putChild(b'images', static.File(self.image_path))
self.putChild(b'party.html', PartyResource(self.service, self.env))
self.putChild(b'create_party.html', PartyCreationResource(self.service, self.env))
self.putChild(b'company.html', CompanyResource(self.service, self.env))
self.putChild(b'create_company.html', CompanyCreationResource(self.service, self.env))
self.putChild(b'admin.html', AdminResource(self.service, self.env))
with open(os.path.join(self.html_info, 'index.html'), 'r') as html_content:
content = render_file(html_content, self.env, 'index')
self.content = content.encode('utf-8')
def render_GET(self, _):
"""
        Render the index page from the pre-rendered content, without holding up the server.
"""
return self.content
def getChild(self, path, request):
if path[-5:] == b".html":
return self.pages.getChild(path, request)
else:
return resource.Resource.getChild(self, path, request)
class PartyCreationResource(resource.Resource):
"""
This class is responsible for creating the Parties from the webinterface and then redirecting
the players to the party interface.
"""
isLeaf = True
def __init__(self, service, env, template_name="create_party"):
super().__init__()
self.service = service
self.template = env.get_template(template_name + ".html")
def render_GET(self, request):
"""
        Render the creation page, shown when we do not yet have all the necessary information for this party.
"""
state = get_session_state(request)
try:
state.taxes
except AttributeError:
energy_types = []
for etype in self.service.simulation.energy_types:
energy_types.append({'name': etype.name, 'taxes': randrange(-20, 20)})
state.taxes = energy_types
return self.template.render(title="Party Creation", energy_types=state.taxes) \
.encode('utf-8')
class PartyResource(resource.Resource):
"""
The class that builds the party interface.
"""
isLeaf = True
def __init__(self, service, env, template_name="party"):
super().__init__()
self.env = env
self.service = service
self.template = env.get_template(template_name + ".html")
def render_POST(self, request):
"""
        Render the page after a POST request, which supplies the new name for the party.
"""
state = get_session_state(request)
try:
state.taxes
except AttributeError:
return redirectTo(b"/create_party.html", request)
name = request.args[b'partyname'][0].decode('utf-8')
parties = list(filter(lambda p: p.name == name, self.service.simulation.parties))
companies = list(filter(lambda p: p.name == name, self.service.simulation.companies))
if len(parties) != 0 or len(companies) != 0:
log.warn("Player tried to name party {name}, but this entity already exists!",
name=name)
return redirectTo(b"/create_party.html", request)
state.name = name
color_num = randrange(0, len(COLORS))
color = COLORS[color_num]
del COLORS[color_num]
with self.service.simulation as sim:
sim.add_party(state.name, state.taxes, color)
return self.render_interface(request)
def render_GET(self, request):
"""
Render the page for a party based on the session information from the request.
"""
state = get_session_state(request)
if state.name == '':
return redirectTo(b"/create_party.html", request)
else:
return self.render_interface(request)
def render_interface(self, request):
"""
Render the party interface.
"""
state = get_session_state(request)
return self.template.render(title=state.name,
name=state.name,
starttab='Inputs',
energy_types=self.service.simulation.energy_types) \
.encode('utf-8')
class CompanyCreationResource(resource.Resource):
"""
This class is responsible for giving the players a way to create a new company.
"""
isLeaf = True
def __init__(self, service, env, template_name="create_company"):
super().__init__()
self.service = service
self.template = env.get_template(template_name + ".html")
def render_GET(self, request):
"""
Generates a random company and then presents it to the player.
"""
state = get_session_state(request)
types = self.service.simulation.energy_types
try:
state.tiers
except AttributeError:
tiers = []
if randrange(5) == 0:
idx = randrange(len(types))
tiers.append({'name': types[idx].name, 'tier': 2})
else:
idx = nidx = randrange(len(types))
tiers.append({'name': types[idx].name, 'tier': 1})
while idx == nidx:
nidx = randrange(len(types))
tiers.append({'name': types[nidx].name, 'tier':1})
state.tiers = tiers
string = ' and '.join("a {} plant".format(tier['name']) for tier in state.tiers)
return self.template.render(title="Company Creation",
plants=string, tier=state.tiers[0]['tier']).encode('utf-8')
class CompanyResource(resource.Resource):
"""
The class that builds the company interface.
"""
isLeaf = True
def __init__(self, service, env, template_name="company"):
super().__init__()
self.service = service
self.template = env.get_template(template_name + ".html")
def render_POST(self, request):
"""
        Render the page after a POST request, which supplies the new name for the company.
"""
state = get_session_state(request)
try:
state.tiers
except AttributeError:
return redirectTo(b"/create_company.html", request)
name = request.args[b'companyname'][0].decode('utf-8')
parties = list(filter(lambda p: p.name == name, self.service.simulation.parties))
companies = list(filter(lambda p: p.name == name, self.service.simulation.companies))
if len(parties) != 0 or len(companies) != 0:
log.warn("Player tried to name company {name}, but this entity already exists!",
name=name)
return redirectTo(b"/create_company.html", request)
state.name = name
color_num = randrange(0, len(COLORS))
color = COLORS[color_num]
del COLORS[color_num]
with self.service.simulation as sim:
sim.add_company(state.name, state.tiers, color)
return self.render_interface(request)
def render_GET(self, request):
"""
        Render the page for a company based on the session information from the request.
"""
state = get_session_state(request)
if state.name == '':
return redirectTo(b"/create_company.html", request)
else:
return self.render_interface(request)
def render_interface(self, request):
"""
        Render the company interface.
"""
state = get_session_state(request)
return self.template.render(title=state.name,
name=state.name,
starttab='Owned',
parties=self.service.simulation.parties,
energy_types=self.service.simulation.energy_types) \
.encode('utf-8')
class AdminResource(resource.Resource):
"""
A resource to render the Admin panel with start/stop buttons.
"""
isLeaf = True
def __init__(self, service, env, template_name="admin"):
super().__init__()
self.service = service
self.template = env.get_template(template_name + ".html")
def render_GET(self, _):
"""
Render the actual Admin Interface.
"""
return self.template.render(name="admin", starttab="Chat").encode('utf-8')
| |
# -*- coding: utf-8 -*-
# This technical data was produced for the U. S. Government under Contract No. W15P7T-13-C-F600, and
# is subject to the Rights in Technical Data-Noncommercial Items clause at DFARS 252.227-7013 (FEB 2012)
import json
import cgi
from django.contrib.auth.models import User, Group
from django.contrib.gis.db import models
from django.contrib.gis.geos import MultiPolygon, Polygon
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.utils.datastructures import SortedDict
from managers import AOIManager
from jsonfield import JSONField
from collections import defaultdict
from django.db.models import Q
from geoq.training.models import Training
from geoq.core.utils import clean_dumps
TRUE_FALSE = [(0, 'False'), (1, 'True')]
STATUS_VALUES_LIST = ['Unassigned', 'Assigned', 'In work', 'Awaiting review', 'In review', 'Completed']
class AssigneeType:
USER, GROUP = range(1, 3)
class Setting(models.Model):
"""
Model for site-wide settings.
"""
name = models.CharField(max_length=200, help_text="Name of site-wide variable")
value = JSONField(null=True, blank=True,
help_text="Value of site-wide variable that scripts can reference - must be valid JSON")
def __unicode__(self):
return self.name
class Assignment(models.Model):
"""
A generic relation to either a user or group
"""
assignee_type = models.ForeignKey(ContentType, null=True)
assignee_id = models.PositiveIntegerField(null=True)
content_object = generic.GenericForeignKey('assignee_type', 'assignee_id')
class Meta:
abstract = True
class GeoQBase(models.Model):
"""
A generic model for GeoQ objects.
"""
active = models.BooleanField(default=True, help_text="Check to make project 'Active' and visible to all users. Uncheck this to 'Archive' the project")
created_at = models.DateTimeField(auto_now_add=True)
name = models.CharField(max_length=200)
description = models.TextField()
updated_at = models.DateTimeField(auto_now=True)
properties = JSONField(null=True, blank=True,
help_text='JSON key/value pairs associated with this object, e.g. {"usng":"18 S TJ 87308 14549", "favorite":"true"}')
def __unicode__(self):
return self.name
class Meta:
abstract = True
ordering = ('active', '-created_at',)
class Project(GeoQBase):
"""
Top-level organizational object.
"""
PROJECT_TYPES = [
("Hurricane/Cyclone", "Hurricane/Cyclone"),
("Tornado", "Tornado"),
("Earthquake", "Earthquake"),
("Extreme Weather", "Extreme Weather"),
("Fire", "Fire"),
("Flood", "Flood"),
("Tsunami", "Tsunami"),
("Volcano", "Volcano"),
("Pandemic", "Pandemic"),
("Exercise", "Exercise"),
("Special Event", "Special Event"),
("Training", "Training"),
]
project_type = models.CharField(max_length=50, choices=PROJECT_TYPES)
private = models.BooleanField(default=False, help_text="Check this to make this project 'Private' and available only to users assigned to it.")
project_admins = models.ManyToManyField(
User, blank=True, null=True,
related_name="project_admins", help_text='User that has admin rights to project.')
contributors = models.ManyToManyField(
User, blank=True, null=True,
related_name="contributors", help_text='User that will be able to take on jobs.')
class Meta:
permissions = (
('open_project', 'Open Project'), ('close_project', 'Close Project'),
('archive_project', 'Archive Project'),
)
ordering = ('-created_at',)
@property
def jobs(self):
return Job.objects.filter(project=self)
@property
def job_count(self):
return self.jobs.count()
@property
def user_count(self):
return User.objects.filter(analysts__project__id=self.id).distinct().count()
@property
def aois(self):
return AOI.objects.filter(job__project__id=self.id)
@property
def aoi_count(self):
return self.aois.count()
@property
def aois_envelope(self):
return MultiPolygon([n.aois_envelope() for n in self.jobs if n.aois.count()])
@property
def aois_envelope_by_job(self):
jobs = []
for job in self.jobs:
if job.aois.count():
job_envelope = job.aois_envelope()
envelope_string = job_envelope.json
if envelope_string:
job_poly = json.loads(envelope_string)
job_poly['properties'] = {"job_id": str(job.id), "link": str(job.get_absolute_url()),
"name": str(job.name)}
jobs.append(job_poly)
return clean_dumps(jobs, ensure_ascii=True)
def get_absolute_url(self):
return reverse('project-detail', args=[self.id])
def get_update_url(self):
return reverse('project-update', args=[self.id])
class Job(GeoQBase, Assignment):
"""
Mid-level organizational object.
"""
GRID_SERVICE_VALUES = ['usng', 'mgrs']
GRID_SERVICE_CHOICES = [(choice, choice) for choice in GRID_SERVICE_VALUES]
EDITORS = ['geoq','osm']
EDITOR_CHOICES = [(choice, choice) for choice in EDITORS]
analysts = models.ManyToManyField(User, blank=True, null=True, related_name="analysts")
teams = models.ManyToManyField(Group, blank=True, null=True, related_name="teams")
reviewers = models.ManyToManyField(User, blank=True, null=True, related_name="reviewers")
progress = models.SmallIntegerField(max_length=2, blank=True, null=True)
project = models.ForeignKey(Project, related_name="project")
grid = models.CharField(max_length=5, choices=GRID_SERVICE_CHOICES, default=GRID_SERVICE_VALUES[0],
help_text='Select usng for Jobs inside the US, otherwise use mgrs')
tags = models.CharField(max_length=50, blank=True, null=True, help_text='Useful tags to search social media with')
editor = models.CharField(max_length=20, help_text='Editor to be used for creating features', choices=EDITOR_CHOICES, default=EDITOR_CHOICES[0])
editable_layer = models.ForeignKey( 'maps.EditableMapLayer', blank=True, null=True)
map = models.ForeignKey('maps.Map', blank=True, null=True)
feature_types = models.ManyToManyField('maps.FeatureType', blank=True, null=True)
required_courses = models.ManyToManyField(Training, blank=True, null=True, help_text="Courses that must be passed to open these cells")
class Meta:
permissions = (
)
ordering = ('-created_at',)
def get_absolute_url(self):
return reverse('job-detail', args=[self.id])
def get_update_url(self):
return reverse('job-update', args=[self.id])
def aois_geometry(self):
return self.aois.all().collect()
def aois_envelope(self):
"""
Returns the envelope of related AOIs geometry.
"""
return getattr(self.aois.all().collect(), 'envelope', None)
def aoi_count(self):
return self.aois.count()
@property
def aoi_counts_html(self):
count = defaultdict(int)
for cell in AOI.objects.filter(job__id=self.id):
count[cell.status] += 1
return str(', '.join("%s: <b>%r</b>" % (key, val) for (key, val) in count.iteritems()))
@property
def user_count(self):
return self.analysts.count()
@property
def base_layer(self):
if self.map is not None and self.map.layers is not None:
layers = sorted([l for l in self.map.layers if l.is_base_layer], key = lambda x: x.stack_order)
if len(layers) > 0:
layer = layers[0].layer
return [layer.name, layer.url, layer.attribution]
else:
return []
else:
return []
def features_table_html(self):
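        """
        Return an HTML table of feature counts, broken down by feature type and status.
        """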
counts = {}
for feature_item in self.feature_set.all():
status = str(feature_item.status)
featuretype = str(feature_item.template.name)
if not featuretype in counts:
counts[featuretype] = {}
if not status in counts[featuretype]:
counts[featuretype][status] = 0
counts[featuretype][status] += 1
#TODO: Also return this as JSON
if len(counts):
output = "<table class='job_feature_list'>"
header = "<th><i>Feature Counts</i></th>"
for (featuretype, status_obj) in counts.iteritems():
header = header + "<th><b>" + cgi.escape(featuretype) + "</b></th>"
output += "<tr>" + header + "</tr>"
for status in STATUS_VALUES_LIST:
status = str(status)
row = "<td><b>" + status + "</b></td>"
for (featuretype, status_obj) in counts.iteritems():
if status in status_obj:
val = status_obj[status]
else:
val = 0
row += "<td>" + cgi.escape(str(val)) + "</td>"
output += "<tr>" + row + "</tr>"
output += "</table>"
else:
output = ""
return output
def complete(self):
"""
Returns the completed AOIs.
"""
return self.aois.filter(status='Completed')
def in_work(self):
"""
Returns the AOIs currently being worked on or in review
"""
return self.aois.filter(Q(status='In work') | Q(status='Awaiting review') | Q(status='In review'))
def in_work_count(self):
return self.in_work().count()
def complete_count(self):
return self.complete().count()
def complete_percent(self):
if self.aois.count() > 0:
return round(float(self.complete().count() * 100) / self.aois.count(), 2)
return 0.0
def total_count(self):
return self.aois.count()
def geoJSON(self, as_json=True):
"""
Returns geoJSON of the feature.
"""
geojson = SortedDict()
geojson["type"] = "FeatureCollection"
geojson["features"] = [json.loads(aoi.geoJSON()) for aoi in self.aois.all()]
return clean_dumps(geojson) if as_json else geojson
def features_geoJSON(self, as_json=True, using_style_template=True):
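        """
        Returns geoJSON of the features for this job.
        """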
geojson = SortedDict()
geojson["type"] = "FeatureCollection"
geojson["properties"] = dict(id=self.id)
geojson["features"] = [n.geoJSON(as_json=False, using_style_template=using_style_template) for n in self.feature_set.all()]
return clean_dumps(geojson, indent=2) if as_json else geojson
def grid_geoJSON(self, as_json=True):
"""
Return geoJSON of grid for export
"""
geojson = SortedDict()
geojson["type"] = "FeatureCollection"
geojson["features"] = [json.loads(aoi.grid_geoJSON()) for aoi in self.aois.all()]
return clean_dumps(geojson) if as_json else geojson
def base_layer_object(self):
"""
create base layer object that can override leaflet base OSM map
"""
obj = {}
if len(self.base_layer) > 0:
obj["layers"] = [self.base_layer]
return obj
class AOI(GeoQBase, Assignment):
"""
Low-level organizational object. Now (6/1/14) referred to as a 'Workcell'
"""
STATUS_VALUES = STATUS_VALUES_LIST
STATUS_CHOICES = [(choice, choice) for choice in STATUS_VALUES]
PRIORITIES = [(n, n) for n in range(1, 6)]
analyst = models.ForeignKey(User, blank=True, null=True, help_text="User assigned to work the workcell.")
job = models.ForeignKey(Job, related_name="aois")
reviewers = models.ManyToManyField(User, blank=True, null=True, related_name="aoi_reviewers",
help_text='Users that actually reviewed this work.')
objects = AOIManager()
polygon = models.MultiPolygonField()
priority = models.SmallIntegerField(choices=PRIORITIES, max_length=1, default=5)
status = models.CharField(max_length=15, choices=STATUS_CHOICES, default='Unassigned')
class Meta:
verbose_name = 'Area of Interest'
verbose_name_plural = 'Areas of Interest'
permissions = (
('assign_workcells', 'Assign Workcells'), ('certify_workcells', 'Certify Workcells'),
)
def __unicode__(self):
aoi_obj = '%s - AOI %s' % (self.name, self.id)
return aoi_obj
@property
def log(self):
return Comment.objects.filter(aoi=self).order_by('created_at')
@property
def assignee_name(self):
if self.assignee_id is None:
return 'Unknown'
else:
if self.assignee_type_id == AssigneeType.USER:
return User.objects.get(id=self.assignee_id).username
else:
return Group.objects.get(id=self.assignee_id).name
#def save(self):
# if analyst or reviewer updated, then create policy to give them permission to edit this object.....
# -- Afterwards -- check how this will work with the views.
def get_absolute_url(self):
if self.job.editable_layer_id is None:
return reverse('aoi-work', args=[self.id])
else:
return reverse('aoi-mapedit', args=[self.id])
def geoJSON(self):
"""
Returns geoJSON of the feature.
"""
if self.id is None:
self.id = 1
geojson = SortedDict()
geojson["type"] = "Feature"
geojson["properties"] = dict(
id=self.id,
status=self.status,
analyst=(self.analyst.username if self.analyst is not None else 'None'),
assignee=self.assignee_name,
priority=self.priority,
delete_url=reverse('aoi-deleter', args=[self.id]))
geojson["geometry"] = json.loads(self.polygon.json)
geojson["properties"]["absolute_url"] = self.get_absolute_url()
return clean_dumps(geojson)
def logJSON(self):
return [ob.to_dict() for ob in self.log]
def properties_json(self):
"""
Returns json of the feature properties.
"""
if self.id is None:
self.id = 1
properties_main = self.properties or {}
properties_built = dict(
status=self.status,
analyst=(self.analyst.username if self.analyst is not None else 'Unassigned'),
priority=self.priority)
prop_json = dict(properties_built.items() + properties_main.items())
return clean_dumps(prop_json)
def map_detail(self):
"""
Get map coordinates for MapEdit
"""
center = self.polygon.centroid
return "15/%f/%f" % (center.y, center.x)
def grid_geoJSON(self):
"""
Return geoJSON of workcells for export
"""
if self.id is None:
self.id = 1
geojson = SortedDict()
geojson["type"] = "Feature"
geojson["properties"] = dict(
id=self.id,
priority=self.priority,
status=self.status)
geojson["geometry"] = json.loads(self.polygon.json)
return clean_dumps(geojson)
def user_can_complete(self, user):
"""
Returns whether the user can update the AOI as complete.
"""
return user == self.analyst or user in self.job.reviewers.all()
class Comment(models.Model):
"""
Track comments regarding work on a Workcell
"""
user = models.ForeignKey(User, blank=True, null=True, help_text="User who made comment")
aoi = models.ForeignKey(AOI, blank=False, null=False, help_text="Associated AOI for comment")
text = models.CharField(max_length=200)
created_at = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
comment_obj = '%s Comment on %s' % (self.user, self.aoi.id)
return comment_obj
def to_dict(self):
format = "%D %H:%M:%S"
if self.user:
username = self.user.username
else:
username = "Anonymous or Removed User"
o = {'user': username, 'timestamp': self.created_at.strftime(format), 'text': self.text}
return o
class Organization(models.Model):
"""
Organizations and Agencies that we work with.
"""
name = models.CharField(max_length=200, unique=True, help_text="Short name of this organization")
url = models.CharField(max_length=600, blank=True, null=True, help_text="Link that users should be directed to if icon is clicked")
icon = models.ImageField(upload_to="static/organizations/", blank=True, null=True, help_text="Upload an icon of the organization here")
show_on_front = models.BooleanField(default=False, help_text="Show on the front of the GeoQ App")
order = models.IntegerField(default=0, null=True, blank=True, help_text='Optionally specify the order orgs should appear on the front page. Lower numbers appear sooner.')
def __unicode__(self):
return self.name
class Meta:
verbose_name_plural = 'Organizations'
ordering = ['order', 'name']
| |
# Backport of selectors.py from Python 3.5+ to support Python < 3.4
# Also has the behavior specified in PEP 475 which is to retry syscalls
# in the case of an EINTR error. This module is required because selectors34
# does not follow this behavior and instead returns that no file descriptor
# events have occurred rather than retry the syscall. The decision to drop
# support for select.devpoll is made to maintain 100% test coverage.
import errno
import math
import select
import socket
import sys
import time
from collections import namedtuple
try:
    from collections.abc import Mapping
except ImportError:  # Python 2, where Mapping lives in collections
    from collections import Mapping
try:
monotonic = time.monotonic
except (AttributeError, ImportError):  # Python < 3.3
monotonic = time.time
EVENT_READ = (1 << 0)
EVENT_WRITE = (1 << 1)
HAS_SELECT = True # Variable that shows whether the platform has a selector.
_SYSCALL_SENTINEL = object() # Sentinel in case a system call returns None.
_DEFAULT_SELECTOR = None
class SelectorError(Exception):
def __init__(self, errcode):
super(SelectorError, self).__init__()
self.errno = errcode
def __repr__(self):
return "<SelectorError errno={0}>".format(self.errno)
def __str__(self):
return self.__repr__()
def _fileobj_to_fd(fileobj):
""" Return a file descriptor from a file object. If
given an integer will simply return that integer back. """
if isinstance(fileobj, int):
fd = fileobj
else:
try:
fd = int(fileobj.fileno())
except (AttributeError, TypeError, ValueError):
raise ValueError("Invalid file object: {0!r}".format(fileobj))
if fd < 0:
raise ValueError("Invalid file descriptor: {0}".format(fd))
return fd
# Determine which function to use to wrap system calls because Python 3.5+
# already handles the case when system calls are interrupted.
if sys.version_info >= (3, 5):
def _syscall_wrapper(func, _, *args, **kwargs):
""" This is the short-circuit version of the below logic
because in Python 3.5+ all system calls automatically restart
and recalculate their timeouts. """
try:
return func(*args, **kwargs)
except (OSError, IOError, select.error) as e:
errcode = None
if hasattr(e, "errno"):
errcode = e.errno
raise SelectorError(errcode)
else:
def _syscall_wrapper(func, recalc_timeout, *args, **kwargs):
""" Wrapper function for syscalls that could fail due to EINTR.
All functions should be retried if there is time left in the timeout
in accordance with PEP 475. """
timeout = kwargs.get("timeout", None)
if timeout is None:
expires = None
recalc_timeout = False
else:
timeout = float(timeout)
if timeout < 0.0: # Timeout less than 0 treated as no timeout.
expires = None
else:
expires = monotonic() + timeout
args = list(args)
if recalc_timeout and "timeout" not in kwargs:
raise ValueError(
"Timeout must be in args or kwargs to be recalculated")
result = _SYSCALL_SENTINEL
while result is _SYSCALL_SENTINEL:
try:
result = func(*args, **kwargs)
# OSError is thrown by select.select
# IOError is thrown by select.epoll.poll
# select.error is thrown by select.poll.poll
# Aren't we thankful for Python 3.x rework for exceptions?
except (OSError, IOError, select.error) as e:
# select.error wasn't a subclass of OSError in the past.
errcode = None
if hasattr(e, "errno"):
errcode = e.errno
elif hasattr(e, "args"):
errcode = e.args[0]
# Also test for the Windows equivalent of EINTR.
is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and
errcode == errno.WSAEINTR))
if is_interrupt:
if expires is not None:
current_time = monotonic()
if current_time > expires:
                            # OSError takes positional args; a keyword call would raise TypeError.
                            raise OSError(errno.ETIMEDOUT, "Connection timed out")
if recalc_timeout:
if "timeout" in kwargs:
kwargs["timeout"] = expires - current_time
continue
if errcode:
raise SelectorError(errcode)
else:
raise
return result
SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
class _SelectorMapping(Mapping):
""" Mapping of file objects to selector keys """
def __init__(self, selector):
self._selector = selector
def __len__(self):
return len(self._selector._fd_to_key)
def __getitem__(self, fileobj):
try:
fd = self._selector._fileobj_lookup(fileobj)
return self._selector._fd_to_key[fd]
except KeyError:
raise KeyError("{0!r} is not registered.".format(fileobj))
def __iter__(self):
return iter(self._selector._fd_to_key)
class BaseSelector(object):
""" Abstract Selector class
A selector supports registering file objects to be monitored
for specific I/O events.
A file object is a file descriptor or any object with a
`fileno()` method. An arbitrary object can be attached to the
file object which can be used for example to store context info,
a callback, etc.
A selector can use various implementations (select(), poll(), epoll(),
and kqueue()) depending on the platform. The 'DefaultSelector' class uses
the most efficient implementation for the current platform.
"""
def __init__(self):
# Maps file descriptors to keys.
self._fd_to_key = {}
# Read-only mapping returned by get_map()
self._map = _SelectorMapping(self)
def _fileobj_lookup(self, fileobj):
""" Return a file descriptor from a file object.
This wraps _fileobj_to_fd() to do an exhaustive
search in case the object is invalid but we still
have it in our map. Used by unregister() so we can
unregister an object that was previously registered
even if it is closed. It is also used by _SelectorMapping
"""
try:
return _fileobj_to_fd(fileobj)
except ValueError:
# Search through all our mapped keys.
for key in self._fd_to_key.values():
if key.fileobj is fileobj:
return key.fd
# Raise ValueError after all.
raise
def register(self, fileobj, events, data=None):
""" Register a file object for a set of events to monitor. """
if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
raise ValueError("Invalid events: {0!r}".format(events))
key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
if key.fd in self._fd_to_key:
raise KeyError("{0!r} (FD {1}) is already registered"
.format(fileobj, key.fd))
self._fd_to_key[key.fd] = key
return key
def unregister(self, fileobj):
""" Unregister a file object from being monitored. """
try:
key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
# Getting the fileno of a closed socket on Windows errors with EBADF.
except socket.error as e: # Platform-specific: Windows.
if e.errno != errno.EBADF:
raise
else:
for key in self._fd_to_key.values():
if key.fileobj is fileobj:
self._fd_to_key.pop(key.fd)
break
else:
raise KeyError("{0!r} is not registered".format(fileobj))
return key
def modify(self, fileobj, events, data=None):
""" Change a registered file object monitored events and data. """
# NOTE: Some subclasses optimize this operation even further.
try:
key = self._fd_to_key[self._fileobj_lookup(fileobj)]
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
if events != key.events:
self.unregister(fileobj)
key = self.register(fileobj, events, data)
elif data != key.data:
# Use a shortcut to update the data.
key = key._replace(data=data)
self._fd_to_key[key.fd] = key
return key
def select(self, timeout=None):
""" Perform the actual selection until some monitored file objects
are ready or the timeout expires. """
raise NotImplementedError()
def close(self):
""" Close the selector. This must be called to ensure that all
underlying resources are freed. """
self._fd_to_key.clear()
self._map = None
def get_key(self, fileobj):
""" Return the key associated with a registered file object. """
mapping = self.get_map()
if mapping is None:
raise RuntimeError("Selector is closed")
try:
return mapping[fileobj]
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
def get_map(self):
""" Return a mapping of file objects to selector keys """
return self._map
def _key_from_fd(self, fd):
""" Return the key associated to a given file descriptor
Return None if it is not found. """
try:
return self._fd_to_key[fd]
except KeyError:
return None
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
# Almost all platforms have select.select()
if hasattr(select, "select"):
class SelectSelector(BaseSelector):
""" Select-based selector. """
def __init__(self):
super(SelectSelector, self).__init__()
self._readers = set()
self._writers = set()
def register(self, fileobj, events, data=None):
key = super(SelectSelector, self).register(fileobj, events, data)
if events & EVENT_READ:
self._readers.add(key.fd)
if events & EVENT_WRITE:
self._writers.add(key.fd)
return key
def unregister(self, fileobj):
key = super(SelectSelector, self).unregister(fileobj)
self._readers.discard(key.fd)
self._writers.discard(key.fd)
return key
def _select(self, r, w, timeout=None):
""" Wrapper for select.select because timeout is a positional arg """
return select.select(r, w, [], timeout)
def select(self, timeout=None):
# Selecting on empty lists on Windows errors out.
if not len(self._readers) and not len(self._writers):
return []
timeout = None if timeout is None else max(timeout, 0.0)
ready = []
r, w, _ = _syscall_wrapper(self._select, True, self._readers,
self._writers, timeout)
r = set(r)
w = set(w)
for fd in r | w:
events = 0
if fd in r:
events |= EVENT_READ
if fd in w:
events |= EVENT_WRITE
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
if hasattr(select, "poll"):
class PollSelector(BaseSelector):
""" Poll-based selector """
def __init__(self):
super(PollSelector, self).__init__()
self._poll = select.poll()
def register(self, fileobj, events, data=None):
key = super(PollSelector, self).register(fileobj, events, data)
event_mask = 0
if events & EVENT_READ:
event_mask |= select.POLLIN
if events & EVENT_WRITE:
event_mask |= select.POLLOUT
self._poll.register(key.fd, event_mask)
return key
def unregister(self, fileobj):
key = super(PollSelector, self).unregister(fileobj)
self._poll.unregister(key.fd)
return key
def _wrap_poll(self, timeout=None):
""" Wrapper function for select.poll.poll() so that
_syscall_wrapper can work with only seconds. """
if timeout is not None:
if timeout <= 0:
timeout = 0
else:
# select.poll.poll() has a resolution of 1 millisecond,
# round away from zero to wait *at least* timeout seconds.
timeout = math.ceil(timeout * 1e3)
result = self._poll.poll(timeout)
return result
def select(self, timeout=None):
ready = []
fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
for fd, event_mask in fd_events:
events = 0
if event_mask & ~select.POLLIN:
events |= EVENT_WRITE
if event_mask & ~select.POLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
if hasattr(select, "epoll"):
class EpollSelector(BaseSelector):
""" Epoll-based selector """
def __init__(self):
super(EpollSelector, self).__init__()
self._epoll = select.epoll()
def fileno(self):
return self._epoll.fileno()
def register(self, fileobj, events, data=None):
key = super(EpollSelector, self).register(fileobj, events, data)
events_mask = 0
if events & EVENT_READ:
events_mask |= select.EPOLLIN
if events & EVENT_WRITE:
events_mask |= select.EPOLLOUT
_syscall_wrapper(self._epoll.register, False, key.fd, events_mask)
return key
def unregister(self, fileobj):
key = super(EpollSelector, self).unregister(fileobj)
try:
_syscall_wrapper(self._epoll.unregister, False, key.fd)
except SelectorError:
# This can occur when the fd was closed since registry.
pass
return key
def select(self, timeout=None):
if timeout is not None:
if timeout <= 0:
timeout = 0.0
else:
# select.epoll.poll() has a resolution of 1 millisecond
# but luckily takes seconds so we don't need a wrapper
# like PollSelector. Just for better rounding.
timeout = math.ceil(timeout * 1e3) * 1e-3
timeout = float(timeout)
else:
timeout = -1.0 # epoll.poll() must have a float.
# We always want at least 1 to ensure that select can be called
# with no file descriptors registered. Otherwise will fail.
max_events = max(len(self._fd_to_key), 1)
ready = []
fd_events = _syscall_wrapper(self._epoll.poll, True,
timeout=timeout,
maxevents=max_events)
for fd, event_mask in fd_events:
events = 0
if event_mask & ~select.EPOLLIN:
events |= EVENT_WRITE
if event_mask & ~select.EPOLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
def close(self):
self._epoll.close()
super(EpollSelector, self).close()
if hasattr(select, "kqueue"):
class KqueueSelector(BaseSelector):
""" Kqueue / Kevent-based selector """
def __init__(self):
super(KqueueSelector, self).__init__()
self._kqueue = select.kqueue()
def fileno(self):
return self._kqueue.fileno()
def register(self, fileobj, events, data=None):
key = super(KqueueSelector, self).register(fileobj, events, data)
if events & EVENT_READ:
kevent = select.kevent(key.fd,
select.KQ_FILTER_READ,
select.KQ_EV_ADD)
_syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
if events & EVENT_WRITE:
kevent = select.kevent(key.fd,
select.KQ_FILTER_WRITE,
select.KQ_EV_ADD)
_syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
return key
def unregister(self, fileobj):
key = super(KqueueSelector, self).unregister(fileobj)
if key.events & EVENT_READ:
kevent = select.kevent(key.fd,
select.KQ_FILTER_READ,
select.KQ_EV_DELETE)
try:
_syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
except SelectorError:
pass
if key.events & EVENT_WRITE:
kevent = select.kevent(key.fd,
select.KQ_FILTER_WRITE,
select.KQ_EV_DELETE)
try:
_syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
except SelectorError:
pass
return key
def select(self, timeout=None):
if timeout is not None:
timeout = max(timeout, 0)
max_events = len(self._fd_to_key) * 2
ready_fds = {}
kevent_list = _syscall_wrapper(self._kqueue.control, True,
None, max_events, timeout)
for kevent in kevent_list:
fd = kevent.ident
event_mask = kevent.filter
events = 0
if event_mask == select.KQ_FILTER_READ:
events |= EVENT_READ
if event_mask == select.KQ_FILTER_WRITE:
events |= EVENT_WRITE
key = self._key_from_fd(fd)
if key:
if key.fd not in ready_fds:
ready_fds[key.fd] = (key, events & key.events)
else:
old_events = ready_fds[key.fd][1]
ready_fds[key.fd] = (key, (events | old_events) & key.events)
return list(ready_fds.values())
def close(self):
self._kqueue.close()
super(KqueueSelector, self).close()
if not hasattr(select, 'select'): # Platform-specific: AppEngine
HAS_SELECT = False
def _can_allocate(struct):
""" Checks that select structs can be allocated by the underlying
operating system, not just advertised by the select module. We don't
check select() because we'll be hopeful that most platforms that
don't have it available will not advertise it. (ie: GAE) """
try:
# select.poll() objects won't fail until used.
if struct == 'poll':
p = select.poll()
p.poll(0)
# All others will fail on allocation.
else:
getattr(select, struct)().close()
return True
except (OSError, AttributeError) as e:
return False
# Choose the best implementation, roughly:
# kqueue == epoll > poll > select. Devpoll not supported. (See above)
# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
def DefaultSelector():
""" This function serves as a first call for DefaultSelector to
detect if the select module is being monkey-patched incorrectly
by eventlet, greenlet, and preserve proper behavior. """
global _DEFAULT_SELECTOR
if _DEFAULT_SELECTOR is None:
if _can_allocate('kqueue'):
_DEFAULT_SELECTOR = KqueueSelector
elif _can_allocate('epoll'):
_DEFAULT_SELECTOR = EpollSelector
elif _can_allocate('poll'):
_DEFAULT_SELECTOR = PollSelector
elif hasattr(select, 'select'):
_DEFAULT_SELECTOR = SelectSelector
else: # Platform-specific: AppEngine
raise ValueError('Platform does not have a selector')
return _DEFAULT_SELECTOR()
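def _selector_usage_sketch():  # pragma: no cover
    """ Illustrative usage sketch only, not part of the original backport.
    Assumes a POSIX platform where socket.socketpair() exists: wait for one
    end of a socket pair to become readable with whatever selector the
    platform provides. """
    sock_a, sock_b = socket.socketpair()
    selector = DefaultSelector()
    selector.register(sock_a, EVENT_READ, data="reader")
    sock_b.send(b"ping")
    for key, events in selector.select(timeout=1.0):
        if events & EVENT_READ:
            assert key.fileobj.recv(4) == b"ping"
    selector.unregister(sock_a)
    selector.close()
    sock_a.close()
    sock_b.close()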
| |
from rpy2.robjects.packages import importr
import os
import numpy as np
from scipy.stats import norm as ndist
from selection.covtest import reduced_covtest, covtest
from selection.affine import constraints, gibbs_test
from selection.forward_step import forward_stepwise
n, p, sigma = 50, 80, 1.0
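# Module-level defaults handed to simulation() below; note that instance()
# draws its own dimensions (n=200, p=100, k=5) inside R.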
from multi_forward_step import forward_step
def sample_split(X, Y, sigma=None,
nstep=10,
burnin=1000,
ndraw=5000,
reduced=True):
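    """
    Compare selective inference procedures on a 50/50 sample split.
    Forward stepwise is run on the first half of the data and, in parallel,
    on the full data. At each of `nstep` steps the function records a
    sample-splitting p-value (computed on the second half), covariance-test
    and spacings p-values on the full data and, if `reduced` is True,
    Gibbs-test p-values with sigma treated as known and unknown.
    Returns (split_P, reduced_Pknown, reduced_Punknown, spacings_P,
    covtest_P, variables) where `variables` are the variables selected by
    the first-half fit.
    """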
n, p = X.shape
half_n = int(n/2)
X1, Y1 = X[:half_n,:]*1., Y[:half_n]*1.
X1 -= X1.mean(0)[None,:]
Y1 -= Y1.mean()
X2, Y2 = X[half_n:], Y[half_n:]
X2 -= X2.mean(0)[None,:]
Y2 -= Y2.mean()
FS_half = forward_stepwise(X1, Y1) # sample splitting model
FS_full = forward_stepwise(X.copy(), Y.copy()) # full data model
spacings_P = []
split_P = []
reduced_Pknown = []
reduced_Punknown = []
covtest_P = []
for i in range(nstep):
FS_half.next()
if FS_half.P[i] is not None:
RX = FS_half.X - FS_half.P[i](FS_half.X)
RY = FS_half.Y - FS_half.P[i](FS_half.Y)
covariance = centering(FS_half.Y.shape[0]) - np.dot(FS_half.P[i].U, FS_half.P[i].U.T)
else:
RX = FS_half.X
RY = FS_half.Y
covariance = centering(FS_half.Y.shape[0])
RX -= RX.mean(0)[None,:]
RX /= (RX.std(0)[None,:] * np.sqrt(RX.shape[0]))
# covtest on half -- not saved
con, pval, idx, sign = covtest(RX, RY, sigma=sigma,
covariance=covariance,
exact=True)
# spacings on half -- not saved
eta1 = RX[:,idx] * sign
Acon = constraints(FS_half.A, np.zeros(FS_half.A.shape[0]),
covariance=centering(FS_half.Y.shape[0]))
Acon.covariance *= sigma**2
Acon.pivot(eta1, FS_half.Y)
# sample split
eta2 = np.linalg.pinv(X2[:,FS_half.variables])[-1]
eta_sigma = np.linalg.norm(eta2) * sigma
split_P.append(2*ndist.sf(np.fabs((eta2*Y2).sum() / eta_sigma)))
# inference on full mu using split model, this \beta^+_s.
zero_block = np.zeros((Acon.linear_part.shape[0], (n-half_n)))
linear_part = np.hstack([Acon.linear_part, zero_block])
Fcon = constraints(linear_part, Acon.offset,
covariance=centering(n))
Fcon.covariance *= sigma**2
if i > 0:
U = np.linalg.pinv(X[:,FS_half.variables[:-1]])
Uy = np.dot(U, Y)
Fcon = Fcon.conditional(U, Uy)
else:
Fcon = Fcon
eta_full = np.linalg.pinv(X[:,FS_half.variables])[-1]
if reduced:
reduced_pval = gibbs_test(Fcon, Y, eta_full,
ndraw=ndraw,
burnin=burnin,
sigma_known=sigma is not None,
alternative='twosided')[0]
reduced_Pknown.append(reduced_pval)
reduced_pval = gibbs_test(Fcon, Y, eta_full,
ndraw=ndraw,
burnin=burnin,
sigma_known=False,
alternative='twosided')[0]
reduced_Punknown.append(reduced_pval)
# now use all the data
FS_full.next()
if FS_full.P[i] is not None:
RX = X - FS_full.P[i](X)
RY = Y - FS_full.P[i](Y)
covariance = centering(RY.shape[0]) - np.dot(FS_full.P[i].U, FS_full.P[i].U.T)
else:
RX = X
RY = Y.copy()
covariance = centering(RY.shape[0])
RX -= RX.mean(0)[None,:]
RX /= RX.std(0)[None,:]
con, pval, idx, sign = covtest(RX, RY, sigma=sigma,
covariance=covariance,
exact=False)
covtest_P.append(pval)
# spacings on full data
eta1 = RX[:,idx] * sign
Acon = constraints(FS_full.A, np.zeros(FS_full.A.shape[0]),
centering(RY.shape[0]))
Acon.covariance *= sigma**2
spacings_P.append(Acon.pivot(eta1, Y))
return split_P, reduced_Pknown, reduced_Punknown, spacings_P, covtest_P, FS_half.variables
def centering(n):
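    """Return the n x n centering projection matrix I - 11^T / n."""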
return np.identity(n) - np.ones((n,n)) / n
import rpy2.robjects as rpy
from rpy2.robjects.numpy2ri import numpy2ri
rpy.conversion.py2ri = numpy2ri
def instance():
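    """
    Generate one simulated dataset in R: a standardized n x p iid Gaussian
    design with k signal variables, returned as numpy arrays (X, y)
    together with k.
    """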
rpy.r('''
n <- 200 # number of observations
p <- 100 # number of variables
k <- 5 # number of signal variables
sigma <- 1 # noise standard deviation
MAX_STEPS <- 25 # set to p in actual simulations
# Parameter vector
beta_factor <- 0.35 # signal factor (others are 0.25, 0.175)
scale_factor <- sqrt(2)*gamma((n+1)/2)/gamma(n/2)
beta <- beta_factor*scale_factor*c(seq(2,sqrt(2*log(p)), length.out=k), rep(0,p-k))
# Generate data
X <- matrix(rnorm(p*n), n, p) # iid design
# Standardize X
mx<-colMeans(X); sx<-sqrt(apply(X,2,var)); X<-scale(X,mx,sx)/sqrt(n-1)
y <- X%*%beta + sigma*rnorm(n)
''')
X = np.array(rpy.r('X'))
y = np.array(rpy.r('y')).reshape(-1)
k = np.array(rpy.r('k')).reshape(-1)
print X.shape, y.shape
return X, y, int(k[0])
def simulation(n, p, sigma, nsim=500, label=0,
reduced=True,
reduced_full=True): # nnz = number nonzero
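    """
    Run `nsim` replications of sample_split (and, if `reduced_full` is True,
    the full-data forward_step), print summary means/SDs and save the
    p-value and hypothesis arrays to .npy files tagged with `label`.
    """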
splitP = []
covtestP = []
spacings = []
reduced_known = []
reduced_unknown = []
reduced_known_full = []
reduced_unknown_full = []
hypotheses = []
hypotheses_full = []
for i in range(nsim):
X, Y, nnz = instance()
Y -= Y.mean()
split = sample_split(X.copy(),
Y.copy(),
sigma=sigma,
burnin=1000,
ndraw=2000,
nstep=10,
reduced=reduced)
splitP.append(split[0])
reduced_known.append(split[1])
reduced_unknown.append(split[2])
spacings.append(split[3])
covtestP.append(split[4])
hypotheses.append([var in range(nnz) for var in split[5]])
if reduced_full:
fs = forward_step(X, Y,
sigma=sigma,
burnin=1000,
ndraw=2000,
nstep=10)
reduced_known_full.append(fs[1])
reduced_unknown_full.append(fs[2])
hypotheses_full.append([var in range(nnz) for var in fs[4]])
for D, name in zip([splitP, spacings, covtestP], ['split', 'spacings', 'covtest']):
means = map(lambda x: x[~np.isnan(x)].mean(), np.array(D).T)[:(nnz+3)]
SDs = map(lambda x: x[~np.isnan(x)].std(), np.array(D).T)[:(nnz+3)]
print means, SDs, name
if reduced:
print (np.mean(np.array(reduced_known)[:,:(nnz+3)],0),
np.std(np.array(reduced_known)[:,:(nnz+3)],0), 'reduced known split')
print (np.mean(np.array(reduced_unknown)[:,:(nnz+3)],0),
np.std(np.array(reduced_unknown)[:,:(nnz+3)],0), 'reduced unknown split'), i
if reduced_full:
print (np.mean(np.array(reduced_unknown_full)[:,:(nnz+3)],0),
np.std(np.array(reduced_unknown_full)[:,:(nnz+3)],0), 'reduced unknown full'), i
print (np.mean(np.array(reduced_known_full)[:,:(nnz+3)],0),
np.std(np.array(reduced_known_full)[:,:(nnz+3)],0), 'reduced known full'), i
if reduced:
np.save('reduced_split_known_alex%d.npy' % (label,), np.array(reduced_known))
np.save('reduced_split_unknown_alex%d.npy' % (label,), np.array(reduced_unknown))
np.save('split_alex%d.npy' % (label,), np.array(splitP))
np.save('spacings_split_alex%d.npy' % (label,), np.array(spacings))
np.save('covtest_split_alex%d.npy' % (label,), np.array(covtestP))
np.save('hypotheses_split__alex%d.npy' % (label,), np.array(hypotheses))
if reduced_full:
np.save('hypotheses_splitfull__alex%d.npy' % (label,), np.array(hypotheses_full))
np.save('reduced_splitfull_known_alex%d.npy' % (label,), np.array(reduced_known_full))
np.save('reduced_splitfull_unknown_alex%d.npy' % (label,), np.array(reduced_unknown_full))
#os.system('cp *split*npy ~/Dropbox/sample_split')
if __name__ == "__main__":
import sys
if len(sys.argv) > 1:
simulation(n, p, sigma, label=int(sys.argv[1]), reduced_full=True, reduced=False)
| |
import datetime
from operator import attrgetter
from django.core.exceptions import FieldError
from django.db import models
from django.db.models.fields.related import ForeignObject
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.utils import translation
from .models import (
Article, ArticleIdea, ArticleTag, ArticleTranslation, Country, Friendship,
Group, Membership, NewsArticle, Person,
)
# Note that these tests are testing internal implementation details.
# ForeignObject is not part of public API.
class MultiColumnFKTests(TestCase):
def setUp(self):
# Creating countries
self.usa = Country.objects.create(name="United States of America")
self.soviet_union = Country.objects.create(name="Soviet Union")
Person()
# Creating People
self.bob = Person()
self.bob.name = 'Bob'
self.bob.person_country = self.usa
self.bob.save()
self.jim = Person.objects.create(name='Jim', person_country=self.usa)
self.george = Person.objects.create(name='George', person_country=self.usa)
self.jane = Person.objects.create(name='Jane', person_country=self.soviet_union)
self.mark = Person.objects.create(name='Mark', person_country=self.soviet_union)
self.sam = Person.objects.create(name='Sam', person_country=self.soviet_union)
# Creating Groups
self.kgb = Group.objects.create(name='KGB', group_country=self.soviet_union)
self.cia = Group.objects.create(name='CIA', group_country=self.usa)
self.republican = Group.objects.create(name='Republican', group_country=self.usa)
self.democrat = Group.objects.create(name='Democrat', group_country=self.usa)
def test_get_succeeds_on_multicolumn_match(self):
# Membership objects have access to their related Person if both
# country_ids match between them
membership = Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id)
person = membership.person
self.assertEqual((person.id, person.name), (self.bob.id, "Bob"))
def test_get_fails_on_multicolumn_mismatch(self):
        # Accessing membership.person raises DoesNotExist when there is no
        # Person with the same id and country_id
membership = Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.jane.id, group_id=self.cia.id)
self.assertRaises(Person.DoesNotExist, getattr, membership, 'person')
def test_reverse_query_returns_correct_result(self):
        # Creating a valid membership because it has the same country as the person
Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id)
        # Creating an invalid membership because it has a different country than the person
Membership.objects.create(
membership_country_id=self.soviet_union.id, person_id=self.bob.id,
group_id=self.republican.id)
self.assertQuerysetEqual(
self.bob.membership_set.all(), [
self.cia.id
],
attrgetter("group_id")
)
def test_query_filters_correctly(self):
        # Creating two valid memberships
Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id)
Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.jim.id,
group_id=self.cia.id)
# Creating an invalid membership
Membership.objects.create(membership_country_id=self.soviet_union.id,
person_id=self.george.id, group_id=self.cia.id)
self.assertQuerysetEqual(
Membership.objects.filter(person__name__contains='o'), [
self.bob.id
],
attrgetter("person_id")
)
def test_reverse_query_filters_correctly(self):
timemark = datetime.datetime.utcnow()
timedelta = datetime.timedelta(days=1)
        # Creating two valid memberships
Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.bob.id,
group_id=self.cia.id, date_joined=timemark - timedelta)
Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.jim.id,
group_id=self.cia.id, date_joined=timemark + timedelta)
# Creating an invalid membership
Membership.objects.create(
membership_country_id=self.soviet_union.id, person_id=self.george.id,
group_id=self.cia.id, date_joined=timemark + timedelta)
self.assertQuerysetEqual(
Person.objects.filter(membership__date_joined__gte=timemark), [
'Jim'
],
attrgetter('name')
)
def test_forward_in_lookup_filters_correctly(self):
Membership.objects.create(membership_country_id=self.usa.id, person_id=self.bob.id,
group_id=self.cia.id)
Membership.objects.create(membership_country_id=self.usa.id, person_id=self.jim.id,
group_id=self.cia.id)
# Creating an invalid membership
Membership.objects.create(
membership_country_id=self.soviet_union.id, person_id=self.george.id,
group_id=self.cia.id)
self.assertQuerysetEqual(
Membership.objects.filter(person__in=[self.george, self.jim]), [
self.jim.id,
],
attrgetter('person_id')
)
self.assertQuerysetEqual(
Membership.objects.filter(person__in=Person.objects.filter(name='Jim')), [
self.jim.id,
],
attrgetter('person_id')
)
def test_double_nested_query(self):
m1 = Membership.objects.create(membership_country_id=self.usa.id, person_id=self.bob.id,
group_id=self.cia.id)
m2 = Membership.objects.create(membership_country_id=self.usa.id, person_id=self.jim.id,
group_id=self.cia.id)
Friendship.objects.create(from_friend_country_id=self.usa.id, from_friend_id=self.bob.id,
to_friend_country_id=self.usa.id, to_friend_id=self.jim.id)
self.assertQuerysetEqual(Membership.objects.filter(
person__in=Person.objects.filter(
from_friend__in=Friendship.objects.filter(
to_friend__in=Person.objects.all()))),
[m1], lambda x: x)
self.assertQuerysetEqual(Membership.objects.exclude(
person__in=Person.objects.filter(
from_friend__in=Friendship.objects.filter(
to_friend__in=Person.objects.all()))),
[m2], lambda x: x)
def test_select_related_foreignkey_forward_works(self):
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
with self.assertNumQueries(1):
people = [m.person for m in Membership.objects.select_related('person').order_by('pk')]
normal_people = [m.person for m in Membership.objects.all().order_by('pk')]
self.assertEqual(people, normal_people)
def test_prefetch_foreignkey_forward_works(self):
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
with self.assertNumQueries(2):
people = [
m.person for m in Membership.objects.prefetch_related('person').order_by('pk')]
normal_people = [m.person for m in Membership.objects.order_by('pk')]
self.assertEqual(people, normal_people)
def test_prefetch_foreignkey_reverse_works(self):
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
with self.assertNumQueries(2):
membership_sets = [
list(p.membership_set.all())
for p in Person.objects.prefetch_related('membership_set').order_by('pk')]
normal_membership_sets = [list(p.membership_set.all())
for p in Person.objects.order_by('pk')]
self.assertEqual(membership_sets, normal_membership_sets)
def test_m2m_through_forward_returns_valid_members(self):
# We start out by making sure that the Group 'CIA' has no members.
self.assertQuerysetEqual(
self.cia.members.all(),
[]
)
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.cia)
# Let's check to make sure that it worked. Bob and Jim should be members of the CIA.
self.assertQuerysetEqual(
self.cia.members.all(), [
'Bob',
'Jim'
], attrgetter("name")
)
def test_m2m_through_reverse_returns_valid_members(self):
# We start out by making sure that Bob is in no groups.
self.assertQuerysetEqual(
self.bob.groups.all(),
[]
)
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.bob,
group=self.republican)
# Bob should be in the CIA and a Republican
self.assertQuerysetEqual(
self.bob.groups.all(), [
'CIA',
'Republican'
], attrgetter("name")
)
def test_m2m_through_forward_ignores_invalid_members(self):
# We start out by making sure that the Group 'CIA' has no members.
self.assertQuerysetEqual(
self.cia.members.all(),
[]
)
# Something adds Jane to the CIA group, but Jane's country (the Soviet Union) isn't the CIA's country
Membership.objects.create(membership_country=self.usa, person=self.jane, group=self.cia)
# There should still be no members in CIA
self.assertQuerysetEqual(
self.cia.members.all(),
[]
)
def test_m2m_through_reverse_ignores_invalid_members(self):
# We start out by making sure that Jane has no groups.
self.assertQuerysetEqual(
self.jane.groups.all(),
[]
)
# Something adds Jane to the CIA group, but Jane's country (the Soviet Union) isn't the CIA's country
Membership.objects.create(membership_country=self.usa, person=self.jane, group=self.cia)
# Jane should still not be in any groups
self.assertQuerysetEqual(
self.jane.groups.all(),
[]
)
def test_m2m_through_on_self_works(self):
self.assertQuerysetEqual(
self.jane.friends.all(),
[]
)
Friendship.objects.create(
from_friend_country=self.jane.person_country, from_friend=self.jane,
to_friend_country=self.george.person_country, to_friend=self.george)
self.assertQuerysetEqual(
self.jane.friends.all(),
['George'], attrgetter("name")
)
def test_m2m_through_on_self_ignores_mismatch_columns(self):
self.assertQuerysetEqual(self.jane.friends.all(), [])
# Note that we use ids instead of instances here. Assigning instances to
# ForeignObject properties would set all of the related fields from the given instance.
Friendship.objects.create(
from_friend_id=self.jane.id, to_friend_id=self.george.id,
to_friend_country_id=self.jane.person_country_id,
from_friend_country_id=self.george.person_country_id)
self.assertQuerysetEqual(self.jane.friends.all(), [])
def test_prefetch_related_m2m_forward_works(self):
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
with self.assertNumQueries(2):
members_lists = [list(g.members.all())
for g in Group.objects.prefetch_related('members')]
normal_members_lists = [list(g.members.all()) for g in Group.objects.all()]
self.assertEqual(members_lists, normal_members_lists)
def test_prefetch_related_m2m_reverse_works(self):
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
with self.assertNumQueries(2):
groups_lists = [list(p.groups.all()) for p in Person.objects.prefetch_related('groups')]
normal_groups_lists = [list(p.groups.all()) for p in Person.objects.all()]
self.assertEqual(groups_lists, normal_groups_lists)
@translation.override('fi')
def test_translations(self):
a1 = Article.objects.create(pub_date=datetime.date.today())
at1_fi = ArticleTranslation(article=a1, lang='fi', title='Otsikko', body='Diipadaapa')
at1_fi.save()
at2_en = ArticleTranslation(article=a1, lang='en', title='Title', body='Lalalalala')
at2_en.save()
self.assertEqual(Article.objects.get(pk=a1.pk).active_translation, at1_fi)
with self.assertNumQueries(1):
fetched = Article.objects.select_related('active_translation').get(
active_translation__title='Otsikko')
self.assertEqual(fetched.active_translation.title, 'Otsikko')
a2 = Article.objects.create(pub_date=datetime.date.today())
at2_fi = ArticleTranslation(article=a2, lang='fi', title='Atsikko', body='Diipadaapa',
abstract='dipad')
at2_fi.save()
a3 = Article.objects.create(pub_date=datetime.date.today())
at3_en = ArticleTranslation(article=a3, lang='en', title='A title', body='lalalalala',
abstract='lala')
at3_en.save()
# Test model initialization with active_translation field.
a3 = Article(id=a3.id, pub_date=a3.pub_date, active_translation=at3_en)
a3.save()
self.assertEqual(
list(Article.objects.filter(active_translation__abstract=None)),
[a1, a3])
self.assertEqual(
list(Article.objects.filter(active_translation__abstract=None,
active_translation__pk__isnull=False)),
[a1])
with translation.override('en'):
self.assertEqual(
list(Article.objects.filter(active_translation__abstract=None)),
[a1, a2])
def test_foreign_key_raises_informative_does_not_exist(self):
referrer = ArticleTranslation()
with self.assertRaisesMessage(Article.DoesNotExist, 'ArticleTranslation has no article'):
referrer.article
def test_foreign_key_related_query_name(self):
a1 = Article.objects.create(pub_date=datetime.date.today())
ArticleTag.objects.create(article=a1, name="foo")
self.assertEqual(Article.objects.filter(tag__name="foo").count(), 1)
self.assertEqual(Article.objects.filter(tag__name="bar").count(), 0)
with self.assertRaises(FieldError):
Article.objects.filter(tags__name="foo")
def test_many_to_many_related_query_name(self):
a1 = Article.objects.create(pub_date=datetime.date.today())
i1 = ArticleIdea.objects.create(name="idea1")
a1.ideas.add(i1)
self.assertEqual(Article.objects.filter(idea_things__name="idea1").count(), 1)
self.assertEqual(Article.objects.filter(idea_things__name="idea2").count(), 0)
with self.assertRaises(FieldError):
Article.objects.filter(ideas__name="idea1")
@translation.override('fi')
def test_inheritance(self):
na = NewsArticle.objects.create(pub_date=datetime.date.today())
ArticleTranslation.objects.create(
article=na, lang="fi", title="foo", body="bar")
self.assertQuerysetEqual(
NewsArticle.objects.select_related('active_translation'),
[na], lambda x: x
)
with self.assertNumQueries(1):
self.assertEqual(
NewsArticle.objects.select_related(
'active_translation')[0].active_translation.title,
"foo")
@skipUnlessDBFeature('has_bulk_insert')
def test_batch_create_foreign_object(self):
""" See: https://code.djangoproject.com/ticket/21566 """
objs = [Person(name="abcd_%s" % i, person_country=self.usa) for i in range(0, 5)]
Person.objects.bulk_create(objs, 10)
class TestModelCheckTests(SimpleTestCase):
def test_check_composite_foreign_object(self):
class Parent(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
class Meta:
unique_together = (('a', 'b'),)
class Child(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
value = models.CharField(max_length=255)
parent = ForeignObject(
Parent,
on_delete=models.SET_NULL,
from_fields=('a', 'b'),
to_fields=('a', 'b'),
related_name='children',
)
self.assertEqual(Child._meta.get_field('parent').check(from_model=Child), [])
def test_check_subset_composite_foreign_object(self):
class Parent(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
c = models.PositiveIntegerField()
class Meta:
unique_together = (('a', 'b'),)
class Child(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
c = models.PositiveIntegerField()
d = models.CharField(max_length=255)
parent = ForeignObject(
Parent,
on_delete=models.SET_NULL,
from_fields=('a', 'b', 'c'),
to_fields=('a', 'b', 'c'),
related_name='children',
)
self.assertEqual(Child._meta.get_field('parent').check(from_model=Child), [])
| |
###############################################################################
##
## Copyright (C) 2011-2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
__all__ = ['WebSocketServerProtocol',
'WebSocketServerFactory',
'WebSocketClientProtocol',
'WebSocketClientFactory',
'WrappingWebSocketServerFactory',
'WrappingWebSocketClientFactory',
'listenWS',
'connectWS']
from base64 import b64encode, b64decode
from zope.interface import implementer
import twisted.internet.protocol
from twisted.internet.defer import maybeDeferred
from twisted.python import log
from twisted.internet.interfaces import ITransport
from autobahn.websocket import protocol
from autobahn.websocket import http
from autobahn.websocket.compress import PerMessageDeflateOffer, \
PerMessageDeflateOfferAccept, \
PerMessageDeflateResponse, \
PerMessageDeflateResponseAccept
class WebSocketAdapterProtocol(twisted.internet.protocol.Protocol):
"""
Adapter class for Twisted WebSocket client and server protocols.
"""
def connectionMade(self):
## the peer we are connected to
peer = self.transport.getPeer()
try:
self.peer = "%s:%d" % (peer.host, peer.port)
except:
## eg Unix Domain sockets don't have host/port
self.peer = str(peer)
self._connectionMade()
## Set "Nagle"
try:
self.transport.setTcpNoDelay(self.tcpNoDelay)
except:
## eg Unix Domain sockets throw Errno 22 on this
pass
def connectionLost(self, reason):
self._connectionLost(reason)
def dataReceived(self, data):
self._dataReceived(data)
def _closeConnection(self, abort = False):
if abort:
self.transport.abortConnection()
else:
self.transport.loseConnection()
def _onOpen(self):
self.onOpen()
def _onMessageBegin(self, isBinary):
self.onMessageBegin(isBinary)
def _onMessageFrameBegin(self, length):
self.onMessageFrameBegin(length)
def _onMessageFrameData(self, payload):
self.onMessageFrameData(payload)
def _onMessageFrameEnd(self):
self.onMessageFrameEnd()
def _onMessageFrame(self, payload):
self.onMessageFrame(payload)
def _onMessageEnd(self):
self.onMessageEnd()
def _onMessage(self, payload, isBinary):
self.onMessage(payload, isBinary)
def _onPing(self, payload):
self.onPing(payload)
def _onPong(self, payload):
self.onPong(payload)
def _onClose(self, wasClean, code, reason):
self.onClose(wasClean, code, reason)
def registerProducer(self, producer, streaming):
"""
Register a Twisted producer with this protocol.
Modes: Hybi, Hixie
:param producer: A Twisted push or pull producer.
:type producer: object
:param streaming: Producer type.
:type streaming: bool
"""
self.transport.registerProducer(producer, streaming)
class WebSocketServerProtocol(WebSocketAdapterProtocol, protocol.WebSocketServerProtocol):
"""
Base class for Twisted WebSocket server protocols.
"""
def _onConnect(self, request):
## onConnect() will return the selected subprotocol or None
## or a pair (protocol, headers) or raise an HttpException
##
res = maybeDeferred(self.onConnect, request)
res.addCallback(self.succeedHandshake)
def forwardError(failure):
if failure.check(http.HttpException):
return self.failHandshake(failure.value.reason, failure.value.code)
else:
if self.debug:
self.factory._log("Unexpected exception in onConnect ['%s']" % failure.value)
return self.failHandshake(http.INTERNAL_SERVER_ERROR[1], http.INTERNAL_SERVER_ERROR[0])
res.addErrback(forwardError)
class WebSocketClientProtocol(WebSocketAdapterProtocol, protocol.WebSocketClientProtocol):
"""
Base class for Twisted WebSocket client protocols.
"""
def _onConnect(self, response):
self.onConnect(response)
class WebSocketAdapterFactory:
"""
Adapter class for Twisted WebSocket client and server factories.
"""
def _log(self, msg):
log.msg(msg)
def _callLater(self, delay, fun):
return self.reactor.callLater(delay, fun)
class WebSocketServerFactory(WebSocketAdapterFactory, protocol.WebSocketServerFactory, twisted.internet.protocol.ServerFactory):
"""
Base class for Twisted WebSocket server factories.
.. seealso:: `twisted.internet.protocol.ServerFactory <http://twistedmatrix.com/documents/current/api/twisted.internet.protocol.ServerFactory.html>`_
"""
def __init__(self, *args, **kwargs):
"""
In addition to all arguments to the constructor of
:class:`autobahn.websocket.protocol.WebSocketServerFactory`,
you can supply a `reactor` keyword argument to specify the
Twisted reactor to be used.
"""
## lazy import to avoid reactor install upon module import
if 'reactor' in kwargs:
if kwargs['reactor']:
self.reactor = kwargs['reactor']
else:
from twisted.internet import reactor
self.reactor = reactor
del kwargs['reactor']
else:
from twisted.internet import reactor
self.reactor = reactor
protocol.WebSocketServerFactory.__init__(self, *args, **kwargs)
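## Example (illustrative; URL and reactor name are placeholders): pass an
## explicit reactor when constructing the factory, e.g.
##   WebSocketServerFactory("ws://localhost:9000", reactor=myReactor)
## otherwise the default Twisted reactor is imported and used.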
class WebSocketClientFactory(WebSocketAdapterFactory, protocol.WebSocketClientFactory, twisted.internet.protocol.ClientFactory):
"""
Base class for Twisted WebSocket client factories.
.. seealso:: `twisted.internet.protocol.ClientFactory <http://twistedmatrix.com/documents/current/api/twisted.internet.protocol.ClientFactory.html>`_
"""
def __init__(self, *args, **kwargs):
"""
In addition to all arguments to the constructor of
:class:`autobahn.websocket.protocol.WebSocketClientFactory`,
you can supply a `reactor` keyword argument to specify the
Twisted reactor to be used.
"""
## lazy import to avoid reactor install upon module import
if 'reactor' in kwargs:
if kwargs['reactor']:
self.reactor = kwargs['reactor']
else:
from twisted.internet import reactor
self.reactor = reactor
del kwargs['reactor']
else:
from twisted.internet import reactor
self.reactor = reactor
protocol.WebSocketClientFactory.__init__(self, *args, **kwargs)
@implementer(ITransport)
class WrappingWebSocketAdapter:
"""
An adapter for stream-based transport over WebSocket.
This follows "websockify" (https://github.com/kanaka/websockify)
and should be compatible with that.
It uses WebSocket subprotocol negotiation and 2 subprotocols:
- binary
- base64
Octets are either transmitted as the payload of WebSocket binary
messages when using the 'binary' subprotocol, or encoded with Base64
and then transmitted as the payload of WebSocket text messages when
using the 'base64' subprotocol.
"""
def onConnect(self, requestOrResponse):
## Negotiate either the 'binary' or the 'base64' WebSocket subprotocol
##
if isinstance(requestOrResponse, protocol.ConnectionRequest):
request = requestOrResponse
for p in request.protocols:
if p in ['binary', 'base64']:
self._binaryMode = (p == 'binary')
return p
raise http.HttpException(http.NOT_ACCEPTABLE[0], "this server only speaks 'binary' and 'base64' WebSocket subprotocols")
elif isinstance(requestOrResponse, protocol.ConnectionResponse):
response = requestOrResponse
if response.protocol not in ['binary', 'base64']:
self.failConnection(protocol.WebSocketProtocol.CLOSE_STATUS_CODE_PROTOCOL_ERROR, "this client only speaks 'binary' and 'base64' WebSocket subprotocols")
self._binaryMode = (response.protocol == 'binary')
else:
## should not arrive here
raise Exception("logic error")
def onOpen(self):
self._proto.connectionMade()
def onMessage(self, payload, isBinary):
if isBinary != self._binaryMode:
self.failConnection(protocol.WebSocketProtocol.CLOSE_STATUS_CODE_UNSUPPORTED_DATA, "message payload type does not match the negotiated subprotocol")
else:
if not isBinary:
try:
payload = b64decode(payload)
except Exception as e:
self.failConnection(protocol.WebSocketProtocol.CLOSE_STATUS_CODE_INVALID_PAYLOAD, "message payload base64 decoding error: {}".format(e))
#print("forwarding payload: {}".format(binascii.hexlify(payload)))
self._proto.dataReceived(payload)
def onClose(self, wasClean, code, reason):
self._proto.connectionLost(None)
def write(self, data):
#print("sending payload: {}".format(binascii.hexlify(data)))
## part of ITransport
assert(type(data) == bytes)
if self._binaryMode:
self.sendMessage(data, isBinary = True)
else:
data = b64encode(data)
self.sendMessage(data, isBinary = False)
def writeSequence(self, data):
## part of ITransport
for d in data:
self.write(d)
def loseConnection(self):
## part of ITransport
self.sendClose()
class WrappingWebSocketServerProtocol(WrappingWebSocketAdapter, WebSocketServerProtocol):
"""
Server protocol for stream-based transport over WebSocket.
"""
class WrappingWebSocketClientProtocol(WrappingWebSocketAdapter, WebSocketClientProtocol):
"""
Client protocol for stream-based transport over WebSocket.
"""
class WrappingWebSocketServerFactory(WebSocketServerFactory):
"""
Wrapping server factory for stream-based transport over WebSocket.
"""
def __init__(self,
factory,
url,
reactor = None,
enableCompression = True,
autoFragmentSize = 0,
debug = False):
"""
Constructor.
:param factory: Stream-based factory to be wrapped.
:type factory: A subclass of `twisted.internet.protocol.Factory`
:param url: WebSocket URL of the server this server factory will work for.
:type url: str
"""
self._factory = factory
WebSocketServerFactory.__init__(self,
url = url,
reactor = reactor,
protocols = ['binary', 'base64'],
debug = debug)
## automatically fragment outgoing traffic into WebSocket frames
## of this size
self.setProtocolOptions(autoFragmentSize = autoFragmentSize)
## play nice and perform WS closing handshake
self.setProtocolOptions(failByDrop = False)
if enableCompression:
## Enable WebSocket extension "permessage-deflate".
##
## Function to accept offers from the client ..
def accept(offers):
for offer in offers:
if isinstance(offer, PerMessageDeflateOffer):
return PerMessageDeflateOfferAccept(offer)
self.setProtocolOptions(perMessageCompressionAccept = accept)
def buildProtocol(self, addr):
proto = WrappingWebSocketServerProtocol()
proto.factory = self
proto._proto = self._factory.buildProtocol(addr)
proto._proto.transport = proto
return proto
def startFactory(self):
self._factory.startFactory()
WebSocketServerFactory.startFactory(self)
def stopFactory(self):
self._factory.stopFactory()
WebSocketServerFactory.stopFactory(self)
class WrappingWebSocketClientFactory(WebSocketClientFactory):
"""
Wrapping client factory for stream-based transport over WebSocket.
"""
def __init__(self,
factory,
url,
reactor = None,
enableCompression = True,
autoFragmentSize = 0,
debug = False):
"""
Constructor.
:param factory: Stream-based factory to be wrapped.
:type factory: A subclass of `twisted.internet.protocol.Factory`
:param url: WebSocket URL of the server this client factory will connect to.
:type url: str
"""
self._factory = factory
WebSocketClientFactory.__init__(self,
url = url,
reactor = reactor,
protocols = ['binary', 'base64'],
debug = debug)
## automatically fragment outgoing traffic into WebSocket frames
## of this size
self.setProtocolOptions(autoFragmentSize = autoFragmentSize)
## play nice and perform WS closing handshake
self.setProtocolOptions(failByDrop = False)
if enableCompression:
## Enable WebSocket extension "permessage-deflate".
##
## The extensions offered to the server ..
offers = [PerMessageDeflateOffer()]
self.setProtocolOptions(perMessageCompressionOffers = offers)
## Function to accept responses from the server ..
def accept(response):
if isinstance(response, PerMessageDeflateResponse):
return PerMessageDeflateResponseAccept(response)
self.setProtocolOptions(perMessageCompressionAccept = accept)
def buildProtocol(self, addr):
proto = WrappingWebSocketClientProtocol()
proto.factory = self
proto._proto = self._factory.buildProtocol(addr)
proto._proto.transport = proto
return proto
def connectWS(factory, contextFactory = None, timeout = 30, bindAddress = None):
"""
Establish WebSocket connection to a server. The connection parameters like target
host, port, resource and others are provided via the factory.
:param factory: The WebSocket protocol factory to be used for creating client protocol instances.
:type factory: An :class:`autobahn.websocket.WebSocketClientFactory` instance.
:param contextFactory: SSL context factory, required for secure WebSocket connections ("wss").
:type contextFactory: A `twisted.internet.ssl.ClientContextFactory <http://twistedmatrix.com/documents/current/api/twisted.internet.ssl.ClientContextFactory.html>`_ instance.
:param timeout: Number of seconds to wait before assuming the connection has failed.
:type timeout: int
:param bindAddress: A (host, port) tuple of local address to bind to, or None.
:type bindAddress: tuple
:returns: obj -- An object which implements `twisted.internet.interfaces.IConnector <http://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IConnector.html>`_.
"""
## lazy import to avoid reactor install upon module import
if hasattr(factory, 'reactor'):
reactor = factory.reactor
else:
from twisted.internet import reactor
if factory.proxy is not None:
if factory.isSecure:
raise Exception("WSS over explicit proxies not implemented")
else:
conn = reactor.connectTCP(factory.proxy['host'], factory.proxy['port'], factory, timeout, bindAddress)
else:
if factory.isSecure:
if contextFactory is None:
# create default client SSL context factory when none given
from twisted.internet import ssl
contextFactory = ssl.ClientContextFactory()
conn = reactor.connectSSL(factory.host, factory.port, factory, contextFactory, timeout, bindAddress)
else:
conn = reactor.connectTCP(factory.host, factory.port, factory, timeout, bindAddress)
return conn
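## Example (illustrative; 'MyClientProtocol' and the URL are placeholders):
##
##   factory = WebSocketClientFactory("ws://localhost:9000")
##   factory.protocol = MyClientProtocol   ## a WebSocketClientProtocol subclass
##   connectWS(factory)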
def listenWS(factory, contextFactory = None, backlog = 50, interface = ''):
"""
Listen for incoming WebSocket connections from clients. The connection parameters like
listening port and others are provided via the factory.
:param factory: The WebSocket protocol factory to be used for creating server protocol instances.
:type factory: An :class:`autobahn.websocket.WebSocketServerFactory` instance.
:param contextFactory: SSL context factory, required for secure WebSocket connections ("wss").
:type contextFactory: A twisted.internet.ssl.ContextFactory.
:param backlog: Size of the listen queue.
:type backlog: int
:param interface: The interface (derived from hostname given) to bind to, defaults to '' (all).
:type interface: str
:returns: obj -- An object that implements `twisted.internet.interfaces.IListeningPort <http://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IListeningPort.html>`_.
"""
## lazy import to avoid reactor install upon module import
if hasattr(factory, 'reactor'):
reactor = factory.reactor
else:
from twisted.internet import reactor
if factory.isSecure:
if contextFactory is None:
raise Exception("Secure WebSocket listen requested, but no SSL context factory given")
listener = reactor.listenSSL(factory.port, factory, contextFactory, backlog, interface)
else:
listener = reactor.listenTCP(factory.port, factory, backlog, interface)
return listener
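## Illustrative usage sketch: expose a plain Twisted echo protocol over
## WebSocket via the wrapping factory and listenWS(). The URL and port below
## are arbitrary example values.
if __name__ == '__main__':
   from twisted.internet import reactor
   from twisted.internet.protocol import Factory, Protocol

   class _Echo(Protocol):
      def dataReceived(self, data):
         ## echo received octets straight back to the peer
         self.transport.write(data)

   echoFactory = Factory()
   echoFactory.protocol = _Echo

   ## wrap the stream-based factory so its traffic is carried over the
   ## 'binary'/'base64' subprotocols negotiated by WrappingWebSocketAdapter
   wsFactory = WrappingWebSocketServerFactory(echoFactory, "ws://localhost:9000")
   listenWS(wsFactory)
   reactor.run()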
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Datasets.
The general design philosophy of these dataset loaders is to keep them as simple
as possible. Data processing or manipulation of conditioning information should
be kept in an experiment's main.py, not here.
When data augmentation is enabled, nondeterministic behavior is expected.
"""
# pylint: disable=logging-format-interpolation
# pylint: disable=g-long-lambda
import functools
from typing import Any, Mapping, Optional, Tuple
from . import utils
from absl import logging
from clu import deterministic_data
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
def batch_dataset(dataset, batch_shape):
for b in reversed(batch_shape):
dataset = dataset.batch(b, drop_remainder=True)
return dataset
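# Example (illustrative): with batch_shape=(8, 4) the dataset is batched
# innermost-first, so an element of shape [32, 32, 3] becomes a batch of
# shape [8, 4, 32, 32, 3]:
#   ds = batch_dataset(ds, batch_shape=(8, 4))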
class Dataset:
"""Generic dataset."""
@property
def info(self):
raise NotImplementedError
@property
def data_shape(self):
return self.info['data_shape']
@property
def num_train(self):
return self.info['num_train']
@property
def num_eval(self):
return self.info['num_eval']
@property
def num_classes(self):
return self.info['num_classes']
def _load_tfds(self, *, split, shuffle_seed):
raise NotImplementedError
def _preprocess(self, x, *, split, augment):
"""Preprocess one example."""
raise NotImplementedError
def _shuffle_buffer_size(self, split):
del split
return 50000
def get_shuffled_repeated_dataset(self, *, batch_shape,
split, local_rng, augment):
"""Shuffled and repeated dataset suitable for training.
Shuffling is determined by local_rng, which should be different for
each shard.
Args:
batch_shape: leading shape of batches
split: which dataset split to load
local_rng: rng for shuffling (should be different for each host/shard)
augment: whether to enable data augmentation
Returns:
dataset
"""
local_rng = utils.RngGen(local_rng)
ds = self._load_tfds( # file-level shuffling here
split=split, shuffle_seed=utils.jax_randint(next(local_rng)))
ds = ds.shuffle(
self._shuffle_buffer_size(split),
seed=utils.jax_randint(next(local_rng)))
ds = ds.repeat()
ds = ds.map(
functools.partial(self._preprocess, split=split, augment=augment),
num_parallel_calls=tf.data.AUTOTUNE)
ds = batch_dataset(ds, batch_shape=batch_shape)
return ds.prefetch(tf.data.AUTOTUNE)
def get_padded_one_shot_dataset(self, *, batch_shape,
split, shard_id, num_shards):
"""Non-repeated non-shuffled sharded dataset with padding.
Should not drop any examples. Augmentation is disabled.
Args:
batch_shape: leading shape of batches
split: which dataset split to load
shard_id: current shard id (e.g. process_index)
num_shards: number of shards (e.g. process_count)
Returns:
dataset
"""
ds = self._load_tfds(split=split, shuffle_seed=None)
ds = ds.map(
functools.partial(self._preprocess, split=split, augment=False),
num_parallel_calls=tf.data.AUTOTUNE)
ds = deterministic_data.pad_dataset(
ds, batch_dims=(num_shards, *batch_shape),
cardinality={'train': self.num_train, 'eval': self.num_eval}[split])
ds = ds.shard(index=shard_id, num_shards=num_shards)
ds = batch_dataset(ds, batch_shape=batch_shape)
return ds.prefetch(tf.data.AUTOTUNE)
class CIFAR10(Dataset):
"""CIFAR10 dataset."""
def __init__(self, *, class_conditional, randflip):
self._class_conditional = class_conditional
self._randflip = randflip
self._info = {
'data_shape': (32, 32, 3),
'num_train': 50000,
'num_eval': 10000,
'num_classes': 10 if self._class_conditional else 1
}
@property
def info(self):
return self._info
def _load_tfds(self, *, split, shuffle_seed):
return tfds.load(
'cifar10',
split={'train': 'train', 'eval': 'test'}[split],
shuffle_files=shuffle_seed is not None,
read_config=None if shuffle_seed is None else tfds.ReadConfig(
shuffle_seed=shuffle_seed))
def _preprocess(self, x, *, split, augment):
del split
img = tf.cast(x['image'], tf.float32)
if augment: # NOTE: this makes training nondeterministic
if self._randflip:
aug_img = tf.image.flip_left_right(img)
aug = tf.random.uniform(shape=[]) > 0.5
img = tf.where(aug, aug_img, img)
out = {'image': img}
if self._class_conditional:
out['label'] = tf.cast(x['label'], tf.int32)
return out
def central_square_crop(img):
"""Crop to square along the long edge."""
h, w, _ = tf.unstack(tf.shape(img))
box = tf.where(h > w, [h // 2 - w // 2, 0, w, w], [0, w // 2 - h // 2, h, h])
offset_height, offset_width, target_height, target_width = tf.unstack(box)
return tf.image.crop_to_bounding_box(
img, offset_height, offset_width, target_height, target_width)
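# Example (illustrative): for a 480x640 (h x w) landscape image the box is
# [0, 80, 480, 480] (offset_height, offset_width, target_height, target_width),
# i.e. 80 columns are trimmed from each side of the long edge.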
def decode_and_central_square_crop(img):
"""Crop to square along the long edge."""
h, w, _ = tf.unstack(tf.io.extract_jpeg_shape(img))
box = tf.where(h > w, [h // 2 - w // 2, 0, w, w], [0, w // 2 - h // 2, h, h])
return tf.image.decode_and_crop_jpeg(img, box, channels=3)
class ImageNet(Dataset):
"""ImageNet dataset."""
def __init__(self,
*,
class_conditional,
image_size,
randflip,
extra_image_sizes=()):
"""ImageNet dataset.
Args:
class_conditional: bool: class conditional generation problem; if True,
generated examples will contain a label.
image_size: int: size of image to model
randflip: bool: random flip augmentation
extra_image_sizes: Tuple[int]: also provide image at these resolutions
"""
self._class_conditional = class_conditional
self._image_size = image_size
self._randflip = randflip
self._extra_image_sizes = extra_image_sizes
self._info = {
'data_shape': (self._image_size, self._image_size, 3),
'num_train': 1281167,
'num_eval': 50000,
'num_classes': 1000 if self._class_conditional else 1
}
@property
def info(self):
return self._info
def _load_tfds(self, *, split, shuffle_seed):
return tfds.load(
'imagenet2012',
split={'train': 'train', 'eval': 'validation'}[split],
shuffle_files=shuffle_seed is not None,
read_config=None if shuffle_seed is None else tfds.ReadConfig(
shuffle_seed=shuffle_seed),
decoders={'image': tfds.decode.SkipDecoding()})
def _preprocess(self, x, *, split, augment):
del split # unused
out = {}
# Decode the image and resize
img = tf.cast(decode_and_central_square_crop(x['image']), tf.float32)
if augment:
# NOTE: this makes training nondeterministic
if self._randflip:
logging.info('ImageNet: randflip=True')
img = tf.image.random_flip_left_right(img)
# Standard area resizing
out['image'] = tf.clip_by_value(
tf.image.resize(img, [self._image_size, self._image_size], 'area'),
0, 255)
# Optionally provide the image at other resolutions too
for s in self._extra_image_sizes:
assert isinstance(s, int)
out[f'extra_image_{s}'] = tf.clip_by_value(
tf.image.resize(img, [s, s], 'area'), 0, 255)
# Class label
if self._class_conditional:
out['label'] = tf.cast(x['label'], tf.int32)
return out
class LSUN(Dataset):
"""LSUN dataset."""
def __init__(self, *, subset, image_size, randflip,
extra_image_sizes=()):
"""LSUN datasets.
Args:
subset: str: 'church' or 'bedroom'
image_size: int: size of image to model, 64 or 128
randflip: bool: random flip augmentation
extra_image_sizes: optional extra image sizes
"""
self._subset = subset
self._image_size = image_size
self._randflip = randflip
self._extra_image_sizes = extra_image_sizes
self._info = {
'data_shape': (self._image_size, self._image_size, 3),
'num_train': {'bedroom': 3033042, 'church': 126227}[self._subset],
'num_eval': 300,
'num_classes': 1,
}
@property
def info(self):
return self._info
def _load_tfds(self, *, split, shuffle_seed):
tfds_name = {'church': 'lsun/church_outdoor',
'bedroom': 'lsun/bedroom'}[self._subset]
return tfds.load(
tfds_name,
split={'train': 'train', 'eval': 'validation'}[split],
shuffle_files=shuffle_seed is not None,
read_config=None if shuffle_seed is None else tfds.ReadConfig(
shuffle_seed=shuffle_seed),
decoders={'image': tfds.decode.SkipDecoding()})
def _preprocess(self, x, *, split, augment):
del split # unused
# Decode the image and resize
img = tf.cast(decode_and_central_square_crop(x['image']), tf.float32)
if augment: # NOTE: nondeterministic
if self._randflip:
aug_img = tf.image.flip_left_right(img)
aug = tf.random.uniform(shape=[]) > 0.5
img = tf.where(aug, aug_img, img)
out = {}
out['image'] = tf.clip_by_value(tf.image.resize(
img, [self._image_size, self._image_size], antialias=True), 0, 255)
# Optionally provide the image at other resolutions too
for s in self._extra_image_sizes:
assert isinstance(s, int)
out[f'extra_image_{s}'] = tf.clip_by_value(
tf.image.resize(img, [s, s], antialias=True), 0, 255)
return out
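# Illustrative usage sketch (assumes a JAX PRNGKey is an acceptable
# `local_rng` for utils.RngGen; batch/device sizes are placeholders):
#
#   dataset = CIFAR10(class_conditional=True, randflip=True)
#   train_ds = dataset.get_shuffled_repeated_dataset(
#       batch_shape=(num_devices, per_device_batch), split='train',
#       local_rng=jax.random.PRNGKey(0), augment=True)
#   eval_ds = dataset.get_padded_one_shot_dataset(
#       batch_shape=(num_devices, per_device_batch), split='eval',
#       shard_id=jax.process_index(), num_shards=jax.process_count())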
| |
from __future__ import unicode_literals
import re
from django import forms
from django.contrib.auth.forms import (
AuthenticationForm, PasswordChangeForm, PasswordResetForm,
ReadOnlyPasswordHashField, ReadOnlyPasswordHashWidget, SetPasswordForm,
UserChangeForm, UserCreationForm,
)
from django.contrib.auth.models import User
from django.core import mail
from django.core.mail import EmailMultiAlternatives
from django.forms.fields import CharField, Field
from django.test import TestCase, override_settings
from django.utils import translation
from django.utils.encoding import force_text
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from .settings import AUTH_TEMPLATES
@override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
class UserCreationFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_user_already_exists(self):
data = {
'username': 'testclient',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["username"].errors,
[force_text(User._meta.get_field('username').error_messages['unique'])])
def test_invalid_data(self):
data = {
'username': 'jsmith!',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid')
self.assertEqual(form["username"].errors, [force_text(validator.message)])
def test_password_verification(self):
# The verification password is incorrect.
data = {
'username': 'jsmith',
'password1': 'test123',
'password2': 'test',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["password2"].errors,
[force_text(form.error_messages['password_mismatch'])])
def test_both_passwords(self):
# One (or both) passwords weren't given
data = {'username': 'jsmith'}
form = UserCreationForm(data)
required_error = [force_text(Field.default_error_messages['required'])]
self.assertFalse(form.is_valid())
self.assertEqual(form['password1'].errors, required_error)
self.assertEqual(form['password2'].errors, required_error)
data['password2'] = 'test123'
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['password1'].errors, required_error)
self.assertEqual(form['password2'].errors, [])
def test_success(self):
# The success case.
data = {
'username': 'jsmith@example.com',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
u = form.save()
self.assertEqual(repr(u), '<User: jsmith@example.com>')
@override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
class AuthenticationFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_invalid_username(self):
# The user submits an invalid username.
data = {
'username': 'jsmith_does_not_exist',
'password': 'test123',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(),
[force_text(form.error_messages['invalid_login'] % {
'username': User._meta.get_field('username').verbose_name
})])
def test_inactive_user(self):
# The user is inactive.
data = {
'username': 'inactive',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(),
[force_text(form.error_messages['inactive'])])
def test_inactive_user_i18n(self):
with self.settings(USE_I18N=True), translation.override('pt-br', deactivate=True):
# The user is inactive.
data = {
'username': 'inactive',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(),
[force_text(form.error_messages['inactive'])])
def test_custom_login_allowed_policy(self):
# The user is inactive, but our custom form policy allows them to log in.
data = {
'username': 'inactive',
'password': 'password',
}
class AuthenticationFormWithInactiveUsersOkay(AuthenticationForm):
def confirm_login_allowed(self, user):
pass
form = AuthenticationFormWithInactiveUsersOkay(None, data)
self.assertTrue(form.is_valid())
# If we want to disallow some logins according to custom logic,
# we should raise a django.forms.ValidationError in the form.
class PickyAuthenticationForm(AuthenticationForm):
def confirm_login_allowed(self, user):
if user.username == "inactive":
raise forms.ValidationError("This user is disallowed.")
raise forms.ValidationError("Sorry, nobody's allowed in.")
form = PickyAuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), ['This user is disallowed.'])
data = {
'username': 'testclient',
'password': 'password',
}
form = PickyAuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), ["Sorry, nobody's allowed in."])
def test_success(self):
# The success case
data = {
'username': 'testclient',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.non_field_errors(), [])
def test_username_field_label(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField(label="Name", max_length=75)
form = CustomAuthenticationForm()
self.assertEqual(form['username'].label, "Name")
def test_username_field_label_not_set(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField()
form = CustomAuthenticationForm()
username_field = User._meta.get_field(User.USERNAME_FIELD)
self.assertEqual(form.fields['username'].label, capfirst(username_field.verbose_name))
def test_username_field_label_empty_string(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField(label='')
form = CustomAuthenticationForm()
self.assertEqual(form.fields['username'].label, "")
@override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
class SetPasswordFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_password_verification(self):
# The two new passwords do not match.
user = User.objects.get(username='testclient')
data = {
'new_password1': 'abc123',
'new_password2': 'abc',
}
form = SetPasswordForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["new_password2"].errors,
[force_text(form.error_messages['password_mismatch'])])
def test_success(self):
user = User.objects.get(username='testclient')
data = {
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = SetPasswordForm(user, data)
self.assertTrue(form.is_valid())
@override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
class PasswordChangeFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_incorrect_password(self):
user = User.objects.get(username='testclient')
data = {
'old_password': 'test',
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = PasswordChangeForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["old_password"].errors,
[force_text(form.error_messages['password_incorrect'])])
def test_password_verification(self):
# The two new passwords do not match.
user = User.objects.get(username='testclient')
data = {
'old_password': 'password',
'new_password1': 'abc123',
'new_password2': 'abc',
}
form = PasswordChangeForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["new_password2"].errors,
[force_text(form.error_messages['password_mismatch'])])
def test_success(self):
# The success case.
user = User.objects.get(username='testclient')
data = {
'old_password': 'password',
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = PasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
def test_field_order(self):
# Regression test - check the order of fields:
user = User.objects.get(username='testclient')
self.assertEqual(list(PasswordChangeForm(user, {}).fields),
['old_password', 'new_password1', 'new_password2'])
@override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
class UserChangeFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_username_validity(self):
user = User.objects.get(username='testclient')
data = {'username': 'not valid'}
form = UserChangeForm(data, instance=user)
self.assertFalse(form.is_valid())
validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid')
self.assertEqual(form["username"].errors, [force_text(validator.message)])
def test_bug_14242(self):
# A regression test, introduced by adding an optimization for the
# UserChangeForm.
class MyUserForm(UserChangeForm):
def __init__(self, *args, **kwargs):
super(MyUserForm, self).__init__(*args, **kwargs)
self.fields['groups'].help_text = 'These groups give users different permissions'
class Meta(UserChangeForm.Meta):
fields = ('groups',)
# Just check we can create it
MyUserForm({})
def test_unusable_password(self):
user = User.objects.get(username='empty_password')
user.set_unusable_password()
user.save()
form = UserChangeForm(instance=user)
self.assertIn(_("No password set."), form.as_table())
def test_bug_17944_empty_password(self):
user = User.objects.get(username='empty_password')
form = UserChangeForm(instance=user)
self.assertIn(_("No password set."), form.as_table())
def test_bug_17944_unmanageable_password(self):
user = User.objects.get(username='unmanageable_password')
form = UserChangeForm(instance=user)
self.assertIn(_("Invalid password format or unknown hashing algorithm."),
form.as_table())
def test_bug_17944_unknown_password_algorithm(self):
user = User.objects.get(username='unknown_password')
form = UserChangeForm(instance=user)
self.assertIn(_("Invalid password format or unknown hashing algorithm."),
form.as_table())
def test_bug_19133(self):
"The change form does not return the password value"
# Use the form to construct the POST data
user = User.objects.get(username='testclient')
form_for_data = UserChangeForm(instance=user)
post_data = form_for_data.initial
# The password field should be readonly, so anything
# posted here should be ignored; the form will be
# valid, and give back the 'initial' value for the
# password field.
post_data['password'] = 'new password'
form = UserChangeForm(instance=user, data=post_data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['password'], 'sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161')
def test_bug_19349_bound_password_field(self):
user = User.objects.get(username='testclient')
form = UserChangeForm(data={}, instance=user)
# When rendering the bound password field,
# ReadOnlyPasswordHashWidget needs the initial
# value to render correctly
self.assertEqual(form.initial['password'], form['password'].value())
@override_settings(
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
TEMPLATES=AUTH_TEMPLATES,
USE_TZ=False,
)
class PasswordResetFormTest(TestCase):
fixtures = ['authtestdata.json']
def create_dummy_user(self):
"""
Create a user and return a tuple (user_object, username, email).
"""
username = 'jsmith'
email = 'jsmith@example.com'
user = User.objects.create_user(username, email, 'test123')
return (user, username, email)
def test_invalid_email(self):
data = {'email': 'not valid'}
form = PasswordResetForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['email'].errors, [_('Enter a valid email address.')])
def test_nonexistent_email(self):
"""
Test nonexistent email address. This should not fail because it would
expose information about registered users.
"""
data = {'email': 'foo@bar.com'}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(len(mail.outbox), 0)
def test_cleaned_data(self):
(user, username, email) = self.create_dummy_user()
data = {'email': email}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
form.save(domain_override='example.com')
self.assertEqual(form.cleaned_data['email'], email)
self.assertEqual(len(mail.outbox), 1)
def test_custom_email_subject(self):
data = {'email': 'testclient@example.com'}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
# Since we're not providing a request object, we must provide a
# domain_override to prevent the save operation from failing in the
# potential case where contrib.sites is not installed. Refs #16412.
form.save(domain_override='example.com')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Custom password reset on example.com')
def test_custom_email_constructor(self):
data = {'email': 'testclient@example.com'}
class CustomEmailPasswordResetForm(PasswordResetForm):
def send_mail(self, subject_template_name, email_template_name,
context, from_email, to_email,
html_email_template_name=None):
EmailMultiAlternatives(
"Forgot your password?",
"Sorry to hear you forgot your password.",
None, [to_email],
['site_monitor@example.com'],
headers={'Reply-To': 'webmaster@example.com'},
alternatives=[("Really sorry to hear you forgot your password.",
"text/html")]).send()
form = CustomEmailPasswordResetForm(data)
self.assertTrue(form.is_valid())
# Since we're not providing a request object, we must provide a
# domain_override to prevent the save operation from failing in the
# potential case where contrib.sites is not installed. Refs #16412.
form.save(domain_override='example.com')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Forgot your password?')
self.assertEqual(mail.outbox[0].bcc, ['site_monitor@example.com'])
self.assertEqual(mail.outbox[0].content_subtype, "plain")
def test_preserve_username_case(self):
"""
Preserve the case of the user name (before the @ in the email address)
when creating a user (#5605).
"""
user = User.objects.create_user('forms_test2', 'tesT@EXAMple.com', 'test')
self.assertEqual(user.email, 'tesT@example.com')
user = User.objects.create_user('forms_test3', 'tesT', 'test')
self.assertEqual(user.email, 'tesT')
def test_inactive_user(self):
"""
Test that an inactive user cannot receive a password reset email.
"""
(user, username, email) = self.create_dummy_user()
user.is_active = False
user.save()
form = PasswordResetForm({'email': email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 0)
def test_unusable_password(self):
user = User.objects.create_user('testuser', 'test@example.com', 'test')
data = {"email": "test@example.com"}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
user.set_unusable_password()
user.save()
form = PasswordResetForm(data)
# The form itself is valid, but no email is sent
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 0)
def test_save_plaintext_email(self):
"""
Test the PasswordResetForm.save() method with no html_email_template_name
parameter passed in.
Test to ensure original behavior is unchanged after the parameter was added.
"""
(user, username, email) = self.create_dummy_user()
form = PasswordResetForm({"email": email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0].message()
self.assertFalse(message.is_multipart())
self.assertEqual(message.get_content_type(), 'text/plain')
self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
self.assertEqual(len(mail.outbox[0].alternatives), 0)
self.assertEqual(message.get_all('to'), [email])
self.assertTrue(re.match(r'^http://example.com/reset/[\w+/-]', message.get_payload()))
def test_save_html_email_template_name(self):
"""
Test the PasswordResetForm.save() method with html_email_template_name
parameter specified.
Test to ensure that a multipart email is sent with both text/plain
and text/html parts.
"""
(user, username, email) = self.create_dummy_user()
form = PasswordResetForm({"email": email})
self.assertTrue(form.is_valid())
form.save(html_email_template_name='registration/html_password_reset_email.html')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(len(mail.outbox[0].alternatives), 1)
message = mail.outbox[0].message()
self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
self.assertEqual(len(message.get_payload()), 2)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
self.assertEqual(message.get_all('to'), [email])
self.assertTrue(re.match(r'^http://example.com/reset/[\w/-]+', message.get_payload(0).get_payload()))
self.assertTrue(
re.match(r'^<html><a href="http://example.com/reset/[\w/-]+/">Link</a></html>$',
message.get_payload(1).get_payload())
)
class ReadOnlyPasswordHashTest(TestCase):
def test_bug_19349_render_with_none_value(self):
# Rendering the widget with value set to None
# mustn't raise an exception.
widget = ReadOnlyPasswordHashWidget()
html = widget.render(name='password', value=None, attrs={})
self.assertIn(_("No password set."), html)
def test_readonly_field_has_changed(self):
field = ReadOnlyPasswordHashField()
self.assertFalse(field.has_changed('aaa', 'bbb'))
| |
from decimal import Decimal as D
import sys
from django.core.urlresolvers import reverse
from django.conf import settings
from django.utils.importlib import import_module
from django.test.utils import override_settings
from oscar.test.factories import (
create_product, create_voucher, create_offer)
from oscar.test.testcases import ClientTestCase
from oscar.apps.basket.models import Basket
from oscar.apps.order.models import Order
from oscar.apps.address.models import Country
from oscar.apps.voucher.models import Voucher
from oscar.apps.offer.models import ConditionalOffer
from oscar.test.basket import add_product
class CheckoutMixin(object):
def add_product_to_basket(self):
product = create_product(price=D('12.00'), num_in_stock=10)
self.client.post(reverse('basket:add'), {'product_id': product.id,
'quantity': 1})
def add_voucher_to_basket(self, voucher=None):
if voucher is None:
voucher = create_voucher()
self.client.post(reverse('basket:vouchers-add'),
{'code': voucher.code})
def complete_guest_email_form(self, email='test@example.com'):
response = self.client.post(reverse('checkout:index'),
{'username': email,
'options': 'new'})
self.assertIsRedirect(response)
def complete_shipping_address(self):
Country.objects.get_or_create(
iso_3166_1_a2='GB',
is_shipping_country=True
)
response = self.client.post(reverse('checkout:shipping-address'),
{'last_name': 'Doe',
'first_name': 'John',
'line1': '1 Egg Street',
'line4': 'City',
'postcode': 'N1 9RT',
'country': 'GB',
})
self.assertRedirectUrlName(response, 'checkout:shipping-method')
def complete_shipping_method(self):
self.client.get(reverse('checkout:shipping-method'))
def submit(self):
return self.client.post(reverse('checkout:preview'), {'action': 'place_order'})
class DisabledAnonymousCheckoutViewsTests(ClientTestCase):
is_anonymous = True
def test_index_does_require_login(self):
url = reverse('checkout:index')
response = self.client.get(url)
self.assertIsRedirect(response)
def test_user_address_views_require_a_login(self):
urls = [reverse('checkout:user-address-update', kwargs={'pk': 1}),
reverse('checkout:user-address-delete', kwargs={'pk': 1}),]
for url in urls:
response = self.client.get(url)
self.assertIsRedirect(response)
def test_core_checkout_requires_login(self):
urls = [reverse('checkout:shipping-address'),
reverse('checkout:payment-method'),
reverse('checkout:shipping-method'),
reverse('checkout:payment-details')]
for url in urls:
response = self.client.get(url)
self.assertIsRedirect(response)
class EnabledAnonymousCheckoutViewsTests(ClientTestCase, CheckoutMixin):
is_anonymous = True
def reload_urlconf(self):
if settings.ROOT_URLCONF in sys.modules:
reload(sys.modules[settings.ROOT_URLCONF])
return import_module(settings.ROOT_URLCONF)
def add_product_to_basket(self):
product = create_product(price=D('12.00'), num_in_stock=10)
self.client.post(reverse('basket:add'), {'product_id': product.id,
'quantity': 1})
def test_shipping_address_does_require_session_email_address(self):
with override_settings(OSCAR_ALLOW_ANON_CHECKOUT=True):
self.reload_urlconf()
url = reverse('checkout:shipping-address')
response = self.client.get(url)
self.assertIsRedirect(response)
def test_email_address_is_saved_with_order(self):
with override_settings(OSCAR_ALLOW_ANON_CHECKOUT=True):
self.reload_urlconf()
self.add_product_to_basket()
self.complete_guest_email_form('barry@example.com')
self.complete_shipping_address()
self.complete_shipping_method()
response = self.client.post(reverse('checkout:preview'), {'action': 'place_order'})
response = self.client.get(reverse('checkout:thank-you'))
order = response.context['order']
self.assertEqual('barry@example.com', order.guest_email)
class TestShippingAddressView(ClientTestCase, CheckoutMixin):
fixtures = ['countries.json']
def test_pages_returns_200(self):
self.add_product_to_basket()
response = self.client.get(reverse('checkout:shipping-address'))
self.assertIsOk(response)
def test_anon_checkout_disabled_by_default(self):
self.assertFalse(settings.OSCAR_ALLOW_ANON_CHECKOUT)
def test_create_shipping_address_adds_address_to_session(self):
response = self.client.post(reverse('checkout:shipping-address'),
{'last_name': 'Doe',
'first_name': 'John',
'line1': '1 Egg Street',
'line4': 'City',
'postcode': 'N1 9RT',
'country': 'GB',
})
self.assertIsRedirect(response)
session_address = self.client.session['checkout_data']['shipping']['new_address_fields']
self.assertEqual('Doe', session_address['last_name'])
self.assertEqual('1 Egg Street', session_address['line1'])
self.assertEqual('N1 9RT', session_address['postcode'])
def test_invalid_shipping_address_fails(self):
response = self.client.post(reverse('checkout:shipping-address'),
{'last_name': 'Doe',
'first_name': 'John',
'postcode': 'N1 9RT',
'country': 'GB',
})
self.assertIsOk(response) # no redirect
def test_user_must_have_a_nonempty_basket(self):
response = self.client.get(reverse('checkout:shipping-address'))
self.assertRedirectUrlName(response, 'basket:summary')
class TestShippingMethodView(ClientTestCase, CheckoutMixin):
fixtures = ['countries.json']
def test_shipping_method_view_redirects_if_no_shipping_address(self):
self.add_product_to_basket()
response = self.client.get(reverse('checkout:shipping-method'))
self.assertIsRedirect(response)
self.assertRedirectUrlName(response, 'checkout:shipping-address')
def test_redirects_by_default(self):
self.add_product_to_basket()
self.complete_shipping_address()
response = self.client.get(reverse('checkout:shipping-method'))
self.assertRedirectUrlName(response, 'checkout:payment-method')
def test_user_must_have_a_nonempty_basket(self):
response = self.client.get(reverse('checkout:shipping-method'))
self.assertRedirectUrlName(response, 'basket:summary')
class TestPaymentMethodView(ClientTestCase, CheckoutMixin):
def test_view_redirects_if_no_shipping_address(self):
self.add_product_to_basket()
response = self.client.get(reverse('checkout:payment-method'))
self.assertIsRedirect(response)
self.assertRedirectUrlName(response, 'checkout:shipping-address')
def test_view_redirects_if_no_shipping_method(self):
self.add_product_to_basket()
self.complete_shipping_address()
response = self.client.get(reverse('checkout:payment-method'))
self.assertIsRedirect(response)
self.assertRedirectUrlName(response, 'checkout:shipping-method')
def test_user_must_have_a_nonempty_basket(self):
response = self.client.get(reverse('checkout:payment-method'))
self.assertRedirectUrlName(response, 'basket:summary')
class TestPreviewView(ClientTestCase, CheckoutMixin):
def test_user_must_have_a_nonempty_basket(self):
response = self.client.get(reverse('checkout:preview'))
self.assertRedirectUrlName(response, 'basket:summary')
def test_view_redirects_if_no_shipping_address(self):
self.add_product_to_basket()
response = self.client.get(reverse('checkout:preview'))
self.assertIsRedirect(response)
self.assertRedirectUrlName(response, 'checkout:shipping-address')
def test_view_redirects_if_no_shipping_method(self):
self.add_product_to_basket()
self.complete_shipping_address()
response = self.client.get(reverse('checkout:preview'))
self.assertIsRedirect(response)
self.assertRedirectUrlName(response, 'checkout:shipping-method')
def test_ok_response_if_previous_steps_complete(self):
self.add_product_to_basket()
self.complete_shipping_address()
self.complete_shipping_method()
response = self.client.get(reverse('checkout:preview'))
self.assertIsOk(response)
class TestPaymentDetailsView(ClientTestCase, CheckoutMixin):
def test_user_must_have_a_nonempty_basket(self):
response = self.client.get(reverse('checkout:payment-details'))
self.assertRedirectUrlName(response, 'basket:summary')
def test_view_redirects_if_no_shipping_address(self):
self.add_product_to_basket()
response = self.client.post(reverse('checkout:payment-details'))
self.assertIsRedirect(response)
self.assertRedirectUrlName(response, 'checkout:shipping-address')
def test_view_redirects_if_no_shipping_method(self):
self.add_product_to_basket()
self.complete_shipping_address()
response = self.client.post(reverse('checkout:payment-details'))
self.assertIsRedirect(response)
self.assertRedirectUrlName(response, 'checkout:shipping-method')
def test_placing_order_with_empty_basket_redirects(self):
response = self.client.post(reverse('checkout:preview'), {'action': 'place_order'})
self.assertIsRedirect(response)
self.assertRedirectUrlName(response, 'basket:summary')
class TestOrderPlacement(ClientTestCase, CheckoutMixin):
def setUp(self):
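# Build a one-item basket, complete the shipping steps and submit the order
# once up front, so each test below only has to inspect the outcome.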
super(TestOrderPlacement, self).setUp()
self.basket = Basket.objects.create(owner=self.user)
add_product(self.basket, D('12.00'))
self.complete_shipping_address()
self.complete_shipping_method()
self.response = self.client.post(reverse('checkout:preview'), {'action': 'place_order'})
def test_placing_order_with_nonempty_basket_redirects(self):
self.assertIsRedirect(self.response)
self.assertRedirectUrlName(self.response, 'checkout:thank-you')
def test_order_is_created(self):
self.assertIsRedirect(self.response)
orders = Order.objects.all()
self.assertEqual(1, len(orders))
class TestPlacingOrderUsingAVoucher(ClientTestCase, CheckoutMixin):
def setUp(self):
self.login()
self.add_product_to_basket()
voucher = create_voucher()
self.add_voucher_to_basket(voucher)
self.complete_shipping_address()
self.complete_shipping_method()
self.response = self.submit()
# Reload voucher
self.voucher = Voucher.objects.get(id=voucher.id)
def test_is_successful(self):
self.assertRedirectUrlName(self.response, 'checkout:thank-you')
def test_records_use(self):
self.assertEqual(1, self.voucher.num_orders)
def test_records_discount(self):
# The reloaded voucher should have recorded a non-zero discount total
# (assumes the Voucher model's ``total_discount`` field).
self.assertTrue(self.voucher.total_discount > 0)
class TestPlacingOrderUsingAnOffer(ClientTestCase, CheckoutMixin):
def setUp(self):
offer = create_offer()
self.login()
self.add_product_to_basket()
self.complete_shipping_address()
self.complete_shipping_method()
self.response = self.submit()
# Reload offer
self.offer = ConditionalOffer.objects.get(id=offer.id)
def test_is_successful(self):
self.assertRedirectUrlName(self.response, 'checkout:thank-you')
def test_records_use(self):
self.assertEqual(1, self.offer.num_orders)
self.assertEqual(1, self.offer.num_applications)
| |
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from proboscis import SkipTest
from trove.tests.config import CONFIG
from trove.tests.scenario.helpers.test_helper import DataType
from trove.tests.scenario.runners.test_runners import CheckInstance
from trove.tests.scenario.runners.test_runners import InstanceTestInfo
from trove.tests.scenario.runners.test_runners import TestRunner
class InstanceCreateRunner(TestRunner):
def __init__(self):
super(InstanceCreateRunner, self).__init__()
self.init_inst_id = None
self.init_inst_dbs = None
self.init_inst_users = None
self.init_inst_host = None
self.init_inst_data = None
self.init_inst_config_group_id = None
self.config_group_id = None
def run_empty_instance_create(
self, expected_states=['BUILD', 'ACTIVE'], expected_http_code=200):
name = self.instance_info.name
flavor = self._get_instance_flavor()
trove_volume_size = CONFIG.get('trove_volume_size', 1)
instance_info = self.assert_instance_create(
name, flavor, trove_volume_size, [], [], None, None,
CONFIG.dbaas_datastore, CONFIG.dbaas_datastore_version,
expected_states, expected_http_code, create_helper_user=True,
locality='affinity')
# Update the shared instance info.
self.instance_info.id = instance_info.id
self.instance_info.name = instance_info.name
self.instance_info.databases = instance_info.databases
self.instance_info.users = instance_info.users
self.instance_info.dbaas_datastore = instance_info.dbaas_datastore
self.instance_info.dbaas_datastore_version = (
instance_info.dbaas_datastore_version)
self.instance_info.dbaas_flavor_href = instance_info.dbaas_flavor_href
self.instance_info.volume = instance_info.volume
self.instance_info.srv_grp_id = self.assert_server_group_exists(
self.instance_info.id)
def run_initial_configuration_create(self, expected_http_code=200):
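# The test helper supplies datastore-specific configuration overrides as a
# dict (purely illustrative, not from this file: something like
# {'max_connections': 100} for a MySQL datastore). The dynamic group is
# preferred, presumably because its values can be applied without a restart.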
dynamic_config = self.test_helper.get_dynamic_group()
non_dynamic_config = self.test_helper.get_non_dynamic_group()
values = dynamic_config or non_dynamic_config
if values:
json_def = json.dumps(values)
result = self.auth_client.configurations.create(
'initial_configuration_for_instance_create',
json_def,
"Configuration group used by instance create tests.",
datastore=self.instance_info.dbaas_datastore,
datastore_version=self.instance_info.dbaas_datastore_version)
self.assert_client_code(expected_http_code)
self.config_group_id = result.id
else:
raise SkipTest("No groups defined.")
def run_initialized_instance_create(
self, with_dbs=True, with_users=True, configuration_id=None,
expected_states=['BUILD', 'ACTIVE'], expected_http_code=200,
create_helper_user=True, name_suffix='_init'):
if self.is_using_existing_instance:
# The user requested to run the tests using an existing instance.
# We therefore skip any scenarios that involve creating new
# test instances.
raise SkipTest("Using an existing instance.")
configuration_id = configuration_id or self.config_group_id
name = self.instance_info.name + name_suffix
flavor = self._get_instance_flavor()
trove_volume_size = CONFIG.get('trove_volume_size', 1)
self.init_inst_dbs = (self.test_helper.get_valid_database_definitions()
if with_dbs else [])
self.init_inst_users = (self.test_helper.get_valid_user_definitions()
if with_users else [])
self.init_inst_config_group_id = configuration_id
if (self.init_inst_dbs or self.init_inst_users or configuration_id):
info = self.assert_instance_create(
name, flavor, trove_volume_size,
self.init_inst_dbs, self.init_inst_users,
configuration_id, None,
CONFIG.dbaas_datastore, CONFIG.dbaas_datastore_version,
expected_states, expected_http_code,
create_helper_user=create_helper_user)
self.init_inst_id = info.id
else:
# There is no need to run this test as it's effectively the same as
# the empty instance test.
raise SkipTest("No testable initial properties provided.")
def _get_instance_flavor(self):
if self.EPHEMERAL_SUPPORT:
flavor_name = CONFIG.values.get('instance_eph_flavor_name',
'eph.rd-tiny')
else:
flavor_name = CONFIG.values.get('instance_flavor_name', 'm1.tiny')
return self.get_flavor(flavor_name)
def _get_flavor_href(self, flavor):
return self.auth_client.find_flavor_self_href(flavor)
def assert_instance_create(
self, name, flavor, trove_volume_size,
database_definitions, user_definitions,
configuration_id, root_password, datastore, datastore_version,
expected_states, expected_http_code, create_helper_user=False,
locality=None):
"""This assert method executes a 'create' call and verifies the server
response. It neither waits for the instance to become available
nor does it perform any other validations itself.
It has been designed this way to increase test granularity
(other tests may run while the instance is building) and also to allow
its reuse in other runners.
"""
databases = database_definitions
users = [{'name': item['name'], 'password': item['password']}
for item in user_definitions]
# Here we add helper user/database if any.
if create_helper_user:
helper_db_def, helper_user_def, root_def = self.build_helper_defs()
if helper_db_def:
self.report.log(
"Appending a helper database '%s' to the instance "
"definition." % helper_db_def['name'])
databases.append(helper_db_def)
if helper_user_def:
self.report.log(
"Appending a helper user '%s:%s' to the instance "
"definition."
% (helper_user_def['name'], helper_user_def['password']))
users.append(helper_user_def)
instance_info = InstanceTestInfo()
instance_info.name = name
instance_info.databases = databases
instance_info.users = users
instance_info.dbaas_datastore = CONFIG.dbaas_datastore
instance_info.dbaas_datastore_version = CONFIG.dbaas_datastore_version
instance_info.dbaas_flavor_href = self._get_flavor_href(flavor)
if self.VOLUME_SUPPORT:
instance_info.volume = {'size': trove_volume_size}
else:
instance_info.volume = None
shared_network = CONFIG.get('shared_network', None)
if shared_network:
instance_info.nics = [{'net-id': shared_network}]
self.report.log("Testing create instance: %s"
% {'name': name,
'flavor': flavor.id,
'volume': trove_volume_size,
'nics': instance_info.nics,
'databases': databases,
'users': users,
'configuration': configuration_id,
'root password': root_password,
'datastore': datastore,
'datastore version': datastore_version})
instance = self.get_existing_instance()
if instance:
self.report.log("Using an existing instance: %s" % instance.id)
self.assert_equal(expected_states[-1], instance.status,
"Given instance is in a bad state.")
instance_info.name = instance.name
else:
self.report.log("Creating a new instance.")
instance = self.auth_client.instances.create(
instance_info.name,
instance_info.dbaas_flavor_href,
instance_info.volume,
instance_info.databases,
instance_info.users,
nics=instance_info.nics,
configuration=configuration_id,
availability_zone="nova",
datastore=instance_info.dbaas_datastore,
datastore_version=instance_info.dbaas_datastore_version,
locality=locality)
self.assert_instance_action(
instance.id, expected_states[0:1], expected_http_code)
instance_info.id = instance.id
with CheckInstance(instance._info) as check:
check.flavor()
check.datastore()
check.links(instance._info['links'])
if self.VOLUME_SUPPORT:
check.volume()
self.assert_equal(trove_volume_size,
instance._info['volume']['size'],
"Unexpected Trove volume size")
self.assert_equal(instance_info.name, instance._info['name'],
"Unexpected instance name")
self.assert_equal(flavor.id,
int(instance._info['flavor']['id']),
"Unexpected instance flavor")
self.assert_equal(instance_info.dbaas_datastore,
instance._info['datastore']['type'],
"Unexpected instance datastore version")
self.assert_equal(instance_info.dbaas_datastore_version,
instance._info['datastore']['version'],
"Unexpected instance datastore version")
self.assert_configuration_group(instance_info.id, configuration_id)
if locality:
self.assert_equal(locality, instance._info['locality'],
"Unexpected locality")
return instance_info
def wait_for_created_instances(self, expected_states=['BUILD', 'ACTIVE']):
instances = [self.instance_info.id]
if self.init_inst_id:
instances.append(self.init_inst_id)
self.assert_all_instance_states(instances, expected_states)
def run_add_initialized_instance_data(self):
self.init_inst_data = DataType.small
self.init_inst_host = self.get_instance_host(self.init_inst_id)
self.test_helper.add_data(self.init_inst_data, self.init_inst_host)
def run_validate_initialized_instance(self):
if self.init_inst_id:
self.assert_instance_properties(
self.init_inst_id, self.init_inst_dbs, self.init_inst_users,
self.init_inst_config_group_id, self.init_inst_data)
def assert_instance_properties(
self, instance_id, expected_dbs_definitions,
expected_user_definitions, expected_config_group_id,
expected_data_type):
if expected_dbs_definitions:
self.assert_database_list(instance_id, expected_dbs_definitions)
else:
self.report.log("No databases to validate for instance: %s"
% instance_id)
if expected_user_definitions:
self.assert_user_list(instance_id, expected_user_definitions)
else:
self.report.log("No users to validate for instance: %s"
% instance_id)
self.assert_configuration_group(instance_id, expected_config_group_id)
if self.init_inst_host:
self.test_helper.verify_data(
expected_data_type, self.init_inst_host)
else:
self.report.log("No data to validate for instance: %s"
% instance_id)
def assert_configuration_group(self, instance_id, expected_group_id):
instance = self.get_instance(instance_id)
if expected_group_id:
self.assert_equal(expected_group_id, instance.configuration['id'],
"Wrong configuration group attached")
else:
self.assert_false(hasattr(instance, 'configuration'),
"No configuration group expected")
def assert_database_list(self, instance_id, expected_databases):
expected_names = self._get_names(expected_databases)
full_list = self.auth_client.databases.list(instance_id)
self.assert_is_none(full_list.next,
"Unexpected pagination in the database list.")
listed_names = [database.name for database in full_list]
self.assert_is_sublist(expected_names, listed_names,
"Mismatch in instance databases.")
def _get_names(self, definitions):
return [item['name'] for item in definitions]
def assert_user_list(self, instance_id, expected_users):
expected_names = self._get_names(expected_users)
full_list = self.auth_client.users.list(instance_id)
self.assert_is_none(full_list.next,
"Unexpected pagination in the user list.")
listed_names = [user.name for user in full_list]
self.assert_is_sublist(expected_names, listed_names,
"Mismatch in instance users.")
# Verify that user definitions include only created databases.
all_databases = self._get_names(
self.test_helper.get_valid_database_definitions())
for user in expected_users:
self.assert_is_sublist(
self._get_names(user['databases']), all_databases,
"Definition of user '%s' specifies databases not included in "
"the list of initial databases." % user['name'])
def run_initialized_instance_delete(self, expected_http_code=202):
if self.init_inst_id:
self.auth_client.instances.delete(self.init_inst_id)
self.assert_client_code(expected_http_code)
else:
raise SkipTest("Cleanup is not required.")
def run_wait_for_initialized_instance_delete(self,
expected_states=['SHUTDOWN']):
if self.init_inst_id:
self.assert_all_gone(self.init_inst_id, expected_states[-1])
else:
raise SkipTest("Cleanup is not required.")
self.init_inst_id = None
self.init_inst_dbs = None
self.init_inst_users = None
self.init_inst_host = None
self.init_inst_data = None
self.init_inst_config_group_id = None
def run_initial_configuration_delete(self, expected_http_code=202):
if self.config_group_id:
self.auth_client.configurations.delete(self.config_group_id)
self.assert_client_code(expected_http_code)
else:
raise SkipTest("Cleanup is not required.")
self.config_group_id = None
| |
#!/usr/bin/env python
import atexit
import contextlib
import errno
import platform
import re
import shutil
import ssl
import subprocess
import sys
import tarfile
import tempfile
import urllib2
import os
import zipfile
from config import is_verbose_mode, PLATFORM
from env_util import get_vs_env
BOTO_DIR = os.path.abspath(os.path.join(__file__, '..', '..', '..', 'vendor',
'boto'))
NPM = 'npm'
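# On Windows npm's entry point is a batch script, so the subprocess calls
# below need the explicit '.cmd' suffix to find it.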
if sys.platform in ['win32', 'cygwin']:
NPM += '.cmd'
def get_host_arch():
"""Returns the host architecture with a predictable string."""
host_arch = platform.machine()
# Convert machine type to format recognized by gyp.
if re.match(r'i.86', host_arch) or host_arch == 'i86pc':
host_arch = 'ia32'
elif host_arch in ['x86_64', 'amd64']:
host_arch = 'x64'
elif host_arch.startswith('arm'):
host_arch = 'arm'
# platform.machine is based on running kernel. It's possible to use 64-bit
# kernel with 32-bit userland, e.g. to give linker slightly more memory.
# Distinguish between different userland bitness by querying
# the python binary.
if host_arch == 'x64' and platform.architecture()[0] == '32bit':
host_arch = 'ia32'
return host_arch
def tempdir(prefix=''):
directory = tempfile.mkdtemp(prefix=prefix)
atexit.register(shutil.rmtree, directory)
return directory
@contextlib.contextmanager
def scoped_cwd(path):
cwd = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(cwd)
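# Illustrative usage (not part of the original script):
#   with scoped_cwd('/some/dir'):
#       execute(['ls'])   # runs with cwd=/some/dir; previous cwd is restored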
@contextlib.contextmanager
def scoped_env(key, value):
origin = ''
if key in os.environ:
origin = os.environ[key]
os.environ[key] = value
try:
yield
finally:
os.environ[key] = origin
def download(text, url, path):
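# Fetches `url` into `path`, printing incremental progress unless running on
# CI. Note that certificate verification is disabled globally via the
# unverified SSL context before the download starts.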
safe_mkdir(os.path.dirname(path))
with open(path, 'wb') as local_file:
if hasattr(ssl, '_create_unverified_context'):
ssl._create_default_https_context = ssl._create_unverified_context
web_file = urllib2.urlopen(url)
file_size = int(web_file.info().getheaders("Content-Length")[0])
downloaded_size = 0
block_size = 128
ci = os.environ.get('CI') == '1'
while True:
buf = web_file.read(block_size)
if not buf:
break
downloaded_size += len(buf)
local_file.write(buf)
if not ci:
percent = downloaded_size * 100. / file_size
status = "\r%s %10d [%3.1f%%]" % (text, downloaded_size, percent)
print status,
if ci:
print "%s done." % (text)
else:
print
return path
def extract_tarball(tarball_path, member, destination):
with tarfile.open(tarball_path) as tarball:
tarball.extract(member, destination)
def extract_zip(zip_path, destination):
if sys.platform == 'darwin':
# Use unzip command on Mac to keep symbol links in zip file work.
execute(['unzip', zip_path, '-d', destination])
else:
with zipfile.ZipFile(zip_path) as z:
z.extractall(destination)
def make_zip(zip_file_path, files, dirs):
safe_unlink(zip_file_path)
if sys.platform == 'darwin':
files += dirs
execute(['zip', '-r', '-y', zip_file_path] + files)
else:
zip_file = zipfile.ZipFile(zip_file_path, "w", zipfile.ZIP_DEFLATED)
for filename in files:
zip_file.write(filename, filename)
for dirname in dirs:
for root, _, filenames in os.walk(dirname):
for f in filenames:
zip_file.write(os.path.join(root, f))
zip_file.close()
def rm_rf(path):
try:
shutil.rmtree(path)
except OSError:
pass
def safe_unlink(path):
try:
os.unlink(path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def safe_mkdir(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def execute(argv, env=os.environ, cwd=None):
if is_verbose_mode():
print ' '.join(argv)
try:
output = subprocess.check_output(argv, stderr=subprocess.STDOUT, env=env, cwd=cwd)
if is_verbose_mode():
print output
return output
except subprocess.CalledProcessError as e:
print e.output
raise e
def execute_stdout(argv, env=os.environ, cwd=None):
if is_verbose_mode():
print ' '.join(argv)
try:
subprocess.check_call(argv, env=env, cwd=cwd)
except subprocess.CalledProcessError as e:
print e.output
raise e
else:
execute(argv, env, cwd)
def electron_gyp():
SOURCE_ROOT = os.path.abspath(os.path.join(__file__, '..', '..', '..'))
gyp = os.path.join(SOURCE_ROOT, 'electron.gyp')
with open(gyp) as f:
obj = eval(f.read())
return obj['variables']
def get_electron_version():
return 'v' + electron_gyp()['version%']
def parse_version(version):
if version[0] == 'v':
version = version[1:]
vs = version.split('.')
if len(vs) > 4:
return vs[0:4]
else:
return vs + ['0'] * (4 - len(vs))
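# Illustrative behaviour: parse_version('v1.2.3') -> ['1', '2', '3', '0'];
# the result is always padded or truncated to four components.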
def boto_path_dirs():
return [
os.path.join(BOTO_DIR, 'build', 'lib'),
os.path.join(BOTO_DIR, 'build', 'lib.linux-x86_64-2.7')
]
def run_boto_script(access_key, secret_key, script_name, *args):
env = os.environ.copy()
env['AWS_ACCESS_KEY_ID'] = access_key
env['AWS_SECRET_ACCESS_KEY'] = secret_key
env['PYTHONPATH'] = os.path.pathsep.join(
[env.get('PYTHONPATH', '')] + boto_path_dirs())
boto = os.path.join(BOTO_DIR, 'bin', script_name)
execute([sys.executable, boto] + list(args), env)
def s3put(bucket, access_key, secret_key, prefix, key_prefix, files):
args = [
'--bucket', bucket,
'--prefix', prefix,
'--key_prefix', key_prefix,
'--grant', 'public-read'
] + files
run_boto_script(access_key, secret_key, 's3put', *args)
def import_vs_env(target_arch):
if sys.platform != 'win32':
return
if target_arch == 'ia32':
vs_arch = 'amd64_x86'
else:
vs_arch = 'x86_amd64'
env = get_vs_env('14.0', vs_arch)
os.environ.update(env)
def set_clang_env(env):
SOURCE_ROOT = os.path.abspath(os.path.join(__file__, '..', '..', '..'))
llvm_dir = os.path.join(SOURCE_ROOT, 'vendor', 'llvm-build',
'Release+Asserts', 'bin')
env['CC'] = os.path.join(llvm_dir, 'clang')
env['CXX'] = os.path.join(llvm_dir, 'clang++')
def update_electron_modules(dirname, target_arch, nodedir):
env = os.environ.copy()
version = get_electron_version()
env['npm_config_arch'] = target_arch
env['npm_config_target'] = version
env['npm_config_nodedir'] = nodedir
update_node_modules(dirname, env)
execute_stdout([NPM, 'rebuild'], env, dirname)
def update_node_modules(dirname, env=None):
if env is None:
env = os.environ.copy()
if PLATFORM == 'linux':
# Use prebuilt clang for building native modules.
set_clang_env(env)
env['npm_config_clang'] = '1'
with scoped_cwd(dirname):
args = [NPM, 'install']
if is_verbose_mode():
args += ['--verbose']
# Ignore npm install errors when running in CI.
if 'CI' in os.environ:
try:
execute_stdout(args, env)
except subprocess.CalledProcessError:
pass
else:
execute_stdout(args, env)
| |
"""Manages Git."""
from __future__ import unicode_literals
import os
from dvc.utils.compat import str, open
from dvc.utils import fix_env
from dvc.scm.base import (
Base,
SCMError,
FileNotInRepoError,
FileNotInTargetSubdirError,
)
from dvc.scm.git.tree import GitTree
import dvc.logger as logger
class Git(Base):
"""Class for managing Git."""
GITIGNORE = ".gitignore"
GIT_DIR = ".git"
def __init__(self, root_dir=os.curdir, repo=None):
super(Git, self).__init__(root_dir, repo=repo)
import git
from git.exc import InvalidGitRepositoryError
try:
self.git = git.Repo(root_dir)
except InvalidGitRepositoryError:
msg = "{} is not a git repository"
raise SCMError(msg.format(root_dir))
# NOTE: fixing LD_LIBRARY_PATH for binary built by PyInstaller.
# http://pyinstaller.readthedocs.io/en/stable/runtime-information.html
env = fix_env(None)
libpath = env.get("LD_LIBRARY_PATH", None)
self.git.git.update_environment(LD_LIBRARY_PATH=libpath)
self.ignored_paths = []
self.files_to_track = []
@staticmethod
def is_repo(root_dir):
return os.path.isdir(Git._get_git_dir(root_dir))
@staticmethod
def is_submodule(root_dir):
return os.path.isfile(Git._get_git_dir(root_dir))
@staticmethod
def _get_git_dir(root_dir):
return os.path.join(root_dir, Git.GIT_DIR)
@property
def dir(self):
return self.git.git_dir
@property
def ignore_file(self):
return self.GITIGNORE
def _get_gitignore(self, path, ignore_file_dir=None):
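# Illustrative example (paths made up): for path '/repo/data/foo.csv' this
# returns ('/foo.csv', '/repo/data/.gitignore'): the entry to write and the
# .gitignore file it belongs in.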
if not ignore_file_dir:
ignore_file_dir = os.path.dirname(os.path.realpath(path))
assert os.path.isabs(path)
assert os.path.isabs(ignore_file_dir)
if not path.startswith(ignore_file_dir):
msg = (
"{} file has to be located in one of '{}' subdirectories"
", not outside '{}'"
)
raise FileNotInTargetSubdirError(
msg.format(self.GITIGNORE, path, ignore_file_dir)
)
entry = os.path.relpath(path, ignore_file_dir).replace(os.sep, "/")
# NOTE: using '/' prefix to make path unambiguous
if len(entry) > 0 and entry[0] != "/":
entry = "/" + entry
gitignore = os.path.join(ignore_file_dir, self.GITIGNORE)
if not gitignore.startswith(os.path.realpath(self.root_dir)):
raise FileNotInRepoError(path)
return entry, gitignore
def ignore(self, path, in_curr_dir=False):
base_dir = (
os.path.realpath(os.curdir)
if in_curr_dir
else os.path.dirname(path)
)
entry, gitignore = self._get_gitignore(path, base_dir)
ignore_list = []
if os.path.exists(gitignore):
with open(gitignore, "r") as f:
ignore_list = f.readlines()
if any(filter(lambda x: x.strip() == entry.strip(), ignore_list)):
return
msg = "Adding '{}' to '{}'.".format(
os.path.relpath(path), os.path.relpath(gitignore)
)
logger.info(msg)
self._add_entry_to_gitignore(entry, gitignore, ignore_list)
self.track_file(os.path.relpath(gitignore))
self.ignored_paths.append(path)
@staticmethod
def _add_entry_to_gitignore(entry, gitignore, ignore_list):
content = entry
if ignore_list:
content = "\n" + content
with open(gitignore, "a", encoding="utf-8") as fobj:
fobj.write(content)
def ignore_remove(self, path):
entry, gitignore = self._get_gitignore(path)
if not os.path.exists(gitignore):
return
with open(gitignore, "r") as fobj:
lines = fobj.readlines()
filtered = list(filter(lambda x: x.strip() != entry.strip(), lines))
with open(gitignore, "w") as fobj:
fobj.writelines(filtered)
self.track_file(os.path.relpath(gitignore))
def add(self, paths):
# NOTE: GitPython is not currently able to handle index version >= 3.
# See https://github.com/iterative/dvc/issues/610 for more details.
try:
self.git.index.add(paths)
except AssertionError:
msg = (
"failed to add '{}' to git. You can add those files"
" manually using 'git add'."
" See 'https://github.com/iterative/dvc/issues/610'"
" for more details.".format(str(paths))
)
logger.error(msg)
def commit(self, msg):
self.git.index.commit(msg)
def checkout(self, branch, create_new=False):
if create_new:
self.git.git.checkout("HEAD", b=branch)
else:
self.git.git.checkout(branch)
def branch(self, branch):
self.git.git.branch(branch)
def tag(self, tag):
self.git.git.tag(tag)
def untracked_files(self):
files = self.git.untracked_files
return [os.path.join(self.git.working_dir, fname) for fname in files]
def is_tracked(self, path):
# it is equivalent to `bool(self.git.git.ls_files(path))` by
# functionality, but ls_files fails on unicode filenames
path = os.path.relpath(path, self.root_dir)
return path in [i[0] for i in self.git.index.entries]
def is_dirty(self):
return self.git.is_dirty()
def active_branch(self):
return self.git.active_branch.name
def list_branches(self):
return [h.name for h in self.git.heads]
def list_tags(self):
return [t.name for t in self.git.tags]
def _install_hook(self, name, cmd):
hook = os.path.join(self.root_dir, self.GIT_DIR, "hooks", name)
if os.path.isfile(hook):
msg = "git hook '{}' already exists."
raise SCMError(msg.format(os.path.relpath(hook)))
with open(hook, "w+") as fobj:
fobj.write("#!/bin/sh\nexec dvc {}\n".format(cmd))
os.chmod(hook, 0o777)
def install(self):
self._install_hook("post-checkout", "checkout")
self._install_hook("pre-commit", "status")
def cleanup_ignores(self):
for path in self.ignored_paths:
self.ignore_remove(path)
self.reset_ignores()
def reset_ignores(self):
self.ignored_paths = []
def remind_to_track(self):
if not self.files_to_track:
return
logger.info(
"\n"
"To track the changes with git run:\n"
"\n"
"\tgit add {files}".format(files=" ".join(self.files_to_track))
)
def track_file(self, path):
self.files_to_track.append(path)
def belongs_to_scm(self, path):
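# True for the SCM's own metadata: any '.gitignore' file or any path with a
# '.git' component (illustrative: '.git/config', 'sub/.gitignore').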
basename = os.path.basename(path)
path_parts = os.path.normpath(path).split(os.path.sep)
return basename == self.ignore_file or Git.GIT_DIR in path_parts
def get_tree(self, rev):
return GitTree(self.git, rev)
| |
'''Copyright 2011 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
from google.appengine.api import backends
from google.appengine.api import users
import copy
import logging
import pprint as pp
import random
import sys
from client import db_api
def e(msg):
"""Convient method to raise an exception."""
raise Exception(repr(msg))
def w(msg):
"""Log a warning message."""
logging.warning('##### %s' % repr(msg))
class MatchMaker:
"""
Multiple player match making service, allowing players
to come together in an arena to show off their skills.
_game_servers =
{ serverid1: <server_struct>,
serverid2: <server_struct>,
}
<server_struct> =
{ 'server_info': <server_info>,
'games': { name1: <game>,
name2: <game>,
},
}
<server_info> =
{ 'serverid': ...,
'uptime': ...,
}
<game> =
{ 'serverid': ...,
'name': 'mPdn',
'gameURL': 'http://127.0.0.1:9090/mPdn',
'port': 9090,
'controller_host': '127.0.0.1:12345',
'game_state': {'players': {'324324382934982374823748923': '!1'}, 'min_players': 2, 'max_players': 8},
}
"""
_EMPTY_SERVER = { 'games' : {} }
def __init__(self):
self._game_servers = {}
self._players_waiting = []
self._players_playing = {}
def get_game_server_struct(self, serverid):
assert serverid
return self._game_servers.get(serverid, None)
def get_game_server_info(self, serverid):
assert serverid
server_struct = self.get_game_server_struct(serverid)
return server_struct['server_info']
def _set_game_server_struct(self, serverid, server_struct):
self._game_servers[serverid] = server_struct
def _set_game_server_info(self, serverid, server_info):
assert serverid
assert server_info
server_struct = self.get_game_server_struct(serverid)
if not server_struct:
server_struct = copy.deepcopy(MatchMaker._EMPTY_SERVER)
self._set_game_server_struct(serverid, server_struct)
server_struct['server_info'] = server_info
def get_state(self):
return {
'game_servers': self._game_servers,
'players_waiting': self._players_waiting,
'players_playing': self._players_playing,
}
def get_game_servers(self):
return self._game_servers
def del_game_server(self, serverid):
del self._game_servers[serverid]
remove = []
for player, player_game in self._players_playing.iteritems():
game = player_game['game']
if game['serverid'] == serverid:
remove.append(player)
for r in remove:
self._players_playing.pop(r)
def update_player_names(self, serverid, game_name, new_game_state):
server_struct = self.get_game_server_struct(serverid)
games = server_struct['games']
game = games[game_name]
game_state = game['game_state']
players = game_state['players']
new_players = new_game_state['players']
logging.info('Updating %s with %s' % (repr(players), repr(new_players)))
assert isinstance(players, dict)
assert isinstance(new_players, dict)
players.update(new_players)
def update_server_info(self, server_info):
serverid = server_info['serverid']
self._set_game_server_info(serverid, server_info)
def update_game(self, game):
serverid = game['serverid']
name = game['name']
assert serverid in self._game_servers
server_struct = self.get_game_server_struct(serverid)
games = server_struct['games']
games[name] = game
def del_game(self, serverid, game_name):
server_struct = self.get_game_server_struct(serverid)
games = server_struct['games']
game = games[game_name]
game_state = game['game_state']
players = game_state['players']
for p in players:
self._players_playing.pop(p, None)  # tolerate players not placed by the matchmaker
del games[game_name]
def _add_player(self, userID, game):
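# Adds the player to the game's state and to _players_playing; the random
# 'player_game_key' appears to serve as an opaque per-match token that is
# later handed back to the client by find_player_game (inferred from usage).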
assert isinstance(userID, str)
game_state = game['game_state']
min_players = int(game_state['min_players'])
max_players = int(game_state['max_players'])
players = game_state['players']
assert max_players >= min_players
assert len(players) < max_players
assert userID not in game_state['players']
players[userID] = 'TBD'
self._players_playing[userID] = {
'game': game,
'player_game_key': str(random.randint(-sys.maxint, sys.maxint)),
'userID': userID, # used by Android client
}
def make_matches(self):
if not self._players_waiting:
return
# TODO match based on skills instead of capacity
players_needed_for_next_game = self.make_matches_min_players()
if self._players_waiting:
self.make_matches_max_players()
return players_needed_for_next_game
def make_matches_min_players(self):
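# Fills games up to their 'min_players' goal from the waiting queue and
# returns the smallest number of extra players still needed to start the
# next game, or -1 if no game is currently short of players.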
players_needed_for_next_game = -1
for server_struct in self._game_servers.itervalues():
for game in server_struct['games'].itervalues():
game_state = game['game_state']
players_in_game = game_state['players']
player_goal = int(game_state['min_players'])
players_needed = player_goal - len(players_in_game)
if not players_needed:
continue
if len(self._players_waiting) >= players_needed:
# let's get this party started
while len(players_in_game) < player_goal:
self._add_player(self._players_waiting.pop(0), game)
elif (players_needed_for_next_game == -1
or players_needed < players_needed_for_next_game):
players_needed_for_next_game = players_needed
return players_needed_for_next_game
def make_matches_max_players(self):
for server_struct in self._game_servers.itervalues():
for game in server_struct['games'].itervalues():
game_state = game['game_state']
players_in_game = game_state['players']
if len(players_in_game) < int(game_state['min_players']):
continue
player_goal = int(game_state['max_players'])
if len(players_in_game) == player_goal:
continue
while self._players_waiting and len(players_in_game) < player_goal:
self._add_player(self._players_waiting.pop(0), game)
def lookup_player_game(self, userID):
assert isinstance(userID, str)
return self._players_playing.get(userID, None)
def find_player_game(self, userID):
assert isinstance(userID, str)
if userID not in self._players_waiting:
self._players_waiting.append(userID)
players_needed_for_next_game = self.make_matches()
if userID in self._players_waiting:
#logging.info('find_player_game: %s must wait a little bit longer' % userID)
return {'result': 'wait', 'players_needed_for_next_game': players_needed_for_next_game}
player_game = self._players_playing.get(userID, None)
if not player_game:
raise Exception('userID %s is not in self._players_playing' % userID)
return player_game
| |
#!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiwallet.
Verify that a bitcoind node can load multiple wallet files
"""
from threading import Thread
import os
import shutil
import time
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import BitcoinTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
get_rpc_proxy,
)
from test_framework.qtumconfig import COINBASE_MATURITY, INITIAL_BLOCK_REWARD
FEATURE_LATEST = 169900
got_loading_error = False
def test_load_unload(node, name):
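# Repeatedly load/unload the same wallet; several of these run concurrently
# below so that at least one thread hits the "Wallet already being loading"
# race, which is what the concurrency test asserts on.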
global got_loading_error
for i in range(10):
if got_loading_error:
return
try:
node.loadwallet(name)
node.unloadwallet(name)
except JSONRPCException as e:
if e.error['code'] == -4 and 'Wallet already being loading' in e.error['message']:
got_loading_error = True
return
class MultiWalletTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.rpc_timeout = 120
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def add_options(self, parser):
parser.add_argument(
'--data_wallets_dir',
default=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/wallets/'),
help='Test data with wallet directories (default: %(default)s)',
)
def run_test(self):
node = self.nodes[0]
data_dir = lambda *p: os.path.join(node.datadir, self.chain, *p)
wallet_dir = lambda *p: data_dir('wallets', *p)
wallet = lambda name: node.get_wallet_rpc(name)
def wallet_file(name):
if os.path.isdir(wallet_dir(name)):
return wallet_dir(name, "wallet.dat")
return wallet_dir(name)
assert_equal(self.nodes[0].listwalletdir(), { 'wallets': [{ 'name': '' }] })
# check wallet.dat is created
self.stop_nodes()
assert_equal(os.path.isfile(wallet_dir('wallet.dat')), True)
# create symlink to verify wallet directory path can be referenced
# through symlink
os.mkdir(wallet_dir('w7'))
os.symlink('w7', wallet_dir('w7_symlink'))
# rename wallet.dat to make sure plain wallet file paths (as opposed to
# directory paths) can be loaded
os.rename(wallet_dir("wallet.dat"), wallet_dir("w8"))
# create another dummy wallet for use in testing backups later
self.start_node(0, [])
self.stop_nodes()
empty_wallet = os.path.join(self.options.tmpdir, 'empty.dat')
os.rename(wallet_dir("wallet.dat"), empty_wallet)
# restart node with a mix of wallet names:
# w1, w2, w3 - to verify new wallets created when non-existing paths specified
# w - to verify wallet name matching works when one wallet path is prefix of another
# sub/w5 - to verify relative wallet path is created correctly
# extern/w6 - to verify absolute wallet path is created correctly
# w7_symlink - to verify symlinked wallet path is initialized correctly
# w8 - to verify existing wallet file is loaded correctly
# '' - to verify default wallet file is created correctly
wallet_names = ['w1', 'w2', 'w3', 'w', 'sub/w5', os.path.join(self.options.tmpdir, 'extern/w6'), 'w7_symlink', 'w8', '']
extra_args = ['-wallet={}'.format(n) for n in wallet_names]
self.start_node(0, extra_args)
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), ['', os.path.join('sub', 'w5'), 'w', 'w1', 'w2', 'w3', 'w7', 'w7_symlink', 'w8'])
assert_equal(set(node.listwallets()), set(wallet_names))
# check that all requested wallets were created
self.stop_node(0)
for wallet_name in wallet_names:
assert_equal(os.path.isfile(wallet_file(wallet_name)), True)
# should not initialize if wallet path can't be created
exp_stderr = "boost::filesystem::create_directory:"
self.nodes[0].assert_start_raises_init_error(['-wallet=wallet.dat/bad'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" does not exist')
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" is a relative path', cwd=data_dir())
self.nodes[0].assert_start_raises_init_error(['-walletdir=debug.log'], 'Error: Specified -walletdir "debug.log" is not a directory', cwd=data_dir())
# should not initialize if there are duplicate wallets
self.nodes[0].assert_start_raises_init_error(['-wallet=w1', '-wallet=w1'], 'Error: Error loading wallet w1. Duplicate -wallet filename specified.')
# should not initialize if one wallet is a copy of another
shutil.copyfile(wallet_dir('w8'), wallet_dir('w8_copy'))
exp_stderr = r"BerkeleyBatch: Can't open database w8_copy \(duplicates fileid \w+ from w8\)"
self.nodes[0].assert_start_raises_init_error(['-wallet=w8', '-wallet=w8_copy'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
# should not initialize if wallet file is a symlink
os.symlink('w8', wallet_dir('w8_symlink'))
self.nodes[0].assert_start_raises_init_error(['-wallet=w8_symlink'], r'Error: Invalid -wallet path \'w8_symlink\'\. .*', match=ErrorMatch.FULL_REGEX)
# should not initialize if the specified walletdir does not exist
self.nodes[0].assert_start_raises_init_error(['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist')
# should not initialize if the specified walletdir is not a directory
not_a_dir = wallet_dir('notadir')
open(not_a_dir, 'a', encoding="utf8").close()
self.nodes[0].assert_start_raises_init_error(['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory')
self.log.info("Do not allow -zapwallettxes with multiwallet")
self.nodes[0].assert_start_raises_init_error(['-zapwallettxes', '-wallet=w1', '-wallet=w2'], "Error: -zapwallettxes is only allowed with a single wallet file")
self.nodes[0].assert_start_raises_init_error(['-zapwallettxes=1', '-wallet=w1', '-wallet=w2'], "Error: -zapwallettxes is only allowed with a single wallet file")
self.nodes[0].assert_start_raises_init_error(['-zapwallettxes=2', '-wallet=w1', '-wallet=w2'], "Error: -zapwallettxes is only allowed with a single wallet file")
self.log.info("Do not allow -salvagewallet with multiwallet")
self.nodes[0].assert_start_raises_init_error(['-salvagewallet', '-wallet=w1', '-wallet=w2'], "Error: -salvagewallet is only allowed with a single wallet file")
self.nodes[0].assert_start_raises_init_error(['-salvagewallet=1', '-wallet=w1', '-wallet=w2'], "Error: -salvagewallet is only allowed with a single wallet file")
self.log.info("Do not allow -upgradewallet with multiwallet")
self.nodes[0].assert_start_raises_init_error(['-upgradewallet', '-wallet=w1', '-wallet=w2'], "Error: -upgradewallet is only allowed with a single wallet file")
self.nodes[0].assert_start_raises_init_error(['-upgradewallet=1', '-wallet=w1', '-wallet=w2'], "Error: -upgradewallet is only allowed with a single wallet file")
# if wallets/ doesn't exist, datadir should be the default wallet dir
wallet_dir2 = data_dir('walletdir')
os.rename(wallet_dir(), wallet_dir2)
self.start_node(0, ['-wallet=w4', '-wallet=w5'])
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
node.generatetoaddress(nblocks=1, address=w5.getnewaddress())
# now if wallets/ exists again, but the rootdir is specified as the walletdir, w4 and w5 should still be loaded
os.rename(wallet_dir2, wallet_dir())
self.restart_node(0, ['-wallet=w4', '-wallet=w5', '-walletdir=' + data_dir()])
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
w5_info = w5.getwalletinfo()
assert_equal(w5_info['immature_balance'], INITIAL_BLOCK_REWARD)
competing_wallet_dir = os.path.join(self.options.tmpdir, 'competing_walletdir')
os.mkdir(competing_wallet_dir)
self.restart_node(0, ['-walletdir=' + competing_wallet_dir])
exp_stderr = r"Error: Error initializing wallet database environment \"\S+competing_walletdir\"!"
self.nodes[1].assert_start_raises_init_error(['-walletdir=' + competing_wallet_dir], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
self.restart_node(0, extra_args)
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), ['', os.path.join('sub', 'w5'), 'w', 'w1', 'w2', 'w3', 'w7', 'w7_symlink', 'w8', 'w8_copy'])
wallets = [wallet(w) for w in wallet_names]
wallet_bad = wallet("bad")
# check wallet names and balances
node.generatetoaddress(nblocks=1, address=wallets[0].getnewaddress())
for wallet_name, wallet in zip(wallet_names, wallets):
info = wallet.getwalletinfo()
assert_equal(info['immature_balance'], INITIAL_BLOCK_REWARD if wallet is wallets[0] else 0)
assert_equal(info['walletname'], wallet_name)
# accessing invalid wallet fails
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo)
# accessing wallet RPC without using wallet endpoint fails
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w1, w2, w3, w4, *_ = wallets
w1.generatetoaddress(nblocks=COINBASE_MATURITY+1, address=w1.getnewaddress())
assert_equal(w1.getbalance(), 2*INITIAL_BLOCK_REWARD)
assert_equal(w2.getbalance(), 0)
assert_equal(w3.getbalance(), 0)
assert_equal(w4.getbalance(), 0)
w1.sendtoaddress(w2.getnewaddress(), 1)
w1.sendtoaddress(w3.getnewaddress(), 2)
w1.sendtoaddress(w4.getnewaddress(), 3)
node.generatetoaddress(nblocks=1, address=w1.getnewaddress())
assert_equal(w2.getbalance(), 1)
assert_equal(w3.getbalance(), 2)
assert_equal(w4.getbalance(), 3)
batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()])
assert_equal(batch[0]["result"]["chain"], self.chain)
assert_equal(batch[1]["result"]["walletname"], "w1")
self.log.info('Check for per-wallet settxfee call')
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], 0)
w2.settxfee(4.0)
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], 4.0)
self.log.info("Test dynamic wallet loading")
self.restart_node(0, ['-nowallet'])
assert_equal(node.listwallets(), [])
assert_raises_rpc_error(-32601, "Method not found", node.getwalletinfo)
self.log.info("Load first wallet")
loadwallet_name = node.loadwallet(wallet_names[0])
assert_equal(loadwallet_name['name'], wallet_names[0])
assert_equal(node.listwallets(), wallet_names[0:1])
node.getwalletinfo()
w1 = node.get_wallet_rpc(wallet_names[0])
w1.getwalletinfo()
self.log.info("Load second wallet")
loadwallet_name = node.loadwallet(wallet_names[1])
assert_equal(loadwallet_name['name'], wallet_names[1])
assert_equal(node.listwallets(), wallet_names[0:2])
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w2 = node.get_wallet_rpc(wallet_names[1])
w2.getwalletinfo()
self.log.info("Concurrent wallet loading")
threads = []
for _ in range(3):
n = node.cli if self.options.usecli else get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
t = Thread(target=test_load_unload, args=(n, wallet_names[2], ))
t.start()
threads.append(t)
for t in threads:
t.join()
global got_loading_error
assert_equal(got_loading_error, True)
self.log.info("Load remaining wallets")
for wallet_name in wallet_names[2:]:
loadwallet_name = self.nodes[0].loadwallet(wallet_name)
assert_equal(loadwallet_name['name'], wallet_name)
assert_equal(set(self.nodes[0].listwallets()), set(wallet_names))
# Fail to load if wallet doesn't exist
assert_raises_rpc_error(-18, 'Wallet wallets not found.', self.nodes[0].loadwallet, 'wallets')
# Fail to load duplicate wallets
assert_raises_rpc_error(-4, 'Wallet file verification failed: Error loading wallet w1. Duplicate -wallet filename specified.', self.nodes[0].loadwallet, wallet_names[0])
# Fail to load duplicate wallets by different ways (directory and filepath)
assert_raises_rpc_error(-4, "Wallet file verification failed: Error loading wallet wallet.dat. Duplicate -wallet filename specified.", self.nodes[0].loadwallet, 'wallet.dat')
# Fail to load if one wallet is a copy of another
assert_raises_rpc_error(-4, "BerkeleyBatch: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if one wallet is a copy of another, test this twice to make sure that we don't re-introduce #14304
assert_raises_rpc_error(-4, "BerkeleyBatch: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if wallet file is a symlink
assert_raises_rpc_error(-4, "Wallet file verification failed: Invalid -wallet path 'w8_symlink'", self.nodes[0].loadwallet, 'w8_symlink')
# Fail to load if a directory is specified that doesn't contain a wallet
os.mkdir(wallet_dir('empty_wallet_dir'))
assert_raises_rpc_error(-18, "Directory empty_wallet_dir does not contain a wallet.dat file", self.nodes[0].loadwallet, 'empty_wallet_dir')
self.log.info("Test dynamic wallet creation.")
# Fail to create a wallet if it already exists.
assert_raises_rpc_error(-4, "Wallet w2 already exists.", self.nodes[0].createwallet, 'w2')
# Successfully create a wallet with a new name
loadwallet_name = self.nodes[0].createwallet('w9')
assert_equal(loadwallet_name['name'], 'w9')
w9 = node.get_wallet_rpc('w9')
assert_equal(w9.getwalletinfo()['walletname'], 'w9')
assert 'w9' in self.nodes[0].listwallets()
# Successfully create a wallet using a full path
new_wallet_dir = os.path.join(self.options.tmpdir, 'new_walletdir')
new_wallet_name = os.path.join(new_wallet_dir, 'w10')
loadwallet_name = self.nodes[0].createwallet(new_wallet_name)
assert_equal(loadwallet_name['name'], new_wallet_name)
w10 = node.get_wallet_rpc(new_wallet_name)
assert_equal(w10.getwalletinfo()['walletname'], new_wallet_name)
assert new_wallet_name in self.nodes[0].listwallets()
self.log.info("Test dynamic wallet unloading")
# Test `unloadwallet` errors
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].unloadwallet)
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", self.nodes[0].unloadwallet, "dummy")
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", node.get_wallet_rpc("dummy").unloadwallet)
assert_raises_rpc_error(-8, "Cannot unload the requested wallet", w1.unloadwallet, "w2"),
# Successfully unload the specified wallet name
self.nodes[0].unloadwallet("w1")
assert 'w1' not in self.nodes[0].listwallets()
# Successfully unload the wallet referenced by the request endpoint
# Also ensure unload works during walletpassphrase timeout
w2.encryptwallet('test')
w2.walletpassphrase('test', 1)
w2.unloadwallet()
time.sleep(1.1)
assert 'w2' not in self.nodes[0].listwallets()
# Successfully unload all wallets
for wallet_name in self.nodes[0].listwallets():
self.nodes[0].unloadwallet(wallet_name)
assert_equal(self.nodes[0].listwallets(), [])
assert_raises_rpc_error(-32601, "Method not found (wallet method is disabled because no wallet is loaded)", self.nodes[0].getwalletinfo)
# Successfully load a previously unloaded wallet
self.nodes[0].loadwallet('w1')
assert_equal(self.nodes[0].listwallets(), ['w1'])
assert_equal(w1.getwalletinfo()['walletname'], 'w1')
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), ['', os.path.join('sub', 'w5'), 'w', 'w1', 'w2', 'w3', 'w7', 'w7_symlink', 'w8', 'w8_copy', 'w9'])
# Test backing up and restoring wallets
self.log.info("Test wallet backup")
self.restart_node(0, ['-nowallet'])
for wallet_name in wallet_names:
self.nodes[0].loadwallet(wallet_name)
for wallet_name in wallet_names:
rpc = self.nodes[0].get_wallet_rpc(wallet_name)
addr = rpc.getnewaddress()
backup = os.path.join(self.options.tmpdir, 'backup.dat')
rpc.backupwallet(backup)
self.nodes[0].unloadwallet(wallet_name)
shutil.copyfile(empty_wallet, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], False)
self.nodes[0].unloadwallet(wallet_name)
shutil.copyfile(backup, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], True)
# Test .walletlock file is closed
self.start_node(1)
wallet = os.path.join(self.options.tmpdir, 'my_wallet')
self.nodes[0].createwallet(wallet)
assert_raises_rpc_error(-4, "Error initializing wallet database environment", self.nodes[1].loadwallet, wallet)
self.nodes[0].unloadwallet(wallet)
self.nodes[1].loadwallet(wallet)
# Fail to load if wallet is downgraded
shutil.copytree(os.path.join(self.options.data_wallets_dir, 'high_minversion'), wallet_dir('high_minversion'))
self.restart_node(0, extra_args=['-upgradewallet={}'.format(FEATURE_LATEST)])
assert {'name': 'high_minversion'} in self.nodes[0].listwalletdir()['wallets']
self.log.info("Fail -upgradewallet that results in downgrade")
assert_raises_rpc_error(
-4,
'Wallet loading failed: Error loading {}: Wallet requires newer version of {}'.format(
wallet_dir('high_minversion', 'wallet.dat'), self.config['environment']['PACKAGE_NAME']),
lambda: self.nodes[0].loadwallet(filename='high_minversion'),
)
if __name__ == '__main__':
MultiWalletTest().main()
| |
# coding=utf-8
from __future__ import unicode_literals
import os
import re
import warnings
from codecs import BOM_UTF8
from warnings import catch_warnings
from tempfile import NamedTemporaryFile
import pytest
import six
import configobj as co
from configobj import ConfigObj, flatten_errors, ReloadError, DuplicateError, MissingInterpolationOption, InterpolationLoopError, ConfigObjError
from validate import Validator, VdtValueTooSmallError
def cfg_lines(config_string_representation):
"""
:param config_string_representation: string representation of a config
file (typically a triple-quoted string)
:type config_string_representation: str or unicode
:return: a list of lines of that config. Whitespace on the left will be
trimmed based on the indentation level to make it a bit saner to assert
content of a particular line
:rtype: list
"""
lines = config_string_representation.splitlines()
for idx, line in enumerate(lines):
if line.strip():
line_no_with_content = idx
break
else:
raise ValueError('no content in provided config file: '
'{!r}'.format(config_string_representation))
first_content = lines[line_no_with_content]
if isinstance(first_content, six.binary_type):
first_content = first_content.decode('utf-8')
ws_chars = len(re.search(r'^(\s*)', first_content).group(1))
def yield_stringified_line():
for line in lines:
if isinstance(line, six.binary_type):
yield line.decode('utf-8')
else:
yield line
return [re.sub(r'^\s{0,%s}' % ws_chars, '', line).encode('utf-8')
for line in yield_stringified_line()]
@pytest.fixture
def cfg_contents(request):
def make_file_with_contents_and_return_name(config_string_representation):
"""
:param config_string_representation: string representation of a config
file (typically a triple-quoted string)
:type config_string_representation: str or unicode
:return: name of a temporary file containing those config lines; the file
is deleted again when the requesting test finishes
:rtype: str
"""
lines = cfg_lines(config_string_representation)
with NamedTemporaryFile(delete=False, mode='wb') as cfg_file:
for line in lines:
if isinstance(line, six.binary_type):
cfg_file.write(line + os.linesep.encode('utf-8'))
else:
cfg_file.write((line + os.linesep).encode('utf-8'))
request.addfinalizer(lambda : os.unlink(cfg_file.name))
return cfg_file.name
return make_file_with_contents_and_return_name
def test_order_preserved():
c = ConfigObj()
c['a'] = 1
c['b'] = 2
c['c'] = 3
c['section'] = {}
c['section']['a'] = 1
c['section']['b'] = 2
c['section']['c'] = 3
c['section']['section'] = {}
c['section']['section2'] = {}
c['section']['section3'] = {}
c['section2'] = {}
c['section3'] = {}
c2 = ConfigObj(c)
assert c2.scalars == ['a', 'b', 'c']
assert c2.sections == ['section', 'section2', 'section3']
assert c2['section'].scalars == ['a', 'b', 'c']
assert c2['section'].sections == ['section', 'section2', 'section3']
assert c['section'] is not c2['section']
assert c['section']['section'] is not c2['section']['section']
def test_options_deprecation():
warnings.simplefilter('always', DeprecationWarning)
with catch_warnings(record=True) as log:
ConfigObj(options={})
# unpack the only member of log
try:
warning, = log
except ValueError:
assert len(log) == 1
assert warning.category == DeprecationWarning
def test_list_members():
c = ConfigObj()
c['a'] = []
c['a'].append('foo')
assert c['a'] == ['foo']
def test_list_interpolation_with_pop():
c = ConfigObj()
c['a'] = []
c['a'].append('%(b)s')
c['b'] = 'bar'
assert c.pop('a') == ['bar']
def test_with_default():
c = ConfigObj()
c['a'] = 3
assert c.pop('a') == 3
assert c.pop('b', 3) == 3
with pytest.raises(KeyError):
c.pop('c')
def test_interpolation_with_section_names(cfg_contents):
cfg = cfg_contents("""
item1 = 1234
[section]
[[item1]]
foo='bar'
[[DEFAULT]]
[[[item1]]]
why = would you do this?
[[other-subsection]]
item2 = '$item1'""")
c = ConfigObj(cfg, interpolation='Template')
# This raises an exception in 4.7.1 and earlier due to the section
# being found as the interpolation value
repr(c)
def test_interpolation_repr():
c = ConfigObj(['foo = $bar'], interpolation='Template')
c['baz'] = {}
c['baz']['spam'] = '%(bar)s'
# This raises a MissingInterpolationOption exception in 4.7.1 and earlier
repr(c)
class TestEncoding(object):
@pytest.fixture
def ant_cfg(self):
return """
[tags]
[[bug]]
translated = \U0001f41c
"""
#issue #18
def test_unicode_conversion_when_encoding_is_set(self, cfg_contents):
cfg = cfg_contents(b"test = some string")
c = ConfigObj(cfg, encoding='utf8')
if six.PY2:
assert not isinstance(c['test'], str)
assert isinstance(c['test'], unicode)
else:
assert isinstance(c['test'], str)
#issue #18
def test_no_unicode_conversion_when_encoding_is_omitted(self, cfg_contents):
cfg = cfg_contents(b"test = some string")
c = ConfigObj(cfg)
if six.PY2:
assert isinstance(c['test'], str)
assert not isinstance(c['test'], unicode)
else:
assert isinstance(c['test'], str)
#issue #44
def test_that_encoding_using_list_of_strings(self):
cfg = [b'test = \xf0\x9f\x90\x9c']
c = ConfigObj(cfg, encoding='utf8')
if six.PY2:
assert isinstance(c['test'], unicode)
assert not isinstance(c['test'], str)
else:
assert isinstance(c['test'], str)
assert c['test'] == '\U0001f41c'
#issue #44
def test_encoding_in_subsections(self, ant_cfg, cfg_contents):
c = cfg_contents(ant_cfg)
cfg = ConfigObj(c, encoding='utf-8')
assert isinstance(cfg['tags']['bug']['translated'], six.text_type)
#issue #44 and #55
def test_encoding_in_config_files(self, request, ant_cfg):
# the cfg_contents fixture is doing this too, but be explicit
with NamedTemporaryFile(delete=False, mode='wb') as cfg_file:
cfg_file.write(ant_cfg.encode('utf-8'))
request.addfinalizer(lambda : os.unlink(cfg_file.name))
cfg = ConfigObj(cfg_file.name, encoding='utf-8')
assert isinstance(cfg['tags']['bug']['translated'], six.text_type)
cfg.write()
@pytest.fixture
def testconfig1():
"""
copied from the main doctest
"""
return """\
key1= val # comment 1
key2= val # comment 2
# comment 3
[lev1a] # comment 4
key1= val # comment 5
key2= val # comment 6
# comment 7
[lev1b] # comment 8
key1= val # comment 9
key2= val # comment 10
# comment 11
[[lev2ba]] # comment 12
key1= val # comment 13
# comment 14
[[lev2bb]] # comment 15
key1= val # comment 16
# comment 17
[lev1c] # comment 18
# comment 19
[[lev2c]] # comment 20
# comment 21
[[[lev3c]]] # comment 22
key1 = val # comment 23"""
@pytest.fixture
def testconfig2():
return """\
key1 = 'val1'
key2 = "val2"
key3 = val3
["section 1"] # comment
keys11 = val1
keys12 = val2
keys13 = val3
[section 2]
keys21 = val1
keys22 = val2
keys23 = val3
[['section 2 sub 1']]
fish = 3
"""
@pytest.fixture
def testconfig6():
return b'''
name1 = """ a single line value """ # comment
name2 = \''' another single line value \''' # comment
name3 = """ a single line value """
name4 = \''' another single line value \'''
[ "multi section" ]
name1 = """
Well, this is a
multiline value
"""
name2 = \'''
Well, this is a
multiline value
\'''
name3 = """
Well, this is a
multiline value
""" # a comment
name4 = \'''
Well, this is a
multiline value
\''' # I guess this is a comment too
'''
@pytest.fixture
def a(testconfig1, cfg_contents):
"""
also copied from main doc tests
"""
return ConfigObj(cfg_contents(testconfig1), raise_errors=True)
@pytest.fixture
def b(testconfig2, cfg_contents):
"""
also copied from main doc tests
"""
return ConfigObj(cfg_contents(testconfig2), raise_errors=True)
@pytest.fixture
def i(testconfig6, cfg_contents):
"""
also copied from main doc tests
"""
return ConfigObj(cfg_contents(testconfig6), raise_errors=True)
def test_configobj_dict_representation(a, b, cfg_contents):
assert a.depth == 0
assert a == {
'key2': 'val',
'key1': 'val',
'lev1c': {
'lev2c': {
'lev3c': {
'key1': 'val',
},
},
},
'lev1b': {
'key2': 'val',
'key1': 'val',
'lev2ba': {
'key1': 'val',
},
'lev2bb': {
'key1': 'val',
},
},
'lev1a': {
'key2': 'val',
'key1': 'val',
},
}
assert b.depth == 0
assert b == {
'key3': 'val3',
'key2': 'val2',
'key1': 'val1',
'section 1': {
'keys11': 'val1',
'keys13': 'val3',
'keys12': 'val2',
},
'section 2': {
'section 2 sub 1': {
'fish': '3',
},
'keys21': 'val1',
'keys22': 'val2',
'keys23': 'val3',
},
}
t = cfg_lines("""
'a' = b # !"$%^&*(),::;'@~#= 33
"b" = b #= 6, 33
""")
t2 = ConfigObj(t)
assert t2 == {'a': 'b', 'b': 'b'}
t2.inline_comments['b'] = ''
del t2['a']
assert t2.write() == ['','b = b', '']
def test_behavior_when_list_values_is_false():
c = '''
key1 = no quotes
key2 = 'single quotes'
key3 = "double quotes"
key4 = "list", 'with', several, "quotes"
'''
cfg = ConfigObj(cfg_lines(c), list_values=False)
assert cfg == {
'key1': 'no quotes',
'key2': "'single quotes'",
'key3': '"double quotes"',
'key4': '"list", \'with\', several, "quotes"'
}
cfg2 = ConfigObj(list_values=False)
cfg2['key1'] = 'Multiline\nValue'
cfg2['key2'] = '''"Value" with 'quotes' !'''
assert cfg2.write() == [
"key1 = '''Multiline\nValue'''",
'key2 = "Value" with \'quotes\' !'
]
cfg2.list_values = True
assert cfg2.write() == [
"key1 = '''Multiline\nValue'''",
'key2 = \'\'\'"Value" with \'quotes\' !\'\'\''
]
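# test_flatten_errors below checks that flatten_errors() reports each failing
# key together with the path of sections leading to it, and that with
# preserve_errors=True the third element of each result tuple is the validator
# exception (here VdtValueTooSmallError) rather than just False.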
def test_flatten_errors(val, cfg_contents):
config = cfg_contents("""
test1=40
test2=hello
test3=3
test4=5.0
[section]
test1=40
test2=hello
test3=3
test4=5.0
[[sub section]]
test1=40
test2=hello
test3=3
test4=5.0
""")
configspec = cfg_contents("""
test1= integer(30,50)
test2= string
test3=integer
test4=float(6.0)
[section]
test1=integer(30,50)
test2=string
test3=integer
test4=float(6.0)
[[sub section]]
test1=integer(30,50)
test2=string
test3=integer
test4=float(6.0)
""")
c1 = ConfigObj(config, configspec=configspec)
res = c1.validate(val)
assert flatten_errors(c1, res) == [([], 'test4', False), (['section'], 'test4', False), (['section', 'sub section'], 'test4', False)]
res = c1.validate(val, preserve_errors=True)
check = flatten_errors(c1, res)
assert check[0][:2] == ([], 'test4')
assert check[1][:2] == (['section'], 'test4')
assert check[2][:2] == (['section', 'sub section'], 'test4')
for entry in check:
assert isinstance(entry[2], VdtValueTooSmallError)
assert str(entry[2]) == 'the value "5.0" is too small.'
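# test_unicode_handling exercises BOM detection (BOM_UTF8 on the first line),
# decoding to text when an encoding is given, preservation of the detected
# newline style, and writing back out as bytes prefixed with the BOM.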
def test_unicode_handling():
u_base = '''
# initial comment
# inital comment 2
test1 = some value
# comment
test2 = another value # inline comment
# section comment
[section] # inline comment
test = test # another inline comment
test2 = test2
# final comment
# final comment2
'''
# needing to keep line endings means this isn't a good candidate
# for the cfg_lines utility method
u = u_base.encode('utf_8').splitlines(True)
u[0] = BOM_UTF8 + u[0]
uc = ConfigObj(u)
uc.encoding = None
assert uc.BOM
assert uc == {'test1': 'some value', 'test2': 'another value',
'section': {'test': 'test', 'test2': 'test2'}}
uc = ConfigObj(u, encoding='utf_8', default_encoding='latin-1')
assert uc.BOM
assert isinstance(uc['test1'], six.text_type)
assert uc.encoding == 'utf_8'
assert uc.newlines == '\n'
assert len(uc.write()) == 13
uc['latin1'] = "This costs lot's of "
a_list = uc.write()
assert 'latin1' in str(a_list)
assert len(a_list) == 14
assert isinstance(a_list[0], six.binary_type)
assert a_list[0].startswith(BOM_UTF8)
u = u_base.replace('\n', '\r\n').encode('utf-8').splitlines(True)
uc = ConfigObj(u)
assert uc.newlines == '\r\n'
uc.newlines = '\r'
file_like = six.BytesIO()
uc.write(file_like)
file_like.seek(0)
uc2 = ConfigObj(file_like)
assert uc2 == uc
assert uc2.filename == None
assert uc2.newlines == '\r'
class TestWritingConfigs(object):
def test_validate(self, val):
spec = [
'# Initial Comment',
'',
'key1 = string(default=Hello)',
'',
'# section comment',
'[section] # inline comment',
'# key1 comment',
'key1 = integer(default=6)',
'# key2 comment',
'key2 = boolean(default=True)',
'# subsection comment',
'[[sub-section]] # inline comment',
'# another key1 comment',
'key1 = float(default=3.0)'
]
blank_config = ConfigObj(configspec=spec)
assert blank_config.validate(val, copy=True)
assert blank_config.dict() == {
'key1': 'Hello',
'section': {'key1': 6, 'key2': True, 'sub-section': {'key1': 3.0}}
}
assert blank_config.write() == [
'# Initial Comment',
'',
'key1 = Hello',
'',
'# section comment',
'[section]# inline comment',
'# key1 comment',
'key1 = 6',
'# key2 comment',
'key2 = True',
'# subsection comment',
'[[sub-section]]# inline comment',
'# another key1 comment',
'key1 = 3.0'
]
def test_writing_empty_values(self):
config_with_empty_values = [
'',
'key1 =',
'key2 =# a comment',
]
cfg = ConfigObj(config_with_empty_values)
assert cfg.write() == ['', 'key1 = ""', 'key2 = ""# a comment']
cfg.write_empty_values = True
assert cfg.write() == ['', 'key1 = ', 'key2 = # a comment']
class TestUnrepr(object):
def test_in_reading(self):
config_to_be_unreprd = cfg_lines("""
key1 = (1, 2, 3) # comment
key2 = True
key3 = 'a string'
key4 = [1, 2, 3, 'a mixed list']
""")
cfg = ConfigObj(config_to_be_unreprd, unrepr=True)
assert cfg == {
'key1': (1, 2, 3),
'key2': True,
'key3': 'a string',
'key4': [1, 2, 3, 'a mixed list']
}
assert cfg == ConfigObj(cfg.write(), unrepr=True)
def test_in_multiline_values(self, cfg_contents):
config_with_multiline_value = cfg_contents('''
k = \"""{
'k1': 3,
'k2': 6.0}\"""
''')
cfg = ConfigObj(config_with_multiline_value, unrepr=True)
assert cfg == {'k': {'k1': 3, 'k2': 6.0}}
def test_with_a_dictionary(self):
config_with_dict_value = ['k = {"a": 1}']
cfg = ConfigObj(config_with_dict_value, unrepr=True)
assert isinstance(cfg['k'], dict)
def test_with_hash(self):
config_with_a_hash_in_a_list = [
'key1 = (1, 2, 3) # comment',
'key2 = True',
"key3 = 'a string'",
"key4 = [1, 2, 3, 'a mixed list#']"
]
cfg = ConfigObj(config_with_a_hash_in_a_list, unrepr=True)
assert cfg == {
'key1': (1, 2, 3),
'key2': True,
'key3': 'a string',
'key4': [1, 2, 3, 'a mixed list#']
}
class TestValueErrors(object):
def test_bool(self, empty_cfg):
empty_cfg['a'] = 'fish'
with pytest.raises(ValueError) as excinfo:
empty_cfg.as_bool('a')
assert str(excinfo.value) == 'Value "fish" is neither True nor False'
empty_cfg['b'] = 'True'
assert empty_cfg.as_bool('b') is True
empty_cfg['b'] = 'off'
assert empty_cfg.as_bool('b') is False
def test_int(self, empty_cfg):
for bad in ('fish', '3.2'):
empty_cfg['a'] = bad
with pytest.raises(ValueError) as excinfo:
empty_cfg.as_int('a')
assert str(excinfo.value).startswith('invalid literal for int()')
empty_cfg['b'] = '1'
        assert empty_cfg.as_int('b') == 1
empty_cfg['b'] = '3.2'
def test_float(self, empty_cfg):
empty_cfg['a'] = 'fish'
with pytest.raises(ValueError):
empty_cfg.as_float('a')
empty_cfg['b'] = '1'
assert empty_cfg.as_float('b') == 1
empty_cfg['b'] = '3.2'
assert empty_cfg.as_float('b') == 3.2
def test_error_types():
# errors that don't have interesting messages
test_value = 'what'
for ErrorClass in (co.ConfigObjError, co.NestingError, co.ParseError,
co.DuplicateError, co.ConfigspecError,
co.RepeatSectionError):
with pytest.raises(ErrorClass) as excinfo:
# TODO: assert more interesting things
# now that we're not using doctest
raise ErrorClass(test_value)
assert str(excinfo.value) == test_value
for ErrorClassWithMessage, msg in (
(co.InterpolationLoopError,
'interpolation loop detected in value "{0}".'),
(co.MissingInterpolationOption,
'missing option "{0}" in interpolation.'),
):
with pytest.raises(ErrorClassWithMessage) as excinfo:
raise ErrorClassWithMessage(test_value)
assert str(excinfo.value) == msg.format(test_value)
# ReloadError is raised as IOError
with pytest.raises(IOError):
raise co.ReloadError()
class TestSectionBehavior(object):
def test_dictionary_representation(self, a):
n = a.dict()
assert n == a
assert n is not a
def test_merging(self, cfg_contents):
config_with_subsection = cfg_contents("""
[section1]
option1 = True
[[subsection]]
more_options = False
# end of file
""")
config_that_overwrites_parameter = cfg_contents("""
# File is user.ini
[section1]
option1 = False
# end of file
""")
c1 = ConfigObj(config_that_overwrites_parameter)
c2 = ConfigObj(config_with_subsection)
c2.merge(c1)
assert c2.dict() == {'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}}
def test_walking_with_in_place_updates(self, cfg_contents):
config = cfg_contents("""
[XXXXsection]
XXXXkey = XXXXvalue
""")
cfg = ConfigObj(config)
assert cfg.dict() == {'XXXXsection': {'XXXXkey': 'XXXXvalue'}}
def transform(section, key):
val = section[key]
newkey = key.replace('XXXX', 'CLIENT1')
section.rename(key, newkey)
if isinstance(val, six.string_types):
val = val.replace('XXXX', 'CLIENT1')
section[newkey] = val
assert cfg.walk(transform, call_on_sections=True) == {
'CLIENT1section': {'CLIENT1key': None}
}
assert cfg.dict() == {
'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}
}
def test_reset_a_configobj():
something = object()
cfg = ConfigObj()
cfg['something'] = something
cfg['section'] = {'something': something}
cfg.filename = 'fish'
cfg.raise_errors = something
cfg.list_values = something
cfg.create_empty = something
cfg.file_error = something
cfg.stringify = something
cfg.indent_type = something
cfg.encoding = something
cfg.default_encoding = something
cfg.BOM = something
cfg.newlines = something
cfg.write_empty_values = something
cfg.unrepr = something
cfg.initial_comment = something
cfg.final_comment = something
cfg.configspec = something
cfg.inline_comments = something
cfg.comments = something
cfg.defaults = something
cfg.default_values = something
cfg.reset()
assert cfg.filename is None
assert cfg.raise_errors is False
assert cfg.list_values is True
assert cfg.create_empty is False
assert cfg.file_error is False
assert cfg.interpolation is True
assert cfg.configspec is None
assert cfg.stringify is True
assert cfg.indent_type is None
assert cfg.encoding is None
assert cfg.default_encoding is None
assert cfg.unrepr is False
assert cfg.write_empty_values is False
assert cfg.inline_comments == {}
assert cfg.comments == {}
assert cfg.defaults == []
assert cfg.default_values == {}
assert cfg == ConfigObj()
assert repr(cfg) == 'ConfigObj({})'
class TestReloading(object):
@pytest.fixture
def reloadable_cfg_content(self):
content = '''
test1=40
test2=hello
test3=3
test4=5.0
[section]
test1=40
test2=hello
test3=3
test4=5.0
[[sub section]]
test1=40
test2=hello
test3=3
test4=5.0
[section2]
test1=40
test2=hello
test3=3
test4=5.0
'''
return content
def test_handle_no_filename(self):
for bad_args in ([six.BytesIO()], [], [[]]):
cfg = ConfigObj(*bad_args)
with pytest.raises(ReloadError) as excinfo:
cfg.reload()
assert str(excinfo.value) == 'reload failed, filename is not set.'
def test_reloading_with_an_actual_file(self, request,
reloadable_cfg_content,
cfg_contents):
with NamedTemporaryFile(delete=False, mode='wb') as cfg_file:
cfg_file.write(reloadable_cfg_content.encode('utf-8'))
request.addfinalizer(lambda : os.unlink(cfg_file.name))
configspec = cfg_contents("""
test1= integer(30,50)
test2= string
test3=integer
test4=float(4.5)
[section]
test1=integer(30,50)
test2=string
test3=integer
test4=float(4.5)
[[sub section]]
test1=integer(30,50)
test2=string
test3=integer
test4=float(4.5)
[section2]
test1=integer(30,50)
test2=string
test3=integer
test4=float(4.5)
""")
cfg = ConfigObj(cfg_file.name, configspec=configspec)
cfg.configspec['test1'] = 'integer(50,60)'
backup = ConfigObj(cfg_file.name)
del cfg['section']
del cfg['test1']
cfg['extra'] = '3'
cfg['section2']['extra'] = '3'
cfg.reload()
assert cfg == backup
assert cfg.validate(Validator())
class TestDuplicates(object):
def test_duplicate_section(self):
cfg = '''
[hello]
member = value
[hello again]
member = value
[ "hello" ]
member = value
'''
with pytest.raises(DuplicateError) as excinfo:
ConfigObj(cfg.splitlines(), raise_errors=True)
assert str(excinfo.value) == 'Duplicate section name at line 6.'
def test_duplicate_members(self):
d = '''
[hello]
member=value
[helloagain]
member1=value
member2=value
'member1'=value
["andagain"]
member=value
'''
with pytest.raises(DuplicateError) as excinfo:
ConfigObj(d.splitlines(),raise_errors=True)
assert str(excinfo.value) == 'Duplicate keyword name at line 7.'
class TestInterpolation(object):
"""
    tests various interpolation behaviors using config parser style interpolation
"""
@pytest.fixture
def config_parser_cfg(self):
cfg = ConfigObj()
cfg['DEFAULT'] = {
'b': 'goodbye',
'userdir': r'c:\\home',
'c': '%(d)s',
'd': '%(c)s'
}
cfg['section'] = {
'a': r'%(datadir)s\\some path\\file.py',
'b': r'%(userdir)s\\some path\\file.py',
'c': 'Yo %(a)s',
'd': '%(not_here)s',
'e': '%(e)s',
}
cfg['section']['DEFAULT'] = {
'datadir': r'c:\\silly_test',
'a': 'hello - %(b)s',
}
return cfg
@pytest.fixture
def template_cfg(self, cfg_contents):
interp_cfg = '''
[DEFAULT]
keyword1 = value1
'keyword 2' = 'value 2'
reference = ${keyword1}
foo = 123
[ section ]
templatebare = $keyword1/foo
bar = $$foo
dollar = $$300.00
stophere = $$notinterpolated
with_braces = ${keyword1}s (plural)
with_spaces = ${keyword 2}!!!
with_several = $keyword1/$reference/$keyword1
configparsersample = %(keyword 2)sconfig
deep = ${reference}
[[DEFAULT]]
baz = $foo
[[ sub-section ]]
quux = '$baz + $bar + $foo'
[[[ sub-sub-section ]]]
convoluted = "$bar + $baz + $quux + $bar"
'''
return ConfigObj(cfg_contents(interp_cfg), interpolation='Template')
def test_interpolation(self, config_parser_cfg):
test_section = config_parser_cfg['section']
assert test_section['a'] == r'c:\\silly_test\\some path\\file.py'
assert test_section['b'] == r'c:\\home\\some path\\file.py'
assert test_section['c'] == r'Yo c:\\silly_test\\some path\\file.py'
def test_interpolation_turned_off(self, config_parser_cfg):
config_parser_cfg.interpolation = False
test_section = config_parser_cfg['section']
assert test_section['a'] == r'%(datadir)s\\some path\\file.py'
assert test_section['b'] == r'%(userdir)s\\some path\\file.py'
assert test_section['c'] == r'Yo %(a)s'
def test_handle_errors(self, config_parser_cfg):
with pytest.raises(MissingInterpolationOption) as excinfo:
print(config_parser_cfg['section']['d'])
assert (str(excinfo.value) ==
'missing option "not_here" in interpolation.')
with pytest.raises(InterpolationLoopError) as excinfo:
print(config_parser_cfg['section']['e'])
assert (str(excinfo.value) ==
'interpolation loop detected in value "e".')
def test_template_interpolation(self, template_cfg):
test_sec = template_cfg['section']
assert test_sec['templatebare'] == 'value1/foo'
assert test_sec['dollar'] == '$300.00'
assert test_sec['stophere'] == '$notinterpolated'
assert test_sec['with_braces'] == 'value1s (plural)'
assert test_sec['with_spaces'] == 'value 2!!!'
assert test_sec['with_several'] == 'value1/value1/value1'
assert test_sec['configparsersample'] == '%(keyword 2)sconfig'
assert test_sec['deep'] == 'value1'
assert test_sec['sub-section']['quux'] == '123 + $foo + 123'
assert (test_sec['sub-section']['sub-sub-section']['convoluted'] ==
'$foo + 123 + 123 + $foo + 123 + $foo')
class TestQuotes(object):
"""
    tests what happens when dealing with quotes
"""
def assert_bad_quote_message(self, empty_cfg, to_quote, **kwargs):
        #TODO: this should use repr instead of str
message = 'Value "{0}" cannot be safely quoted.'
with pytest.raises(ConfigObjError) as excinfo:
empty_cfg._quote(to_quote, **kwargs)
assert str(excinfo.value) == message.format(to_quote)
def test_handle_unbalanced(self, i):
self.assert_bad_quote_message(i, '"""\'\'\'')
def test_handle_unallowed_newline(self, i):
newline = '\n'
self.assert_bad_quote_message(i, newline, multiline=False)
def test_handle_unallowed_open_quote(self, i):
open_quote = ' "\' '
self.assert_bad_quote_message(i, open_quote, multiline=False)
def test_handle_multiple_bad_quote_values(self):
testconfig5 = '''
config = "hello # comment
test = 'goodbye
fish = 'goodbye # comment
dummy = "hello again
'''
with pytest.raises(ConfigObjError) as excinfo:
ConfigObj(testconfig5.splitlines())
assert len(excinfo.value.errors) == 4
def test_handle_stringify_off():
c = ConfigObj()
c.stringify = False
with pytest.raises(TypeError) as excinfo:
c['test'] = 1
assert str(excinfo.value) == 'Value is not a string "1".'
class TestValues(object):
"""
Tests specifics about behaviors with types of values
"""
@pytest.fixture
def testconfig3(self, cfg_contents):
return cfg_contents("""
a = ,
b = test,
c = test1, test2 , test3
d = test1, test2, test3,
""")
def test_empty_values(self, cfg_contents):
cfg_with_empty = cfg_contents("""
k =
k2 =# comment test
val = test
val2 = ,
val3 = 1,
val4 = 1, 2
val5 = 1, 2, """)
cwe = ConfigObj(cfg_with_empty)
# see a comma? it's a list
assert cwe == {'k': '', 'k2': '', 'val': 'test', 'val2': [],
'val3': ['1'], 'val4': ['1', '2'], 'val5': ['1', '2']}
# not any more
cwe = ConfigObj(cfg_with_empty, list_values=False)
assert cwe == {'k': '', 'k2': '', 'val': 'test', 'val2': ',',
'val3': '1,', 'val4': '1, 2', 'val5': '1, 2,'}
def test_list_values(self, testconfig3):
cfg = ConfigObj(testconfig3, raise_errors=True)
assert cfg['a'] == []
assert cfg['b'] == ['test']
assert cfg['c'] == ['test1', 'test2', 'test3']
assert cfg['d'] == ['test1', 'test2', 'test3']
def test_list_values_off(self, testconfig3):
cfg = ConfigObj(testconfig3, raise_errors=True, list_values=False)
assert cfg['a'] == ','
assert cfg['b'] == 'test,'
assert cfg['c'] == 'test1, test2 , test3'
assert cfg['d'] == 'test1, test2, test3,'
def test_handle_multiple_list_value_errors(self):
testconfig4 = '''
config = 3,4,,
test = 3,,4
fish = ,,
dummy = ,,hello, goodbye
'''
with pytest.raises(ConfigObjError) as excinfo:
ConfigObj(testconfig4.splitlines())
assert len(excinfo.value.errors) == 4
def test_creating_with_a_dictionary():
dictionary_cfg_content = {
'key1': 'val1',
'key2': 'val2',
'section 1': {
'key1': 'val1',
'key2': 'val2',
'section 1b': {
'key1': 'val1',
'key2': 'val2',
},
},
'section 2': {
'key1': 'val1',
'key2': 'val2',
'section 2b': {
'key1': 'val1',
'key2': 'val2',
},
},
'key3': 'val3',
}
cfg = ConfigObj(dictionary_cfg_content)
assert dictionary_cfg_content == cfg
assert dictionary_cfg_content is not cfg
assert dictionary_cfg_content == cfg.dict()
assert dictionary_cfg_content is not cfg.dict()
class TestComments(object):
@pytest.fixture
def comment_filled_cfg(self, cfg_contents):
return cfg_contents("""
# initial comments
# with two lines
key = "value"
# section comment
[section] # inline section comment
# key comment
key = "value"
# final comment
# with two lines"""
)
def test_multiline_comments(self, i):
expected_multiline_value = '\nWell, this is a\nmultiline value\n'
assert i == {
'name4': ' another single line value ',
'multi section': {
'name4': expected_multiline_value,
'name2': expected_multiline_value,
'name3': expected_multiline_value,
'name1': expected_multiline_value,
},
'name2': ' another single line value ',
'name3': ' a single line value ',
'name1': ' a single line value ',
}
def test_starting_and_ending_comments(self, a, testconfig1, cfg_contents):
filename = a.filename
a.filename = None
values = a.write()
index = 0
while index < 23:
index += 1
line = values[index-1]
assert line.endswith('# comment ' + str(index))
a.filename = filename
start_comment = ['# Initial Comment', '', '#']
end_comment = ['', '#', '# Final Comment']
newconfig = start_comment + testconfig1.splitlines() + end_comment
nc = ConfigObj(newconfig)
assert nc.initial_comment == ['# Initial Comment', '', '#']
assert nc.final_comment == ['', '#', '# Final Comment']
assert nc.initial_comment == start_comment
assert nc.final_comment == end_comment
def test_inline_comments(self):
c = ConfigObj()
c['foo'] = 'bar'
c.inline_comments['foo'] = 'Nice bar'
assert c.write() == ['foo = bar # Nice bar']
def test_unrepr_comments(self, comment_filled_cfg):
c = ConfigObj(comment_filled_cfg, unrepr=True)
assert c == { 'key': 'value', 'section': { 'key': 'value'}}
assert c.initial_comment == [
'', '# initial comments', '# with two lines'
]
assert c.comments == {'section': ['# section comment'], 'key': []}
assert c.inline_comments == {
'section': '# inline section comment', 'key': ''
}
assert c['section'].comments == { 'key': ['# key comment']}
assert c.final_comment == ['', '# final comment', '# with two lines']
def test_comments(self, comment_filled_cfg):
c = ConfigObj(comment_filled_cfg)
assert c == { 'key': 'value', 'section': { 'key': 'value'}}
assert c.initial_comment == [
'', '# initial comments', '# with two lines'
]
assert c.comments == {'section': ['# section comment'], 'key': []}
assert c.inline_comments == {
'section': '# inline section comment', 'key': None
}
assert c['section'].comments == { 'key': ['# key comment']}
assert c.final_comment == ['', '# final comment', '# with two lines']
def test_overwriting_filenames(a, b, i):
#TODO: I'm not entirely sure what this test is actually asserting
filename = a.filename
a.filename = 'test.ini'
a.write()
a.filename = filename
assert a == ConfigObj('test.ini', raise_errors=True)
os.remove('test.ini')
b.filename = 'test.ini'
b.write()
assert b == ConfigObj('test.ini', raise_errors=True)
os.remove('test.ini')
i.filename = 'test.ini'
i.write()
assert i == ConfigObj('test.ini', raise_errors=True)
os.remove('test.ini')
def test_interpolation_using_default_sections():
c = ConfigObj()
c['DEFAULT'] = {'a' : 'fish'}
c['a'] = '%(a)s'
assert c.write() == ['a = %(a)s', '[DEFAULT]', 'a = fish']
class TestIndentation(object):
@pytest.fixture
def max_tabbed_cfg(self):
return ['[sect]', ' [[sect]]', ' foo = bar']
def test_write_dictionary(self):
assert ConfigObj({'sect': {'sect': {'foo': 'bar'}}}).write() == [
'[sect]', ' [[sect]]', ' foo = bar'
]
def test_indentation_preserved(self, max_tabbed_cfg):
for cfg_content in (
['[sect]', '[[sect]]', 'foo = bar'],
['[sect]', ' [[sect]]', ' foo = bar'],
max_tabbed_cfg
):
assert ConfigObj(cfg_content).write() == cfg_content
def test_handle_tabs_vs_spaces(self, max_tabbed_cfg):
one_tab = ['[sect]', '\t[[sect]]', '\t\tfoo = bar']
two_tabs = ['[sect]', '\t\t[[sect]]', '\t\t\t\tfoo = bar']
tabs_and_spaces = [b'[sect]', b'\t \t [[sect]]',
b'\t \t \t \t foo = bar']
assert ConfigObj(one_tab).write() == one_tab
assert ConfigObj(two_tabs).write() == two_tabs
assert ConfigObj(tabs_and_spaces).write() == [s.decode('utf-8') for s in tabs_and_spaces]
assert ConfigObj(max_tabbed_cfg, indent_type=chr(9)).write() == one_tab
assert ConfigObj(one_tab, indent_type=' ').write() == max_tabbed_cfg
class TestEdgeCasesWhenWritingOut(object):
def test_newline_terminated(self, empty_cfg):
empty_cfg.newlines = '\n'
empty_cfg['a'] = 'b'
collector = six.BytesIO()
empty_cfg.write(collector)
assert collector.getvalue() == b'a = b\n'
def test_hash_escaping(self, empty_cfg):
empty_cfg.newlines = '\n'
empty_cfg['#a'] = 'b # something'
collector = six.BytesIO()
empty_cfg.write(collector)
assert collector.getvalue() == b'"#a" = "b # something"\n'
empty_cfg = ConfigObj()
empty_cfg.newlines = '\n'
empty_cfg['a'] = 'b # something', 'c # something'
collector = six.BytesIO()
empty_cfg.write(collector)
assert collector.getvalue() == b'a = "b # something", "c # something"\n'
def test_detecting_line_endings_from_existing_files(self):
for expected_line_ending in ('\r\n', '\n'):
with open('temp', 'w') as h:
h.write(expected_line_ending)
c = ConfigObj('temp')
assert c.newlines == expected_line_ending
os.remove('temp')
def test_writing_out_dict_value_with_unrepr(self):
# issue #42
cfg = [str('thing = {"a": 1}')]
c = ConfigObj(cfg, unrepr=True)
assert repr(c) == "ConfigObj({'thing': {'a': 1}})"
assert c.write() == ["thing = {'a': 1}"]
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn
from mxnet.test_utils import assert_almost_equal
import numpy as np
from nose.tools import raises
from copy import deepcopy
import warnings
import json
def test_parameter():
p = gluon.Parameter('weight', shape=(10, 10))
p.initialize(init='xavier', ctx=[mx.cpu(0), mx.cpu(1)])
assert len(p.list_data()) == 2
assert len(p.list_grad()) == 2
assert p.data(mx.cpu(1)).context == mx.cpu(1)
assert p.data(mx.cpu(0)).shape == (10, 10)
assert p.var().name == 'weight'
p.reset_ctx(ctx=[mx.cpu(1), mx.cpu(2)])
assert p.list_ctx() == [mx.cpu(1), mx.cpu(2)]
def test_paramdict():
params = gluon.ParameterDict('net_')
params.get('weight', shape=(10, 10))
assert list(params.keys()) == ['net_weight']
params.initialize(ctx=mx.cpu())
params.save('test.params')
params.load('test.params', mx.cpu())
def test_parameter_sharing():
class Net(gluon.Block):
def __init__(self, **kwargs):
super(Net, self).__init__(**kwargs)
with self.name_scope():
self.dense0 = nn.Dense(5, in_units=5)
self.dense1 = nn.Dense(5, in_units=5)
def forward(self, x):
return self.dense1(self.dense0(x))
net1 = Net(prefix='net1_')
net2 = Net(prefix='net2_', params=net1.collect_params())
net1.collect_params().initialize()
net2(mx.nd.zeros((3, 5)))
net1.save_params('net1.params')
net3 = Net(prefix='net3_')
net3.load_params('net1.params', mx.cpu())
def test_basic():
model = nn.Sequential()
model.add(nn.Dense(128, activation='tanh', in_units=10, flatten=False))
model.add(nn.Dropout(0.5))
model.add(nn.Dense(64, activation='tanh', in_units=256),
nn.Dense(32, in_units=64))
model.add(nn.Activation('relu'))
# symbol
x = mx.sym.var('data')
y = model(x)
assert len(y.list_arguments()) == 7
# ndarray
model.collect_params().initialize(mx.init.Xavier(magnitude=2.24))
x = model(mx.nd.zeros((32, 2, 10)))
assert x.shape == (32, 32)
x.wait_to_read()
model.collect_params().setattr('grad_req', 'null')
assert list(model.collect_params().values())[0]._grad is None
model.collect_params().setattr('grad_req', 'write')
assert list(model.collect_params().values())[0]._grad is not None
def test_dense():
model = nn.Dense(128, activation='tanh', in_units=10, flatten=False, prefix='test_')
inputs = mx.sym.Variable('data')
outputs = model(inputs)
assert set(model.collect_params().keys()) == set(['test_weight', 'test_bias'])
assert outputs.list_outputs() == ['test_tanh_fwd_output']
args, outs, auxs = outputs.infer_shape(data=(2, 3, 10))
assert outs == [(2, 3, 128)]
model = nn.Dense(128, activation='relu', in_units=30, flatten=True, prefix='test2_')
inputs = mx.sym.Variable('data')
outputs = model(inputs)
assert set(model.collect_params().keys()) == set(['test2_weight', 'test2_bias'])
assert outputs.list_outputs() == ['test2_relu_fwd_output']
args, outs, auxs = outputs.infer_shape(data=(17, 2, 5, 3))
assert outs == [(17, 128)]
def test_symbol_block():
model = nn.HybridSequential()
model.add(nn.Dense(128, activation='tanh'))
model.add(nn.Dropout(0.5))
model.add(nn.Dense(64, activation='tanh'),
nn.Dense(32, in_units=64))
model.add(nn.Activation('relu'))
model.initialize()
inputs = mx.sym.var('data')
outputs = model(inputs).get_internals()
smodel = gluon.SymbolBlock(outputs, inputs, params=model.collect_params())
assert len(smodel(mx.nd.zeros((16, 10)))) == 14
out = smodel(mx.sym.var('in'))
assert len(out) == len(outputs.list_outputs())
class Net(nn.HybridBlock):
def __init__(self, model):
super(Net, self).__init__()
self.model = model
def hybrid_forward(self, F, x):
out = self.model(x)
return F.add_n(*[i.sum() for i in out])
net = Net(smodel)
net.hybridize()
assert isinstance(net(mx.nd.zeros((16, 10))), mx.nd.NDArray)
inputs = mx.sym.var('data')
outputs = model(inputs)
smodel = gluon.SymbolBlock(outputs, inputs, params=model.collect_params())
net = Net(smodel)
net.hybridize()
assert isinstance(net(mx.nd.zeros((16, 10))), mx.nd.NDArray)
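# check_layer_forward is the shared helper for the layer tests below: it runs
# the layer once imperatively and once after hybridize() on the same input,
# and asserts that both the forward output and the input gradient agree
# within tolerance.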
def check_layer_forward(layer, dshape):
layer.collect_params().initialize()
x = mx.nd.ones(shape=dshape)
x.attach_grad()
with mx.autograd.record():
out = layer(x)
out.backward()
np_out = out.asnumpy()
np_dx = x.grad.asnumpy()
layer.hybridize()
x = mx.nd.ones(shape=dshape)
x.attach_grad()
with mx.autograd.record():
out = layer(x)
out.backward()
mx.test_utils.assert_almost_equal(np_out, out.asnumpy(), rtol=1e-5, atol=1e-6)
mx.test_utils.assert_almost_equal(np_dx, x.grad.asnumpy(), rtol=1e-5, atol=1e-6)
def test_conv():
layers1d = [
nn.Conv1D(16, 3, in_channels=4),
nn.Conv1D(16, 3, groups=2, in_channels=4),
nn.Conv1D(16, 3, strides=3, groups=2, in_channels=4),
]
for layer in layers1d:
check_layer_forward(layer, (1, 4, 10))
layers2d = [
nn.Conv2D(16, (3, 4), in_channels=4),
nn.Conv2D(16, (5, 4), in_channels=4),
nn.Conv2D(16, (3, 4), groups=2, in_channels=4),
nn.Conv2D(16, (3, 4), strides=4, in_channels=4),
nn.Conv2D(16, (3, 4), dilation=4, in_channels=4),
nn.Conv2D(16, (3, 4), padding=4, in_channels=4),
]
for layer in layers2d:
check_layer_forward(layer, (1, 4, 20, 20))
layers3d = [
nn.Conv3D(16, (1, 8, 4), in_channels=4, activation='relu'),
nn.Conv3D(16, (5, 4, 3), in_channels=4),
nn.Conv3D(16, (3, 3, 3), groups=2, in_channels=4),
nn.Conv3D(16, 4, strides=4, in_channels=4),
nn.Conv3D(16, (3, 3, 3), padding=4, in_channels=4),
]
for layer in layers3d:
check_layer_forward(layer, (1, 4, 10, 10, 10))
layer = nn.Conv2D(16, (3, 3), layout='NHWC', in_channels=4)
# check_layer_forward(layer, (1, 10, 10, 4))
layer = nn.Conv3D(16, (3, 3, 3), layout='NDHWC', in_channels=4)
# check_layer_forward(layer, (1, 10, 10, 10, 4))
def test_deconv():
# layers1d = [
# nn.Conv1DTranspose(16, 3, in_channels=4),
# nn.Conv1DTranspose(16, 3, groups=2, in_channels=4),
# nn.Conv1DTranspose(16, 3, strides=3, groups=2, in_channels=4),
# ]
# for layer in layers1d:
# check_layer_forward(layer, (1, 4, 10))
layers2d = [
nn.Conv2DTranspose(16, (3, 4), in_channels=4),
nn.Conv2DTranspose(16, (5, 4), in_channels=4),
nn.Conv2DTranspose(16, (3, 4), groups=2, in_channels=4),
nn.Conv2DTranspose(16, (3, 4), strides=4, in_channels=4),
nn.Conv2DTranspose(16, (3, 4), dilation=4, in_channels=4),
nn.Conv2DTranspose(16, (3, 4), padding=4, in_channels=4),
nn.Conv2DTranspose(16, (3, 4), strides=4, output_padding=3, in_channels=4),
]
for layer in layers2d:
check_layer_forward(layer, (1, 4, 20, 20))
# layers3d = [
# nn.Conv3DTranspose(16, (1, 8, 4), in_channels=4),
# nn.Conv3DTranspose(16, (5, 4, 3), in_channels=4),
# nn.Conv3DTranspose(16, (3, 3, 3), groups=2, in_channels=4),
# nn.Conv3DTranspose(16, 4, strides=4, in_channels=4),
# nn.Conv3DTranspose(16, (3, 3, 3), padding=4, in_channels=4),
# ]
# for layer in layers3d:
# check_layer_forward(layer, (1, 4, 10, 10, 10))
#
#
# layer = nn.Conv2DTranspose(16, (3, 3), layout='NHWC', in_channels=4)
# # check_layer_forward(layer, (1, 10, 10, 4))
#
# layer = nn.Conv3DTranspose(16, (3, 3, 3), layout='NDHWC', in_channels=4)
# # check_layer_forward(layer, (1, 10, 10, 10, 4))
def test_pool():
layers1d = [
nn.MaxPool1D(),
nn.MaxPool1D(3),
nn.MaxPool1D(3, 2),
nn.AvgPool1D(),
nn.GlobalAvgPool1D(),
]
for layer in layers1d:
check_layer_forward(layer, (1, 2, 10))
layers2d = [
nn.MaxPool2D(),
nn.MaxPool2D((3, 3)),
nn.MaxPool2D(3, 2),
nn.AvgPool2D(),
nn.GlobalAvgPool2D(),
]
for layer in layers2d:
check_layer_forward(layer, (1, 2, 10, 10))
layers3d = [
nn.MaxPool3D(),
nn.MaxPool3D((3, 3, 3)),
nn.MaxPool3D(3, 2),
nn.AvgPool3D(),
nn.GlobalAvgPool3D(),
]
for layer in layers3d:
check_layer_forward(layer, (1, 2, 10, 10, 10))
# test ceil_mode
x = mx.nd.zeros((2, 2, 10, 10))
layer = nn.MaxPool2D(3, ceil_mode=False)
layer.collect_params().initialize()
assert (layer(x).shape==(2, 2, 3, 3))
layer = nn.MaxPool2D(3, ceil_mode=True)
layer.collect_params().initialize()
assert (layer(x).shape==(2, 2, 4, 4))
def test_batchnorm():
layer = nn.BatchNorm(in_channels=10)
check_layer_forward(layer, (2, 10, 10, 10))
def test_reshape():
x = mx.nd.ones((2, 4, 10, 10))
layer = nn.Conv2D(10, 2, in_channels=4)
layer.collect_params().initialize()
with mx.autograd.record():
x = layer(x)
x = x.reshape((-1,))
x = x + 10
x.backward()
def test_slice():
x = mx.nd.ones((5, 4, 10, 10))
layer = nn.Conv2D(10, 2, in_channels=4)
layer.collect_params().initialize()
with mx.autograd.record():
x = layer(x)
x = x[1:3]
x = x + 10
x.backward()
def test_at():
x = mx.nd.ones((5, 4, 10, 10))
layer = nn.Conv2D(10, 2, in_channels=4)
layer.collect_params().initialize()
with mx.autograd.record():
x = layer(x)
x = x[1]
x = x + 10
x.backward()
def test_deferred_init():
x = mx.nd.ones((5, 4, 10, 10))
layer = nn.Conv2D(10, 2)
layer.collect_params().initialize()
layer(x)
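# check_split_data splits a batch along the given axis and verifies that
# concatenating the slices back along that axis reproduces the original array.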
def check_split_data(x, num_slice, batch_axis, **kwargs):
res = gluon.utils.split_data(x, num_slice, batch_axis, **kwargs)
assert len(res) == num_slice
mx.test_utils.assert_almost_equal(mx.nd.concat(*res, dim=batch_axis).asnumpy(),
x.asnumpy())
def test_split_data():
x = mx.nd.random.uniform(shape=(128, 33, 64))
check_split_data(x, 8, 0)
check_split_data(x, 3, 1)
check_split_data(x, 4, 1, even_split=False)
check_split_data(x, 15, 1, even_split=False)
try:
check_split_data(x, 4, 1)
except ValueError:
return
assert False, "Should have failed"
def test_flatten():
flatten = nn.Flatten()
x = mx.nd.zeros((3,4,5,6))
assert flatten(x).shape == (3, 4*5*6)
x = mx.nd.zeros((3,6))
assert flatten(x).shape == (3, 6)
x = mx.nd.zeros((3,))
assert flatten(x).shape == (3, 1)
def test_trainer():
def dict_equ(a, b):
assert set(a) == set(b)
for k in a:
assert (a[k].asnumpy() == b[k].asnumpy()).all()
x = gluon.Parameter('x', shape=(10,))
x.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
trainer = gluon.Trainer([x], 'sgd', {'learning_rate': 1.0, 'momentum': 0.5})
with mx.autograd.record():
for w in x.list_data():
y = w + 1
y.backward()
trainer.step(1)
assert (x.data(mx.cpu(1)).asnumpy() == -2).all()
x.lr_mult = 0.5
with mx.autograd.record():
for w in x.list_data():
y = w + 1
y.backward()
trainer.step(1)
assert (x.data(mx.cpu(1)).asnumpy() == -4).all()
trainer.save_states('test.states')
states = deepcopy(trainer._kvstore._updater.states) if trainer._update_on_kvstore \
else deepcopy(trainer._updaters[0].states)
trainer.load_states('test.states')
if trainer._update_on_kvstore:
dict_equ(trainer._kvstore._updater.states, states)
assert trainer._optimizer == trainer._kvstore._updater.optimizer
else:
for updater in trainer._updaters:
dict_equ(updater.states, states)
assert trainer._optimizer == trainer._updaters[0].optimizer
def test_block_attr_hidden():
b = gluon.Block()
# regular attributes can change types
b.a = None
b.a = 1
@raises(TypeError)
def test_block_attr_block():
b = gluon.Block()
# regular variables can't change types
b.b = gluon.Block()
b.b = (2,)
@raises(TypeError)
def test_block_attr_param():
b = gluon.Block()
# regular variables can't change types
b.b = gluon.Parameter()
b.b = (2,)
def test_block_attr_regular():
b = gluon.Block()
# set block attribute also sets _children
b.c = gluon.Block()
c2 = gluon.Block()
b.c = c2
assert b.c is c2 and b._children[0] is c2
def test_sequential_warning():
with warnings.catch_warnings(record=True) as w:
b = gluon.nn.Sequential()
b.add(gluon.nn.Dense(20))
b.hybridize()
assert len(w) == 1
def test_global_norm_clip():
x1 = mx.nd.ones((3,3))
x2 = mx.nd.ones((4,4))
norm = gluon.utils.clip_global_norm([x1, x2], 1.0)
assert norm == 5.0
assert_almost_equal(x1.asnumpy(), np.ones((3,3))/5)
assert_almost_equal(x2.asnumpy(), np.ones((4,4))/5)
x3 = mx.nd.array([1.0, 2.0, float('nan')])
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
gluon.utils.clip_global_norm([x1, x3], 2.0)
assert len(w) == 1
def test_embedding():
layer = gluon.nn.Embedding(10, 100)
layer.initialize()
x = mx.nd.array([3,4,2,0,1])
with mx.autograd.record():
y = layer(x)
y.backward()
assert (layer.weight.grad()[:5] == 1).asnumpy().all()
assert (layer.weight.grad()[5:] == 0).asnumpy().all()
def test_export():
ctx = mx.context.current_context()
model = gluon.model_zoo.vision.resnet18_v1(
prefix='resnet', ctx=ctx, pretrained=True)
model.hybridize()
data = mx.nd.random.normal(shape=(1, 3, 224, 224))
out = model(data)
model.export('gluon')
module = mx.mod.Module.load('gluon', 0, label_names=None, context=ctx)
module.bind(data_shapes=[('data', data.shape)])
module.forward(mx.io.DataBatch([data], None), is_train=False)
mod_out, = module.get_outputs()
assert_almost_equal(out.asnumpy(), mod_out.asnumpy())
model2 = gluon.model_zoo.vision.resnet18_v1(prefix='resnet', ctx=ctx)
model2.collect_params().load('gluon-0000.params', ctx)
out2 = model2(data)
assert_almost_equal(out.asnumpy(), out2.asnumpy())
def test_hybrid_stale_cache():
net = mx.gluon.nn.HybridSequential()
with net.name_scope():
net.add(mx.gluon.nn.Dense(10, weight_initializer='zeros', bias_initializer='ones', flatten=False))
net.hybridize()
net.initialize()
net(mx.nd.ones((2,3,5)))
net.add(mx.gluon.nn.Flatten())
assert net(mx.nd.ones((2,3,5))).shape == (2, 30)
net = mx.gluon.nn.HybridSequential()
with net.name_scope():
net.fc1 = mx.gluon.nn.Dense(10, weight_initializer='zeros',
bias_initializer='ones', flatten=False)
net.fc2 = mx.gluon.nn.Dense(10, weight_initializer='zeros',
bias_initializer='ones', flatten=False)
net.hybridize()
net.initialize()
net(mx.nd.ones((2,3,5)))
net.fc2 = mx.gluon.nn.Dense(10, weight_initializer='zeros',
bias_initializer='ones', flatten=True)
net.initialize()
assert net(mx.nd.ones((2,3,5))).shape == (2, 10)
def test_lambda():
net1 = mx.gluon.nn.HybridSequential()
net1.add(nn.Activation('tanh'),
nn.LeakyReLU(0.1))
net2 = mx.gluon.nn.HybridSequential()
op3 = lambda F, x, *args: F.LeakyReLU(x, *args, slope=0.1)
net2.add(nn.HybridLambda('tanh'),
nn.HybridLambda(op3))
op4 = lambda x: mx.nd.LeakyReLU(x, slope=0.1)
net3 = mx.gluon.nn.Sequential()
net3.add(nn.Lambda('tanh'),
nn.Lambda(op4))
input_data = mx.nd.random.uniform(shape=(2, 3, 5, 7))
out1, out2, out3 = net1(input_data), net2(input_data), net3(input_data)
assert_almost_equal(out1.asnumpy(), out2.asnumpy())
assert_almost_equal(out1.asnumpy(), out3.asnumpy())
def test_fill_shape_deferred():
net = nn.HybridSequential()
with net.name_scope():
net.add(nn.Conv2D(64, kernel_size=2, padding=1),
nn.BatchNorm(),
nn.Dense(10))
net.hybridize()
net.initialize()
net(mx.nd.ones((2,3,5,7)))
assert net[0].weight.shape[1] == 3, net[0].weight.shape[1]
assert net[1].gamma.shape[0] == 64, net[1].gamma.shape[0]
assert net[2].weight.shape[1] == 3072, net[2].weight.shape[1]
def test_dtype():
net = mx.gluon.model_zoo.vision.resnet18_v1()
net.initialize()
net.cast('float64')
with mx.autograd.record():
y = net(mx.nd.ones((16, 3, 32, 32), dtype='float64'))
y.backward()
net = mx.gluon.model_zoo.vision.resnet18_v1()
net.initialize()
net.hybridize()
net(mx.nd.ones((16, 3, 32, 32), dtype='float32'))
net.cast('float64')
net(mx.nd.ones((16, 3, 32, 32), dtype='float64'))
mx.nd.waitall()
def test_fill_shape_load():
ctx = mx.context.current_context()
net1 = nn.HybridSequential()
with net1.name_scope():
net1.add(nn.Conv2D(64, kernel_size=2, padding=1),
nn.BatchNorm(),
nn.Dense(10))
net1.hybridize()
net1.initialize(ctx=ctx)
net1(mx.nd.ones((2,3,5,7), ctx))
net1.save_params('net_fill.params')
net2 = nn.HybridSequential()
with net2.name_scope():
net2.add(nn.Conv2D(64, kernel_size=2, padding=1),
nn.BatchNorm(),
nn.Dense(10))
net2.hybridize()
net2.initialize()
net2.load_params('net_fill.params', ctx)
assert net2[0].weight.shape[1] == 3, net2[0].weight.shape[1]
assert net2[1].gamma.shape[0] == 64, net2[1].gamma.shape[0]
assert net2[2].weight.shape[1] == 3072, net2[2].weight.shape[1]
def test_inline():
net = mx.gluon.nn.HybridSequential()
with net.name_scope():
net.add(mx.gluon.nn.Dense(10))
net.add(mx.gluon.nn.Dense(10))
net.add(mx.gluon.nn.Dense(10))
net.initialize()
net.hybridize(inline_limit=3)
with mx.autograd.record():
y = net(mx.nd.zeros((1,10)))
len_1 = len(json.loads(mx.autograd.get_symbol(y).tojson())['nodes'])
y.backward()
net.hybridize(inline_limit=0)
with mx.autograd.record():
y = net(mx.nd.zeros((1,10)))
len_2 = len(json.loads(mx.autograd.get_symbol(y).tojson())['nodes'])
y.backward()
assert len_1 == len_2 + 2
if __name__ == '__main__':
import nose
nose.runmodule()