gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
"""The sensor entity for the Youless integration."""
from __future__ import annotations
from youless_api.youless_sensor import YoulessSensor
from homeassistant.components.sensor import (
STATE_CLASS_MEASUREMENT,
STATE_CLASS_TOTAL,
STATE_CLASS_TOTAL_INCREASING,
SensorEntity,
)
from homeassistant.components.youless import DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_DEVICE,
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_GAS,
DEVICE_CLASS_POWER,
ENERGY_KILO_WATT_HOUR,
POWER_WATT,
VOLUME_CUBIC_METERS,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
    """Initialize the integration.

    Creates one entity per Youless measurement and registers them with
    Home Assistant.

    Args:
        hass: The Home Assistant instance.
        entry: The config entry for this integration instance.
        async_add_entities: Callback used to register the created entities.
    """
    coordinator = hass.data[DOMAIN][entry.entry_id]
    # BUG FIX: the original assigned ``device = entry.data[CONF_DEVICE]`` and
    # then immediately reassigned it with the walrus expression below; the
    # first assignment was dead code and has been removed.
    if (device := entry.data[CONF_DEVICE]) is None:
        # Fall back to the entry id when no device name was configured.
        device = entry.entry_id
    async_add_entities(
        [
            GasSensor(coordinator, device),
            PowerMeterSensor(coordinator, device, "low", STATE_CLASS_TOTAL_INCREASING),
            PowerMeterSensor(coordinator, device, "high", STATE_CLASS_TOTAL_INCREASING),
            PowerMeterSensor(coordinator, device, "total", STATE_CLASS_TOTAL),
            CurrentPowerSensor(coordinator, device),
            DeliveryMeterSensor(coordinator, device, "low"),
            DeliveryMeterSensor(coordinator, device, "high"),
            ExtraMeterSensor(coordinator, device, "total"),
            ExtraMeterPowerSensor(coordinator, device, "usage"),
        ]
    )
class YoulessBaseSensor(CoordinatorEntity, SensorEntity):
    """Common behaviour shared by every Youless sensor entity."""

    def __init__(
        self,
        coordinator: DataUpdateCoordinator,
        device: str,
        device_group: str,
        friendly_name: str,
        sensor_id: str,
    ) -> None:
        """Create the sensor."""
        super().__init__(coordinator)
        self._device = device
        self._device_group = device_group
        self._sensor_id = sensor_id
        self._attr_unique_id = f"{DOMAIN}_{device}_{sensor_id}"
        # Entities of the same group share one device registry entry.
        group_identifier = (DOMAIN, f"{device}_{device_group}")
        self._attr_device_info = DeviceInfo(
            identifiers={group_identifier},
            manufacturer="YouLess",
            model=self.coordinator.data.model,
            name=friendly_name,
        )

    @property
    def get_sensor(self) -> YoulessSensor | None:
        """Return the underlying sensor object; subclasses override this."""
        return None

    @property
    def native_value(self) -> StateType:
        """Return the current reading, or None when no sensor is present."""
        sensor = self.get_sensor
        if sensor is None:
            return None
        return sensor.value

    @property
    def available(self) -> bool:
        """Report availability: coordinator must be healthy and a sensor present."""
        if not super().available:
            return False
        return self.get_sensor is not None
class GasSensor(YoulessBaseSensor):
    """The Youless gas sensor."""

    _attr_native_unit_of_measurement = VOLUME_CUBIC_METERS
    _attr_device_class = DEVICE_CLASS_GAS
    _attr_state_class = STATE_CLASS_TOTAL_INCREASING

    def __init__(self, coordinator: DataUpdateCoordinator, device: str) -> None:
        """Instantiate a gas sensor."""
        super().__init__(coordinator, device, "gas", "Gas meter", "gas")
        self._attr_icon = "mdi:fire"
        self._attr_name = "Gas usage"

    @property
    def get_sensor(self) -> YoulessSensor | None:
        """Expose the gas meter reading from the coordinator data."""
        return self.coordinator.data.gas_meter
class CurrentPowerSensor(YoulessBaseSensor):
    """The current power usage sensor."""

    _attr_native_unit_of_measurement = POWER_WATT
    _attr_device_class = DEVICE_CLASS_POWER
    _attr_state_class = STATE_CLASS_MEASUREMENT

    def __init__(self, coordinator: DataUpdateCoordinator, device: str) -> None:
        """Instantiate the usage meter."""
        super().__init__(coordinator, device, "power", "Power usage", "usage")
        # NOTE: the redundant ``self._device = device`` assignment was removed;
        # YoulessBaseSensor.__init__ already stores it.
        self._attr_name = "Power Usage"

    @property
    def get_sensor(self) -> YoulessSensor | None:
        """Get the sensor for providing the value."""
        return self.coordinator.data.current_power_usage
class DeliveryMeterSensor(YoulessBaseSensor):
    """The Youless delivery meter value sensor."""

    _attr_native_unit_of_measurement = ENERGY_KILO_WATT_HOUR
    _attr_device_class = DEVICE_CLASS_ENERGY
    _attr_state_class = STATE_CLASS_TOTAL_INCREASING

    def __init__(
        self, coordinator: DataUpdateCoordinator, device: str, dev_type: str
    ) -> None:
        """Instantiate a delivery meter sensor."""
        super().__init__(
            coordinator, device, "delivery", "Power delivery", f"delivery_{dev_type}"
        )
        self._type = dev_type
        self._attr_name = f"Power delivery {dev_type}"

    @property
    def get_sensor(self) -> YoulessSensor | None:
        """Look up the tariff-specific value on the delivery meter."""
        meter = self.coordinator.data.delivery_meter
        if meter is None:
            return None
        # The per-tariff YoulessSensor objects live in private attributes
        # (e.g. ``_low`` / ``_high``), hence the getattr with a fallback.
        return getattr(meter, f"_{self._type}", None)
class PowerMeterSensor(YoulessBaseSensor):
    """The Youless low meter value sensor."""

    _attr_native_unit_of_measurement = ENERGY_KILO_WATT_HOUR
    _attr_device_class = DEVICE_CLASS_ENERGY
    _attr_state_class = STATE_CLASS_TOTAL_INCREASING

    def __init__(
        self,
        coordinator: DataUpdateCoordinator,
        device: str,
        dev_type: str,
        state_class: str,
    ) -> None:
        """Instantiate a power meter sensor.

        Args:
            coordinator: The update coordinator supplying Youless data.
            device: Device identifier used for the unique id.
            dev_type: Tariff name ("low", "high" or "total").
            state_class: Sensor state class; overrides the class default.
        """
        super().__init__(
            coordinator, device, "power", "Power usage", f"power_{dev_type}"
        )
        # NOTE: the redundant ``self._device = device`` assignment was removed;
        # YoulessBaseSensor.__init__ already stores it.
        self._type = dev_type
        self._attr_name = f"Power {dev_type}"
        self._attr_state_class = state_class

    @property
    def get_sensor(self) -> YoulessSensor | None:
        """Get the sensor for providing the value."""
        if self.coordinator.data.power_meter is None:
            return None
        return getattr(self.coordinator.data.power_meter, f"_{self._type}", None)
class ExtraMeterSensor(YoulessBaseSensor):
    """The Youless extra meter value sensor (s0)."""

    _attr_native_unit_of_measurement = ENERGY_KILO_WATT_HOUR
    _attr_device_class = DEVICE_CLASS_ENERGY
    _attr_state_class = STATE_CLASS_TOTAL_INCREASING

    def __init__(
        self, coordinator: DataUpdateCoordinator, device: str, dev_type: str
    ) -> None:
        """Instantiate an extra meter sensor."""
        super().__init__(
            coordinator, device, "extra", "Extra meter", f"extra_{dev_type}"
        )
        self._type = dev_type
        self._attr_name = f"Extra {dev_type}"

    @property
    def get_sensor(self) -> YoulessSensor | None:
        """Look up the requested value on the extra (s0) meter."""
        meter = self.coordinator.data.extra_meter
        if meter is None:
            return None
        # Values are stored in private attributes on the meter object.
        return getattr(meter, f"_{self._type}", None)
class ExtraMeterPowerSensor(YoulessBaseSensor):
    """The Youless extra meter power value sensor (s0)."""

    _attr_native_unit_of_measurement = POWER_WATT
    _attr_device_class = DEVICE_CLASS_POWER
    _attr_state_class = STATE_CLASS_MEASUREMENT

    def __init__(
        self, coordinator: DataUpdateCoordinator, device: str, dev_type: str
    ) -> None:
        """Instantiate an extra meter power sensor."""
        # NOTE(review): this shares the "extra_{dev_type}" unique-id scheme
        # with ExtraMeterSensor; the two must never be created with the same
        # dev_type or the unique ids would collide — confirm with callers.
        super().__init__(
            coordinator, device, "extra", "Extra meter", f"extra_{dev_type}"
        )
        self._type = dev_type
        self._attr_name = f"Extra {dev_type}"

    @property
    def get_sensor(self) -> YoulessSensor | None:
        """Look up the requested power value on the extra (s0) meter."""
        meter = self.coordinator.data.extra_meter
        if meter is None:
            return None
        return getattr(meter, f"_{self._type}", None)
| |
"""
Summary:
Contains the AUnit, CommentUnit, HeaderUnit and UnknownSection
classes.
The AUnit is an abstract base class for all types of Isis unit read
in through the ISIS data file. All section types that are built should
inherit from the AUnit baseclass.
Author:
Duncan Runnacles
Created:
01 Apr 2016
Copyright:
Duncan Runnacles 2016
TODO:
Updates:
"""
from __future__ import unicode_literals
import hashlib
import uuid
import random
import copy
# from abc import ABCMeta, abstractmethod
from ship.fmp.datunits import ROW_DATA_TYPES as rdt
from ship.datastructures import DATA_TYPES as dt
from ship.fmp.headdata import HeadDataItem
import logging
logger = logging.getLogger(__name__)
"""logging references with a __name__ set to this module."""
class AUnit(object):
    """Abstract base class for all Dat file units.

    This class must be inherited by all classes representing an isis
    data file unit (such as River, Junction, Culvert, etc).

    Every subclass should override the readUnitData() and getData() methods to
    ensure they are specific to the setup of the individual units variables.
    If they are not overridden this class will simply take the data and store it
    as read and provide it back in the same state.

    All calls from the client to these classes should create the object and then
    call the readUnitData() method with the raw data.

    There is an UnknownSection class at the bottom of this file that can be used
    for all parts of the isis dat file that have not had a class defined. It just
    calls the basic read-in read-out methods from this class and understands nothing
    about the structure of the file section it is holding.

    If you are creating subclass of this that has row_data (see below) you
    should make sure that you call the setDummyRow() method in each of the
    RowDataCollections. Otherwise, if the user doesn't add any rows, FMP will
    throw errors.

    See Also:
        UnknownSection
    """
    # __metaclass__ = ABCMeta

    def __init__(self, **kwargs):
        """Constructor.

        Set the defaults for all unit specific variables.
        These should be set by each unit at some point in the setup process.
        E.g. RiverUnit would set type and UNIT_CATEGORY at __init__() while name
        and data_objects are set in the readUnitData() method.
        Both of these are called at or immediately after initialisation.

        Keyword Args:
            name(str): the unit label. Defaults to 'unknown'.
            name_ds(str): the unit downstream label. Defaults to 'unknown'.
        """
        self._name = kwargs.get('name', 'unknown')  # Unit label
        self._name_ds = kwargs.get('name_ds', 'unknown')  # Unit downstream label
        self._data = None
        """This is used for catch-all data storage.

        Used in units such as UnknownSection.
        Classes that override the readUnitData() and getData() methods are
        likely to ignore this variable and use row_collection and head_data instead.
        """
        self._unit_type = 'Unknown'
        """The type of ISIS unit - e.g. 'River'"""
        self._unit_category = 'Unknown'
        """The ISIS unit category - e.g. for type 'Usbpr' it would be 'Bridge'"""
        self.row_data = {}
        """Collection containing all of the ADataRow objects.

        This is the main collection for row data in any unit that contains it.
        In a RiverUnit, for example, this will hold the RowDataObject's
        containing the CHAINAGE, ELEVATION, etc.
        """
        self.head_data = {}
        """Dictionary containing set values that are always present in the file.

        In a RiverUnit this includes values like slope and distance. I.e.
        values that appear in set locations, usually at the top of the unit
        data in the .dat file.
        """

    @property
    def name(self):
        """str: The unit label."""
        return self._name

    @name.setter
    def name(self, value):
        self._name = value

    @property
    def name_ds(self):
        """str: The unit downstream label."""
        return self._name_ds

    @name_ds.setter
    def name_ds(self, value):
        self._name_ds = value

    @property
    def has_ics(self):
        """bool: True if this unit declares initial condition labels."""
        if not self.icLabels():
            return False
        else:
            return True

    @property
    def has_row_data(self):
        """bool: True if this unit holds any RowDataCollection entries."""
        if not self.row_data:
            return False
        else:
            return True

    @property
    def unit_type(self):
        """str: The type of ISIS unit - e.g. 'River'."""
        return self._unit_type

    @property
    def unit_category(self):
        """str: The ISIS unit category - e.g. 'Bridge' for type 'Usbpr'."""
        return self._unit_category

    def icLabels(self):
        """Returns the initial_conditions values for this object.

        This method should be overriden by all classes that contain initial
        conditions.

        For example a BridgeUnit type will have two initial conditions labels;
        the upstream and downstream label names.

        By default this will return an empty list.

        Return:
            list - of initial condition label names.
        """
        return []

    def linkLabels(self):
        """Dict of all the names that the unit references.

        For a RiverUnit this is only the self.name + spills and laterals,
        for a bridge it would be self.name, self.name_ds, self.remote_us,
        self.remote_ds and for a JunctionUnit it could be many more.

        It can be used to identify which other units are directly associated to
        this one in some way.

        Return:
            dict - containing all referenced names.
        """
        return {'name': self._name}

    def copy(self):
        """Returns a copy of this unit with it's own memory allocation."""
        object_copy = copy.deepcopy(self)
        return object_copy

    def rowDataObject(self, key, rowdata_key='main'):
        """Returns the row data object as a list.

        This will return the row_collection data object referenced by the key
        provided in list form.

        If you intend to update the values you should use getRowDataObject
        instead as the data provided will be mutable and therefore reflected in
        the values held by the row_collection. If you just want a quick way to
        loop through the values in one of the data objects and only intend to
        read the data then use this.

        Args:
            key (int): the key for the data object requested. It is best to use
                the class constants (i.e. RiverUnit.CHAINAGE) for this.
            rowdata_key(str): key to a RowDataCollection in row_data.

        Returns:
            List containing the data in the DataObject that the key points
            to. Returns None if there is no row collection.

        Raises:
            KeyError: If key or rowdata_key don't exist.
        """
        if not self.has_row_data:
            return None
        return self.row_data[rowdata_key].dataObject(key)

    def row(self, index, rowdata_key='main'):
        """Get the data vals in a particular row by index.

        Args:
            index(int): the index of the row to return.
            rowdata_key(str): key to a RowDataCollection in row_data.

        Return:
            dict - containing the values for the requested row. None if this
                unit has no row data.
        """
        if not self.has_row_data:
            return None
        return self.row_data[rowdata_key].rowAsDict(index)

    def getData(self):
        """Getter for the unit data.

        Return the file geometry data formatted ready for saving in the style
        of an ISIS .dat file

        Note:
            This method should be overriden by the sub class to restore the
            data to the format required by the dat file.

        Returns:
            List of strings - formatted for writing to .dat file.

        Raises:
            NotImplementedError: Always, unless overridden by the subclass.
        """
        raise NotImplementedError

    def readUnitData(self, data, file_line, **kwargs):
        """Reads the unit data supplied to the object.

        This method is called by the FmpUnitFactory class when constructing the
        Isis unit based on the data passed in from the dat file.
        The default hook just copies all the data parsed in the buildUnit()
        method of the factory and saves it to the given unit. This is exactly
        what happens for the UnknownUnit class that just maintains a copy of the
        unit data exactly as it was read in.

        Args:
            data (list): raw data for the section as supplied to the class.
            file_line (int): current line number in the dat file.

        Note:
            When a class inherits from AUnit it should override this method
            with unit specific load behaviour. This is likely to include:
            populate unit specific header value dictionary and in some units
            creating row data object.

        See Also:
            RiverSection for an example of overriding this method with a
            concrete class.
        """
        self.head_data['all'] = data

    def deleteRow(self, index, rowdata_key='main', **kwargs):
        """Removes a data row from the RowDataCollection.

        Args:
            index(int): index of the row to delete.
            rowdata_key(str): key to a RowDataCollection in row_data.

        Raises:
            IndexError: if index is outside the bounds of the collection.

        **kwargs:
            These are passed onto the RowDataCollection. See there for details.
        """
        if index < 0 or index >= self.row_data[rowdata_key].numberOfRows():
            raise IndexError('Given index is outside bounds of row_data[rowdata_key] data')
        self.row_data[rowdata_key].deleteRow(index, **kwargs)

    def updateRow(self, row_vals, index, rowdata_key='main', **kwargs):
        """Update an existing data row in one of the RowDataCollection's.

        **kwargs:
            These are passed onto the RowDataCollection. See there for details.

        Args:
            row_vals(dict): Named arguments required for adding a row to the
                collection. These will be as stipulated by the way that a
                concrete implementation of this class setup the collection.
            rowdata_key='main'(str): the name of the RowDataCollection
                held by this to add the new row to. If None it is the
                self.row_collection. Otherwise it is the name of one of the
                entries in the self.additional_row_collections dictionary.
            index=None(int): the index in the RowDataObjectCollection to update
                with the row_vals

        Raises:
            IndexError: if index is beyond the end of the collection.
        """
        # NOTE(review): unlike deleteRow, a negative index is not rejected
        # here — confirm whether negative indexing is intended.
        if index >= self.row_data[rowdata_key].numberOfRows():
            raise IndexError('Given index is outside bounds of row_collection data')
        # Call the row collection add row method to add the new row.
        self.row_data[rowdata_key].updateRow(row_vals=row_vals, index=index, **kwargs)

    def addRow(self, row_vals, rowdata_key='main', index=None, **kwargs):
        """Add a new data row to one of the row data collections.

        Provides the basics of a function for adding additional row data to one
        of the RowDataCollection's held by an AUnit type.

        Checks that key required variables: ROW_DATA_TYPES.CHAINAGE and
        ROW_DATA_TYPES.ELEVATION are in the kwargs and that inserting chainage in
        the specified location is not negative, unless check_negative == False.

        It then passes the kwargs directly to the RowDataCollection's
        addNewRow function. It is the concrete class implementations
        responsibility to ensure that these are the expected values for it's
        row collection and to set any defaults. If they are not as expected by
        the RowDataObjectCollection a ValueError will be raised.

        **kwargs:
            These are passed onto the RowDataCollection. See there for details.

        Args:
            row_vals(dict): Named arguments required for adding a row to the
                collection. These will be as stipulated by the way that a
                concrete implementation of this class setup the collection.
            rowdata_key='main'(str): the name of the RowDataCollection
                held by this to add the new row to. If None it is the
                self.row_collection. Otherwise it is the name of one of the
                entries in the self.additional_row_collections dictionary.
            index=None(int): the index in the RowDataObjectCollection to insert
                the row into. If None it will be appended to the end.
        """
        # If index is >= record length it gets set to None and is appended
        if index is not None and index >= self.row_data[rowdata_key].numberOfRows():
            index = None
        if index is None:
            index = self.row_data[rowdata_key].numberOfRows()
        self.row_data[rowdata_key].addRow(row_vals, index, **kwargs)

    def checkIncreases(self, data_obj, value, index):
        """Checks that: prev_value < value < next_value.

        If an index greater than the number of rows in the row_data it will
        check that it's greater than previous value only.

        Note:
            the ARowDataObject class accepts a callback function called
            update_callback which is called whenever an item is added or
            updated. That is how this method is generally used.

        Args:
            data_obj(RowDataObject): containing the values to check against.
            value(float | int): the value to check.
            index=None(int): index to check adjacent values against. If None
                it will assume the index is the last one in the list.

        Raises:
            ValueError: if not prev_value <= value <= next_value.
        """
        details = self._getAdjacentDataObjDetails(data_obj, value, index)
        # NOTE(review): truthiness check means a prev/next value of 0 (or 0.0)
        # skips validation just like None does — confirm this is intended.
        if details['prev_value']:
            if not value >= details['prev_value']:
                raise ValueError('VALUE must be > prev index and < next index.')
        if details['next_value']:
            if not value <= details['next_value']:
                raise ValueError('VALUE must be > prev index and < next index.')

    def _getAdjacentDataObjDetails(self, data_obj, value, index):
        """Safely check the status of adjacent values in an ADataRowObject.

        Fetches values for previous and next indexes in the data_obj if they
        exist.

        Note value in return 'index' key will be the given index unless it was
        None, in which case it will be the maximum index.

        All other values will be set to None if they do not exist.

        Args:
            data_obj(RowDataObject): containing the values to check against.
            value(float | int): the value to check.
            index=None(int): index to check adjacent values against. If None
                it will assume the index is the last one in the list.

        Return:
            dict - containing previous and next values and indexes, as well as
                the given index checked for None.

        Raises:
            ValueError: if index is negative.
        """
        prev_value = None
        next_value = None
        prev_index = None
        next_index = None
        if index is None:
            index = data_obj._max
        if index < 0:
            raise ValueError('Index must be > 0')
        if index > 0:
            prev_index = index - 1
            prev_value = data_obj[prev_index]
        if index < data_obj._max:
            # NOTE(review): ``next_index`` is the given index itself, not
            # index + 1. This looks intentional for insert-style checks (the
            # existing value at ``index`` becomes the next value), but confirm
            # against ADataRowObject usage.
            next_index = index
            next_value = data_obj[next_index]
        retvals = {'index': index,
                   'prev_value': prev_value, 'prev_index': prev_index,
                   'next_value': next_value, 'next_index': next_index}
        return retvals
class UnknownUnit(AUnit):
    """Catch all section for unknown parts of the .dat file.

    This can be used for all sections of the isis dat file that have not had
    a unit class constructed.

    It has no knowledge of the file section that it contains and will store it
    without altering it's state and return it in exactly the same format that it
    received it.

    This class is designed to be a fall-back class for any parts of the dat file
    for which it is deemed unnecessary to deal with more carefully.

    It has a 'Section' suffix rather that 'Unit' which is the naming convention
    for the other unit objects because it is not necessarily a single unit. It
    could be many different units.

    It is created whenever the DatLoader finds parts of the dat file that it
    doesn't know how to load (i.e. there is no *Unit defined for it. It will
    then put all the dat file data in one of these until it reaches a part of
    the file that it does recognise.
    """

    FILE_KEY = 'UNKNOWN'
    FILE_KEY2 = None

    def __init__(self, **kwargs):
        """Constructor."""
        super(UnknownUnit, self).__init__(**kwargs)
        self._unit_type = 'unknown'
        self._unit_category = 'unknown'
        # Random md5 suffix keeps labels unique-ish across multiple unknown
        # sections. NOTE(review): randint(-500, 500) only yields 1001 distinct
        # hashes, so collisions are possible — uuid.uuid4() (see original
        # trailing comment) would be safer; confirm before changing.
        self._name = 'unknown_' + str(hashlib.md5(str(random.randint(-500, 500)).encode()).hexdigest())  # str(uuid.uuid4())

    def getData(self):
        """Return the raw section data exactly as it was read in."""
        return self.head_data['all']

    def readUnitData(self, data, file_line=None, **kwargs):
        """Store the raw section data without interpreting it.

        BUG FIX: the original signature was ``readUnitData(self, data)`` which
        is incompatible with the AUnit hook ``readUnitData(data, file_line,
        **kwargs)`` called by the unit factory. ``file_line`` is now accepted
        (and ignored) with a default, keeping existing callers working.

        Args:
            data (list): raw data for the section as supplied to the class.
            file_line (int): unused; accepted for interface compatibility.
        """
        self.head_data['all'] = data
class CommentUnit(AUnit):
    """Holds the data in COMMENT sections of the .dat file.

    This is very similar to the UnknownSection in that all it does is grab the
    data between the comment tags and save it. It then prints out the same data
    in the same format with the COMMENT tags around it.
    """
    # Class constants
    UNIT_TYPE = 'comment'
    UNIT_CATEGORY = 'meta'
    FILE_KEY = 'COMMENT'
    FILE_KEY2 = None

    def __init__(self, **kwargs):
        """Constructor.

        Keyword Args:
            text(str): initial comment text; split into rows if non-empty.
        """
        super(CommentUnit, self).__init__(**kwargs)
        text = kwargs.get('text', '')
        self._unit_type = CommentUnit.UNIT_TYPE
        self._unit_category = CommentUnit.UNIT_CATEGORY
        # Random md5 suffix keeps comment labels unique-ish across units.
        self._name = 'comment_' + str(hashlib.md5(str(random.randint(-500, 500)).encode()).hexdigest())  # str(uuid.uuid4())
        self.has_datarows = True
        self.data = []
        if not text.strip() == '':
            self.addCommentText(text)

    def addCommentText(self, text):
        """Append comment text, one stripped row per line.

        Also updates self.no_of_rows to the new total row count.
        """
        lines = text.split('\n')
        # len() already returns an int; the original redundant int() wrapper
        # was removed.
        self.no_of_rows = len(self.data) + len(lines)
        for line in lines:
            self.data.append(line.strip())

    def readUnitData(self, data, file_line):
        """Read the row count then that many comment rows from the raw data.

        Args:
            data(list): raw file lines.
            file_line(int): index of the COMMENT header line.

        Returns:
            int - index of the last line consumed by this unit.
        """
        file_line += 1
        # The line after the COMMENT tag holds the number of comment rows.
        # (An unused local copy of this line was removed.)
        self.no_of_rows = int(data[file_line].strip())
        file_line += 1
        for _ in range(self.no_of_rows):
            self.data.append(data[file_line].strip())
            file_line += 1
        return file_line - 1

    def getData(self):
        """Format the comment unit for writing back to a .dat file.

        Returns:
            list of str - COMMENT tag, row count, then the comment rows.
        """
        output = []
        output.append('{:<10}'.format('COMMENT'))
        output.append('{:>10}'.format(self.no_of_rows))
        for d in self.data:
            output.append(d)
        # Never emit more rows than no_of_rows declares.
        if len(output) > self.no_of_rows + 2:
            output = output[:self.no_of_rows + 2]
        return output
class HeaderUnit(AUnit):
    """This class deals with the data file values at the top of the file.

    These contain the global variables for the model such as water temperature,
    key matrix coefficients and the total number of nodes.

    There is only ever one of these units in every dat file - at the very top -
    so it seems convenient to put it in this module.
    """
    # Class constants
    UNIT_TYPE = 'header'
    UNIT_CATEGORY = 'meta'
    FILE_KEY = 'HEADER'
    FILE_KEY2 = None

    def __init__(self, **kwargs):
        """Constructor."""
        super(HeaderUnit, self).__init__(**kwargs)
        self._unit_type = HeaderUnit.UNIT_TYPE
        self._unit_category = HeaderUnit.UNIT_CATEGORY
        self._name = 'header'
        # NOTE(review): fr_lower's default (0.9) and fr_upper's default (0.75)
        # look swapped relative to their initial values (0.750 / 0.900) —
        # confirm against the FMP file specification before changing.
        self.head_data = {
            'name': HeadDataItem('', '', 0, 0, dtype=dt.STRING),
            'revision': HeadDataItem('#REVISION#1', '{:>10}', 1, 0, dtype=dt.STRING),
            'node_count': HeadDataItem(0, '{:>10}', 2, 0, dtype=dt.INT),
            'fr_lower': HeadDataItem(0.750, '{:>10}', 2, 1, dtype=dt.FLOAT, dps=3, default=0.9),
            'fr_upper': HeadDataItem(0.900, '{:>10}', 2, 2, dtype=dt.FLOAT, dps=3, default=0.75),
            'min_depth': HeadDataItem(0.100, '{:>10}', 2, 3, dtype=dt.FLOAT, dps=3, default=0.01),
            'direct_method': HeadDataItem(0.001, '{:>10}', 2, 4, dtype=dt.FLOAT, dps=3, default=0.001),
            'label_length': HeadDataItem(12, '{:>10}', 2, 5, dtype=dt.INT),
            'water_temp': HeadDataItem(10.000, '{:>10}', 3, 0, dtype=dt.FLOAT, dps=3, default=10),
            'flow': HeadDataItem(0.010, '{:>10}', 3, 1, dtype=dt.FLOAT, dps=3, default=0.10),
            'head': HeadDataItem(0.010, '{:>10}', 3, 2, dtype=dt.FLOAT, dps=3, default=0.10),
            'math_damp': HeadDataItem(0.700, '{:>10}', 3, 3, dtype=dt.FLOAT, dps=3, default=0.7),
            'pivot': HeadDataItem(0.100, '{:>10}', 3, 4, dtype=dt.FLOAT, dps=3, default=0.1),
            'relax': HeadDataItem(0.700, '{:>10}', 3, 5, dtype=dt.FLOAT, dps=3, default=0.7),
            'dummy': HeadDataItem(0.000, '{:>10}', 3, 6, dtype=dt.FLOAT, dps=3, default=0.0),
            'roughness': HeadDataItem('', '{:>10}', 5, 0, dtype=dt.STRING),
        }

    def readUnitData(self, unit_data, file_line):
        """Reads the given data into the object.

        Values are sliced out of fixed 10-character columns on rows 2 and 3.

        Args:
            unit_data (list): The raw file data to be processed.
            file_line (int): index of the first header line.

        Returns:
            int - index of the line after the header block.
        """
        self.head_data['name'].value = unit_data[0].strip()
        self.head_data['revision'].value = unit_data[1].strip()
        self.head_data['node_count'].value = unit_data[2][:10].strip()
        self.head_data['fr_lower'].value = unit_data[2][10:20].strip()
        self.head_data['fr_upper'].value = unit_data[2][20:30].strip()
        self.head_data['min_depth'].value = unit_data[2][30:40].strip()
        self.head_data['direct_method'].value = unit_data[2][40:50].strip()
        self.head_data['label_length'].value = unit_data[2][50:60].strip()
        self.head_data['water_temp'].value = unit_data[3][:10].strip()
        self.head_data['flow'].value = unit_data[3][10:20].strip()
        self.head_data['head'].value = unit_data[3][20:30].strip()
        self.head_data['math_damp'].value = unit_data[3][30:40].strip()
        self.head_data['pivot'].value = unit_data[3][40:50].strip()
        self.head_data['relax'].value = unit_data[3][50:60].strip()
        self.head_data['dummy'].value = unit_data[3][60:70].strip()
        self.head_data['roughness'].value = unit_data[5].strip()
        return file_line + 7

    def getData(self):
        """Getter for the formatted data to write back to the .dat file.

        Returns:
            List - data formatted for writing to the new dat file.
        """
        out = []
        out.append(self.head_data['name'].value)
        # BUG FIX: the key order previously listed 'unknown', which is not a
        # head_data key and raised KeyError. The item at that file position
        # (row 2, col 5) is 'label_length'.
        key_order = ['revision', 'node_count', 'fr_lower', 'fr_upper', 'min_depth',
                     'direct_method', 'label_length', 'water_temp', 'flow', 'head',
                     'math_damp', 'pivot', 'relax', 'dummy']
        for k in key_order:
            out.append(self.head_data[k].format(True))
        out = ''.join(out).split('\n')
        out.append('RAD FILE')
        out.append(self.head_data['roughness'].format())
        out.append('END GENERAL')
        return out
| |
import tensorflow as tf
from tfsnippet.utils import (get_default_scope_name, camel_to_underscore,
add_name_and_scope_arg_doc)
from ..flows import BaseFlow
__all__ = ['InvertibleActivation', 'InvertibleActivationFlow']
class InvertibleActivation(object):
    """
    Base class for invertible activation functions.

    An invertible activation function is an element-wise transformation
    :math:`y = f(x)`, where its inverse function :math:`x = f^{-1}(y)`
    exists and can be explicitly computed.
    """

    def __call__(self, x):
        # Forward transform only; the log-determinant is not needed here.
        y, _ = self.transform(
            x=x, compute_y=True, compute_log_det=False,
            name=get_default_scope_name(
                camel_to_underscore(self.__class__.__name__))
        )
        return y

    def _transform(self, x, compute_y, compute_log_det):
        # Subclasses implement the actual element-wise math here.
        raise NotImplementedError()

    def transform(self, x, compute_y=True, compute_log_det=True,
                  value_ndims=0, name=None):
        """
        Transform `x` into `y`, and compute the log-determinant of `f` at `x`
        (i.e., :math:`\\log \\det \\frac{\\partial f(x)}{\\partial x}`).

        Args:
            x (Tensor): The samples of `x`.
            compute_y (bool): Whether or not to compute :math:`y = f(x)`?
                Default :obj:`True`.
            compute_log_det (bool): Whether or not to compute the
                log-determinant? Default :obj:`True`.
            value_ndims (int): Number of value dimensions.
                `log_det.ndims == x.ndims - value_ndims`.
            name (str): If specified, will use this name as the TensorFlow
                operational name scope.

        Returns:
            (tf.Tensor, tf.Tensor): `y` and the (maybe summed) log-determinant.
                The items in the returned tuple might be :obj:`None`
                if corresponding `compute_?` argument is set to :obj:`False`.

        Raises:
            ValueError: If both `compute_y` and `compute_log_det` are set
                to :obj:`False`, or if `value_ndims` is negative.
        """
        if not compute_y and not compute_log_det:
            raise ValueError('At least one of `compute_y` and '
                             '`compute_log_det` should be True.')
        value_ndims = int(value_ndims)
        if value_ndims < 0:
            raise ValueError('`value_ndims` must be >= 0: got {}'.
                             format(value_ndims))
        x = tf.convert_to_tensor(x)
        with tf.name_scope(
                name,
                default_name=get_default_scope_name('transform', self),
                values=[x]):
            y, log_det = self._transform(
                x=x, compute_y=compute_y, compute_log_det=compute_log_det)
            if log_det is not None and value_ndims > 0:
                # Sum the element-wise log-det over the trailing value dims.
                log_det = tf.reduce_sum(
                    log_det, axis=list(range(-value_ndims, 0)))
            return y, log_det

    def _inverse_transform(self, y, compute_x, compute_log_det):
        # Subclasses implement the actual inverse math here.
        raise NotImplementedError()

    def inverse_transform(self, y, compute_x=True, compute_log_det=True,
                          value_ndims=0, name=None):
        """
        Transform `y` into `x`, and compute the log-determinant of `f^{-1}` at
        `y` (i.e.,
        :math:`\\log \\det \\frac{\\partial f^{-1}(y)}{\\partial y}`).

        Args:
            y (Tensor): The samples of `y`.
            compute_x (bool): Whether or not to compute :math:`x = f^{-1}(y)`?
                Default :obj:`True`.
            compute_log_det (bool): Whether or not to compute the
                log-determinant? Default :obj:`True`.
            value_ndims (int): Number of value dimensions.
                `log_det.ndims == y.ndims - value_ndims`.
            name (str): If specified, will use this name as the TensorFlow
                operational name scope.

        Returns:
            (tf.Tensor, tf.Tensor): `x` and the (maybe summed) log-determinant.
                The items in the returned tuple might be :obj:`None`
                if corresponding `compute_?` argument is set to :obj:`False`.

        Raises:
            ValueError: If both `compute_x` and `compute_log_det` are set
                to :obj:`False`, or if `value_ndims` is negative.
            NotImplementedError: If the subclass does not implement
                `_inverse_transform`.
        """
        if not compute_x and not compute_log_det:
            raise ValueError('At least one of `compute_x` and '
                             '`compute_log_det` should be True.')
        value_ndims = int(value_ndims)
        if value_ndims < 0:
            raise ValueError('`value_ndims` must be >= 0: got {}'.
                             format(value_ndims))
        y = tf.convert_to_tensor(y)
        with tf.name_scope(
                name,
                default_name=get_default_scope_name('inverse_transform', self),
                values=[y]):
            x, log_det = self._inverse_transform(
                y=y, compute_x=compute_x, compute_log_det=compute_log_det)
            if log_det is not None and value_ndims > 0:
                # Sum the element-wise log-det over the trailing value dims.
                log_det = tf.reduce_sum(
                    log_det, axis=list(range(-value_ndims, 0)))
            return x, log_det

    @add_name_and_scope_arg_doc
    def as_flow(self, value_ndims, name=None, scope=None):
        """
        Convert this activation object into a :class:`BaseFlow`.

        Args:
            value_ndims (int): Number of value dimensions in both `x` and `y`.
                `x.ndims - value_ndims == log_det.ndims` and
                `y.ndims - value_ndims == log_det.ndims`.

        Returns:
            InvertibleActivationFlow: The flow.
        """
        return InvertibleActivationFlow(
            activation=self, value_ndims=value_ndims, name=name, scope=scope)
class InvertibleActivationFlow(BaseFlow):
    """
    A flow that converts a :class:`InvertibleActivation` into a flow.
    """

    @add_name_and_scope_arg_doc
    def __init__(self, activation, value_ndims, name=None, scope=None):
        """
        Construct a new :class:`InvertibleActivationFlow`.

        Args:
            activation (InvertibleActivation): The invertible activation object.
            value_ndims (int): Number of value dimensions in both `x` and `y`.
                `x.ndims - value_ndims == log_det.ndims` and
                `y.ndims - value_ndims == log_det.ndims`.

        Raises:
            TypeError: If `activation` is not an
                :class:`InvertibleActivation` instance.
        """
        if not isinstance(activation, InvertibleActivation):
            raise TypeError('`activation` must be an instance of '
                            '`InvertibleActivation`: got {}'.format(activation))
        # x and y share the same value_ndims since the activation is
        # element-wise.
        super(InvertibleActivationFlow, self).__init__(
            x_value_ndims=value_ndims,
            y_value_ndims=value_ndims,
            require_batch_dims=False,
            name=name,
            scope=scope,
        )
        self._activation = activation

    @property
    def value_ndims(self):
        """
        Get the number of value dimensions.

        Returns:
            int: The number of value dimensions.
        """
        assert(self.y_value_ndims == self.x_value_ndims)
        return self.x_value_ndims

    @property
    def activation(self):
        """
        Get the invertible activation object.

        Returns:
            InvertibleActivation: The invertible activation object.
        """
        return self._activation

    @property
    def explicitly_invertible(self):
        # Activations are invertible by definition of this class.
        return True

    def _transform(self, x, compute_y, compute_log_det):
        # Delegate the forward pass to the wrapped activation.
        return self._activation.transform(
            x=x, compute_y=compute_y, compute_log_det=compute_log_det,
            value_ndims=self.value_ndims,
        )

    def _inverse_transform(self, y, compute_x, compute_log_det):
        # Delegate the inverse pass to the wrapped activation.
        return self._activation.inverse_transform(
            y=y, compute_x=compute_x, compute_log_det=compute_log_det,
            value_ndims=self.value_ndims,
        )

    def _build(self, input=None):
        # No variables to create for a stateless element-wise activation.
        pass
| |
#!/usr/bin/python
#
# set_defs.py
#
# Class includes all set definitions
# Allows defining sets of accepted values and quick membership checking
#
# Rachel Kobayashi
# with
# Aaron Anderson
# Eric Gan
#
#
import re;
from collections import deque;
from qutil import *
import nltk;
# set definitions here
# allows for quick change
# and quick intersection / union, subtraction of sets
# be CAREFUL of abbreviations (converts to lower for check);
# Calendar month names plus their common abbreviations.
# Membership checks lower-case the word first, so be CAREFUL with abbreviations.
months = set(['january', 'february', 'march', 'april', 'may', 'june',
              'july', 'august', 'september', 'october', 'november', 'december',
              'jan', 'feb', 'mar', 'apr', 'may', 'jun',
              'jul', 'aug', 'sep', 'sept', 'oct', 'nov', 'dec'])
# Days of the week and their abbreviations.
# BUG FIX: the original literal was missing a comma after 'sun', so implicit
# string concatenation produced the single element 'sunthursday' — neither
# 'sun' nor 'thursday' was actually a member of the set.
days = set(['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
            'saturday', 'sunday',
            'mon', 'tue', 'tues', 'wed', 'thur', 'thu', 'fri', 'sat', 'sun'])
# Relative time words.
timewords = set(['today', 'tomorrow', 'yesterday'])
# Words that commonly open a question.
qWords = set(['who', 'what', 'where', 'when', 'why', 'did', 'do', 'does',
              'is', 'was', 'how'])
# Honorific prefixes that may precede a person's name.
namePre = set(['mr.', 'mrs.', 'ms.', 'dr.', 'miss'])
# Linking verbs.
linkVerb = set(['is', 'am', 'are', 'was'])
# Punctuation that can terminate a phrase.
endPhrasePunc = set(['!', ',', '.', ';', '?'])
# Pronoun classes used by Identity.isReplacablePronoun().
subPronouns = set(['he', 'she', 'we', 'they', 'i'])
objPronouns = set(['her', 'him', 'me', 'us', 'them'])
posPronouns = set(['their', 'his', 'her', 'our', 'my'])
# Forms of "to be".
beingV = set(['is', 'are', 'was', 'were'])
# U.S. state names and their two-letter postal abbreviations (from Wikipedia).
states = set(['Alabama', 'Alaska', 'Arizona', 'Arkansas', 'California',
              'Colorado', 'Connecticut', 'Delaware', 'District of Columbia',
              'Florida', 'Georgia', 'Hawaii', 'Idaho', 'Illinois', 'Indiana',
              'Iowa', 'Kansas', 'Kentucky', 'Louisiana', 'Maine', 'Maryland',
              'Massachusetts', 'Michigan', 'Minnesota', 'Mississippi',
              'Missouri', 'Montana', 'Nebraska', 'Nevada', 'New Hampshire',
              'New Jersey', 'New Mexico', 'New York', 'North Carolina',
              'North Dakota', 'Ohio', 'Oklahoma', 'Oregon', 'Pennsylvania',
              'Rhode Island', 'South Carolina', 'South Dakota', 'Tennessee',
              'Texas', 'Utah', 'Vermont', 'Virginia', 'Washington',
              'West Virginia', 'Wisconsin', 'Wyoming',
              'AL', 'AK', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DE', 'DC', 'FL',
              'GA', 'HI', 'ID', 'IL', 'IN', 'IA', 'KS', 'KY', 'LA', 'ME',
              'MD', 'MA', 'MI', 'MN', 'MS', 'MO', 'MT', 'NE', 'NV', 'NH',
              'NJ', 'NM', 'NY', 'NC', 'ND', 'OH', 'OK', 'OR', 'PA', 'RI',
              'SC', 'SD', 'TN', 'TX', 'UT', 'VT', 'VA', 'WA', 'WV', 'WI',
              'WY'])
# No country list yet; Identity.isPlace() will therefore only match states.
countries = set([])
## REGULAR EXPRESSION STRINGS
# (note there is an alternative way of saving the expression via re.compile,
# but that is mostly applied when used multiple times)
# FIX: patterns are now raw strings, so the backslash escapes (\d) reach the
# regex engine intact instead of being "invalid string escapes" that only
# work by accident and raise DeprecationWarning on modern Python.
#
# Dates divided by forward slashes or dashes; accepts both year/month/day and
# month/day/year (and the dash equivalents) with 2- or 4-digit years.
# Digit values are not range-checked here.
RE_DATE_FSLH1 = r'\d{1,2}/\d{1,2}/(\d{4}|\d{2})$'
RE_DATE_FSLH2 = r'(\d{4}|\d{2})/\d{1,2}/\d{1,2}$'
RE_DATE_DASH1 = r'\d{1,2}-\d{1,2}-(\d{4}|\d{2})$'
RE_DATE_DASH2 = r'(\d{4}|\d{2})-\d{1,2}-\d{1,2}$'
# tag sequence is number [anything short] number
RE_CD_EP_CD = r'CD (?P<mid>[^\s]{1,4}) CD'
# tag sequence is [not_number] proper_noun number
RE_X_NNP_CD = r'([^C][^D]+) NNP CD'
# re.match(' NNP CD',newStr):
#uses python sets for speed.
class Identity(object):
    """
    Word/phrase classification helpers backed by the module-level sets.

    Single-word checks lower-case the word before testing membership, so
    callers may pass tokens with their original casing.
    """

    def isBeingVerb(self, word):
        """Return True if `word` is a form of "to be" (is/are/was/were)."""
        return word.lower() in beingV

    def isEndPhrasePunc(self, word):
        """Return True if `word` is punctuation that can end a phrase."""
        return word.lower() in endPhrasePunc

    def isReplacablePronoun(self, word):
        """
        Classify a pronoun that could be replaced by its referent.
        "Replaceable" means the pronoun is a subject or an object.

        Returns:
            1 for subject pronouns, -1 for object pronouns, 2 for "it",
            and 0 (as "false") when `word` is not a replaceable pronoun.
        """
        lowered = word.lower()
        if lowered in subPronouns:
            return 1
        elif lowered in objPronouns:
            return -1
        elif lowered == "it":
            return 2
        else:
            # not a pronoun
            return 0

    def isMonth(self, word):
        """Return True if `word` is a month name or abbreviation."""
        return word.lower() in months

    def isDayOfWeek(self, word):
        """Return True if `word` is a day of the week or abbreviation."""
        return word.lower() in days

    def isTimeWord(self, word):
        """Return True for today/tomorrow/yesterday."""
        return word.lower() in timewords

    def isQuestionWord(self, word):
        """Return True if `word` commonly opens a question."""
        return word.lower() in qWords

    def isNamePre(self, word):
        """Return True for honorific name prefixes (mr., dr., ...)."""
        return word.lower() in namePre

    def isLinkVerb(self, word):
        """Return True if `word` is a linking verb."""
        return word.lower() in linkVerb

    def isPlace(self, first, second):
        """
        Return True when the two (token, tag) pairs look like a place:
        two proper nouns where the second token is a US state or a country.
        """
        (firstTok, firstTag) = first
        (secondTok, secondTag) = second
        if is_propN(firstTag) and is_propN(secondTag):
            state = secondTok in states
            country = secondTok in countries
            return state or country
        return False

    def isTemporal(self, word):
        """Return True for days of the week or today/tomorrow/yesterday."""
        return word.lower() in (days | timewords)

    def isTimeDep(self, wordList, ckCode):
        """
        Return True if any word in `wordList` is time dependent.

        ckCode > 0 checks only days of the week,
        ckCode < 0 checks only today/tomorrow/yesterday,
        ckCode == 0 checks both.
        """
        for word in wordList:
            if ckCode < 0 and self.isTimeWord(word):
                return True
            # BUG FIX: this previously called self.isDayOfWeeK (capital K),
            # a nonexistent attribute, so ckCode > 0 always raised
            # AttributeError instead of checking days of the week.
            elif ckCode > 0 and self.isDayOfWeek(word):
                return True
            elif ckCode == 0 and self.isTemporal(word):
                return True
        return False

    def findDates(self, wordList, tagList):
        """
        Return dates found in a tagged phrase as (start_index, length) spans.

        TODO pin down numerical constraints better.
        """
        n = len(wordList)
        # Sliding window of the last three tags, padded so the first real
        # tag lands in the final slot.
        tagset = deque(["", ""])
        locations = []
        for idx in range(0, n):
            start = idx - 2
            tag = tagList[idx]
            tagset.append(tag)
            newStr = q2str(tagset, 3)
            m = re.match(RE_CD_EP_CD, newStr)
            if m:
                midTag = m.groupdict()['mid']
                if len(midTag) >= 2 and midTag[0:2] == "NN":
                    # "CD NN CD", e.g. "21 June 2004"
                    if self.isMonth(wordList[start + 1]):
                        locations.append((start, 3))
                elif len(midTag) > 0 and midTag == ",":
                    # "Month CD , CD", e.g. "June 21, 2004"
                    # BUG FIX: guard on start-1 >= 0 (was idx > 0) so a match
                    # near the beginning of the phrase cannot wrap around to
                    # the end of the list through a negative index.
                    if start - 1 >= 0 and self.isMonth(wordList[start - 1]):
                        locations.append((start - 1, 4))
            # case for a month and day without year
            # (contains known errors with the regular expression)
            elif re.match(RE_X_NNP_CD, newStr) or \
                    newStr == ' NNP CD':
                if self.isMonth(wordList[start + 1]):
                    locations.append((start + 1, 2))
            elif tag == "CD":
                word = wordList[idx]
                # case for numeric date separated by slashes or dashes
                if re.match(RE_DATE_FSLH1, word) or \
                        re.match(RE_DATE_FSLH2, word) or \
                        re.match(RE_DATE_DASH1, word) or \
                        re.match(RE_DATE_DASH2, word):
                    locations.append((idx, 1))
                # case for a 4-digit year by itself
                elif re.match(r'\d{4}$', word):
                    if idx < n - 1 and not self.isMonth(wordList[idx + 1]):
                        if int(word) > 0 and int(word) < 2100:
                            locations.append((idx, 1))
            tagset.popleft()
        return locations

    def findNER(self, wordList, tagList):
        """
        Cheap Named Entity Recognition: find runs of capitalized /
        NNP-tagged words (meaning is not taken into account).  The first
        word of the sentence is skipped unless it is undeniably "Proper",
        i.e. both the supplied tags and nltk agree it is NNP.

        Returns:
            list of (start_index, length) spans with length > 1.
        """
        n = len(wordList)
        nltkTag = nltk.pos_tag(wordList)
        locations = []
        idx = 0
        propStrLen = 0
        # Skip the (always capitalized) sentence-initial word unless both
        # taggers agree that it is a proper noun.
        if not (tagList[idx] == "NNP" and nltkTag[idx][1] == "NNP"):
            if n > 1:
                idx += 1
        prevTag = None
        while idx < n:
            if idx > 0:
                prevTag = tagList[idx - 1]
            if self.isPropN(wordList[idx], tagList[idx]):
                propStrLen += 1
            elif prevTag == "NNP" and tagList[idx] == "CD":
                # allow a trailing number inside a proper phrase ("Rocky 2")
                propStrLen += 1
            else:
                if propStrLen > 1:
                    locations.append((idx - propStrLen, propStrLen))
                propStrLen = 0
            idx += 1
        # BUG FIX: a proper-noun run extending to the very end of the list
        # was previously dropped because the loop exited without flushing it.
        if propStrLen > 1:
            locations.append((n - propStrLen, propStrLen))
        return locations

    def isPropN(self, word, tag):
        """Return True if the word looks like a proper noun, based on the
        "NNP" tag or a leading capital letter."""
        return tag == "NNP" or word[0].isupper()

    def NNPoftheNNP(self, wordList, tagList):
        """
        Detect whether the list starts with a phrase of the form
        "NNP of/and [the] NNP" (e.g. "NNP of NNP", "NNP and the NNP").

        Returns:
            The length of the phrase (3 or 4), or 0 if there is none.
        """
        n = len(tagList)
        if n >= 3:
            if self.isPropN(wordList[0], tagList[0]) and \
                    (tagList[1] == "IN" or tagList[1] == "CC" or tagList[1] == ":"):
                if self.isPropN(wordList[2], tagList[2]):
                    return 3
                elif n >= 4:
                    if tagList[2] == "DT":
                        if self.isPropN(wordList[3], tagList[3]):
                            return 4
        return 0

    def findPropPrep(self, wordList, tagList):
        """
        Find proper prepositional phrases such as "Lord of the Rings" or
        "Harry Potter and the ____".  Wrapper around NNPoftheNNP.

        Returns:
            list of (start_index, length) spans.
        """
        locations = []
        for idx in range(len(tagList)):
            if self.isPropN(wordList[idx], tagList[idx]):
                subWord = wordList[idx:idx + 4]
                subTags = tagList[idx:idx + 4]
                n = self.NNPoftheNNP(subWord, subTags)
                if n > 0:
                    locations.append((idx, n))
        return locations

    # now deprecated with the above changes
    # used to find NamePrefix First Name, Last Name
    """
    def findNm(self, wordList, tagList):
        prevTag = tagList[0];
        locations = [];
        for idx in range(1, len(tagList)):
            tag = tagList[idx];
            if prevTag == "NNP" and tag == "NNP":
                if self.isNamePre(wordList[idx-1]):
                    if idx < len(tagList) and tagList[idx+1] == "NNP":
                        locations.append((idx-1, 3));
                    else:
                        locations.append((idx-1, 2));
                elif idx > 0 and idx < len(tagList) - 2:
                    lenPropPhrase = NNPoftheNNP(tagList[idx-1:idx+3]);
                    if lenPropPhrase > 0:
                        locations.append((idx-1,lenPropPhrase));
            prevTag = tag;
        return locations;"""
| |
import sys
class CardSuit:
    """Enumeration-style constants for the four card suits."""

    # Hearts=1, Spades=2, Clubs=3, Diamonds=4.
    Hearts, Spades, Clubs, Diamonds = range(1, 5)

    def __init__(self):
        pass

    @staticmethod
    def string_to_suit(s):
        """Translate a one-letter code ('S', 'D', 'C', 'H') to a suit constant.

        Raises KeyError for any other input.
        """
        lookup = {
            'H': CardSuit.Hearts,
            'S': CardSuit.Spades,
            'C': CardSuit.Clubs,
            'D': CardSuit.Diamonds,
        }
        return lookup[s]
class CardValue:
    """Enumeration-style constants for card values; Ace is high (14)."""

    One, Two, Three, Four, Five, Six, Seven, Eight, Nine, Ten, Jack, Queen, King, Ace = range(1, 15)

    def __init__(self):
        pass

    @staticmethod
    def string_to_value(s):
        """Translate a one-character code ('1'..'9', 'T', 'J', 'Q', 'K', 'A').

        Raises KeyError for any other input.
        """
        # Digit codes '1'-'9' map straight to their numeric value; ten and
        # the face cards use letter codes.
        value_by_code = {str(digit): digit for digit in range(1, 10)}
        value_by_code.update({
            'T': CardValue.Ten,
            'J': CardValue.Jack,
            'Q': CardValue.Queen,
            'K': CardValue.King,
            'A': CardValue.Ace,
        })
        return value_by_code[s]
class Card:
    """A single playing card holding a numeric value and a suit constant."""

    def __init__(self, value, suit):
        # value: 1-14 (CardValue constants); suit: CardSuit constant.
        self.value = value
        self.suit = suit

    def __str__(self):
        # Value is rendered in lowercase hex so 10-14 stay one character.
        rendered_value = '{0:x}'.format(self.value)
        return rendered_value + ":" + str(self.suit)

    def __lt__(self, other):
        # Cards order by value alone; suit never breaks ties.
        return self.value < other.value
class PokerHand:
    """A five-card poker hand with category tests and hand-vs-hand comparison.

    Each ``is_*`` method returns a tuple whose first element says whether the
    category applies and whose second element is a tie-breaking weight.
    """

    def __init__(self, cards):
        # Cards are kept sorted ascending by value; the map counts how many
        # cards share each value.
        self.cards = sorted(cards)
        self.value_to_count_map = {}
        for card in self.cards:
            self.value_to_count_map[card.value] = \
                self.value_to_count_map.get(card.value, 0) + 1

    def _is_x_of_a_kind(self, x):
        # (found, value of the group); (False, 0) when no group of size x.
        for value, count in self.value_to_count_map.items():
            if count == x:
                return True, value
        return False, 0

    def _is_all_same_suit(self):
        first_suit = self.cards[0].suit
        return all(card.suit == first_suit for card in self.cards)

    def _is_continuously_increasing(self):
        # True when the sorted values form an unbroken ascending run.
        values = [card.value for card in self.cards]
        return all(b == a + 1 for a, b in zip(values, values[1:]))

    def is_one_pair(self):
        # Returns (number of pairs, value of the last pair encountered).
        pair_values = [v for v, c in self.value_to_count_map.items() if c == 2]
        weight = pair_values[-1] if pair_values else 0
        return len(pair_values), weight

    def is_two_pairs(self):
        # (True, highest pair value) when exactly two distinct pairs exist.
        pair_values = [v for v, c in self.value_to_count_map.items() if c == 2]
        top = max(pair_values) if pair_values else 0
        return len(pair_values) == 2, top

    def is_three_of_a_kind(self):
        return self._is_x_of_a_kind(3)

    def is_straight(self):
        # NOTE: Ace is always high here; A-2-3-4-5 is not detected.
        return self._is_continuously_increasing(), self.cards[-1].value

    def is_flush(self):
        return self._is_all_same_suit(), self.cards[-1].value

    def is_full_house(self):
        # Weight is the value of the three-of-a-kind component.
        triple = self.is_three_of_a_kind()
        pair = self.is_one_pair()
        return triple[0] and pair[0], triple[1]

    def is_four_of_a_kind(self):
        return self._is_x_of_a_kind(4)

    def is_straight_flush(self):
        run = self._is_continuously_increasing()
        return run and self._is_all_same_suit(), self.cards[-1].value

    def is_royal_flush(self):
        # A straight flush whose lowest card is a ten.
        starts_at_ten = self.cards[0].value == CardValue.Ten
        return starts_at_ten and self.is_straight_flush()[0], 0

    # Return < 0 if self < other, 0 if self == other, > 0 if self > other.
    def __cmp__(self, other):
        def settle(mine, theirs):
            # Decide from one category's (found, weight) pair, if possible.
            if mine[0] and theirs[0] and mine[1] != theirs[1]:
                return True, mine[1] - theirs[1]
            if mine[0] and not theirs[0]:
                return True, 1
            if not mine[0] and theirs[0]:
                return True, -1
            return False, 0

        # Categories from strongest to weakest; the first decisive one wins.
        categories = (
            'is_royal_flush',
            'is_straight_flush',
            'is_four_of_a_kind',
            'is_full_house',
            'is_flush',
            'is_straight',
            'is_three_of_a_kind',
            'is_two_pairs',
            'is_one_pair',
        )
        for name in categories:
            decided, outcome = settle(getattr(self, name)(),
                                      getattr(other, name)())
            if decided:
                return outcome
        # All categories tie: compare high cards downward.
        for mine, theirs in zip(sorted(self.cards, reverse=True),
                                sorted(other.cards, reverse=True)):
            if mine.value != theirs.value:
                return mine.value - theirs.value
        return 0

    def __str__(self):
        return ",".join(str(card) for card in self.cards)
# Read one test case per line: ten space-separated card codes, the first five
# forming the left hand and the last five the right hand.  For each line,
# print which hand wins ('left'/'right') or 'none' on a tie.
# FIX: the file is now opened with a context manager, so it is closed even if
# a malformed line raises, instead of relying on an explicit close() call
# that is skipped whenever an exception propagates.
with open(sys.argv[1], 'r') as test_cases:
    for test in test_cases:
        test = test.strip()
        if len(test) == 0:
            continue
        # Each code is value-char followed by suit-char, e.g. 'TH' or '9S'.
        cards = [Card(CardValue.string_to_value(code[0]),
                      CardSuit.string_to_suit(code[1]))
                 for code in test.split(' ')]
        hand1 = PokerHand(cards[0:5])
        hand2 = PokerHand(cards[5:10])
        compare = hand1.__cmp__(hand2)
        if compare == 0:
            print('none')
        elif compare < 0:  # hand1 < hand2, i.e. hand2 is better.
            print('right')
        else:
            print('left')
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.auth.transport.requests import AuthorizedSession # type: ignore
import json # type: ignore
import grpc # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.api_core import rest_helpers
from google.api_core import rest_streaming
from google.api_core import path_template
from google.api_core import gapic_v1
from requests import __version__ as requests_version
import dataclasses
import re
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
import warnings
try:
    # Newer google-api-core exposes a sentinel default for per-call retry.
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    # Older google-api-core releases lack the sentinel; fall back to object.
    OptionalRetry = Union[retries.Retry, object]  # type: ignore
from google.cloud.compute_v1.types import compute
from .base import (
    TargetInstancesTransport,
    DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO,
)
# Client info reported in the user-agent header; the REST transport reports
# the installed `requests` version where gRPC transports report a gRPC version.
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
    grpc_version=None,
    rest_version=requests_version,
)
class TargetInstancesRestInterceptor:
    """Interceptor for TargetInstances.
    Interceptors are used to manipulate requests, request metadata, and responses
    in arbitrary ways.
    Example use cases include:
    * Logging
    * Verifying requests according to service or custom semantics
    * Stripping extraneous information from responses
    These use cases and more can be enabled by injecting an
    instance of a custom subclass when constructing the TargetInstancesRestTransport.
    .. code-block:: python
        class MyCustomTargetInstancesInterceptor(TargetInstancesRestInterceptor):
            def pre_aggregated_list(request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata
            def post_aggregated_list(response):
                logging.log(f"Received response: {response}")
            def pre_delete(request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata
            def post_delete(response):
                logging.log(f"Received response: {response}")
            def pre_get(request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata
            def post_get(response):
                logging.log(f"Received response: {response}")
            def pre_insert(request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata
            def post_insert(response):
                logging.log(f"Received response: {response}")
            def pre_list(request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata
            def post_list(response):
                logging.log(f"Received response: {response}")
        transport = TargetInstancesRestTransport(interceptor=MyCustomTargetInstancesInterceptor())
        client = TargetInstancesClient(transport=transport)
    """
    # Every hook below is a pass-through default: `pre_*` returns the request
    # and metadata unchanged, `post_*` returns the response unchanged.
    # Subclasses override only the hooks they care about.
    def pre_aggregated_list(
        self,
        request: compute.AggregatedListTargetInstancesRequest,
        metadata: Sequence[Tuple[str, str]],
    ) -> Tuple[compute.AggregatedListTargetInstancesRequest, Sequence[Tuple[str, str]]]:
        """Pre-rpc interceptor for aggregated_list
        Override in a subclass to manipulate the request or metadata
        before they are sent to the TargetInstances server.
        """
        return request, metadata
    def post_aggregated_list(
        self, response: compute.TargetInstanceAggregatedList
    ) -> compute.TargetInstanceAggregatedList:
        """Post-rpc interceptor for aggregated_list
        Override in a subclass to manipulate the response
        after it is returned by the TargetInstances server but before
        it is returned to user code.
        """
        return response
    def pre_delete(
        self,
        request: compute.DeleteTargetInstanceRequest,
        metadata: Sequence[Tuple[str, str]],
    ) -> Tuple[compute.DeleteTargetInstanceRequest, Sequence[Tuple[str, str]]]:
        """Pre-rpc interceptor for delete
        Override in a subclass to manipulate the request or metadata
        before they are sent to the TargetInstances server.
        """
        return request, metadata
    def post_delete(self, response: compute.Operation) -> compute.Operation:
        """Post-rpc interceptor for delete
        Override in a subclass to manipulate the response
        after it is returned by the TargetInstances server but before
        it is returned to user code.
        """
        return response
    def pre_get(
        self,
        request: compute.GetTargetInstanceRequest,
        metadata: Sequence[Tuple[str, str]],
    ) -> Tuple[compute.GetTargetInstanceRequest, Sequence[Tuple[str, str]]]:
        """Pre-rpc interceptor for get
        Override in a subclass to manipulate the request or metadata
        before they are sent to the TargetInstances server.
        """
        return request, metadata
    def post_get(self, response: compute.TargetInstance) -> compute.TargetInstance:
        """Post-rpc interceptor for get
        Override in a subclass to manipulate the response
        after it is returned by the TargetInstances server but before
        it is returned to user code.
        """
        return response
    def pre_insert(
        self,
        request: compute.InsertTargetInstanceRequest,
        metadata: Sequence[Tuple[str, str]],
    ) -> Tuple[compute.InsertTargetInstanceRequest, Sequence[Tuple[str, str]]]:
        """Pre-rpc interceptor for insert
        Override in a subclass to manipulate the request or metadata
        before they are sent to the TargetInstances server.
        """
        return request, metadata
    def post_insert(self, response: compute.Operation) -> compute.Operation:
        """Post-rpc interceptor for insert
        Override in a subclass to manipulate the response
        after it is returned by the TargetInstances server but before
        it is returned to user code.
        """
        return response
    def pre_list(
        self,
        request: compute.ListTargetInstancesRequest,
        metadata: Sequence[Tuple[str, str]],
    ) -> Tuple[compute.ListTargetInstancesRequest, Sequence[Tuple[str, str]]]:
        """Pre-rpc interceptor for list
        Override in a subclass to manipulate the request or metadata
        before they are sent to the TargetInstances server.
        """
        return request, metadata
    def post_list(
        self, response: compute.TargetInstanceList
    ) -> compute.TargetInstanceList:
        """Post-rpc interceptor for list
        Override in a subclass to manipulate the response
        after it is returned by the TargetInstances server but before
        it is returned to user code.
        """
        return response
@dataclasses.dataclass
class TargetInstancesRestStub:
    # Shared plumbing handed to each per-RPC stub: the authorized HTTP
    # session, the API host, and the user-supplied interceptor.
    _session: AuthorizedSession
    _host: str
    _interceptor: TargetInstancesRestInterceptor
class TargetInstancesRestTransport(TargetInstancesTransport):
"""REST backend transport for TargetInstances.
The TargetInstances API.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends JSON representations of protocol buffers over HTTP/1.1
"""
_STUBS: Dict[str, TargetInstancesRestStub] = {}
    def __init__(
        self,
        *,
        host: str = "compute.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Sequence[str] = None,
        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        url_scheme: str = "https",
        interceptor: Optional[TargetInstancesRestInterceptor] = None,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
                certificate to configure mutual TLS HTTP channel. It is ignored
                if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you are developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
            url_scheme: the protocol scheme for the API endpoint.  Normally
                "https", but for testing or local servers,
                "http" can be specified.
        """
        # Run the base constructor
        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
        # credentials object
        maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
        if maybe_url_match is None:
            raise ValueError(
                f"Unexpected hostname structure: {host}"
            )  # pragma: NO COVER
        url_match_items = maybe_url_match.groupdict()
        # Prepend the url scheme only when the caller passed a bare hostname.
        host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
        super().__init__(
            host=host,
            credentials=credentials,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        # All RPCs share a single authorized requests.Session.
        self._session = AuthorizedSession(
            self._credentials, default_host=self.DEFAULT_HOST
        )
        if client_cert_source_for_mtls:
            self._session.configure_mtls_channel(client_cert_source_for_mtls)
        # Fall back to the no-op interceptor when none is supplied, so the
        # per-RPC stubs can call pre_*/post_* hooks unconditionally.
        self._interceptor = interceptor or TargetInstancesRestInterceptor()
        self._prep_wrapped_messages(client_info)
    class _AggregatedList(TargetInstancesRestStub):
        # Callable stub for TargetInstances.AggregatedList over REST.
        def __hash__(self):
            # Stubs are cached by RPC name in the transport's _STUBS dict.
            return hash("AggregatedList")
        # Required query-string fields and their defaults; empty for this RPC.
        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
        @classmethod
        def _get_unset_required_fields(cls, message_dict):
            # Default values for required fields absent from the request dict.
            return {
                k: v
                for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
                if k not in message_dict
            }
        def __call__(
            self,
            request: compute.AggregatedListTargetInstancesRequest,
            *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
        ) -> compute.TargetInstanceAggregatedList:
            r"""Call the aggregated list method over HTTP.
            Args:
                request (~.compute.AggregatedListTargetInstancesRequest):
                    The request object. A request message for
                    TargetInstances.AggregatedList. See the
                    method description for details.
                retry (google.api_core.retry.Retry): Designation of what errors, if any,
                    should be retried.
                timeout (float): The timeout for this request.
                metadata (Sequence[Tuple[str, str]]): Strings which should be
                    sent along with the request as metadata.
            Returns:
                ~.compute.TargetInstanceAggregatedList:
            """
            http_options: List[Dict[str, str]] = [
                {
                    "method": "get",
                    "uri": "/compute/v1/projects/{project}/aggregated/targetInstances",
                },
            ]
            # Give the interceptor a chance to rewrite request/metadata first.
            request, metadata = self._interceptor.pre_aggregated_list(request, metadata)
            request_kwargs = compute.AggregatedListTargetInstancesRequest.to_dict(
                request
            )
            # Expand the URI template with path parameters from the request.
            transcoded_request = path_template.transcode(http_options, **request_kwargs)
            uri = transcoded_request["uri"]
            method = transcoded_request["method"]
            # Jsonify the query params
            query_params = json.loads(
                compute.AggregatedListTargetInstancesRequest.to_json(
                    compute.AggregatedListTargetInstancesRequest(
                        transcoded_request["query_params"]
                    ),
                    including_default_value_fields=False,
                    use_integers_for_enums=False,
                )
            )
            query_params.update(self._get_unset_required_fields(query_params))
            # Send the request
            headers = dict(metadata)
            headers["Content-Type"] = "application/json"
            response = getattr(self._session, method)(
                "{host}{uri}".format(host=self._host, uri=uri),
                timeout=timeout,
                headers=headers,
                params=rest_helpers.flatten_query_params(query_params),
            )
            # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
            # subclass.
            if response.status_code >= 400:
                raise core_exceptions.from_http_response(response)
            # Return the response
            resp = compute.TargetInstanceAggregatedList.from_json(
                response.content, ignore_unknown_fields=True
            )
            # Let the interceptor post-process (or replace) the parsed response.
            resp = self._interceptor.post_aggregated_list(resp)
            return resp
    class _Delete(TargetInstancesRestStub):
        # Callable stub for TargetInstances.Delete over REST.
        def __hash__(self):
            # Stubs are cached by RPC name in the transport's _STUBS dict.
            return hash("Delete")
        # Required query-string fields and their defaults; empty for this RPC.
        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
        @classmethod
        def _get_unset_required_fields(cls, message_dict):
            # Default values for required fields absent from the request dict.
            return {
                k: v
                for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
                if k not in message_dict
            }
        def __call__(
            self,
            request: compute.DeleteTargetInstanceRequest,
            *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
        ) -> compute.Operation:
            r"""Call the delete method over HTTP.
            Args:
                request (~.compute.DeleteTargetInstanceRequest):
                    The request object. A request message for
                    TargetInstances.Delete. See the method
                    description for details.
                retry (google.api_core.retry.Retry): Designation of what errors, if any,
                    should be retried.
                timeout (float): The timeout for this request.
                metadata (Sequence[Tuple[str, str]]): Strings which should be
                    sent along with the request as metadata.
            Returns:
                ~.compute.Operation:
                    Represents an Operation resource. Google Compute Engine
                has three Operation resources: \*
                `Global </compute/docs/reference/rest/v1/globalOperations>`__
                \*
                `Regional </compute/docs/reference/rest/v1/regionOperations>`__
                \*
                `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
                You can use an operation resource to manage asynchronous
                API requests. For more information, read Handling API
                responses. Operations can be global, regional or zonal.
                -  For global operations, use the ``globalOperations``
                resource. -  For regional operations, use the
                ``regionOperations`` resource. -  For zonal operations,
                use the ``zonalOperations`` resource. For more
                information, read  Global, Regional, and Zonal Resources.
            """
            http_options: List[Dict[str, str]] = [
                {
                    "method": "delete",
                    "uri": "/compute/v1/projects/{project}/zones/{zone}/targetInstances/{target_instance}",
                },
            ]
            # Give the interceptor a chance to rewrite request/metadata first.
            request, metadata = self._interceptor.pre_delete(request, metadata)
            request_kwargs = compute.DeleteTargetInstanceRequest.to_dict(request)
            # Expand the URI template with path parameters from the request.
            transcoded_request = path_template.transcode(http_options, **request_kwargs)
            uri = transcoded_request["uri"]
            method = transcoded_request["method"]
            # Jsonify the query params
            query_params = json.loads(
                compute.DeleteTargetInstanceRequest.to_json(
                    compute.DeleteTargetInstanceRequest(
                        transcoded_request["query_params"]
                    ),
                    including_default_value_fields=False,
                    use_integers_for_enums=False,
                )
            )
            query_params.update(self._get_unset_required_fields(query_params))
            # Send the request
            headers = dict(metadata)
            headers["Content-Type"] = "application/json"
            response = getattr(self._session, method)(
                "{host}{uri}".format(host=self._host, uri=uri),
                timeout=timeout,
                headers=headers,
                params=rest_helpers.flatten_query_params(query_params),
            )
            # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
            # subclass.
            if response.status_code >= 400:
                raise core_exceptions.from_http_response(response)
            # Return the response
            resp = compute.Operation.from_json(
                response.content, ignore_unknown_fields=True
            )
            # Let the interceptor post-process (or replace) the parsed response.
            resp = self._interceptor.post_delete(resp)
            return resp
    class _Get(TargetInstancesRestStub):
        # Callable stub for TargetInstances.Get over REST.
        def __hash__(self):
            # Stubs are cached by RPC name in the transport's _STUBS dict.
            return hash("Get")
        # Required query-string fields and their defaults; empty for this RPC.
        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
        @classmethod
        def _get_unset_required_fields(cls, message_dict):
            # Default values for required fields absent from the request dict.
            return {
                k: v
                for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
                if k not in message_dict
            }
        def __call__(
            self,
            request: compute.GetTargetInstanceRequest,
            *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
        ) -> compute.TargetInstance:
            r"""Call the get method over HTTP.
            Args:
                request (~.compute.GetTargetInstanceRequest):
                    The request object. A request message for
                    TargetInstances.Get. See the method
                    description for details.
                retry (google.api_core.retry.Retry): Designation of what errors, if any,
                    should be retried.
                timeout (float): The timeout for this request.
                metadata (Sequence[Tuple[str, str]]): Strings which should be
                    sent along with the request as metadata.
            Returns:
                ~.compute.TargetInstance:
                    Represents a Target Instance
                resource. You can use a target instance
                to handle traffic for one or more
                forwarding rules, which is ideal for
                forwarding protocol traffic that is
                managed by a single source. For example,
                ESP, AH, TCP, or UDP. For more
                information, read Target instances.
            """
            http_options: List[Dict[str, str]] = [
                {
                    "method": "get",
                    "uri": "/compute/v1/projects/{project}/zones/{zone}/targetInstances/{target_instance}",
                },
            ]
            # Give the interceptor a chance to rewrite request/metadata first.
            request, metadata = self._interceptor.pre_get(request, metadata)
            request_kwargs = compute.GetTargetInstanceRequest.to_dict(request)
            # Expand the URI template with path parameters from the request.
            transcoded_request = path_template.transcode(http_options, **request_kwargs)
            uri = transcoded_request["uri"]
            method = transcoded_request["method"]
            # Jsonify the query params
            query_params = json.loads(
                compute.GetTargetInstanceRequest.to_json(
                    compute.GetTargetInstanceRequest(
                        transcoded_request["query_params"]
                    ),
                    including_default_value_fields=False,
                    use_integers_for_enums=False,
                )
            )
            query_params.update(self._get_unset_required_fields(query_params))
            # Send the request
            headers = dict(metadata)
            headers["Content-Type"] = "application/json"
            response = getattr(self._session, method)(
                "{host}{uri}".format(host=self._host, uri=uri),
                timeout=timeout,
                headers=headers,
                params=rest_helpers.flatten_query_params(query_params),
            )
            # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
            # subclass.
            if response.status_code >= 400:
                raise core_exceptions.from_http_response(response)
            # Return the response
            resp = compute.TargetInstance.from_json(
                response.content, ignore_unknown_fields=True
            )
            # Let the interceptor post-process (or replace) the parsed response.
            resp = self._interceptor.post_get(resp)
            return resp
class _Insert(TargetInstancesRestStub):
    """REST stub for the TargetInstances.Insert RPC."""

    def __hash__(self):
        # Stubs are cached by the transport in a dict; a constant hash per
        # stub type makes instances of the same stub interchangeable keys.
        return hash("Insert")

    # Required query-string fields and the default values to send when the
    # request leaves them unset (empty for this method).
    __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}

    @classmethod
    def _get_unset_required_fields(cls, message_dict):
        # Return only the required defaults missing from the serialized
        # request, so explicitly provided values are never overwritten.
        return {
            k: v
            for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
            if k not in message_dict
        }

    def __call__(
        self,
        request: compute.InsertTargetInstanceRequest,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> compute.Operation:
        r"""Call the insert method over HTTP.

        Args:
            request (~.compute.InsertTargetInstanceRequest):
                The request object. A request message for
                TargetInstances.Insert. See the method
                description for details.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
                NOTE(review): annotation should arguably be Optional[float],
                since the default is None — confirm typing imports first.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                The zonal Operation resource produced by the insert call;
                can be used to track the asynchronous request.
        """
        # Static description of the REST binding: HTTP verb, URI template,
        # and which request field is sent as the HTTP body.
        http_options: List[Dict[str, str]] = [
            {
                "method": "post",
                "uri": "/compute/v1/projects/{project}/zones/{zone}/targetInstances",
                "body": "target_instance_resource",
            },
        ]
        # Let the user-supplied interceptor mutate request/metadata first.
        request, metadata = self._interceptor.pre_insert(request, metadata)
        request_kwargs = compute.InsertTargetInstanceRequest.to_dict(request)
        # Expand the URI template with path params and split out the body
        # and query params.
        transcoded_request = path_template.transcode(http_options, **request_kwargs)

        # Jsonify the request body
        body = compute.TargetInstance.to_json(
            compute.TargetInstance(transcoded_request["body"]),
            including_default_value_fields=False,
            use_integers_for_enums=False,
        )
        uri = transcoded_request["uri"]
        method = transcoded_request["method"]

        # Jsonify the query params
        query_params = json.loads(
            compute.InsertTargetInstanceRequest.to_json(
                compute.InsertTargetInstanceRequest(
                    transcoded_request["query_params"]
                ),
                including_default_value_fields=False,
                use_integers_for_enums=False,
            )
        )
        # Fill in any required query params the caller left unset.
        query_params.update(self._get_unset_required_fields(query_params))

        # Send the request
        headers = dict(metadata)
        headers["Content-Type"] = "application/json"
        response = getattr(self._session, method)(
            "{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
            data=body,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response
        resp = compute.Operation.from_json(
            response.content, ignore_unknown_fields=True
        )
        # Give the interceptor a chance to post-process (or replace) the
        # deserialized response before returning it to the caller.
        resp = self._interceptor.post_insert(resp)
        return resp
class _List(TargetInstancesRestStub):
    """REST stub that implements the TargetInstances.List RPC."""

    def __hash__(self):
        # All _List stubs are interchangeable cache keys.
        return hash("List")

    # Required query params to backfill when absent (none for this method).
    __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}

    @classmethod
    def _get_unset_required_fields(cls, message_dict):
        # Collect only the required defaults not already present in the
        # serialized request.
        missing = {}
        for field, default in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items():
            if field not in message_dict:
                missing[field] = default
        return missing

    def __call__(
        self,
        request: compute.ListTargetInstancesRequest,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> compute.TargetInstanceList:
        r"""Call the list method over HTTP.

        Args:
            request (~.compute.ListTargetInstancesRequest):
                The request object. A request message for
                TargetInstances.List. See the method
                description for details.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.TargetInstanceList:
                Contains a list of TargetInstance
                resources.
        """
        # The single REST binding for this method: a GET with no body.
        bindings: List[Dict[str, str]] = [
            {
                "method": "get",
                "uri": "/compute/v1/projects/{project}/zones/{zone}/targetInstances",
            },
        ]

        # Allow the interceptor to adjust the request before serialization.
        request, metadata = self._interceptor.pre_list(request, metadata)

        # Expand the URI template and separate out the query parameters.
        transcoded = path_template.transcode(
            bindings, **compute.ListTargetInstancesRequest.to_dict(request)
        )

        # Round-trip the query params through JSON to get a plain dict,
        # then backfill any unset required fields.
        params_json = compute.ListTargetInstancesRequest.to_json(
            compute.ListTargetInstancesRequest(transcoded["query_params"]),
            including_default_value_fields=False,
            use_integers_for_enums=False,
        )
        query_params = json.loads(params_json)
        query_params.update(self._get_unset_required_fields(query_params))

        # Issue the HTTP request, passing caller metadata as headers.
        request_headers = dict(metadata)
        request_headers["Content-Type"] = "application/json"
        send = getattr(self._session, transcoded["method"])
        response = send(
            "{host}{uri}".format(host=self._host, uri=transcoded["uri"]),
            timeout=timeout,
            headers=request_headers,
            params=rest_helpers.flatten_query_params(query_params),
        )

        # Map HTTP errors onto the matching GoogleAPICallError subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Deserialize, run the post-interceptor, and hand back the result.
        result = compute.TargetInstanceList.from_json(
            response.content, ignore_unknown_fields=True
        )
        return self._interceptor.post_list(result)
@property
def aggregated_list(
    self,
) -> Callable[
    [compute.AggregatedListTargetInstancesRequest],
    compute.TargetInstanceAggregatedList,
]:
    """Return the AggregatedList callable, creating and caching it lazily."""
    cached = self._STUBS.get("aggregated_list")
    if cached:
        # mypy cannot prove the stored stub matches the declared Callable;
        # in C++ this would require a dynamic_cast.
        return cached  # type: ignore
    created = self._AggregatedList(self._session, self._host, self._interceptor)
    self._STUBS["aggregated_list"] = created
    return created  # type: ignore
@property
def delete(
    self,
) -> Callable[[compute.DeleteTargetInstanceRequest], compute.Operation]:
    """Return the Delete callable, creating and caching it lazily."""
    cached = self._STUBS.get("delete")
    if cached:
        # mypy cannot prove the stored stub matches the declared Callable;
        # in C++ this would require a dynamic_cast.
        return cached  # type: ignore
    created = self._Delete(self._session, self._host, self._interceptor)
    self._STUBS["delete"] = created
    return created  # type: ignore
@property
def get(
    self,
) -> Callable[[compute.GetTargetInstanceRequest], compute.TargetInstance]:
    """Return the Get callable, creating and caching it lazily."""
    cached = self._STUBS.get("get")
    if cached:
        # mypy cannot prove the stored stub matches the declared Callable;
        # in C++ this would require a dynamic_cast.
        return cached  # type: ignore
    created = self._Get(self._session, self._host, self._interceptor)
    self._STUBS["get"] = created
    return created  # type: ignore
@property
def insert(
    self,
) -> Callable[[compute.InsertTargetInstanceRequest], compute.Operation]:
    """Return the Insert callable, creating and caching it lazily."""
    cached = self._STUBS.get("insert")
    if cached:
        # mypy cannot prove the stored stub matches the declared Callable;
        # in C++ this would require a dynamic_cast.
        return cached  # type: ignore
    created = self._Insert(self._session, self._host, self._interceptor)
    self._STUBS["insert"] = created
    return created  # type: ignore
@property
def list(
    self,
) -> Callable[[compute.ListTargetInstancesRequest], compute.TargetInstanceList]:
    """Return the List callable, creating and caching it lazily."""
    cached = self._STUBS.get("list")
    if cached:
        # mypy cannot prove the stored stub matches the declared Callable;
        # in C++ this would require a dynamic_cast.
        return cached  # type: ignore
    created = self._List(self._session, self._host, self._interceptor)
    self._STUBS["list"] = created
    return created  # type: ignore
def close(self):
    """Close the underlying HTTP session held by this transport."""
    self._session.close()
__all__ = ("TargetInstancesRestTransport",)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models for nova data.
"""
from sqlalchemy import Column, Index, Integer, BigInteger, Enum, String, schema
from sqlalchemy.dialects.mysql import MEDIUMTEXT
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float
from sqlalchemy.orm import relationship, backref, object_mapper
from oslo.config import cfg
from nova.db.sqlalchemy import types
from nova.openstack.common.db.sqlalchemy import models
from nova.openstack.common import timeutils
# Global oslo.config handle; options such as instance_name_template are
# registered elsewhere and read lazily by the models below.
CONF = cfg.CONF
# Declarative base shared by every model class in this module.
BASE = declarative_base()
def MediumText():
    """Return a Text column type that becomes MEDIUMTEXT on MySQL."""
    generic = Text()
    return generic.with_variant(MEDIUMTEXT(), 'mysql')
class NovaBase(models.SoftDeleteMixin,
               models.TimestampMixin,
               models.ModelBase):
    """Common base for Nova models: mixes in soft-delete and timestamp support."""
    # NOTE(review): presumably reset to None so the mixin chain does not
    # bind a MetaData object; SQLAlchemy's declarative base supplies the
    # real metadata — confirm against the oslo ModelBase implementation.
    metadata = None
class Service(BASE, NovaBase):
    """Represents a running service on a host."""

    __tablename__ = 'services'
    # Uniqueness is scoped per (host, topic/binary, deleted) so a soft-deleted
    # row does not block re-creating the same service.
    __table_args__ = (
        schema.UniqueConstraint("host", "topic", "deleted",
                                name="uniq_services0host0topic0deleted"),
        schema.UniqueConstraint("host", "binary", "deleted",
                                name="uniq_services0host0binary0deleted")
    )

    id = Column(Integer, primary_key=True)
    host = Column(String(255))  # , ForeignKey('hosts.id'))
    # Name of the service executable.
    binary = Column(String(255))
    topic = Column(String(255))
    # Number of status reports the service has made; never NULL, starts at 0.
    report_count = Column(Integer, nullable=False, default=0)
    disabled = Column(Boolean, default=False)
    # Free-form explanation for why the service was disabled.
    disabled_reason = Column(String(255))
class ComputeNode(BASE, NovaBase):
    """Represents a running compute service on a host."""

    __tablename__ = 'compute_nodes'
    __table_args__ = ()
    id = Column(Integer, primary_key=True)
    service_id = Column(Integer, ForeignKey('services.id'), nullable=False)
    # Owning Service row; the join excludes soft-deleted compute nodes.
    service = relationship(Service,
                           backref=backref('compute_node'),
                           foreign_keys=service_id,
                           primaryjoin='and_('
                                       'ComputeNode.service_id == Service.id,'
                                       'ComputeNode.deleted == 0)')

    # Total capacity of the node.
    vcpus = Column(Integer, nullable=False)
    memory_mb = Column(Integer, nullable=False)
    local_gb = Column(Integer, nullable=False)
    # Capacity currently consumed by instances.
    vcpus_used = Column(Integer, nullable=False)
    memory_mb_used = Column(Integer, nullable=False)
    local_gb_used = Column(Integer, nullable=False)
    hypervisor_type = Column(MediumText(), nullable=False)
    hypervisor_version = Column(Integer, nullable=False)
    hypervisor_hostname = Column(String(255))

    # Free Ram, amount of activity (resize, migration, boot, etc) and
    # the number of running VM's are a good starting point for what's
    # important when making scheduling decisions.
    free_ram_mb = Column(Integer)
    free_disk_gb = Column(Integer)
    current_workload = Column(Integer)
    running_vms = Column(Integer)

    # Note(masumotok): Expected Strings example:
    #
    # '{"arch":"x86_64",
    #   "model":"Nehalem",
    #   "topology":{"sockets":1, "threads":2, "cores":3},
    #   "features":["tdtscp", "xtpr"]}'
    #
    # Points are "json translatable" and it must have all dictionary keys
    # above, since it is copied from <cpu> tag of getCapabilities()
    # (See libvirt.virtConnection).
    cpu_info = Column(MediumText(), nullable=False)
    disk_available_least = Column(Integer)
    host_ip = Column(types.IPAddress())
    supported_instances = Column(Text)
    metrics = Column(Text)

    # Note(yongli): json string PCI Stats
    # '{"vendor_id":"8086", "product_id":"1234", "count":3 }'
    pci_stats = Column(Text)

    # extra_resources is a json string containing arbitrary
    # data about additional resources.
    extra_resources = Column(Text)
class ComputeNodeStat(BASE, NovaBase):
    """Stats related to the current workload of a compute host that are
    intended to aid in making scheduler decisions.
    """
    __tablename__ = 'compute_node_stats'
    __table_args__ = (
        Index('ix_compute_node_stats_compute_node_id', 'compute_node_id'),
        Index('compute_node_stats_node_id_and_deleted_idx',
              'compute_node_id', 'deleted')
    )

    # Free-form key/value pair attached to a compute node.
    id = Column(Integer, primary_key=True)
    key = Column(String(255), nullable=False)
    value = Column(String(255))
    compute_node_id = Column(Integer, ForeignKey('compute_nodes.id'),
                             nullable=False)

    # Owning ComputeNode; the join excludes soft-deleted stat rows.
    compute_node = relationship(ComputeNode, backref=backref('stats'),
                                foreign_keys=compute_node_id,
                                primaryjoin='and_('
                                            'ComputeNodeStat.compute_node_id == '
                                            'ComputeNode.id,'
                                            'ComputeNodeStat.deleted == 0)')

    def __str__(self):
        # Render as "{<node id>: <key> = <value>}" for debugging/logging.
        return "{%d: %s = %s}" % (self.compute_node_id, self.key, self.value)
class Certificate(BASE, NovaBase):
    """Represents a x509 certificate."""
    __tablename__ = 'certificates'
    __table_args__ = (
        Index('certificates_project_id_deleted_idx', 'project_id', 'deleted'),
        Index('certificates_user_id_deleted_idx', 'user_id', 'deleted')
    )
    id = Column(Integer, primary_key=True)

    # Owner of the certificate.
    user_id = Column(String(255))
    project_id = Column(String(255))
    # Path/name of the certificate file on disk.
    file_name = Column(String(255))
class Instance(BASE, NovaBase):
    """Represents a guest VM."""
    __tablename__ = 'instances'
    __table_args__ = (
        Index('uuid', 'uuid', unique=True),
        Index('project_id', 'project_id'),
        Index('instances_host_deleted_idx',
              'host', 'deleted'),
        Index('instances_reservation_id_idx',
              'reservation_id'),
        Index('instances_terminated_at_launched_at_idx',
              'terminated_at', 'launched_at'),
        Index('instances_uuid_deleted_idx',
              'uuid', 'deleted'),
        Index('instances_task_state_updated_at_idx',
              'task_state', 'updated_at'),
        Index('instances_host_node_deleted_idx',
              'host', 'node', 'deleted'),
        Index('instances_host_deleted_cleaned_idx',
              'host', 'deleted', 'cleaned'),
    )
    # In-memory only; not backed by a database column.
    injected_files = []

    id = Column(Integer, primary_key=True, autoincrement=True)

    @property
    def name(self):
        """Render the instance name from CONF.instance_name_template.

        Falls back from positional ("%s"-style) formatting with the id to
        mapping-style formatting with all column values, and finally to
        the bare uuid if the template references an unknown key.
        """
        try:
            base_name = CONF.instance_name_template % self.id
        except TypeError:
            # Support templates like "uuid-%(uuid)s", etc.
            info = {}
            # NOTE(russellb): Don't use self.iteritems() here, as it will
            # result in infinite recursion on the name property.
            for column in iter(object_mapper(self).columns):
                key = column.name
                # prevent recursion if someone specifies %(name)s
                # %(name)s will not be valid.
                if key == 'name':
                    continue
                info[key] = self[key]
            try:
                base_name = CONF.instance_name_template % info
            except KeyError:
                base_name = self.uuid
        return base_name

    def _extra_keys(self):
        # Expose the computed 'name' property as an extra model key
        # (consumed by the oslo ModelBase iteration helpers).
        return ['name']

    user_id = Column(String(255))
    project_id = Column(String(255))

    # Glance image and optional kernel/ramdisk references.
    image_ref = Column(String(255))
    kernel_id = Column(String(255))
    ramdisk_id = Column(String(255))
    hostname = Column(String(255))

    launch_index = Column(Integer)
    key_name = Column(String(255))
    key_data = Column(MediumText())

    power_state = Column(Integer)
    vm_state = Column(String(255))
    task_state = Column(String(255))

    # Requested resources for the guest.
    memory_mb = Column(Integer)
    vcpus = Column(Integer)
    root_gb = Column(Integer)
    ephemeral_gb = Column(Integer)

    # This is not related to hostname, above.  It refers
    # to the nova node.
    host = Column(String(255))  # , ForeignKey('hosts.id'))
    # To identify the "ComputeNode" which the instance resides in.
    # This equals to ComputeNode.hypervisor_hostname.
    node = Column(String(255))

    # *not* flavorid, this is the internal primary_key.
    instance_type_id = Column(Integer)

    user_data = Column(MediumText())

    reservation_id = Column(String(255))

    scheduled_at = Column(DateTime)
    launched_at = Column(DateTime)
    terminated_at = Column(DateTime)

    availability_zone = Column(String(255))

    # User editable field for display in user-facing UIs
    display_name = Column(String(255))
    display_description = Column(String(255))

    # To remember on which host an instance booted.
    # An instance may have moved to another host by live migration.
    launched_on = Column(MediumText())

    # NOTE(jdillaman): locked deprecated in favor of locked_by,
    # to be removed in Icehouse
    locked = Column(Boolean)
    locked_by = Column(Enum('owner', 'admin'))

    os_type = Column(String(255))
    architecture = Column(String(255))
    vm_mode = Column(String(255))
    uuid = Column(String(36))

    root_device_name = Column(String(255))
    default_ephemeral_device = Column(String(255))
    default_swap_device = Column(String(255))
    config_drive = Column(String(255))

    # User editable field meant to represent what ip should be used
    # to connect to the instance
    access_ip_v4 = Column(types.IPAddress())
    access_ip_v6 = Column(types.IPAddress())

    auto_disk_config = Column(Boolean())
    progress = Column(Integer)

    # EC2 instance_initiated_shutdown_terminate
    # True: -> 'terminate'
    # False: -> 'stop'
    # Note(maoy): currently Nova will always stop instead of terminate
    # no matter what the flag says. So we set the default to False.
    shutdown_terminate = Column(Boolean(), default=False)

    # EC2 disable_api_termination
    disable_terminate = Column(Boolean(), default=False)

    # OpenStack compute cell name.  This will only be set at the top of
    # the cells tree and it'll be a full cell name such as 'api!hop1!hop2'
    cell_name = Column(String(255))
    internal_id = Column(Integer)

    # Records whether an instance has been deleted from disk
    cleaned = Column(Integer, default=0)
class InstanceInfoCache(BASE, NovaBase):
    """Represents a cache of information about an instance."""
    __tablename__ = 'instance_info_caches'
    # One cache row per instance.
    __table_args__ = (
        schema.UniqueConstraint(
            "instance_uuid",
            name="uniq_instance_info_caches0instance_uuid"),)
    id = Column(Integer, primary_key=True, autoincrement=True)

    # text column used for storing a json object of network data for api
    network_info = Column(MediumText())

    instance_uuid = Column(String(36), ForeignKey('instances.uuid'),
                           nullable=False)

    # One-to-one back to the owning Instance (exposed as Instance.info_cache).
    instance = relationship(Instance,
                            backref=backref('info_cache', uselist=False),
                            foreign_keys=instance_uuid,
                            primaryjoin=instance_uuid == Instance.uuid)
class InstanceTypes(BASE, NovaBase):
    """Represents possible flavors for instances.

    Note: instance_type and flavor are synonyms and the term instance_type is
    deprecated and in the process of being removed.
    """
    __tablename__ = "instance_types"

    # Uniqueness scoped by 'deleted' so soft-deleted flavors can be recreated.
    __table_args__ = (
        schema.UniqueConstraint("flavorid", "deleted",
                                name="uniq_instance_types0flavorid0deleted"),
        schema.UniqueConstraint("name", "deleted",
                                name="uniq_instance_types0name0deleted")
    )

    # Internal only primary key/id
    id = Column(Integer, primary_key=True)
    name = Column(String(255))
    memory_mb = Column(Integer, nullable=False)
    vcpus = Column(Integer, nullable=False)
    root_gb = Column(Integer)
    ephemeral_gb = Column(Integer)
    # Public facing id will be renamed public_id
    flavorid = Column(String(255))
    swap = Column(Integer, nullable=False, default=0)
    # Network rx/tx multiplier relative to the base flavor; defaults to 1.
    rxtx_factor = Column(Float, default=1)
    vcpu_weight = Column(Integer)
    disabled = Column(Boolean, default=False)
    # Whether the flavor is visible to all projects.
    is_public = Column(Boolean, default=True)
class Volume(BASE, NovaBase):
    """Represents a block storage device that can be attached to a VM."""
    __tablename__ = 'volumes'
    __table_args__ = (
        Index('volumes_instance_uuid_idx', 'instance_uuid'),
    )
    # Volumes use a string (UUID) primary key, unlike most tables here.
    id = Column(String(36), primary_key=True, nullable=False)
    # Soft-delete marker is a string here (the uuid on delete, "" if live),
    # overriding the integer 'deleted' column from the mixin.
    deleted = Column(String(36), default="")

    @property
    def name(self):
        """Render the display name from CONF.volume_name_template."""
        return CONF.volume_name_template % self.id

    ec2_id = Column(String(255))
    user_id = Column(String(255))
    project_id = Column(String(255))

    snapshot_id = Column(String(36))

    host = Column(String(255))
    size = Column(Integer)
    availability_zone = Column(String(255))
    instance_uuid = Column(String(36))
    mountpoint = Column(String(255))
    attach_time = Column(DateTime)
    status = Column(String(255))  # TODO(vish): enum?
    attach_status = Column(String(255))  # TODO(vish): enum
    scheduled_at = Column(DateTime)
    launched_at = Column(DateTime)
    terminated_at = Column(DateTime)

    display_name = Column(String(255))
    display_description = Column(String(255))

    provider_location = Column(String(256))
    provider_auth = Column(String(256))

    volume_type_id = Column(Integer)
class Quota(BASE, NovaBase):
    """Represents a single quota override for a project.

    If there is no row for a given project id and resource, then the
    default for the quota class is used.  If there is no row for a
    given quota class and resource, then the default for the
    deployment is used. If the row is present but the hard limit is
    Null, then the resource is unlimited.
    """

    __tablename__ = 'quotas'
    __table_args__ = (
        schema.UniqueConstraint("project_id", "resource", "deleted",
                                name="uniq_quotas0project_id0resource0deleted"
                                ),
    )
    id = Column(Integer, primary_key=True)

    project_id = Column(String(255))

    # Name of the quota'd resource; NULL hard_limit means unlimited.
    resource = Column(String(255), nullable=False)
    hard_limit = Column(Integer)
class ProjectUserQuota(BASE, NovaBase):
    """Represents a single quota override for a user with in a project."""

    __tablename__ = 'project_user_quotas'
    # Constraint name pulled out only to keep the line length manageable.
    uniq_name = "uniq_project_user_quotas0user_id0project_id0resource0deleted"
    __table_args__ = (
        schema.UniqueConstraint("user_id", "project_id", "resource", "deleted",
                                name=uniq_name),
        Index('project_user_quotas_project_id_deleted_idx',
              'project_id', 'deleted'),
        Index('project_user_quotas_user_id_deleted_idx',
              'user_id', 'deleted')
    )
    id = Column(Integer, primary_key=True, nullable=False)

    project_id = Column(String(255), nullable=False)
    user_id = Column(String(255), nullable=False)

    # Name of the quota'd resource; NULL hard_limit means unlimited.
    resource = Column(String(255), nullable=False)
    hard_limit = Column(Integer)
class QuotaClass(BASE, NovaBase):
    """Represents a single quota override for a quota class.

    If there is no row for a given quota class and resource, then the
    default for the deployment is used.  If the row is present but the
    hard limit is Null, then the resource is unlimited.
    """

    __tablename__ = 'quota_classes'
    __table_args__ = (
        Index('ix_quota_classes_class_name', 'class_name'),
    )
    id = Column(Integer, primary_key=True)

    class_name = Column(String(255))

    # Name of the quota'd resource; NULL hard_limit means unlimited.
    resource = Column(String(255))
    hard_limit = Column(Integer)
class QuotaUsage(BASE, NovaBase):
    """Represents the current usage for a given resource."""

    __tablename__ = 'quota_usages'
    __table_args__ = (
        Index('ix_quota_usages_project_id', 'project_id'),
    )
    id = Column(Integer, primary_key=True)

    project_id = Column(String(255))
    user_id = Column(String(255))
    resource = Column(String(255), nullable=False)

    # Currently consumed vs. reserved-but-uncommitted amounts.
    in_use = Column(Integer, nullable=False)
    reserved = Column(Integer, nullable=False)

    @property
    def total(self):
        """Total committed plus reserved usage."""
        return self.in_use + self.reserved

    until_refresh = Column(Integer)
class Reservation(BASE, NovaBase):
    """Represents a resource reservation for quotas."""

    __tablename__ = 'reservations'
    __table_args__ = (
        Index('ix_reservations_project_id', 'project_id'),
        Index('reservations_uuid_idx', 'uuid'),
    )
    id = Column(Integer, primary_key=True, nullable=False)
    uuid = Column(String(36), nullable=False)

    usage_id = Column(Integer, ForeignKey('quota_usages.id'), nullable=False)

    project_id = Column(String(255))
    user_id = Column(String(255))
    resource = Column(String(255))

    # Amount reserved (may be negative for releases) and when it lapses.
    delta = Column(Integer, nullable=False)
    expire = Column(DateTime)

    # Owning QuotaUsage row; join skips soft-deleted usages.
    usage = relationship(
        "QuotaUsage",
        foreign_keys=usage_id,
        primaryjoin='and_(Reservation.usage_id == QuotaUsage.id,'
                    'QuotaUsage.deleted == 0)')
class Snapshot(BASE, NovaBase):
    """Represents a point-in-time snapshot of a block storage volume."""
    __tablename__ = 'snapshots'
    __table_args__ = ()
    # String (UUID) primary key, like Volume.
    id = Column(String(36), primary_key=True, nullable=False)
    # String soft-delete marker, overriding the mixin's integer column.
    deleted = Column(String(36), default="")

    @property
    def name(self):
        """Render the snapshot name from CONF.snapshot_name_template."""
        return CONF.snapshot_name_template % self.id

    @property
    def volume_name(self):
        """Render the source volume's name from CONF.volume_name_template."""
        return CONF.volume_name_template % self.volume_id

    user_id = Column(String(255))
    project_id = Column(String(255))

    # Volume this snapshot was taken from, and its size at snapshot time.
    volume_id = Column(String(36), nullable=False)
    status = Column(String(255))
    progress = Column(String(255))
    volume_size = Column(Integer)
    scheduled_at = Column(DateTime)

    display_name = Column(String(255))
    display_description = Column(String(255))
class BlockDeviceMapping(BASE, NovaBase):
    """Represents block device mapping that is defined by EC2."""
    __tablename__ = "block_device_mapping"
    __table_args__ = (
        Index('snapshot_id', 'snapshot_id'),
        Index('volume_id', 'volume_id'),
        Index('block_device_mapping_instance_uuid_device_name_idx',
              'instance_uuid', 'device_name'),
        Index('block_device_mapping_instance_uuid_volume_id_idx',
              'instance_uuid', 'volume_id'),
        Index('block_device_mapping_instance_uuid_idx', 'instance_uuid'),
        #TODO(sshturm) Should be dropped. `virtual_name` was dropped
        #in 186 migration,
        #Duplicates `block_device_mapping_instance_uuid_device_name_idx` index.
        Index("block_device_mapping_instance_uuid_virtual_name"
              "_device_name_idx", 'instance_uuid', 'device_name'),
    )
    id = Column(Integer, primary_key=True, autoincrement=True)

    instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
    # Owning Instance; join skips soft-deleted mappings.
    instance = relationship(Instance,
                            backref=backref('block_device_mapping'),
                            foreign_keys=instance_uuid,
                            primaryjoin='and_(BlockDeviceMapping.'
                                        'instance_uuid=='
                                        'Instance.uuid,'
                                        'BlockDeviceMapping.deleted=='
                                        '0)')

    # Where the device comes from / where it ends up (e.g. image, volume).
    source_type = Column(String(255))
    destination_type = Column(String(255))
    guest_format = Column(String(255))
    device_type = Column(String(255))
    disk_bus = Column(String(255))

    boot_index = Column(Integer)

    device_name = Column(String(255))

    # default=False for compatibility of the existing code.
    # With EC2 API,
    # default True for ami specified device.
    # default False for created with other timing.
    #TODO(sshturm) add default in db
    delete_on_termination = Column(Boolean, default=False)

    snapshot_id = Column(String(36))

    volume_id = Column(String(36))
    volume_size = Column(Integer)

    image_id = Column(String(36))

    # for no device to suppress devices.
    no_device = Column(Boolean)

    connection_info = Column(MediumText())
class IscsiTarget(BASE, NovaBase):
    """Represents an iscsi target for a given host."""
    __tablename__ = 'iscsi_targets'
    __table_args__ = (
        Index('iscsi_targets_volume_id_fkey', 'volume_id'),
        Index('iscsi_targets_host_idx', 'host'),
        Index('iscsi_targets_host_volume_id_deleted_idx', 'host', 'volume_id',
              'deleted')
    )
    id = Column(Integer, primary_key=True, nullable=False)
    # iSCSI target number on the host.
    target_num = Column(Integer)
    host = Column(String(255))
    volume_id = Column(String(36), ForeignKey('volumes.id'))
    # One-to-one back to the Volume (exposed as Volume.iscsi_target);
    # join skips soft-deleted targets.
    volume = relationship(Volume,
                          backref=backref('iscsi_target', uselist=False),
                          foreign_keys=volume_id,
                          primaryjoin='and_(IscsiTarget.volume_id==Volume.id,'
                                      'IscsiTarget.deleted==0)')
class SecurityGroupInstanceAssociation(BASE, NovaBase):
    """Join table associating security groups with instances."""
    __tablename__ = 'security_group_instance_association'
    __table_args__ = (
        Index('security_group_instance_association_instance_uuid_idx',
              'instance_uuid'),
    )
    id = Column(Integer, primary_key=True, nullable=False)
    security_group_id = Column(Integer, ForeignKey('security_groups.id'))
    instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
class SecurityGroup(BASE, NovaBase):
    """Represents a security group."""
    __tablename__ = 'security_groups'
    # NOTE(review): despite the 'uniq_' name this is a plain Index, not a
    # UniqueConstraint — uniqueness is apparently not enforced here.
    __table_args__ = (
        Index('uniq_security_groups0project_id0name0deleted', 'project_id',
              'name', 'deleted'),
    )
    id = Column(Integer, primary_key=True)

    name = Column(String(255))
    description = Column(String(255))
    user_id = Column(String(255))
    project_id = Column(String(255))

    # Many-to-many to Instance through the association table; both join
    # conditions filter out soft-deleted rows.
    instances = relationship(Instance,
                             secondary="security_group_instance_association",
                             primaryjoin='and_('
                                 'SecurityGroup.id == '
                                 'SecurityGroupInstanceAssociation.security_group_id,'
                                 'SecurityGroupInstanceAssociation.deleted == 0,'
                                 'SecurityGroup.deleted == 0)',
                             secondaryjoin='and_('
                                 'SecurityGroupInstanceAssociation.instance_uuid == Instance.uuid,'
                                 # (anthony) the condition below shouldn't be necessary now that the
                                 # association is being marked as deleted.  However, removing this
                                 # may cause existing deployments to choke, so I'm leaving it
                                 'Instance.deleted == 0)',
                             backref='security_groups')
class SecurityGroupIngressRule(BASE, NovaBase):
    """Represents a rule in a security group."""
    __tablename__ = 'security_group_rules'
    __table_args__ = ()
    id = Column(Integer, primary_key=True)

    parent_group_id = Column(Integer, ForeignKey('security_groups.id'))
    # Group this rule belongs to (exposed as SecurityGroup.rules).
    parent_group = relationship("SecurityGroup", backref="rules",
                                foreign_keys=parent_group_id,
                                primaryjoin='and_('
                                    'SecurityGroupIngressRule.parent_group_id == SecurityGroup.id,'
                                    'SecurityGroupIngressRule.deleted == 0)')

    protocol = Column(String(255))
    from_port = Column(Integer)
    to_port = Column(Integer)
    cidr = Column(types.CIDR())

    # Note: This is not the parent SecurityGroup. It's SecurityGroup we're
    # granting access for.
    group_id = Column(Integer, ForeignKey('security_groups.id'))
    grantee_group = relationship("SecurityGroup",
                                 foreign_keys=group_id,
                                 primaryjoin='and_('
                                     'SecurityGroupIngressRule.group_id == SecurityGroup.id,'
                                     'SecurityGroupIngressRule.deleted == 0)')
class SecurityGroupIngressDefaultRule(BASE, NovaBase):
    """Represents a default security group ingress rule."""
    __tablename__ = 'security_group_default_rules'
    __table_args__ = ()
    id = Column(Integer, primary_key=True, nullable=False)
    protocol = Column(String(5))  # "tcp", "udp" or "icmp"
    from_port = Column(Integer)
    to_port = Column(Integer)
    cidr = Column(types.CIDR())
class ProviderFirewallRule(BASE, NovaBase):
    """Represents a provider-level firewall rule."""
    __tablename__ = 'provider_fw_rules'
    __table_args__ = ()
    id = Column(Integer, primary_key=True, nullable=False)

    protocol = Column(String(5))  # "tcp", "udp", or "icmp"
    from_port = Column(Integer)
    to_port = Column(Integer)
    cidr = Column(types.CIDR())
class KeyPair(BASE, NovaBase):
    """Represents a public key pair for ssh."""
    __tablename__ = 'key_pairs'
    # Key names are unique per user among non-deleted rows.
    __table_args__ = (
        schema.UniqueConstraint("user_id", "name", "deleted",
                                name="uniq_key_pairs0user_id0name0deleted"),
    )
    id = Column(Integer, primary_key=True, nullable=False)

    name = Column(String(255))

    user_id = Column(String(255))

    fingerprint = Column(String(255))
    public_key = Column(MediumText())
class Migration(BASE, NovaBase):
    """Represents a running host-to-host migration."""
    __tablename__ = 'migrations'
    __table_args__ = (
        Index('migrations_instance_uuid_and_status_idx', 'instance_uuid',
              'status'),
        Index('migrations_by_host_nodes_and_status_idx', 'deleted',
              'source_compute', 'dest_compute', 'source_node', 'dest_node',
              'status'),
    )
    id = Column(Integer, primary_key=True, nullable=False)
    # NOTE(tr3buchet): the ____compute variables are instance['host']
    source_compute = Column(String(255))
    dest_compute = Column(String(255))
    # nodes are equivalent to a compute node's 'hypervisor_hostname'
    source_node = Column(String(255))
    dest_node = Column(String(255))
    # NOTE(tr3buchet): dest_host, btw, is an ip address
    dest_host = Column(String(255))
    # Flavor before/after the migration (internal instance_type ids).
    old_instance_type_id = Column(Integer())
    new_instance_type_id = Column(Integer())
    instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
    #TODO(_cerberus_): enum
    status = Column(String(255))

    # Instance being migrated; join skips soft-deleted instances.
    instance = relationship("Instance", foreign_keys=instance_uuid,
                            primaryjoin='and_(Migration.instance_uuid == '
                                        'Instance.uuid, Instance.deleted == '
                                        '0)')
class Network(BASE, NovaBase):
    """Represents a network."""
    __tablename__ = 'networks'
    __table_args__ = (
        schema.UniqueConstraint("vlan", "deleted",
                                name="uniq_networks0vlan0deleted"),
        Index('networks_bridge_deleted_idx', 'bridge', 'deleted'),
        Index('networks_host_idx', 'host'),
        Index('networks_project_id_deleted_idx', 'project_id', 'deleted'),
        Index('networks_uuid_project_id_deleted_idx', 'uuid',
              'project_id', 'deleted'),
        Index('networks_vlan_deleted_idx', 'vlan', 'deleted'),
        Index('networks_cidr_v6_idx', 'cidr_v6')
    )

    id = Column(Integer, primary_key=True, nullable=False)
    label = Column(String(255))

    # Whether network config is injected into the guest.
    injected = Column(Boolean, default=False)
    cidr = Column(types.CIDR())
    cidr_v6 = Column(types.CIDR())
    multi_host = Column(Boolean, default=False)

    gateway_v6 = Column(types.IPAddress())
    netmask_v6 = Column(types.IPAddress())
    netmask = Column(types.IPAddress())
    bridge = Column(String(255))
    bridge_interface = Column(String(255))
    gateway = Column(types.IPAddress())
    broadcast = Column(types.IPAddress())
    dns1 = Column(types.IPAddress())
    dns2 = Column(types.IPAddress())

    # VLAN id; unique among non-deleted networks (see __table_args__).
    vlan = Column(Integer)
    vpn_public_address = Column(types.IPAddress())
    vpn_public_port = Column(Integer)
    vpn_private_address = Column(types.IPAddress())
    dhcp_start = Column(types.IPAddress())

    rxtx_base = Column(Integer)

    project_id = Column(String(255))
    priority = Column(Integer)
    host = Column(String(255))  # , ForeignKey('hosts.id'))
    uuid = Column(String(36))
class VirtualInterface(BASE, NovaBase):
    """Represents a virtual interface on an instance."""
    __tablename__ = 'virtual_interfaces'
    # A MAC address is unique among live rows only; soft-deleted rows may
    # keep the same address ((address, deleted) unique together).
    __table_args__ = (
        schema.UniqueConstraint("address", "deleted",
                                name="uniq_virtual_interfaces0address0deleted"),
        Index('network_id', 'network_id'),
        Index('virtual_interfaces_instance_uuid_fkey', 'instance_uuid'),
    )
    id = Column(Integer, primary_key=True, nullable=False)
    address = Column(String(255))
    # Plain integer reference to networks.id (no DB-level foreign key).
    network_id = Column(Integer)
    instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
    uuid = Column(String(36))
# TODO(vish): can these both come from the same baseclass?
class FixedIp(BASE, NovaBase):
    """Represents a fixed ip for an instance."""
    __tablename__ = 'fixed_ips'
    __table_args__ = (
        schema.UniqueConstraint(
            "address", "deleted", name="uniq_fixed_ips0address0deleted"),
        Index('fixed_ips_virtual_interface_id_fkey', 'virtual_interface_id'),
        Index('network_id', 'network_id'),
        Index('address', 'address'),
        Index('fixed_ips_instance_uuid_fkey', 'instance_uuid'),
        Index('fixed_ips_host_idx', 'host'),
        Index('fixed_ips_network_id_host_deleted_idx', 'network_id', 'host',
              'deleted'),
        Index('fixed_ips_address_reserved_network_id_deleted_idx',
              'address', 'reserved', 'network_id', 'deleted'),
        Index('fixed_ips_deleted_allocated_idx', 'address', 'deleted',
              'allocated')
    )
    id = Column(Integer, primary_key=True)
    address = Column(types.IPAddress())
    network_id = Column(Integer)
    virtual_interface_id = Column(Integer)
    instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
    # associated means that a fixed_ip has its instance_uuid column set
    # allocated means that a fixed_ip has its virtual_interface_id column set
    #TODO(sshturm) add default in db
    allocated = Column(Boolean, default=False)
    # leased means dhcp bridge has leased the ip
    #TODO(sshturm) add default in db
    leased = Column(Boolean, default=False)
    #TODO(sshturm) add default in db
    reserved = Column(Boolean, default=False)
    host = Column(String(255))
    # Both relationships only join rows where neither side is soft-deleted.
    network = relationship(Network,
                           backref=backref('fixed_ips'),
                           foreign_keys=network_id,
                           primaryjoin='and_('
                               'FixedIp.network_id == Network.id,'
                               'FixedIp.deleted == 0,'
                               'Network.deleted == 0)')
    instance = relationship(Instance,
                            foreign_keys=instance_uuid,
                            primaryjoin='and_('
                                'FixedIp.instance_uuid == Instance.uuid,'
                                'FixedIp.deleted == 0,'
                                'Instance.deleted == 0)')
class FloatingIp(BASE, NovaBase):
    """Represents a floating ip that dynamically forwards to a fixed ip."""
    __tablename__ = 'floating_ips'
    __table_args__ = (
        schema.UniqueConstraint("address", "deleted",
                                name="uniq_floating_ips0address0deleted"),
        Index('fixed_ip_id', 'fixed_ip_id'),
        Index('floating_ips_host_idx', 'host'),
        Index('floating_ips_project_id_idx', 'project_id'),
        Index('floating_ips_pool_deleted_fixed_ip_id_project_id_idx',
              'pool', 'deleted', 'fixed_ip_id', 'project_id')
    )
    id = Column(Integer, primary_key=True)
    address = Column(types.IPAddress())
    # Plain integer reference to fixed_ips.id (no DB-level foreign key).
    fixed_ip_id = Column(Integer)
    project_id = Column(String(255))
    host = Column(String(255))  # , ForeignKey('hosts.id'))
    auto_assigned = Column(Boolean, default=False)
    #TODO(sshturm) add default in db
    pool = Column(String(255))
    interface = Column(String(255))
    # Joins only rows where neither side is soft-deleted.
    fixed_ip = relationship(FixedIp,
                            backref=backref('floating_ips'),
                            foreign_keys=fixed_ip_id,
                            primaryjoin='and_('
                                'FloatingIp.fixed_ip_id == FixedIp.id,'
                                'FloatingIp.deleted == 0,'
                                'FixedIp.deleted == 0)')
class DNSDomain(BASE, NovaBase):
    """Represents a DNS domain with availability zone or project info."""
    __tablename__ = 'dns_domains'
    __table_args__ = (
        Index('project_id', 'project_id'),
        Index('dns_domains_domain_deleted_idx', 'domain', 'deleted'),
    )
    # NOTE(review): unlike the other models here, `deleted` is a Boolean —
    # presumably overriding the base model's soft-delete column; confirm
    # against NovaBase before relying on 0/1 comparisons for this table.
    deleted = Column(Boolean, default=False)
    # The domain name itself is the primary key (no surrogate id column).
    domain = Column(String(255), primary_key=True)
    scope = Column(String(255))
    availability_zone = Column(String(255))
    project_id = Column(String(255))
class ConsolePool(BASE, NovaBase):
    """Represents pool of consoles on the same physical node."""
    __tablename__ = 'console_pools'
    # One pool per (host, console_type, compute_host) among live rows.
    __table_args__ = (
        schema.UniqueConstraint(
            "host", "console_type", "compute_host", "deleted",
            name="uniq_console_pools0host0console_type0compute_host0deleted"),
    )
    id = Column(Integer, primary_key=True)
    address = Column(types.IPAddress())
    username = Column(String(255))
    password = Column(String(255))
    console_type = Column(String(255))
    public_hostname = Column(String(255))
    host = Column(String(255))
    compute_host = Column(String(255))
class Console(BASE, NovaBase):
    """Represents a console session for an instance."""
    __tablename__ = 'consoles'
    __table_args__ = (
        Index('consoles_instance_uuid_idx', 'instance_uuid'),
    )
    id = Column(Integer, primary_key=True)
    instance_name = Column(String(255))
    instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
    password = Column(String(255))
    port = Column(Integer)
    pool_id = Column(Integer, ForeignKey('console_pools.id'))
    # The owning ConsolePool; pool.consoles lists all sessions in the pool.
    pool = relationship(ConsolePool, backref=backref('consoles'))
class InstanceMetadata(BASE, NovaBase):
    """Represents a user-provided metadata key/value pair for an instance."""
    __tablename__ = 'instance_metadata'
    __table_args__ = (
        Index('instance_metadata_instance_uuid_idx', 'instance_uuid'),
    )
    id = Column(Integer, primary_key=True)
    key = Column(String(255))
    value = Column(String(255))
    instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
    # Accessible from Instance via the "metadata" backref; only
    # non-soft-deleted metadata rows are joined.
    instance = relationship(Instance, backref="metadata",
                            foreign_keys=instance_uuid,
                            primaryjoin='and_('
                                'InstanceMetadata.instance_uuid == '
                                'Instance.uuid,'
                                'InstanceMetadata.deleted == 0)')
class InstanceSystemMetadata(BASE, NovaBase):
    """Represents a system-owned metadata key/value pair for an instance."""
    __tablename__ = 'instance_system_metadata'
    __table_args__ = ()
    id = Column(Integer, primary_key=True)
    key = Column(String(255), nullable=False)
    value = Column(String(255))
    instance_uuid = Column(String(36),
                           ForeignKey('instances.uuid'),
                           nullable=False)
    # Join condition built once at class scope and reused below; only
    # non-soft-deleted metadata rows are joined.
    primary_join = ('and_(InstanceSystemMetadata.instance_uuid == '
                    'Instance.uuid, InstanceSystemMetadata.deleted == 0)')
    instance = relationship(Instance, backref="system_metadata",
                            foreign_keys=instance_uuid,
                            primaryjoin=primary_join)
class InstanceTypeProjects(BASE, NovaBase):
    """Represent projects associated instance_types."""
    __tablename__ = "instance_type_projects"
    # A project may be associated with an instance type only once among
    # live rows.
    __table_args__ = (schema.UniqueConstraint(
        "instance_type_id", "project_id", "deleted",
        name="uniq_instance_type_projects0instance_type_id0project_id0deleted"
        ),
    )
    id = Column(Integer, primary_key=True)
    instance_type_id = Column(Integer, ForeignKey('instance_types.id'),
                              nullable=False)
    project_id = Column(String(255))
    instance_type = relationship(InstanceTypes, backref="projects",
                                 foreign_keys=instance_type_id,
                                 primaryjoin='and_('
                                 'InstanceTypeProjects.instance_type_id == InstanceTypes.id,'
                                 'InstanceTypeProjects.deleted == 0)')
class InstanceTypeExtraSpecs(BASE, NovaBase):
    """Represents additional specs as key/value pairs for an instance_type."""
    __tablename__ = 'instance_type_extra_specs'
    __table_args__ = (
        Index('instance_type_extra_specs_instance_type_id_key_idx',
              'instance_type_id', 'key'),
        # Each key may appear only once per instance type among live rows.
        schema.UniqueConstraint(
            "instance_type_id", "key", "deleted",
            name=("uniq_instance_type_extra_specs0"
                  "instance_type_id0key0deleted")
        ),
    )
    id = Column(Integer, primary_key=True)
    key = Column(String(255))
    value = Column(String(255))
    instance_type_id = Column(Integer, ForeignKey('instance_types.id'),
                              nullable=False)
    instance_type = relationship(InstanceTypes, backref="extra_specs",
                                 foreign_keys=instance_type_id,
                                 primaryjoin='and_('
                                 'InstanceTypeExtraSpecs.instance_type_id == InstanceTypes.id,'
                                 'InstanceTypeExtraSpecs.deleted == 0)')
class Cell(BASE, NovaBase):
    """Represents parent and child cells of this cell. Cells can
    have multiple parents and children, so there could be any number
    of entries with is_parent=True or False
    """
    __tablename__ = 'cells'
    __table_args__ = (schema.UniqueConstraint(
        "name", "deleted", name="uniq_cells0name0deleted"
        ),
    )
    id = Column(Integer, primary_key=True)
    # Name here is the 'short name' of a cell.  For instance: 'child1'
    name = Column(String(255))
    api_url = Column(String(255))
    transport_url = Column(String(255), nullable=False)
    # Scheduling weight parameters for this cell.
    weight_offset = Column(Float(), default=0.0)
    weight_scale = Column(Float(), default=1.0)
    is_parent = Column(Boolean())
class AggregateHost(BASE, NovaBase):
    """Represents a host that is member of an aggregate."""
    __tablename__ = 'aggregate_hosts'
    # A host may be a member of a given aggregate only once among live rows.
    __table_args__ = (schema.UniqueConstraint(
        "host", "aggregate_id", "deleted",
        name="uniq_aggregate_hosts0host0aggregate_id0deleted"
        ),
    )
    id = Column(Integer, primary_key=True, autoincrement=True)
    host = Column(String(255))
    aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False)
class AggregateMetadata(BASE, NovaBase):
    """Represents a metadata key/value pair for an aggregate."""
    __tablename__ = 'aggregate_metadata'
    __table_args__ = (
        # Each key may appear only once per aggregate among live rows.
        schema.UniqueConstraint("aggregate_id", "key", "deleted",
            name="uniq_aggregate_metadata0aggregate_id0key0deleted"
            ),
        Index('aggregate_metadata_key_idx', 'key'),
    )
    id = Column(Integer, primary_key=True)
    key = Column(String(255), nullable=False)
    value = Column(String(255), nullable=False)
    aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False)
class Aggregate(BASE, NovaBase):
    """Represents a cluster of hosts that exists in this zone."""
    __tablename__ = 'aggregates'
    __table_args__ = ()
    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(String(255))
    # Both relationships only load rows where neither the aggregate nor the
    # related record has been soft-deleted.
    _hosts = relationship(AggregateHost,
                          primaryjoin='and_('
                          'Aggregate.id == AggregateHost.aggregate_id,'
                          'AggregateHost.deleted == 0,'
                          'Aggregate.deleted == 0)')
    _metadata = relationship(AggregateMetadata,
                             primaryjoin='and_('
                             'Aggregate.id == AggregateMetadata.aggregate_id,'
                             'AggregateMetadata.deleted == 0,'
                             'Aggregate.deleted == 0)')

    def _extra_keys(self):
        # Extra attribute names to expose alongside the plain columns.
        return ['hosts', 'metadetails', 'availability_zone']

    @property
    def hosts(self):
        """Host names of all live members of this aggregate."""
        return [h.host for h in self._hosts]

    @property
    def metadetails(self):
        """Aggregate metadata as a plain key/value dict."""
        return {m.key: m.value for m in self._metadata}

    @property
    def availability_zone(self):
        """The aggregate's availability zone, or None if not set."""
        # Build the metadata dict once instead of twice (the original did
        # a membership test and a lookup on two separate dict builds).
        return self.metadetails.get('availability_zone')
class AgentBuild(BASE, NovaBase):
    """Represents an agent build."""
    __tablename__ = 'agent_builds'
    __table_args__ = (
        Index('agent_builds_hypervisor_os_arch_idx', 'hypervisor', 'os',
              'architecture'),
        # One build per (hypervisor, os, architecture) among live rows.
        schema.UniqueConstraint("hypervisor", "os", "architecture", "deleted",
            name="uniq_agent_builds0hypervisor0os0architecture0deleted"),
    )
    id = Column(Integer, primary_key=True)
    hypervisor = Column(String(255))
    os = Column(String(255))
    architecture = Column(String(255))
    version = Column(String(255))
    url = Column(String(255))
    # Checksum used to verify the downloaded build artifact.
    md5hash = Column(String(255))
class BandwidthUsage(BASE, NovaBase):
    """Cache for instance bandwidth usage data pulled from the hypervisor."""
    __tablename__ = 'bw_usage_cache'
    __table_args__ = (
        Index('bw_usage_cache_uuid_start_period_idx', 'uuid',
              'start_period'),
    )
    id = Column(Integer, primary_key=True, nullable=False)
    uuid = Column(String(36))
    mac = Column(String(255))
    # Start of the audit period this row covers.
    start_period = Column(DateTime, nullable=False)
    last_refreshed = Column(DateTime)
    # Cumulative bytes in/out plus the raw counter values last observed
    # (used to detect counter resets between refreshes).
    bw_in = Column(BigInteger)
    bw_out = Column(BigInteger)
    last_ctr_in = Column(BigInteger)
    last_ctr_out = Column(BigInteger)
class VolumeUsage(BASE, NovaBase):
    """Cache for volume usage data pulled from the hypervisor."""
    __tablename__ = 'volume_usage_cache'
    __table_args__ = ()
    id = Column(Integer, primary_key=True, nullable=False)
    volume_id = Column(String(36), nullable=False)
    instance_uuid = Column(String(36))
    project_id = Column(String(36))
    user_id = Column(String(36))
    availability_zone = Column(String(255))
    # 'tot_*' columns accumulate totals across attachments; 'curr_*'
    # columns track the current attachment only.
    tot_last_refreshed = Column(DateTime)
    tot_reads = Column(BigInteger, default=0)
    tot_read_bytes = Column(BigInteger, default=0)
    tot_writes = Column(BigInteger, default=0)
    tot_write_bytes = Column(BigInteger, default=0)
    curr_last_refreshed = Column(DateTime)
    curr_reads = Column(BigInteger, default=0)
    curr_read_bytes = Column(BigInteger, default=0)
    curr_writes = Column(BigInteger, default=0)
    curr_write_bytes = Column(BigInteger, default=0)
class S3Image(BASE, NovaBase):
    """Compatibility layer for the S3 image service talking to Glance."""
    __tablename__ = 's3_images'
    __table_args__ = ()
    # Maps an auto-incremented integer id to the image's Glance UUID.
    id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
    uuid = Column(String(36), nullable=False)
class VolumeIdMapping(BASE, NovaBase):
    """Compatibility layer for the EC2 volume service."""
    __tablename__ = 'volume_id_mappings'
    __table_args__ = ()
    # Maps an auto-incremented integer id to the volume's UUID.
    id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
    uuid = Column(String(36), nullable=False)
class SnapshotIdMapping(BASE, NovaBase):
    """Compatibility layer for the EC2 snapshot service."""
    __tablename__ = 'snapshot_id_mappings'
    __table_args__ = ()
    # Maps an auto-incremented integer id to the snapshot's UUID.
    id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
    uuid = Column(String(36), nullable=False)
class InstanceFault(BASE, NovaBase):
    """Represents a fault recorded for an instance (code, message, details)."""
    __tablename__ = 'instance_faults'
    __table_args__ = (
        Index('instance_faults_host_idx', 'host'),
        Index('instance_faults_instance_uuid_deleted_created_at_idx',
              'instance_uuid', 'deleted', 'created_at')
    )
    id = Column(Integer, primary_key=True, nullable=False)
    instance_uuid = Column(String(36),
                           ForeignKey('instances.uuid'))
    code = Column(Integer(), nullable=False)
    message = Column(String(255))
    # Long-form fault information (e.g. a traceback); MediumText allows
    # more than the 255-char limit of `message`.
    details = Column(MediumText())
    host = Column(String(255))
class InstanceAction(BASE, NovaBase):
    """Track client actions on an instance.
    The intention is that there will only be one of these per user request.  A
    lookup by (instance_uuid, request_id) should always return a single result.
    """
    __tablename__ = 'instance_actions'
    __table_args__ = (
        Index('instance_uuid_idx', 'instance_uuid'),
        Index('request_id_idx', 'request_id')
    )
    id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
    action = Column(String(255))
    instance_uuid = Column(String(36),
                           ForeignKey('instances.uuid'))
    request_id = Column(String(255))
    user_id = Column(String(255))
    project_id = Column(String(255))
    # Defaults to the row-creation time; finish_time stays NULL until the
    # action completes.
    start_time = Column(DateTime, default=timeutils.utcnow)
    finish_time = Column(DateTime)
    message = Column(String(255))
class InstanceActionEvent(BASE, NovaBase):
    """Track events that occur during an InstanceAction."""
    __tablename__ = 'instance_actions_events'
    __table_args__ = ()
    id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
    event = Column(String(255))
    action_id = Column(Integer, ForeignKey('instance_actions.id'))
    start_time = Column(DateTime, default=timeutils.utcnow)
    finish_time = Column(DateTime)
    result = Column(String(255))
    # Stored traceback text for failed events.
    traceback = Column(Text)
class InstanceIdMapping(BASE, NovaBase):
    """Compatibility layer for the EC2 instance service."""
    __tablename__ = 'instance_id_mappings'
    __table_args__ = (
        Index('ix_instance_id_mappings_uuid', 'uuid'),
    )
    # Maps an auto-incremented integer id to the instance's UUID.
    id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
    uuid = Column(String(36), nullable=False)
class TaskLog(BASE, NovaBase):
    """Audit log for background periodic tasks."""
    __tablename__ = 'task_log'
    __table_args__ = (
        # One log row per task name, host and audit period.
        schema.UniqueConstraint(
            'task_name', 'host', 'period_beginning', 'period_ending',
            name="uniq_task_log0task_name0host0period_beginning0period_ending"
        ),
        Index('ix_task_log_period_beginning', 'period_beginning'),
        Index('ix_task_log_host', 'host'),
        Index('ix_task_log_period_ending', 'period_ending'),
    )
    id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
    task_name = Column(String(255), nullable=False)
    state = Column(String(255), nullable=False)
    host = Column(String(255), nullable=False)
    # Audit period boundaries; both default to "now" at insert time.
    period_beginning = Column(DateTime, default=timeutils.utcnow,
                              nullable=False)
    period_ending = Column(DateTime, default=timeutils.utcnow,
                           nullable=False)
    message = Column(String(255), nullable=False)
    # Number of items processed and how many of them errored.
    task_items = Column(Integer(), default=0)
    errors = Column(Integer(), default=0)
class InstanceGroupMember(BASE, NovaBase):
    """Represents the members for an instance group."""
    __tablename__ = 'instance_group_member'
    __table_args__ = (
        Index('instance_group_member_instance_idx', 'instance_id'),
    )
    id = Column(Integer, primary_key=True, nullable=False)
    instance_id = Column(String(255))
    group_id = Column(Integer, ForeignKey('instance_groups.id'),
                      nullable=False)
class InstanceGroupPolicy(BASE, NovaBase):
    """Represents the policy type for an instance group."""
    __tablename__ = 'instance_group_policy'
    __table_args__ = (
        Index('instance_group_policy_policy_idx', 'policy'),
    )
    id = Column(Integer, primary_key=True, nullable=False)
    policy = Column(String(255))
    group_id = Column(Integer, ForeignKey('instance_groups.id'),
                      nullable=False)
class InstanceGroupMetadata(BASE, NovaBase):
    """Represents a key/value pair for an instance group."""
    __tablename__ = 'instance_group_metadata'
    __table_args__ = (
        Index('instance_group_metadata_key_idx', 'key'),
    )
    id = Column(Integer, primary_key=True, nullable=False)
    key = Column(String(255))
    value = Column(String(255))
    group_id = Column(Integer, ForeignKey('instance_groups.id'),
                      nullable=False)
class InstanceGroup(BASE, NovaBase):
    """Represents an instance group.
    A group will maintain a collection of instances and the relationship
    between them.
    """
    __tablename__ = 'instance_groups'
    __table_args__ = (
        schema.UniqueConstraint("uuid", "deleted",
                                name="uniq_instance_groups0uuid0deleted"),
    )
    id = Column(Integer, primary_key=True, autoincrement=True)
    user_id = Column(String(255))
    project_id = Column(String(255))
    uuid = Column(String(36), nullable=False)
    name = Column(String(255))
    # All three relationships only load rows where neither the group nor the
    # related record has been soft-deleted.
    _policies = relationship(InstanceGroupPolicy, primaryjoin='and_('
                             'InstanceGroup.id == InstanceGroupPolicy.group_id,'
                             'InstanceGroupPolicy.deleted == 0,'
                             'InstanceGroup.deleted == 0)')
    _metadata = relationship(InstanceGroupMetadata, primaryjoin='and_('
                             'InstanceGroup.id == InstanceGroupMetadata.group_id,'
                             'InstanceGroupMetadata.deleted == 0,'
                             'InstanceGroup.deleted == 0)')
    _members = relationship(InstanceGroupMember, primaryjoin='and_('
                            'InstanceGroup.id == InstanceGroupMember.group_id,'
                            'InstanceGroupMember.deleted == 0,'
                            'InstanceGroup.deleted == 0)')

    @property
    def policies(self):
        """Policy names of this group's live policy rows."""
        return [p.policy for p in self._policies]

    @property
    def metadetails(self):
        """Group metadata as a plain key/value dict."""
        # dict comprehension instead of dict(generator) — same result,
        # idiomatic and clearer.
        return {m.key: m.value for m in self._metadata}

    @property
    def members(self):
        """Instance ids of this group's live members."""
        return [m.instance_id for m in self._members]
class PciDevice(BASE, NovaBase):
    """
    Represents a PCI host device that can be passed through to instances.
    """
    __tablename__ = 'pci_devices'
    __table_args__ = (
        Index('ix_pci_devices_compute_node_id_deleted',
              'compute_node_id', 'deleted'),
        Index('ix_pci_devices_instance_uuid_deleted',
              'instance_uuid', 'deleted'),
        # A PCI address is unique per compute node among live rows.
        schema.UniqueConstraint(
            "compute_node_id", "address", "deleted",
            name="uniq_pci_devices0compute_node_id0address0deleted")
    )
    id = Column(Integer, primary_key=True)
    compute_node_id = Column(Integer, ForeignKey('compute_nodes.id'),
                             nullable=False)
    # physical address of device domain:bus:slot.func (0000:09:01.1)
    address = Column(String(12), nullable=False)
    vendor_id = Column(String(4), nullable=False)
    product_id = Column(String(4), nullable=False)
    dev_type = Column(String(8), nullable=False)
    dev_id = Column(String(255))
    # label is abstract device name, that is used to unify devices with the
    # same functionality with different addresses or host.
    label = Column(String(255), nullable=False)
    status = Column(String(36), nullable=False)
    extra_info = Column(Text)
    # No DB-level FK here (unlike other instance_uuid columns in this file).
    instance_uuid = Column(String(36))
    instance = relationship(Instance, backref="pci_devices",
                            foreign_keys=instance_uuid,
                            primaryjoin='and_('
                            'PciDevice.instance_uuid == Instance.uuid,'
                            'PciDevice.deleted == 0)')
| |
"""tests the code found in the documentation
Any code changed in here needs to be updated in their place in the
documentation and vice versa, since we are copy-pasting code between its
occurrence in the documentation and the tests.
In general, try to separate tests to one test per chunk of interdependent code
"""
import copy
from indra.statements import Event, Concept, Influence, Evidence
from nose.plugins.attrib import attr
from unittest import skip
def _get_gene_network_stmts():
    """Build a GeneNetwork for H2AX and return its statements."""
    from indra.tools.gene_network import GeneNetwork
    network = GeneNetwork(['H2AX'])
    return network.get_statements()
# Module-level fixture: fetched once at import time and shared by the
# tests below so the (slow) network lookup runs only once.
gn_stmts = _get_gene_network_stmts()
# CODE IN README.md #
# From stmt assembly pipeline description in README.md
def test_readme_pipeline():
    """Mirror of the statement-assembly pipeline snippet in README.md."""
    stmts = gn_stmts  # Added only here, not in docs
    from indra.tools import assemble_corpus as ac
    stmts = ac.filter_no_hypothesis(stmts)
    stmts = ac.map_grounding(stmts)
    stmts = ac.filter_grounded_only(stmts)
    stmts = ac.filter_human_only(stmts)
    stmts = ac.map_sequence(stmts)
    stmts = ac.run_preassembly(stmts, return_toplevel=False)
    stmts = ac.filter_belief(stmts, 0.8)
    assert stmts, 'Update example to yield statements list of non-zero length'
# From 1st example under "Using INDRA"
def test_readme_using_indra1():
    """Mirror of the 1st "Using INDRA" example: NL text -> PySB model."""
    from indra.sources import trips
    from indra.assemblers.pysb import PysbAssembler
    pa = PysbAssembler()
    # Process a natural language description of a mechanism
    trips_processor = trips.process_text(
        'MEK2 phosphorylates ERK1 at Thr-202 and Tyr-204')
    # Collect extracted mechanisms in PysbAssembler
    pa.add_statements(trips_processor.statements)
    # Assemble the model
    model = pa.make_model(policies='two_step')
    assert model
# From 2nd example under "Using INDRA"
@attr('notravis')  # This test takes 10+ minutes, stalling Travis
def test_readme_using_indra2():
    """Mirror of the 2nd "Using INDRA" example: process a PMC article."""
    from indra.sources import reach
    reach_processor = reach.process_pmc('PMC8511698', url=reach.local_nxml_url)
    assert reach_processor.statements
# From 3rd example under "Using INDRA"
@attr('slow', 'notravis')
def test_readme_using_indra3():
    """Mirror of the 3rd "Using INDRA" example: read recent BRAF abstracts."""
    from indra.sources import reach
    from indra.literature import pubmed_client
    # Search for 10 most recent abstracts in PubMed on 'BRAF'
    pmids = pubmed_client.get_ids('BRAF', retmax=10)
    all_statements = []
    for pmid in pmids:
        abs = pubmed_client.get_abstract(pmid)
        if abs is not None:
            reach_processor = reach.process_text(abs, url=reach.local_text_url)
            if reach_processor is not None:
                all_statements += reach_processor.statements
    assert len(all_statements) > 0
# From 4th example under "Using INDRA"
@attr('slow')
def test_readme_using_indra4():
    """Mirror of the 4th "Using INDRA" example: BEL neighborhood query."""
    from indra.sources import bel
    # Process the neighborhood of BRAF and MAP2K1
    bel_processor = bel.process_pybel_neighborhood(['BRAF', 'MAP2K1'])
    assert bel_processor.statements
# From 5th example under "Using INDRA"
@attr('slow')
def test_readme_using_indra5():
    """Mirror of the 5th "Using INDRA" example: BioPAX paths-from-to query."""
    from indra.sources import biopax
    # Process the neighborhood of BRAF and MAP2K1
    biopax_processor = biopax.process_pc_pathsfromto(['BRAF', 'RAF1'],
                                                     ['MAP2K1', 'MAP2K2'])
    assert biopax_processor.statements
# CODE IN nl_modeling.rst #
def test_nl_modeling():
    """Mirror of all code chunks in nl_modeling.rst, run back to back."""
    # 1 code chunk
    from indra.sources import trips
    model_text = 'MAP2K1 phosphorylates MAPK1 and DUSP6 dephosphorylates MAPK1.'
    tp = trips.process_text(model_text)
    # 2nd code chunk
    for st in tp.statements:
        assert st.evidence[0].text  # Replaces a print statement in the doc
    # 3rd code chunk
    from indra.assemblers.pysb import PysbAssembler
    pa = PysbAssembler()
    pa.add_statements(tp.statements)
    pa.make_model(policies='two_step')
    # 4th code chunk
    for monomer in pa.model.monomers:
        assert monomer  # This replaces a print statements in the doc
    # 5th code chunk
    for rule in pa.model.rules:
        assert rule  # This replaces a print statements in the doc
    # 6th code chunk
    for parameter in pa.model.parameters:
        assert parameter  # This replaces a print statements in the doc
    # 7th code chunk
    for annotation in pa.model.annotations:
        assert annotation  # This replaces a print statements in the doc
    # 8th code chunk (this code is currently in a commented out section)
    pa.set_context('A375_SKIN')
    for monomer_pattern, parameter in pa.model.initial_conditions:
        assert monomer_pattern
        assert parameter.value
    # 9th code chunk
    _ = pa.export_model('sbml')
    assert _
    _ = pa.export_model('bngl')
    assert _
    # 10th code chunk
    # pa.export_model('sbml', 'example_model.sbml') # Don't save file
# CODE IN gene_network.rst
@attr('slow', 'notravis')
def test_gene_network():
    """Mirror of the code chunks in gene_network.rst, run back to back.

    Fixes the misspelled assertion messages ('conatins' -> 'contains') in
    chunk 9; everything else is kept in sync with the documentation.
    """
    # Chunk 1: this is tested in _get_gene_network_stmts
    # from indra.tools.gene_network import GeneNetwork
    # gn = GeneNetwork(['H2AX'])
    # biopax_stmts = gn.get_biopax_stmts()
    # bel_stmts = gn.get_bel_stmts()
    # Chunk 2
    from indra import literature
    pmids = literature.pubmed_client.get_ids_for_gene('H2AX')
    # Chunk 3
    from indra import literature
    paper_contents = {}
    for pmid in pmids:
        content, content_type = literature.get_full_text(pmid, 'pmid')
        if content_type == 'abstract':
            paper_contents[pmid] = content
        if len(paper_contents) == 5:  # Is 10 in actual code
            break
    # Chunk 4
    from indra.sources import reach
    literature_stmts = []
    for pmid, content in paper_contents.items():
        rp = reach.process_text(content, url=reach.local_text_url)
        literature_stmts += rp.statements
    print('Got %d statements' % len(literature_stmts))
    assert literature_stmts  # replaces a print statements
    # Chunk 6
    from indra.tools import assemble_corpus as ac
    # stmts = biopax_stmts + bel_stmts + literature_stmts  # tested elsewhere
    stmts = gn_stmts + literature_stmts  # Added instead of above line
    stmts = ac.map_grounding(stmts)
    stmts = ac.map_sequence(stmts)
    stmts = ac.run_preassembly(stmts)
    assert stmts
    # Chunk 7
    from indra.assemblers.cx import CxAssembler
    from indra.databases import ndex_client
    cxa = CxAssembler(stmts)
    cx_str = cxa.make_model()
    assert cx_str
    # Chunk 8
    # ndex_cred = {'user': 'myusername', 'password': 'xxx'}
    # network_id = ndex_client.create_network(cx_str, ndex_cred)
    # print(network_id)
    # Chunk 9
    from indra.assemblers.indranet import IndraNetAssembler
    indranet_assembler = IndraNetAssembler(statements=stmts)
    indranet = indranet_assembler.make_model()
    assert len(indranet.nodes) > 0, 'indranet contains no nodes'
    assert len(indranet.edges) > 0, 'indranet contains no edges'
    # Chunk 10
    import networkx as nx
    paths = nx.single_source_shortest_path(G=indranet, source='H2AX',
                                           cutoff=1)
    assert paths
    # Chunk 11
    from indra.assemblers.pysb import PysbAssembler
    pysb = PysbAssembler(statements=stmts)
    pysb_model = pysb.make_model()
    assert pysb_model
# CODE IN getting_started.rst
def test_getting_started1_2():
    """Mirror of chunks 1 & 2 in getting_started.rst (import check only)."""
    # Chunks 1 & 2
    from indra.sources import bel
    from indra.assemblers.pysb import PysbAssembler
    assert bel
    assert PysbAssembler
def test_getting_started3():
    """Mirror of chunk 3 in getting_started.rst: TRIPS on one sentence."""
    # Chunk 3
    from indra.sources import trips
    sentence = 'MAP2K1 phosphorylates MAPK3 at Thr-202 and Tyr-204'
    trips_processor = trips.process_text(sentence)
    assert trips_processor.statements
@skip('Same as test_readme_using_indra2')
def test_getting_started4():
    """Mirror of chunk 4 in getting_started.rst (skipped: duplicate test)."""
    # Chunk 4
    from indra.sources import reach
    reach_processor = reach.process_pmc('3717945')
    assert reach_processor.statements
@attr('slow')
def test_getting_started5():
    """Mirror of chunk 5 in getting_started.rst: BEL neighborhood query."""
    # Chunk 5
    from indra.sources import bel
    bel_processor = bel.process_pybel_neighborhood(['KRAS', 'BRAF'])
    assert bel_processor.statements
def test_getting_started6():
    """Mirror of chunk 6 in getting_started.rst: build a statement by hand."""
    # Chunk 6
    from indra.statements import Phosphorylation, Agent
    braf = Agent('BRAF')
    map2k1 = Agent('MAP2K1')
    stmt = Phosphorylation(braf, map2k1)
    assert stmt
@attr('notravis')
def test_getting_started7_8():
    """Mirror of chunks 7 & 8 in getting_started.rst: PySB + SBML export."""
    # Chunk 7
    stmts = gn_stmts  # Added only in this test, not in docs
    from indra.assemblers.pysb import PysbAssembler
    pa = PysbAssembler()
    pa.add_statements(stmts)
    model = pa.make_model()
    assert model
    # Chunk 8
    sbml_model = pa.export_model('sbml')
    assert sbml_model
def test_getting_started9_10():
    """Mirror of chunks 9-11 in getting_started.rst: IndraNet assembly.

    Fixes the misspelled assertion message ('conatins' -> 'contains') in
    chunk 11; everything else is kept in sync with the documentation.
    """
    # Chunk 9
    # pa.export_model('sbml', file_name='model.sbml')
    # Chunk 10
    from indra.assemblers.indranet import IndraNetAssembler
    indranet_assembler = IndraNetAssembler(statements=gn_stmts)
    indranet = indranet_assembler.make_model(method='df')
    assert len(indranet.nodes) > 0, 'indranet contains no nodes'
    assert len(indranet.edges) > 0, 'indranet contains no edges'
    # Chunk 11
    signed_graph = indranet.to_signed_graph()
    assert len(signed_graph.nodes) > 0, 'signed graph contains no nodes'
    assert len(signed_graph.edges) > 0, 'signed graph contains no edges'
| |
# BenchExec is a framework for reliable benchmarking.
# This file is part of BenchExec.
#
# Copyright (C) 2007-2015 Dirk Beyer
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains functions for computing assignments of resources to runs.
"""
# prepare for Python 3
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import itertools
import logging
import math
import os
import sys
from benchexec import cgroups
from benchexec import util
# Public API of this module.
__all__ = [
    'check_memory_size',
    'get_cpu_cores_per_run',
    'get_memory_banks_per_run',
    'get_cpu_package_for_core',
]
def get_cpu_cores_per_run(coreLimit, num_of_threads, my_cgroups, coreSet=None):
    """Compute an assignment of CPU cores to parallel benchmark runs.

    Each run receives its own cores, with no core shared between runs.
    On hyper-threaded machines the method tries to keep different runs off
    the same physical core (not guaranteed when the number of parallel runs
    is too high to avoid it).  On multi-CPU machines it avoids splitting a
    run across CPUs when a run fits on one CPU, and tries to spread runs
    evenly over the available CPUs.  Some theoretically possible layouts
    (e.g. three 10-core runs on two 16-core CPUs) are deliberately not
    implemented, since they would be unfair anyway.

    The candidate cores are read from the cgroup file system, so the
    assignment is restricted to cores the current process may use.
    Asymmetric topologies (e.g. 3 cores on one CPU, 5 on another) are not
    supported.

    @param coreLimit: the number of cores for each run
    @param num_of_threads: the number of parallel benchmark executions
    @param my_cgroups: the cgroup hierarchy to read the allowed cpuset from
    @param coreSet: the list of CPU cores identifiers provided by a user,
        None makes benchexec using all cores
    @return a list of lists, where each inner list contains the cores for one run
    """
    try:
        # Cores the current process is allowed to use (from the cpuset cgroup).
        allCpus = util.parse_int_list(my_cgroups.get_value(cgroups.CPUSET, 'cpus'))

        # Restrict to the user-provided core set, if one was given.
        if coreSet:
            invalid_cores = sorted(set(coreSet).difference(set(allCpus)))
            if invalid_cores:
                raise ValueError("The following provided CPU cores are not available: {}".format(', '.join(map(str, invalid_cores))))
            allCpus = [core for core in allCpus if core in coreSet]
        logging.debug("List of available CPU cores is %s.", allCpus)

        # Group cores by the CPU ("physical package") they belong to.
        cores_of_package = collections.defaultdict(list)
        for core in allCpus:
            cores_of_package[get_cpu_package_for_core(core)].append(core)
        logging.debug("Physical packages of cores are %s.", cores_of_package)

        # Hyper-threading topology: for each core, the sibling cores that
        # share its physical core (the core itself is included).
        siblings_of_core = {
            core: util.parse_int_list(util.read_file(
                '/sys/devices/system/cpu/cpu{0}/topology/thread_siblings_list'.format(core)))
            for core in allCpus}
        logging.debug("Siblings of cores are %s.", siblings_of_core)
    except ValueError as e:
        sys.exit("Could not read CPU information from kernel: {0}".format(e))
    return _get_cpu_cores_per_run0(coreLimit, num_of_threads, allCpus, cores_of_package, siblings_of_core)
def _get_cpu_cores_per_run0(coreLimit, num_of_threads, allCpus, cores_of_package, siblings_of_core):
"""This method does the actual work of _get_cpu_cores_per_run
without reading the machine architecture from the file system
in order to be testable. For description, c.f. above.
Note that this method might change the input parameters!
Do not call it directly, call getCpuCoresPerRun()!
@param allCpus: the list of all available cores
@param cores_of_package: a mapping from package (CPU) ids to lists of cores that belong to this CPU
@param siblings_of_core: a mapping from each core to a list of sibling cores including the core itself (a sibling is a core sharing the same physical core)
"""
# First, do some checks whether this algorithm has a chance to work.
if coreLimit > len(allCpus):
sys.exit("Cannot run benchmarks with {0} CPU cores, only {1} CPU cores available.".format(coreLimit, len(allCpus)))
if coreLimit * num_of_threads > len(allCpus):
sys.exit("Cannot run {0} benchmarks in parallel with {1} CPU cores each, only {2} CPU cores available. Please reduce the number of threads to {3}.".format(num_of_threads, coreLimit, len(allCpus), len(allCpus) // coreLimit))
package_size = None # Number of cores per package
for package, cores in cores_of_package.items():
if package_size is None:
package_size = len(cores)
elif package_size != len(cores):
sys.exit("Asymmetric machine architecture not supported: CPU package {0} has {1} cores, but other package has {2} cores.".format(package, len(cores), package_size))
core_size = None # Number of threads per core
for core, siblings in siblings_of_core.items():
if core_size is None:
core_size = len(siblings)
elif core_size != len(siblings):
sys.exit("Asymmetric machine architecture not supported: CPU core {0} has {1} siblings, but other core has {2} siblings.".format(core, len(siblings), core_size))
all_cpus_set = set(allCpus)
for core, siblings in siblings_of_core.items():
siblings_set = set(siblings)
if not siblings_set.issubset(all_cpus_set):
sys.exit("Core assignment is unsupported because siblings {0} of core {1} are not usable. Please always make all virtual cores of a physical core available.".format(siblings_set.difference(all_cpus_set), core))
# Second, compute some values we will need.
package_count = len(cores_of_package)
packages = sorted(cores_of_package.keys())
coreLimit_rounded_up = int(math.ceil(coreLimit / core_size) * core_size)
assert coreLimit <= coreLimit_rounded_up < (coreLimit + core_size)
packages_per_run = int(math.ceil(coreLimit_rounded_up / package_size))
if packages_per_run > 1 and packages_per_run * num_of_threads > package_count:
sys.exit("Cannot split runs over multiple CPUs and at the same time assign multiple runs to the same CPU. Please reduce the number of threads to {0}.".format(package_count // packages_per_run))
runs_per_package = int(math.ceil(num_of_threads / package_count))
assert packages_per_run == 1 or runs_per_package == 1
if packages_per_run == 1 and runs_per_package * coreLimit > package_size:
sys.exit("Cannot run {} benchmarks with {} cores on {} CPUs with {} cores, because runs would need to be split across multiple CPUs. Please reduce the number of threads.".format(num_of_threads, coreLimit, package_count, package_size))
# Warn on misuse of hyper-threading
need_HT = False
if packages_per_run == 1:
# Checking whether the total amount of usable physical cores is not enough,
# there might be some cores we cannot use, e.g. when scheduling with coreLimit=3 on quad-core machines.
# Thus we check per package.
assert coreLimit * runs_per_package <= package_size
if coreLimit_rounded_up * runs_per_package > package_size:
need_HT = True
logging.warning("The number of threads is too high and hyper-threading sibling cores need to be split among different runs, which makes benchmarking unreliable. Please reduce the number of threads to %s.", (package_size // coreLimit_rounded_up) * package_count)
else:
if coreLimit_rounded_up * num_of_threads > len(allCpus):
assert coreLimit_rounded_up * runs_per_package > package_size
need_HT = True
logging.warning("The number of threads is too high and hyper-threading sibling cores need to be split among different runs, which makes benchmarking unreliable. Please reduce the number of threads to %s.", len(allCpus) // coreLimit_rounded_up)
logging.debug("Going to assign at most %s runs per package, each one using %s cores and blocking %s cores on %s packages.", runs_per_package, coreLimit, coreLimit_rounded_up, packages_per_run)
# Third, do the actual core assignment.
result = []
used_cores = set()
for run in range(num_of_threads):
# this calculation ensures that runs are split evenly across packages
start_package = (run * packages_per_run) % package_count
cores = []
cores_with_siblings = set()
for package_nr in range(start_package, start_package + packages_per_run):
assert len(cores) < coreLimit
# Some systems have non-contiguous package numbers,
# so we take the i'th package out of the list of available packages.
# On normal system this is the identity mapping.
package = packages[package_nr]
for core in cores_of_package[package]:
if core not in cores:
cores.extend(c for c in siblings_of_core[core] if not c in used_cores)
if len(cores) >= coreLimit:
break
cores_with_siblings.update(cores)
cores = cores[:coreLimit] # shrink if we got more cores than necessary
# remove used cores such that we do not try to use them again
cores_of_package[package] = [core for core in cores_of_package[package] if core not in cores]
assert len(cores) == coreLimit, "Wrong number of cores for run {} of {} - previous results: {}, remaining cores per package: {}, current cores: {}".format(run+1, num_of_threads, result, cores_of_package, cores)
blocked_cores = cores if need_HT else cores_with_siblings
assert not used_cores.intersection(blocked_cores)
used_cores.update(blocked_cores)
result.append(sorted(cores))
assert len(result) == num_of_threads
assert all(len(cores) == coreLimit for cores in result)
assert len(set(itertools.chain(*result))) == num_of_threads * coreLimit, "Cores are not uniquely assigned to runs: " + result
logging.debug("Final core assignment: %s.", result)
return result
def get_memory_banks_per_run(coreAssignment, cgroups):
    """Get an assignment of memory banks to runs that fits to the given coreAssignment,
    i.e., no run is allowed to use memory that is not local (on the same NUMA node)
    to one of its CPU cores."""
    try:
        # Banks the cpuset cgroup controller permits us to use.
        available_banks = set(cgroups.read_allowed_memory_banks())
        assignment = []
        for run_cores in coreAssignment:
            # Collect the NUMA-local banks of every core of this run.
            local_banks = set()
            for core in run_cores:
                core_dir = '/sys/devices/system/cpu/cpu{0}/'.format(core)
                local_banks.update(_get_memory_banks_listed_in_dir(core_dir))
            usable_banks = sorted(local_banks & available_banks)
            logging.debug("Memory banks for cores %s are %s, of which we can use %s.", run_cores, list(local_banks), usable_banks)
            assignment.append(usable_banks)

        assert len(assignment) == len(coreAssignment)
        if any(assignment) and os.path.isdir('/sys/devices/system/node/'):
            return assignment
        # All runs get the empty list of memory regions
        # because this system has no NUMA support
        return None
    except ValueError as e:
        sys.exit("Could not read memory information from kernel: {0}".format(e))
def _get_memory_banks_listed_in_dir(path):
"""Get all memory banks the kernel lists in a given directory.
Such a directory can be /sys/devices/system/node/ (contains all memory banks)
or /sys/devices/system/cpu/cpu*/ (contains all memory banks on the same NUMA node as that core)."""
# Such directories contain entries named "node<id>" for each memory bank
return [int(entry[4:]) for entry in os.listdir(path) if entry.startswith('node')]
def check_memory_size(memLimit, num_of_threads, memoryAssignment, my_cgroups):
    """Check whether the desired amount of parallel benchmarks fits in the memory.
    Implemented are checks for memory limits via cgroup controller "memory" and
    memory bank restrictions via cgroup controller "cpuset",
    as well as whether the system actually has enough memory installed.
    @param memLimit: the memory limit in bytes per run
    @param num_of_threads: the number of parallel benchmark executions
    @param memoryAssignment: the allocation of memory banks to runs (if not present, all banks are assigned to all runs)
    @param my_cgroups: the cgroup hierarchy of the current process, used to read memory and cpuset limits
    """
    try:
        # Check amount of memory allowed via cgroups.
        def check_limit(actualLimit):
            # Exit if one run (or all runs together) would exceed the cgroup limit.
            if actualLimit < memLimit:
                sys.exit("Cgroups allow only {} bytes of memory to be used, cannot execute runs with {} bytes of memory.".format(actualLimit, memLimit))
            elif actualLimit < memLimit * num_of_threads:
                sys.exit("Cgroups allow only {} bytes of memory to be used, not enough for {} benchmarks with {} bytes each. Please reduce the number of threads".format(actualLimit, num_of_threads, memLimit))
        if not os.path.isdir('/sys/devices/system/node/'):
            logging.debug("System without NUMA support in Linux kernel, ignoring memory assignment.")
            return
        if cgroups.MEMORY in my_cgroups:
            # We use the entries hierarchical_*_limit in memory.stat and not memory.*limit_in_bytes
            # because the former may be lower if memory.use_hierarchy is enabled.
            for key, value in my_cgroups.get_key_value_pairs(cgroups.MEMORY, 'stat'):
                if key == 'hierarchical_memory_limit' or key == 'hierarchical_memsw_limit':
                    check_limit(int(value))
        # Get list of all memory banks, either from memory assignment or from system.
        if not memoryAssignment:
            if cgroups.CPUSET in my_cgroups:
                allMems = my_cgroups.read_allowed_memory_banks()
            else:
                allMems = _get_memory_banks_listed_in_dir('/sys/devices/system/node/')
            memoryAssignment = [allMems] * num_of_threads # "fake" memory assignment: all threads on all banks
        else:
            allMems = set(itertools.chain(*memoryAssignment))
        # Size in bytes of each relevant memory bank.
        memSizes = dict((mem, _get_memory_bank_size(mem)) for mem in allMems)
    except ValueError as e:
        sys.exit("Could not read memory information from kernel: {0}".format(e))
    # Check whether enough memory is allocatable on the assigned memory banks.
    # As the sum of the sizes of the memory banks is at most the total size of memory in the system,
    # and we do this check always even if the banks are not restricted,
    # this also checks whether the system has actually enough memory installed.
    usedMem = collections.Counter()
    for mems_of_run in memoryAssignment:
        totalSize = sum(memSizes[mem] for mem in mems_of_run)
        if totalSize < memLimit:
            sys.exit("Memory banks {} do not have enough memory for one run, only {} bytes available.".format(mems_of_run, totalSize))
        # Accumulate demand per bank combination so oversubscription across runs is detected.
        usedMem[tuple(mems_of_run)] += memLimit
        if usedMem[tuple(mems_of_run)] > totalSize:
            sys.exit("Memory banks {} do not have enough memory for all runs, only {} bytes available. Please reduce the number of threads.".format(mems_of_run, totalSize))
def _get_memory_bank_size(memBank):
    """Get the size of a memory bank in bytes."""
    fileName = '/sys/devices/system/node/node{0}/meminfo'.format(memBank)
    with open(fileName) as f:
        for line in f:
            if 'MemTotal' not in line:
                continue
            size = line.split(':')[1].strip()
            if size[-3:] != ' kB':
                raise ValueError('"{}" in file {} is not a memory size.'.format(size, fileName))
            size = int(size[:-3]) * 1024  # kernel uses KiB but names them kB, convert to Byte
            logging.debug("Memory bank %s has size %s bytes.", memBank, size)
            return size
    raise ValueError('Failed to read total memory from {}.'.format(fileName))
def get_cpu_package_for_core(core):
    """Get the number of the physical package (socket) a core belongs to."""
    topology_file = '/sys/devices/system/cpu/cpu{0}/topology/physical_package_id'.format(core)
    return int(util.read_file(topology_file))
def get_cores_of_same_package_as(core):
    """Return the list of all cores on the same physical package as the given core."""
    siblings_file = '/sys/devices/system/cpu/cpu{0}/topology/core_siblings_list'.format(core)
    return util.parse_int_list(util.read_file(siblings_file))
| |
"""
pyggle.image
This library (PYGGEL) is licensed under the LGPL by Matthew Roe and PYGGEL contributors.
The image module contains classes to load and render both 2d and 3d (billboarded) images.
"""
import time
from .include import *
from . import view, data, misc
from PIL import Image as pilImage
class Image(object):
    """A 2d image object, backed by an OpenGL texture and display list."""
    def __init__(self, filename, pos=(0,0),
                 rotation=(0,0,0), scale=1,
                 colorize=(1,1,1,1)):
        """Create the Image
        filename must be a filename to an image file, a pygame.Surface object or an image.Image to copy
        pos is the 2d position of the image
        rotation is the 3d rotation of the image
        scale is the scale factor for the image
        colorize is the color of the image"""
        view.require_init()
        self.filename = filename
        self.pos = pos
        if type(filename) is type(""):
            # A path on disk: load and compile it.
            self._load_file()
        elif isinstance(filename, type(self)):
            # Another Image: share its texture and display list (cheap copy).
            self._pimage = filename._pimage
            self._pimage2 = filename._pimage2
            self._image_size = filename._image_size
            self._altered_image_size = filename._altered_image_size
            self.rect = self._pimage.get_rect()
            self.to_be_blitted = list(filename.to_be_blitted)
            self.display_list = filename.display_list
            self.texture = filename.texture
            self.offset = filename.offset
            loaded = True  # NOTE(review): assigned but never read
        else:
            # Otherwise assume a pygame.Surface.
            self.compile_from_surface(filename)
            self.filename = None
            loaded = True  # NOTE(review): assigned but never read
        # NOTE(review): this unconditionally resets the blit list that the
        # Image-copy branch just populated - looks unintended, confirm before changing.
        self.to_be_blitted = []
        self.rotation = rotation
        self.scale = scale
        self.colorize = colorize
        self.visible = True
        self.outline = False
        self.outline_size = 4
        self.outline_color=(1,0,0)
    def copy(self):
        """Return a copy of the image - sharing the same data.DisplayList"""
        return Image(self, self.pos, self.rotation, self.scale, self.colorize)
    def _get_next_biggest(self, x, y):
        """Return next largest power of 2 size for an image (minimum 16x16)."""
        nw = 16
        nh = 16
        while nw < x:
            nw *= 2
        while nh < y:
            nh *= 2
        return nw, nh
    def test_on_screen(self):
        """Return whether the image is onscreen or not"""
        r = pygame.rect.Rect(self.pos, self._image_size)
        return view.screen.rect2d.colliderect(r)
    def _load_file(self):
        """Load an image file from disk, pad it to a power-of-two surface,
        upload it as a texture and compile the rendering display list."""
        self._pimage = pygame.image.load(self.filename)
        sx, sy = self._pimage.get_size()
        xx, xy = self._get_next_biggest(sx, sy)
        # Blit onto a power-of-two sized surface (required by older GL drivers).
        self._pimage2 = pygame.Surface((xx, xy)).convert_alpha()
        self._pimage2.fill((0,0,0,0))
        self._pimage2.blit(self._pimage, (0,0))
        # Flip vertically: pygame and OpenGL use opposite y directions.
        self._pimage2 = pygame.transform.flip(self._pimage2, 0, 1)
        self._image_size = (sx, sy)
        self._altered_image_size = (xx, xy)
        self._texturize(self._pimage2)
        self.rect = self._pimage.get_rect()
        self._compile()
    def compile_from_surface(self, surf):
        """Prepare surf to be stored in a Texture and DisplayList"""
        self._pimage = surf
        sx, sy = self._pimage.get_size()
        xx, xy = self._get_next_biggest(sx, sy)
        self._pimage2 = pygame.Surface((xx, xy)).convert_alpha()
        self._pimage2.fill((0,0,0,0))
        self._pimage2.blit(self._pimage, (0,0))
        # Flip vertically for OpenGL texture coordinates.
        self._pimage2 = pygame.transform.flip(self._pimage2, 0, 1)
        self._image_size = (sx, sy)
        self._altered_image_size = (xx, xy)
        self.rect = self._pimage.get_rect()
        self._texturize(self._pimage2)
        self._compile()
    def _texturize(self, image):
        """Bind image to a data.Texture"""
        self.texture = data.Texture(image)
    def _compile(self):
        """Compile the Image into a data.DisplayList: a single textured quad
        centered on the image's offset (half width/height)."""
        self.offset = self.get_width()/2, self.get_height()/2
        self.rect.center = self.offset[0] + self.pos[0], self.offset[1] + self.pos[1]
        self.display_list = data.DisplayList()
        self.display_list.begin()
        off = self.offset
        l = -off[0]
        r = off[0]
        t = -off[1]
        b = off[1]
        # Texture coords cover only the used part of the padded power-of-two surface.
        w = self.get_width()*1.0/self._altered_image_size[0]
        h = self.get_height()*1.0/self._altered_image_size[1]
        glBegin(GL_QUADS)
        glTexCoord2f(0, 0)
        glVertex3f(l, t, 0)
        glTexCoord2f(0, h)
        glVertex3f(l, b, 0)
        glTexCoord2f(w, h)
        glVertex3f(r, b, 0)
        glTexCoord2f(w, 0)
        glVertex3f(r, t, 0)
        glEnd()
        self.display_list.end()
    def blit(self, other, pos):
        """Blit another image to this one at pos offset - ONLY allowing an image to blitted once
        other is another image.Image
        pos is the x,y offset of the blit"""
        self.remove_blit(other)
        self.to_be_blitted.append([other, pos])
    def blit_again(self, other, pos):
        """Same as blit, except you can blit the same image multiple times"""
        self.to_be_blitted.append([other, pos])
    def render(self, camera=None):
        """Render the image
        camera can be None or the camera the scene is using"""
        if not self.test_on_screen():
            return None
        ox, oy = self.offset
        # NOTE(review): get_size() returns (width, height), so h receives the
        # width and w the height here; the clip rect below therefore gets a
        # swapped size - confirm whether this is intentional before fixing.
        h, w = self.get_size()
        pos = self.pos
        glPushMatrix()
        # Translate so rotation/scale happen around the image center.
        glTranslatef(pos[0]+ox, pos[1]+oy, 0)
        glRotatef(self.rotation[0], 1, 0, 0)
        glRotatef(self.rotation[1], 0, 1, 0)
        glRotatef(self.rotation[2], 0, 0, 1)
        try:
            # scale may be a per-axis (x, y) sequence...
            glScalef(self.scale[0], self.scale[1], 1)
        except:
            # ...or a single uniform factor.
            glScalef(self.scale, self.scale, 1)
        glColor(*self.colorize)
        self.texture.bind()
        if self.outline:
            misc.outline(self.display_list, self.outline_color, self.outline_size, True)
        self.display_list.render()
        glPopMatrix()
        if self.to_be_blitted:
            # Render attached blits clipped to this image's rectangle,
            # temporarily offsetting their positions by ours.
            view.screen.push_clip2d((int(pos[0]), int(pos[1])), (int(w), int(h)))
            for i in self.to_be_blitted:
                x, y = i[1]
                x += pos[0]
                y += pos[1]
                o = i[0].pos
                i[0].pos = (x, y)
                i[0].render()
                i[0].pos = o
            view.screen.pop_clip()
    def get_width(self):
        """Return the width in pixels of the image"""
        return self._image_size[0]
    def get_height(self):
        """Return the height in pixels of the image"""
        return self._image_size[1]
    def get_size(self):
        """Return the width/height size of the image"""
        return self._image_size
    def get_rect(self):
        """Return a pygame.Rect of the image"""
        self.rect.center = self.offset[0] + self.pos[0], self.offset[1] + self.pos[1]
        return self.rect
    def clear_blits(self):
        """Remove all blits from the image"""
        self.to_be_blitted = []
    def remove_blit(self, image):
        """Remove all blits of image from the Image"""
        # NOTE(review): removing from the list while iterating it can skip
        # entries when image appears more than once - confirm before changing.
        for i in self.to_be_blitted:
            if i[0] == image:
                self.to_be_blitted.remove(i)
    def sub_image(self, topleft, size):
        """Return a new Image object representing a smaller region of this Image."""
        # subsurface shares pixels with the parent surface.
        image = self._pimage.subsurface(topleft, size)
        return Image(image, self.pos, self.rotation, self.scale, self.colorize)
class Image3D(Image):
    """A billboarded 3d image"""
    # Cache of loaded images (keyed by filename); not populated in this chunk.
    _all_loaded = {}
    def __init__(self, filename, pos=(0,0,0),
                 rotation=(0,0,0), scale=1,
                 colorize=(1,1,1,1)):
        """Create the Image3D
        filename must be a filename to an image file, or a pygame.Surface object
        pos is the 3d position of the image
        rotation is the 3d rotation of the image
        scale is the scale factor for the image
        colorize is the color of the image"""
        Image.__init__(self, filename, pos, rotation,
                       scale, colorize)
        self.pickable = True
        self.outline = False
        self.outline_size = 4
        self.outline_color=(1,0,0)
    def get_dimensions(self):
        """Return a tuple of (1,1,1) signifying the 3d dimensions of the image - used by the quad tree"""
        return 1, 1, 1
    def get_pos(self):
        """Return the position of the Image3D"""
        return self.pos
    def get_scale(self):
        """Return the scale of the object."""
        # scale may be a per-axis sequence or a single uniform factor.
        try: return self.scale[0], self.scale[1], self.scale[2]
        except: return self.scale, self.scale, self.scale
    def render(self, camera=None):
        """Render the Image3D
        camera can be None or the camera the scene is using to render from"""
        # NOTE(review): get_size() returns (width, height), so h/w are swapped
        # here; both are unused below, so it is harmless in this method.
        h, w = self.get_size()
        pos = self.pos
        glPushMatrix()
        glTranslatef(pos[0], pos[1], -pos[2])
        if camera:
            # Billboarding: orient the quad to face the camera.
            camera.set_facing_matrix()
        glRotatef(self.rotation[0], 1, 0, 0)
        glRotatef(self.rotation[1], 0, 1, 0)
        glRotatef(self.rotation[2], 0, 0, 1)
        try:
            glScalef(self.scale[0], self.scale[1], 1)
        except:
            glScalef(self.scale, self.scale, 1)
        glColor(*self.colorize)
        # Render unlit so the texture colors are not modulated by scene lights.
        glDisable(GL_LIGHTING)
        self.texture.bind()
        if self.outline:
            misc.outline(self.display_list, self.outline_color, self.outline_size, True)
        self.display_list.render()
        if view.screen.lighting:
            glEnable(GL_LIGHTING)
        glPopMatrix()
    def blit(self, *args, **kwargs):
        """2d blitting is not supported for billboarded images."""
        print("Image3D does not support this function!")
    # These 2d-only operations are all disabled the same way.
    clear_blits = blit
    remove_blit = blit
    blit_again = blit
    test_on_screen = blit
    def copy(self):
        """Return a copy of the Image - sharing the same data.DisplayList"""
        return Image3D(self, self.pos, self.rotation, self.scale, self.colorize)
    def _load_file(self):
        """Load an image file, pad to power-of-two, texturize and compile."""
        self._pimage = pygame.image.load(self.filename)
        sx, sy = self._pimage.get_size()
        xx, xy = self._get_next_biggest(sx, sy)
        self._pimage2 = pygame.Surface((xx, xy)).convert_alpha()
        self._pimage2.fill((0,0,0,0))
        self._pimage2.blit(self._pimage, (0,0))
        # Flip vertically for OpenGL texture coordinates.
        self._pimage2 = pygame.transform.flip(self._pimage2, 0, 1)
        self._image_size = (sx, sy)
        self._altered_image_size = (xx, xy)
        self._texturize(self._pimage2)
        self._compile()
        self.rect = self._pimage.get_rect()
    def compile_from_surface(self, surf):
        """Prepare a pygame.Surface object for 3d rendering"""
        self._pimage = surf
        sx, sy = self._pimage.get_size()
        xx, xy = self._get_next_biggest(sx, sy)
        self._pimage2 = pygame.Surface((xx, xy)).convert_alpha()
        self._pimage2.fill((0,0,0,0))
        self._pimage2.blit(self._pimage, (0,0))
        self._pimage2 = pygame.transform.flip(self._pimage2, 0, 1)
        self._image_size = (sx, sy)
        self._altered_image_size = (xx, xy)
        self._texturize(self._pimage2)
        self._compile()
    def _compile(self):
        """Compile the rendering data into a data.DisplayList.
        The quad is normalized so its longest side spans [-1, 1] in world units,
        keeping the image's aspect ratio."""
        self.offset = self.get_width()/2, self.get_height()/2
        self.display_list = data.DisplayList()
        self.display_list.begin()
        # Texture coords cover only the used part of the padded surface.
        w = self.get_width()*1.0/self._altered_image_size[0]
        h = self.get_height()*1.0/self._altered_image_size[1]
        gw, gh = self.get_size()
        if gw < gh:
            uw = gw * 1.0 / gh
            uh = 1
        elif gh < gw:
            uw = 1
            uh = gh * 1.0 / gw
        else:
            uw = uh = 1
        glBegin(GL_QUADS)
        glTexCoord2f(0, h)
        glVertex3f(-uw, -uh, 0)
        glTexCoord2f(w, h)
        glVertex3f(uw, -uh, 0)
        glTexCoord2f(w, 0)
        glVertex3f(uw, uh, 0)
        glTexCoord2f(0, 0)
        glVertex3f(-uw, uh, 0)
        glEnd()
        self.display_list.end()
    def sub_image(self, topleft, size):
        """Return a new Image3D object representing a smaller region of this Image3D."""
        image = self._pimage.subsurface(topleft, size)
        return Image3D(image, self.pos, self.rotation, self.scale, self.colorize)
def create_empty_image(size=(2,2), color=(1,1,1,1)):
    """Same as create_empty_texture, except returns an image.Image instead"""
    view.require_init()
    surface = pygame.Surface(size).convert_alpha()
    if len(color) == 3:
        # Promote RGB to RGBA with full opacity.
        color = color + (1,)
    # The surface itself is plain white; the requested color is applied via colorize.
    surface.fill((255,255,255,255))
    return Image(surface, colorize=color)
def create_empty_image3d(size=(2,2), color=(1,1,1,1)):
    """Same as create_empty_texture, except returns an image.Image3D instead"""
    view.require_init()
    surface = pygame.Surface(size).convert_alpha()
    if len(color) == 3:
        # Promote RGB to RGBA with full opacity.
        color = color + (1,)
    # The surface itself is plain white; the requested color is applied via colorize.
    surface.fill((255,255,255,255))
    return Image3D(surface, colorize=color)
class Animation(object):
    """A simple object used to store, manipulate, animate and render a bunch of frames of 2d Image objects."""
    def __init__(self, frames=None, pos=(0,0),
                 rotation=(0,0,0), scale=1,
                 colorize=None):
        """Create the Animation
        frames must be a list/tuple of [Image, duration] objects (duration in seconds)
        pos is the 2d position of the image
        rotation is the 3d rotation of the image
        scale is the scale factor for the image
        colorize is the color of the image"""
        view.require_init()
        # Bug fix: the previous default frames=[] was a mutable default argument,
        # so all Animations created without frames shared one list.
        self.frames = frames if frames is not None else []
        self.pos = pos
        self.rotation = rotation
        self.scale = scale
        self.colorize = colorize
        self.cur = 0                # index of the current frame
        self.ptime = time.time()    # timestamp of the last frame switch
        self.running = True
        self.breakpoint = len(self.frames)-1  # last frame of the playback range
        self.startpoint = 0                   # first frame of the playback range
        self.reversed = False
        self.looping = True
        self.visible = True
        self.filename = None
        self.outline = False
        self.outline_size = 4
        self.outline_color=(1,0,0)
    def render(self, camera=None):
        """Render the animation - this also keeps track of swapping frames when they have run for their duration.
        camera must be None or the camera.Camera object used to render the scene."""
        if self.running:
            if time.time() - self.ptime > self.frames[self.cur][1]:
                # The current frame's duration has elapsed; advance (or rewind).
                if self.reversed:
                    self.cur -= 1
                    if self.cur < self.startpoint:
                        if self.looping:
                            self.cur = self.breakpoint
                        else:
                            # Not looping: freeze on the first frame of the range.
                            self.cur += 1
                else:
                    self.cur += 1
                    if self.cur > self.breakpoint:
                        if self.looping:
                            self.cur = self.startpoint
                        else:
                            # Not looping: freeze on the last frame of the range.
                            self.cur -= 1
                self.ptime = time.time()
        # Propagate our transform/appearance onto the frame before rendering it.
        frame = self.current()
        frame.pos = self.pos
        frame.rotation = self.rotation
        frame.scale = self.scale
        frame.outline = self.outline
        frame.outline_size = self.outline_size
        frame.outline_color = self.outline_color
        if self.colorize:
            frame.colorize = self.colorize
        frame.render(camera)
    def seek(self, num):
        """'Jump' to a specific frame in the animation (clamped to valid indices)."""
        self.cur = num
        if self.cur < 0:
            self.cur = 0
        if self.cur >= len(self.frames):
            self.cur = len(self.frames)-1
        self.ptime = time.time()
    def set_bounds(self, start, end):
        """Set the start/end 'bounds' for playback, ie which range of frames to play.
        Both values are clamped to valid frame indices and end >= start is enforced."""
        if start < 0:
            start = 0
        if start >= len(self.frames):
            start = len(self.frames)-1
        if end < 0:
            end = 0
        if end >= len(self.frames):
            end = len(self.frames)-1
        if end < start:
            end = start
        self.startpoint = start
        self.breakpoint = end
    def pause(self):
        """Pause the running of the animation, and locks rendering to the current frame."""
        self.running = False
    def play(self):
        """Play the animation - only needed if pause has been called."""
        self.running = True
        self.ptime = time.time()
    def rewind(self):
        """Rewind the playback to first frame."""
        self.seek(0)
    def fastforward(self):
        """Fast forward playback to the last frame."""
        self.seek(self.length()-1)
    def get_width(self):
        """Return the width of the image."""
        return self.current().get_width()
    def get_height(self):
        """Return the height of the image."""
        return self.current().get_height()
    def get_size(self):
        """Return the width/height size of the image."""
        return self.current().get_size()
    def length(self):
        """Return the number of frames of the animation."""
        return len(self.frames)
    def reverse(self):
        """Reverse the playback of the image animation."""
        self.reversed = not self.reversed
    def reset(self):
        """Reset the image playback."""
        self.cur = 0
        self.ptime = time.time()
        self.reversed = False
    def loop(self, boolean=True):
        """Set looping of playback on/off - if looping is off animation will continue until the last frame and freeze."""
        self.looping = boolean
        self.ptime = time.time()
    def copy(self):
        """Return a copy of this Animation. Frames are shared..."""
        new = Animation(self.frames, self.pos, self.rotation, self.scale, self.colorize)
        new.running = self.running
        new.breakpoint = self.breakpoint
        new.startpoint = self.startpoint
        new.cur = self.cur
        new.ptime = self.ptime
        new.reversed = self.reversed
        new.looping = self.looping
        return new
    def current(self):
        """Return the current frame Image."""
        return self.frames[self.cur][0]
    def get_rect(self):
        """Return a pygame.Rect of the image"""
        frame = self.current()
        frame.pos = self.pos
        return frame.get_rect()
    def clear_blits(self):
        """Remove all blits from all frames of the image"""
        for i in self.frames:
            i[0].to_be_blitted = []
    def remove_blit(self, image):
        """Remove all blits of image from the Image"""
        for frame in self.frames:
            frame = frame[0]
            for i in frame.to_be_blitted:
                if i[0] == image:
                    frame.to_be_blitted.remove(i)
    def sub_image(self, topleft, size):
        """Return a new Image object representing a smaller region of the current frame of this Image."""
        return self.current().sub_image(topleft, size)
    def blit(self, other, pos):
        """Blit another image to this one at pos offset - ONLY allowing an image to blitted once
        other is another image.Image
        pos is the x,y offset of the blit"""
        for frame in self.frames:
            frame = frame[0]
            frame.remove_blit(other)
            frame.to_be_blitted.append([other, pos])
    def blit_again(self, other, pos):
        """Same as blit, except you can blit the same image multiple times"""
        for frame in self.frames:
            frame = frame[0]
            frame.to_be_blitted.append([other, pos])
class Animation3D(Animation):
    """3D version of Animation."""
    def __init__(self, frames=None, pos=(0,0,0), rotation=(0,0,0),
                 scale=1, colorize=(1,1,1,1)):
        """Create the Animation3D
        frames must be a list/tuple of [frame, duration] objects (duration in seconds)
        pos is the 3d position of the image
        rotation is the 3d rotation of the image
        scale is the scale factor for the image
        colorize is the color of the image"""
        # Bug fix: frames=[] was a mutable default argument shared by all
        # instances; convert None here so it also works with the original
        # Animation.__init__ signature.
        Animation.__init__(self, [] if frames is None else frames,
                           pos, rotation, scale, colorize)
        self.pickable = True
        self.outline = False
        self.outline_size = 4
        self.outline_color=(1,0,0)
    def blit(self, *args, **kwargs):
        """2d blitting is not supported for billboarded animations."""
        print("Animation3D does not support this function!")
    # These 2d-only operations are all disabled the same way.
    clear_blits = blit
    remove_blit = blit
    blit_again = blit
    test_on_screen = blit
    def get_dimensions(self):
        """Return a tuple of (1,1,1) signifying the 3d dimensions of the image - used by the quad tree"""
        return 1, 1, 1
    def get_pos(self):
        """Return the position of the Image3D"""
        return self.pos
    def get_scale(self):
        """Return the scale of the object."""
        try: return self.scale[0], self.scale[1], self.scale[2]
        except: return self.scale, self.scale, self.scale
    def copy(self):
        """Return a copy of this Animation. Frames are shared..."""
        new = Animation3D(self.frames, self.pos, self.rotation, self.scale, self.colorize)
        new.running = self.running
        new.breakpoint = self.breakpoint
        new.startpoint = self.startpoint
        new.cur = self.cur
        new.ptime = self.ptime
        new.reversed = self.reversed
        # Bug fix: looping was not copied, unlike Animation.copy().
        new.looping = self.looping
        return new
def GIFImage(filename, pos=(0,0),
             rotation=(0,0,0), scale=1,
             colorize=(1,1,1,1)):
    """Load a GIF image into an Animation object.
    filename must be the name of a gif image on disk
    pos is the 2d position of the image
    rotation is the 3d rotation of the image
    scale is the scale factor for the image
    colorize is the color of the image"""
    view.require_init()
    # NOTE(review): uses old PIL APIs (Image.tostring, tile internals) that
    # were removed/changed in modern Pillow - confirm the targeted PIL version.
    image = pilImage.open(filename)
    frames = []
    # Decode the GIF's global palette into a list of [r, g, b] triples.
    pal = image.getpalette()
    base_palette = []
    for i in range(0, len(pal), 3):
        rgb = pal[i:i+3]
        base_palette.append(rgb)
    # First pass: collect the compression/disposal codes of all frames.
    all_tiles = []
    try:
        while 1:
            if not image.tile:
                image.seek(0)
            if image.tile:
                all_tiles.append(image.tile[0][3][0])
            image.seek(image.tell()+1)
    except EOFError:
        # PIL signals the end of the frame sequence with EOFError.
        image.seek(0)
    all_tiles = tuple(set(all_tiles))
    # Second pass: render each frame into a pygame surface.
    try:
        while 1:
            try:
                duration = image.info["duration"]
            except:
                # Frames without an explicit duration default to 100 ms.
                duration = 100
            duration *= .001 # PIL gives milliseconds; Animation expects seconds
            cons = False  # whether this frame must be composited on the previous ones
            x0, y0, x1, y1 = (0, 0) + image.size
            if image.tile:
                tile = image.tile
            else:
                image.seek(0)
                tile = image.tile
            if len(tile) > 0:
                x0, y0, x1, y1 = tile[0][1]
            # Choose palette handling depending on the tile codes found above.
            if all_tiles:
                if all_tiles in ((6,), (7,)):
                    cons = True
                    pal = image.getpalette()
                    palette = []
                    for i in range(0, len(pal), 3):
                        rgb = pal[i:i+3]
                        palette.append(rgb)
                elif all_tiles in ((7, 8), (8, 7)):
                    pal = image.getpalette()
                    palette = []
                    for i in range(0, len(pal), 3):
                        rgb = pal[i:i+3]
                        palette.append(rgb)
                else:
                    palette = base_palette
            else:
                palette = base_palette
            pi = pygame.image.fromstring(image.tostring(), image.size, image.mode)
            pi.set_palette(palette)
            if "transparency" in image.info:
                pi.set_colorkey(image.info["transparency"])
            pi2 = pygame.Surface(image.size, SRCALPHA)
            if cons:
                # Composite over all previously decoded frames.
                for i in frames:
                    pi2.blit(i[0], (0,0))
            pi2.blit(pi, (x0, y0), (x0, y0, x1-x0, y1-y0))
            frames.append([pi2, duration])
            image.seek(image.tell()+1)
    except EOFError:
        pass
    # Wrap the decoded surfaces into Image objects for the Animation.
    new_frames = []
    for i in frames:
        new_frames.append([Image(i[0]), i[1]])
    return Animation(new_frames, pos, rotation, scale, colorize)
def GIFImage3D(filename, pos=(0,0,0),
               rotation=(0,0,0), scale=1,
               colorize=(1,1,1,1)):
    """Load a GIF image into an Animation3D object.
    filename must be the name of a gif image on disk
    pos is the 3d position of the image
    rotation is the 3d rotation of the image
    scale is the scale factor for the image
    colorize is the color of the image"""
    view.require_init()
    # NOTE(review): same old-PIL decoding logic as GIFImage above - uses
    # Image.tostring and tile internals removed in modern Pillow.
    image = pilImage.open(filename)
    frames = []
    # Decode the GIF's global palette into a list of [r, g, b] triples.
    pal = image.getpalette()
    base_palette = []
    for i in range(0, len(pal), 3):
        rgb = pal[i:i+3]
        base_palette.append(rgb)
    # First pass: collect the compression/disposal codes of all frames.
    all_tiles = []
    try:
        while 1:
            if not image.tile:
                image.seek(0)
            if image.tile:
                all_tiles.append(image.tile[0][3][0])
            image.seek(image.tell()+1)
    except EOFError:
        # PIL signals the end of the frame sequence with EOFError.
        image.seek(0)
    all_tiles = tuple(set(all_tiles))
    # Second pass: render each frame into a pygame surface.
    try:
        while 1:
            try:
                duration = image.info["duration"]
            except:
                # Frames without an explicit duration default to 100 ms.
                duration = 100
            duration *= .001 # PIL gives milliseconds; Animation expects seconds
            cons = False  # whether this frame must be composited on the previous ones
            x0, y0, x1, y1 = (0, 0) + image.size
            if image.tile:
                tile = image.tile
            else:
                image.seek(0)
                tile = image.tile
            if len(tile) > 0:
                x0, y0, x1, y1 = tile[0][1]
            # Choose palette handling depending on the tile codes found above.
            if all_tiles:
                if all_tiles in ((6,), (7,)):
                    cons = True
                    pal = image.getpalette()
                    palette = []
                    for i in range(0, len(pal), 3):
                        rgb = pal[i:i+3]
                        palette.append(rgb)
                elif all_tiles in ((7, 8), (8, 7)):
                    pal = image.getpalette()
                    palette = []
                    for i in range(0, len(pal), 3):
                        rgb = pal[i:i+3]
                        palette.append(rgb)
                else:
                    palette = base_palette
            else:
                palette = base_palette
            pi = pygame.image.fromstring(image.tostring(), image.size, image.mode)
            pi.set_palette(palette)
            if "transparency" in image.info:
                pi.set_colorkey(image.info["transparency"])
            pi2 = pygame.Surface(image.size, SRCALPHA)
            if cons:
                # Composite over all previously decoded frames.
                for i in frames:
                    pi2.blit(i[0], (0,0))
            pi2.blit(pi, (x0, y0), (x0, y0, x1-x0, y1-y0))
            frames.append([pi2, duration])
            image.seek(image.tell()+1)
    except EOFError:
        pass
    # Wrap the decoded surfaces into Image3D objects for the Animation3D.
    new_frames = []
    for i in frames:
        new_frames.append([Image3D(i[0]), i[1]])
    return Animation3D(new_frames, pos, rotation, scale, colorize)
def SpriteSheet(filename, frames=(), durations=100,
                pos=(0,0), rotation=(0,0,0), scale=1,
                colorize=(1,1,1,1)):
    """Load a "spritesheet" (basically, a flat 2d image that holds a lot of different images) into an Animation object.
    filename must be the name of an image on disk
    frames must be a tuple/list of [x,y,width,height] portions of the image that are unique frames
    durations must be a number or list/tuple of numbers representing the duration (in milliseconds) of all/each frame
    pos is the 2d position of the image
    rotation is the 3d rotation of the image
    scale is the scale factor for the image
    colorize is the color of the image"""
    view.require_init()
    # A single number means every frame shares that duration.
    # (isinstance replaces the old exact type(...) comparison; the
    # mutable default frames=[] is now an immutable tuple.)
    if isinstance(durations, (int, float)):
        durations = [durations] * len(frames)
    new = []
    image = pygame.image.load(filename).convert_alpha()
    for (frame, dur) in zip(frames, durations):
        # Durations are milliseconds; Animation expects seconds.
        new.append([Image(image.subsurface(*frame)), dur * 0.001])
    return Animation(new, pos, rotation, scale, colorize)
def SpriteSheet3D(filename, frames=(), durations=100,
                  pos=(0,0), rotation=(0,0,0), scale=1,
                  colorize=(1,1,1,1)):
    """Load a "spritesheet" (basically, a flat 2d image that holds a lot of different images) into an Animation3D object.
    filename must be the name of an image on disk
    frames must be a tuple/list of [x,y,width,height] portions of the image that are unique frames
    durations must be a number or list/tuple of numbers representing the duration (in milliseconds) of all/each frame
    pos is the 3d position of the image
    rotation is the 3d rotation of the image
    scale is the scale factor for the image
    colorize is the color of the image"""
    # NOTE(review): default pos is a 2-tuple although this is the 3D
    # variant -- kept for backward compatibility, confirm callers.
    view.require_init()
    # durations previously defaulted to [] here, which made zip() drop
    # every frame when the caller passed frames without durations; it now
    # defaults to 100 ms per frame, matching SpriteSheet.
    if isinstance(durations, (int, float)):
        durations = [durations] * len(frames)
    new = []
    image = pygame.image.load(filename).convert_alpha()
    for (frame, dur) in zip(frames, durations):
        # Durations are milliseconds; Animation3D expects seconds.
        new.append([Image3D(image.subsurface(*frame)), dur * 0.001])
    return Animation3D(new, pos, rotation, scale, colorize)
def GridSpriteSheet(filename, frames=(1,1), duration=100,
                    pos=(0,0), rotation=(0,0,0), scale=1,
                    colorize=(1,1,1,1)):
    """Load a "spritesheet" (basically, a flat 2d image that holds a lot of different images) into an Animation object.
    filename must be the name of an image on disk
    frames must be a tuple/list of two ints, indicating the number of frames in the x/y axis
    duration must be a number representing the duration (in milliseconds) of all frames
    pos is the 2d position of the image
    rotation is the 3d rotation of the image
    scale is the scale factor for the image
    colorize is the color of the image"""
    view.require_init()
    sheet = pygame.image.load(filename).convert_alpha()
    # Each cell of the grid is one animation frame.
    cell_w = int(sheet.get_width() / frames[0])
    cell_h = int(sheet.get_height() / frames[1])
    frame_time = duration * 0.001  # milliseconds -> seconds
    cells = []
    for col in range(frames[0]):
        for row in range(frames[1]):
            sub = sheet.subsurface(col * cell_w, row * cell_h, cell_w, cell_h)
            cells.append([Image(sub), frame_time])
    return Animation(cells, pos, rotation, scale, colorize)
def GridSpriteSheet3D(filename, frames=(1,1), duration=100,
                      pos=(0,0,0), rotation=(0,0,0), scale=1,
                      colorize=(1,1,1,1)):
    """Load a "spritesheet" (basically, a flat 2d image that holds a lot of different images) into an Animation3D object.
    filename must be the name of an image on disk
    frames must be a tuple/list of two ints, indicating the number of frames in the x/y axis
    duration must be a number representing the duration (in milliseconds) of all frames
    pos is the 3d position of the image
    rotation is the 3d rotation of the image
    scale is the scale factor for the image
    colorize is the color of the image"""
    view.require_init()
    sheet = pygame.image.load(filename).convert_alpha()
    # Each cell of the grid is one animation frame.
    cell_w = int(sheet.get_width() / frames[0])
    cell_h = int(sheet.get_height() / frames[1])
    frame_time = duration * 0.001  # milliseconds -> seconds
    cells = []
    for col in range(frames[0]):
        for row in range(frames[1]):
            sub = sheet.subsurface(col * cell_w, row * cell_h, cell_w, cell_h)
            cells.append([Image3D(sub), frame_time])
    return Animation3D(cells, pos, rotation, scale, colorize)
def load_and_tile_resize_image(filename, size, pos=(0,0),
                               rotation=(0,0,0), scale=1,
                               colorize=(1,1,1,1), border_size=None):
    """Load an image, resize it by tiling
    (ie, each image is 9 tiles, and then the parts are scaled so that it fits or greater than size)
    filename must be the filename of the image to load
    size must be the (x, y) size of the image (may be larger)
    pos is the 2d position of the image
    rotation is the 3d rotation of the image
    scale is the scale factor of the image
    colorize is the color of the image
    border_size optionally caps the corner/edge tile thickness
    Returns Image, tile_size"""
    view.require_init()
    image = pygame.image.load(filename).convert_alpha()
    # Never produce a result smaller than the source image itself.
    x, y = size
    if x < image.get_width(): x = image.get_width()
    if y < image.get_height(): y = image.get_height()
    size = x, y
    if border_size:
        # Clamp the border so three tiles always fit along each axis.
        if border_size > int(min(image.get_size())/3):
            border_size = int(min(image.get_size())/3)
        x1 = min((border_size, int(image.get_width()/3)))
        y1 = min((border_size, int(image.get_height()/3)))
        x2 = image.get_width()-x1*2
        y2 = image.get_height()-y1*2
    else:
        x1 = x2 = int(image.get_width()/3)
        y1 = y2 = int(image.get_height()/3)
    # Cut the 9 tiles: the four corners keep their original size, the
    # edges and the middle are stretched to fill the target size.
    topleft = image.subsurface((0, 0), (x1, y1))
    top = pygame.transform.scale(image.subsurface((x1, 0), (x2, y1)), (size[0]-x1*2, y1))
    topright = image.subsurface((x1+x2, 0), (x1, y1))
    left = pygame.transform.scale(image.subsurface((0, y1), (x1, y2)), (x1, size[1]-y1*2))
    middle = pygame.transform.scale(image.subsurface((x1, y1), (x2, y2)), (size[0]-x1*2, size[1]-y1*2))
    right = pygame.transform.scale(image.subsurface((x1+x2, y1), (x1, y2)), (x1, size[1]-y1*2))
    botleft = image.subsurface((0, y1+y2), (x1, y1))
    bottom = pygame.transform.scale(image.subsurface((x1, y1+y2), (x2, y1)), (size[0]-x1*2, y1))
    # BUGFIX: the bottom-right tile previously started at x1+y1, which is
    # only correct for square tiles; it must mirror topright at x1+x2.
    botright = image.subsurface((x1+x2, y1+y2), (x1, y1))
    new = pygame.Surface(size).convert_alpha()
    new.fill((0,0,0,0))
    new.blit(topleft, (0, 0))
    new.blit(top, (x1, 0))
    new.blit(topright, (size[0]-x1, 0))
    new.blit(left, (0, y1))
    new.blit(middle, (x1, y1))
    new.blit(right, (size[0]-x1, y1))
    new.blit(botleft, (0, size[1]-y1))
    new.blit(bottom, (x1, size[1]-y1))
    new.blit(botright, (size[0]-x1, size[1]-y1))
    return Image(new, pos, rotation, scale, colorize), (x1, y1)
| |
"""Support for the Vallox ventilation unit fan."""
from __future__ import annotations
from collections.abc import Mapping
import logging
from typing import Any
from vallox_websocket_api import Vallox
from vallox_websocket_api.exceptions import ValloxApiException
from homeassistant.components.fan import (
SUPPORT_PRESET_MODE,
FanEntity,
NotValidPresetModeError,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from . import ValloxStateProxy
from .const import (
DOMAIN,
METRIC_KEY_MODE,
METRIC_KEY_PROFILE_FAN_SPEED_AWAY,
METRIC_KEY_PROFILE_FAN_SPEED_BOOST,
METRIC_KEY_PROFILE_FAN_SPEED_HOME,
MODE_OFF,
MODE_ON,
SIGNAL_VALLOX_STATE_UPDATE,
STR_TO_VALLOX_PROFILE_SETTABLE,
VALLOX_PROFILE_TO_STR_SETTABLE,
)
_LOGGER = logging.getLogger(__name__)

# Descriptors for the per-profile fan-speed attributes:
# "description" is the attribute key exposed via extra_state_attributes,
# "metric_key" is the Vallox metric the value is fetched from.
ATTR_PROFILE_FAN_SPEED_HOME = {
    "description": "fan_speed_home",
    "metric_key": METRIC_KEY_PROFILE_FAN_SPEED_HOME,
}
ATTR_PROFILE_FAN_SPEED_AWAY = {
    "description": "fan_speed_away",
    "metric_key": METRIC_KEY_PROFILE_FAN_SPEED_AWAY,
}
ATTR_PROFILE_FAN_SPEED_BOOST = {
    "description": "fan_speed_boost",
    "metric_key": METRIC_KEY_PROFILE_FAN_SPEED_BOOST,
}
async def async_setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    async_add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up the fan device."""
    # Platform is only ever set up through discovery by the component.
    if discovery_info is None:
        return
    domain_data = hass.data[DOMAIN]
    client = domain_data["client"]
    # The mode metric must be registered as settable before we write it.
    client.set_settable_address(METRIC_KEY_MODE, int)
    fan = ValloxFan(domain_data["name"], client, domain_data["state_proxy"])
    async_add_entities([fan], update_before_add=False)
class ValloxFan(FanEntity):
    """Representation of the fan."""

    # State is pushed via the dispatcher signal; Home Assistant must not poll.
    _attr_should_poll = False

    def __init__(
        self, name: str, client: Vallox, state_proxy: ValloxStateProxy
    ) -> None:
        """Initialize the fan."""
        self._client = client
        self._state_proxy = state_proxy
        # Cached device state; refreshed by async_update().
        self._is_on = False
        self._preset_mode: str | None = None
        self._fan_speed_home: int | None = None
        self._fan_speed_away: int | None = None
        self._fan_speed_boost: int | None = None
        self._attr_name = name
        # Stay unavailable until the first successful update.
        self._attr_available = False

    @property
    def supported_features(self) -> int:
        """Flag supported features."""
        return SUPPORT_PRESET_MODE

    @property
    def preset_modes(self) -> list[str]:
        """Return a list of available preset modes."""
        # Use the Vallox profile names for the preset names.
        return list(STR_TO_VALLOX_PROFILE_SETTABLE.keys())

    @property
    def is_on(self) -> bool:
        """Return if device is on."""
        return self._is_on

    @property
    def preset_mode(self) -> str | None:
        """Return the current preset mode."""
        return self._preset_mode

    @property
    def extra_state_attributes(self) -> Mapping[str, int | None]:
        """Return device specific state attributes."""
        return {
            ATTR_PROFILE_FAN_SPEED_HOME["description"]: self._fan_speed_home,
            ATTR_PROFILE_FAN_SPEED_AWAY["description"]: self._fan_speed_away,
            ATTR_PROFILE_FAN_SPEED_BOOST["description"]: self._fan_speed_boost,
        }

    async def async_added_to_hass(self) -> None:
        """Call to update."""
        # Refresh this entity whenever the shared state proxy signals new
        # data; the listener is detached when the entity is removed.
        self.async_on_remove(
            async_dispatcher_connect(
                self.hass, SIGNAL_VALLOX_STATE_UPDATE, self._update_callback
            )
        )

    @callback
    def _update_callback(self) -> None:
        """Call update method."""
        self.async_schedule_update_ha_state(True)

    async def async_update(self) -> None:
        """Fetch state from the device."""
        try:
            # Fetch if the whole device is in regular operation state.
            self._is_on = self._state_proxy.fetch_metric(METRIC_KEY_MODE) == MODE_ON
            vallox_profile = self._state_proxy.get_profile()
            # Fetch the profile fan speeds.
            fan_speed_home = self._state_proxy.fetch_metric(
                ATTR_PROFILE_FAN_SPEED_HOME["metric_key"]
            )
            fan_speed_away = self._state_proxy.fetch_metric(
                ATTR_PROFILE_FAN_SPEED_AWAY["metric_key"]
            )
            fan_speed_boost = self._state_proxy.fetch_metric(
                ATTR_PROFILE_FAN_SPEED_BOOST["metric_key"]
            )
        except (OSError, KeyError, TypeError) as err:
            # Mark unavailable but keep the last known attribute values.
            self._attr_available = False
            _LOGGER.error("Error updating fan: %s", err)
            return
        self._preset_mode = VALLOX_PROFILE_TO_STR_SETTABLE.get(vallox_profile)
        # Non-numeric metric values are reported as None.
        self._fan_speed_home = (
            int(fan_speed_home) if isinstance(fan_speed_home, (int, float)) else None
        )
        self._fan_speed_away = (
            int(fan_speed_away) if isinstance(fan_speed_away, (int, float)) else None
        )
        self._fan_speed_boost = (
            int(fan_speed_boost) if isinstance(fan_speed_boost, (int, float)) else None
        )
        self._attr_available = True

    async def _async_set_preset_mode_internal(self, preset_mode: str) -> bool:
        """
        Set new preset mode.
        Returns true if the mode has been changed, false otherwise.
        """
        try:
            self._valid_preset_mode_or_raise(preset_mode)  # type: ignore[no-untyped-call]
        except NotValidPresetModeError as err:
            _LOGGER.error(err)
            return False
        # Skip the device round trip when already in the requested mode.
        if preset_mode == self.preset_mode:
            return False
        try:
            await self._client.set_profile(STR_TO_VALLOX_PROFILE_SETTABLE[preset_mode])
        except (OSError, ValloxApiException) as err:
            _LOGGER.error("Error setting preset: %s", err)
            return False
        return True

    async def async_set_preset_mode(self, preset_mode: str) -> None:
        """Set new preset mode."""
        update_needed = await self._async_set_preset_mode_internal(preset_mode)
        if update_needed:
            # This state change affects other entities like sensors. Force an immediate update that
            # can be observed by all parties involved.
            await self._state_proxy.async_update()

    async def async_turn_on(
        self,
        speed: str | None = None,
        percentage: int | None = None,
        preset_mode: str | None = None,
        **kwargs: Any,
    ) -> None:
        """Turn the device on."""
        _LOGGER.debug("Turn on: %s", speed)
        update_needed = False
        if preset_mode:
            update_needed = await self._async_set_preset_mode_internal(preset_mode)
        if not self.is_on:
            try:
                await self._client.set_values({METRIC_KEY_MODE: MODE_ON})
            except OSError as err:
                _LOGGER.error("Error turning on: %s", err)
            else:
                # Only refresh when the write actually succeeded.
                update_needed = True
        if update_needed:
            # This state change affects other entities like sensors. Force an immediate update that
            # can be observed by all parties involved.
            await self._state_proxy.async_update()

    async def async_turn_off(self, **kwargs: Any) -> None:
        """Turn the device off."""
        if not self.is_on:
            return
        try:
            await self._client.set_values({METRIC_KEY_MODE: MODE_OFF})
        except OSError as err:
            _LOGGER.error("Error turning off: %s", err)
            return
        # Same as for turn_on method.
        await self._state_proxy.async_update()
| |
########################################################################
# MCU Gear(R) system Sample Code
# Author: y.kou.
# web site: http://www.milletool.com/
# Date : 8/OCT/2016
#
########################################################################
#Revision Information
#
########################################################################
#!/usr/bin/python
from ..mil import mil
from ..mil import p
from ..mil import wiringdata
import time
# High and low 16-bit halves of the module's 32-bit device address.
moduleAddress1 = 0x8000
moduleAddress2 = 0x001D
# Full 32-bit address: (moduleAddress1 << 16) | moduleAddress2.
moduleAddress = 0x8000001D
def getInfo(Number):
    """Return [address1, address2, wiring data...] for module slot Number.

    Number must be 0-3; an out-of-range slot yields -1 for the address
    and for both address halves.
    """
    if ((Number >= 0) and (Number <= 3)):
        address = moduleAddress + Number
        address2 = moduleAddress2 + Number
        address1 = moduleAddress1
        # Sanity-check that the two 16-bit halves recombine to the full
        # 32-bit address.
        testaddr = (address1 << 16) + address2
        if address != testaddr:
            print("ERROR: Device address is not correct!")
            address1 = -1
            address2 = -1
    else:
        # BUGFIX: address1/address2 were previously left unbound on this
        # path, so an invalid Number raised NameError below.
        address = -1
        address1 = -1
        address2 = -1
    IOdata = wiringdata.getWiring(address)
    datas = [address1, address2]
    datas.extend(IOdata)
    return datas
def getAddr(Number):
    """Return the (high, low) 16-bit address halves for module slot Number.

    The low half is offset by Number for slots 0-3; -1 is returned as the
    low half for an invalid slot.
    """
    address = -1
    if ((Number >= 0) and (Number <= 3)):
        address = moduleAddress2 + Number
    else:
        address = -1
    # BUGFIX: the computed per-slot address was previously ignored and the
    # base moduleAddress2 returned for every slot.
    return moduleAddress1, address
def getIOs():
    """Return the wiring data for this module's base address."""
    return wiringdata.getWiring(moduleAddress)
def stopStep(milModClass):
    """De-energise all four stepper coil pins (Y2, Y1, X2, X1)."""
    for coil_pin in milModClass.pinData[:4]:
        wiringdata.IOout(coil_pin, 0)
def movOneStep(milModClass, pinHighLow):
    """Drive one half-step; pinHighLow holds the (X1, Y1, X2, Y2) levels."""
    pins = milModClass.pinData
    wiringdata.IOout(pins[3], pinHighLow[0])  # X1
    wiringdata.IOout(pins[1], pinHighLow[1])  # Y1
    wiringdata.IOout(pins[2], pinHighLow[2])  # X2
    wiringdata.IOout(pins[0], pinHighLow[3])  # Y2
def moveStep(milModClass, Direction, StepNum, DELAY1):
    """Run StepNum full cycles of the 8-phase half-step sequence.

    Direction True steps forward, any other value steps in reverse
    (preserving the original `== True` comparison semantics).
    DELAY1 is the base delay in seconds (0.001 is max speed); even-indexed
    half-steps are held for DELAY1*2, odd-indexed ones for DELAY1, exactly
    as in the original unrolled sequence.
    """
    DELAY2 = DELAY1 * 2
    # (X1, Y1, X2, Y2) coil levels for each half-step.  These tables are a
    # table-driven rewrite of the former copy-pasted IOout blocks; the pin
    # write order and timings are unchanged (writes go through movOneStep,
    # which uses the same X1, Y1, X2, Y2 order).
    forward = (
        (1, 0, 0, 0), (1, 1, 0, 0), (0, 1, 0, 0), (0, 1, 1, 0),
        (0, 0, 1, 0), (0, 0, 1, 1), (0, 0, 0, 1), (1, 0, 0, 1),
    )
    reverse = (
        (1, 0, 0, 0), (1, 0, 0, 1), (0, 0, 0, 1), (0, 0, 1, 1),
        (0, 0, 1, 0), (0, 1, 1, 0), (0, 1, 0, 0), (1, 1, 0, 0),
    )
    sequence = forward if Direction == True else reverse
    for var in range(0, StepNum):
        for index, levels in enumerate(sequence):
            movOneStep(milModClass, levels)
            # Original timing pattern: long, short, long, short, ...
            time.sleep(DELAY2 if index % 2 == 0 else DELAY1)
def movTwinOneStep(milModClass1, pinHighLow1, milModClass2, pinHighLow2):
    """Drive one half-step on two modules at once.

    The first module is driven through its primary pinData, the second
    through its secondPinData; each pinHighLow is (X1, Y1, X2, Y2).
    """
    primary = milModClass1.pinData
    wiringdata.IOout(primary[3], pinHighLow1[0])  # X1
    wiringdata.IOout(primary[1], pinHighLow1[1])  # Y1
    wiringdata.IOout(primary[2], pinHighLow1[2])  # X2
    wiringdata.IOout(primary[0], pinHighLow1[3])  # Y2
    secondary = milModClass2.secondPinData
    wiringdata.IOout(secondary[3], pinHighLow2[0])  # X1
    wiringdata.IOout(secondary[1], pinHighLow2[1])  # Y1
    wiringdata.IOout(secondary[2], pinHighLow2[2])  # X2
    wiringdata.IOout(secondary[0], pinHighLow2[3])  # Y2
def moveSecondStep(milModClass, Direction, StepNum, DELAY1):
    """Like moveStep, but drives the module's secondPinData pins.

    Direction True steps forward, any other value steps in reverse.
    DELAY1 is the base delay in seconds; even-indexed half-steps are held
    for DELAY1*2, odd-indexed ones for DELAY1, exactly as in the original
    unrolled sequence.  The stray debug print that was left live only in
    the reverse branch (and commented out everywhere else) was removed.
    """
    DELAY2 = DELAY1 * 2
    # (X1, Y1, X2, Y2) coil levels per half-step; identical tables to
    # moveStep, applied to secondPinData with the same pin write order.
    forward = (
        (1, 0, 0, 0), (1, 1, 0, 0), (0, 1, 0, 0), (0, 1, 1, 0),
        (0, 0, 1, 0), (0, 0, 1, 1), (0, 0, 0, 1), (1, 0, 0, 1),
    )
    reverse = (
        (1, 0, 0, 0), (1, 0, 0, 1), (0, 0, 0, 1), (0, 0, 1, 1),
        (0, 0, 1, 0), (0, 1, 1, 0), (0, 1, 0, 0), (1, 1, 0, 0),
    )
    sequence = forward if Direction == True else reverse
    pins = milModClass.secondPinData
    for var in range(0, StepNum):
        for index, (x1, y1, x2, y2) in enumerate(sequence):
            wiringdata.IOout(pins[3], x1)  # X1
            wiringdata.IOout(pins[1], y1)  # Y1
            wiringdata.IOout(pins[2], x2)  # X2
            wiringdata.IOout(pins[0], y2)  # Y2
            # Original timing pattern: long, short, long, short, ...
            time.sleep(DELAY2 if index % 2 == 0 else DELAY1)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Gamma distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import kullback_leibler
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
# Public API of this module.
__all__ = [
    "Gamma",
    "GammaWithSoftplusConcentrationRate",
]
class Gamma(distribution.Distribution):
  """Gamma distribution.

  The Gamma distribution is defined over positive real numbers using
  parameters `concentration` (aka "alpha") and `rate` (aka "beta").

  #### Mathematical Details

  The probability density function (pdf) is,

  ```none
  pdf(x; alpha, beta, x > 0) = x**(alpha - 1) exp(-x beta) / Z
  Z = Gamma(alpha) beta**alpha
  ```

  where:

  * `concentration = alpha`, `alpha > 0`,
  * `rate = beta`, `beta > 0`,
  * `Z` is the normalizing constant, and,
  * `Gamma` is the [gamma function](
    https://en.wikipedia.org/wiki/Gamma_function).

  The cumulative density function (cdf) is,

  ```none
  cdf(x; alpha, beta, x > 0) = GammaInc(alpha, beta x) / Gamma(alpha)
  ```

  where `GammaInc` is the [lower incomplete Gamma function](
  https://en.wikipedia.org/wiki/Incomplete_gamma_function).

  The parameters can be intuited via their relationship to mean and stddev,

  ```none
  concentration = alpha = (mean / stddev)**2
  rate = beta = mean / stddev**2 = concentration / mean
  ```

  Distribution parameters are automatically broadcast in all functions; see
  examples for details.

  WARNING: This distribution may draw 0-valued samples for small `concentration`
  values. See note in `tf.random_gamma` docstring.

  #### Examples

  ```python
  dist = Gamma(concentration=3.0, rate=2.0)
  dist2 = Gamma(concentration=[3.0, 4.0], rate=[2.0, 3.0])
  ```

  """

  def __init__(self,
               concentration,
               rate,
               validate_args=False,
               allow_nan_stats=True,
               name="Gamma"):
    """Construct Gamma with `concentration` and `rate` parameters.

    The parameters `concentration` and `rate` must be shaped in a way that
    supports broadcasting (e.g. `concentration + rate` is a valid operation).

    Args:
      concentration: Floating point tensor, the concentration params of the
        distribution(s). Must contain only positive values.
      rate: Floating point tensor, the inverse scale params of the
        distribution(s). Must contain only positive values.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      TypeError: if `concentration` and `rate` are different dtypes.
    """
    parameters = locals()
    with ops.name_scope(name, values=[concentration, rate]) as ns:
      # Only attach the positivity assertions when validation is requested.
      with ops.control_dependencies([
          check_ops.assert_positive(concentration),
          check_ops.assert_positive(rate),
      ] if validate_args else []):
        self._concentration = array_ops.identity(
            concentration, name="concentration")
        self._rate = array_ops.identity(rate, name="rate")
        # Raises TypeError if the two parameters disagree on float dtype.
        contrib_tensor_util.assert_same_float_dtype(
            [self._concentration, self._rate])
    super(Gamma, self).__init__(
        dtype=self._concentration.dtype,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        reparameterization_type=distribution.NOT_REPARAMETERIZED,
        parameters=parameters,
        graph_parents=[self._concentration,
                       self._rate],
        name=ns)

  @staticmethod
  def _param_shapes(sample_shape):
    # Both parameters share the given sample shape.
    return dict(
        zip(("concentration", "rate"), ([ops.convert_to_tensor(
            sample_shape, dtype=dtypes.int32)] * 2)))

  @property
  def concentration(self):
    """Concentration parameter."""
    return self._concentration

  @property
  def rate(self):
    """Rate parameter."""
    return self._rate

  def _batch_shape_tensor(self):
    # Batch shape is the broadcast of the two parameter shapes.
    return array_ops.broadcast_dynamic_shape(
        array_ops.shape(self.concentration),
        array_ops.shape(self.rate))

  def _batch_shape(self):
    return array_ops.broadcast_static_shape(
        self.concentration.get_shape(),
        self.rate.get_shape())

  def _event_shape_tensor(self):
    # Scalar event: each sample is a single positive real.
    return constant_op.constant([], dtype=dtypes.int32)

  def _event_shape(self):
    return tensor_shape.scalar()

  @distribution_util.AppendDocstring(
      """Note: See `tf.random_gamma` docstring for sampling details and
      caveats.""")
  def _sample_n(self, n, seed=None):
    return random_ops.random_gamma(
        shape=[n],
        alpha=self.concentration,
        beta=self.rate,
        dtype=self.dtype,
        seed=seed)

  def _log_prob(self, x):
    return self._log_unnormalized_prob(x) - self._log_normalization()

  def _prob(self, x):
    return math_ops.exp(self._log_prob(x))

  def _log_cdf(self, x):
    return math_ops.log(self._cdf(x))

  def _cdf(self, x):
    x = self._maybe_assert_valid_sample(x)
    # Note that igamma returns the regularized incomplete gamma function,
    # which is what we want for the CDF.
    return math_ops.igamma(self.concentration, self.rate * x)

  def _log_unnormalized_prob(self, x):
    x = self._maybe_assert_valid_sample(x)
    # (alpha - 1) log(x) - beta x
    return (self.concentration - 1.) * math_ops.log(x) - self.rate * x

  def _log_normalization(self):
    # log Z = lgamma(alpha) - alpha log(beta)
    return (math_ops.lgamma(self.concentration)
            - self.concentration * math_ops.log(self.rate))

  def _entropy(self):
    # alpha - log(beta) + lgamma(alpha) + (1 - alpha) digamma(alpha)
    return (self.concentration
            - math_ops.log(self.rate)
            + math_ops.lgamma(self.concentration)
            + ((1. - self.concentration) *
               math_ops.digamma(self.concentration)))

  def _mean(self):
    return self.concentration / self.rate

  def _variance(self):
    return self.concentration / math_ops.square(self.rate)

  def _stddev(self):
    return math_ops.sqrt(self.concentration) / self.rate

  @distribution_util.AppendDocstring(
      """The mode of a gamma distribution is `(shape - 1) / rate` when
      `shape > 1`, and `NaN` otherwise. If `self.allow_nan_stats` is `False`,
      an exception will be raised rather than returning `NaN`.""")
  def _mode(self):
    mode = (self.concentration - 1.) / self.rate
    if self.allow_nan_stats:
      # Report NaN where the mode is undefined (concentration <= 1).
      nan = array_ops.fill(
          self.batch_shape_tensor(),
          np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
          name="nan")
      return array_ops.where(self.concentration > 1., mode, nan)
    else:
      # Fail fast instead of silently returning an undefined mode.
      return control_flow_ops.with_dependencies([
          check_ops.assert_less(
              array_ops.ones([], self.dtype),
              self.concentration,
              message="mode not defined when any concentration <= 1"),
      ], mode)

  def _maybe_assert_valid_sample(self, x):
    contrib_tensor_util.assert_same_float_dtype(tensors=[x], dtype=self.dtype)
    if not self.validate_args:
      return x
    # The Gamma support is x > 0.
    return control_flow_ops.with_dependencies([
        check_ops.assert_positive(x),
    ], x)
class GammaWithSoftplusConcentrationRate(Gamma):
  """`Gamma` with softplus of `concentration` and `rate`."""

  def __init__(self,
               concentration,
               rate,
               validate_args=False,
               allow_nan_stats=True,
               name="GammaWithSoftplusConcentrationRate"):
    """Construct a Gamma whose parameters are made positive via softplus.

    Args mirror `Gamma.__init__`; `concentration` and `rate` may be any
    real-valued tensors since `softplus` maps them to positive values.
    """
    parameters = locals()
    with ops.name_scope(name, values=[concentration, rate]) as ns:
      super(GammaWithSoftplusConcentrationRate, self).__init__(
          concentration=nn.softplus(concentration,
                                    name="softplus_concentration"),
          rate=nn.softplus(rate, name="softplus_rate"),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=ns)
    # Record the caller-supplied (pre-softplus) parameters, overriding the
    # transformed values captured by the base class.
    self._parameters = parameters
@kullback_leibler.RegisterKL(Gamma, Gamma)
def _kl_gamma_gamma(g0, g1, name=None):
  """Calculate the batched KL divergence KL(g0 || g1) with g0 and g1 Gamma.

  Args:
    g0: instance of a Gamma distribution object.
    g1: instance of a Gamma distribution object.
    name: (optional) Name to use for created operations.
      Default is "kl_gamma_gamma".

  Returns:
    kl_gamma_gamma: `Tensor`. The batchwise KL(g0 || g1).
  """
  with ops.name_scope(name, "kl_gamma_gamma", values=[
      g0.concentration, g0.rate, g1.concentration, g1.rate]):
    # Result from:
    # http://www.fil.ion.ucl.ac.uk/~wpenny/publications/densities.ps
    # For derivation see:
    # http://stats.stackexchange.com/questions/11646/kullback-leibler-divergence-between-two-gamma-distributions pylint: disable=line-too-long
    # With a = concentration, b = rate:
    #   KL = (a0 - a1) digamma(a0) + lgamma(a1) - lgamma(a0)
    #        + a1 (log b0 - log b1) + a0 (b1 / b0 - 1)
    return (((g0.concentration - g1.concentration)
             * math_ops.digamma(g0.concentration))
            + math_ops.lgamma(g1.concentration)
            - math_ops.lgamma(g0.concentration)
            + g1.concentration * math_ops.log(g0.rate)
            - g1.concentration * math_ops.log(g1.rate)
            + g0.concentration * (g1.rate / g0.rate - 1.))
| |
from pymongo import MongoClient
class NoodleMongoClient(MongoClient):
    """A MongoClient convenience class that exposes default collection
    methods directly on the class.

    The client binds to one database and (optionally) one collection at
    construction time; common collection operations (find, insert, update,
    remove, save, index management) then apply to that default collection.
    """

    def __init__(self, host, database, collection=None, port=27017,
                 use_nice_key=True, create_collection=False):
        """Connect to host:port and bind the default database/collection.

        Raises:
            KeyError: the database does not exist, or the collection does
                not exist and `create_collection` is False.
        """
        super(NoodleMongoClient, self).__init__(host, port=port)
        self.use_nice_key = use_nice_key
        if database in self.database_names():
            self._database = self[database]
        else:
            raise KeyError("Could not find database '{1}' on host '{0}'".format(self.host, database))
        if collection:
            if create_collection:
                # Accessing a missing collection creates it lazily on write.
                self._collection = self._database[collection]
            else:
                if collection in self._database.collection_names():
                    self._collection = self._database[collection]
                else:
                    raise KeyError("Could not find collection '{1}' in database '{0}'".format(
                        self._database, collection))

    def database(self):
        """Return the bound database object."""
        return self._database

    def collection(self):
        """Return the bound default collection object."""
        return self._collection

    def find(self, filter, fields=None):
        """Proxy for Collection.find on the default collection."""
        return self._collection.find(filter, fields)

    def find_one(self, filter, fields=None):
        """Proxy for Collection.find_one on the default collection."""
        return self._collection.find_one(filter, fields)

    def get_category_code(self):
        """Return the 'code' value of this collection's category document."""
        result = self.get_category().get("code")
        if not result:
            raise KeyError("No category prefix was found for {0}.".format(self.get_category()))
        return result

    def get_category(self):
        """Return the single 'categories' document naming this collection.

        Raises:
            KeyError: no category document exists for the collection.
            Exception: more than one category document matches.
        """
        category = self.category_collection().find({"collection": self._collection.name})
        cnt = category.count()
        if cnt == 0:
            raise KeyError(self._collection.name)
        elif cnt > 1:
            raise Exception("More than one category found")
        return category[0]

    def category_collection(self):
        """Return the 'categories' collection; KeyError if it is missing."""
        result = None
        collection = 'categories'
        if collection in self._database.collection_names():
            result = self._database[collection]
        else:
            raise KeyError("Could not find collection '{1}' in database '{0}'".format(self._database, collection))
        return result

    def insert(self, documents):
        """Proxy for Collection.insert on the default collection."""
        return self._collection.insert(documents)

    def update(self, match_obj, set_obj, upsert=False, multi=False):
        """Proxy for Collection.update on the default collection."""
        self._collection.update(match_obj, set_obj, upsert=upsert, multi=multi)

    def remove(self, match_obj, multi=False):
        """Proxy for Collection.remove on the default collection."""
        self._collection.remove(match_obj, multi=multi)

    def purge_soft_deleted(self):
        """Physically delete every document flagged with soft_delete=True."""
        return self._collection.remove({"soft_delete": True})

    def save(self, document):
        """Proxy for Collection.save on the default collection."""
        return self._collection.save(document)

    def ensure_index(self, field, sparse=True, unique=False, background=True):
        """Create an index for `field` if missing, then verify it exists.

        Raises:
            KeyError: the server reported an index error (e.g. duplicate-key
                error, code 11000).
            Exception: the server returned an unexpected dict-shaped reply.
        """
        result = self._collection.ensure_index(
            field, sparse=sparse, unique=unique, background=background)
        # ensure_index normally returns the index name (a string); a dict
        # reply is a server-side error document.
        if isinstance(result, dict):
            if result.get("err"):
                if result["ok"] == 1:
                    # BUG FIX: the reply is a dict, so item access is
                    # required; the previous attribute access (result.code /
                    # result.err) raised AttributeError before the real
                    # error could ever be reported.
                    if result["code"] == 11000:
                        raise KeyError(result["err"])
                    else:
                        raise KeyError("{0}: {1}.".format(result["code"], result["err"]))
            else:
                raise Exception("Ensure_index: {0}.".format(result))
        self._verify_index(field, unique)
        return result

    def _verify_index(self, field, unique=False):
        """
        Verifies that an index exists for the specified field, checking for
        optional uniqueness as well.
        """
        result = False
        # .items() instead of the Python-2-only .iteritems(); works on both.
        for _, v in self._collection.index_information().items():
            index_key = v.get('key') or []
            # Only single-field indexes are considered a match.
            if len(index_key) == 1:
                if index_key[0][0] == field:
                    if unique and not v.get('unique'):
                        if not self.use_nice_key:
                            raise KeyError("No unique index available for '{0}' on '{1} {2}.{3}'".format(
                                field, self.host, self._database.name, self._collection.name))
                    result = True
                    break
        if not result:
            raise KeyError("Could not find {0}index for '{1}' on '{2} {3}.{4}'.".format(
                "unique " if unique else "", field, self.host, self._database.name, self._collection.name))

    def index_information(self):
        """Proxy for Collection.index_information on the default collection."""
        return self._collection.index_information()

    def collection_names(self, include_system_collections=False):
        """Proxy for Database.collection_names on the bound database."""
        return self._database.collection_names(include_system_collections=include_system_collections)

    def __str__(self):
        return "{0}:{1}:{2}:{3}".format(self.host, self.port, self._database.name, self._collection.name)

    @classmethod
    def create_from_mongo_spec(cls, mongo_spec, use_nice_key=True, create_collection=False):
        """
        Returns an instance of NoodleMongoClient based on colon-delimited
        host:database:collection spec
        """
        result = None
        spec = cls.parse_argstring(mongo_spec)
        if spec:
            ms = list(spec)
            result = NoodleMongoClient(ms[0],
                                       ms[2],
                                       ms[3],
                                       port=int(ms[1]),
                                       use_nice_key=use_nice_key,
                                       create_collection=create_collection)
        return result

    @classmethod
    def create_from_db_spec(cls, db_spec):
        """
        Returns an instance of NoodleMongoClient based on colon-delimited
        host:port:database spec
        """
        result = None
        spec = cls.parse_db_argstring(db_spec)
        if spec:
            ms = list(spec)
            result = NoodleMongoClient(ms[0], ms[2], collection=None, port=int(ms[1]))
        return result

    @classmethod
    def parse_argstring(cls, host_spec):
        """
        Parses colon-delimited argstring into mongo collection spec
        Returns tuple() or tuple(mongo_uri_string, port, database, collection) where
        mongo_uri_string optionally contains username and password.
        """
        default_port = 27017
        host = port = database = collection = user_pass = None
        # Parse out host spec string into tuple(host, port, database, collection) result.
        if host_spec:
            if '@' in host_spec:
                user_pass, host_spec = host_spec.split('@')
            host_spec_parts = host_spec.split(':')
            if len(host_spec_parts) == 2:
                # database:collection
                raise Exception("Host is required when provided a database and "
                                "collection name only.")
            elif len(host_spec_parts) == 3:
                # host:database:collection
                port = default_port
                host = host_spec_parts[0]
                database = host_spec_parts[1]
                collection = host_spec_parts[2]
            elif len(host_spec_parts) == 4:
                # host:port:database:collection
                host = host_spec_parts[0]
                port = int(host_spec_parts[1])
                database = host_spec_parts[2]
                collection = host_spec_parts[3]
            else:
                raise Exception("Does not match parsable connection string format.")
            # Now that connection string is parsed out, add back the user/pass to the host.
            if user_pass:
                host = "{0}@{1}".format(user_pass, host)
            mongo_uri_string = "mongodb://{0}:{1}/{2}".format(host, port, database)
            return tuple([mongo_uri_string, port, database, collection])
        # Empty spec: return the documented empty tuple (still falsy).
        return tuple()

    @classmethod
    def parse_db_argstring(cls, host_spec):
        """
        Parses colon-delimited argstring into mongo database spec
        Returns tuple() or tuple(host, port, database)
        """
        default_port = 27017
        host = port = database = user_pass = None
        # Parse out host spec string into tuple(host, port, database) result.
        if host_spec:
            if '@' in host_spec:
                user_pass, host_spec = host_spec.split('@')
            host_spec_parts = host_spec.split(':')
            if len(host_spec_parts) == 1:
                # database
                raise Exception("Host is required when provided only a database name.")
            elif len(host_spec_parts) == 2:
                # host:database
                port = default_port
                host = host_spec_parts[0]
                database = host_spec_parts[1]
            elif len(host_spec_parts) == 3:
                # host:port:database
                host = host_spec_parts[0]
                port = int(host_spec_parts[1])
                database = host_spec_parts[2]
            else:
                raise Exception("Does not match parsable connection string format.")
            # Now that connection string is parsed out, add back the user/pass to the host.
            if user_pass:
                host = "{0}@{1}".format(user_pass, host)
            mongo_uri_string = "mongodb://{0}:{1}/{2}".format(host, port, database)
            return tuple([mongo_uri_string, port, database])
        # Empty spec: return the documented empty tuple (still falsy).
        return tuple()
| |
# -*- coding: utf-8 -*-
"""
Buildings Assessments module
@author Pradnya Kulkarni <kulkarni.pradnya@gmail.com>
@author Akila Ramakr <aramakr@ncsu.edu>
@author Fran Boon <fran@aidiq.com>
Data model from:
http://www.atcouncil.org/products/downloadable-products/placards
Postearthquake Safety Evaluation of Buildings: ATC-20
http://www.atcouncil.org/pdfs/rapid.pdf
This is actually based on the New Zealand variant:
http://eden.sahanafoundation.org/wiki/BluePrintBuildingAssessments
@ToDo: add other forms (ATC-38, ATC-45)
"""
# NZSEE Level 1 & 2 (~ATC-20 Rapid Evaluation) building safety assessment
# models. Only defined when the "building" module is enabled.
module = "building"
if deployment_settings.has_module(module):

    from gluon.sql import SQLCustomType

    # Options -----------------------------------------------------------------
    # Shared option sets for both the Level 1 and Level 2 forms.
    building_area_inspected = {
        1: T("Exterior and Interior"),
        2: T("Exterior Only")
    }

    building_construction_types = {
        1: T("Timber frame"),  # Wood frame
        2: T("Steel frame"),
        3: T("Tilt-up concrete"),
        4: T("Concrete frame"),
        5: T("Concrete shear wall"),
        6: T("Unreinforced masonry"),
        7: T("Reinforced masonry"),
        8: T("RC frame with masonry infill"),
        99: T("Other")
    }

    building_primary_occupancy_opts = {
        1: T("Dwelling"),
        2: T("Other residential"),
        3: T("Public assembly"),
        4: T("School"),
        5: T("Religious"),
        6: T("Commercial/Offices"),
        7: T("Industrial"),
        8: T("Government"),
        9: T("Heritage Listed"),  # Historic
        99: T("Other")
    }

    building_evaluation_condition = {
        1: T("Minor/None"),
        2: T("Moderate"),
        3: T("Severe")
    }

    building_estimated_damage = {
        1: T("None"),
        2: "0-1%",
        3: "1-10%",
        4: "10-30%",
        5: "30-60%",
        6: "60-100%",
        7: "100%"
    }

    building_estimated_damage_image = {
        1: "tic.png",
        2: "1percent.png",
        3: "10percent.png",
        4: "10-30percent.png",
        5: "30-60percent.png",
        6: "60-100percent.png",
        7: "cross.png",
    }

    building_posting_l1_opts = {
        1: "%s (%s)" % (T("Inspected"), T("Green")),
        2: "%s (%s)" % (T("Restricted Use"), T("Yellow")),
        3: "%s (%s)" % (T("Unsafe"), T("Red")),
    }

    building_posting_l2_opts = {
        1: "%s (%s): G1" % (T("Inspected"), T("Green")),
        2: "%s (%s): G2" % (T("Inspected"), T("Green")),
        3: "%s (%s): Y1" % (T("Restricted Use"), T("Yellow")),
        4: "%s (%s): Y2" % (T("Restricted Use"), T("Yellow")),
        5: "%s (%s): R1" % (T("Unsafe"), T("Red")),
        6: "%s (%s): R2" % (T("Unsafe"), T("Red")),
        7: "%s (%s): R3" % (T("Unsafe"), T("Red")),
    }

    def uuid8anum():
        # Short pseudo-random ticket id: 4 hex chars, "-", 4 hex chars.
        import uuid
        return "%s-%s" % (str(uuid.uuid4())[0:4], str(uuid.uuid4())[4:8])

    # Custom DB type for the short ticket id: generates a fresh id when the
    # value is empty, escapes single quotes otherwise.
    s3uuid_8char = SQLCustomType(type="string",
                                 native="VARCHAR(64)",
                                 encoder=(lambda x: "'%s'" % (uuid8anum() if x == "" else str(x).replace("'", "''"))),
                                 decoder=(lambda x: x))

    # NZSEE Level 1 (~ATC-20 Rapid Evaluation) Safety Assessment Form ---------
    resourcename = "nzseel1"
    tablename = "%s_%s" % (module, resourcename)
    table = db.define_table(
        tablename,
        Field("ticket_id",
              type=s3uuid_8char,
              length=64,
              notnull=True,
              unique=True,
              writable=False,
              # NOTE(review): default evaluated when the model is run;
              # web2py model files re-execute per request, so each request
              # gets a fresh default — confirm this holds for this deployment.
              default=uuid8anum(),
              label=T("Ticket ID"),
              represent=lambda id: id and id.upper() or T("None")
              ),
        person_id(label=T("Inspector ID"), empty=False),  # pre-populated in Controller
        organisation_id(label=T("Territorial Authority")),  # Affiliation in ATC20 terminology
        Field("date", "datetime", default=request.now,
              requires=IS_DATETIME(format=s3_datetime_format),
              label=T("Inspection date and time")),
        #Field("daytime", "time", label=T("Inspection time")),
        Field("area", "integer", label=T("Areas inspected"),
              requires=IS_NULL_OR(IS_IN_SET(building_area_inspected)),
              represent=lambda opt: building_area_inspected.get(opt, UNKNOWN_OPT)),
        #Field("name", label=T("Building Name"), requires=IS_NOT_EMPTY()), # Included in location_id
        location_id(empty=False),
        Field("name_short", label=T("Building Short Name/Business Name")),
        Field("contact_name", label=T("Contact Name"), requires=IS_NOT_EMPTY()),
        Field("contact_phone", label=T("Contact Phone"), requires=IS_NOT_EMPTY()),
        Field("stories_above", "integer", label=T("Storeys at and above ground level")),  # Number of stories above ground
        Field("stories_below", "integer", label=T("Below ground level")),  # Number of stories below ground
        Field("footprint", "integer", label=T("Total gross floor area (square meters)")),
        Field("year_built", "integer", label=T("Year built")),
        Field("residential_units", "integer", label=T("Number of residential units")),
        #Field("residential_units_not_habitable", "integer",
        #      label=T("Number of residential units not habitable")),
        Field("photo", "boolean", label=T("Photo Taken?")),
        Field("construction_type", "integer", label=T("Type of Construction"),
              requires=IS_NULL_OR(IS_IN_SET(building_construction_types)),
              represent=lambda opt: building_construction_types.get(opt, UNKNOWN_OPT)),
        Field("construction_type_other", label="(%s)" % T("specify")),
        Field("primary_occupancy", "integer", label=T("Primary Occupancy"),
              requires=IS_NULL_OR(IS_IN_SET(building_primary_occupancy_opts)),
              represent=lambda opt: building_primary_occupancy_opts.get(opt, UNKNOWN_OPT)),
        Field("primary_occupancy_other", label="(%s)" % T("specify")),
        # Hazard condition ratings (Minor/None, Moderate, Severe)
        Field("collapse", "integer",
              label=T("Collapse, partial collapse, off foundation"),
              requires=IS_NULL_OR(IS_IN_SET(building_evaluation_condition)),
              represent=lambda opt: building_evaluation_condition.get(opt, UNKNOWN_OPT)),
        Field("leaning", "integer", label=T("Building or storey leaning"),
              requires=IS_NULL_OR(IS_IN_SET(building_evaluation_condition)),
              represent=lambda opt: building_evaluation_condition.get(opt, UNKNOWN_OPT)),
        Field("structural", "integer",
              label=T("Wall or other structural damage"),
              requires=IS_NULL_OR(IS_IN_SET(building_evaluation_condition)),
              represent=lambda opt: building_evaluation_condition.get(opt, UNKNOWN_OPT)),
        Field("falling", "integer",
              label=T("Overhead falling hazard"),
              requires=IS_NULL_OR(IS_IN_SET(building_evaluation_condition)),
              represent=lambda opt: building_evaluation_condition.get(opt, UNKNOWN_OPT)),
        Field("slips", "integer",
              label=T("Ground movement, settlement, slips"),
              requires=IS_NULL_OR(IS_IN_SET(building_evaluation_condition)),
              represent=lambda opt: building_evaluation_condition.get(opt, UNKNOWN_OPT)),
        Field("neighbour", "integer",
              label=T("Neighbouring building hazard"),
              requires=IS_NULL_OR(IS_IN_SET(building_evaluation_condition)),
              represent=lambda opt: building_evaluation_condition.get(opt, UNKNOWN_OPT)),
        Field("other", "integer", label=T("Other"),
              requires=IS_NULL_OR(IS_IN_SET(building_evaluation_condition)),
              represent=lambda opt: building_evaluation_condition.get(opt, UNKNOWN_OPT)),
        Field("other_details", label="(%s)" % T("specify")),
        Field("action_comments", "text", label=T("Comments")),
        Field("posting", "integer",
              requires=IS_IN_SET(building_posting_l1_opts),
              represent=lambda opt: building_posting_l1_opts.get(opt, UNKNOWN_OPT)),
        Field("restrictions", "text", label=T("Record any restriction on use or entry")),
        #Field("posting_comments", "text", label=T("Comments")),
        Field("barricades", "boolean",
              label=T("Barricades are needed")),
        Field("barricades_details", "text",
              label="(%s)" % T("state location")),
        Field("detailed_evaluation", "boolean",
              label=T("Level 2 or detailed engineering evaluation recommended")),
        Field("detailed_structural", "boolean",
              label=T("Structural")),
        Field("detailed_geotechnical", "boolean",
              label=T("Geotechnical")),
        Field("detailed_other", "boolean", label=T("Other")),
        Field("detailed_other_details", label="(%s)" % T("specify")),
        Field("other_recommendations", "text",
              label=T("Other recommendations")),
        Field("estimated_damage", "integer",
              label=T("Estimated Overall Building Damage"),
              comment="(%s)" % T("Exclude contents"),
              requires=IS_IN_SET(building_estimated_damage),
              represent=lambda opt: building_estimated_damage.get(opt, UNKNOWN_OPT)),
        *s3_meta_fields())

    # CRUD strings
    ADD_ASSESSMENT = T("Add Level 1 Assessment")
    LIST_ASSESSMENTS = T("List Level 1 Assessments")
    s3.crud_strings[tablename] = Storage(
        title_create=ADD_ASSESSMENT,
        title_display=T("Level 1 Assessment Details"),
        title_list=LIST_ASSESSMENTS,
        title_update=T("Edit Level 1 Assessment"),
        title_search=T("Search Level 1 Assessments"),
        subtitle_create=T("Add New Level 1 Assessment"),
        subtitle_list=T("Level 1 Assessments"),
        label_list_button=LIST_ASSESSMENTS,
        label_create_button=ADD_ASSESSMENT,
        label_delete_button=T("Delete Level 1 Assessment"),
        msg_record_created=T("Level 1 Assessment added"),
        msg_record_modified=T("Level 1 Assessment updated"),
        msg_record_deleted=T("Level 1 Assessment deleted"),
        msg_list_empty=T("No Level 1 Assessments currently registered"))

    # Simple full-text search on the ticket id.
    building_nzseel1_search = s3base.S3Search(
        name="nzseel1_search_simple",
        label=T("Ticket ID"),
        comment=T("To search for an assessment, enter any portion the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments."),
        field=["ticket_id"])

    # Set as default search method
    s3mgr.configure(tablename,
                    search_method=building_nzseel1_search)

    # -------------------------------------------------------------------------
    # NZSEE Level 2 (~ATC-20 Rapid Evaluation) Safety Assessment Form
    resourcename = "nzseel2"
    tablename = "%s_%s" % (module, resourcename)
    ADD_IMAGE = T("Add Photo")
    table = db.define_table(
        tablename,
        Field("ticket_id",
              type=s3uuid_8char,
              length=64,
              notnull=True,
              unique=True,
              label=T("Ticket ID"),
              represent=lambda id: id and id.upper() or T("None")),
        person_id(label=T("Inspector ID"), empty=False),  # pre-populated in Controller
        organisation_id(label=T("Territorial Authority")),  # Affiliation in ATC20 terminology
        Field("date", "datetime", default=request.now,
              requires=IS_DATETIME(format=s3_datetime_format),
              label=T("Inspection date and time")),
        #Field("daytime", "time", label=T("Inspection time")),
        Field("area", "integer", label=T("Areas inspected"),
              requires=IS_NULL_OR(IS_IN_SET(building_area_inspected)),
              represent=lambda opt: building_area_inspected.get(opt, UNKNOWN_OPT)),
        #Field("name", label=T("Building Name"), requires=IS_NOT_EMPTY()), # Included in location_id
        location_id(empty=False),
        Field("name_short", label=T("Building Short Name/Business Name")),
        Field("contact_name", label=T("Contact Name"), requires=IS_NOT_EMPTY()),
        Field("contact_phone", label=T("Contact Phone"), requires=IS_NOT_EMPTY()),
        Field("stories_above", "integer", label=T("Storeys at and above ground level")),  # Number of stories above ground
        Field("stories_below", "integer", label=T("Below ground level")),  # Number of stories below ground
        Field("footprint", "integer", label=T("Total gross floor area (square meters)")),
        Field("year_built", "integer", label=T("Year built")),
        Field("residential_units", "integer", label=T("Number of residential units")),
        #Field("residential_units_not_habitable", "integer",
        #      label=T("Number of residential units not habitable")),
        Field("photo", "boolean", label=T("Photo Taken?")),
        Field("construction_type", "integer", label=T("Type of Construction"),
              requires=IS_NULL_OR(IS_IN_SET(building_construction_types)),
              represent=lambda opt: building_construction_types.get(opt, UNKNOWN_OPT)),
        Field("construction_type_other", label="(%s)" % T("specify")),
        Field("primary_occupancy", "integer", label=T("Primary Occupancy"),
              requires=IS_NULL_OR(IS_IN_SET(building_primary_occupancy_opts)),
              represent=lambda opt: building_primary_occupancy_opts.get(opt, UNKNOWN_OPT)),
        Field("primary_occupancy_other", label="(%s)" % T("specify")),
        # Overall hazard condition ratings
        Field("collapse", "integer",
              label=T("Collapse, partial collapse, off foundation"),
              requires=IS_NULL_OR(IS_IN_SET(building_evaluation_condition)),
              represent=lambda opt: building_evaluation_condition.get(opt, UNKNOWN_OPT)),
        Field("leaning", "integer", label=T("Building or storey leaning"),
              requires=IS_NULL_OR(IS_IN_SET(building_evaluation_condition)),
              represent=lambda opt: building_evaluation_condition.get(opt, UNKNOWN_OPT)),
        Field("structural", "integer",
              label=T("Wall or other structural damage"),
              requires=IS_NULL_OR(IS_IN_SET(building_evaluation_condition)),
              represent=lambda opt: building_evaluation_condition.get(opt, UNKNOWN_OPT)),
        Field("falling", "integer",
              label=T("Overhead falling hazard"),
              requires=IS_NULL_OR(IS_IN_SET(building_evaluation_condition)),
              represent=lambda opt: building_evaluation_condition.get(opt, UNKNOWN_OPT)),
        Field("slips", "integer",
              label=T("Ground movement, settlement, slips"),
              requires=IS_NULL_OR(IS_IN_SET(building_evaluation_condition)),
              represent=lambda opt: building_evaluation_condition.get(opt, UNKNOWN_OPT)),
        Field("neighbour", "integer",
              label=T("Neighbouring building hazard"),
              requires=IS_NULL_OR(IS_IN_SET(building_evaluation_condition)),
              represent=lambda opt: building_evaluation_condition.get(opt, UNKNOWN_OPT)),
        Field("other", "integer", label=T("Electrical, gas, sewerage, water, hazmats"),
              requires=IS_NULL_OR(IS_IN_SET(building_evaluation_condition)),
              represent=lambda opt: building_evaluation_condition.get(opt, UNKNOWN_OPT)),
        #Field("other_details", label="(%s)" % T("specify")),
        Field("action_comments", "text", label=T("Comments")),
        Field("posting_existing", "integer",
              label=T("Existing Placard Type"),
              requires=IS_IN_SET(building_posting_l1_opts),
              represent=lambda opt: building_posting_l1_opts.get(opt, UNKNOWN_OPT)),
        Field("posting", "integer",
              label=T("Choose a new posting based on the new evaluation and team judgement. Severe conditions affecting the whole building are grounds for an UNSAFE posting. Localised Severe and overall Moderate conditions may require a RESTRICTED USE. Place INSPECTED placard at main entrance. Post all other placards at every significant entrance."),
              requires=IS_IN_SET(building_posting_l2_opts),
              #@ToDo: comment= Guidance on meaning of options
              represent=lambda opt: building_posting_l2_opts.get(opt, UNKNOWN_OPT)),
        Field("restrictions", "text", label=T("Record any restriction on use or entry")),
        #Field("posting_comments", "text", label=T("Comments")),
        Field("barricades", "boolean",
              label=T("Barricades are needed")),
        Field("barricades_details", "text",
              label="(%s)" % T("state location")),
        Field("detailed_evaluation", "boolean",
              label=T("Level 2 or detailed engineering evaluation recommended")),
        Field("detailed_structural", "boolean",
              label=T("Structural")),
        Field("detailed_geotechnical", "boolean",
              label=T("Geotechnical")),
        Field("detailed_other", "boolean", label=T("Other")),
        Field("detailed_other_details", label="(%s)" % T("specify")),
        Field("other_recommendations", "text",
              label=T("Other recommendations")),
        Field("estimated_damage", "integer",
              label=T("Estimated Overall Building Damage"),
              comment="(%s)" % T("Exclude contents"),
              requires=IS_IN_SET(building_estimated_damage),
              represent=lambda opt: building_estimated_damage.get(opt, UNKNOWN_OPT)),
        # Structural hazard details
        Field("structural_foundations", "integer",
              label=T("Foundations"),
              requires=IS_NULL_OR(IS_IN_SET(building_evaluation_condition)),
              represent=lambda opt: building_evaluation_condition.get(opt, UNKNOWN_OPT)),
        Field("structural_roofs", "integer",
              label=T("Roofs, floors (vertical load)"),
              requires=IS_NULL_OR(IS_IN_SET(building_evaluation_condition)),
              represent=lambda opt: building_evaluation_condition.get(opt, UNKNOWN_OPT)),
        Field("structural_columns", "integer",
              label=T("Columns, pilasters, corbels"),
              requires=IS_NULL_OR(IS_IN_SET(building_evaluation_condition)),
              represent=lambda opt: building_evaluation_condition.get(opt, UNKNOWN_OPT)),
        Field("structural_diaphragms", "integer",
              label=T("Diaphragms, horizontal bracing"),
              requires=IS_NULL_OR(IS_IN_SET(building_evaluation_condition)),
              represent=lambda opt: building_evaluation_condition.get(opt, UNKNOWN_OPT)),
        Field("structural_precast", "integer",
              label=T("Pre-cast connections"),
              requires=IS_NULL_OR(IS_IN_SET(building_evaluation_condition)),
              represent=lambda opt: building_evaluation_condition.get(opt, UNKNOWN_OPT)),
        Field("structural_beam", "integer",
              label=T("Beam"),
              requires=IS_NULL_OR(IS_IN_SET(building_evaluation_condition)),
              represent=lambda opt: building_evaluation_condition.get(opt, UNKNOWN_OPT)),
        # Non-structural hazard details
        Field("non_parapets", "integer",
              label=T("Parapets, ornamentation"),
              requires=IS_NULL_OR(IS_IN_SET(building_evaluation_condition)),
              represent=lambda opt: building_evaluation_condition.get(opt, UNKNOWN_OPT)),
        Field("non_cladding", "integer",
              label=T("Cladding, glazing"),
              requires=IS_NULL_OR(IS_IN_SET(building_evaluation_condition)),
              represent=lambda opt: building_evaluation_condition.get(opt, UNKNOWN_OPT)),
        Field("non_ceilings", "integer",
              label=T("Ceilings, light fixtures"),
              requires=IS_NULL_OR(IS_IN_SET(building_evaluation_condition)),
              represent=lambda opt: building_evaluation_condition.get(opt, UNKNOWN_OPT)),
        Field("non_interior", "integer",
              label=T("Interior walls, partitions"),
              requires=IS_NULL_OR(IS_IN_SET(building_evaluation_condition)),
              represent=lambda opt: building_evaluation_condition.get(opt, UNKNOWN_OPT)),
        Field("non_elevators", "integer",
              label=T("Elevators"),
              requires=IS_NULL_OR(IS_IN_SET(building_evaluation_condition)),
              represent=lambda opt: building_evaluation_condition.get(opt, UNKNOWN_OPT)),
        Field("non_stairs", "integer",
              label="%s/ %s" % (T("Stairs"), T("Exits")),
              requires=IS_NULL_OR(IS_IN_SET(building_evaluation_condition)),
              represent=lambda opt: building_evaluation_condition.get(opt, UNKNOWN_OPT)),
        Field("non_utilities", "integer",
              label=T("Utilities"),
              requires=IS_NULL_OR(IS_IN_SET(building_evaluation_condition)),
              comment="(%s)" % T("eg. gas, electricity, water"),
              represent=lambda opt: building_evaluation_condition.get(opt, UNKNOWN_OPT)),
        Field("non_other", "integer",
              label=T("Other"),
              requires=IS_NULL_OR(IS_IN_SET(building_evaluation_condition)),
              represent=lambda opt: building_evaluation_condition.get(opt, UNKNOWN_OPT)),
        # Geotechnical hazard details
        Field("geotechnical_slope", "integer",
              label=T("Slope failure, debris"),
              requires=IS_NULL_OR(IS_IN_SET(building_evaluation_condition)),
              represent=lambda opt: building_evaluation_condition.get(opt, UNKNOWN_OPT)),
        Field("geotechnical_ground", "integer",
              label=T("Ground movement, fissures"),
              requires=IS_NULL_OR(IS_IN_SET(building_evaluation_condition)),
              represent=lambda opt: building_evaluation_condition.get(opt, UNKNOWN_OPT)),
        Field("geotechnical_soil", "integer",
              label=T("Soil bulging, liquefaction"),
              requires=IS_NULL_OR(IS_IN_SET(building_evaluation_condition)),
              represent=lambda opt: building_evaluation_condition.get(opt, UNKNOWN_OPT)),
        Field("general_comments", "text",
              label=T("General Comment")),
        Field("sketch", "upload",
              autodelete=True,
              # BUG FIX: the validator allows up to 800x800 but the error
              # message previously claimed "max. 300x300 pixels!" — the
              # message now matches the enforced limit.
              requires=IS_EMPTY_OR(IS_IMAGE(maxsize=(800, 800),
                                            error_message=T("Upload an image file (bmp, gif, jpeg or png), max. 800x800 pixels!"))),
              label=T("Sketch"),
              comment=DIV(A(ADD_IMAGE,
                            _class="colorbox",
                            _href=URL(c="doc", f="image", args="create", vars=dict(format="popup")),
                            _target="top",
                            _title=ADD_IMAGE),
                          DIV(_class="tooltip",
                              _title="%s|%s" % (T("Sketch"),
                                                T("Provide an optional sketch of the entire building or damage points. Indicate damage points."))))),
        Field("recommendations", "text",
              label=T("Recommendations for Repair and Reconstruction or Demolition"),
              comment="(%s)" % T("Optional")),
        *s3_meta_fields())

    # CRUD strings
    ADD_ASSESSMENT = T("Add Level 2 Assessment")
    LIST_ASSESSMENTS = T("List Level 2 Assessments")
    s3.crud_strings[tablename] = Storage(
        title_create=ADD_ASSESSMENT,
        title_display=T("Level 2 Assessment Details"),
        title_list=LIST_ASSESSMENTS,
        title_update=T("Edit Level 2 Assessment"),
        title_search=T("Search Level 2 Assessments"),
        subtitle_create=T("Add New Level 2 Assessment"),
        subtitle_list=T("Level 2 Assessments"),
        label_list_button=LIST_ASSESSMENTS,
        label_create_button=ADD_ASSESSMENT,
        label_delete_button=T("Delete Level 2 Assessment"),
        msg_record_created=T("Level 2 Assessment added"),
        msg_record_modified=T("Level 2 Assessment updated"),
        msg_record_deleted=T("Level 2 Assessment deleted"),
        msg_list_empty=T("No Level 2 Assessments currently registered"))

    # Simple full-text search on the ticket id.
    building_nzseel2_search = s3base.S3Search(
        name="nzseel2_search_simple",
        label=T("Ticket ID"),
        comment=T("To search for an assessment, enter any portion the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments."),
        field=["ticket_id"])

    # Set as default search method
    s3mgr.configure(tablename,
                    search_method=building_nzseel2_search)

    # -------------------------------------------------------------------------
| |
import cgi
import codecs
import copy
from io import BytesIO
from itertools import chain
from urllib.parse import parse_qsl, quote, urlencode, urljoin, urlsplit
from django.conf import settings
from django.core import signing
from django.core.exceptions import (
DisallowedHost, ImproperlyConfigured, RequestDataTooBig, TooManyFieldsSent,
)
from django.core.files import uploadhandler
from django.http.multipartparser import MultiPartParser, MultiPartParserError
from django.utils.datastructures import (
CaseInsensitiveMapping, ImmutableList, MultiValueDict,
)
from django.utils.encoding import escape_uri_path, iri_to_uri
from django.utils.functional import cached_property
from django.utils.http import is_same_domain
from django.utils.regex_helper import _lazy_re_compile
from .multipartparser import parse_header
# Sentinel used by get_signed_cookie() to distinguish "no default supplied"
# from an explicit default value of None.
RAISE_ERROR = object()
# Hostname (or bracketed IPv6 literal) with an optional :port suffix.
host_validation_re = _lazy_re_compile(r"^([a-z0-9.-]+|\[[a-f0-9]*:[a-f0-9\.:]+\])(:\d+)?$")
class UnreadablePostError(OSError):
    """The request body could not be read."""
    pass
class RawPostDataException(Exception):
    """
    You cannot access raw_post_data from a request that has
    multipart/* POST data if it has been accessed via POST,
    FILES, etc.
    """
    pass
class HttpRequest:
    """A basic HTTP request."""

    # The encoding used in GET/POST dicts. None means use default setting.
    _encoding = None
    # Class-level default list of upload handlers; NOTE(review): presumably
    # replaced per-instance elsewhere in the class — confirm, the rest of
    # the class is not visible here.
    _upload_handlers = []
def __init__(self):
    # WARNING: The `WSGIRequest` subclass doesn't call `super`.
    # Any variable assignment made here should also happen in
    # `WSGIRequest.__init__()`.

    # Mutable here so views/tests can populate them; subclasses replace
    # these with parsed, immutable versions.
    self.GET = QueryDict(mutable=True)
    self.POST = QueryDict(mutable=True)
    self.COOKIES = {}
    self.META = {}
    self.FILES = MultiValueDict()

    self.path = ''
    self.path_info = ''
    self.method = None
    self.resolver_match = None
    self.content_type = None
    self.content_params = None
def __repr__(self):
    """Show method and full path when available, else just the class name."""
    full_path = self.get_full_path()
    if self.method is None or not full_path:
        return '<%s>' % self.__class__.__name__
    return '<%s: %s %r>' % (self.__class__.__name__, self.method, full_path)
@cached_property
def headers(self):
    # Header-style view over self.META; HttpHeaders is defined elsewhere
    # in this module. Cached for the lifetime of the request.
    return HttpHeaders(self.META)
@cached_property
def accepted_types(self):
    """Return a list of MediaType instances."""
    # Parsed from the Accept header; '*/*' (accept anything) when absent.
    return parse_accept_header(self.headers.get('Accept', '*/*'))
def accepts(self, media_type):
    """Return True if any client-accepted media type matches media_type."""
    for accepted_type in self.accepted_types:
        if accepted_type.match(media_type):
            return True
    return False
def _set_content_type_params(self, meta):
    """Set content_type, content_params, and encoding."""
    # NOTE(review): uses the deprecated `cgi` module (PEP 594). The
    # `parse_header` imported from .multipartparser at the top of the file
    # may be the intended replacement — confirm before switching, as their
    # signatures differ.
    self.content_type, self.content_params = cgi.parse_header(meta.get('CONTENT_TYPE', ''))
    if 'charset' in self.content_params:
        try:
            codecs.lookup(self.content_params['charset'])
        except LookupError:
            # Unknown/invalid charset: keep the default encoding.
            pass
        else:
            self.encoding = self.content_params['charset']
def _get_raw_host(self):
    """
    Return the HTTP host using the environment or request headers. Skip
    allowed hosts protection, so may return an insecure host.
    """
    meta = self.META
    # Three sources, in decreasing order of preference.
    if settings.USE_X_FORWARDED_HOST and 'HTTP_X_FORWARDED_HOST' in meta:
        return meta['HTTP_X_FORWARDED_HOST']
    if 'HTTP_HOST' in meta:
        return meta['HTTP_HOST']
    # Reconstruct the host using the algorithm from PEP 333.
    host = meta['SERVER_NAME']
    server_port = self.get_port()
    default_port = '443' if self.is_secure() else '80'
    if server_port != default_port:
        host = '%s:%s' % (host, server_port)
    return host
def get_host(self):
    """Return the HTTP host using the environment or request headers."""
    host = self._get_raw_host()

    # Allow variants of localhost if ALLOWED_HOSTS is empty and DEBUG=True.
    allowed_hosts = settings.ALLOWED_HOSTS
    if settings.DEBUG and not allowed_hosts:
        allowed_hosts = ['.localhost', '127.0.0.1', '[::1]']

    domain, port = split_domain_port(host)
    if domain and validate_host(domain, allowed_hosts):
        return host

    # Build a helpful error message before refusing the request.
    msg = "Invalid HTTP_HOST header: %r." % host
    if domain:
        msg += " You may need to add %r to ALLOWED_HOSTS." % domain
    else:
        msg += " The domain name provided is not valid according to RFC 1034/1035."
    raise DisallowedHost(msg)
def get_port(self):
    """Return the port number for the request as a string."""
    meta = self.META
    if settings.USE_X_FORWARDED_PORT and 'HTTP_X_FORWARDED_PORT' in meta:
        return str(meta['HTTP_X_FORWARDED_PORT'])
    return str(meta['SERVER_PORT'])
def get_full_path(self, force_append_slash=False):
    """Return the escaped path plus any query string for this request."""
    return self._get_full_path(self.path, force_append_slash)
def get_full_path_info(self, force_append_slash=False):
    """Like get_full_path(), but based on path_info instead of path."""
    return self._get_full_path(self.path_info, force_append_slash)
def _get_full_path(self, path, force_append_slash):
    """Assemble escaped path + optional trailing slash + encoded query."""
    # RFC 3986 requires query string arguments to be in the ASCII range.
    # Rather than crash if this doesn't happen, we encode defensively.
    query_string = self.META.get('QUERY_STRING', '')
    trailing_slash = '/' if force_append_slash and not path.endswith('/') else ''
    query = ('?' + iri_to_uri(query_string)) if query_string else ''
    return '%s%s%s' % (escape_uri_path(path), trailing_slash, query)
def get_signed_cookie(self, key, default=RAISE_ERROR, salt='', max_age=None):
    """
    Attempt to return a signed cookie. If the signature fails or the
    cookie has expired, raise an exception, unless the `default` argument
    is provided, in which case return that value.
    """
    try:
        cookie_value = self.COOKIES[key]
    except KeyError:
        if default is RAISE_ERROR:
            raise
        return default
    signer = signing.get_cookie_signer(salt=key + salt)
    try:
        return signer.unsign(cookie_value, max_age=max_age)
    except signing.BadSignature:
        if default is RAISE_ERROR:
            raise
        return default
def get_raw_uri(self):
"""
Return an absolute URI from variables available in this request. Skip
allowed hosts protection, so may return insecure URI.
"""
return '{scheme}://{host}{path}'.format(
scheme=self.scheme,
host=self._get_raw_host(),
path=self.get_full_path(),
)
    def build_absolute_uri(self, location=None):
        """
        Build an absolute URI from the location and the variables available in
        this request. If no ``location`` is specified, build the absolute URI
        using request.get_full_path(). If the location is absolute, convert it
        to an RFC 3987 compliant URI and return it. If location is relative or
        is scheme-relative (i.e., ``//example.com/``), urljoin() it to a base
        URL constructed from the request variables.
        """
        if location is None:
            # Make it an absolute url (but schemeless and domainless) for the
            # edge case that the path starts with '//'.
            location = '//%s' % self.get_full_path()
        else:
            # Coerce lazy locations.
            location = str(location)
        bits = urlsplit(location)
        # A location with both a scheme and netloc is already absolute and is
        # returned as-is (after IRI-to-URI conversion below).
        if not (bits.scheme and bits.netloc):
            # Handle the simple, most common case. If the location is absolute
            # and a scheme or host (netloc) isn't provided, skip an expensive
            # urljoin() as long as no path segments are '.' or '..'.
            if (bits.path.startswith('/') and not bits.scheme and not bits.netloc and
                    '/./' not in bits.path and '/../' not in bits.path):
                # If location starts with '//' but has no netloc, reuse the
                # schema and netloc from the current request. Strip the double
                # slashes and continue as if it wasn't specified.
                if location.startswith('//'):
                    location = location[2:]
                location = self._current_scheme_host + location
            else:
                # Join the constructed URL with the provided location, which
                # allows the provided location to apply query strings to the
                # base path.
                location = urljoin(self._current_scheme_host + self.path, location)
        return iri_to_uri(location)
@cached_property
def _current_scheme_host(self):
return '{}://{}'.format(self.scheme, self.get_host())
def _get_scheme(self):
"""
Hook for subclasses like WSGIRequest to implement. Return 'http' by
default.
"""
return 'http'
@property
def scheme(self):
if settings.SECURE_PROXY_SSL_HEADER:
try:
header, secure_value = settings.SECURE_PROXY_SSL_HEADER
except ValueError:
raise ImproperlyConfigured(
'The SECURE_PROXY_SSL_HEADER setting must be a tuple containing two values.'
)
header_value = self.META.get(header)
if header_value is not None:
return 'https' if header_value == secure_value else 'http'
return self._get_scheme()
def is_secure(self):
return self.scheme == 'https'
    @property
    def encoding(self):
        # Charset used when decoding GET/POST data; mutated via the setter.
        return self._encoding
@encoding.setter
def encoding(self, val):
"""
Set the encoding used for GET/POST accesses. If the GET or POST
dictionary has already been created, remove and recreate it on the
next access (so that it is decoded correctly).
"""
self._encoding = val
if hasattr(self, 'GET'):
del self.GET
if hasattr(self, '_post'):
del self._post
def _initialize_handlers(self):
self._upload_handlers = [uploadhandler.load_handler(handler, self)
for handler in settings.FILE_UPLOAD_HANDLERS]
@property
def upload_handlers(self):
if not self._upload_handlers:
# If there are no upload handlers defined, initialize them from settings.
self._initialize_handlers()
return self._upload_handlers
@upload_handlers.setter
def upload_handlers(self, upload_handlers):
if hasattr(self, '_files'):
raise AttributeError("You cannot set the upload handlers after the upload has been processed.")
self._upload_handlers = upload_handlers
def parse_file_upload(self, META, post_data):
"""Return a tuple of (POST QueryDict, FILES MultiValueDict)."""
self.upload_handlers = ImmutableList(
self.upload_handlers,
warning="You cannot alter upload handlers after the upload has been processed."
)
parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding)
return parser.parse()
    @property
    def body(self):
        # Raw request body as bytes, read once and cached in self._body.
        if not hasattr(self, '_body'):
            if self._read_started:
                raise RawPostDataException("You cannot access body after reading from request's data stream")
            # Limit the maximum request data size that will be handled in-memory.
            if (settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None and
                    int(self.META.get('CONTENT_LENGTH') or 0) > settings.DATA_UPLOAD_MAX_MEMORY_SIZE):
                raise RequestDataTooBig('Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE.')
            try:
                self._body = self.read()
            except OSError as e:
                raise UnreadablePostError(*e.args) from e
            # Rewind: later consumers (e.g. form parsing) re-read the cached bytes.
            self._stream = BytesIO(self._body)
        return self._body
def _mark_post_parse_error(self):
self._post = QueryDict()
self._files = MultiValueDict()
    def _load_post_and_files(self):
        """Populate self._post and self._files if the content-type is a form type"""
        # Non-POST requests get empty POST/FILES containers.
        if self.method != 'POST':
            self._post, self._files = QueryDict(encoding=self._encoding), MultiValueDict()
            return
        # Stream already consumed and body not cached: parsing is impossible.
        if self._read_started and not hasattr(self, '_body'):
            self._mark_post_parse_error()
            return
        if self.content_type == 'multipart/form-data':
            if hasattr(self, '_body'):
                # Use already read data
                data = BytesIO(self._body)
            else:
                # Stream straight from the request (self supports read/readline).
                data = self
            try:
                self._post, self._files = self.parse_file_upload(self.META, data)
            except MultiPartParserError:
                # An error occurred while parsing POST data. Since when
                # formatting the error the request handler might access
                # self.POST, set self._post and self._file to prevent
                # attempts to parse POST data again.
                self._mark_post_parse_error()
                raise
        elif self.content_type == 'application/x-www-form-urlencoded':
            self._post, self._files = QueryDict(self.body, encoding=self._encoding), MultiValueDict()
        else:
            # Any other content type: leave POST empty; body stays accessible.
            self._post, self._files = QueryDict(encoding=self._encoding), MultiValueDict()
def close(self):
if hasattr(self, '_files'):
for f in chain.from_iterable(list_[1] for list_ in self._files.lists()):
f.close()
# File-like and iterator interface.
#
# Expects self._stream to be set to an appropriate source of bytes by
# a corresponding request subclass (e.g. WSGIRequest).
# Also when request data has already been read by request.POST or
# request.body, self._stream points to a BytesIO instance
# containing that data.
def read(self, *args, **kwargs):
self._read_started = True
try:
return self._stream.read(*args, **kwargs)
except OSError as e:
raise UnreadablePostError(*e.args) from e
def readline(self, *args, **kwargs):
self._read_started = True
try:
return self._stream.readline(*args, **kwargs)
except OSError as e:
raise UnreadablePostError(*e.args) from e
    def __iter__(self):
        # iter(callable, sentinel): call readline() until it returns b''.
        return iter(self.readline, b'')
def readlines(self):
return list(self)
class HttpHeaders(CaseInsensitiveMapping):
    """Expose the HTTP headers of a WSGI environ as a read-only mapping."""
    HTTP_PREFIX = 'HTTP_'
    # PEP 333 gives two headers which aren't prepended with HTTP_.
    UNPREFIXED_HEADERS = {'CONTENT_TYPE', 'CONTENT_LENGTH'}
    def __init__(self, environ):
        headers = {}
        for raw_name, value in environ.items():
            header_name = self.parse_header_name(raw_name)
            if header_name:
                headers[header_name] = value
        super().__init__(headers)
    def __getitem__(self, key):
        """Allow header lookup using underscores in place of hyphens."""
        return super().__getitem__(key.replace('_', '-'))
    @classmethod
    def parse_header_name(cls, header):
        # Return the canonical 'Header-Name' form, or None for non-headers.
        if header.startswith(cls.HTTP_PREFIX):
            header = header[len(cls.HTTP_PREFIX):]
        elif header not in cls.UNPREFIXED_HEADERS:
            return None
        return header.replace('_', '-').title()
class QueryDict(MultiValueDict):
    """
    A specialized MultiValueDict which represents a query string.
    A QueryDict can be used to represent GET or POST data. It subclasses
    MultiValueDict since keys in such data can be repeated, for instance
    in the data from a form with a <select multiple> field.
    By default QueryDicts are immutable, though the copy() method
    will always return a mutable copy.
    Both keys and values set on this class are converted from the given encoding
    (DEFAULT_CHARSET by default) to str.
    """
    # These are both reset in __init__, but is specified here at the class
    # level so that unpickling will have valid values
    _mutable = True
    _encoding = None
    def __init__(self, query_string=None, mutable=False, encoding=None):
        # Parse `query_string` (str or bytes); the instance is built mutable
        # and frozen at the end unless mutable=True was requested.
        super().__init__()
        self.encoding = encoding or settings.DEFAULT_CHARSET
        query_string = query_string or ''
        parse_qsl_kwargs = {
            'keep_blank_values': True,
            'encoding': self.encoding,
            'max_num_fields': settings.DATA_UPLOAD_MAX_NUMBER_FIELDS,
        }
        if isinstance(query_string, bytes):
            # query_string normally contains URL-encoded data, a subset of ASCII.
            try:
                query_string = query_string.decode(self.encoding)
            except UnicodeDecodeError:
                # ... but some user agents are misbehaving :-(
                query_string = query_string.decode('iso-8859-1')
        try:
            for key, value in parse_qsl(query_string, **parse_qsl_kwargs):
                self.appendlist(key, value)
        except ValueError as e:
            # ValueError can also be raised if the strict_parsing argument to
            # parse_qsl() is True. As that is not used by Django, assume that
            # the exception was raised by exceeding the value of max_num_fields
            # instead of fragile checks of exception message strings.
            raise TooManyFieldsSent(
                'The number of GET/POST parameters exceeded '
                'settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.'
            ) from e
        self._mutable = mutable
    @classmethod
    def fromkeys(cls, iterable, value='', mutable=False, encoding=None):
        """
        Return a new QueryDict with keys (may be repeated) from an iterable and
        values from value.
        """
        q = cls('', mutable=True, encoding=encoding)
        for key in iterable:
            q.appendlist(key, value)
        if not mutable:
            q._mutable = False
        return q
    @property
    def encoding(self):
        # Lazily fall back to DEFAULT_CHARSET (class-level default is None).
        if self._encoding is None:
            self._encoding = settings.DEFAULT_CHARSET
        return self._encoding
    @encoding.setter
    def encoding(self, value):
        self._encoding = value
    def _assert_mutable(self):
        # Guard invoked by every mutating method below.
        if not self._mutable:
            raise AttributeError("This QueryDict instance is immutable")
    def __setitem__(self, key, value):
        self._assert_mutable()
        # Keys and values may arrive as bytes; normalize to str.
        key = bytes_to_text(key, self.encoding)
        value = bytes_to_text(value, self.encoding)
        super().__setitem__(key, value)
    def __delitem__(self, key):
        self._assert_mutable()
        super().__delitem__(key)
    def __copy__(self):
        # Copies are always mutable, matching copy() below.
        result = self.__class__('', mutable=True, encoding=self.encoding)
        for key, value in self.lists():
            result.setlist(key, value)
        return result
    def __deepcopy__(self, memo):
        result = self.__class__('', mutable=True, encoding=self.encoding)
        memo[id(self)] = result
        for key, value in self.lists():
            result.setlist(copy.deepcopy(key, memo), copy.deepcopy(value, memo))
        return result
    def setlist(self, key, list_):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        list_ = [bytes_to_text(elt, self.encoding) for elt in list_]
        super().setlist(key, list_)
    def setlistdefault(self, key, default_list=None):
        self._assert_mutable()
        return super().setlistdefault(key, default_list)
    def appendlist(self, key, value):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        value = bytes_to_text(value, self.encoding)
        super().appendlist(key, value)
    def pop(self, key, *args):
        self._assert_mutable()
        return super().pop(key, *args)
    def popitem(self):
        self._assert_mutable()
        return super().popitem()
    def clear(self):
        self._assert_mutable()
        super().clear()
    def setdefault(self, key, default=None):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        default = bytes_to_text(default, self.encoding)
        return super().setdefault(key, default)
    def copy(self):
        """Return a mutable copy of this object."""
        return self.__deepcopy__({})
    def urlencode(self, safe=None):
        """
        Return an encoded string of all query string arguments.
        `safe` specifies characters which don't require quoting, for example::
            >>> q = QueryDict(mutable=True)
            >>> q['next'] = '/a&b/'
            >>> q.urlencode()
            'next=%2Fa%26b%2F'
            >>> q.urlencode(safe='/')
            'next=/a%26b/'
        """
        output = []
        if safe:
            # quote() with an explicit safe set preserves those characters.
            safe = safe.encode(self.encoding)
            def encode(k, v):
                return '%s=%s' % ((quote(k, safe), quote(v, safe)))
        else:
            def encode(k, v):
                return urlencode({k: v})
        for k, list_ in self.lists():
            output.extend(
                encode(k.encode(self.encoding), str(v).encode(self.encoding))
                for v in list_
            )
        return '&'.join(output)
class MediaType:
    """Parsed representation of a single media-type token (e.g. from Accept)."""
    def __init__(self, media_type_raw_line):
        raw = media_type_raw_line.encode('ascii') if media_type_raw_line else b''
        full_type, self.params = parse_header(raw)
        self.main_type, _, self.sub_type = full_type.partition('/')
    def __str__(self):
        parts = [self.main_type]
        if self.sub_type:
            parts.append('/%s' % self.sub_type)
        for key, value in self.params.items():
            parts.append('; %s=%s' % (key, value.decode('ascii')))
        return ''.join(parts)
    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__qualname__, self)
    @property
    def is_all_types(self):
        # True for the '*/*' wildcard.
        return self.main_type == '*' and self.sub_type == '*'
    def match(self, other):
        """Return True when this (possibly wildcard) type accepts `other`."""
        if self.is_all_types:
            return True
        other = MediaType(other)
        return (self.main_type == other.main_type and
                self.sub_type in {'*', other.sub_type})
# It's neither necessary nor appropriate to use
# django.utils.encoding.force_str() for parsing URLs and form inputs. Thus,
# this slightly more restricted function, used by QueryDict.
def bytes_to_text(s, encoding):
    """
    Convert bytes objects to strings, using the given encoding. Illegally
    encoded input characters are replaced with Unicode "unknown" codepoint
    (\ufffd).
    Return any non-bytes objects without change.
    """
    if not isinstance(s, bytes):
        return s
    # 'replace' substitutes U+FFFD for undecodable byte sequences.
    return str(s, encoding, 'replace')
def split_domain_port(host):
    """
    Return a (domain, port) tuple from a given host.
    Returned domain is lowercased. If the host is invalid, the domain will be
    empty.
    """
    host = host.lower()
    if not host_validation_re.match(host):
        return '', ''
    if host.endswith(']'):
        # It's an IPv6 address without a port.
        return host, ''
    domain, sep, port = host.rpartition(':')
    if not sep:
        # No colon at all: the whole host is the domain.
        domain, port = host, ''
    # Remove a trailing dot (if present) from the domain.
    if domain.endswith('.'):
        domain = domain[:-1]
    return domain, port
def validate_host(host, allowed_hosts):
    """
    Validate the given host for this site.
    Check that the host looks valid and matches a host or host pattern in the
    given list of ``allowed_hosts``. Any pattern beginning with a period
    matches a domain and all its subdomains (e.g. ``.example.com`` matches
    ``example.com`` and any subdomain), ``*`` matches anything, and anything
    else must match exactly.
    Note: This function assumes that the given host is lowercased and has
    already had the port, if any, stripped off.
    Return ``True`` for a valid host, ``False`` otherwise.
    """
    for pattern in allowed_hosts:
        if pattern == '*' or is_same_domain(host, pattern):
            return True
    return False
def parse_accept_header(header):
    """Split an Accept-style header into a list of MediaType instances."""
    tokens = (token for token in header.split(',') if token.strip())
    return [MediaType(token) for token in tokens]
| |
# Copyright 2015 IBM Corp.
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Volume FC driver for IBM Storwize family and SVC storage systems.
Notes:
1. If you specify both a password and a key file, this driver will use the
key file only.
2. When using a key file for authentication, it is up to the user or
system administrator to store the private key in a safe manner.
3. The defaults for creating volumes are "-rsize 2% -autoexpand
-grainsize 256 -warning 0". These can be changed in the configuration
file or by using volume types(recommended only for advanced users).
Limitations:
1. The driver expects CLI output in English, error messages may be in a
localized format.
2. Clones and creating volumes from snapshots, where the source and target
are of different sizes, is not supported.
"""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder import interface
from cinder import utils
from cinder.volume.drivers.ibm.storwize_svc import (
storwize_svc_common as storwize_common)
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
# Driver-specific oslo.config options, registered globally and also appended
# to the per-backend configuration in StorwizeSVCFCDriver.__init__.
storwize_svc_fc_opts = [
    cfg.BoolOpt('storwize_svc_multipath_enabled',
                default=False,
                help='Connect with multipath (FC only; iSCSI multipath is '
                     'controlled by Nova)'),
]
CONF = cfg.CONF
CONF.register_opts(storwize_svc_fc_opts)
@interface.volumedriver
class StorwizeSVCFCDriver(storwize_common.StorwizeSVCCommonDriver):
    """IBM Storwize V7000 and SVC FC volume driver.
    Version history:
    .. code-block:: none
        1.0 - Initial driver
        1.1 - FC support, create_cloned_volume, volume type support,
              get_volume_stats, minor bug fixes
        1.2.0 - Added retype
        1.2.1 - Code refactor, improved exception handling
        1.2.2 - Fix bug #1274123 (races in host-related functions)
        1.2.3 - Fix Fibre Channel connectivity: bug #1279758 (add delim
                to lsfabric, clear unused data from connections, ensure
                matching WWPNs by comparing lower case
        1.2.4 - Fix bug #1278035 (async migration/retype)
        1.2.5 - Added support for manage_existing (unmanage is inherited)
        1.2.6 - Added QoS support in terms of I/O throttling rate
        1.3.1 - Added support for volume replication
        1.3.2 - Added support for consistency group
        1.3.3 - Update driver to use ABC metaclasses
        2.0 - Code refactor, split init file and placed shared methods
              for FC and iSCSI within the StorwizeSVCCommonDriver class
        2.0.1 - Added support for multiple pools with model update
        2.1 - Added replication V2 support to the global/metro mirror
              mode
        2.1.1 - Update replication to version 2.1
    """
    VERSION = "2.1.1"
    def __init__(self, *args, **kwargs):
        # Reuse all shared Storwize/SVC logic; only mark the protocol and
        # register the FC-specific config options.
        super(StorwizeSVCFCDriver, self).__init__(*args, **kwargs)
        self.protocol = 'FC'
        self.configuration.append_config_values(
            storwize_svc_fc_opts)
    def validate_connector(self, connector):
        """Check connector for at least one enabled FC protocol."""
        if 'wwpns' not in connector:
            LOG.error(_LE('The connector does not contain the required '
                          'information.'))
            raise exception.InvalidConnectorException(
                missing='wwpns')
    @fczm_utils.AddFCZone
    def initialize_connection(self, volume, connector):
        """Perform necessary work to make a FC connection."""
        # Serialize per (array, host) to avoid races in host create/map
        # operations (see bug #1274123 in the version history).
        @utils.synchronized('storwize-host' + self._state['system_id'] +
                            connector['host'], external=True)
        def _do_initialize_connection_locked():
            return self._do_initialize_connection(volume, connector)
        return _do_initialize_connection_locked()
    def _do_initialize_connection(self, volume, connector):
        """Perform necessary work to make a FC connection.
        To be able to create an FC connection from a given host to a
        volume, we must:
        1. Translate the given WWNN to a host name
        2. Create new host on the storage system if it does not yet exist
        3. Map the volume to the host if it is not already done
        4. Return the connection information for relevant nodes (in the
        proper I/O group)
        """
        LOG.debug('enter: initialize_connection: volume %(vol)s with connector'
                  ' %(conn)s', {'vol': volume['id'], 'conn': connector})
        volume_name = volume['name']
        # Check if a host object is defined for this host name
        host_name = self._helpers.get_host_from_connector(connector)
        if host_name is None:
            # Host does not exist - add a new host to Storwize/SVC
            host_name = self._helpers.create_host(connector)
        volume_attributes = self._helpers.get_vdisk_attributes(volume_name)
        if volume_attributes is None:
            msg = (_('initialize_connection: Failed to get attributes'
                     ' for volume %s.') % volume_name)
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)
        multihostmap = self.configuration.storwize_svc_multihostmap_enabled
        lun_id = self._helpers.map_vol_to_host(volume_name, host_name,
                                               multihostmap)
        try:
            preferred_node = volume_attributes['preferred_node_id']
            IO_group = volume_attributes['IO_group_id']
        except KeyError as e:
            LOG.error(_LE('Did not find expected column name in '
                          'lsvdisk: %s.'), e)
            raise exception.VolumeBackendAPIException(
                data=_('initialize_connection: Missing volume attribute for '
                       'volume %s.') % volume_name)
        try:
            # Get preferred node and other nodes in I/O group
            preferred_node_entry = None
            io_group_nodes = []
            for node in self._state['storage_nodes'].values():
                if node['id'] == preferred_node:
                    preferred_node_entry = node
                if node['IO_group'] == IO_group:
                    io_group_nodes.append(node)
            if not len(io_group_nodes):
                msg = (_('initialize_connection: No node found in '
                         'I/O group %(gid)s for volume %(vol)s.') %
                       {'gid': IO_group, 'vol': volume_name})
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)
            if not preferred_node_entry:
                # Get 1st node in I/O group
                preferred_node_entry = io_group_nodes[0]
                LOG.warning(_LW('initialize_connection: Did not find a '
                                'preferred node for volume %s.'), volume_name)
            properties = {}
            properties['target_discovered'] = False
            properties['target_lun'] = lun_id
            properties['volume_id'] = volume['id']
            conn_wwpns = self._helpers.get_conn_fc_wwpns(host_name)
            # If conn_wwpns is empty, then that means that there were
            # no target ports with visibility to any of the initiators
            # so we return all target ports.
            if len(conn_wwpns) == 0:
                for node in self._state['storage_nodes'].values():
                    conn_wwpns.extend(node['WWPN'])
            properties['target_wwn'] = conn_wwpns
            i_t_map = self._make_initiator_target_map(connector['wwpns'],
                                                      conn_wwpns)
            properties['initiator_target_map'] = i_t_map
            # specific for z/VM, refer to cinder bug 1323993
            if "zvm_fcp" in connector:
                properties['zvm_fcp'] = connector['zvm_fcp']
        except Exception:
            # Roll back the mapping/host created above before re-raising.
            with excutils.save_and_reraise_exception():
                self._do_terminate_connection(volume, connector)
                LOG.error(_LE('initialize_connection: Failed '
                              'to collect return '
                              'properties for volume %(vol)s and connector '
                              '%(conn)s.\n'), {'vol': volume,
                                               'conn': connector})
        LOG.debug('leave: initialize_connection:\n volume: %(vol)s\n '
                  'connector %(conn)s\n properties: %(prop)s',
                  {'vol': volume['id'], 'conn': connector,
                   'prop': properties})
        return {'driver_volume_type': 'fibre_channel', 'data': properties, }
    def _make_initiator_target_map(self, initiator_wwpns, target_wwpns):
        """Build a simplistic all-to-all mapping."""
        i_t_map = {}
        for i_wwpn in initiator_wwpns:
            i_t_map[str(i_wwpn)] = []
            for t_wwpn in target_wwpns:
                i_t_map[i_wwpn].append(t_wwpn)
        return i_t_map
    @fczm_utils.RemoveFCZone
    def terminate_connection(self, volume, connector, **kwargs):
        """Cleanup after an FC connection has been terminated."""
        # If a fake connector is generated by nova when the host
        # is down, then the connector will not have a host property,
        # In this case construct the lock without the host property
        # so that all the fake connectors to an SVC are serialized
        host = connector['host'] if 'host' in connector else ""
        @utils.synchronized('storwize-host' + self._state['system_id'] + host,
                            external=True)
        def _do_terminate_connection_locked():
            return self._do_terminate_connection(volume, connector,
                                                 **kwargs)
        return _do_terminate_connection_locked()
    def _do_terminate_connection(self, volume, connector, **kwargs):
        """Cleanup after an FC connection has been terminated.
        When we clean up a terminated connection between a given connector
        and volume, we:
        1. Translate the given connector to a host name
        2. Remove the volume-to-host mapping if it exists
        3. Delete the host if it has no more mappings (hosts are created
        automatically by this driver when mappings are created)
        """
        LOG.debug('enter: terminate_connection: volume %(vol)s with connector'
                  ' %(conn)s', {'vol': volume['id'], 'conn': connector})
        vol_name = volume['name']
        info = {}
        if 'host' in connector:
            # get host according to FC protocol
            connector = connector.copy()
            # Drop the iSCSI initiator so host lookup matches on WWPNs only.
            connector.pop('initiator', None)
            info = {'driver_volume_type': 'fibre_channel',
                    'data': {}}
            host_name = self._helpers.get_host_from_connector(
                connector, volume_name=vol_name)
            if host_name is None:
                msg = (_('terminate_connection: Failed to get host name from'
                         ' connector.'))
                LOG.error(msg)
                raise exception.VolumeDriverException(message=msg)
        else:
            # See bug #1244257
            host_name = None
        # Unmap volumes, if hostname is None, need to get value from vdiskmap
        host_name = self._helpers.unmap_vol_from_host(vol_name, host_name)
        # Host_name could be none
        if host_name:
            resp = self._helpers.check_host_mapped_vols(host_name)
            if not len(resp):
                LOG.info(_LI("Need to remove FC Zone, building initiator "
                             "target map."))
                # Build info data structure for zone removing
                if 'wwpns' in connector and host_name:
                    target_wwpns = []
                    # Returning all target_wwpns in storage_nodes, since
                    # we cannot determine which wwpns are logged in during
                    # a VM deletion.
                    for node in self._state['storage_nodes'].values():
                        target_wwpns.extend(node['WWPN'])
                    init_targ_map = (self._make_initiator_target_map
                                     (connector['wwpns'],
                                      target_wwpns))
                    info['data'] = {'initiator_target_map': init_targ_map}
                # No volume mapped to the host, delete host from array
                self._helpers.delete_host(host_name)
        LOG.debug('leave: terminate_connection: volume %(vol)s with '
                  'connector %(conn)s', {'vol': volume['id'],
                                         'conn': connector})
        return info
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.template import Context, Template
from django.core.urlresolvers import resolve
from django.conf import settings
from django.core.handlers.wsgi import WSGIRequest
from django.shortcuts import HttpResponse
from .models import Html
import tempfile
import time
import os
import subprocess
class InstanceError(Exception):
    """Raised when a configured value has an unexpected type."""
    pass
class WkHtmlToPdfError(Exception):
    """Raised when the wkhtmltopdf binary is missing or fails."""
    pass
class WKHtmlToPDFGenerator(object):
"""
Wkhtmltopdf generator, it will take context and html converts to pdf.
"""
def __init__(self, **kwargs):
"""
By default it will search django core settings,
if wkhtmltopdf not found there it will add that command.
:param kwargs:
It may contains the html and context,
html object is model and context of view
:type kwargs: Dictionary
:returns: None
"""
tempfile.tempdir = '/tmp/'
self.tmp_dir = tempfile.gettempdir()
self.html = kwargs.get('html')
self.context = kwargs.get('context')
self.command = []
self.code_format = "utf-8"
if hasattr(settings, "WKHTML_TO_PDF_CMD") and isinstance(settings.WKHTML_TO_PDF_CMD, str):
self.command = [settings.WKHTML_TO_PDF_CMD]
@property
def _get_options(self):
""" Providing external options for wkhtmltopdf from settings
and HtmlHeaderFooter model
"""
if self.html.htmlheader.quiet:
self.command.append('--quiet')
if self.html.htmlheader.zoom:
self.command.extend(['--zoom', str(self.html.htmlheader.zoom)])
# default to UTF-8 encoding. Use <meta charset="latin-1"> to override.
self.command.extend(['--encoding', self.html.htmlheader.encode_type])
options = getattr(settings, 'WKHTML_OPTIONS', None)
if options is not None:
if not isinstance(options, dict):
raise InstanceError("WKHTML_OPTIONS not a dictionary")
for key, value in options.iteritems():
if value is None:
self.command.append(str(key))
else:
self.command.extend([str(key), str(value)])
def _write_data_into_file(self, content, name, file_to_del, css=False):
"""It will creates the temp file in temporary folder
:param: content: context of view
:type: content: Dictionary
:param: name:
html or css file suffix
:type: name: str or unicode
:param: file_to_del:
it will holds the temp files for after delete those
files from temp folder when pdf generate complete.
:type: file_to_del: list
:param: css:
By default it is False, If it provides then this suffix is css.
:type: css: bool
:returns: temp file object
"""
encoded_content = content.encode(self.code_format)
if not css:
_sanitize_string = self._sanitize_html(encoded_content)
else:
_sanitize_string = encoded_content
render_str = self._render_string(_sanitize_string)
com_file = file(
os.path.join(self.tmp_dir, str(time.time()) + name), 'w'
)
com_file.write(render_str.encode(self.code_format))
file_to_del.append(com_file.name)
return com_file
@property
def generate_pdf_file(self):
"""This method will generates the Pdf object from html
:return: pdf object
:raise: RuntimeError: run time
:raise: WkHtmlToPdfError: html error
:raise: OSError, IOError: none
"""
out_filename = tempfile.mktemp(suffix=".pdf", prefix="webkit.tmp.")
file_to_del = [out_filename]
if not self.command:
self.command = ['wkhtmltopdf']
self._get_options
if self.html.htmlheader.header:
head_file = self._write_data_into_file(
str(self.html.htmlheader.header),
'.head.html',
file_to_del
)
file_to_del.append(head_file.name)
self.command.extend(['--header-html', head_file.name])
head_file.close()
if self.html.htmlheader.footer:
foot_file = self._write_data_into_file(
self.html.htmlheader.footer,
'.foot.html',
file_to_del
)
self.command.extend(['--footer-html', foot_file.name])
file_to_del.append(foot_file.name)
foot_file.close()
if self.html.htmlheader.css:
css_file = self._write_data_into_file(
self.html.htmlheader.footer,
'.style.css',
file_to_del,
css=True
)
file_to_del.append(css_file.name)
self.command.extend(['--user-style-sheet', css_file.name])
css_file.close()
if self.html.htmlheader.margin_top:
self.command.extend([
'--margin-top',
str(self.html.htmlheader.margin_top).replace(',', '.')
])
if self.html.htmlheader.margin_bottom:
self.command.extend([
'--margin-bottom',
str(self.html.htmlheader.margin_bottom).replace(',', '.')
])
if self.html.htmlheader.margin_left:
self.command.extend([
'--margin-left',
str(self.html.htmlheader.margin_left).replace(',', '.')
])
if self.html.htmlheader.margin_right:
self.command.extend([
'--margin-right',
str(self.html.htmlheader.margin_right).replace(',', '.')
])
if self.html.htmlheader.orientation:
self.command.extend([
'--orientation',
str(self.html.htmlheader.orientation).replace(',', '.')
])
if self.html.htmlheader.page_size:
self.command.extend([
'--page-size',
str(self.html.htmlheader.page_size).replace(',', '.')
])
count = 0
for body in self.html.htmlbody.all():
html_file = self._write_data_into_file(
body.body,
'.%s.body.html' % body.id,
file_to_del
)
self.command.append(html_file.name)
count += 1
html_file.close()
self.command.append(out_filename)
seder_fd, seder_path = tempfile.mkstemp(text=True)
file_to_del.append(seder_path)
try:
status = subprocess.call(self.command, stderr=seder_fd)
os.close(seder_fd) # ensure flush before reading
seder_fd = None # avoid closing again in finally block
file_obj = open(seder_path, 'r')
message = file_obj.read()
file_obj.close()
if not message:
error_message = 'No diagnosis message was provided'
else:
error_message = '''The following diagnosis message was provided:\n''' + message
if status:
raise RuntimeError("""
Webkit error The command 'wkhtmltopdf'
failed with error
code = %s. Message: %s""" %
(status, error_message))
pdf_file = open(out_filename, 'rb')
pdf = pdf_file.read()
pdf_file.close()
except Exception as e:
if subprocess.call(['which', self.command[0]]):
raise WkHtmlToPdfError("make sure wkhtmltopdf installed in your instance \
or check wkhtmltopdf path is given correctly")
if "does not support more then one input document" in (e.message):
raise WkHtmlToPdfError("""This Wkhtmltopdf doesn't support please follow this link
http://stackoverflow.com/questions/18758589/wkhtmltopdf-installation-error-on-ubuntu""")
finally:
if seder_fd is not None:
os.close(seder_fd)
for f_to_del in file_to_del:
try:
os.unlink(f_to_del)
except (OSError, IOError), exc:
# print("cannot remove file %s: %s" % (f_to_del, exc))
pass
return pdf
@property
def generate_template_file(self):
"""This method will generates the Pdf object from html
:return: pdf object
:raise: RuntimeError: run time
:raise: WkHtmlToPdfError: html error
:raise: OSError, IOError: none
"""
out_filename = tempfile.mktemp(suffix=".pdf", prefix="webkit.tmp.")
file_to_del = [out_filename]
if not self.command:
self.command = ['wkhtmltopdf']
self._get_options
if self.html.htmlheader.header:
head_file = self._write_data_into_file(
str(self.html.htmlheader.header),
'.head.html',
file_to_del
)
file_to_del.append(head_file.name)
self.command.extend(['--header-html', head_file.name])
head_file.close()
if self.html.htmlheader.footer:
foot_file = self._write_data_into_file(
self.html.htmlheader.footer,
'.foot.html',
file_to_del
)
self.command.extend(['--footer-html', foot_file.name])
file_to_del.append(foot_file.name)
foot_file.close()
if self.html.htmlheader.css:
css_file = self._write_data_into_file(
self.html.htmlheader.css,
'.style.css',
file_to_del,
css=True
)
file_to_del.append(css_file.name)
self.command.extend(['--user-style-sheet', css_file.name])
css_file.close()
if self.html.htmlheader.margin_top:
self.command.extend([
'--margin-top',
str(self.html.htmlheader.margin_top).replace(',', '.')
])
if self.html.htmlheader.margin_bottom:
self.command.extend([
'--margin-bottom',
str(self.html.htmlheader.margin_bottom).replace(',', '.')
])
if self.html.htmlheader.margin_left:
self.command.extend([
'--margin-left',
str(self.html.htmlheader.margin_left).replace(',', '.')
])
if self.html.htmlheader.margin_right:
self.command.extend([
'--margin-right',
str(self.html.htmlheader.margin_right).replace(',', '.')
])
if self.html.htmlheader.orientation:
self.command.extend([
'--orientation',
str(self.html.htmlheader.orientation).replace(',', '.')
])
if self.html.htmlheader.page_size:
self.command.extend([
'--page-size',
str(self.html.htmlheader.page_size).replace(',', '.')
])
count = 0
for body in self.html.htmlbody.all():
html_file = self._write_data_into_file(
body.body,
'.%s.body.html' % body.id,
file_to_del
)
self.command.append(html_file.name)
count += 1
html_file.close()
data = {}
if self.html.htmlheader.name:
data['t'] = self.html.htmlheader.name
if 'head_file' in locals():
with open (head_file.name, "r") as myfile:
data['h'] = myfile.read()
if 'foot_file' in locals():
with open (foot_file.name, "r") as myfile:
data['f'] = myfile.read()
if 'css_file' in locals():
with open (css_file.name, "r") as myfile:
data['c'] = myfile.read()
if 'head_file' in locals():
with open (html_file.name, "r") as myfile:
data['b'] = myfile.read()
data.update((x, unicode(y.replace('\n', '').replace('\r', '').replace('<!DOCTYPE html>', ''))) for x, y in data.items())
return data
def _render_string(self, html):
"""Render the context in html
:param html: html data
:type html: str or unicode
:returns Sends the Render the Context html
"""
temp = Template(html)
return temp.render(Context(self.context))
@staticmethod
def _sanitize_html(html):
"""wkhtmltopdf expects the html page to declare a doctype.
:param html: html document
:type html: str or unicode
:returns: html document
"""
input_html = html
if input_html and input_html[:9].upper() != "<!DOCTYPE":
html = "<!DOCTYPE html>\n" + input_html
return html
def convert_html_to_pdf(**kwargs):
    """Api call for converting Html to Pdf.

    Resolves the Html record registered for the calling view, renders it
    through WKHtmlToPDFGenerator and wraps the PDF bytes in an HttpResponse.

    :param request: view client request (required)
    :type request: WSGIRequest
    :param context: rendering template with this context --> optional
    :type context: Dictionary
    :param name: pdf file name --> optional, falls back to Html.name
    :type name: str or unicode or int
    :returns: HttpResponse carrying the PDF (Content-Type application/pdf)
    :raises: KeyError: when no request kwarg is given
    :raises: InstanceError: when request is not a WSGIRequest
    :raises: Html.DoesNotExist: when no Html row matches the view
    """
    if 'request' not in kwargs:
        raise KeyError('request param not in kwargs')
    request = kwargs.get('request')
    if not isinstance(request, WSGIRequest):
        raise InstanceError("request is not instance of WSGIRequest")
    url_match = resolve(request.path)
    view = url_match.func.__module__ + "." + url_match.func.__name__
    try:
        html = Html.objects.get(view=view)
    except Html.DoesNotExist:
        # FIX: the original message was built with a string line continuation
        # and therefore embedded a run of indentation whitespace.
        raise Html.DoesNotExist(
            "The provided view does not match in the Html model, "
            "view={}".format(view))
    webkit = WKHtmlToPDFGenerator(context=kwargs.get('context'), html=html)
    # 'attachment;' forces a download; an empty prefix lets the browser
    # display the PDF inline.
    disposition = 'attachment;'
    if not html.attachment:
        disposition = ''
    if 'name' in kwargs and len(str(kwargs.get('name'))) > 0:
        disposition += " filename={}.pdf".format(str(kwargs.get('name')))
    else:
        disposition += " filename={}.pdf".format(html.name)
    response = HttpResponse(
        webkit.generate_pdf_file,
        content_type='application/pdf')
    response['Content-Disposition'] = disposition
    # Reset the command list so a reused generator does not accumulate options.
    webkit.command = []
    return response
def get_pdf_html(**kwargs):
    """Api call returning the rendered template parts for a view's Html row.

    Unlike :func:`convert_html_to_pdf` this does NOT build an HttpResponse:
    it returns the template-part dictionary produced by
    ``WKHtmlToPDFGenerator.generate_template_file``.

    :param request: view client request (required)
    :type request: WSGIRequest
    :param context: rendering template with this context --> optional
    :type context: Dictionary
    :returns: dict of template parts (keys 't', 'h', 'f', 'c', 'b')
    :raises: KeyError: when no request kwarg is given
    :raises: InstanceError: when request is not a WSGIRequest
    :raises: Html.DoesNotExist: when no Html row matches the view
    """
    if 'request' not in kwargs:
        raise KeyError('request param not in kwargs')
    request = kwargs.get('request')
    if not isinstance(request, WSGIRequest):
        raise InstanceError("request is not instance of WSGIRequest")
    url_match = resolve(request.path)
    view = url_match.func.__module__ + "." + url_match.func.__name__
    try:
        html = Html.objects.get(view=view)
    except Html.DoesNotExist:
        # FIX: the original message was built with a string line continuation
        # and therefore embedded a run of indentation whitespace.
        raise Html.DoesNotExist(
            "The provided view does not match in the Html model, "
            "view={}".format(view))
    webkit = WKHtmlToPDFGenerator(context=kwargs.get('context'), html=html)
    return webkit.generate_template_file
| |
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import ConfigParser
import os
from ambari_commons import OSCheck
from ambari_commons.ambari_metrics_helper import select_metric_collector_hosts_from_hostnames
from resource_management.core.logger import Logger
from resource_management.libraries import functions
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.expect import expect
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.functions.substitute_vars import substitute_vars
from resource_management.libraries.resources.hdfs_resource import HdfsResource
import status_params
from functions import calc_xmn_from_xms
from functions import check_append_heap_property
from functions import trim_heap_property
if OSCheck.is_windows_family():
from params_windows import *
else:
from params_linux import *
# server configurations
# NOTE(review): `Script` is not imported in this module explicitly; it is
# presumably brought in by the params_windows/params_linux star-import above
# -- verify before refactoring the imports.
config = Script.get_config()  # full command/config dict pushed by the Ambari server
exec_tmp_dir = Script.get_tmp_dir()  # agent-side scratch directory for this command
def get_combined_memory_mb(value1, value2):
  """Add two heap-size strings (e.g. '512m', '1024') and return the sum in MB.

  A single trailing 'm'/'M' suffix is stripped before parsing; the result
  always carries an 'm' suffix (e.g. '1536m').  Returns None when either
  value cannot be interpreted as an integer megabyte count (best-effort
  contract kept from the original implementation).
  """
  def _to_mb(value):
    # Strip one trailing 'm'/'M' marker, if present, then parse as int.
    stripped = value.strip()
    if stripped.lower()[-1:] == 'm':
      stripped = stripped[:-1]
    return int(stripped)
  try:
    return str(_to_mb(value1) + _to_mb(value2)) + 'm'
  except (AttributeError, TypeError, ValueError):
    # Narrowed from a bare `except:`: non-string input or a non-numeric
    # value yields None instead of masking unrelated errors.
    return None
#AMBARI_METRICS data
# Collector process/layout parameters; `status_params` carries pid-dir lookups.
ams_pid_dir = status_params.ams_collector_pid_dir
is_ams_distributed = config['configurations']['ams-site']['timeline.metrics.service.operation.mode'] == 'distributed'
ams_collector_script = "/usr/sbin/ambari-metrics-collector"
ams_collector_pid_dir = status_params.ams_collector_pid_dir
ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
ams_collector_list = default("/clusterHostInfo/metrics_collector_hosts", [])
# Multiple collectors in embedded (non-distributed) mode is a special case
# flagged for downstream config generation.
embedded_mode_multiple_instances = False
if not is_ams_distributed and len(ams_collector_list) > 1:
  embedded_mode_multiple_instances = True
set_instanceId = "false"
cluster_name = config["clusterName"]
# External collector hosts (if configured in cluster-env) override the
# cluster-internal collector host list; note set_instanceId is a string flag.
if 'cluster-env' in config['configurations'] and \
    'metrics_collector_external_hosts' in config['configurations']['cluster-env']:
  ams_collector_hosts = config['configurations']['cluster-env']['metrics_collector_external_hosts']
  set_instanceId = "true"
else:
  ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
metric_collector_host = select_metric_collector_hosts_from_hostnames(ams_collector_hosts)
random_metric_collector_host = select_metric_collector_hosts_from_hostnames(ams_collector_hosts)
# Port resolution: external override first, otherwise parsed from the
# webapp address, with 6188 as the final fallback.
if 'cluster-env' in config['configurations'] and \
    'metrics_collector_external_port' in config['configurations']['cluster-env']:
  metric_collector_port = config['configurations']['cluster-env']['metrics_collector_external_port']
else:
  metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
  if metric_collector_web_address.find(':') != -1:
    metric_collector_port = metric_collector_web_address.split(':')[1]
  else:
    metric_collector_port = '6188'
failover_strategy_blacklisted_interval_seconds = default("/configurations/ams-env/failover_strategy_blacklisted_interval", "600")
failover_strategy = default("/configurations/ams-site/failover.strategy", "round-robin")
# HTTPS toggles both the flag and the scheme used to reach the collector.
if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
  metric_collector_https_enabled = True
  metric_collector_protocol = 'https'
else:
  metric_collector_https_enabled = False
  metric_collector_protocol = 'http'
metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
metric_truststore_alias = default("/configurations/ams-ssl-client/ssl.client.truststore.alias", None)
if not metric_truststore_alias:
  metric_truststore_alias = metric_collector_host
metric_truststore_ca_certs='ca.pem'
agent_cache_dir = config['hostLevelParams']['agentCacheDir']
service_package_folder = config['commandParams']['service_package_folder']
stack_name = default("/hostLevelParams/stack_name", None)
# Grafana dashboard search locations, consumed by get_grafana_dashboard_defs();
# order matters: stack-specific, then default, then custom-service dashboards.
dashboards_dirs = []
# Stack specific
dashboards_dirs.append(os.path.join(agent_cache_dir, service_package_folder,
                                    'files', 'grafana-dashboards', stack_name))
# Default
dashboards_dirs.append(os.path.join(agent_cache_dir, service_package_folder,
                                    'files', 'grafana-dashboards', 'default'))
# Custom services
dashboards_dirs.append(os.path.join(agent_cache_dir, 'dashboards', 'grafana-dashboards'))
def get_grafana_dashboard_defs():
  """Collect every dashboard definition file whose name contains 'grafana'
  found under any of the configured module-level `dashboards_dirs`."""
  dashboard_defs = []
  for dashboards_dir in dashboards_dirs:
    if not os.path.exists(dashboards_dir):
      continue
    for root, _dirs, file_names in os.walk(dashboards_dir):
      dashboard_defs.extend(os.path.join(root, file_name)
                            for file_name in file_names
                            if 'grafana' in file_name)
  return dashboard_defs
# find ambari version for grafana dashboards
def get_ambari_version():
  """Return the ambari-agent version string, or None if it cannot be read.

  The version is read from the `version` file under the agent's data prefix,
  which is looked up in /etc/ambari-agent/conf/ambari-agent.ini.
  """
  AMBARI_AGENT_CONF = '/etc/ambari-agent/conf/ambari-agent.ini'
  # Guard clause: no agent conf file means no hostname script / version info.
  if not os.path.exists(AMBARI_AGENT_CONF):
    return None
  ambari_version = None
  ambari_agent_config = ConfigParser.RawConfigParser()
  try:
    ambari_agent_config.read(AMBARI_AGENT_CONF)
    data_dir = ambari_agent_config.get('agent', 'prefix')
    ver_file = os.path.join(data_dir, 'version')
    # `with` guarantees the handle is closed even when read() raises
    # (the original leaked the handle on a read failure).
    with open(ver_file, "r") as f:
      ambari_version = f.read().strip()
  except Exception as e:  # py2.6+/py3-compatible (was `except Exception, e`)
    Logger.info('Unable to determine ambari version from version file.')
    Logger.debug('Exception: %s' % str(e))
  return ambari_version
# Collector / monitor / Grafana installation layout and credentials.
ams_collector_log_dir = config['configurations']['ams-env']['metrics_collector_log_dir']
ams_collector_conf_dir = "/etc/ambari-metrics-collector/conf"
ams_monitor_log_dir = config['configurations']['ams-env']['metrics_monitor_log_dir']
ams_monitor_dir = "/usr/lib/python2.6/site-packages/resource_monitoring"
ams_monitor_conf_dir = "/etc/ambari-metrics-monitor/conf"
ams_monitor_pid_dir = status_params.ams_monitor_pid_dir
ams_monitor_script = "/usr/sbin/ambari-metrics-monitor"
ams_grafana_script = "/usr/sbin/ambari-metrics-grafana"
ams_grafana_home_dir = '/usr/lib/ambari-metrics-grafana'
ams_grafana_log_dir = default("/configurations/ams-grafana-env/metrics_grafana_log_dir", '/var/log/ambari-metrics-grafana')
ams_grafana_pid_dir = status_params.ams_grafana_pid_dir
ams_grafana_conf_dir = '/etc/ambari-metrics-grafana/conf'
ams_grafana_data_dir = default("/configurations/ams-grafana-env/metrics_grafana_data_dir", '/var/lib/ambari-metrics-grafana')
ams_grafana_admin_user = config['configurations']['ams-grafana-env']['metrics_grafana_username']
ams_grafana_admin_pwd = config['configurations']['ams-grafana-env']['metrics_grafana_password']
# First grafana host in the cluster (if any) is the one this agent targets.
metrics_grafana_hosts = default('/clusterHostInfo/metrics_grafana_hosts', None)
ams_grafana_host = None
if metrics_grafana_hosts:
  ams_grafana_host = metrics_grafana_hosts[0]
ams_grafana_port = default("/configurations/ams-grafana-ini/port", 3000)
ams_grafana_protocol = default("/configurations/ams-grafana-ini/protocol", 'http')
ams_grafana_cert_file = default("/configurations/ams-grafana-ini/cert_file", '/etc/ambari-metrics/conf/ams-grafana.crt')
ams_grafana_cert_key = default("/configurations/ams-grafana-ini/cert_key", '/etc/ambari-metrics/conf/ams-grafana.key')
ams_grafana_ca_cert = default("/configurations/ams-grafana-ini/ca_cert", None)
ams_hbase_home_dir = "/usr/lib/ams-hbase/"
ams_hbase_init_check_enabled = default("/configurations/ams-site/timeline.metrics.hbase.init.check.enabled", True)
#hadoop params
hbase_excluded_hosts = config['commandParams']['excluded_hosts']
hbase_drain_only = config['commandParams']['mark_draining_only']
hbase_included_hosts = config['commandParams']['included_hosts']
hbase_user = status_params.hbase_user
smokeuser = config['configurations']['cluster-env']['smokeuser']
hbase_root_dir = config['configurations']['ams-hbase-site']['hbase.rootdir']
hbase_pid_dir = status_params.hbase_pid_dir
is_hbase_distributed = config['configurations']['ams-hbase-site']['hbase.cluster.distributed']
is_local_fs_rootdir = hbase_root_dir.startswith('file://')
# security is disabled for embedded mode, when HBase is backed by file
security_enabled = False if not is_hbase_distributed else config['configurations']['cluster-env']['security_enabled']
# this is "hadoop-metrics.properties" for 1.x stacks
metric_prop_file_name = "hadoop-metrics2-hbase.properties"
java_home = config['hostLevelParams']['java_home']
ambari_java_home = default("/commandParams/ambari_java_home", None)
# not supporting 32 bit jdk.
java64_home = ambari_java_home if ambari_java_home is not None else java_home
ambari_java_version = default("/commandParams/ambari_java_version", None)
# `expect` coerces the config value to the given type (int here).
if ambari_java_version:
  java_version = expect("/commandParams/ambari_java_version", int)
else :
  java_version = expect("/hostLevelParams/java_version", int)
metrics_collector_heapsize = default('/configurations/ams-env/metrics_collector_heapsize', "512")
metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
skip_disk_metrics_patterns = default("/configurations/ams-env/timeline.metrics.skip.disk.metrics.patterns", None)
skip_network_interfaces_patterns = default("/configurations/ams-env/timeline.metrics.skip.network.interfaces.patterns", None)
skip_virtual_interfaces = default("/configurations/ams-env/timeline.metrics.skip.virtual.interfaces", False)
hbase_log_dir = config['configurations']['ams-hbase-env']['hbase_log_dir']
hbase_classpath_additional = default("/configurations/ams-hbase-env/hbase_classpath_additional", None)
master_heapsize = config['configurations']['ams-hbase-env']['hbase_master_heapsize']
regionserver_heapsize = config['configurations']['ams-hbase-env']['hbase_regionserver_heapsize']
# Check if hbase java options already have appended "m". If Yes, remove the trailing m.
metrics_collector_heapsize = check_append_heap_property(str(metrics_collector_heapsize), "m")
master_heapsize = check_append_heap_property(str(master_heapsize), "m")
regionserver_heapsize = check_append_heap_property(str(regionserver_heapsize), "m")
host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
host_in_memory_aggregation_jvm_arguments = default("/configurations/ams-env/timeline.metrics.host.inmemory.aggregation.jvm.arguments",
                                                   "-Xmx256m -Xms128m -XX:PermSize=68m")
# Region server young-gen (Xmn) sizing: either derived from the heap size and
# a ratio (capped by xmn_max), or taken directly from the explicit config.
regionserver_xmn_max = default('/configurations/ams-hbase-env/hbase_regionserver_xmn_max', None)
if regionserver_xmn_max:
  regionserver_xmn_max = int(trim_heap_property(str(regionserver_xmn_max), "m"))
  regionserver_xmn_percent = expect("/configurations/ams-hbase-env/hbase_regionserver_xmn_ratio", float)
  regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, regionserver_xmn_percent, regionserver_xmn_max)
else:
  regionserver_xmn_size = config['configurations']['ams-hbase-env']['regionserver_xmn_size']
pass
hbase_master_xmn_size = config['configurations']['ams-hbase-env']['hbase_master_xmn_size']
hbase_master_maxperm_size = config['configurations']['ams-hbase-env']['hbase_master_maxperm_size']
# Check if hbase java options already have appended "m". If Yes, remove the trailing m.
hbase_master_maxperm_size = check_append_heap_property(str(hbase_master_maxperm_size), "m")
hbase_master_xmn_size = check_append_heap_property(str(hbase_master_xmn_size), "m")
regionserver_xmn_size = check_append_heap_property(str(regionserver_xmn_size), "m")
# Choose heap size for embedded mode as sum of master + regionserver
if not is_hbase_distributed:
  hbase_heapsize = get_combined_memory_mb(master_heapsize, regionserver_heapsize)
  if hbase_heapsize is None:
    hbase_heapsize = master_heapsize
else:
  hbase_heapsize = master_heapsize
max_open_files_limit = default("/configurations/ams-hbase-env/max_open_files_limit", "32768")
hostname = config["hostname"]
# Embedded mode runs its own single-node ZK on 61181; distributed mode reuses
# the cluster ZooKeeper quorum/port.
cluster_zookeeper_quorum_hosts = ",".join(config['clusterHostInfo']['zookeeper_hosts'])
if 'zoo.cfg' in config['configurations'] and 'clientPort' in config['configurations']['zoo.cfg']:
  cluster_zookeeper_clientPort = config['configurations']['zoo.cfg']['clientPort']
else:
  cluster_zookeeper_clientPort = '2181'
if not is_hbase_distributed:
  zookeeper_quorum_hosts = hostname
  zookeeper_clientPort = '61181'
else:
  zookeeper_quorum_hosts = cluster_zookeeper_quorum_hosts
  zookeeper_clientPort = cluster_zookeeper_clientPort
ams_checkpoint_dir = config['configurations']['ams-site']['timeline.metrics.aggregator.checkpoint.dir']
# substitute_vars expands ${...} placeholders against the same config dict.
_hbase_tmp_dir = config['configurations']['ams-hbase-site']['hbase.tmp.dir']
hbase_tmp_dir = substitute_vars(_hbase_tmp_dir, config['configurations']['ams-hbase-site'])
_zookeeper_data_dir = config['configurations']['ams-hbase-site']['hbase.zookeeper.property.dataDir']
zookeeper_data_dir = substitute_vars(_zookeeper_data_dir, config['configurations']['ams-hbase-site'])
# TODO UPGRADE default, update site during upgrade
_local_dir_conf = default('/configurations/ams-hbase-site/hbase.local.dir', "${hbase.tmp.dir}/local")
local_dir = substitute_vars(_local_dir_conf, config['configurations']['ams-hbase-site'])
phoenix_max_global_mem_percent = default('/configurations/ams-site/phoenix.query.maxGlobalMemoryPercentage', '20')
phoenix_client_spool_dir = default('/configurations/ams-site/phoenix.spool.directory', '/tmp')
phoenix_server_spool_dir = default('/configurations/ams-hbase-site/phoenix.spool.directory', '/tmp')
# Substitute vars if present
phoenix_client_spool_dir = substitute_vars(phoenix_client_spool_dir, config['configurations']['ams-hbase-site'])
phoenix_server_spool_dir = substitute_vars(phoenix_server_spool_dir, config['configurations']['ams-hbase-site'])
# NOTE(review): `format` here is resource_management's format(), which
# interpolates {hbase_conf_dir} from the surrounding scope -- not str.format.
client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
regionserver_jaas_config_file = format("{hbase_conf_dir}/hbase_regionserver_jaas.conf")
rs_hosts = ["localhost"]
smoke_test_user = config['configurations']['cluster-env']['smokeuser']
smokeuser_permissions = "RWXCA"
service_check_data = functions.get_unique_id_and_date()
user_group = config['configurations']['cluster-env']["user_group"]
hadoop_user = "hadoop"
kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
kinit_cmd = ""
klist_path_local = functions.get_klist_path(default('/configurations/kerberos-env/executable_search_paths', None))
klist_cmd = ""
# Kerberos-only parameters: keytabs, JAAS files and principals with the
# _HOST marker replaced by this host's lower-cased name.
if security_enabled:
  _hostname_lowercase = config['hostname'].lower()
  client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
  smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
  hbase_user_keytab = config['configurations']['ams-hbase-env']['hbase_user_keytab']
  ams_collector_jaas_config_file = format("{hbase_conf_dir}/ams_collector_jaas.conf")
  ams_collector_keytab_path = config['configurations']['ams-hbase-security-site']['hbase.myclient.keytab']
  ams_collector_jaas_princ = config['configurations']['ams-hbase-security-site']['hbase.myclient.principal'].replace('_HOST',_hostname_lowercase)
  ams_zookeeper_jaas_config_file = format("{hbase_conf_dir}/ams_zookeeper_jaas.conf")
  ams_zookeeper_keytab = config['configurations']['ams-hbase-security-site']['ams.zookeeper.keytab']
  ams_zookeeper_principal_name = config['configurations']['ams-hbase-security-site']['ams.zookeeper.principal'].replace('_HOST',_hostname_lowercase)
  master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
  master_keytab_path = config['configurations']['ams-hbase-security-site']['hbase.master.keytab.file']
  master_jaas_princ = config['configurations']['ams-hbase-security-site']['hbase.master.kerberos.principal'].replace('_HOST',_hostname_lowercase)
  regionserver_jaas_config_file = format("{hbase_conf_dir}/hbase_regionserver_jaas.conf")
  regionserver_keytab_path = config['configurations']['ams-hbase-security-site']['hbase.regionserver.keytab.file']
  regionserver_jaas_princ = config['configurations']['ams-hbase-security-site']['hbase.regionserver.kerberos.principal'].replace('_HOST',_hostname_lowercase)
  kinit_cmd = '%s -kt %s %s' % (kinit_path_local, config['configurations']['ams-hbase-security-site']['ams.monitor.keytab'], config['configurations']['ams-hbase-security-site']['ams.monitor.principal'].replace('_HOST',_hostname_lowercase))
  klist_cmd = '%s' % klist_path_local
#Ambari metrics log4j settings
ams_hbase_log_maxfilesize = default('configurations/ams-hbase-log4j/ams_hbase_log_maxfilesize',256)
ams_hbase_log_maxbackupindex = default('configurations/ams-hbase-log4j/ams_hbase_log_maxbackupindex',20)
ams_hbase_security_log_maxfilesize = default('configurations/ams-hbase-log4j/ams_hbase_security_log_maxfilesize',256)
ams_hbase_security_log_maxbackupindex = default('configurations/ams-hbase-log4j/ams_hbase_security_log_maxbackupindex',20)
ams_log_max_backup_size = default('configurations/ams-log4j/ams_log_max_backup_size',80)
ams_log_number_of_backup_files = default('configurations/ams-log4j/ams_log_number_of_backup_files',60)
#log4j.properties
if (('ams-hbase-log4j' in config['configurations']) and ('content' in config['configurations']['ams-hbase-log4j'])):
  hbase_log4j_props = config['configurations']['ams-hbase-log4j']['content']
else:
  hbase_log4j_props = None
if (('ams-log4j' in config['configurations']) and ('content' in config['configurations']['ams-log4j'])):
  log4j_props = config['configurations']['ams-log4j']['content']
else:
  log4j_props = None
hbase_env_sh_template = config['configurations']['ams-hbase-env']['content']
ams_env_sh_template = config['configurations']['ams-env']['content']
ams_grafana_env_sh_template = config['configurations']['ams-grafana-env']['content']
ams_grafana_ini_template = config['configurations']['ams-grafana-ini']['content']
hbase_staging_dir = default("/configurations/ams-hbase-site/hbase.bulkload.staging.dir", "/amshbase/staging")
skip_create_hbase_root_dir = default("/configurations/ams-site/timeline.metrics.skip.create.hbase.root.dir", False)
# NOTE(review): `re` is not imported in this module explicitly; presumably it
# arrives via the params_windows/params_linux star-import -- verify.
hbase_wal_dir = default("/configurations/ams-hbase-site/hbase.wal.dir", None)
if hbase_wal_dir and re.search("^file://|/", hbase_wal_dir): #If wal dir is on local file system, create it.
  hbase_wal_dir = re.sub("^file://|/", "", hbase_wal_dir, count=1)
#for create_hdfs_directory
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
hdfs_site = config['configurations']['hdfs-site']
default_fs = config['configurations']['core-site']['fs.defaultFS']
import functools
#create partial functions with common arguments for every HdfsResource call
#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
HdfsResource = functools.partial(
  HdfsResource,
  user=hdfs_user,
  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
  security_enabled = security_enabled,
  keytab = hdfs_user_keytab,
  kinit_path_local = kinit_path_local,
  hadoop_bin_dir = hadoop_bin_dir,
  hadoop_conf_dir = hadoop_conf_dir,
  principal_name = hdfs_principal_name,
  hdfs_site = hdfs_site,
  default_fs = default_fs,
  immutable_paths = get_not_managed_resources()
)
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import subprocess
from collections import OrderedDict
from hashlib import sha1
from twitter.common.collections import OrderedSet
from pants.backend.codegen.protobuf.java.java_protobuf_library import JavaProtobufLibrary
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.tasks.jar_import_products import JarImportProducts
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
from pants.binaries.binary_util import BinaryUtil
from pants.build_graph.address import Address
from pants.fs.archive import ZIP
from pants.task.simple_codegen_task import SimpleCodegenTask
from pants.util.memo import memoized_property
class ProtobufGen(SimpleCodegenTask):
@classmethod
def subsystem_dependencies(cls):
return super(ProtobufGen, cls).subsystem_dependencies() + (BinaryUtil.Factory,)
@classmethod
def register_options(cls, register):
super(ProtobufGen, cls).register_options(register)
# The protoc version and the plugin names are used as proxies for the identity of the protoc
# executable environment here. Although version is an obvious proxy for the protoc binary
# itself, plugin names are less so and plugin authors must include a version in the name for
# proper invalidation of protobuf products in the face of plugin modification that affects
# plugin outputs.
register('--version', advanced=True, fingerprint=True,
help='Version of protoc. Used to create the default --javadeps and as part of '
'the path to lookup the tool with --pants-support-baseurls and '
'--pants-bootstrapdir. When changing this parameter you may also need to '
'update --javadeps.',
default='2.4.1')
register('--protoc-plugins', advanced=True, fingerprint=True, type=list,
help='Names of protobuf plugins to invoke. Protoc will look for an executable '
'named protoc-gen-$NAME on PATH.')
register('--extra_path', advanced=True, type=list,
help='Prepend this path onto PATH in the environment before executing protoc. '
'Intended to help protoc find its plugins.',
default=None)
register('--supportdir', advanced=True,
help='Path to use for the protoc binary. Used as part of the path to lookup the'
'tool under --pants-bootstrapdir.',
default='bin/protobuf')
register('--javadeps', advanced=True, type=list,
help='Dependencies to bootstrap this task for generating java code. When changing '
'this parameter you may also need to update --version.',
default=['3rdparty:protobuf-java'])
register('--import-from-root', type=bool, advanced=True,
help='If set, add the buildroot to the path protoc searches for imports. '
'This enables using import paths relative to the build root in .proto files, '
'as recommended by the protoc documentation.')
# TODO https://github.com/pantsbuild/pants/issues/604 prep start
@classmethod
def prepare(cls, options, round_manager):
super(ProtobufGen, cls).prepare(options, round_manager)
round_manager.require_data(JarImportProducts)
round_manager.require_data('deferred_sources')
# TODO https://github.com/pantsbuild/pants/issues/604 prep finish
def __init__(self, *args, **kwargs):
"""Generates Java files from .proto files using the Google protobuf compiler."""
super(ProtobufGen, self).__init__(*args, **kwargs)
self.plugins = self.get_options().protoc_plugins or []
self._extra_paths = self.get_options().extra_path or []
@memoized_property
def protobuf_binary(self):
binary_util = BinaryUtil.Factory.create()
return binary_util.select_binary(self.get_options().supportdir,
self.get_options().version,
'protoc')
@property
def javadeps(self):
return self.resolve_deps(self.get_options().javadeps or [])
def synthetic_target_type(self, target):
return JavaLibrary
def synthetic_target_extra_dependencies(self, target, target_workdir):
deps = OrderedSet()
if target.imported_jars:
# We need to add in the proto imports jars.
jars_address = Address(os.path.relpath(target_workdir, get_buildroot()),
target.id + '-rjars')
jars_target = self.context.add_new_target(jars_address,
JarLibrary,
jars=target.imported_jars,
derived_from=target)
deps.update([jars_target])
deps.update(self.javadeps)
return deps
def is_gentarget(self, target):
return isinstance(target, JavaProtobufLibrary)
def execute_codegen(self, target, target_workdir):
sources_by_base = self._calculate_sources(target)
sources = target.sources_relative_to_buildroot()
bases = OrderedSet()
# Note that the root import must come first, otherwise protoc can get confused
# when trying to resolve imports from the root against the import's source root.
if self.get_options().import_from_root:
bases.add('.')
bases.update(sources_by_base.keys())
bases.update(self._proto_path_imports([target]))
gen_flag = '--java_out'
gen = '{0}={1}'.format(gen_flag, target_workdir)
args = [self.protobuf_binary, gen]
if self.plugins:
for plugin in self.plugins:
args.append("--{0}_out={1}".format(plugin, target_workdir))
for base in bases:
args.append('--proto_path={0}'.format(base))
args.extend(sources)
# Tack on extra path entries. These can be used to find protoc plugins
protoc_environ = os.environ.copy()
if self._extra_paths:
protoc_environ['PATH'] = os.pathsep.join(self._extra_paths
+ protoc_environ['PATH'].split(os.pathsep))
# Note: The test_source_ordering integration test scrapes this output, so modify it with care.
self.context.log.debug('Executing: {0}'.format('\\\n '.join(args)))
with self.context.new_workunit(name='protoc',
labels=[WorkUnitLabel.TOOL],
cmd=' '.join(args)) as workunit:
result = subprocess.call(args,
env=protoc_environ,
stdout=workunit.output('stdout'),
stderr=workunit.output('stderr'))
if result != 0:
raise TaskError('{} ... exited non-zero ({})'.format(self.protobuf_binary, result))
def _calculate_sources(self, target):
    """Map each source root to the proto sources under it for `target` and its
    transitive proto-library dependencies (buildroot-relative paths)."""
    gentargets = OrderedSet()

    def collect(tgt):
        if self.is_gentarget(tgt):
            gentargets.add(tgt)

    self.context.build_graph.walk_transitive_dependency_graph(
        [target.address],
        collect,
        postorder=True)

    sources_by_base = OrderedDict()
    # Note: renamed loop variable so it no longer shadows the `target` parameter.
    for gentarget in gentargets:
        bucket = sources_by_base.setdefault(gentarget.target_base, OrderedSet())
        bucket.update(gentarget.sources_relative_to_buildroot())
    return sources_by_base
def _jars_to_directories(self, target):
    """Extract `target`'s imported jars and map them to their content directories.

    :returns: a set of filepaths to directories containing each jar's contents.
    """
    jar_import_products = self.context.products.get_data(JarImportProducts)
    return {self._extract_jar(coordinate, jar)
            for coordinate, jar in jar_import_products.imports(target)}
def _extract_jar(self, coordinate, jar_path):
    """Extracts the jar to a subfolder of workdir/extracted and returns the path to it.

    The destination is content-addressed by the jar's sha1, so an already
    extracted jar is reused instead of being unzipped again.
    """
    # Hash the jar incrementally so large archives are not slurped into memory
    # (the old code did sha1(f.read()) on the whole file at once).
    hasher = sha1()
    with open(jar_path, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            hasher.update(chunk)
    outdir = os.path.join(self.workdir, 'extracted', hasher.hexdigest())
    if not os.path.exists(outdir):
        ZIP.extract(jar_path, outdir)
        self.context.log.debug('Extracting jar {jar} at {jar_path}.'
                               .format(jar=coordinate, jar_path=jar_path))
    else:
        self.context.log.debug('Jar {jar} already extracted at {jar_path}.'
                               .format(jar=coordinate, jar_path=jar_path))
    return outdir
def _proto_path_imports(self, proto_targets):
    """Yield buildroot-relative dirs of extracted proto import jars for `proto_targets`."""
    buildroot = get_buildroot()
    for proto_target in proto_targets:
        for extracted_dir in self._jars_to_directories(proto_target):
            yield os.path.relpath(extracted_dir, buildroot)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012, Dongsheng Cai
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Dongsheng Cai nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL DONGSHENG CAI BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging.config
from pymongo.connection import Connection
from tornado.options import define
import tornado.httpserver
import tornado.ioloop
import tornado.options
from pushservices.apns import *
from pushservices.gcm import GCMClient
from pushservices.wns import WNSClient
from pushservices.mpns import MPNSClient
from pushservices.clickatell import *
from uimodules import *
from util import *
from constants import DEVICE_TYPE_IOS, DEVICE_TYPE_ANDROID, DEVICE_TYPE_WNS, \
DEVICE_TYPE_MPNS
# Tornado application options; overridable via airnotifier.conf or command-line flags.
define("port", default=8801, help="Application server listen port", type=int)
define("pemdir", default="pemdir", help="Directory to store pems")
define("passwordsalt", default="d2o0n1g2s0h3e1n1g", help="Being used to make password hash")
define("cookiesecret", default="airnotifiercookiesecret", help="Cookie secret")
define("debug", default=False, help="Debug mode")
# MongoDB connection options.
define("mongohost", default="localhost", help="MongoDB host name")
define("mongoport", default=27017, help="MongoDB port")
define("masterdb", default="airnotifier", help="MongoDB DB to store information")
define("dbprefix", default="obj_", help="Collection name prefix")
# Apply the logging configuration from logging.ini when the file is present.
loggingconfigfile='logging.ini'
if os.path.isfile(loggingconfigfile):
    logging.config.fileConfig(loggingconfigfile)
class AirNotifierApp(tornado.web.Application):
    """Tornado application hosting the AirNotifier web UI and REST API.

    Holds the per-app push-service clients built by init_messaging_agents()
    in `self.services` and a live MongoDB connection in `self.mongodb`.
    """

    def init_routes(self, dir):
        """Load and return the route handlers defined under directory `dir`."""
        from routes import RouteLoader
        return RouteLoader.load(dir)

    def _get_service(self, kind, appname):
        """Return the first configured `kind` client for `appname`, or None.

        Replaces the four copy-pasted try/except lookups that send_broadcast
        and get_broadcast_status previously carried.
        """
        try:
            return self.services[kind][appname][0]
        except (IndexError, KeyError):
            return None

    def get_broadcast_status(self, appname):
        """Return {'msg': str, 'error': bool} describing the last APNs broadcast."""
        apns = self._get_service('apns', appname)
        if apns is not None and apns.hasError():
            return {'msg': apns.getError(), 'error': True}
        return {'msg': "Notification sent!", 'error': False}

    def send_broadcast(self, appname, appdb, **kwargs):
        """Broadcast a notification to every matching token of `appname`.

        Recognized kwargs: channel (default 'default'), alert, device, extra,
        plus per-service payload dicts under the keys 'apns'/'wns'/'mpns'/'gcm'.
        """
        channel = kwargs.get('channel', 'default')
        alert = kwargs.get('alert', None)
        device = kwargs.get('device', None)
        extra = kwargs.get('extra', {})
        apns = self._get_service('apns', appname)
        wns = self._get_service('wns', appname)
        mpns = self._get_service('mpns', appname)
        gcm = self._get_service('gcm', appname)

        conditions = []
        if channel == 'default':
            # channel is not set or channel is default
            conditions.append({'channel': {"$exists": False}})
            conditions.append({'channel': 'default'})
        else:
            conditions.append({'channel': channel})
        if device:
            conditions.append({'device': device})
        tokens = appdb.tokens.find({"$or": conditions})

        regids = []
        for token in tokens:
            # Handle failures per token: previously the try wrapped the whole
            # loop, so one bad token record aborted the entire broadcast.
            try:
                t = token.get('token')
                if token['device'] == DEVICE_TYPE_IOS:
                    if apns is not None:
                        apns.process(token=t, alert=alert, extra=extra, apns=kwargs.get('apns', {}))
                elif token['device'] == DEVICE_TYPE_ANDROID:
                    # GCM supports multicast; collect android ids for one batched request.
                    regids.append(t)
                elif token['device'] == DEVICE_TYPE_WNS:
                    if wns is not None:
                        wns.process(token=t, alert=alert, extra=extra, wns=kwargs.get('wns', {}))
                elif token['device'] == DEVICE_TYPE_MPNS:
                    if mpns is not None:
                        mpns.process(token=t, alert=alert, extra=extra, mpns=kwargs.get('mpns', {}))
            except Exception as ex:
                logging.error(ex)
        # Now sending android notifications
        try:
            if (gcm is not None) and regids:
                response = gcm.process(token=regids, alert=alert, extra=extra, gcm=kwargs.get('gcm', {}))
                # Parse the body so a malformed GCM response is surfaced in the log below.
                response.json()
        except Exception as ex:
            logging.error('GCM problem: ' + str(ex))

    def __init__(self, services):
        app_settings = dict(
            # NOTE(review): debug is hard-coded on; the commented line suggests
            # it was meant to follow options.debug -- confirm before production.
            debug=True,
            # debug=options.debug,
            app_title=u'AirNotifier',
            ui_modules={"AppSideBar": AppSideBar, "NavBar": NavBar, "TabBar": TabBar},
            template_path=os.path.join(os.path.dirname(__file__), 'templates'),
            static_path=os.path.join(os.path.dirname(__file__), 'static'),
            cookie_secret=options.cookiesecret,
            login_url=r"/auth/login",
            autoescape=None,
        )
        self.services = services
        sitehandlers = self.init_routes('controllers')
        apihandlers = self.init_routes('api')
        tornado.web.Application.__init__(self, sitehandlers + apihandlers, **app_settings)
        # Block until MongoDB is reachable; the app cannot serve without it.
        mongodb = None
        while not mongodb:
            try:
                mongodb = Connection(options.mongohost, options.mongoport)
            except Exception:
                error_log("Cannot connect to MongoDB")
        self.mongodb = mongodb
        self.masterdb = mongodb[options.masterdb]
        assert self.masterdb.connection == self.mongodb

    def main(self):
        """Start the HTTP server and run the IOLoop until interrupted."""
        logging.info("Starting AirNotifier server")
        http_server = tornado.httpserver.HTTPServer(self)
        http_server.listen(options.port)
        logging.info("AirNotifier is running")
        try:
            tornado.ioloop.IOLoop.instance().start()
        except KeyboardInterrupt:
            logging.info("AirNotifier is quiting")
            tornado.ioloop.IOLoop.instance().stop()
def init_messaging_agents():
    """Build push-service clients for every registered application.

    Returns a dict keyed by service name ('apns', 'gcm', 'wns', 'mpns', 'sms'),
    each mapping an app shortname to a list of client instances.
    """
    import time  # local import: only needed for the connect-retry backoff

    services = {
        'gcm': {},
        'wns': {},
        'apns': {},
        'mpns': {},
        'sms': {},
    }
    # Retry until MongoDB is reachable; sleep so a down DB doesn't busy-spin.
    mongodb = None
    while not mongodb:
        try:
            mongodb = Connection(options.mongohost, options.mongoport)
        except Exception as ex:
            logging.error(ex)
            time.sleep(1)
    masterdb = mongodb[options.masterdb]
    for app in masterdb.applications.find():
        # Tolerate a malformed app document: indexing app['shortname'] here
        # used to crash the whole startup.
        shortname = app.get('shortname', '')

        ''' APNs setup '''
        services['apns'][shortname] = []
        # `connections` may be absent or < 1; clamp to at least one connection.
        conns = max(int(app.get('connections', 1)), 1)
        if 'environment' not in app:
            app['environment'] = 'sandbox'
        if (shortname and app.get('enableapns', False)
                and file_exists(app.get('certfile', False))
                and file_exists(app.get('keyfile', False))):
            for instanceid in range(conns):
                try:
                    apn = APNClient(app['environment'], app['certfile'],
                                    app['keyfile'], shortname, instanceid)
                except Exception as ex:
                    logging.error(ex)
                    continue  # skip just this connection; keep trying the rest
                services['apns'][shortname].append(apn)

        ''' GCMClient setup '''
        services['gcm'][shortname] = []
        if shortname and 'gcmprojectnumber' in app and 'gcmapikey' in app:
            try:
                http = GCMClient(app['gcmprojectnumber'], app['gcmapikey'], shortname, 0)
                services['gcm'][shortname].append(http)
            except Exception as ex:
                # A bad GCM config must not prevent this app's other services:
                # the old `continue` here skipped WNS/MPNS/SMS setup entirely.
                logging.error(ex)

        ''' WNS setup '''
        services['wns'][shortname] = []
        if shortname and 'wnsclientid' in app and 'wnsclientsecret' in app:
            try:
                services['wns'][shortname].append(WNSClient(masterdb, app, 0))
            except Exception as ex:
                logging.error(ex)

        ''' MPNS setup '''
        services['mpns'][shortname] = []
        try:
            services['mpns'][shortname].append(MPNSClient(masterdb, app, 0))
        except Exception as ex:
            # Previously `continue` here also skipped the SMS setup below.
            logging.error(ex)

        ''' clickatell '''
        services['sms'][shortname] = []
        try:
            services['sms'][shortname].append(ClickatellClient(masterdb, app, 0))
        except Exception as ex:
            logging.error(ex)
    mongodb.close()
    return services
if __name__ == "__main__":
    # Config file first so command-line flags can override its values.
    tornado.options.parse_config_file("airnotifier.conf")
    tornado.options.parse_command_line()
    push_services = init_messaging_agents()
    AirNotifierApp(services=push_services).main()
| |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
depends_on = (("entities", "0001_initial"),)
def forwards(self, orm):
    """Create the sheets schema: Template, TemplateNode (+ inverse/backwards
    M2M tables), TemplateNodeRelation, Sheet, SheetItem, SheetItemComment and
    DenormalizedSheetItem (+ its inverse/backwards M2M tables).

    Generated by South; statement order matters (tables before their M2M and
    unique constraints), so do not reorder.
    """
    # Adding model 'Template'
    db.create_table(u'sheets_template', (
        (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('created_on', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
        ('last_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, db_index=True, blank=True)),
        ('uuid', self.gf('uuidfield.fields.UUIDField')(db_index=True, unique=True, max_length=32, blank=True)),
        ('period_start', self.gf('django.db.models.fields.DateField')(db_index=True, null=True, blank=True)),
        # `name`/`description` carry he/en/ar/ru translation columns (django-modeltranslation style).
        ('name', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
        ('name_he', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=255, null=True, blank=True)),
        ('name_en', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=255, null=True, blank=True)),
        ('name_ar', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=255, null=True, blank=True)),
        ('name_ru', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=255, null=True, blank=True)),
        ('description', self.gf('django.db.models.fields.TextField')(db_index=True, blank=True)),
        ('description_he', self.gf('django.db.models.fields.TextField')(db_index=True, null=True, blank=True)),
        ('description_en', self.gf('django.db.models.fields.TextField')(db_index=True, null=True, blank=True)),
        ('description_ar', self.gf('django.db.models.fields.TextField')(db_index=True, null=True, blank=True)),
        ('description_ru', self.gf('django.db.models.fields.TextField')(db_index=True, null=True, blank=True)),
    ))
    db.send_create_signal(u'sheets', ['Template'])
    # Adding M2M table for field divisions on 'Template'
    m2m_table_name = db.shorten_name(u'sheets_template_divisions')
    db.create_table(m2m_table_name, (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('template', models.ForeignKey(orm[u'sheets.template'], null=False)),
        ('division', models.ForeignKey(orm[u'entities.division'], null=False))
    ))
    db.create_unique(m2m_table_name, ['template_id', 'division_id'])
    # Adding model 'TemplateNode'
    db.create_table(u'sheets_templatenode', (
        (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('created_on', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
        ('last_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, db_index=True, blank=True)),
        ('uuid', self.gf('uuidfield.fields.UUIDField')(db_index=True, unique=True, max_length=32, blank=True)),
        ('name', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
        ('name_he', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=255, null=True, blank=True)),
        ('name_en', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=255, null=True, blank=True)),
        ('name_ar', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=255, null=True, blank=True)),
        ('name_ru', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=255, null=True, blank=True)),
        ('code', self.gf('django.db.models.fields.CharField')(max_length=50, db_index=True)),
        ('direction', self.gf('django.db.models.fields.CharField')(default='REVENUE', max_length=15, db_index=True)),
        # Self-referential tree: `parent` + denormalized `path` for lookups.
        ('parent', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='children', null=True, to=orm['sheets.TemplateNode'])),
        ('path', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=255, null=True, blank=True)),
        ('description', self.gf('django.db.models.fields.TextField')(blank=True)),
        ('description_he', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ('description_en', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ('description_ar', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ('description_ru', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
    ))
    db.send_create_signal(u'sheets', ['TemplateNode'])
    # Adding M2M table for field inverse on 'TemplateNode'
    m2m_table_name = db.shorten_name(u'sheets_templatenode_inverse')
    db.create_table(m2m_table_name, (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('from_templatenode', models.ForeignKey(orm[u'sheets.templatenode'], null=False)),
        ('to_templatenode', models.ForeignKey(orm[u'sheets.templatenode'], null=False))
    ))
    db.create_unique(m2m_table_name, ['from_templatenode_id', 'to_templatenode_id'])
    # Adding M2M table for field backwards on 'TemplateNode'
    m2m_table_name = db.shorten_name(u'sheets_templatenode_backwards')
    db.create_table(m2m_table_name, (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('from_templatenode', models.ForeignKey(orm[u'sheets.templatenode'], null=False)),
        ('to_templatenode', models.ForeignKey(orm[u'sheets.templatenode'], null=False))
    ))
    db.create_unique(m2m_table_name, ['from_templatenode_id', 'to_templatenode_id'])
    # Adding model 'TemplateNodeRelation'
    db.create_table(u'sheets_templatenoderelation', (
        (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('template', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sheets.Template'])),
        ('node', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sheets.TemplateNode'])),
    ))
    db.send_create_signal(u'sheets', ['TemplateNodeRelation'])
    # Adding unique constraint on 'TemplateNodeRelation', fields ['node', 'template']
    db.create_unique(u'sheets_templatenoderelation', ['node_id', 'template_id'])
    # Adding model 'Sheet'
    db.create_table(u'sheets_sheet', (
        (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('created_on', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
        ('last_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, db_index=True, blank=True)),
        ('uuid', self.gf('uuidfield.fields.UUIDField')(db_index=True, unique=True, max_length=32, blank=True)),
        ('period_start', self.gf('django.db.models.fields.DateField')(db_index=True, null=True, blank=True)),
        ('period_end', self.gf('django.db.models.fields.DateField')(db_index=True, null=True, blank=True)),
        ('entity', self.gf('django.db.models.fields.related.ForeignKey')(related_name='sheets', to=orm['entities.Entity'])),
        ('template', self.gf('django.db.models.fields.related.ForeignKey')(related_name='using_sheets', to=orm['sheets.Template'])),
        ('description', self.gf('django.db.models.fields.TextField')(db_index=True, blank=True)),
        ('description_he', self.gf('django.db.models.fields.TextField')(db_index=True, null=True, blank=True)),
        ('description_en', self.gf('django.db.models.fields.TextField')(db_index=True, null=True, blank=True)),
        ('description_ar', self.gf('django.db.models.fields.TextField')(db_index=True, null=True, blank=True)),
        ('description_ru', self.gf('django.db.models.fields.TextField')(db_index=True, null=True, blank=True)),
    ))
    db.send_create_signal(u'sheets', ['Sheet'])
    # Adding model 'SheetItem'
    db.create_table(u'sheets_sheetitem', (
        (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('created_on', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
        ('last_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, db_index=True, blank=True)),
        ('uuid', self.gf('uuidfield.fields.UUIDField')(db_index=True, unique=True, max_length=32, blank=True)),
        ('sheet', self.gf('django.db.models.fields.related.ForeignKey')(related_name='sheetitems', to=orm['sheets.Sheet'])),
        ('description', self.gf('django.db.models.fields.TextField')(db_index=True, blank=True)),
        ('description_he', self.gf('django.db.models.fields.TextField')(db_index=True, null=True, blank=True)),
        ('description_en', self.gf('django.db.models.fields.TextField')(db_index=True, null=True, blank=True)),
        ('description_ar', self.gf('django.db.models.fields.TextField')(db_index=True, null=True, blank=True)),
        ('description_ru', self.gf('django.db.models.fields.TextField')(db_index=True, null=True, blank=True)),
        ('budget', self.gf('django.db.models.fields.DecimalField')(db_index=True, null=True, max_digits=26, decimal_places=2, blank=True)),
        ('actual', self.gf('django.db.models.fields.DecimalField')(db_index=True, null=True, max_digits=26, decimal_places=2, blank=True)),
        ('node', self.gf('django.db.models.fields.related.ForeignKey')(related_name='sheetitems', to=orm['sheets.TemplateNode'])),
    ))
    db.send_create_signal(u'sheets', ['SheetItem'])
    # Adding model 'SheetItemComment'
    db.create_table(u'sheets_sheetitemcomment', (
        (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('created_on', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
        ('last_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, db_index=True, blank=True)),
        ('uuid', self.gf('uuidfield.fields.UUIDField')(db_index=True, unique=True, max_length=32, blank=True)),
        ('item', self.gf('django.db.models.fields.related.ForeignKey')(related_name='discussion', to=orm['sheets.SheetItem'])),
        ('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='item_comments', to=orm['accounts.Account'])),
        ('comment', self.gf('django.db.models.fields.TextField')()),
    ))
    db.send_create_signal(u'sheets', ['SheetItemComment'])
    # Adding model 'DenormalizedSheetItem'
    # Flattened copy of SheetItem + its TemplateNode data, linked 1:1 via `normal_item`.
    db.create_table(u'sheets_denormalizedsheetitem', (
        (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('uuid', self.gf('uuidfield.fields.UUIDField')(db_index=True, unique=True, max_length=32, blank=True)),
        ('name', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
        ('name_he', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=255, null=True, blank=True)),
        ('name_en', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=255, null=True, blank=True)),
        ('name_ar', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=255, null=True, blank=True)),
        ('name_ru', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=255, null=True, blank=True)),
        ('code', self.gf('django.db.models.fields.CharField')(max_length=50, db_index=True)),
        ('direction', self.gf('django.db.models.fields.CharField')(default='REVENUE', max_length=15, db_index=True)),
        ('parent', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='children', null=True, to=orm['sheets.DenormalizedSheetItem'])),
        ('path', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=255, null=True, blank=True)),
        ('sheet', self.gf('django.db.models.fields.related.ForeignKey')(related_name='denormalizedsheetitems', to=orm['sheets.Sheet'])),
        ('description', self.gf('django.db.models.fields.TextField')(db_index=True, blank=True)),
        ('description_he', self.gf('django.db.models.fields.TextField')(db_index=True, null=True, blank=True)),
        ('description_en', self.gf('django.db.models.fields.TextField')(db_index=True, null=True, blank=True)),
        ('description_ar', self.gf('django.db.models.fields.TextField')(db_index=True, null=True, blank=True)),
        ('description_ru', self.gf('django.db.models.fields.TextField')(db_index=True, null=True, blank=True)),
        ('budget', self.gf('django.db.models.fields.DecimalField')(db_index=True, null=True, max_digits=26, decimal_places=2, blank=True)),
        ('actual', self.gf('django.db.models.fields.DecimalField')(db_index=True, null=True, max_digits=26, decimal_places=2, blank=True)),
        ('normal_item', self.gf('django.db.models.fields.related.OneToOneField')(related_name='denormalized', unique=True, to=orm['sheets.SheetItem'])),
        ('node_description', self.gf('django.db.models.fields.TextField')(blank=True)),
        ('node_description_he', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ('node_description_en', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ('node_description_ar', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ('node_description_ru', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
    ))
    db.send_create_signal(u'sheets', ['DenormalizedSheetItem'])
    # Adding M2M table for field inverse on 'DenormalizedSheetItem'
    m2m_table_name = db.shorten_name(u'sheets_denormalizedsheetitem_inverse')
    db.create_table(m2m_table_name, (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('from_denormalizedsheetitem', models.ForeignKey(orm[u'sheets.denormalizedsheetitem'], null=False)),
        ('to_denormalizedsheetitem', models.ForeignKey(orm[u'sheets.denormalizedsheetitem'], null=False))
    ))
    db.create_unique(m2m_table_name, ['from_denormalizedsheetitem_id', 'to_denormalizedsheetitem_id'])
    # Adding M2M table for field backwards on 'DenormalizedSheetItem'
    m2m_table_name = db.shorten_name(u'sheets_denormalizedsheetitem_backwards')
    db.create_table(m2m_table_name, (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('from_denormalizedsheetitem', models.ForeignKey(orm[u'sheets.denormalizedsheetitem'], null=False)),
        ('to_denormalizedsheetitem', models.ForeignKey(orm[u'sheets.denormalizedsheetitem'], null=False))
    ))
    db.create_unique(m2m_table_name, ['from_denormalizedsheetitem_id', 'to_denormalizedsheetitem_id'])
def backwards(self, orm):
    """Reverse this migration: drop everything that forwards() created.

    Order matters: the unique constraint is removed before its table is
    dropped, and M2M join tables are dropped alongside their owner models.
    Auto-generated by South; do not edit by hand.
    """
    # Removing unique constraint on 'TemplateNodeRelation', fields ['node', 'template']
    db.delete_unique(u'sheets_templatenoderelation', ['node_id', 'template_id'])

    # Deleting model 'Template'
    db.delete_table(u'sheets_template')

    # Removing M2M table for field divisions on 'Template'
    db.delete_table(db.shorten_name(u'sheets_template_divisions'))

    # Deleting model 'TemplateNode'
    db.delete_table(u'sheets_templatenode')

    # Removing M2M table for field inverse on 'TemplateNode'
    db.delete_table(db.shorten_name(u'sheets_templatenode_inverse'))

    # Removing M2M table for field backwards on 'TemplateNode'
    db.delete_table(db.shorten_name(u'sheets_templatenode_backwards'))

    # Deleting model 'TemplateNodeRelation'
    db.delete_table(u'sheets_templatenoderelation')

    # Deleting model 'Sheet'
    db.delete_table(u'sheets_sheet')

    # Deleting model 'SheetItem'
    db.delete_table(u'sheets_sheetitem')

    # Deleting model 'SheetItemComment'
    db.delete_table(u'sheets_sheetitemcomment')

    # Deleting model 'DenormalizedSheetItem'
    db.delete_table(u'sheets_denormalizedsheetitem')

    # Removing M2M table for field inverse on 'DenormalizedSheetItem'
    db.delete_table(db.shorten_name(u'sheets_denormalizedsheetitem_inverse'))

    # Removing M2M table for field backwards on 'DenormalizedSheetItem'
    db.delete_table(db.shorten_name(u'sheets_denormalizedsheetitem_backwards'))
# South "frozen ORM": an auto-generated snapshot of every model (and each
# of its fields) that this migration may reference, so the migration runs
# against this historical schema instead of the live models.py.
# Do not edit by hand — regenerate with South if the schema changes.
models = {
    u'accounts.account': {
        'Meta': {'ordering': "['email', 'created_on']", 'object_name': 'Account'},
        'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
        'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}),
        'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
        'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
        'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'language': ('django.db.models.fields.CharField', [], {'default': "'he'", 'max_length': '2'}),
        'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
        'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
        'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
        'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
        'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
        'uuid': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'})
    },
    u'auth.group': {
        'Meta': {'object_name': 'Group'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
        'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
    },
    u'auth.permission': {
        'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
        'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
    },
    u'contenttypes.contenttype': {
        'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
        'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
    },
    u'entities.division': {
        'Meta': {'ordering': "['index', 'name']", 'unique_together': "(('name', 'domain'),)", 'object_name': 'Division'},
        'budgeting': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
        'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
        'domain': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'divisions'", 'to': u"orm['entities.Domain']"}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'index': ('django.db.models.fields.PositiveSmallIntegerField', [], {'db_index': 'True'}),
        'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
        'name_ar': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'name_en': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'name_he': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'name_ru': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'})
    },
    u'entities.domain': {
        'Meta': {'ordering': "['name']", 'object_name': 'Domain'},
        'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
        'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '3'}),
        'ground_surface_unit': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '25'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
        'measurement_system': ('django.db.models.fields.CharField', [], {'default': "'metric'", 'max_length': '8'}),
        'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
        'name_ar': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
        'name_en': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
        'name_he': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
        'name_ru': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'})
    },
    u'entities.entity': {
        'Meta': {'ordering': "('division__domain', 'division__index', 'name')", 'unique_together': "(('name', 'parent', 'division'),)", 'object_name': 'Entity'},
        'code': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
        'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
        'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
        'description_ar': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'description_he': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'description_ru': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'division': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': u"orm['entities.Division']"}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
        'name_ar': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'name_en': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'name_he': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'name_ru': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['entities.Entity']"}),
        'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': "'name'", 'unique_with': '()'})
    },
    u'sheets.denormalizedsheetitem': {
        'Meta': {'ordering': "['code']", 'object_name': 'DenormalizedSheetItem'},
        'actual': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '26', 'decimal_places': '2', 'blank': 'True'}),
        'backwards': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'forwards'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['sheets.DenormalizedSheetItem']"}),
        'budget': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '26', 'decimal_places': '2', 'blank': 'True'}),
        'code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
        'description': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'blank': 'True'}),
        'description_ar': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
        'description_en': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
        'description_he': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
        'description_ru': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
        'direction': ('django.db.models.fields.CharField', [], {'default': "'REVENUE'", 'max_length': '15', 'db_index': 'True'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'inverse': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'inverse_rel_+'", 'null': 'True', 'to': u"orm['sheets.DenormalizedSheetItem']"}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
        'name_ar': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'name_en': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'name_he': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'name_ru': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'node_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
        'node_description_ar': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'node_description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'node_description_he': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'node_description_ru': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'normal_item': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'denormalized'", 'unique': 'True', 'to': u"orm['sheets.SheetItem']"}),
        'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['sheets.DenormalizedSheetItem']"}),
        'path': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'sheet': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'denormalizedsheetitems'", 'to': u"orm['sheets.Sheet']"}),
        'uuid': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'})
    },
    u'sheets.sheet': {
        'Meta': {'ordering': "['entity']", 'object_name': 'Sheet'},
        'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
        'description': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'blank': 'True'}),
        'description_ar': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
        'description_en': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
        'description_he': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
        'description_ru': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
        'entity': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sheets'", 'to': u"orm['entities.Entity']"}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
        'period_end': ('django.db.models.fields.DateField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
        'period_start': ('django.db.models.fields.DateField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
        'template': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'using_sheets'", 'to': u"orm['sheets.Template']"}),
        'uuid': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'})
    },
    u'sheets.sheetitem': {
        'Meta': {'ordering': "['node']", 'object_name': 'SheetItem'},
        'actual': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '26', 'decimal_places': '2', 'blank': 'True'}),
        'budget': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '26', 'decimal_places': '2', 'blank': 'True'}),
        'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
        'description': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'blank': 'True'}),
        'description_ar': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
        'description_en': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
        'description_he': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
        'description_ru': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
        'node': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sheetitems'", 'to': u"orm['sheets.TemplateNode']"}),
        'sheet': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sheetitems'", 'to': u"orm['sheets.Sheet']"}),
        'uuid': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'})
    },
    u'sheets.sheetitemcomment': {
        'Meta': {'ordering': "['user', 'last_modified']", 'object_name': 'SheetItemComment'},
        'comment': ('django.db.models.fields.TextField', [], {}),
        'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'item': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'discussion'", 'to': u"orm['sheets.SheetItem']"}),
        'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
        'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'item_comments'", 'to': u"orm['accounts.Account']"}),
        'uuid': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'})
    },
    u'sheets.template': {
        'Meta': {'ordering': "['name']", 'object_name': 'Template'},
        'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
        'description': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'blank': 'True'}),
        'description_ar': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
        'description_en': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
        'description_he': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
        'description_ru': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
        'divisions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['entities.Division']", 'symmetrical': 'False'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
        'name_ar': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'name_en': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'name_he': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'name_ru': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'period_start': ('django.db.models.fields.DateField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
        'uuid': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'})
    },
    u'sheets.templatenode': {
        'Meta': {'ordering': "['name']", 'object_name': 'TemplateNode'},
        'backwards': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'forwards'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['sheets.TemplateNode']"}),
        'code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
        'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
        'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
        'description_ar': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'description_he': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'description_ru': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'direction': ('django.db.models.fields.CharField', [], {'default': "'REVENUE'", 'max_length': '15', 'db_index': 'True'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'inverse': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'inverse_rel_+'", 'null': 'True', 'to': u"orm['sheets.TemplateNode']"}),
        'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
        'name_ar': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'name_en': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'name_he': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'name_ru': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['sheets.TemplateNode']"}),
        'path': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'templates': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'nodes'", 'symmetrical': 'False', 'through': u"orm['sheets.TemplateNodeRelation']", 'to': u"orm['sheets.Template']"}),
        'uuid': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'})
    },
    u'sheets.templatenoderelation': {
        'Meta': {'ordering': "['template__name', 'node__name']", 'unique_together': "(('node', 'template'),)", 'object_name': 'TemplateNodeRelation'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sheets.TemplateNode']"}),
        'template': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sheets.Template']"})
    },
    u'sources.auxsource': {
        'Meta': {'ordering': "['last_modified', 'name']", 'object_name': 'AuxSource'},
        'added_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'auxsources'", 'to': u"orm['accounts.Account']"}),
        'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
        'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
        'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
        'name_ar': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'name_en': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'name_he': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'name_ru': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'notes': ('django.db.models.fields.TextField', [], {}),
        'notes_ar': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'notes_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'notes_he': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'notes_ru': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
        'retrieval_date': ('django.db.models.fields.DateField', [], {}),
        'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
        'uuid': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'})
    },
    u'sources.referencesource': {
        'Meta': {'ordering': "['last_modified', 'name']", 'object_name': 'ReferenceSource'},
        'added_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'referencesources'", 'to': u"orm['accounts.Account']"}),
        'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
        'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
        'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
        'name_ar': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'name_en': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'name_he': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'name_ru': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'notes': ('django.db.models.fields.TextField', [], {}),
        'notes_ar': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'notes_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'notes_he': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'notes_ru': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
        'retrieval_date': ('django.db.models.fields.DateField', [], {}),
        'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
        'uuid': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'})
    }
}

# South only writes schema changes for the apps listed here.
complete_apps = ['sheets']
| |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import hashlib
from frappe.model.db_schema import DbManager
from frappe.installer import get_root_connection
from frappe.database import Database
import os
from markdown2 import markdown
from bs4 import BeautifulSoup
import jinja2.exceptions
def sync():
    """Rebuild the help database from the docs of all installed apps.

    Creates/connects the database, (re)creates the `help` table, imports
    every rendered docs page, then expands the `{index}` placeholders.
    """
    # Function-call form of print: identical output for a single argument
    # on Python 2, and also valid Python 3 syntax.
    print('Syncing help database...')
    help_db = HelpDatabase()
    help_db.make_database()
    help_db.connect()
    help_db.make_table()
    help_db.sync_pages()
    help_db.build_index()
@frappe.whitelist()
def get_help(text):
    """Whitelisted endpoint: search the help database for *text*."""
    help_db = HelpDatabase()
    return help_db.search(text)
@frappe.whitelist()
def get_help_content(path):
    """Whitelisted endpoint: fetch the help page stored at *path*."""
    help_db = HelpDatabase()
    return help_db.get_content(path)
class HelpDatabase(object):
def __init__(self):
self.global_help_setup = frappe.conf.get('global_help_setup')
if self.global_help_setup:
bench_name = os.path.basename(os.path.abspath(frappe.get_app_path('frappe')).split('/apps/')[0])
self.help_db_name = hashlib.sha224(bench_name).hexdigest()[:15]
def make_database(self):
'''make database for global help setup'''
if not self.global_help_setup:
return
dbman = DbManager(get_root_connection())
dbman.drop_database(self.help_db_name)
# make database
if not self.help_db_name in dbman.get_database_list():
try:
dbman.create_user(self.help_db_name, self.help_db_name)
except Exception, e:
# user already exists
if e.args[0] != 1396: raise
dbman.create_database(self.help_db_name)
dbman.grant_all_privileges(self.help_db_name, self.help_db_name)
dbman.flush_privileges()
def connect(self):
if self.global_help_setup:
self.db = Database(user=self.help_db_name, password=self.help_db_name)
else:
self.db = frappe.db
def make_table(self):
if not 'help' in self.db.get_tables():
self.db.sql('''create table help(
path varchar(255),
content text,
title text,
intro text,
full_path text,
fulltext(title),
fulltext(content),
index (path))
COLLATE=utf8mb4_unicode_ci
ENGINE=MyISAM
CHARACTER SET=utf8mb4''')
def search(self, words):
self.connect()
return self.db.sql('''
select title, intro, path from help where title like '%{term}%' union
select title, intro, path from help where match(content) against ('{term}') limit 10'''.format(term=words))
def get_content(self, path):
self.connect()
query = '''select title, content from help
where path like "{path}%" order by path desc limit 1'''
result = None
if not path.endswith('index'):
result = self.db.sql(query.format(path=os.path.join(path, 'index')))
if not result:
result = self.db.sql(query.format(path=path))
return {'title':result[0][0], 'content':result[0][1]} if result else {}
def sync_pages(self):
self.db.sql('truncate help')
doc_contents = '<ol>'
for app in os.listdir('../apps'):
docs_folder = '../apps/{app}/{app}/docs/user'.format(app=app)
self.out_base_path = '../apps/{app}/{app}/docs'.format(app=app)
if os.path.exists(docs_folder):
app_name = getattr(frappe.get_module(app), '__title__', None) or app.title()
doc_contents += '<li><a data-path="/{app}/index">{app_name}</a></li>'.format(
app=app, app_name=app_name)
for basepath, folders, files in os.walk(docs_folder):
files = self.reorder_files(files)
for fname in files:
if fname.rsplit('.', 1)[-1] in ('md', 'html'):
fpath = os.path.join(basepath, fname)
with open(fpath, 'r') as f:
try:
content = frappe.render_template(unicode(f.read(), 'utf-8'),
{'docs_base_url': '/assets/{app}_docs'.format(app=app)})
relpath = self.get_out_path(fpath)
relpath = relpath.replace("user", app)
content = markdown(content)
title = self.make_title(basepath, fname, content)
intro = self.make_intro(content)
content = self.make_content(content, fpath, relpath)
self.db.sql('''insert into help(path, content, title, intro, full_path)
values (%s, %s, %s, %s, %s)''', (relpath, content, title, intro, fpath))
except jinja2.exceptions.TemplateSyntaxError:
print "Invalid Jinja Template for {0}. Skipping".format(fpath)
doc_contents += "</ol>"
self.db.sql('''insert into help(path, content, title, intro, full_path) values (%s, %s, %s, %s, %s)''',
('/documentation/index', doc_contents, 'Documentation', '', ''))
def make_title(self, basepath, filename, html):
if '<h1>' in html:
title = html.split("<h1>", 1)[1].split("</h1>", 1)[0]
elif 'index' in filename:
title = basepath.rsplit('/', 1)[-1].title().replace("-", " ")
else:
title = filename.rsplit('.', 1)[0].title().replace("-", " ")
return title
def make_intro(self, html):
intro = ""
if '<p>' in html:
intro = html.split('<p>', 1)[1].split('</p>', 1)[0]
if 'Duration' in html:
intro = "Help Video: " + intro
return intro
def make_content(self, html, path, relpath):
if '<h1>' in html:
html = html.split('</h1>', 1)[1]
if '{next}' in html:
html = html.replace('{next}', '')
target = path.split('/', 3)[-1]
app_name = path.split('/', 3)[2]
html += '''
<div class="page-container">
<div class="page-content">
<div class="edit-container text-center">
<i class="fa fa-smile text-muted"></i>
<a class="edit text-muted" href="https://github.com/frappe/{app_name}/blob/develop/{target}">
Improve this page
</a>
</div>
</div>
</div>'''.format(app_name=app_name, target=target)
soup = BeautifulSoup(html, 'html.parser')
for link in soup.find_all('a'):
if link.has_attr('href'):
url = link['href']
if '/user' in url:
data_path = url[url.index('/user'):]
if '.' in data_path:
data_path = data_path[: data_path.rindex('.')]
if data_path:
link['data-path'] = data_path.replace("user", app_name)
parent = self.get_parent(relpath)
if parent:
parent_tag = soup.new_tag('a')
parent_tag.string = parent['title']
parent_tag['class'] = 'parent-link'
parent_tag['data-path'] = parent['path']
soup.find().insert_before(parent_tag)
return soup.prettify()
def build_index(self):
for data in self.db.sql('select path, full_path, content from help'):
self.make_index(data[0], data[1], data[2])
def make_index(self, original_path, full_path, content):
    '''Expand the {index} placeholder in *content* with an ordered list of
    links to the sibling pages of *full_path*, then persist the result.

    Page order comes from index.txt (when present); any other .md/.html
    files in the directory are appended after it. Entries with no stored
    title (bad lines in index.txt) are silently skipped.
    '''
    if '{index}' not in content:
        return

    path = os.path.dirname(full_path)
    files = []

    # get ordered file names from index.txt
    index_path = os.path.join(path, "index.txt")
    if os.path.exists(index_path):
        with open(index_path, 'r') as f:
            files = f.read().splitlines()

    # files not in index.txt
    for f in os.listdir(path):
        if os.path.isdir(os.path.join(path, f)) or '.' not in f:
            # skip sub-directories and extensionless files; the previous
            # unconditional rsplit unpack raised ValueError on names
            # without a dot (e.g. LICENSE, Makefile)
            continue
        name, extn = f.rsplit('.', 1)
        if name not in files \
                and name != 'index' and extn in ('md', 'html'):
            files.append(name)

    links_html = "<ol class='index-links'>"
    for line in files:
        fpath = os.path.join(os.path.dirname(original_path), line)
        # prefer the sub-directory's own index page title
        title = self.db.sql('select title from help where path like %s',
            os.path.join(fpath, 'index') + '%')
        if not title:
            title = self.db.sql('select title from help where path like %s',
                fpath + '%')
        if title:
            title = title[0][0]
            links_html += "<li><a data-path='{fpath}'> {title} </a></li>".format(
                fpath=fpath, title=title)
    links_html += "</ol>"

    html = content.replace('{index}', links_html)
    self.db.sql('update help set content=%s where path=%s', (html, original_path))
def get_out_path(self, path):
    """Return *path* relative to the output base, as an absolute-style URL path."""
    relative = os.path.relpath(path, self.out_base_path)
    return '/' + relative
def get_parent(self, child_path):
    # Resolve the parent help page of *child_path*: return its content
    # record (at least a 'title' key) augmented with a 'path' key, or
    # None when no parent record exists.
    # NOTE(review): assumes child_path contains a '/' below the stripped
    # prefix — str.rindex raises ValueError otherwise; confirm callers
    # always pass nested paths.
    if 'index' in child_path:
        # .../foo/index -> .../foo
        child_path = child_path[: child_path.rindex('index')]
    if child_path[-1] == '/':
        child_path = child_path[:-1]
    # step up one directory level
    child_path = child_path[: child_path.rindex('/')]
    out = None
    if child_path:
        parent_path = child_path + "/index"
        out = self.get_content(parent_path)
    # if parent is documentation root, synthesize the record
    else:
        parent_path = "/documentation/index"
        out = {}
        out['title'] = "Documentation"
    if not out:
        return None
    out['path'] = parent_path
    return out
def reorder_files(self, files):
    """Move index.md (or, failing that, index.html) to the front of *files*.

    Mutates the list in place via a swap and returns it; lists that already
    start with an index file, or contain none, are returned unchanged.
    """
    for index_name in ('index.md', 'index.html'):
        if index_name in files:
            pos = files.index(index_name)
            if pos:
                files[0], files[pos] = files[pos], files[0]
            break
    return files
| |
import mock
import pytest
from addons.wiki.models import WikiVersion
from django.core.exceptions import ValidationError
from django.utils import timezone
from framework.auth.core import Auth
from framework.exceptions import PermissionsError
from nose.tools import assert_raises
from osf.models import Node, Registration, Sanction, RegistrationSchema, NodeLog
from addons.wiki.models import WikiPage
from osf.utils.permissions import ADMIN
from osf.registrations.utils import get_registration_provider_submissions_url
from website import settings
from . import factories
from .utils import assert_datetime_equal, mock_archive
from osf_tests.factories import get_default_metaschema, DraftRegistrationFactory
from addons.wiki.tests.factories import WikiFactory, WikiVersionFactory
from api.providers.workflows import Workflows
from osf.migrations import update_provider_auth_groups
from osf.models.action import RegistrationAction
from osf_tests.management_commands.test_migration_registration_responses import (
prereg_registration_responses,
prereg_registration_metadata_built,
veer_registration_responses,
veer_condensed
)
from osf.utils.workflows import (
RegistrationModerationStates,
RegistrationModerationTriggers,
ApprovalStates
)
# Give every test in this module database access.
pytestmark = pytest.mark.django_db
@pytest.fixture()
def user():
    """A freshly created user."""
    return factories.UserFactory()
@pytest.fixture()
def project(user, auth, fake):
    """A project created by *user*, tagged with a random word.

    `fake` is presumably a Faker fixture supplied by conftest — it is not
    defined in this module.
    """
    ret = factories.ProjectFactory(creator=user)
    ret.add_tag(fake.word(), auth=auth)
    return ret
@pytest.fixture()
def auth(user):
    """Auth context wrapping *user*."""
    return Auth(user)
# copied from tests/test_models.py
def test_factory(user, project):
    """RegistrationFactory builds registrations from kwargs and from projects."""
    # Registration built directly from keyword arguments.
    reg = factories.RegistrationFactory(
        title='t1', description='d1', creator=user,
    )
    assert reg.title == 't1'
    assert reg.description == 'd1'
    assert reg.contributors.count() == 1
    assert user in reg.contributors.all()
    assert reg.registered_user == user
    assert reg.private_links.count() == 0

    # Registration built from an existing project by a second contributor.
    collaborator = factories.UserFactory()
    project.add_contributor(collaborator)
    metadata = {'some': 'data'}
    draft = DraftRegistrationFactory(registration_metadata=metadata, branched_from=project)
    project_registration = factories.RegistrationFactory(
        project=project,
        user=collaborator,
        draft_registration=draft,
    )
    assert project_registration.registered_from == project
    assert project_registration.registered_user == collaborator
class TestRegistration:
    """Attribute-level behavior of the Registration model."""

    def test_registered_schema_id(self):
        reg = factories.RegistrationFactory()
        assert reg.registered_schema_id == reg.registered_schema.get()._id

    # Regression test for https://openscience.atlassian.net/browse/PLAT-776
    # Some very old registrations on prod don't have a schema
    def test_registered_schema_id_with_no_schema(self):
        reg = factories.RegistrationFactory()
        reg.registered_schema.clear()
        assert reg.registered_schema_id is None

    def test_update_category(self, auth):
        # update() changes the category and writes a CATEGORY_UPDATED log
        # carrying both the old and the new value.
        reg = factories.RegistrationFactory(category='instrumentation')
        new_category = 'software'
        reg.update({'category': new_category}, auth=auth)
        assert reg.category == new_category

        last_log = reg.logs.latest()
        assert last_log.action == NodeLog.CATEGORY_UPDATED
        assert last_log.params['category_new'] == new_category
        assert last_log.params['category_original'] == 'instrumentation'

    def test_update_article_doi(self, auth):
        # update() changes article_doi and writes an ARTICLE_DOI_UPDATED log
        # carrying both the old and the new value.
        reg = factories.RegistrationFactory()
        reg.article_doi = '10.1234/giraffe'
        reg.save()
        new_article_doi = '10.12345/elephant'
        reg.update({'article_doi': new_article_doi}, auth=auth)
        assert reg.article_doi == new_article_doi

        last_log = reg.logs.latest()
        assert last_log.action == NodeLog.ARTICLE_DOI_UPDATED
        assert last_log.params['article_doi_new'] == new_article_doi
        assert last_log.params['article_doi_original'] == '10.1234/giraffe'
# copied from tests/test_models.py
class TestRegisterNode:
    """Registering a project copies its attributes, contributors, children,
    wikis and institution affiliations onto the new registration."""

    @pytest.fixture()
    def registration(self, project):
        # Registration of `project` with a private link attached, so the
        # private-link test can show links are NOT shared with the source.
        reg = factories.RegistrationFactory(project=project)
        private_link = factories.PrivateLinkFactory()
        private_link.nodes.add(reg)
        private_link.save()
        return reg

    def test_does_not_have_addon_added_log(self, registration):
        # should not have addon_added log from wiki addon being added
        assert NodeLog.ADDON_ADDED not in list(registration.logs.values_list('action', flat=True))

    def test_title(self, registration, project):
        assert registration.title == project.title

    def test_description(self, registration, project):
        assert registration.description == project.description

    def test_category(self, registration, project):
        assert registration.category == project.category

    def test_permissions(self, registration, project):
        # Registrations start private even when the source project is public.
        assert registration.is_public is False
        project.set_privacy(Node.PUBLIC)
        registration = factories.RegistrationFactory(project=project)
        assert registration.is_public is False

    def test_contributors(self, registration, project):
        assert registration.contributors.count() == project.contributors.count()
        assert (
            set(registration.contributors.values_list('id', flat=True)) ==
            set(project.contributors.values_list('id', flat=True))
        )

    def test_forked_from(self, registration, project, auth):
        # A node that is not a fork
        assert registration.forked_from is None
        # A node that is a fork
        fork = project.fork_node(auth)
        registration = factories.RegistrationFactory(project=fork)
        assert registration.forked_from == project

    def test_private_links(self, registration, project):
        # Private links belong to the registration only, not to the source.
        assert registration.private_links != project.private_links

    def test_creator(self, registration, project, user):
        # Creator stays the original project creator even with extra
        # contributors present.
        user2 = factories.UserFactory()
        project.add_contributor(user2)
        registration = factories.RegistrationFactory(project=project)
        assert registration.creator == user

    def test_logs(self, registration, project):
        # Registered node has all logs except for registration approval initiated
        assert project.logs.count() - 1 == registration.logs.count()
        assert project.logs.first().action == 'registration_initiated'
        project_second_log = project.logs.all()[:2][1]
        assert registration.logs.first().action == project_second_log.action

    def test_tags(self, registration, project):
        assert (
            set(registration.tags.values_list('name', flat=True)) ==
            set(project.tags.values_list('name', flat=True))
        )

    def test_nodes(self, project, user):
        """Child nodes are copied (not shared) into the registration tree."""
        # Create some nodes
        # component of project
        factories.NodeFactory(
            creator=user,
            parent=project,
            title='Title1',
        )
        subproject = factories.ProjectFactory(
            creator=user,
            parent=project,
            title='Title2',
        )
        # component of subproject
        factories.NodeFactory(
            creator=user,
            parent=subproject,
            title='Title3',
        )

        # Make a registration
        registration = factories.RegistrationFactory(project=project)

        # Reload the registration; else test won't catch failures to save
        registration.refresh_from_db()

        # Registration has the nodes
        assert registration._nodes.count() == 2
        assert(
            set(registration._nodes.values_list('title', flat=True)) ==
            set(project._nodes.values_list('title', flat=True))
        )
        # Nodes are copies and not the original versions
        for node in registration._nodes.all():
            assert node not in project._nodes.all()
            assert node.is_registration

    def test_linked_nodes(self, project, user, auth):
        linked_node = factories.ProjectFactory()
        project.add_node_link(linked_node, auth=auth, save=True)
        registration = factories.RegistrationFactory(project=project)
        registration.refresh_from_db()
        assert project.linked_nodes.count() == registration.linked_nodes.count()
        assert project.linked_nodes.first().title == registration.linked_nodes.first().title

    def test_private_contributor_registration(self, project, user):
        """A partial contributor's registration only includes the subtree
        that contributor can access."""
        # Create some nodes
        # component
        comp1 = factories.NodeFactory(  # noqa
            title='Comp1',
            creator=user,
            parent=project,
        )
        # subproject
        comp2 = factories.ProjectFactory(  # noqa
            title='Comp1',
            creator=user,
            parent=project,
        )
        # Create some nodes to share
        shared_component = factories.NodeFactory(
            title='Shared Component',
            creator=user,
            parent=project,
        )
        shared_subproject = factories.ProjectFactory(
            title='Shared Subproject',
            creator=user,
            parent=project,
        )

        # Share the project and some nodes
        user2 = factories.UserFactory()
        project.add_contributor(user2, permissions=ADMIN)
        shared_component.add_contributor(user2, permissions=ADMIN)
        shared_subproject.add_contributor(user2, permissions=ADMIN)

        # Partial contributor registers the node
        registration = factories.RegistrationFactory(project=project, user=user2)

        # The correct subprojects were registered
        for registered_node in registration._nodes.all():
            assert registered_node.root == registration
            assert registered_node.registered_from
            assert registered_node.parent_node == registration
            assert registered_node.registered_from.parent_node == project

    def test_is_registration(self, registration):
        assert registration.is_registration

    def test_registered_date(self, registration):
        # allowance increased in OSF-9050, if this fails sporadically again then registrations may need to be optimized or this test reworked
        assert_datetime_equal(registration.registered_date, timezone.now(), allowance=10000)

    def test_registered_addons(self, registration):
        assert (
            [addon.config.short_name for addon in registration.get_addons()] ==
            [addon.config.short_name for addon in registration.registered_from.get_addons()]
        )

    def test_registered_user(self, project):
        # Add a second contributor
        user2 = factories.UserFactory()
        project.add_contributor(user2, permissions=ADMIN)
        # Second contributor registers project
        registration = factories.RegistrationFactory(project=project, user=user2)
        assert registration.registered_user == user2

    def test_registered_from(self, registration, project):
        assert registration.registered_from == project

    def test_registered_get_absolute_url(self, registration):
        assert (
            registration.get_absolute_url() ==
            '{}v2/registrations/{}/'.format(settings.API_DOMAIN, registration._id)
        )

    def test_registration_list(self, registration, project):
        assert registration._id in [n._id for n in project.registrations_all]

    def test_registration_gets_institution_affiliation(self, user):
        node = factories.NodeFactory()
        institution = factories.InstitutionFactory()

        user.affiliated_institutions.add(institution)
        user.save()

        node.add_affiliated_institution(institution, user=user)
        node.save()

        registration = factories.RegistrationFactory(project=node)
        assert (
            set(registration.affiliated_institutions.values_list('id', flat=True)) ==
            set(node.affiliated_institutions.values_list('id', flat=True))
        )

    def test_registration_of_project_with_no_wiki_pages(self, registration):
        assert WikiPage.objects.get_wiki_pages_latest(registration).exists() is False
        assert registration.wikis.all().exists() is False
        assert registration.wiki_private_uuids == {}

    @mock.patch('website.project.signals.after_create_registration')
    def test_registration_clones_project_wiki_pages(self, mock_signal, project, user):
        # Both wiki versions must be cloned onto the registration with
        # their identifiers preserved but new ids.
        project = factories.ProjectFactory(creator=user, is_public=True)
        wiki_page = WikiFactory(
            user=user,
            node=project,
        )
        wiki = WikiVersionFactory(
            wiki_page=wiki_page,
        )
        current_wiki = WikiVersionFactory(
            wiki_page=wiki_page,
            identifier=2
        )
        draft_reg = factories.DraftRegistrationFactory(branched_from=project)
        registration = project.register_node(get_default_metaschema(), Auth(user), draft_reg, None)
        assert registration.wiki_private_uuids == {}

        registration_wiki_current = WikiVersion.objects.get_for_node(registration, current_wiki.wiki_page.page_name)
        assert registration_wiki_current.wiki_page.node == registration
        assert registration_wiki_current._id != current_wiki._id
        assert registration_wiki_current.identifier == 2

        registration_wiki_version = WikiVersion.objects.get_for_node(registration, wiki.wiki_page.page_name, version=1)
        assert registration_wiki_version.wiki_page.node == registration
        assert registration_wiki_version._id != wiki._id
        assert registration_wiki_version.identifier == 1

    def test_legacy_private_registrations_can_be_made_public(self, registration, auth):
        registration.is_public = False
        registration.set_privacy(Node.PUBLIC, auth=auth)
        assert registration.is_public
class TestRegisterNodeContributors:
    """Unregistered (unclaimed) contributors are carried over onto
    registrations — including contributors on child components — and their
    unclaimed records gain an entry for the new registration."""

    @pytest.fixture()
    def project_two(self, user, auth):
        return factories.ProjectFactory(creator=user)

    @pytest.fixture()
    def component(self, user, auth, project_two):
        return factories.NodeFactory(
            creator=user,
            parent=project_two,
        )

    @pytest.fixture()
    def contributor_unregistered(self, user, auth, project_two):
        ret = project_two.add_unregistered_contributor(fullname='Johnny Git Gud', email='ford.prefect@hitchhikers.com', auth=auth)
        project_two.save()
        return ret

    @pytest.fixture()
    def contributor_unregistered_no_email(self, user, auth, project_two, component):
        # Unregistered contributor added without an email address.
        ret = component.add_unregistered_contributor(fullname='Johnny B. Bard', email='', auth=auth)
        component.save()
        return ret

    @pytest.fixture()
    def registration(self, project_two, component, contributor_unregistered, contributor_unregistered_no_email):
        with mock_archive(project_two, autoapprove=True) as registration:
            return registration

    def test_unregistered_contributors_unclaimed_records_get_copied(self, user, project, component, registration, contributor_unregistered, contributor_unregistered_no_email):
        contributor_unregistered.refresh_from_db()
        contributor_unregistered_no_email.refresh_from_db()
        assert registration.contributors.filter(id=contributor_unregistered.id).exists()
        assert registration._id in contributor_unregistered.unclaimed_records

        # component
        component_registration = registration.nodes[0]
        assert component_registration.contributors.filter(id=contributor_unregistered_no_email.id).exists()
        assert component_registration._id in contributor_unregistered_no_email.unclaimed_records
# copied from tests/test_registrations
class TestNodeApprovalStates:
    """Sanction lookup and pending/approved flags on registrations.

    Sanctions (embargo, retraction, registration approval, embargo
    termination) live on the root registration; child registrations
    resolve them by searching up through their parents.
    """

    def test_sanction_none(self):
        node = factories.NodeFactory()
        assert bool(node.sanction) is False

    def test_sanction_embargo_termination_first(self):
        # An embargo termination approval takes precedence as `sanction`.
        embargo_termination_approval = factories.EmbargoTerminationApprovalFactory()
        registration = Registration.objects.get(embargo_termination_approval=embargo_termination_approval)
        assert registration.sanction == embargo_termination_approval

    def test_sanction_retraction(self):
        retraction = factories.RetractionFactory()
        registration = Registration.objects.get(retraction=retraction)
        assert registration.sanction == retraction

    def test_sanction_embargo(self):
        embargo = factories.EmbargoFactory()
        registration = Registration.objects.get(embargo=embargo)
        assert registration.sanction == embargo

    def test_sanction_registration_approval(self):
        registration_approval = factories.RegistrationApprovalFactory()
        registration = Registration.objects.get(registration_approval=registration_approval)
        assert registration.sanction == registration_approval

    def test_sanction_searches_parents(self):
        user = factories.UserFactory()
        node = factories.ProjectFactory(creator=user)
        child = factories.NodeFactory(creator=user, parent=node)
        factories.NodeFactory(creator=user, parent=child)
        with mock_archive(node) as registration:
            approval = registration.registration_approval
            # grandchild registration resolves the root's sanction
            sub_reg = registration._nodes.first()._nodes.first()
            assert sub_reg.sanction == approval

    def test_is_pending_registration(self):
        registration_approval = factories.RegistrationApprovalFactory()
        registration = Registration.objects.get(registration_approval=registration_approval)
        assert registration_approval.is_pending_approval
        assert registration.is_pending_registration

    def test_is_pending_registration_searches_parents(self):
        user = factories.UserFactory()
        node = factories.ProjectFactory(creator=user)
        child = factories.NodeFactory(creator=user, parent=node)
        factories.NodeFactory(creator=user, parent=child)
        with mock_archive(node) as registration:
            sub_reg = registration._nodes.first()._nodes.first()
            assert sub_reg.is_pending_registration

    def test_is_registration_approved(self):
        registration_approval = factories.RegistrationApprovalFactory(state=Sanction.APPROVED, approve=True)
        registration = Registration.objects.get(registration_approval=registration_approval)
        assert registration.is_registration_approved

    def test_is_registration_approved_searches_parents(self):
        user = factories.UserFactory()
        node = factories.ProjectFactory(creator=user)
        child = factories.NodeFactory(creator=user, parent=node)
        factories.NodeFactory(creator=user, parent=child)
        with mock_archive(node) as registration:
            registration.registration_approval.state = Sanction.APPROVED
            registration.registration_approval.save()
            sub_reg = registration._nodes.first()._nodes.first()
            assert sub_reg.is_registration_approved is True

    def test_is_retracted(self):
        retraction = factories.RetractionFactory(state=Sanction.APPROVED, approve=True)
        registration = Registration.objects.get(retraction=retraction)
        assert registration.is_retracted

    @mock.patch('osf.models.node.AbstractNode.update_search')
    def test_is_retracted_searches_parents(self, mock_update_search):
        user = factories.UserFactory()
        node = factories.ProjectFactory(creator=user)
        child = factories.NodeFactory(creator=user, parent=node)
        factories.NodeFactory(creator=user, parent=child)
        with mock_archive(node, autoapprove=True, retraction=True, autoapprove_retraction=True) as registration:
            sub_reg = registration._nodes.first()._nodes.first()
            assert sub_reg.is_retracted is True

    def test_is_pending_retraction(self):
        retraction = factories.RetractionFactory()
        registration = Registration.objects.get(retraction=retraction)
        assert retraction.is_pending_approval is True
        assert registration.is_pending_retraction is True

    @mock.patch('osf.models.node.AbstractNode.update_search')
    def test_is_pending_retraction_searches_parents(self, mock_update_search):
        user = factories.UserFactory()
        node = factories.ProjectFactory(creator=user)
        child = factories.NodeFactory(creator=user, parent=node)
        factories.NodeFactory(creator=user, parent=child)
        with mock_archive(node, autoapprove=True, retraction=True) as registration:
            sub_reg = registration._nodes.first()._nodes.first()
            assert sub_reg.is_pending_retraction is True

    def test_embargo_end_date(self):
        embargo = factories.EmbargoFactory()
        registration = Registration.objects.get(embargo=embargo)
        assert registration.embargo_end_date == embargo.embargo_end_date

    def test_embargo_end_date_searches_parents(self):
        user = factories.UserFactory()
        node = factories.ProjectFactory(creator=user)
        child = factories.NodeFactory(creator=user, parent=node)
        factories.NodeFactory(creator=user, parent=child)
        with mock_archive(node, embargo=True) as registration:
            sub_reg = registration._nodes.first()._nodes.first()
            assert sub_reg.embargo_end_date == registration.embargo_end_date

    def test_is_pending_embargo(self):
        embargo = factories.EmbargoFactory()
        registration = Registration.objects.get(embargo=embargo)
        assert embargo.is_pending_approval
        assert registration.is_pending_embargo

    def test_is_pending_embargo_searches_parents(self):
        user = factories.UserFactory()
        node = factories.ProjectFactory(creator=user)
        child = factories.NodeFactory(creator=user, parent=node)
        factories.NodeFactory(creator=user, parent=child)
        with mock_archive(node, embargo=True) as registration:
            sub_reg = registration._nodes.first()._nodes.first()
            assert sub_reg.is_pending_embargo

    def test_is_embargoed(self):
        embargo = factories.EmbargoFactory()
        registration = Registration.objects.get(embargo=embargo)
        registration.embargo.state = Sanction.APPROVED
        registration.embargo.save()
        assert registration.is_embargoed

    def test_is_embargoed_searches_parents(self):
        user = factories.UserFactory()
        node = factories.ProjectFactory(creator=user)
        child = factories.NodeFactory(creator=user, parent=node)
        factories.NodeFactory(creator=user, parent=child)
        with mock_archive(node, embargo=True, autoapprove=True) as registration:
            sub_reg = registration._nodes.first()._nodes.first()
            assert sub_reg.is_embargoed
@pytest.mark.enable_implicit_clean
class TestDOIValidation:
    """article_doi must be stored as a bare DOI; URL forms and doi:
    prefixes are rejected by the validator."""

    def test_validate_bad_doi(self):
        reg = factories.RegistrationFactory()

        with pytest.raises(ValidationError):
            reg.article_doi = 'nope'
            reg.save()
        with pytest.raises(ValidationError):
            reg.article_doi = 'https://dx.doi.org/10.123.456'
            reg.save()  # rejected: callers should save the bare DOI, not a URL
        with pytest.raises(ValidationError):
            reg.article_doi = 'doi:10.10.1038/nwooo1170'
            reg.save()  # rejected: callers should save without the doi: prefix

    def test_validate_good_doi(self):
        reg = factories.RegistrationFactory()
        doi = '10.11038/nwooo1170'
        reg.article_doi = doi
        reg.save()
        assert reg.article_doi == doi
class TestRegistrationMixin:
    """expand_registration_responses builds nested registration_metadata
    from flat registration_responses for legacy schemas."""

    @pytest.fixture()
    def draft_prereg(self, prereg_schema):
        return factories.DraftRegistrationFactory(
            registration_schema=prereg_schema,
            registration_metadata={},
        )

    @pytest.fixture()
    def draft_veer(self, veer_schema):
        return factories.DraftRegistrationFactory(
            registration_schema=veer_schema,
            registration_metadata={},
        )

    @pytest.fixture()
    def prereg_schema(self):
        return RegistrationSchema.objects.get(
            name='Prereg Challenge',
            schema_version=2
        )

    @pytest.fixture()
    def veer_schema(self):
        return RegistrationSchema.objects.get(
            name__icontains='Pre-Registration in Social Psychology',
            schema_version=2
        )

    def test_expand_registration_responses(self, draft_prereg):
        draft_prereg.registration_responses = prereg_registration_responses
        draft_prereg.save()
        assert draft_prereg.registration_metadata == {}

        # expand returns the built metadata; it does not mutate the draft
        registration_metadata = draft_prereg.expand_registration_responses()

        assert registration_metadata == prereg_registration_metadata_built

    def test_expand_registration_responses_veer(self, draft_veer):
        draft_veer.registration_responses = veer_registration_responses
        draft_veer.save()
        assert draft_veer.registration_metadata == {}

        registration_metadata = draft_veer.expand_registration_responses()

        # expected fixture comes from test_migration_registration_responses
        assert registration_metadata == veer_condensed
class TestRegistationModerationStates:
    """Sanction approval-state transitions drive Registration.moderation_state.

    NOTE: the class name misspells "Registration"; it is kept as-is because
    renaming would change collected test IDs.
    """

    @pytest.fixture
    def embargo(self):
        return factories.EmbargoFactory()

    @pytest.fixture
    def registration_approval(self):
        return factories.RegistrationApprovalFactory()

    @pytest.fixture
    def retraction(self):
        return factories.RetractionFactory()

    @pytest.fixture
    def embargo_termination(self):
        return factories.EmbargoTerminationApprovalFactory()

    @pytest.fixture
    def moderator(self):
        return factories.AuthUserFactory()

    @pytest.fixture
    def provider(self, moderator):
        # Pre-moderation provider with `moderator` in its moderator group.
        provider = factories.RegistrationProviderFactory()
        update_provider_auth_groups()
        provider.get_group('moderator').user_set.add(moderator)
        provider.reviews_workflow = Workflows.PRE_MODERATION.value
        provider.save()
        return provider

    @pytest.fixture
    def moderated_registration(self, provider):
        return factories.RegistrationFactory(provider=provider, is_public=True)

    @pytest.fixture
    def withdraw_action(self, moderated_registration):
        action = RegistrationAction.objects.create(
            creator=moderated_registration.creator,
            target=moderated_registration,
            trigger=RegistrationModerationTriggers.REQUEST_WITHDRAWAL.db_name,
            from_state=RegistrationModerationStates.ACCEPTED.db_name,
            to_state=RegistrationModerationStates.PENDING_WITHDRAW.db_name,
            comment='yo'
        )
        action.save()
        return action

    @pytest.fixture
    def withdraw_action_for_retraction(self, retraction):
        action = RegistrationAction.objects.create(
            creator=retraction.target_registration.creator,
            target=retraction.target_registration,
            trigger=RegistrationModerationTriggers.REQUEST_WITHDRAWAL.db_name,
            from_state=RegistrationModerationStates.ACCEPTED.db_name,
            to_state=RegistrationModerationStates.PENDING_WITHDRAW.db_name,
            comment='yo'
        )
        action.save()
        return action

    def test_embargo_states(self, embargo):
        # Walk the embargo sanction through its states and check the
        # registration's derived moderation_state after each transition.
        registration = embargo.target_registration
        embargo.to_UNAPPROVED()
        registration.refresh_from_db()
        assert registration.moderation_state == RegistrationModerationStates.INITIAL.db_name

        embargo.to_PENDING_MODERATION()
        registration.refresh_from_db()
        assert registration.moderation_state == RegistrationModerationStates.PENDING.db_name

        embargo.to_APPROVED()
        registration.refresh_from_db()
        assert registration.moderation_state == RegistrationModerationStates.EMBARGO.db_name

        embargo.to_COMPLETED()
        registration.refresh_from_db()
        assert registration.moderation_state == RegistrationModerationStates.ACCEPTED.db_name

        embargo.to_MODERATOR_REJECTED()
        registration.refresh_from_db()
        assert registration.moderation_state == RegistrationModerationStates.REJECTED.db_name

        embargo.to_REJECTED()
        registration.refresh_from_db()
        assert registration.moderation_state == RegistrationModerationStates.REVERTED.db_name

    def test_registration_approval_states(self, registration_approval):
        registration = registration_approval.target_registration
        registration_approval.to_UNAPPROVED()
        registration.refresh_from_db()
        assert registration.moderation_state == RegistrationModerationStates.INITIAL.db_name

        registration_approval.to_PENDING_MODERATION()
        registration.refresh_from_db()
        assert registration.moderation_state == RegistrationModerationStates.PENDING.db_name

        registration_approval.to_APPROVED()
        registration.refresh_from_db()
        assert registration.moderation_state == RegistrationModerationStates.ACCEPTED.db_name

        registration_approval.to_MODERATOR_REJECTED()
        registration.refresh_from_db()
        assert registration.moderation_state == RegistrationModerationStates.REJECTED.db_name

        registration_approval.to_REJECTED()
        registration.refresh_from_db()
        assert registration.moderation_state == RegistrationModerationStates.REVERTED.db_name

    def test_retraction_states_over_registration_approval(self, registration_approval, withdraw_action):
        # A retraction's state takes precedence over the underlying
        # registration approval once a withdrawal has been requested.
        registration = registration_approval.target_registration
        registration.is_public = True
        retraction = registration.retract_registration(registration.creator, justification='test')
        registration_approval.to_APPROVED()
        registration.refresh_from_db()
        assert registration.moderation_state == RegistrationModerationStates.PENDING_WITHDRAW_REQUEST.db_name

        retraction.to_PENDING_MODERATION()
        registration.refresh_from_db()
        assert registration.moderation_state == RegistrationModerationStates.PENDING_WITHDRAW.db_name

        retraction.to_APPROVED()
        registration.refresh_from_db()
        assert registration.moderation_state == RegistrationModerationStates.WITHDRAWN.db_name

        # A rejected withdrawal falls back to the approved state.
        retraction.to_MODERATOR_REJECTED()
        registration.refresh_from_db()
        assert registration.moderation_state == RegistrationModerationStates.ACCEPTED.db_name

        retraction.to_REJECTED()
        registration.refresh_from_db()
        assert registration.moderation_state == RegistrationModerationStates.ACCEPTED.db_name

    def test_retraction_states_over_embargo(self, embargo):
        registration = embargo.target_registration
        retraction = registration.retract_registration(user=registration.creator, justification='test')
        embargo.to_APPROVED()
        registration.refresh_from_db()
        assert registration.moderation_state == RegistrationModerationStates.PENDING_WITHDRAW_REQUEST.db_name

        retraction.to_PENDING_MODERATION()
        registration.refresh_from_db()
        assert registration.moderation_state == RegistrationModerationStates.PENDING_WITHDRAW.db_name

        retraction.to_APPROVED()
        registration.refresh_from_db()
        assert registration.moderation_state == RegistrationModerationStates.WITHDRAWN.db_name

        # A rejected withdrawal falls back to the embargo state while the
        # embargo is active...
        retraction.to_MODERATOR_REJECTED()
        registration.refresh_from_db()
        assert registration.moderation_state == RegistrationModerationStates.EMBARGO.db_name

        retraction.to_REJECTED()
        registration.refresh_from_db()
        assert registration.moderation_state == RegistrationModerationStates.EMBARGO.db_name

        # ...and to ACCEPTED once the embargo has completed.
        embargo.to_COMPLETED()
        registration.refresh_from_db()
        assert registration.moderation_state == RegistrationModerationStates.ACCEPTED.db_name

        retraction.to_MODERATOR_REJECTED()
        registration.refresh_from_db()
        assert registration.moderation_state == RegistrationModerationStates.ACCEPTED.db_name

    def test_embargo_termination_states(self, embargo_termination):
        registration = embargo_termination.target_registration
        assert registration.moderation_state == RegistrationModerationStates.PENDING_EMBARGO_TERMINATION.db_name

        embargo_termination.to_REJECTED()
        registration.update_moderation_state()
        assert registration.moderation_state == RegistrationModerationStates.EMBARGO.db_name

        embargo_termination.to_APPROVED()
        registration.update_moderation_state()
        assert registration.moderation_state == RegistrationModerationStates.ACCEPTED.db_name

    def test_retraction_states_over_embargo_termination(self, embargo_termination):
        registration = embargo_termination.target_registration
        embargo_termination.accept()
        registration.refresh_from_db()
        assert registration.moderation_state == RegistrationModerationStates.ACCEPTED.db_name

        retraction = registration.retract_registration(user=registration.creator, justification='because')
        registration.refresh_from_db()
        assert registration.moderation_state == RegistrationModerationStates.PENDING_WITHDRAW_REQUEST.db_name

        retraction.to_PENDING_MODERATION()
        registration.refresh_from_db()
        assert registration.moderation_state == RegistrationModerationStates.PENDING_WITHDRAW.db_name

        retraction.to_APPROVED()
        registration.refresh_from_db()
        assert registration.moderation_state == RegistrationModerationStates.WITHDRAWN.db_name

        retraction.to_MODERATOR_REJECTED()
        registration.refresh_from_db()
        assert registration.moderation_state == RegistrationModerationStates.ACCEPTED.db_name

        retraction.to_REJECTED()
        registration.refresh_from_db()
        assert registration.moderation_state == RegistrationModerationStates.ACCEPTED.db_name
class TestForcedWithdrawal:
    """Moderator-initiated (forced) retraction of moderated registrations."""

    @pytest.fixture
    def embargo_termination(self):
        return factories.EmbargoTerminationApprovalFactory()

    @pytest.fixture
    def moderator(self):
        return factories.AuthUserFactory()

    @pytest.fixture
    def provider(self, moderator):
        # Pre-moderation provider with `moderator` in its moderator group.
        provider = factories.RegistrationProviderFactory()
        update_provider_auth_groups()
        provider.get_group('moderator').user_set.add(moderator)
        provider.reviews_workflow = Workflows.PRE_MODERATION.value
        provider.save()
        return provider

    @pytest.fixture
    def moderated_registration(self, provider):
        registration = factories.RegistrationFactory(provider=provider, is_public=True)
        # Move to implicit ACCEPTED state
        registration.update_moderation_state()
        return registration

    @pytest.fixture
    def unmoderated_registration(self):
        registration = factories.RegistrationFactory(is_public=True)
        # Move to implicit ACCEPTED state
        registration.update_moderation_state()
        return registration

    def test_force_retraction_changes_state(self, moderated_registration, moderator):
        # Forced retraction skips moderation: the retraction goes straight
        # to APPROVED and the registration to WITHDRAWN.
        moderated_registration.retract_registration(
            user=moderator, justification='because', moderator_initiated=True)

        moderated_registration.refresh_from_db()
        assert moderated_registration.is_retracted
        assert moderated_registration.retraction.approval_stage is ApprovalStates.APPROVED
        assert moderated_registration.moderation_state == RegistrationModerationStates.WITHDRAWN.db_name

    def test_force_retraction_writes_action(self, moderated_registration, moderator):
        justification = 'because power'
        moderated_registration.retract_registration(
            user=moderator, justification=justification, moderator_initiated=True)

        # The stored justification is prefixed to flag the forced withdrawal.
        expected_justification = 'Force withdrawn by moderator: ' + justification
        assert moderated_registration.retraction.justification == expected_justification

        action = RegistrationAction.objects.last()
        assert action.trigger == RegistrationModerationTriggers.FORCE_WITHDRAW.db_name
        assert action.comment == expected_justification
        assert action.from_state == RegistrationModerationStates.ACCEPTED.db_name
        assert action.to_state == RegistrationModerationStates.WITHDRAWN.db_name

    def test_cannot_force_retraction_on_unmoderated_registration(self):
        unmoderated_registration = factories.RegistrationFactory(is_public=True)
        with assert_raises(ValueError):
            unmoderated_registration.retract_registration(
                user=unmoderated_registration.creator, justification='', moderator_initiated=True)

    def test_nonmoderator_cannot_force_retraction(self, moderated_registration):
        with assert_raises(PermissionsError):
            moderated_registration.retract_registration(
                user=moderated_registration.creator, justification='', moderator_initiated=True)

        assert moderated_registration.retraction is None
        assert moderated_registration.moderation_state == RegistrationModerationStates.ACCEPTED.db_name
class TestUtils:
    """Tests for registration-moderation URL helpers."""

    @pytest.fixture
    def provider_valid(self):
        return factories.RegistrationProviderFactory()

    @pytest.fixture
    def provider_invalid(self):
        # A preprint provider is not a registration provider, hence "invalid" here.
        return factories.PreprintProviderFactory()

    def test_submissions_url_with_valid_provider(self, provider_valid):
        expected = f'{settings.DOMAIN}registries/{provider_valid._id}/moderation/pending'
        assert get_registration_provider_submissions_url(provider_valid) == expected

    def test_submissions_url_with_invalid_provider(self, provider_invalid):
        with pytest.raises(AssertionError):
            get_registration_provider_submissions_url(provider_invalid)
| |
from CommonServerPython import *
from typing import List, Dict, Tuple, Union
import urllib3
# Disable insecure warnings
# (certificate verification is user-configurable through the 'insecure' instance parameter).
urllib3.disable_warnings()

# Root key under which all context outputs of this integration are written.
INTEGRATION_CONTEXT_NAME = 'MSGraphCalendar'
# Default $top page size for paginated Graph list requests.
DEFAULT_PAGE_SIZE = 100
# Sentinel for commands that produce no context outputs.
NO_OUTPUTS: dict = {}
APP_NAME = 'ms-graph-calendar'
# Column order for the human-readable markdown tables.
EVENT_HEADERS = ['Subject', 'Organizer', 'Attendees', 'Start', 'End', 'ID']
CALENDAR_HEADERS = ['Name', 'Owner Name', 'Owner Address', 'ID']
def camel_case_to_readable(cc: Union[str, Dict], fields_to_drop: List[str] = None) -> Union[str, Dict]:
    """
    'camelCase' -> 'Camel Case' (a single string, or the top-level keys of a dictionary)

    Args:
        cc: either a dictionary or a text to transform
        fields_to_drop: top-level keys to drop from an input dictionary.
            Note: only top-level keys are transformed/dropped; nested dict values are untouched.

    Returns:
        A Camel Cased string or Dict; any other input type is returned unchanged.
    """
    if fields_to_drop is None:
        fields_to_drop = []
    if isinstance(cc, str):
        # Special-case 'id' so it renders as 'ID' rather than 'Id'.
        if cc == 'id':
            return 'ID'
        return ''.join(' ' + char if char.isupper() else char.strip() for char in cc).strip().title()
    # isinstance against the builtin dict — isinstance with typing.Dict is deprecated.
    if isinstance(cc, dict):
        return {camel_case_to_readable(field): value for field, value in cc.items() if field not in fields_to_drop}
    return cc
def snakecase_to_camelcase(sc: Union[str, Dict], fields_to_drop: List[str] = None) -> Union[str, Dict]:
    """
    'snake_case' -> 'SnakeCase' (a single string, or the top-level keys of a dictionary)

    NOTE(review): despite the name this produces Pascal case ('SnakeCase'), not
    camel case ('snakeCase') — the first word is title-cased too. Downstream code
    relies on the current output, so it is preserved as-is; confirm against the
    Graph API payloads before changing.

    Args:
        sc: either a dictionary or a text to transform
        fields_to_drop: top-level keys to drop from an input dictionary

    Returns:
        A connectedCamelCased string or Dict; any other input type is returned unchanged.
    """
    if fields_to_drop is None:
        fields_to_drop = []
    if isinstance(sc, str):
        return ''.join([word.title() for word in sc.split('_')])
    # isinstance against the builtin dict — isinstance with typing.Dict is deprecated.
    if isinstance(sc, dict):
        return {snakecase_to_camelcase(field): value for field, value in sc.items() if field not in fields_to_drop}
    return sc
def parse_events(raw_events: Union[Dict, List[Dict]]) -> Tuple[List[Dict], List[Dict]]:
    """
    Parse Calendar Events json data coming from Microsoft Graph into Demisto readable format

    :param raw_events: raw events data — a single event dict or a list of them
    :return: tuple of (human-readable event summaries, full context entries)
    """
    # Fields to filter, dropping to not bloat the incident context.
    fields_to_drop = ['@odata.etag', 'color']
    if not isinstance(raw_events, list):
        raw_events = [raw_events]
    readable_events, context_output = [], []
    for event in raw_events:
        # Only top-level keys become readable ('camelCase' -> 'Camel Case'); nested keys keep Graph casing.
        event_readable: Dict = camel_case_to_readable(event, fields_to_drop)  # type: ignore
        if '@removed' in event:
            event_readable['Status'] = 'deleted'
        # Context entries use the readable keys with the spaces removed ('Owner Name' -> 'OwnerName').
        event_context = {field.replace(' ', ''): value for field, value in event_readable.items()}
        # Keep only a short summary for the war-room table; nested lookups use the
        # original Graph casing ('emailAddress', 'name', 'dateTime') — see note above.
        event_readable = {
            'Subject': event_readable.get('Subject'),
            'ID': event_readable.get('ID'),
            'Organizer': demisto.get(event_readable, 'Organizer.emailAddress.name'),
            'Attendees': [att.get('emailAddress', {}).get('name') for att in event_readable.get('Attendees', [])],
            'Start': event_readable.get('Start', {}).get('dateTime'),
            'End': event_readable.get('End', {}).get('dateTime')
        }
        readable_events.append(event_readable)
        context_output.append(event_context)
    return readable_events, context_output
def parse_calendar(raw_calendars: Union[Dict, List[Dict]]) -> Tuple[List[Dict], List[Dict]]:
    """
    Parse Calendar json data coming from Microsoft Graph into Demisto readable format

    :param raw_calendars: raw calendars data — a single calendar dict or a list of them
    :return: tuple of (human-readable calendar summaries, full context entries)
    """
    if not isinstance(raw_calendars, list):
        raw_calendars = [raw_calendars]
    readable_calendars, context_output = [], []
    for raw_calendar in raw_calendars:
        # Only top-level keys become readable; nested values keep Graph casing.
        readable_calendar: Dict = camel_case_to_readable(raw_calendar, ['@odata.context', 'color'])  # type: ignore
        # Check the RAW dict (as parse_events does): camel_case_to_readable title-cases
        # the key to '@Removed', so checking the transformed dict never matched.
        if '@removed' in raw_calendar:
            readable_calendar['Status'] = 'deleted'
        # Context entries use the readable keys with the spaces removed.
        context_calendar = {field.replace(' ', ''): value for field, value in readable_calendar.items()}
        readable_calendar = {
            'Name': readable_calendar.get('Name'),
            'Owner Name': readable_calendar.get('Owner', {}).get('name'),
            'Owner Address': readable_calendar.get('Owner', {}).get('address'),
            'ID': readable_calendar.get('ID')
        }
        context_output.append(context_calendar)
        readable_calendars.append(readable_calendar)
    return readable_calendars, context_output
def process_event_params(body: str = '', start: str = '', end: str = '', time_zone: str = '',
                         attendees: str = '', location: str = '', **other_params) -> Dict:
    """Build the nested JSON body of an event create/update request from flat command args.

    Args:
        body: plain-text content of the event body.
        start: start time (ISO 8601 string); paired with time_zone when provided.
        end: end time (ISO 8601 string); paired with time_zone when provided.
        time_zone: time zone applied to both start and end.
        attendees: comma-separated attendee email addresses.
        location: display name of the event location.
        other_params: remaining arguments, passed through untouched (e.g. subject).

    Returns:
        Dict ready to be camel-cased and sent to the Graph API.
    """
    # some parameters don't need any processing
    event_params: Dict[str, Union[str, Dict, List[Dict]]] = other_params
    event_params['body'] = {"content": body}
    event_params['location'] = {"displayName": location}
    if start:
        event_params['start'] = {"dateTime": start, "timeZone": time_zone}
    if end:
        event_params['end'] = {"dateTime": end, "timeZone": time_zone}
    # ''.split(',') yields [''] — guard so an empty argument doesn't create a bogus attendee.
    event_params['attendees'] = [
        {'emailAddress': {'address': attendee}} for attendee in attendees.split(',')
    ] if attendees else []
    return event_params
class MsGraphClient:
    """Wrapper around MicrosoftClient exposing the Graph calendar/event endpoints."""

    def __init__(self, tenant_id, auth_id, enc_key, app_name, base_url, verify,
                 proxy, default_user, self_deployed):
        self.ms_client = MicrosoftClient(tenant_id=tenant_id, auth_id=auth_id,
                                         enc_key=enc_key, app_name=app_name, base_url=base_url, verify=verify,
                                         proxy=proxy, self_deployed=self_deployed)
        # Fallback user (id | userPrincipalName) applied when a command omits 'user'.
        self.default_user = default_user

    def test_function(self):
        """
        Performs basic GET request to check if the API is reachable and authentication is successful.
        Returns ok if successful.
        """
        self.ms_client.http_request(method='GET', url_suffix='users/')
        return 'ok', NO_OUTPUTS, NO_OUTPUTS

    def get_calendar(self, user: str, calendar_id: str = None) -> Dict:
        """Returns a single calendar by sending a GET request.

        Args:
        :argument user: the user id | userPrincipalName
        :argument calendar_id: calendar id | name; when omitted the user's default calendar is returned
        """
        if not user and not self.default_user:
            return_error('No user was provided. Please make sure to enter the use either in the instance setting,'
                         ' or in the command parameter.')
        # The parentheses matter: without them the conditional applied to the whole
        # concatenation, so an omitted calendar_id produced an EMPTY url_suffix
        # instead of 'users/{user}/calendar'.
        calendar_raw = self.ms_client.http_request(
            method='GET',
            url_suffix=f'users/{user}/calendar' + (f's/{calendar_id}' if calendar_id else ''))
        return calendar_raw

    def list_calendars(self, user: str, order_by: str = None, next_link: str = None, top: int = DEFAULT_PAGE_SIZE,
                       filter_by: str = None) -> Dict:
        """
        Lists all calendars by sending a GET request.

        Args:
        :argument user: the user id | userPrincipalName
        :argument order_by: specify the sort order of the items returned from Microsoft Graph
        :argument next_link: link for the next page of results, if exists. See Microsoft documentation for more details.
            docs.microsoft.com/en-us/graph/api/event-list?view=graph-rest-1.0
        :argument top: specify the page size of the result set.
        :argument filter_by: filters results.
        """
        params = {'$orderby': order_by} if order_by else {}
        if next_link:  # pagination
            calendars = self.ms_client.http_request(method='GET', full_url=next_link)
        elif filter_by:
            calendars = self.ms_client.http_request(
                method='GET',
                url_suffix=f'users/{user}/calendars?$filter={filter_by}&$top={top}',
                params=params
            )
        else:
            calendars = self.ms_client.http_request(
                method='GET',
                url_suffix=f'users/{user}/calendars?$top={top}',
                params=params
            )
        return calendars

    def list_events(self, user: str, calendar_id: str = '', order_by: str = None, next_link: str = None,
                    top: int = DEFAULT_PAGE_SIZE, filter_by: str = None) -> Dict:
        """
        Returns all events by sending a GET request.

        Args:
        :argument user: the user id | userPrincipalName
        :argument calendar_id: calendar id | name; when omitted the user's default calendar is used
        :argument order_by: specify the sort order of the items returned from Microsoft Graph
        :argument next_link: the link for the next page of results. see Microsoft documentation for more details.
        :argument top: specify the page size of the result set.
        :argument filter_by: filters results.
        """
        calendar_url = f'{user}/calendars/{calendar_id}' if calendar_id else user
        params = {'$orderby': order_by} if order_by else {}
        if next_link:  # pagination
            events = self.ms_client.http_request(method='GET', full_url=next_link)
        elif filter_by:
            events = self.ms_client.http_request(
                method='GET',
                url_suffix=f'users/{calendar_url}/events?$filter={filter_by}&$top={top}', params=params)
        else:
            events = self.ms_client.http_request(
                method='GET',
                url_suffix=f'users/{calendar_url}/events?$top={top}',
                params=params)
        return events

    def get_event(self, user: str, event_id: str) -> Dict:
        """
        Retrieve a single event from a user's default calendar.

        Args:
        :argument user: the user id | userPrincipalName
        :argument event_id: the event id
        """
        event = self.ms_client.http_request(method='GET', url_suffix=f'users/{user}/calendar/events/{event_id}')
        return event

    def create_event(self, user: str, calendar_id: str = '', **kwargs) -> Dict:
        """
        Create a single event in a user calendar, or the default calendar of an Office 365 group.

        Args:
        :argument user: the user id | userPrincipalName
        :argument calendar_id: calendar id | name

        Event Properties (passed through as the request body):
        :keyword attendees: The collection of attendees for the event.
        :keyword body: The body of the message associated with the event. It can be in HTML or text format.
        :keyword subject: The text of the event's subject line.
        :keyword location: The location of the event.
        :keyword end: The date, time, and time zone that the event ends. By default, the end time is in UTC.
        :keyword originalEndTimeZone: The end time zone that was set when the event was created.
        :keyword originalStart: The Timestamp type represents date and time using ISO 8601 format in UTC time.
            For example, midnight UTC on Jan 1, 2014 would look like this: '2014-01-01T00:00:00Z'
        :keyword originalStartTimeZone: The start time zone that was set when the event was created.
        """
        if calendar_id:
            event = self.ms_client.http_request(
                method='POST',
                url_suffix=f'/users/{user}/calendars/{calendar_id}/events',
                json_data=kwargs
            )
        else:
            event = self.ms_client.http_request(
                method='POST',
                url_suffix=f'users/{user}/calendar/events',
                json_data=kwargs
            )
        return event

    def update_event(self, user: str, event_id: str, **kwargs) -> Dict:
        """
        Update a single event in a user's default calendar.

        Args:
        :argument user: the user id | userPrincipalName
        :argument event_id: the event ID

        Event Properties (passed through as the request body):
        :keyword attendees: The collection of attendees for the event.
        :keyword body: The body of the message associated with the event. It can be in HTML or text format.
        :keyword subject: The text of the event's subject line.
        :keyword location: The location of the event.
        :keyword end: The date, time, and time zone that the event ends. By default, the end time is in UTC.
        :keyword originalEndTimeZone: The end time zone that was set when the event was created.
        :keyword originalStart: The Timestamp type represents date and time using ISO 8601 format in UTC time.
            For example, midnight UTC on Jan 1, 2014 would look like this: '2014-01-01T00:00:00Z'
        :keyword originalStartTimeZone: The start time zone that was set when the event was created.
        """
        event = self.ms_client.http_request(
            method='PATCH',
            url_suffix=f'users/{user}/calendar/events/{event_id}',
            json_data=kwargs)
        return event

    def delete_event(self, user: str, event_id: str):
        """
        Delete a single event by sending a DELETE request.

        Args:
        :argument user: the user id | userPrincipalName
        :argument event_id: the event id
        """
        # If successful, this method returns 204 No Content response code.
        # It does not return anything in the response body.
        self.ms_client.http_request(
            method='DELETE',
            url_suffix=f'users/{user}/calendar/events/{event_id}',
            resp_type='text'
        )
def list_events_command(client: MsGraphClient, args: Dict) -> Tuple[str, Dict, Dict]:
    """
    Lists all events and return outputs in Demisto's format.

    Args:
        client: Client object with request
        args: Usually demisto.args()
    """
    events = client.list_events(**args)
    events_readable, events_outputs = parse_events(events.get('value'))  # type: ignore
    next_link = events.get('@odata.nextLink', '')
    if next_link:
        # Surface the pagination cursor in context so the user can fetch the next page.
        entry_context = {
            f'{INTEGRATION_CONTEXT_NAME}.Event(val.ID === obj.ID).NextLink': next_link,
            f'{INTEGRATION_CONTEXT_NAME}.Event(val.ID === obj.ID)': events_outputs,
        }
        title = 'Events (Note that there are more results. Please use the next_link argument to see them.):'
    else:
        entry_context = {f'{INTEGRATION_CONTEXT_NAME}.Event(val.ID === obj.ID)': events_outputs}
        title = 'Events:'
    human_readable = tableToMarkdown(name=title, t=events_readable, headers=EVENT_HEADERS, removeNull=True)
    return human_readable, entry_context, events
def get_event_command(client: MsGraphClient, args: Dict) -> Tuple[str, Dict, Dict]:
    """
    Retrieves an event by event id and return outputs in Demisto's format

    Args:
        client: Client object with request
        args: Usually demisto.args() — expects 'user' and 'event_id'
    """
    event = client.get_event(**args)
    # display the event and it's properties
    event_readable, event_outputs = parse_events(event)
    human_readable = tableToMarkdown(
        name=f"Event - {event_outputs[0].get('Subject')}",
        t=event_readable,
        headers=EVENT_HEADERS,
        removeNull=True
    )
    entry_context = {f'{INTEGRATION_CONTEXT_NAME}.Event(val.ID === obj.ID)': event_outputs}
    return human_readable, entry_context, event
def create_event_command(client: MsGraphClient, args: Dict) -> Tuple[str, Dict, Dict]:
    """
    Creates an event by event id and return outputs in Demisto's format

    Args:
        client: Client object with request
        args: Usually demisto.args()
    """
    # Reshape flat command args into the nested Graph event payload.
    args = process_event_params(**args)
    # Graph property names are produced from snake_case; routing args are stripped from the body.
    params: Dict = snakecase_to_camelcase(args, fields_to_drop=['user', 'calendar_id'])  # type: ignore
    # create the event ('user'/'calendar_id' are still present in args after processing)
    event = client.create_event(user=args.get('user', ''), calendar_id=args.get('calendar_id', ''), **params)
    # display the new event and it's properties
    event_readable, event_outputs = parse_events(event)
    human_readable = tableToMarkdown(
        name="Event was created successfully:",
        t=event_readable,
        headers=EVENT_HEADERS,
        removeNull=True
    )
    entry_context = {f'{INTEGRATION_CONTEXT_NAME}.Event(val.ID === obj.ID)': event_outputs}
    return human_readable, entry_context, event
def update_event_command(client: MsGraphClient, args: Dict) -> Tuple[str, Dict, Dict]:
    """
    Updates an event by event id and returns outputs in Demisto's format.

    Args:
        client: Client object with request
        args: Usually demisto.args() — expects 'user' and 'event_id'
    """
    # Capture the id before args is reshaped into request parameters.
    event_id = args.get('event_id', '')
    args = process_event_params(**args)
    # Strip routing args from the request body and camel-case the rest.
    params: Dict = snakecase_to_camelcase(args, fields_to_drop=['user', 'calendar_id', 'event_id'])  # type: ignore
    # update the event
    event = client.update_event(user=args.get('user', ''), event_id=event_id, **params)
    # display the updated event and its properties
    event_readable, event_outputs = parse_events(event)
    human_readable = tableToMarkdown(
        name="Event:",
        t=event_readable,
        headers=EVENT_HEADERS,
        removeNull=True
    )
    # Keyed the same way as the other event commands. The previous key interpolated
    # the raw event id (unquoted) into the DT filter and omitted '.Event', producing
    # an invalid expression and an inconsistent context path.
    entry_context = {f'{INTEGRATION_CONTEXT_NAME}.Event(val.ID === obj.ID)': event_outputs}
    return human_readable, entry_context, event
def delete_event_command(client: MsGraphClient, args: Dict) -> Tuple[str, Dict, Dict]:
    """
    Delete an event by event id and return outputs in Demisto's format

    Args:
        client: Client object with request
        args: Usually demisto.args() — expects 'user' and 'event_id'
    """
    event_id = str(args.get('event_id'))
    client.delete_event(**args)
    # get the event data from the context (absent if the event was never listed/fetched first)
    event_data = demisto.dt(demisto.context(), f'{INTEGRATION_CONTEXT_NAME}.Event(val.ID === "{event_id}")')
    if isinstance(event_data, list):
        event_data = event_data[0] if event_data else None
    if not event_data:
        # Nothing cached for this event — previously this crashed with a TypeError
        # on the subscript below; still record the deletion keyed by the event id.
        event_data = {'ID': event_id}
    # add a field that indicates that the event was deleted
    event_data['Deleted'] = True
    entry_context = {f'{INTEGRATION_CONTEXT_NAME}.Event(val.ID === obj.ID)': event_data}
    human_readable = 'Event was deleted successfully.'
    return human_readable, entry_context, NO_OUTPUTS
def list_calendars_command(client: MsGraphClient, args: Dict) -> Tuple[str, Dict, Dict]:
    """
    Get all the user's calendars (/calendars navigation property)

    Args:
        client: Client object with request
        args: Usually demisto.args()
    """
    raw_calendars = client.list_calendars(**args)
    calendars_readable, calendars_outputs = parse_calendar(raw_calendars.get('value'))  # type: ignore
    human_readable = tableToMarkdown(
        name='Calendar:',
        t=calendars_readable,
        headers=CALENDAR_HEADERS,
        removeNull=True
    )
    entry_context = {f'{INTEGRATION_CONTEXT_NAME}.Calendar(val.ID === obj.ID)': calendars_outputs}
    return human_readable, entry_context, raw_calendars
def get_calendar_command(client: MsGraphClient, args: Dict) -> Tuple[str, Dict, Dict]:
    """
    Get the properties and relationships of a calendar object.
    The calendar can be one for a user, or the default calendar of an Office 365 group.

    Args:
        client: Client object with request
        args: Usually demisto.args()
    """
    raw_calendar = client.get_calendar(**args)
    calendar_readable, calendar_outputs = parse_calendar(raw_calendar)
    human_readable = tableToMarkdown(
        name='Calendar:',
        t=calendar_readable,
        headers=CALENDAR_HEADERS,
        removeNull=True
    )
    entry_context = {f'{INTEGRATION_CONTEXT_NAME}.Calendar(val.ID === obj.ID)': calendar_outputs}
    return human_readable, entry_context, raw_calendar
def module_test_function_command(client: MsGraphClient, args: Dict) -> Tuple[str, Dict, Dict]:
    """
    Performs a basic GET request to check if the API is reachable and authentication is successful.

    Args:
        client: Client object with request
        args: Usually demisto.args(); unused here but kept for the shared command-handler signature
    """
    return client.test_function()
def main():
    """Entry point: build the Graph client from instance params and dispatch the invoked command."""
    params: dict = demisto.params()
    # All endpoints in this integration target Graph API version v1.0.
    url = params.get('url', '').rstrip('/') + '/v1.0/'
    tenant = params.get('tenant_id')
    auth_and_token_url = params.get('auth_id', '')
    enc_key = params.get('enc_key')
    verify = not params.get('insecure', False)
    proxy = params.get('proxy', False)
    default_user = params.get('default_user')
    self_deployed: bool = params.get('self_deployed', False)

    # Command name -> handler; every handler returns (readable, context, raw).
    commands = {
        'test-module': module_test_function_command,
        'msgraph-calendar-list-calendars': list_calendars_command,
        'msgraph-calendar-get-calendar': get_calendar_command,
        'msgraph-calendar-list-events': list_events_command,
        'msgraph-calendar-get-event': get_event_command,
        'msgraph-calendar-create-event': create_event_command,
        'msgraph-calendar-update-event': update_event_command,
        'msgraph-calendar-delete-event': delete_event_command
    }
    command = demisto.command()
    LOG(f'Command being called is {command}')

    try:
        client: MsGraphClient = MsGraphClient(tenant_id=tenant, auth_id=auth_and_token_url, enc_key=enc_key,
                                              app_name=APP_NAME, base_url=url, verify=verify, proxy=proxy,
                                              default_user=default_user, self_deployed=self_deployed)
        # Fall back to the instance-configured default user when the command omits one.
        # NOTE(review): relies on demisto.args() returning the same mutable dict on every
        # call within one execution — confirm against the server runtime.
        if 'user' not in demisto.args():
            demisto.args()['user'] = client.default_user
        # Run the command
        human_readable, entry_context, raw_response = commands[command](client, demisto.args())  # type: ignore
        # create a war room entry
        return_outputs(readable_output=human_readable, outputs=entry_context, raw_response=raw_response)
    except Exception as err:
        # Surface any failure (auth, HTTP, parsing) as a single error entry.
        return_error(str(err))
# Imported at the bottom by convention for Demisto API modules: the module is appended
# to the script at build time and must come after the integration's own definitions.
from MicrosoftApiModule import *  # noqa: E402

# 'builtin'/'builtins' cover execution inside the Demisto script runner.
if __name__ in ['__main__', 'builtin', 'builtins']:
    main()
| |
# Date: August 2017
# Author: Kutay B. Sezginel
"""
Read Lammps output files for thermal conductivity calculations
"""
import os
import math
import yaml
import csv
import numpy as np
from thermof.reldist import reldist
from thermof.parameters import k_parameters, thermo_headers
def read_thermal_flux(file_path, dt=k_parameters['dt'], start=200014, j_index=3):
    """Read thermal flux autocorellation vs time data from Lammps simulation output file

    Args:
        - file_path (str): Thermal flux autocorellation file generated by Lammps
        - dt (int): Sampling interval (fs). Timestep multiplied by the autocorrelation sampling interval ($s)
        - start (int): Index of the line to start reading flux autocorrelation (corresponds to last function)
        - j_index (int): Column index of the thermal flux in the file

    Returns:
        - list: thermal flux autocorrelation function
        - list: time (ps)
    """
    with open(file_path, 'r') as flux_file:
        lines = flux_file.readlines()
    flux, time = [], []
    for line in lines[start:]:
        columns = line.split()
        flux.append(float(columns[j_index]))
        # First column is a 1-based step index; convert to elapsed time in ps.
        time.append((float(columns[0]) - 1) * dt / 1000.0)
    return flux, time
def calculate_k(flux, k_par=k_parameters):
    """Calculate thermal conductivity (W/mK) from thermal flux autocorrelation function

    Args:
        - flux (list): Thermal flux autocorellation read by read_thermal_flux method
        - k_par (dict): Dictionary of calculation parameters

    Returns:
        - list: Thermal conductivity autocorrelation function (cumulative Green-Kubo integral)
    """
    def term(j):
        # Green-Kubo integrand contribution for a single flux value.
        return j * k_par['volume'] * k_par['dt'] / (k_par['kb'] * math.pow(k_par['temp'], 2)) * k_par['conv']

    # Trapezoid-style start: the first flux value contributes half a step.
    k_data = [term(flux[0] / 2)]
    for j in flux[1:]:
        k_data.append(k_data[-1] + term(j))
    return k_data
def estimate_k(k_data, time, t0=5, t1=10):
    """Get approximate thermal conductivity value for a single simulation.

    The arithmetic average of k values is taken between the given timesteps
    (t0 and t1 must be present in `time`, otherwise ValueError is raised).

    Args:
        - k_data (list): Thermal conductivity autocorrelation function
        - time (list): Simulation timestep
        - t0: Timestep to start taking average of k values
        - t1: Timestep to end taking average of k values

    Returns:
        - float: Estimate thermal conductivity
    """
    window = k_data[time.index(t0):time.index(t1)]
    return sum(window) / len(window)
def average_k(k_runs):
    """Calculate average thermal conductivity for multiple runs

    Args:
        - k_runs (list): 2D list of thermal conductivity autocorrelation function for multiple runs

    Returns:
        - list: Arithmetic average of thermal conductivity per timestep for multiple runs

    Raises:
        TimestepsMismatchError: when any run has a different frame count than the first run.
    """
    n_frames = len(k_runs[0])
    for run_index, run in enumerate(k_runs):
        if len(run) != n_frames:
            raise TimestepsMismatchError('Number of timesteps for inital run not equal to run %i (%i != %i)'
                                         % (run_index, n_frames, len(run)))
    n_runs = len(k_runs)
    # zip(*k_runs) iterates frame-by-frame across all runs.
    return [sum(frame) / n_runs for frame in zip(*k_runs)]
def get_flux_directions(run_dir, k_par=k_parameters, verbose=True):
    """Return thermal flux data file and direction name for each direction as lists.

    Each file containing k_par['prefix'] is selected as a thermal flux file; the
    direction is the text between 'J0Jt_t' and the file extension.
    Example: J0Jt_tx.dat -> prefix should be 'J0Jt_t' and direction would be read as 'x'.

    Args:
        - run_dir (str): Lammps simulation directory with thermal flux files
        - k_par (dict): Dictionary of calculation parameters
        - verbose (bool): Print the number of directions found

    Returns:
        - list: List of thermal flux files found with given prefix
        - list: List of thermal flux directions

    Raises:
        FluxFileNotFoundError: when no file matches the prefix.
    """
    flux_files, directions = [], []
    for file_name in os.listdir(run_dir):
        if k_par['prefix'] not in file_name:
            continue
        flux_files.append(os.path.join(run_dir, file_name))
        directions.append(file_name.split('.')[0].split('J0Jt_t')[1])
    if not directions:
        raise FluxFileNotFoundError('No flux file found with prefix: %s' % k_par['prefix'])
    if verbose:
        print('%i directions found.' % len(directions))
    return flux_files, directions
def read_run(run_dir, k_par=k_parameters, t0=5, t1=10, verbose=True):
    """Read single Lammps simulation run

    Args:
        - run_dir (str): Lammps simulation directory for single run
        - k_par (dict): Dictionary of calculation parameters
        - t0: unused — estimate_k is called with k_par['t0'] instead (NOTE(review): confirm intent)
        - t1: unused — estimate_k is called with k_par['t1'] instead (NOTE(review): confirm intent)
        - verbose (bool): Print information about the run

    Returns:
        - dict: Run data containing thermal conductivity, estimate, timesteps, run name

    Raises:
        RunDirectoryNotFoundError: when run_dir does not exist.

    NOTE(review): this function mutates k_par in place ('fix', 'volume', 'initial_volume',
    'deltaV'); since the default is the shared module-level k_parameters dict, those
    changes persist across calls — confirm this is intentional.
    """
    run_data = dict(name=os.path.basename(run_dir), k={}, k_est={}, time=[], directions=[], hcacf={})
    # NOTE(review): trial_data and runs_id are never used below — likely leftovers.
    trial_data = []
    runs_id = []
    if os.path.isdir(run_dir):
        if k_par['read_thermo']:
            print('Reading log file -> %s' % k_par['log_file']) if verbose else None
            headers = get_thermo_headers(k_par['thermo_style'])
            thermo_data = read_log(os.path.join(run_dir, '%s' % k_par['log_file']), headers=headers)
            fix = k_par['fix']
            run_data['thermo'] = read_thermo(thermo_data, headers=k_par['thermo_style'], fix=fix)
            if 'vol' in k_par['thermo_style']:
                # Volume can change during the run; take the last recorded volume of the last fix.
                if fix is None:
                    fix = list(range(len(thermo_data)))
                    k_par['fix'] = fix
                k_par['initial_volume'] = k_par['volume']
                k_par['volume'] = run_data['thermo'][fix[-1]]['vol'][-1]
                k_par['deltaV'] = (k_par['volume'] - k_par['initial_volume']) / k_par['initial_volume'] * 100
                print('Volume read as: %.3f | Delta V: %.2f %%' % (k_par['volume'], k_par['deltaV'])) if verbose else None
        flux_files, directions = get_flux_directions(run_dir, k_par=k_par, verbose=verbose)
        run_message = '%-9s ->' % run_data['name']
        for direction, flux_file in zip(directions, flux_files):
            # For each direction: flux autocorrelation -> cumulative k -> scalar estimate.
            flux, time = read_thermal_flux(flux_file, dt=k_par['dt'])
            run_data['hcacf'][direction] = flux
            k = calculate_k(flux, k_par=k_par)
            run_data['k'][direction] = k
            run_data['k_est'][direction] = estimate_k(k, time, t0=k_par['t0'], t1=k_par['t1'])
            run_message += ' k: %.3f W/mK (%s) |' % (run_data['k_est'][direction], direction)
        if k_par['read_walltime']:
            run_data['walltime'] = read_walltime(os.path.join(run_dir, '%s' % k_par['log_file']))
        if k_par['read_thexp']:
            run_data['thexp'] = read_thermal_expansion(os.path.join(run_dir, '%s' % k_par['thexp_file']))
            print('Thermal expansion read') if verbose else None
        run_data['time'] = time
        run_data['directions'] = directions
        print(run_message) if verbose else None
    else:
        raise RunDirectoryNotFoundError('Run directory not found: %s' % run_dir)
    if k_par['isotropic']:
        # Isotropic results are the per-timestep average over all directions found.
        run_data['k']['iso'] = average_k([run_data['k'][d] for d in directions])
        run_data['hcacf']['iso'] = average_k([run_data['hcacf'][d] for d in directions])
        run_data['k_est']['iso'] = estimate_k(run_data['k']['iso'], run_data['time'], t0=k_par['t0'], t1=k_par['t1'])
        print('Isotropic -> k: %.3f W/mK from %i directions' % (run_data['k_est']['iso'], len(directions))) if verbose else None
    return run_data
def read_trial(trial_dir, k_par=k_parameters, verbose=True):
    """Read Lammps simulation trial with any number of runs

    Args:
        - trial_dir (str): Lammps simulation directory including directories for multiple runs
        - k_par (dict): Dictionary of calculation parameters
        - verbose (bool): Print information about the run

    Returns:
        - dict: Trial data containing thermal conductivity, estimate, timesteps, run name for each run
    """
    trial = dict(runs=[], data={}, name=os.path.basename(trial_dir))
    if verbose:
        print('\n------ %s ------' % trial['name'])
    for entry in os.listdir(trial_dir):
        run_dir = os.path.join(trial_dir, entry)
        # Only subdirectories are runs; stray files in the trial dir are ignored.
        if not os.path.isdir(run_dir):
            continue
        run_data = read_run(run_dir, k_par=k_par, verbose=verbose)
        trial['data'][run_data['name']] = run_data
        trial['runs'].append(run_data['name'])
    if k_par['average']:
        trial['avg'] = average_trial(trial, isotropic=k_par['isotropic'])
    return trial
def average_trial(trial, isotropic=False):
    """Take average of thermal conductivities for multiple runs.

    Assumes all runs have the same number of directions.

    Args:
        - trial (dict): Trial data as produced by read_trial
        - isotropic (bool): Also average the isotropic ('iso') results when True

    Returns:
        - dict: Trial data average for thermal conductivity and estimate, with
          std/min/max statistics of the per-run estimates
    """
    n_runs = len(trial['runs'])

    def per_run(result_key, direction):
        # One value (or list) per run for the given result key and direction.
        return [trial['data'][run][result_key][direction] for run in trial['runs']]

    trial_avg = dict(k={}, k_est={'stats': {}})
    directions = list(trial['data'][trial['runs'][0]]['directions'])
    if isotropic:
        # Isotropic results were stored under the 'iso' pseudo-direction by read_run.
        directions.append('iso')
    for direction in directions:
        trial_avg['k'][direction] = average_k(per_run('k', direction))
        k_est_runs = per_run('k_est', direction)
        trial_avg['k_est'][direction] = sum(k_est_runs) / n_runs
        trial_avg['k_est']['stats'][direction] = dict(std=np.std(k_est_runs),
                                                      max=max(k_est_runs),
                                                      min=min(k_est_runs))
    return trial_avg
def read_trial_set(trial_set_dir, k_par=k_parameters, verbose=True):
    """Read multiple trials with multiple runs

    Args:
        - trial_set_dir (str): Lammps simulation directory including directories for multiple trials
        - k_par (dict): Dictionary of calculation parameters
        - verbose (bool): Print information about the run

    Returns:
        - dict: Trial set data containing thermal conductivity, estimate, timesteps, trial name for each trial
    """
    trial_set = dict(trials=[], data={}, name=os.path.basename(trial_set_dir))
    for entry in os.listdir(trial_set_dir):
        trial_dir = os.path.join(trial_set_dir, entry)
        # Only subdirectories are trials; stray files are ignored.
        if not os.path.isdir(trial_dir):
            continue
        trial = read_trial(trial_dir, k_par=k_par, verbose=verbose)
        trial_set['trials'].append(entry)
        trial_set['data'][trial['name']] = trial
    return trial_set
def read_log(log_file, headers='Step Temp E_pair E_mol TotEng Press'):
    """Read log.lammps file and return lines for multiple thermo data

    A thermo block starts on the line after one containing *headers* and ends
    at the next line containing 'Loop time'.

    Args:
        - log_file (str): Lammps simulation log file path
        - headers (str): The headers for thermo data ('Step Temp E_pair E_mol TotEng Press')

    Returns:
        - list: 2D list of thermo lines for all fixes
    """
    with open(log_file, 'r') as log:
        log_lines = log.readlines()
    starts = [index + 1 for index, line in enumerate(log_lines) if headers in line]
    ends = [index for index, line in enumerate(log_lines) if 'Loop time' in line]
    return [log_lines[s:e] for s, e in zip(starts, ends)]
def read_thermo(thermo_data, headers=('step', 'temp', 'epair', 'emol', 'etotal', 'press'), fix=None):
    """Read thermo data from given thermo log lines

    Args:
        - thermo_data (list): 2D list of thermo lines for all fixes (see read_log)
        - headers (sequence): The headers for thermo data, in column order
          (immutable tuple default — avoids the mutable-default-argument pitfall)
        - fix (list): Name of the separate fixes in thermo; defaults to 0..n-1 indices

    Returns:
        - dict: Thermo data for all fixes separated as: thermo['fix1']['header1'] = ...

    Raises:
        ThermoFixDataMatchError: when the number of fix names does not match the data.
    """
    if fix is None:
        fix = list(range(len(thermo_data)))
    # Guard clause instead of the previous raise/else nesting.
    if len(fix) != len(thermo_data):
        raise ThermoFixDataMatchError('Fixes: %s do not match fixes read in log file' % ' | '.join(fix))
    thermo = {}
    for fix_name, thermo_fix in zip(fix, thermo_data):
        ther = {key: [] for key in headers}
        for data in thermo_fix:
            line = data.strip().split()
            for i, h in enumerate(headers):
                ther[h].append(float(line[i]))
        thermo[fix_name] = ther
    return thermo
def read_walltime(log_file):
    """Read total wall time from the last line of a log.lammps file

    Args:
        - log_file (str): Lammps simulation log file path

    Returns:
        - list: Wall time in hours, minutes, and seconds -> [h, m, s]

    Raises:
        - WallTimeNotFoundError: If the last line does not hold a wall time,
          usually because the simulation has not finished.
    """
    with open(log_file, 'r') as log:
        log_lines = log.readlines()
    last_line = log_lines[-1]
    if 'Total wall time' not in last_line:
        # Lammps only prints the wall time once the run completes.
        raise WallTimeNotFoundError(
            'Walltime not found! Simulation might not be finished, please check log file -> %s'
            '\nLast line of log file -> %s' % (log_file, last_line))
    hours, minutes, seconds = last_line.split()[-1].split(':')
    return [int(hours), int(minutes), int(seconds)]
def read_thermal_expansion(thexp_file):
    """
    Read thermal expansion csv file.

    Args:
        - thexp_file (str): Thermal expansion csv file

    Returns:
        - dict: Thermal expansion data for Lammps run with
          'step', 'volume', and 'enthalpy' lists.
    """
    thexp = {'step': [], 'volume': [], 'enthalpy': []}
    with open(thexp_file, newline='') as handle:
        reader = csv.reader(handle, delimiter=',', quotechar='|')
        next(reader, None)  # drop the header row
        for row in reader:
            # Column order is fixed: step, volume, enthalpy.
            thexp['step'].append(float(row[0]))
            thexp['volume'].append(float(row[1]))
            thexp['enthalpy'].append(float(row[2]))
    return thexp
def read_framework_distance(run_list, fdist_par):
    """Read trajectory for multiple runs and calculate framework distance

    Args:
        - run_list (list): List of absolute path of run directories
        - fdist_par (dict): Dictionary of calculation parameters; expects
          'traj' (trajectory file name within each run directory) and
          'traj_start' / 'traj_end' (frame indices used to slice coordinates)

    Returns:
        - list: List of dictionaries containing framework distance data
          (keys: x, y, z, title)
    """
    start, end = fdist_par['traj_start'], fdist_par['traj_end']
    dist_data = []
    for run in run_list:
        traj_path = os.path.join(run, fdist_par['traj'])
        x_coords, y_coords, z_coords = reldist(traj_path, end=end)
        # NOTE(review): 0 and 1 are appended to x/y (but not z) — presumably to
        # pin the plotted axis range to [0, 1]; confirm against the plotting code.
        x_coords.append(0)
        x_coords.append(1)
        y_coords.append(0)
        y_coords.append(1)
        # Title is '<parent dir>/<run dir>', i.e. the last two path components.
        title = '%s/%s' % (os.path.split(os.path.split(run)[0])[1], os.path.split(run)[1])
        dist_data.append(dict(x=x_coords[start:], y=y_coords[start:], z=z_coords[start:], title=title))
    return dist_data
def get_thermo_headers(thermo_style, thermo_headers=thermo_headers):
    """
    Build the space separated Lammps thermo header line for the log file.
    """
    selected = (thermo_headers[style] for style in thermo_style)
    return ' '.join(selected)
class FluxFileNotFoundError(Exception):
    """Raised when an expected flux file cannot be found."""


class TimestepsMismatchError(Exception):
    """Raised when timesteps of compared data sets do not match."""


class RunDirectoryNotFoundError(Exception):
    """Raised when an expected run directory does not exist."""


class ThermoFixDataMatchError(Exception):
    """Raised when given fix names do not match the thermo sections in a log."""


class WallTimeNotFoundError(Exception):
    """Raised when the total wall time line is absent from a log file."""
| |
from modularodm import Q
from rest_framework import generics, permissions as drf_permissions
from rest_framework.exceptions import ValidationError, NotFound, PermissionDenied
from framework.auth.oauth_scopes import CoreScopes
from api.base import generic_bulk_views as bulk_views
from api.base import permissions as base_permissions
from api.base.filters import ODMFilterMixin
from api.base.views import JSONAPIBaseView
from api.base.views import BaseLinkedList
from api.base.views import LinkedNodesRelationship
from api.base.views import LinkedRegistrationsRelationship
from api.base.utils import get_object_or_error, is_bulk_request, get_user_auth
from api.collections.serializers import (
CollectionSerializer,
CollectionDetailSerializer,
CollectionNodeLinkSerializer,
)
from api.nodes.serializers import NodeSerializer
from api.registrations.serializers import RegistrationSerializer
from api.nodes.permissions import (
ContributorOrPublic,
ReadOnlyIfRegistration,
ContributorOrPublicForPointers,
)
from website.exceptions import NodeStateError
from osf.models import Collection, NodeRelation
from website.util.permissions import ADMIN
class CollectionMixin(object):
    """Mixin with convenience methods for retrieving the current collection based on the
    current URL. By default, fetches the current node based on the collection_id kwarg.
    """

    serializer_class = CollectionSerializer
    node_lookup_url_kwarg = 'collection_id'

    def get_node(self, check_object_permissions=True):
        """Resolve the collection referenced by the URL kwarg, 404ing for
        non-collection nodes and (optionally) enforcing object permissions.
        """
        collection = get_object_or_error(
            Collection,
            self.kwargs[self.node_lookup_url_kwarg],
            display_name='collection'
        )
        # Nodes that are folders/collections are treated as a separate resource, so if the client
        # requests a non-collection through a collection endpoint, we return a 404
        if not collection.is_collection:
            raise NotFound
        # May raise a permission denied
        if check_object_permissions:
            self.check_object_permissions(self.request, collection)
        return collection
class CollectionList(JSONAPIBaseView, bulk_views.BulkUpdateJSONAPIView, bulk_views.BulkDestroyJSONAPIView, bulk_views.ListBulkCreateJSONAPIView, ODMFilterMixin):
    """Organizer Collections organize projects and components. *Writeable*.

    Paginated list of Project Organizer Collections ordered by their `date_modified`.
    Each resource contains the full representation of the project organizer collection, meaning additional
    requests to an individual Organizer Collection's detail view are not necessary.

    The Project Organizer is a tool to allow the user to make Collections of projects, components, and registrations
    for whatever purpose the user might want to organize them. They make node_links to any Node that a user has
    read access to. Collections through this API do not nest. Currently Collections are private to any individual user,
    though that could change one day.

    ##Collection Attributes

    OSF Organizer Collection entities have the "nodes" `type`.

        name           type               description
        =================================================================================
        title          string             title of Organizer Collection
        date_created   iso8601 timestamp  timestamp that the collection was created
        date_modified  iso8601 timestamp  timestamp when the collection was last updated

    ##Links

    See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).

    ##Actions

    ###Creating New Organizer Collections

        Method:        POST
        URL:           /links/self
        Query Params:  <none>
        Body (JSON):   {
                         "data": {
                           "type": "collections", # required
                           "attributes": {
                             "title": {title},    # required
                           }
                         }
                       }
        Success:       201 CREATED + collection representation

    New Organizer Collections are created by issuing a POST request to this endpoint. The `title` field is
    mandatory. All other fields not listed above will be ignored. If the Organizer Collection creation is successful
    the API will return a 201 response with the representation of the new node in the body.
    For the new Collection's canonical URL, see the `/links/self` field of the response.

    ##Query Params

    + `page=<Int>` -- page number of results to view, default 1

    + `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.

    Organizer Collections may be filtered by their `title`, which is a string field and will be filtered using simple
    substring matching.

    #This Request/Response
    """
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
    )
    # OAuth scopes required to read/write through this endpoint.
    required_read_scopes = [CoreScopes.ORGANIZER_COLLECTIONS_BASE_READ]
    required_write_scopes = [CoreScopes.ORGANIZER_COLLECTIONS_BASE_WRITE]
    serializer_class = CollectionSerializer
    view_category = 'collections'
    view_name = 'collection-list'
    model_class = Collection
    ordering = ('-date_modified', )  # default ordering
    # overrides ODMFilterMixin
    def get_default_odm_query(self):
        # Base query excludes soft-deleted collections.
        base_query = (
            Q('is_deleted', 'ne', True)
        )
        user = self.request.user
        if not user.is_anonymous:
            # Authenticated users only see collections they created.
            permission_query = Q('creator', 'eq', user)
        else:
            # Anonymous users only see public collections.
            permission_query = Q('is_public', 'eq', True)
        query = base_query & permission_query
        return query
    # overrides ListBulkCreateJSONAPIView, BulkUpdateJSONAPIView
    def get_queryset(self):
        # For bulk requests, queryset is formed from request body.
        if is_bulk_request(self.request):
            query = Q('_id', 'in', [coll['id'] for coll in self.request.data])
            auth = get_user_auth(self.request)
            collections = Collection.find(query)
            # Every collection named in a bulk payload must be editable by the requester.
            for collection in collections:
                if not collection.can_edit(auth):
                    raise PermissionDenied
            return collections
        else:
            query = self.get_query_from_request()
            return Collection.find(query)
    # overrides ListBulkCreateJSONAPIView, BulkUpdateJSONAPIView, BulkDestroyJSONAPIView
    def get_serializer_class(self):
        """
        Use CollectionDetailSerializer which requires 'id'
        """
        if self.request.method in ('PUT', 'PATCH', 'DELETE'):
            return CollectionDetailSerializer
        else:
            return CollectionSerializer
    # overrides ListBulkCreateJSONAPIView
    def perform_create(self, serializer):
        """Create a node.

        :param serializer: validated CollectionSerializer instance
        """
        # On creation, make sure that current user is the creator
        user = self.request.user
        serializer.save(creator=user)
    # overrides BulkDestroyJSONAPIView
    def allow_bulk_destroy_resources(self, user, resource_list):
        """User must have admin permissions to delete nodes."""
        for node in resource_list:
            if not node.has_permission(user, ADMIN):
                return False
        return True
    # Overrides BulkDestroyJSONAPIView
    def perform_destroy(self, instance):
        auth = get_user_auth(self.request)
        try:
            # Soft-delete; may be refused based on the node's state.
            instance.remove_node(auth=auth)
        except NodeStateError as err:
            raise ValidationError(err.message)
        instance.save()
class CollectionDetail(JSONAPIBaseView, generics.RetrieveUpdateDestroyAPIView, CollectionMixin):
    """Details about Organizer Collections. *Writeable*.

    The Project Organizer is a tool to allow the user to make Collections of projects, components, and registrations
    for whatever purpose the user might want to organize them. They make node_links to any Node that a user has
    read access to. Collections through this API do not nest. Currently Collections are private to any individual user,
    though that could change one day.

    ##Collection Attributes

    OSF Organizer Collection entities have the "nodes" `type`.

        name           type               description
        =================================================================================
        title          string             title of Organizer Collection
        date_created   iso8601 timestamp  timestamp that the collection was created
        date_modified  iso8601 timestamp  timestamp when the collection was last updated

    ##Relationships

    ###Node links

    Node links are pointers or aliases to nodes. This relationship lists all of the nodes that the Organizer Collection
    is pointing to. New node links can be created with this collection.

    ##Links

        self:  the canonical api endpoint of this node
        html:  this node's page on the OSF website

    ##Actions

    ###Update

        Method:        PUT / PATCH
        URL:           /links/self
        Query Params:  <none>
        Body (JSON):   {
                         "data": {
                           "type": "nodes",   # required
                           "id":   {node_id}, # required
                           "attributes": {
                             "title": {title}, # mandatory
                           }
                         }
                       }
        Success:       200 OK + node representation

    To update an Organizer Collection, issue either a PUT or a PATCH request against the `/links/self` URL.
    The `title` field is mandatory if you PUT and optional if you PATCH, though there's no reason to PATCH if you aren't
    changing the name. Non-string values will be accepted and stringified, but we make no promises about the
    stringification output. So don't do that.

    ###Delete

        Method:   DELETE
        URL:      /links/self
        Params:   <none>
        Success:  204 No Content

    To delete a node, issue a DELETE request against `/links/self`. A successful delete will return a 204 No Content
    response. Attempting to delete a node you do not own will result in a 403 Forbidden.

    ##Query Params

    *None*.

    #This Request/Response
    """
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        ContributorOrPublic,
        base_permissions.TokenHasScope,
    )
    required_read_scopes = [CoreScopes.ORGANIZER_COLLECTIONS_BASE_READ]
    required_write_scopes = [CoreScopes.ORGANIZER_COLLECTIONS_BASE_WRITE]
    serializer_class = CollectionDetailSerializer
    view_category = 'collections'
    view_name = 'collection-detail'
    # overrides RetrieveUpdateDestroyAPIView
    def get_object(self):
        # Delegates to CollectionMixin.get_node, which 404s for non-collections
        # and enforces object permissions.
        return self.get_node()
    # overrides RetrieveUpdateDestroyAPIView
    def perform_destroy(self, instance):
        auth = get_user_auth(self.request)
        node = self.get_object()
        try:
            # Soft-delete; may be refused based on the node's state.
            node.remove_node(auth=auth)
        except NodeStateError as err:
            raise ValidationError(err.message)
        node.save()
class LinkedNodesList(BaseLinkedList, CollectionMixin):
    """List of nodes linked to this node. *Read-only*.

    Linked nodes are the project/component nodes pointed to by node links. This view will probably replace node_links in the near future.

    <!--- Copied Spiel from NodeDetail -->

    On the front end, nodes are considered 'projects' or 'components'. The difference between a project and a component
    is that a project is the top-level node, and components are children of the project. There is also a [category
    field](/v2/#osf-node-categories) that includes 'project' as an option. The categorization essentially determines
    which icon is displayed by the node in the front-end UI and helps with search organization. Top-level nodes may have
    a category other than project, and children nodes may have a category of project.

    ##Linked Node Attributes

    <!--- Copied Attributes from NodeDetail -->

    OSF Node entities have the "nodes" `type`.

        name           type               description
        =================================================================================
        title          string             title of project or component
        description    string             description of the node
        category       string             node category, must be one of the allowed values
        date_created   iso8601 timestamp  timestamp that the node was created
        date_modified  iso8601 timestamp  timestamp when the node was last updated
        tags           array of strings   list of tags that describe the node
        registration   boolean            is this is a registration?
        collection     boolean            is this node a collection of other nodes?
        public         boolean            has this node been made publicly-visible?

    ##Links

    See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).

    ##Query Params

    + `page=<Int>` -- page number of results to view, default 1

    + `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.

    Nodes may be filtered by their `title`, `category`, `description`, `public`, `registration`, or `tags`. `title`,
    `description`, and `category` are string fields and will be filtered using simple substring matching. `public` and
    `registration` are booleans, and can be filtered using truthy values, such as `true`, `false`, `0`, or `1`. Note
    that quoting `true` or `false` in the query will cause the match to fail regardless. `tags` is an array of simple strings.

    #This Request/Response
    """
    serializer_class = NodeSerializer
    view_category = 'collections'
    view_name = 'linked-nodes'
    ordering = ('-date_modified',)
    def get_queryset(self):
        # Registrations are excluded here; see LinkedRegistrationsList for those.
        return super(LinkedNodesList, self).get_queryset().exclude(type='osf.registration')
    # overrides APIView
    def get_parser_context(self, http_request):
        """
        Tells parser that we are creating a relationship
        """
        res = super(LinkedNodesList, self).get_parser_context(http_request)
        res['is_relationship'] = True
        return res
class LinkedRegistrationsList(BaseLinkedList, CollectionMixin):
    """List of registrations linked to this node. *Read-only*.

    Linked registrations are the registration nodes pointed to by node links.

    <!--- Copied Spiel from RegistrationDetail -->
    Registrations are read-only snapshots of a project. This view shows details about the given registration.

    Each resource contains the full representation of the registration, meaning additional requests to an individual
    registration's detail view are not necessary. A withdrawn registration will display a limited subset of information,
    namely, title, description, date_created, registration, withdrawn, date_registered, withdrawal_justification, and
    registration supplement. All other fields will be displayed as null. Additionally, the only relationships permitted
    to be accessed for a withdrawn registration are the contributors - other relationships will return a 403.

    ##Linked Registration Attributes

    <!--- Copied Attributes from RegistrationDetail -->

    Registrations have the "registrations" `type`.

        name                            type               description
        =======================================================================================================
        title                           string             title of the registered project or component
        description                     string             description of the registered node
        category                        string             node category, must be one of the allowed values
        date_created                    iso8601 timestamp  timestamp that the node was created
        date_modified                   iso8601 timestamp  timestamp when the node was last updated
        tags                            array of strings   list of tags that describe the registered node
        current_user_can_comment        boolean            Whether the current user is allowed to post comments
        current_user_permissions        array of strings   list of strings representing the permissions for the current user on this node
        fork                            boolean            is this project a fork?
        registration                    boolean            has this project been registered? (always true - may be deprecated in future versions)
        collection                      boolean            is this registered node a collection? (always false - may be deprecated in future versions)
        node_license                    object             details of the license applied to the node
        year                            string             date range of the license
        copyright_holders               array of strings   holders of the applied license
        public                          boolean            has this registration been made publicly-visible?
        withdrawn                       boolean            has this registration been withdrawn?
        date_registered                 iso8601 timestamp  timestamp that the registration was created
        embargo_end_date                iso8601 timestamp  when the embargo on this registration will be lifted (if applicable)
        withdrawal_justification        string             reasons for withdrawing the registration
        pending_withdrawal              boolean            is this registration pending withdrawal?
        pending_withdrawal_approval     boolean            is this registration pending approval?
        pending_embargo_approval        boolean            is the associated Embargo awaiting approval by project admins?
        registered_meta                 dictionary         registration supplementary information
        registration_supplement         string             registration template

    ##Links

    See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).

    ##Query Params

    + `page=<Int>` -- page number of results to view, default 1

    + `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.

    Nodes may be filtered by their `title`, `category`, `description`, `public`, `registration`, or `tags`. `title`,
    `description`, and `category` are string fields and will be filtered using simple substring matching. `public` and
    `registration` are booleans, and can be filtered using truthy values, such as `true`, `false`, `0`, or `1`. Note
    that quoting `true` or `false` in the query will cause the match to fail regardless. `tags` is an array of simple strings.

    #This Request/Response
    """
    serializer_class = RegistrationSerializer
    view_category = 'collections'
    view_name = 'linked-registrations'
    ordering = ('-date_modified',)
    def get_queryset(self):
        # Only registrations; plain nodes are served by LinkedNodesList.
        return super(LinkedRegistrationsList, self).get_queryset().filter(type='osf.registration')
    # overrides APIView
    def get_parser_context(self, http_request):
        """
        Tells parser that we are creating a relationship
        """
        res = super(LinkedRegistrationsList, self).get_parser_context(http_request)
        res['is_relationship'] = True
        return res
class NodeLinksList(JSONAPIBaseView, bulk_views.BulkDestroyJSONAPIView, bulk_views.ListBulkCreateJSONAPIView, CollectionMixin):
    """Node Links to other nodes. *Writeable*.

    # Deprecated

    The use of /collections/ID/node_links/ is deprecated in favor of linked_nodes, linked_registrations or a combination of both.

    ## Known Issue

    Linked nodes of type 'registrations' will be returned with an error 'Not found.' in the {embeds: target_node: {}} object.

    Node Links act as pointers to other nodes. Unlike Forks, they are not copies of nodes;
    Node Links are a direct reference to the node that they point to.

    ##Node Link Attributes

    *None*

    ##Links

    See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).

    ##Actions

    ###Create

        Method:        POST
        URL:           /links/self
        Query Params:  <none>
        Body (JSON):   {
                         "data": {
                           "type": "node_links", # required
                         },
                         'relationships': {
                           'target_node': {
                             'data': {
                               'type': 'nodes',
                               'id': '<node_id>'
                             }
                           }
                         }
                       }

    ##Query Params

    + `page=<Int>` -- page number of results to view, default 1

    + `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.

    #This Request/Response
    """
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        ContributorOrPublic,
        ReadOnlyIfRegistration,
        base_permissions.TokenHasScope,
    )
    required_read_scopes = [CoreScopes.NODE_LINKS_READ]
    required_write_scopes = [CoreScopes.NODE_LINKS_WRITE]
    serializer_class = CollectionNodeLinkSerializer
    view_category = 'collections'
    view_name = 'node-pointers'
    model_class = NodeRelation
    ordering = ('-date_modified',)
    def get_queryset(self):
        # Live (non-deleted) children only; collections themselves are excluded.
        return self.get_node().node_relations.select_related('child').filter(child__is_deleted=False).exclude(child__type='osf.collection')
    # Overrides BulkDestroyJSONAPIView
    def perform_destroy(self, instance):
        auth = get_user_auth(self.request)
        node = self.get_node()
        try:
            node.rm_pointer(instance, auth=auth)
        except ValueError as err:  # pointer doesn't belong to node
            raise ValidationError(err.message)
        node.save()
    # overrides ListCreateAPIView
    def get_parser_context(self, http_request):
        """
        Tells parser that we are creating a relationship
        """
        res = super(NodeLinksList, self).get_parser_context(http_request)
        res['is_relationship'] = True
        return res
class NodeLinksDetail(JSONAPIBaseView, generics.RetrieveDestroyAPIView, CollectionMixin):
    """Node Link details. *Writeable*.

    Node Links act as pointers to other nodes. Unlike Forks, they are not copies of nodes;
    Node Links are a direct reference to the node that they point to.

    ##Attributes

    *None*

    ##Relationships

    ##Links

        self:  the canonical api endpoint of this node

    ##Actions

    ###Delete

        Method:   DELETE
        URL:      /links/self
        Params:   <none>
        Success:  204 No Content

    To delete a node_link, issue a DELETE request against `/links/self`. A successful delete will return a 204 No Content
    response. Attempting to delete a node you do not own will result in a 403 Forbidden.

    ##Query Params

    *None*.

    #This Request/Response
    """
    permission_classes = (
        ContributorOrPublicForPointers,
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
        ReadOnlyIfRegistration,
    )
    required_read_scopes = [CoreScopes.NODE_LINKS_READ]
    required_write_scopes = [CoreScopes.NODE_LINKS_WRITE]
    serializer_class = CollectionNodeLinkSerializer
    # NOTE(review): view_category is 'nodes' while the sibling collection views
    # use 'collections' — possibly intentional (shared node-pointer routing),
    # but worth confirming against the URL configuration.
    view_category = 'nodes'
    view_name = 'node-pointer-detail'
    # overrides RetrieveAPIView
    def get_object(self):
        node_link_lookup_url_kwarg = 'node_link_id'
        node_link = get_object_or_error(
            NodeRelation,
            self.kwargs[node_link_lookup_url_kwarg],
            'node link'
        )
        # May raise a permission denied
        # Permission checks expect a 'node_id' kwarg; alias the collection id.
        self.kwargs['node_id'] = self.kwargs['collection_id']
        self.check_object_permissions(self.request, node_link)
        return node_link
    # overrides DestroyAPIView
    def perform_destroy(self, instance):
        auth = get_user_auth(self.request)
        node = self.get_node()
        pointer = self.get_object()
        try:
            node.rm_pointer(pointer, auth=auth)
        except ValueError as err:  # pointer doesn't belong to node
            raise ValidationError(err.message)
        node.save()
class CollectionLinkedNodesRelationship(LinkedNodesRelationship, CollectionMixin):
    """ Relationship Endpoint for Collection -> Linked Node relationships

    Used to set, remove, update and retrieve the ids of the linked nodes attached to this collection. For each id, there
    exists a node link that contains that node.

    ##Actions

    ###Create

        Method:        POST
        URL:           /links/self
        Query Params:  <none>
        Body (JSON):   {
                         "data": [{
                           "type": "linked_nodes",   # required
                           "id": <node_id>           # required
                         }]
                       }
        Success:       201

    This requires both edit permission on the collection, and for the user that is
    making the request to be able to read the nodes requested. Data can contain any number of
    node identifiers. This will create a node_link for all node_ids in the request that
    do not currently have a corresponding node_link in this collection.

    ###Update

        Method:        PUT || PATCH
        URL:           /links/self
        Query Params:  <none>
        Body (JSON):   {
                         "data": [{
                           "type": "linked_nodes",   # required
                           "id": <node_id>           # required
                         }]
                       }
        Success:       200

    This requires both edit permission on the collection, and for the user that is
    making the request to be able to read the nodes requested. Data can contain any number of
    node identifiers. This will replace the contents of the node_links for this collection with
    the contents of the request. It will delete all node links that don't have a node_id in the data
    array, create node links for the node_ids that don't currently have a node id, and do nothing
    for node_ids that already have a corresponding node_link. This means an update request with
    {"data": []} will remove all node_links in this collection

    ###Destroy

        Method:        DELETE
        URL:           /links/self
        Query Params:  <none>
        Body (JSON):   {
                         "data": [{
                           "type": "linked_nodes",   # required
                           "id": <node_id>           # required
                         }]
                       }
        Success:       204

    This requires edit permission on the node. This will delete any node_links that have a
    corresponding node_id in the request.
    """
    view_category = 'collections'
    view_name = 'collection-node-pointer-relationship'
class CollectionLinkedRegistrationsRelationship(LinkedRegistrationsRelationship, CollectionMixin):
    """ Relationship Endpoint for Collection -> Linked Registration relationships

    Used to set, remove, update and retrieve the ids of the linked registrations attached to this collection. For each id, there
    exists a node link that contains that node.

    ##Actions

    ###Create

        Method:        POST
        URL:           /links/self
        Query Params:  <none>
        Body (JSON):   {
                         "data": [{
                           "type": "linked_registrations",   # required
                           "id": <node_id>                   # required
                         }]
                       }
        Success:       201

    This requires both edit permission on the collection, and for the user that is
    making the request to be able to read the registrations requested. Data can contain any number of
    node identifiers. This will create a node_link for all node_ids in the request that
    do not currently have a corresponding node_link in this collection.

    ###Update

        Method:        PUT || PATCH
        URL:           /links/self
        Query Params:  <none>
        Body (JSON):   {
                         "data": [{
                           "type": "linked_registrations",   # required
                           "id": <node_id>                   # required
                         }]
                       }
        Success:       200

    This requires both edit permission on the collection, and for the user that is
    making the request to be able to read the registrations requested. Data can contain any number of
    node identifiers. This will replace the contents of the node_links for this collection with
    the contents of the request. It will delete all node links that don't have a node_id in the data
    array, create node links for the node_ids that don't currently have a node id, and do nothing
    for node_ids that already have a corresponding node_link. This means an update request with
    {"data": []} will remove all node_links in this collection

    ###Destroy

        Method:        DELETE
        URL:           /links/self
        Query Params:  <none>
        Body (JSON):   {
                         "data": [{
                           "type": "linked_registrations",   # required
                           "id": <node_id>                   # required
                         }]
                       }
        Success:       204

    This requires edit permission on the node. This will delete any node_links that have a
    corresponding node_id in the request.
    """
    view_category = 'collections'
    view_name = 'collection-registration-pointer-relationship'
| |
"""
Module contains classes related to Burp Suite extension
"""
from os import walk, path
from json import loads, dumps
from imp import load_source
from burp import (IBurpExtender, IBurpExtenderCallbacks, ITab,
IContextMenuFactory)
from javax.swing import (JPanel, JTextField, GroupLayout, JTabbedPane,
JButton, JLabel, JScrollPane, JTextArea,
JFileChooser, JCheckBox, JMenuItem, JFrame, JViewport)
from java.net import URL, MalformedURLException
from java.awt import GridLayout, GridBagLayout, GridBagConstraints, Dimension
VERSION = "0.8.7"  # Extension version string; presumably shown in the UI -- TODO confirm where it is used
class BurpExtender(IBurpExtender, IBurpExtenderCallbacks, IContextMenuFactory):
    """
    Class contains the necessary function to begin the burp extension.
    """

    def __init__(self):
        self.config_tab = None
        self.messages = []
        self._callbacks = None

    def registerExtenderCallbacks(self, callbacks):
        """
        Default extension method. the objects within are related
        to the internal tabs of the extension
        """
        self._callbacks = callbacks
        self.config_tab = SpyTab(callbacks)
        callbacks.addSuiteTab(self.config_tab)
        callbacks.registerContextMenuFactory(self)

    def createMenuItems(self, invocation):
        """Creates the Burp Menu items"""
        context = invocation.getInvocationContext()
        # Only offer the menu entry in request-bearing contexts.
        relevant_contexts = (invocation.CONTEXT_MESSAGE_EDITOR_REQUEST,
                             invocation.CONTEXT_MESSAGE_VIEWER_REQUEST,
                             invocation.CONTEXT_PROXY_HISTORY,
                             invocation.CONTEXT_TARGET_SITE_MAP_TABLE)
        if context in relevant_contexts:
            self.messages = invocation.getSelectedMessages()
            if len(self.messages) == 1:
                return [JMenuItem('Send URL to SpyDir',
                                  actionPerformed=self.pass_url)]
        else:
            return None

    def pass_url(self, event):
        """Handles the menu event"""
        self.config_tab.update_url(self.messages)
class SpyTab(JPanel, ITab):
    """Defines the extension tabs"""

    def __init__(self, callbacks):
        super(SpyTab, self).__init__(GroupLayout(self))
        self._callbacks = callbacks
        config = Config(self._callbacks, self)
        about = About(self._callbacks)
        # plugs = Plugins(self._callbacks)
        self.tabs = [config, about]
        self.j_tabs = self.build_ui()
        self.add(self.j_tabs)

    def build_ui(self):
        """
        Builds the tabbed pane within the main extension tab
        Tabs are Config and About objects
        """
        pane = JTabbedPane()
        for sub_tab in self.tabs:
            pane.add(sub_tab.getTabCaption(), sub_tab.getUiComponent())
        return pane

    def switch_focus(self):
        """Terrifically hacked together refresh mechanism"""
        self.j_tabs.setSelectedIndex(1)
        self.j_tabs.setSelectedIndex(0)

    def update_url(self, host):
        """
        Retrieves the selected host information from the menu click
        Sends it to the config tab
        """
        service = host[0].getHttpService()
        url = "%s://%s:%s" % (service.getProtocol(),
                              service.getHost(),
                              service.getPort())
        self.tabs[0].set_url(url)

    @staticmethod
    def getTabCaption():
        """Returns the tab name for the Burp UI"""
        return "SpyDir"

    def getUiComponent(self):
        """Returns the UI component for the Burp UI"""
        return self
class Config(ITab):
"""Defines the Configuration tab"""
def __init__(self, callbacks, parent):
# Initialze self stuff
self._callbacks = callbacks
self.config = {}
self.ext_stats = {}
self.url_reqs = []
self.parse_files = False
self.tab = JPanel(GridBagLayout())
self.view_port_text = JTextArea("===SpyDir===")
self.delim = JTextField(30)
self.ext_white_list = JTextField(30)
# I'm not sure if these fields are necessary still
# why not just use Burp func to handle this?
# leaving them in case I need it for the HTTP handler later
# self.cookies = JTextField(30)
# self.headers = JTextField(30)
self.url = JTextField(30)
self.parent_window = parent
self.plugins = {}
self.loaded_p_list = set()
self.loaded_plugins = False
self.config['Plugin Folder'] = None
self.double_click = False
self.source_input = ""
self.print_stats = True
self.curr_conf = JLabel()
self.window = JFrame("Select plugins",
preferredSize=(200, 250),
windowClosing=self.p_close)
self.window.setDefaultCloseOperation(JFrame.DO_NOTHING_ON_CLOSE)
self.window.setVisible(False)
self.path_vars = JTextField(30)
# Initialize local stuff
tab_constraints = GridBagConstraints()
status_field = JScrollPane(self.view_port_text)
# Configure view port
self.view_port_text.setEditable(False)
labels = self.build_ui()
# Add things to rows
tab_constraints.anchor = GridBagConstraints.FIRST_LINE_END
tab_constraints.gridx = 1
tab_constraints.gridy = 0
tab_constraints.fill = GridBagConstraints.HORIZONTAL
self.tab.add(JButton(
"Resize screen", actionPerformed=self.resize),
tab_constraints)
tab_constraints.gridx = 0
tab_constraints.gridy = 1
tab_constraints.anchor = GridBagConstraints.FIRST_LINE_START
self.tab.add(labels, tab_constraints)
tab_constraints.gridx = 1
tab_constraints.gridy = 1
tab_constraints.fill = GridBagConstraints.BOTH
tab_constraints.weightx = 1.0
tab_constraints.weighty = 1.0
tab_constraints.anchor = GridBagConstraints.FIRST_LINE_END
self.tab.add(status_field, tab_constraints)
try:
self._callbacks.customizeUiComponent(self.tab)
except Exception:
pass
    def build_ui(self):
        """Builds the configuration screen.

        Creates all input widgets and buttons, lays them out in a single
        21-row grid, attaches tool tips, and returns the populated JPanel.
        """
        labels = JPanel(GridLayout(21, 1))
        checkbox = JCheckBox("Attempt to parse files for URL patterns?",
                             False, actionPerformed=self.set_parse)
        stats_box = JCheckBox("Show stats?", True,
                              actionPerformed=self.set_show_stats)
        # The two year old in me is laughing heartily
        plug_butt = JButton("Specify plugins location",
                            actionPerformed=self.set_plugin_loc)
        load_plug_butt = JButton("Select plugins",
                                 actionPerformed=self.p_build_ui)
        parse_butt = JButton("Parse directory", actionPerformed=self.parse)
        clear_butt = JButton("Clear text", actionPerformed=self.clear)
        spider_butt = JButton("Send to Spider", actionPerformed=self.scan)
        save_butt = JButton("Save config", actionPerformed=self.save)
        rest_butt = JButton("Restore config", actionPerformed=self.restore)
        source_butt = JButton("Input Source File/Directory",
                              actionPerformed=self.get_source_input)
        # Build grid: order below determines on-screen row order.
        labels.add(source_butt)
        labels.add(self.curr_conf)
        labels.add(JLabel("String Delimiter:"))
        labels.add(self.delim)
        labels.add(JLabel("Extension Whitelist:"))
        labels.add(self.ext_white_list)
        labels.add(JLabel("URL:"))
        labels.add(self.url)
        labels.add(JLabel("Path Variables"))
        labels.add(self.path_vars)
        # Leaving these here for now.
        # labels.add(JLabel("Cookies:"))
        # labels.add(self.cookies)
        # labels.add(JLabel("HTTP Headers:"))
        # labels.add(self.headers)
        labels.add(checkbox)
        labels.add(stats_box)
        labels.add(plug_butt)
        labels.add(parse_butt)
        labels.add(JButton("Show all endpoints",
                           actionPerformed=self.print_endpoints))
        labels.add(clear_butt)
        labels.add(spider_butt)
        labels.add(JLabel(""))
        labels.add(save_butt)
        labels.add(rest_butt)
        labels.add(load_plug_butt)
        # Tool tips!
        self.delim.setToolTipText("Use to manipulate the final URL. "
                                  "See About tab for example.")
        self.ext_white_list.setToolTipText("Define a comma delimited list of"
                                           " file extensions to parse. Use *"
                                           " to parse all files.")
        self.url.setToolTipText("Enter the target URL")
        checkbox.setToolTipText("Parse files line by line using plugins"
                                " to enumerate language/framework specific"
                                " endpoints")
        parse_butt.setToolTipText("Attempt to enumerate application endpoints")
        clear_butt.setToolTipText("Clear status window and the parse results")
        spider_butt.setToolTipText("Process discovered endpoints")
        save_butt.setToolTipText("Saves the current config settings")
        rest_butt.setToolTipText("<html>Restores previous config settings:"
                                 "<br/>-Input Directory<br/>-String Delim"
                                 "<br/>-Ext WL<br/>-URL<br/>-Plugins")
        source_butt.setToolTipText("Select the application's "
                                   "source directory or file to parse")
        self.path_vars.setToolTipText("Supply a JSON object with values"
                                      "for dynamically enumerated query"
                                      "string variables")
        return labels
    def set_url(self, menu_url):
        """Changes the configuration URL to the one from the menu event."""
        self.url.setText(menu_url)
# Event functions
def set_parse(self, event):
"""
Handles the click event from the UI checkbox
to attempt code level parsing
"""
self.parse_files = not self.parse_files
if self.parse_files:
if not self.loaded_plugins:
self._plugins_missing_warning()
    def restore(self, event):
        """Attempts to restore the previously saved configuration.

        Reads the JSON blob saved under the "config" extension setting and
        repopulates the UI fields; reloads plugins if a folder was saved.
        """
        jdump = None
        try:
            jdump = loads(self._callbacks.loadExtensionSetting("config"))
        except Exception as exc:  # Generic exception thrown directly to user
            self.update_scroll(
                "[!!] Error during restore!\n\tException: %s" % str(exc))
        if jdump is not None:
            self.url.setText(jdump.get('URL'))
            # self.cookies.setText(jdump.get('Cookies'))
            # self.headers.setText(jdump.get("Headers"))
            # Rebuild the comma-separated whitelist text from the saved list;
            # the trailing ", " is sliced off afterwards.
            ewl = ""
            for ext in jdump.get('Extension Whitelist'):
                ewl += ext + ", "
            self.ext_white_list.setText(ewl[:-2])
            self.delim.setText(jdump.get('String Delimiter'))
            self.source_input = jdump.get("Input Directory")
            self.config['Plugin Folder'] = jdump.get("Plugin Folder")
            # Only reload plugins when a folder was saved and none are loaded.
            if (self.config['Plugin Folder'] is not None and
                    (len(self.plugins.values()) < 1)):
                self._load_plugins(self.config['Plugin Folder'])
            self._update()
            self.update_scroll("[^] Restore complete!")
        else:
            self.update_scroll("[!!] Restore failed!")
    def save(self, event=None):
        """
        Saves the configuration details to a Burp Suite's persistent store.

        Warns (but still saves) when the configured URL is out of scope.
        """
        self._update()
        try:
            if not self._callbacks.isInScope(URL(self.url.getText())):
                self.update_scroll("[!!] URL provided is NOT in Burp Scope!")
        except MalformedURLException:  # If url field is blank we'll
            pass                       # still save the settings.
        try:
            self._callbacks.saveExtensionSetting("config", dumps(self.config))
            self.update_scroll("[^] Settings saved!")
        except Exception:
            self.update_scroll("[!!] Error saving settings to Burp Suite!")
    def parse(self, event):
        """
        Handles the click event from the UI.
        Attempts to parse the given directory
        (and/or source files) for url endpoints
        Saves the items found within the url_reqs list

        Returns (other_dirs, url_reqs) — directories that fell outside the
        String Delimiter, and the accumulated request URLs.
        """
        self._update()
        file_set = set()
        fcount = 0
        other_dirs = set()
        self.ext_stats = {}
        if self.loaded_plugins:
            self.update_scroll("[^] Attempting to parse files" +
                               " for URL patterns. This might take a minute.")
        if path.isdir(self.source_input):
            # Walk the whole tree, tallying per-extension counts as we go.
            for dirname, _, filenames in walk(self.source_input):
                for filename in filenames:
                    fcount += 1
                    ext = path.splitext(filename)[1]
                    count = self.ext_stats.get(ext, 0) + 1
                    filename = "%s/%s" % (dirname, filename)
                    self.ext_stats.update({ext: count})
                    if self.parse_files and self._ext_test(ext):
                        # i can haz threading?
                        file_set.update(self._code_as_endpoints(filename, ext))
                    elif self._ext_test(ext):
                        r_files, oths = self._files_as_endpoints(filename, ext)
                        file_set.update(r_files)
                        other_dirs.update(oths)
        elif path.isfile(self.source_input):
            # Single-file input: always go through code-level parsing.
            ext = path.splitext(self.source_input)[1]
            file_set.update(self._code_as_endpoints(self.source_input, ext))
        else:
            self.update_scroll("[!!] Input Directory is not valid!")
        if len(other_dirs) > 0:
            self.update_scroll("[*] Found files matching file extension in:\n")
            for other_dir in other_dirs:
                self.update_scroll(" " * 4 + "%s\n" % other_dir)
        self._handle_path_vars(file_set)
        self._print_parsed_status(fcount)
        return (other_dirs, self.url_reqs)
    def _handle_path_vars(self, file_set):
        """Applies path-variable substitution to each discovered endpoint
        and appends the normalized URL to url_reqs.
        """
        proto = 'http://'
        for item in file_set:
            if item.startswith("http://") or item.startswith("https://"):
                proto = item.split("//")[0] + '//'
                item = item.replace(proto, "")
            # NOTE(review): proto persists across iterations, so a scheme-less
            # item inherits the previous item's scheme — confirm intentional.
            item = self._path_vars(item)
            self.url_reqs.append(proto + item.replace('//', '/'))
def _path_vars(self, item):
p_vars = None
if self.path_vars.getText():
try:
p_vars = loads(str(self.path_vars.getText()))
except:
self.update_scroll("[!] Error reading supplied Path Variables!")
if p_vars is not None:
rep_str = ""
try:
for k in p_vars.keys():
rep_str += "[^] Replacing %s with %s!\n" % (k, str(p_vars.get(k)))
self.update_scroll(rep_str)
for k in p_vars.keys():
if str(k) in item:
item = item.replace(k, str(p_vars.get(k)))
except AttributeError:
self.update_scroll("[!] Error reading supplied Path Variables! This needs to be a JSON dictionary!")
return item
    def scan(self, event):
        """
        handles the click event from the UI.
        Adds the given URL to the burp scope and sends the requests
        to the burp spider

        First click on an out-of-scope URL only warns; a second click
        proceeds (double_click latch).
        """
        temp_url = self.url.getText()
        if not self._callbacks.isInScope(URL(temp_url)):
            if not self.double_click:
                self.update_scroll("[!!] URL is not in scope! Press Send to "
                                   "Spider again to add to scope and scan!")
                self.double_click = True
                return
            else:
                # Second press: explicitly spider the base URL to add it.
                self._callbacks.sendToSpider(URL(temp_url))
        # NOTE(review): double_click is never reset after a successful send —
        # confirm whether subsequent out-of-scope URLs should re-warn.
        self.update_scroll(
            "[^] Sending %d requests to Spider" % len(self.url_reqs))
        for req in self.url_reqs:
            self._callbacks.sendToSpider(URL(req))
    def clear(self, event):
        """Clears the viewport and the current parse exts.

        Resets the status window to its banner and drops the per-extension
        stats; url_reqs is left untouched (it is reset by _update()).
        """
        self.view_port_text.setText("===SpyDir===")
        self.ext_stats = {}
    def print_endpoints(self, event):
        """Prints the discovered endpoints to the status window.

        Endpoints are sorted alphabetically; prints a warning when the
        last parse produced nothing.
        """
        req_str = ""
        if len(self.url_reqs) > 0:
            self.update_scroll("[*] Printing all discovered endpoints:")
            for req in sorted(self.url_reqs):
                req_str += "    %s\n" % req
        else:
            req_str = "[!!] No endpoints discovered"
        self.update_scroll(req_str)
    def set_show_stats(self, event):
        """Modifies the show stats setting (checkbox toggle handler)."""
        self.print_stats = not self.print_stats
    def get_source_input(self, event):
        """Sets the source dir/file for parsing via a file-chooser dialog.

        A cancelled dialog (no selection) leaves the previous value intact.
        """
        source_chooser = JFileChooser()
        source_chooser.setFileSelectionMode(
            JFileChooser.FILES_AND_DIRECTORIES)
        source_chooser.showDialog(self.tab, "Choose Source Location")
        chosen_source = source_chooser.getSelectedFile()
        try:
            self.source_input = chosen_source.getAbsolutePath()
        except AttributeError:
            # getSelectedFile() returns None when the user cancels.
            pass
        if self.source_input is not None:
            self.update_scroll("[*] Source location: %s" % self.source_input)
            self.curr_conf.setText(self.source_input)
# Plugin functions
    def _parse_file(self, filename, file_url):
        """
        Attempts to parse a file with the loaded plugins
        Returns set of endpoints

        Plugins are matched by upper-cased file extension; .TXT files with
        no plugin are treated as a raw list of endpoint suffixes.
        """
        file_set = set()
        with open(filename, 'r') as plug_in:
            lines = plug_in.readlines()
        ext = path.splitext(filename)[1].upper()
        if ext in self.plugins.keys() and self._ext_test(ext):
            for plug in self.plugins.get(ext):
                if plug.enabled:
                    res = plug.run(lines)
                    if len(res) > 0:
                        # Each plugin result is a path suffix rooted at the
                        # configured URL.
                        for i in res:
                            i = file_url + i
                            file_set.add(i)
        elif ext == '.TXT' and self._ext_test(ext):
            for i in lines:
                i = file_url + i
                file_set.add(i.strip())
        return file_set
def set_plugin_loc(self, event):
"""Attempts to load plugins from a specified location"""
if self.config['Plugin Folder'] is not None:
choose_plugin_location = JFileChooser(self.config['Plugin Folder'])
else:
choose_plugin_location = JFileChooser()
choose_plugin_location.setFileSelectionMode(
JFileChooser.DIRECTORIES_ONLY)
choose_plugin_location.showDialog(self.tab, "Choose Folder")
chosen_folder = choose_plugin_location.getSelectedFile()
self.config['Plugin Folder'] = chosen_folder.getAbsolutePath()
self._load_plugins(self.config['Plugin Folder'])
    def _load_plugins(self, folder):
        """
        Parses a local directory to get the plugins
        related to code level scanning

        Validates each .py file, replaces any previously loaded plugin of
        the same name, rebuilds the extension map, and returns the status
        report string that was written to the viewport.
        """
        report = ""
        if len(self.plugins.keys()) > 0:
            report = "[^] Plugins reloaded!"
        for _, _, filenames in walk(folder):
            for p_name in filenames:
                n_e = path.splitext(p_name)  # n_e = name_extension
                if n_e[1] == ".py":
                    f_loc = "%s/%s" % (folder, p_name)
                    loaded_plug = self._validate_plugin(n_e[0], f_loc)
                    if loaded_plug:
                        # Drop any stale copy with the same name before
                        # adding the freshly loaded one.
                        for p in self.loaded_p_list:
                            if p.get_name() == loaded_plug.get_name():
                                self.loaded_p_list.discard(p)
                        self.loaded_p_list.add(loaded_plug)
                        # Only enumerate names on a first-time load.
                        if not report.startswith("[^]"):
                            report += "%s loaded\n" % loaded_plug.get_name()
        self._dictify(self.loaded_p_list)
        if len(self.plugins.keys()) > 0:
            self.loaded_plugins = True
        else:
            report = "[!!] Plugins load failure"
            self.loaded_plugins = False
        self.update_scroll(report)
        return report
def _validate_plugin(self, p_name, f_loc):
"""
Attempts to verify the manditory plugin functions to prevent broken
plugins from loading.
Generates an error message if plugin does not contain an appropriate
function.
"""
# Load the plugin
try:
plug = load_source(p_name, f_loc)
except Exception as exc: # this needs to be generic.
self.update_scroll(
"[!!] Error loading: %s\n\tType:%s Error: %s"
% (f_loc, type(exc), str(exc)))
# Verify the plugin's functions
funcs = dir(plug)
err = []
if "get_name" not in funcs:
err.append("get_name()")
if "get_ext" not in funcs:
err.append("get_ext()")
if "run" not in funcs:
err.append("run()")
# Report errors & return
if len(err) < 1:
return Plugin(plug, True)
for issue in err:
self.update_scroll("[!!] %s is missing: %s func" %
(p_name, issue))
return None
def _dictify(self, plist):
"""Converts the list of loaded plugins (plist) into a dictionary"""
for p in plist:
exts = p.get_ext().upper()
for ext in exts.split(","):
prev_load = self.plugins.get(ext, [])
prev_load.append(p)
self.plugins[ext] = prev_load
# Status window functions
    def _print_parsed_status(self, fcount):
        """Prints the parsed directory status information.

        fcount -- total number of files seen during the walk.
        Returns the report string that was written to the viewport.
        """
        if self.parse_files and not self.loaded_plugins:
            self._plugins_missing_warning()
        if len(self.url_reqs) > 0:
            self.update_scroll("[*] Example URL: %s" % self.url_reqs[0])
        if self.print_stats:
            # Verbose variant: include per-extension counts as pretty JSON.
            report = (("[*] Found: %r files to be requested.\n\n" +
                       "[*] Stats: \n    " +
                       "Found: %r files.\n") % (len(self.url_reqs), fcount))
            if len(self.ext_stats) > 0:
                report += ("[*] Extensions found: %s"
                           % str(dumps(self.ext_stats,
                                       sort_keys=True, indent=4)))
        else:
            report = ("[*] Found: %r files to be requested.\n" %
                      len(self.url_reqs))
        self.update_scroll(report)
        return report
    def _plugins_missing_warning(self):
        """Prints a warning message when parsing is requested w/o plugins."""
        self.update_scroll("[!!] No plugins loaded!")
    def update_scroll(self, text):
        """Updates the view_port_text with the new information.

        Suppresses consecutive duplicate messages (except errors, which
        always repeat) by printing "[^] Status unchanged" at most once.
        """
        temp = self.view_port_text.getText().strip()
        # "[!!]" errors are always appended, even if already present.
        if text not in temp or text[0:4] == "[!!]":
            self.view_port_text.setText("%s\n%s" % (temp, text))
        elif not temp.endswith("[^] Status unchanged"):
            self.view_port_text.setText("%s\n[^] Status unchanged" % temp)
# Internal functions
def _code_as_endpoints(self, filename, ext):
file_set = set()
file_url = self.config.get("URL")
if self.loaded_plugins or ext == '.txt':
if self._ext_test(ext):
file_set.update(
self._parse_file(filename, file_url))
else:
file_set.update(
self._parse_file(filename, file_url))
return file_set
def _files_as_endpoints(self, filename, ext):
"""Generates endpoints via files with the appropriate extension(s)"""
file_url = self.config.get("URL")
broken_splt = ""
other_dirs = set() # directories outside of the String Delim.
file_set = set()
str_del = self.config.get("String Delimiter")
if not str_del:
self.update_scroll("[!!] No available String Delimiter!")
return
spl_str = filename.split(str_del)
try:
# Fix for index out of bounds exception while parsing
# subfolders _not_ included by the split
if len(spl_str) > 1:
file_url += ((spl_str[1])
.replace('\\', '/'))
else:
broken_splt = filename.split(self.source_input)[1]
other_dirs.add(broken_splt)
except Exception as exc: # Generic exception thrown directly to user
self.update_scroll("[!!] Error parsing: " +
"%s\n\tException: %s"
% (filename, str(exc)))
if self._ext_test(ext):
if file_url != self.config.get("URL"):
file_set.add(file_url)
else:
other_dirs.discard(broken_splt)
return file_set, other_dirs
def _ext_test(self, ext):
"""Litmus test for extension whitelist"""
val = False
if "*" in self.config.get("Extension Whitelist"):
val = True
else:
val = (len(ext) > 0 and
(ext.strip().upper()
in self.config.get("Extension Whitelist")))
return val
    def _update(self):
        """Updates internal data.

        Syncs the config dict from the UI fields, normalizes the URL
        (default scheme, trailing slash), and resets the request list.
        """
        self.config["Input Directory"] = self.source_input
        self.config["String Delimiter"] = self.delim.getText()
        white_list_text = self.ext_white_list.getText()
        self.config["Extension Whitelist"] = white_list_text.upper().split(',')
        file_url = self.url.getText()
        if not (file_url.startswith('https://') or file_url.startswith('http://')):
            self.update_scroll("[!] Assuming protocol! Default value: 'http://'")
            file_url = 'http://' + file_url
            self.url.setText(file_url)
        if not file_url.endswith('/') and file_url != "":
            file_url += '/'
        self.config["URL"] = file_url
        # self.config["Cookies"] = self.cookies.getText()
        # self.config["Headers"] = self.headers.getText()
        # Clear results from any previous parse run (in place, since other
        # code holds a reference to this list).
        del self.url_reqs[:]
        self.curr_conf.setText(self.source_input)
# Window sizing functions
    def resize(self, event):
        """Resizes the window to better fit Burp.

        Shrinks the tab slightly below the parent's size, then revalidates
        and restores focus.
        """
        if self.parent_window is not None:
            par_size = self.parent_window.getSize()
            par_size.setSize(par_size.getWidth() * .99,
                             par_size.getHeight() * .9)
            self.tab.setPreferredSize(par_size)
            self.parent_window.validate()
            self.parent_window.switch_focus()
    def p_close(self, event):
        """
        Handles the plugin-selection window's close event: hide and dispose
        (the window is re-shown by p_build_ui when needed).
        """
        self.window.setVisible(False)
        self.window.dispose()
    def p_build_ui(self, event):
        """
        Adds a list of checkboxes, one for each loaded plugin
        to the Selct plugins window
        """
        if not self.loaded_p_list:
            self.update_scroll("[!!] No plugins loaded!")
            return
        scroll_pane = JScrollPane()
        scroll_pane.setPreferredSize(Dimension(200, 250))
        check_frame = JPanel(GridBagLayout())
        constraints = GridBagConstraints()
        constraints.fill = GridBagConstraints.HORIZONTAL
        constraints.gridy = 0
        constraints.anchor = GridBagConstraints.FIRST_LINE_START
        for plug in self.loaded_p_list:
            check_frame.add(JCheckBox(plug.get_name(), plug.enabled,
                                      actionPerformed=self.update_box),
                            constraints)
            constraints.gridy += 1
        vport = JViewport()
        vport.setView(check_frame)
        scroll_pane.setViewport(vport)
        # NOTE(review): a new scroll pane is added to the content pane on
        # every invocation without removing the previous one — confirm
        # whether repeated "Select plugins" clicks accumulate components.
        self.window.contentPane.add(scroll_pane)
        self.window.pack()
        self.window.setVisible(True)
def update_box(self, event):
"""
Handles the check/uncheck event for the plugin's box.
"""
for plug in self.loaded_p_list:
if plug.get_name() == event.getActionCommand():
plug.enabled = not plug.enabled
if plug.enabled:
self.update_scroll("[^] Enabled: %s" %
event.getActionCommand())
else:
self.update_scroll("[^] Disabled: %s" %
event.getActionCommand())
# ITab required functions
    @staticmethod
    def getTabCaption():
        """Returns the name of the Burp Suite Tab (ITab interface)."""
        return "SpyDir"
    def getUiComponent(self):
        """Returns the UI component for the Burp Suite tab (ITab)."""
        return self.tab
class About(ITab):
    """Defines the About tab"""
    def __init__(self, callbacks):
        """Builds the static HTML help content shown on the About tab."""
        self._callbacks = callbacks
        self.tab = JPanel(GridBagLayout())
        about_constraints = GridBagConstraints()
        # Author/version banner.
        about_author = (("<html><center><h2>SpyDir</h2><b>Version:</b> "
                         "%s<br/>Created by: <em>Ryan Reid</em>"
                         " (@_aur3lius)<br/>https://github.com/aur3lius-dev/"
                         "SpyDir</center><br/>")
                        % VERSION)
        about_spydir = """<em><center>
        SpyDir is an extension that assists in the enumeration of
        application<br/>
        endpoints via an input directory containing the application's<br/>
        source code. It provides an option to process files as endpoints,<br/>
        think: ASP, PHP, HTML, or parse files to attempt to enumerate<br/>
        endpoints via plugins, think: MVC. Users may opt to send the<br/>
        discovered endpoints directly to the Burp Spider.
        </center></em><br/>
        This tool is in <b>Alpha</b>! <b>Please</b> provide feedback on the
        GitHub page!<br/><br/>"""
        getting_started = """<b>Getting started:</b><br/>
        <ul>
        <li>Add a local source repository</li>
        <li>Add the target URL</li>
        <li>Use the String delimiter to construct the appropriate
        directory path (if necessary)</li>
        <li>Alternatively, parse each file by selecting plugins
        and checking the checkbox</li>
        <li>Explicitly define the file extensions to process</li>
        <li>Parse the directory</li>
        <li>Verify output is correct <b>before</b> sending to spider</li>
        <li>Send requests to the Burp Spider</li>
        </ul>
        """
        advanced_info = r"""<html><b>String Delimiter</b><br/>
        String Delimiter allows us to append the necessary section
        of the folder structure.<br/>
        Suppose the target application is hosted at the following URL:
        https://localhost:8080. <br/>The target code base is stored in:
        'C:\Source\TestApp'. <br/>Within the TestApp folder there is a
        subfolder, 'views', with static .html files.<br/>
        In this case the String Delimiter will need to equal 'TestApp'.
        <br/>With the expectation that the tool will produce an example URL
        will such as:<br/>https://localhost:8080/views/view1.html.<br/><br/>
        <b>Path Vars</b><br/>Use this option to swap values for dynamically
        enumerated query string parameters. This needs to be a JSON object.
        <br/><b>Example:</b>{"{userID}": "aur3lius", "admin_status=":
        "admin_status=True"}<br/><br/>
        <b>Note:</b> String Delimiter is ignored if parsing files using
        plugins!
        </html>"""
        about_constraints.anchor = GridBagConstraints.FIRST_LINE_START
        about_constraints.weightx = 1.0
        about_constraints.weighty = 1.0
        self.tab.add(JLabel("%s\n%s\n%s\n%s" % (about_author, about_spydir,
                                                getting_started,
                                                advanced_info)),
                     about_constraints)

    @staticmethod
    def getTabCaption():
        """Returns name of tab for Burp UI"""
        return "About"

    def getUiComponent(self):
        """Returns UI component for Burp UI"""
        return self.tab
class Plugin():
    """Wraps a loaded plugin module and tracks whether it is enabled."""

    def __init__(self, plugin, enabled):
        # Keep a handle on the raw module and cache its metadata once.
        self._module = plugin
        self._name = plugin.get_name()
        self._exts = plugin.get_ext()
        self.enabled = enabled

    def run(self, lines):
        """Delegates line parsing to the wrapped plugin module."""
        return self._module.run(lines)

    def get_name(self):
        """Returns the plugin's display name."""
        return self._name

    def get_ext(self):
        """Returns the plugin's comma-separated extension list."""
        return self._exts
| |
# Copyright (c) 2015, Florian Jung and Timm Weber
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from log import log
from collections import deque
import sys
import mechanics
class DummySubscriber:
    """No-op/logging implementation of the client event-callback interface.

    Serves as a base class: subclasses override only the callbacks they
    care about; everything else just logs (or does nothing).
    """
    def on_connect_error(self,s):
        log("on conn err"+s)
    def on_sock_open(self):
        log("on sock open")
    def on_cell_skin(self, skin_url):
        pass
    def on_sock_closed(self):
        log("on sock closed")
    def on_server_version(self, number, text):
        pass
    def on_message_error(self,s):
        log("on msg err "+s)
    def on_ingame(self):
        log("we're ingame :)")
    def on_world_update_pre(self):
        log("updatepre")
    def on_cell_eaten(self,eater_id, eaten_id):
        log("%s ate %s" % (eater_id, eaten_id))
    def on_death(self):
        log("we died :(")
    def on_cell_removed(self,cid):
        log("cell removed")
    def on_cell_info(self,cid, x,y, size, name, color, is_virus, is_agitated):
        log("cell info")
    def on_world_update_post(self):
        log("updatepost")
    def on_leaderboard_names(self,leaderboard):
        # NOTE(review): leaderboard logging is skipped on win32 — presumably
        # console-encoding issues with player names; confirm.
        if sys.platform != "win32":
            log("leaderboard names")
            log(leaderboard)
    def on_leaderboard_groups(self,angles):
        log("leaderboard groups")
    def on_respawn(self):
        log("respawned")
    def on_own_id(self,cid):
        log("my id is %i" % cid)
    def on_world_rect(self,left,top,right,bottom):
        log("worldrect %i,%i,%i,%i"%(left,top,right,bottom))
    def on_spectate_update(self,pos, scale):
        log("spect update")
    def on_experience_info(self,level, current_xp, next_xp):
        log("exper info")
    def on_clear_cells(self):
        log("clear cells")
    def on_debug_line(self,x,y):
        log("debug line")
class CellHistory:
    """Per-cell position history used to derive movement vectors."""
    def __init__(self):
        # Bounded log: only the most recent 300 positions are retained.
        self.poslog = deque(maxlen=300)
        # Set True each frame until the cell is seen again, so entries for
        # vanished cells can be purged.
        self.stale = False
class OtherPlayer:
    """Groups all cells belonging to one player (or food/virus category)."""
    def __init__(self, playerid):
        # playerid is a (name, color) tuple for real players, or a category
        # string such as "virus" / "food" / "ejected mass".
        self.playerid = playerid
        self.cells = set()
class EnhancingSubscriber(DummySubscriber):
    """Subscriber that enriches raw world updates with derived state.

    Tracks position history, eat events ("victims"), per-player cell
    groups, and detects split cells (player splits, virus splits, ejected
    mass) to estimate shoot vectors.
    """
    def __init__(self):
        self.c = None          # client, injected via set_client()
        self.history = {}      # cid -> CellHistory
        self.time = 0          # frame counter, incremented per world update
        self.victims = {}      # eater cid -> [(eaten cell, time), ...]
        self.newFrame = False
    def isNewFrame(self):
        """Returns True once per processed world update (read-and-clear)."""
        tmp = self.newFrame
        self.newFrame = False
        return tmp
    def set_client(self,c):
        """Attaches the client whose world this subscriber enhances."""
        self.c = c
    def cleanup_victims(self):
        """Purges aged entries from the victims table.

        NOTE(review): the filter keeps entries with v[1] < self.time - 100,
        i.e. it retains OLD entries and drops recent ones — the method name
        suggests the inverse condition; confirm intent.
        """
        delete = []
        for eater in self.victims:
            self.victims[eater] = list(filter(lambda v : v[1] < self.time - 100, self.victims[eater]))
            if len(self.victims[eater]) == 0:
                delete += [eater]
        for eater in delete:
            del self.victims[eater]
    def on_cell_eaten(self, eater_id, eaten_id):
        """Records who ate whom, with the current frame time."""
        if eater_id in self.c.world.cells and self.c.world.cells[eater_id].is_virus:
            print("virus ate something!")
        if eater_id not in self.victims:
            self.victims[eater_id] = []
        try:
            self.victims[eater_id] += [(self.c.world.cells[eaten_id], self.time)]
        except KeyError:
            # Eaten cell already removed from the world — nothing to record.
            pass
    def on_world_update_post(self):
        """Derives history, players, and split detection after each update."""
        self.newFrame = True
        self.c.world.time = self.time
        self.time += 1
        if self.time % 100 == 0:
            self.cleanup_victims()
        # create and purge poslog history, movement and movement_angle
        for cid in self.history:
            self.history[cid].stale = True
        for cid in self.c.world.cells:
            if cid not in self.history:
                self.history[cid] = CellHistory()
            self.history[cid].poslog.append(self.c.world.cells[cid].pos.copy())
            self.c.world.cells[cid].poslog = self.history[cid].poslog
            self.history[cid].stale = False
        self.history = {k: v for k, v in self.history.items() if v.stale == False}
        for cid in self.c.world.cells:
            cell = self.c.world.cells[cid]
            if not hasattr(cell, "spawntime"):
                cell.spawntime = self.c.world.time
            try:
                # Movement averaged over the last 3 frames.
                oldpos = cell.poslog[-3-1]
                cell.movement = (cell.pos - oldpos)/3
                cell.movement_angle = cell.movement.angle()
            except (AttributeError, IndexError):
                # Not enough history yet for this cell.
                pass
        # create OtherPlayer entries
        otherplayers = {}
        for cell in self.c.world.cells.values():
            playerid = None
            if not cell.is_food and not cell.is_ejected_mass and not cell.is_virus:
                playerid = (cell.name, cell.color)
            elif cell.is_virus:
                playerid = "virus"
            elif cell.is_food:
                playerid = "food"
            elif cell.is_ejected_mass:
                playerid = "ejected mass"
            else:
                playerid = "???"
            if playerid not in otherplayers:
                otherplayers[playerid] = OtherPlayer(playerid)
            cell.player = otherplayers[playerid]
            cell.player.cells.add(cell)
        # detect split cells and clean up obsolete parent references
        for cell in self.c.world.cells.values():
            # create attribute if not already there
            try:
                cell.parent = cell.parent
            except:
                # NOTE(review): bare except used for first-touch attribute
                # init (AttributeError expected).
                cell.parent = None
            cell.calmed_down = True
            # clean up obsolete parent references
            if cell.parent and cell.parent.cid not in self.c.world.cells:
                cell.parent = None
            # find split cells
            is_split = False
            if not cell.is_food and not cell.is_ejected_mass and not cell.is_virus:
                try:
                    # A cell moving faster than its size allows must have
                    # just been split from a parent.
                    if cell.parent == None and cell.movement.len() > 2 * mechanics.speed(cell.size):
                        print("looks like a split!"+str(cell.movement.len() / mechanics.speed(cell.size)))
                        is_split = True
                except AttributeError:
                    pass
                if is_split:
                    # Parent = same player's cell closest at the start of
                    # this cell's history.
                    history_len = len(cell.poslog)
                    cell.parent = min(cell.player.cells, key=lambda c : (c.poslog[-history_len] - cell.poslog[-history_len]).len() if c != cell and len(c.poslog) >= history_len else float('inf'))
                    try:
                        cell.shoot_vec = cell.parent.movement.copy()
                    except:
                        cell.shoot_vec = None
                    cell.calmed_down = False
            elif cell.is_virus:
                try:
                    if cell.parent == None and cell.movement.len() > 0:
                        print("split virus!")
                        is_split = True
                except AttributeError:
                    pass
                if is_split:
                    cell.parent = min(cell.player.cells, key=lambda c : (c.pos - cell.poslog[0]).len() if c != cell else float('inf'))
                    try:
                        # Virus split direction derives from the last mass
                        # that was fed to the parent virus.
                        last_feed = self.victims[cell.parent.cid][-1][0]
                        if not last_feed.is_ejected_mass:
                            print("wtf, last virus feed was not ejected mass?!")
                            raise KeyError
                        else:
                            cell.shoot_vec = cell.parent.pos - last_feed.poslog[0]
                            cell.shoot_vec2 = last_feed.poslog[-1] - last_feed.poslog[0]
                            try:
                                pos_when_shot = last_feed.parent.poslog[-len(last_feed.poslog)]
                                cell.shoot_vec3 = cell.parent.pos - pos_when_shot
                            except:
                                print("MOAAAHH")
                                cell.shoot_vec3 = None
                    except KeyError:
                        print("wtf, no last virus feed?!")
                        cell.shoot_vec = None
                        cell.shoot_vec2 = None
                        cell.shoot_vec3 = None
                    cell.calmed_down = False
            elif cell.is_ejected_mass:
                try:
                    if cell.parent == None and cell.movement.len() > 0:
                        print("ejected mass!")
                        is_split = True
                except AttributeError:
                    pass
                if is_split:
                    history_len = len(cell.poslog)
                    try:
                        # Parent = nearest same-color player cell at the
                        # start of this mass's history.
                        cell.parent = min(filter(lambda c : not c.is_ejected_mass and not c.is_food and not c.is_virus and c.color == cell.color, self.c.world.cells.values()), key=lambda c : (c.poslog[-history_len] - cell.poslog[-history_len]).len() if len(c.poslog) >= history_len else float('inf'))
                        try:
                            cell.shoot_vec = cell.parent.movement.copy()
                        except:
                            cell.shoot_vec = None
                        cell.calmed_down = False
                    except ValueError:
                        # if no possible parents are found, min will raise a ValueError. ignore that.
                        pass
            if is_split:
                cell.spawnpoint = cell.pos.copy()
                cell.parentsize_when_spawned = cell.parent.size if cell.parent != None else None
                cell.parentpos_when_spawned = cell.parent.pos.copy() if cell.parent != None else None
| |
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import now
from django import forms
from django.core.urlresolvers import reverse
from django.core import management
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from datetime import datetime
from .models import (Player,
PlayerClass,
Tournament,
TournamentPlayer,
TournamentNewsItem,
TournamentOption,
TournamentPage,)
from django.core.mail import send_mass_mail
class EmailPlayersForm(forms.Form):
    """Compose an e-mail and send it to selected tournament participants."""
    email_player_list = forms.BooleanField(
        required=False,
        initial=True)
    email_waiting_list = forms.BooleanField(
        required=False,
        initial=True)
    subject = forms.CharField()
    body = forms.CharField(
        widget=forms.Textarea()
    )
    sender = forms.CharField()

    def get_emails(self, tournament, player_list, waiting_list):
        """Collects recipient addresses from the selected lists.

        Skips entries without an e-mail address; waiting-list addresses
        come first, then the player list.
        """
        if not (player_list or waiting_list):
            return []
        recipients = []
        if waiting_list:
            recipients.extend(
                entry.player.email
                for entry in tournament.get_waiting_list()
                if entry.player.email)
        if player_list:
            recipients.extend(
                entry.player.email
                for entry in tournament.get_player_list()
                if entry.player.email)
        return recipients

    def save(self, tournament):
        """Sends one copy of the message per recipient via send_mass_mail."""
        recipients = self.get_emails(
            tournament,
            self.cleaned_data['email_player_list'],
            self.cleaned_data['email_waiting_list'])
        messages = [
            (self.cleaned_data['subject'],
             self.cleaned_data['body'],
             self.cleaned_data['sender'],
             [recipient])
            for recipient in recipients]
        send_mass_mail(messages)
class TournamentPlayerForm(forms.ModelForm):
    """Model form for editing a player's details (user field excluded).

    NOTE(review): despite the name, this edits the Player model, not
    TournamentPlayer — confirm this is intentional.
    """
    class Meta:
        model = Player
        exclude = ('user',)
class TournamentRegistrationForm(forms.ModelForm):
    """Model form for a TournamentPlayer entry; tournament and player are
    set by the view, not the form."""
    class Meta:
        model = TournamentPlayer
        exclude = ('tournament', 'player')
class TournamentForm(forms.ModelForm):
    """Model form for creating/editing a Tournament; slug, owner and
    registration stages are managed elsewhere."""
    class Meta:
        model = Tournament
        exclude = (
            'slug', 'user', 'registration_stages',)
class TournamentNewsItemForm(forms.ModelForm):
    """Create/edit form for tournament news items with a crispy layout."""
    def __init__(self, *kargs, **kwargs):
        # Configure the crispy-forms helper before the base form is built.
        self.helper = FormHelper()
        self.helper.add_input(Submit('submit', 'Submit'))
        self.helper.form_class = 'edit-page-form'
        instance = kwargs.get('instance')
        if instance:
            # Editing: post back to the item's own edit URL.
            action = reverse('tournament-news-edit', args=[instance.slug])
        else:
            action = reverse('tournament-news-create')
        self.helper.form_action = action
        super(TournamentNewsItemForm, self).__init__(*kargs, **kwargs)

    class Meta:
        model = TournamentNewsItem
        fields = ('title', 'introduction', 'body', 'is_published',)
class TournamentPageForm(forms.ModelForm):
    """Create/edit form for static tournament pages with a crispy layout."""
    def __init__(self, *kargs, **kwargs):
        # Configure the crispy-forms helper before the base form is built.
        self.helper = FormHelper()
        self.helper.add_input(Submit('submit', 'Submit'))
        self.helper.form_class = 'edit-page-form'
        instance = kwargs.get('instance')
        if instance:
            # Editing: post back to the page's own edit URL.
            action = reverse('tournament-page-edit', args=[instance.slug])
        else:
            action = reverse('tournament-page-create')
        self.helper.form_action = action
        super(TournamentPageForm, self).__init__(*kargs, **kwargs)

    class Meta:
        model = TournamentPage
        fields = ('title', 'body', 'show_in_navigation',
                  'navigation_position')
class PlayerForm(forms.ModelForm):
    """Base model form exposing a player's basic contact details."""
    class Meta:
        model = Player
        fields = (
            'pdga_number', 'name', 'club', 'email',
            'phonenumber')
class RegistrationForm(PlayerForm):
    """Tournament registration form: Player fields plus class/options.

    Field availability is adjusted per-tournament in __init__.
    """
    player_class = forms.ChoiceField(
        label=_('Player class'))
    options = forms.MultipleChoiceField(
        label=_('Options'),
        widget=forms.widgets.CheckboxSelectMultiple,
        required=False)
    pdga_terms = forms.BooleanField(
        label=_('Approve PDGA terms'),
        # NOTE(review): 'terms' + 'to' concatenate without a space — the
        # rendered help text reads "termsto"; likely a missing trailing space.
        help_text=_('You must approve the PDGA terms' +
                    'to register for this tournament.'))
    is_reserved_player = forms.BooleanField(
        label=_('Reserved player'),
        help_text=_('I am granted a reserved spot based on last years results.'),
        required=False)
    class Meta:
        model = Player
        fields = ('player_class', 'is_reserved_player', 'pdga_number', 'pdga_terms',
                  'name', 'club', 'country', 'email', 'phonenumber', 'options')
def __init__(self, *kargs, **kwargs):
if 'tournament' in kwargs:
self.tournament = kwargs['tournament']
del kwargs['tournament']
super(RegistrationForm, self).__init__(*kargs, **kwargs)
# Set default country to Norway since all tourneys
# so far have been in Norway.
self.fields['country'].initial = 'NO'
# If there actually are no choices for the "options" field
# for this tournament, we just remove the field from the form
if self.tournament.tournamentoption_set.count() == 0:
del self.fields['options']
# Each tournament has a setting to toggle the PDGA terms field,
# so we must remove the field if it is turned off
if not self.tournament.pdga_rules_approval:
del self.fields['pdga_terms']
# Take care of choices for "options" field
if 'options' in self.fields.keys():
option_choices = []
for o in self.tournament.tournamentoption_set.filter(
is_available=True):
label = '%s - %d %s' % (o.name, o.price,
self.tournament.currency)
option_choices.append((o.id, label))
self.fields['options'].choices = option_choices
# Take care of choices for player class
self.fields['player_class'].choices = (('', '--'), )
# Make some changes for couples tourneys
if self.tournament.is_couples_tourney:
# Remove PDGA field if couples tourney
del self.fields['pdga_number']
# Change labels
self.fields['name'].label = _('Name player 1 / player 2')
self.fields['club'].label = _('Club player 1 / player 2')
if self.tournament.registration_stages:
stage = self.tournament.get_registration_stage()
if stage:
rating = '%s - %s'
self.fields['player_class'].choices += [
(c.player_class.id, rating % (
c.player_class.name,
c.get_class_price())
)
for c in stage.registrationstageclass_set.all()]
else:
self.fields['player_class'].choices += [
(c.id, '%s - %s' % (
c.name,
c.get_class_price(self.tournament))
) for c in self.tournament.classes.all()]
def save(self, *kargs, **kwargs):
player = super(RegistrationForm, self).save(*kwargs, **kwargs)
player_class = PlayerClass.objects.get(
id=self.cleaned_data['player_class'])
tp_kwargs = {
'tournament': self.tournament,
'player': player,
'player_class': player_class,
'registered': now(),
}
# Create TournamentPlayer
tp = TournamentPlayer.objects.create(**tp_kwargs)
# TournamentPlayer saved, lets save options
if 'options' in self.fields.keys():
options = []
for option_id in self.cleaned_data['options']:
try:
option = self.tournament.tournamentoption_set.get(
id=option_id)
except TournamentOption.DoesNotExist:
pass
else:
options.append(option)
tp.options = options
if tp.player.pdga_number:
# Run management command to update rank
management.call_command('pdgarank', tp.player.id)
return tp
| |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals, absolute_import
"""
This module implements classes for processing Lammps output files:
1. log file: contains the thermodynamic data with the format set by the
'thermo_style' command
2. trajectory file(dump file): the file generated by the 'dump' command
Restrictions:
The first 2 fields of the ATOMS section in the trajectory(dump) file
must be the atom id and the atom type. There can be arbitrary number
of fields after that and they all will be treated as floats and
updated based on the field names in the ITEM: ATOMS line.
"""
import re
import os
from io import open
import numpy as np
from monty.json import MSONable
from pymatgen.core.periodic_table import _pt_data
from pymatgen.core.structure import Structure
from pymatgen.core.lattice import Lattice
from pymatgen.analysis.diffusion_analyzer import DiffusionAnalyzer
from pymatgen.io.lammps.data import LammpsData
__author__ = "Kiran Mathew"
__email__ = "kmathew@lbl.gov"
__credits__ = "Navnidhi Rajput, Michael Humbert"
# TODO write parser for one and multi thermo_styles
class LammpsLog(MSONable):
    """
    Parser for LAMMPS log file.

    Attributes set while parsing (when the matching command/line appears in
    the log): ``timestep``, ``nmdsteps``, ``interval``, ``thermo_data``,
    ``fixes`` and ``dangerous_builds``.
    """

    def __init__(self, log_file="log.lammps"):
        """
        Args:
            log_file (string): path to the log file
        """
        self.log_file = os.path.abspath(log_file)
        # -1 marks "no 'timestep' command seen in the log".
        self.timestep = -1
        self._parse_log()

    def _parse_log(self):
        """
        Parse the log file for run and thermodynamic data.
        Sets the thermodynamic data as a dict keyed by the field names taken
        from the custom thermo_style command. thermo_style one and multi
        are not supported yet.
        """
        thermo_data = []
        fixes = []
        d_build = None
        thermo_pattern = None
        with open(self.log_file, 'r') as logfile:
            for line in logfile:
                # Once a "Dangerous builds" count has been captured
                # (d_build set), all command parsing below is switched off.
                # timestep, the unit depends on the 'units' command
                time = re.search(r'timestep\s+([0-9]+)', line)
                if time and not d_build:
                    self.timestep = float(time.group(1))
                # total number of md steps
                steps = re.search(r'run\s+([0-9]+)', line)
                if steps and not d_build:
                    self.nmdsteps = int(steps.group(1))
                # simulation info
                fix = re.search(r'fix.+', line)
                if fix and not d_build:
                    fixes.append(fix.group())
                # dangerous builds
                danger = re.search(r'Dangerous builds\s+([0-9]+)', line)
                if danger and not d_build:
                    # BUGFIX: previously read ``steps.group(1)``; on a
                    # "Dangerous builds" line ``steps`` is None, which
                    # raised AttributeError instead of storing the count.
                    d_build = int(danger.group(1))
                # logging interval
                thermo = re.search(r'thermo\s+([0-9]+)', line)
                if thermo and not d_build:
                    self.interval = float(thermo.group(1))
                # thermodynamic data, set by the thermo_style command
                fmt = re.search(r'thermo_style.+', line)
                if fmt and not d_build:
                    thermo_type = fmt.group().split()[1]
                    fields = fmt.group().split()[2:]
                    no_parse = ["one", "multi"]
                    if thermo_type in no_parse:
                        thermo_data.append("cannot parse thermo_style")
                    else:
                        # One float group per thermo field on a data line.
                        thermo_pattern_string = r"\s*([0-9eE\.+-]+)" + "".join(
                            [r"\s+([0-9eE\.+-]+)" for _ in range(len(fields) - 1)])
                        thermo_pattern = re.compile(thermo_pattern_string)
                if thermo_pattern:
                    if thermo_pattern.search(line):
                        m = thermo_pattern.search(line)
                        thermo_data.append(tuple([float(x) for x in m.groups()]))
        if thermo_data:
            if isinstance(thermo_data[0], str):
                # thermo_style one/multi: keep only the sentinel string.
                self.thermo_data = [thermo_data]
            else:
                # numpy arrays are easier to reshape, previously we used
                # np.array with dtypes
                self.thermo_data = {
                    fields[i]: [thermo_data[j][i] for j in range(len(thermo_data))]
                    for i in range(len(fields))}
        self.fixes = fixes
        self.dangerous_builds = d_build

    def as_dict(self):
        """Serialize every public, non-callable attribute."""
        d = {}
        for attrib in [a for a in dir(self)
                       if not a.startswith('__') and not callable(getattr(self, a))]:
            d[attrib] = getattr(self, attrib)
        d["@module"] = self.__class__.__module__
        d["@class"] = self.__class__.__name__
        return d

    # not really needed ?
    @classmethod
    def from_dict(cls, d):
        """Rebuild by re-parsing the stored log-file path."""
        return cls(log_file=d["log_file"])
# TODO: @wood-b parse binary dump files(*.dcd)
class LammpsDump(MSONable):
    """
    Parse lammps dump file.
    """

    def __init__(self, timesteps, natoms, box_bounds, atoms_data):
        # timesteps: one float per snapshot (the number after ITEM: TIMESTEP)
        self.timesteps = timesteps
        # natoms: atom count from the last "NUMBER OF ATOMS" record read
        self.natoms = natoms
        # box_bounds: rows of floats collected from "BOX BOUNDS" sections
        self.box_bounds = box_bounds
        # atoms_data: one list of floats per atom line, all snapshots appended
        self.atoms_data = atoms_data

    @classmethod
    def from_file(cls, dump_file):
        """
        Build a LammpsDump by scanning ``dump_file`` line by line.

        Each ``parse_*`` flag means "the following line(s) belong to the
        section whose ITEM: header was just seen"; the header line itself is
        skipped with ``continue``.
        """
        timesteps = []
        atoms_data = []
        natoms = 0
        box_bounds = []
        bb_flag = 0
        parse_timestep, parse_natoms, parse_bb, parse_atoms = False, False, False, False
        with open(dump_file) as tf:
            for line in tf:
                if "ITEM: TIMESTEP" in line:
                    parse_timestep = True
                    continue
                if parse_timestep:
                    timesteps.append(float(line))
                    parse_timestep = False
                if "ITEM: NUMBER OF ATOMS" in line:
                    parse_natoms = True
                    continue
                if parse_natoms:
                    natoms = int(line)
                    parse_natoms = False
                if "ITEM: BOX BOUNDS" in line:
                    parse_bb = True
                    continue
                if parse_bb:
                    box_bounds.append([float(x) for x in line.split()])
                    bb_flag += 1
                    # NOTE(review): bb_flag is cumulative across snapshots,
                    # so only the first snapshot contributes three bounds
                    # rows; every later snapshot contributes a single row —
                    # confirm this is intended.
                    parse_bb = False if bb_flag >= 3 else True
                if "ITEM: ATOMS" in line:
                    parse_atoms = True
                    continue
                if parse_atoms:
                    line_data = [float(x) for x in line.split()]
                    atoms_data.append(line_data)
                    # Keep consuming atom lines until every snapshot seen so
                    # far has natoms rows (assumes natoms is constant across
                    # snapshots — TODO confirm).
                    parse_atoms = False if len(atoms_data) == len(timesteps)*natoms else True
        return cls(timesteps, natoms, box_bounds, atoms_data)
# TODO: @wood-b simplify this, use LammpsDump to parse + use mdanalysis to process.
# make sure its backward compatible
class LammpsRun(MSONable):
    """
    Parse the lammps data file, trajectory(dump) file and the log file to extract
    useful info about the system.
    Note: In order to parse trajectory or dump file, the first 2 fields must be
    the id and the atom type. There can be arbitrary number of fields after
    that and they all will be treated as floats.
    Args:
        data_file (str): path to the data file
        trajectory_file (str): path to the trajectory file or dump file
        log_file (str): path to the log file
    """

    def __init__(self, data_file, trajectory_file, log_file="log.lammps"):
        # Absolute paths are stored so as_dict()/from_dict() round-trips
        # are independent of the current working directory.
        self.data_file = os.path.abspath(data_file)
        self.trajectory_file = os.path.abspath(trajectory_file)
        self.log_file = os.path.abspath(log_file)
        self.log = LammpsLog(log_file)
        self.lammps_data = LammpsData.from_file(self.data_file)
        # _parse_trajectory() uses self.natoms, which is derived from
        # lammps_data, so the data file must be loaded first.
        self._set_mol_masses_and_charges()
        self._parse_trajectory()

    def _parse_trajectory(self):
        """
        parse the trajectory file.
        """
        traj_timesteps = []
        trajectory = []
        timestep_label = "ITEM: TIMESTEP"
        # "ITEM: ATOMS id type ...
        traj_label_pattern = re.compile(
            r"^\s*ITEM:\s+ATOMS\s+id\s+type\s+([A-Za-z0-9[\]_\s]*)")
        # default: id type x y z vx vy vz mol"
        # updated below based on the field names in the ITEM: ATOMS line
        # Note: the first 2 fields must be the id and the atom type. There can
        # be arbitrary number of fields after that and they all will be treated
        # as floats.
        traj_pattern = re.compile(
            r"\s*(\d+)\s+(\d+)\s+([0-9eE.+-]+)\s+([0-9eE.+-]+)\s+"
            r"([0-9eE.+-]+)\s+"
            r"([0-9eE.+-]+)\s+"
            r"([0-9eE.+-]+)\s+([0-9eE.+-]+)\s+(\d+)\s*")
        parse_timestep = False
        with open(self.trajectory_file) as tf:
            for line in tf:
                if timestep_label in line:
                    parse_timestep = True
                    continue
                if parse_timestep:
                    traj_timesteps.append(float(line))
                    parse_timestep = False
                if traj_label_pattern.search(line):
                    fields = traj_label_pattern.search(line).group(1)
                    fields = fields.split()
                    # example:- id type x y z vx vy vz mol ...
                    # Rebuild the data-line pattern with one float group per
                    # field named on the label line.
                    traj_pattern_string = r"\s*(\d+)\s+(\d+)" + "".join(
                        [r"\s+([0-9eE\.+-]+)" for _ in range(len(fields))])
                    traj_pattern = re.compile(traj_pattern_string)
                if traj_pattern.search(line):
                    # first 2 fields must be id and type, the rest of them
                    # will be casted as floats
                    m = traj_pattern.search(line)
                    line_data = []
                    # atom ids are shifted to be 0-based
                    line_data.append(int(m.group(1)) - 1)
                    line_data.append(int(m.group(2)))
                    line_data.extend(
                        [float(x) for i, x in enumerate(m.groups()) if
                         i + 1 > 2])
                    trajectory.append(tuple(line_data))
        # NOTE(review): ``fields`` is only bound once an "ITEM: ATOMS" label
        # line has been seen; a dump file without one would raise NameError
        # here — confirm that input is always well-formed.
        traj_dtype = np.dtype([(str('Atoms_id'), np.int64),
                               (str('atom_type'), np.int64)] +
                              [(str(fld), np.float64) for fld in fields])
        self.trajectory = np.array(trajectory, dtype=traj_dtype)
        self.timesteps = np.array(traj_timesteps, dtype=np.float64)
        # Sort each snapshot's rows by atom id so per-step slices line up.
        for step in range(self.timesteps.size):
            begin = step * self.natoms
            end = (step + 1) * self.natoms
            self.trajectory[begin:end] = np.sort(self.trajectory[begin:end],
                                                 order=str("Atoms_id"))

    def _set_mol_masses_and_charges(self):
        """
        set the charge, mass and the atomic makeup for each molecule
        """
        mol_config = []  # [ [atom id1, atom id2, ...], ... ]
        mol_masses = []  # [ [atom mass1, atom mass2, ...], ... ]
        # mol_charges = []
        unique_atomic_masses = self.lammps_data.masses["mass"].values
        mol_ids = self.lammps_data.atoms["molecule-ID"]
        atom_ids = self.lammps_data.atoms.index
        atomic_types = self.lammps_data.atoms["type"]
        unique_mol_ids = np.unique(mol_ids)
        # type numbers are 1-based; index into the masses array
        atomic_masses = unique_atomic_masses[np.array(atomic_types) - 1]
        self.nmols = unique_mol_ids.size
        # NOTE(review): assumes molecule-IDs are the consecutive integers
        # 1..nmols (umid + 1 below) — TODO confirm for all data files.
        for umid in range(self.nmols):
            mol_config.append(np.array(atom_ids)[np.where(mol_ids == umid + 1)] - 1)
            mol_masses.append(atomic_masses[np.where(mol_ids == umid + 1)])
        self.mol_config = np.array(mol_config)
        self.mol_masses = np.array(mol_masses)

    def _weighted_average(self, mol_id, mol_vector):
        """
        Calculate the weighted average of the array comprising of
        atomic vectors corresponding to the molecule with id mol_id.
        Args:
            mol_id (int): molecule id
            mol_vector (numpy array): array of shape,
                natoms_in_molecule with id mol_id x 3
        Returns:
            1D numpy array(3 x 1) of weighted averages in x, y, z directions
        """
        mol_masses = self.mol_masses[mol_id]
        return np.array(
            [np.dot(mol_vector[:, dim], mol_masses) / np.sum(mol_masses)
             for dim in range(3)])

    def _get_mol_vector(self, step, mol_id, param=("x", "y", "z")):
        """
        Returns numpy array corresponding to atomic vectors of parameter
        "param" for the given time step and molecule id
        Args:
            step (int): time step
            mol_id (int): molecule id
            param (list): the atomic parameter for which the weighted
                average is to be computed
        Returns:
            2D numpy array(natoms_in_molecule x 3) of atomic vectors
        """
        begin = step * self.natoms
        end = (step + 1) * self.natoms
        mol_vector_structured = \
            self.trajectory[begin:end][self.mol_config[mol_id]][param]
        # reinterpret the structured rows as a plain float array, one
        # column per selected field
        new_shape = mol_vector_structured.shape + (-1,)
        mol_vector = mol_vector_structured.view(np.float64).reshape(new_shape)
        return mol_vector.copy()

    # TODO: remove this and use only get_displacements(an order of magnitude faster)
    def get_structures_from_trajectory(self):
        """
        Convert the coordinates in each time step to a structure(boxed molecule).
        Used to construct DiffusionAnalyzer object.
        Returns:
            list of Structure objects
        """
        lattice = Lattice([[self.box_lengths[0], 0, 0],
                           [0, self.box_lengths[1], 0],
                           [0, 0, self.box_lengths[2]]])
        structures = []
        mass_to_symbol = dict(
            (round(y["Atomic mass"], 1), x) for x, y in _pt_data.items())
        # NOTE(review): iterates lammps_data.masses rows as dicts here,
        # while _set_mol_masses_and_charges uses masses["mass"].values
        # (DataFrame-style access) — one of the two looks inconsistent
        # with the actual LammpsData API; confirm.
        unique_atomic_masses = np.array([d["mass"] for d in self.lammps_data.masses])
        for step in range(self.timesteps.size):
            begin = step * self.natoms
            end = (step + 1) * self.natoms
            mol_vector_structured = \
                self.trajectory[begin:end][:][["x", "y", "z"]]
            new_shape = mol_vector_structured.shape + (-1,)
            mol_vector = mol_vector_structured.view(np.float64).reshape(
                new_shape)
            coords = mol_vector.copy()
            species = [mass_to_symbol[round(unique_atomic_masses[atype - 1], 1)]
                       for atype in self.trajectory[begin:end][:]["atom_type"]]
            try:
                structure = Structure(lattice, species, coords,
                                      coords_are_cartesian=True)
            except ValueError as error:
                # NOTE(review): if the very first step raises, ``structure``
                # is unbound and the append below raises NameError; on later
                # failures the previous step's structure is appended again.
                print("Error: '{}' at timestep {} in the trajectory".format(
                    error,
                    int(self.timesteps[step])))
            structures.append(structure)
        return structures

    def get_displacements(self):
        """
        Return the initial structure and displacements for each time step.
        Used to interface with the DiffusionAnalyzer.
        Returns:
            Structure object, numpy array of displacements
        """
        lattice = Lattice([[self.box_lengths[0], 0, 0],
                           [0, self.box_lengths[1], 0],
                           [0, 0, self.box_lengths[2]]])
        mass_to_symbol = dict(
            (round(y["Atomic mass"], 1), x) for x, y in _pt_data.items())
        # NOTE(review): same masses-iteration concern as in
        # get_structures_from_trajectory above.
        unique_atomic_masses = np.array([d["mass"] for d in self.lammps_data.masses])
        frac_coords = []
        for step in range(self.timesteps.size):
            begin = step * self.natoms
            end = (step + 1) * self.natoms
            mol_vector_structured = \
                self.trajectory[begin:end][:][["x", "y", "z"]]
            new_shape = mol_vector_structured.shape + (-1,)
            mol_vector = mol_vector_structured.view(np.float64).reshape(
                new_shape)
            coords = mol_vector.copy()
            if step == 0:
                # The returned Structure is built from the first snapshot.
                species = [
                    mass_to_symbol[round(unique_atomic_masses[atype - 1], 1)]
                    for atype in self.trajectory[begin:end][:]["atom_type"]]
                structure = Structure(lattice, species, coords,
                                      coords_are_cartesian=True)
            step_frac_coords = [lattice.get_fractional_coords(crd)
                                for crd in coords]
            frac_coords.append(np.array(step_frac_coords)[:, None])
        frac_coords = np.concatenate(frac_coords, axis=1)
        # per-step fractional displacement ...
        dp = frac_coords[:, 1:] - frac_coords[:, :-1]
        # ... unwrapped across the periodic boundary ...
        dp = dp - np.round(dp)
        # ... and accumulated into displacement from the first snapshot.
        f_disp = np.cumsum(dp, axis=1)
        disp = lattice.get_cartesian_coords(f_disp)
        return structure, disp

    def get_diffusion_analyzer(self, specie, temperature, time_step, step_skip,
                               **kwargs):
        """
        Args:
            specie (Element/Specie): Specie to calculate diffusivity for as a
                String. E.g., "Li".
            temperature (float): Temperature of the diffusion run in Kelvin.
            time_step (int): Time step between measurements.
            step_skip (int): Sampling frequency of the displacements (
                time_step is multiplied by this number to get the real time
                between measurements)
        For the other parameters please see the
        pymatgen.analysis.diffusion_analyzer.DiffusionAnalyzer documentation.
        Returns:
            DiffusionAnalyzer
        """
        # structures = self.get_structures_from_trajectory()
        structure, disp = self.get_displacements()
        return DiffusionAnalyzer(structure, disp, specie, temperature,
                                 time_step, step_skip=step_skip,
                                 **kwargs)

    @property
    def natoms(self):
        # total atom count, from the parsed data file
        return len(self.lammps_data.atoms)

    @property
    def box_lengths(self):
        # [lx, ly, lz] from the data file's box bounds
        return [l[1] - l[0] for l in self.lammps_data.box_bounds]

    @property
    def traj_timesteps(self):
        """
        trajectory time steps in time units.
        e.g. for units = real, time units = fmsec
        """
        return self.timesteps * self.log.timestep

    @property
    def mol_trajectory(self):
        """
        Compute the weighted average trajectory of each molecule at each
        timestep
        Returns:
            2D numpy array ((n_timesteps*mols_number) x 3)
        """
        traj = []
        for step in range(self.timesteps.size):
            tmp_mol = []
            for mol_id in range(self.nmols):
                mol_coords = self._get_mol_vector(step, mol_id,
                                                  param=["x", "y", "z"])
                # take care of periodic boundary conditions
                pbc_wrap(mol_coords, self.box_lengths)
                tmp_mol.append(self._weighted_average(mol_id, mol_coords))
            traj.append(tmp_mol)
        return np.array(traj)

    @property
    def mol_velocity(self):
        """
        Compute the weighted average velcoity of each molecule at each
        timestep.
        Returns:
            2D numpy array ((n_timesteps*mols_number) x 3)
        """
        velocity = []
        for step in range(self.timesteps.size):
            tmp_mol = []
            for mol_id in range(self.nmols):
                mol_velocities = self._get_mol_vector(step, mol_id,
                                                      param=["vx", "vy", "vz"])
                tmp_mol.append(self._weighted_average(mol_id, mol_velocities))
            velocity.append(tmp_mol)
        return np.array(velocity)

    def as_dict(self):
        """Serialize attributes; MSONables nested, arrays as lists."""
        d = {}
        skip = ["mol_velocity", "mol_trajectory"]  # not applicable in general
        attributes = [a for a in dir(self) if a not in skip and not a.startswith('__')]
        attributes = [a for a in attributes if not callable(getattr(self, a))]
        for attrib in attributes:
            obj = getattr(self, attrib)
            if isinstance(obj, MSONable):
                d[attrib] = obj.as_dict()
            elif isinstance(obj, np.ndarray):
                d[attrib] = obj.tolist()
            else:
                d[attrib] = obj
        d["@module"] = self.__class__.__module__
        d["@class"] = self.__class__.__name__
        return d

    # not really needed ?
    @classmethod
    def from_dict(cls, d):
        """Rebuild by re-parsing the stored file paths."""
        return cls(data_file=d["data_file"], trajectory_file=d["trajectory_file"],
                   log_file=d["log_file"])
def pbc_wrap(array, box_lengths):
    """
    Wrap molecule coordinates around the periodic boundary, in place.

    Args:
        array (numpy.ndarray): molecule coordinates, [[x1,y1,z1],[x2,y2,z2],..]
        box_lengths (list): [x_length, y_length, z_length]
    """
    # NOTE(review): the reference is the *x* coordinate of the first atom
    # and is reused for all three axes — possibly intended to be
    # array[0, i] per axis; behaviour kept exactly as before.
    ref = array[0, 0]
    for axis in range(3):
        length = box_lengths[axis]
        column = array[:, axis]
        # shift atoms more than half a box above the reference down ...
        too_high = (column - ref) >= length / 2
        column[too_high] = column[too_high] - length
        # ... then (on the updated column) shift atoms more than half a
        # box below the reference up.
        too_low = (column - ref) < -length / 2
        column[too_low] = column[too_low] + length
| |
# -*- test-case-name: twisted.web.test.test_xml -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Micro Document Object Model: a partial DOM implementation with SUX.
This is an implementation of what we consider to be the useful subset of the
DOM. The chief advantage of this library is that, not being burdened with
standards compliance, it can remain very stable between versions. We can also
implement utility 'pythonic' ways to access and mutate the XML tree.
Since this has not been subjected to a serious trial by fire, it is not recommended
to use this outside of Twisted applications. However, it seems to work just
fine for the documentation generator, which parses a fairly representative
sample of XML.
Microdom mainly focuses on working with HTML and XHTML.
"""
# System Imports
import re
from cStringIO import StringIO
from types import StringTypes, UnicodeType
# Twisted Imports
from twisted.web.sux import XMLParser, ParseError
from twisted.python.util import InsensitiveDict
def getElementsByTagName(iNode, name):
    """
    Return a list of all child elements of C{iNode} with a name matching
    C{name}.
    Note that this implementation does not conform to the DOM Level 1 Core
    specification because it may return C{iNode}.
    @param iNode: An element at which to begin searching. If C{iNode} has a
        name matching C{name}, it will be included in the result.
    @param name: A C{str} giving the name of the elements to return.
    @return: A C{list} of direct or indirect child elements of C{iNode} with
        the name C{name}. This may include C{iNode}.
    """
    results = []
    # Depth-first, pre-order: children are pushed onto the front of the
    # work list, so the first child is always examined next.
    pending = [iNode]
    while pending:
        node = pending.pop(0)
        if node.nodeName == name:
            results.append(node)
        pending[:0] = node.childNodes
    return results
def getElementsByTagNameNoCase(iNode, name):
    """
    Case-insensitive variant of L{getElementsByTagName}: node names are
    compared after lower-casing both sides. May include C{iNode} itself.
    """
    wanted = name.lower()
    results = []
    pending = [iNode]
    while pending:
        node = pending.pop(0)
        if node.nodeName.lower() == wanted:
            results.append(node)
        pending[:0] = node.childNodes
    return results
# Order is important: '&' must be escaped before any entity (which itself
# contains '&') is introduced, and un-escaped last on the way back.
HTML_ESCAPE_CHARS = (('&', '&amp;'),  # don't add any entities before this one
                     ('<', '&lt;'),
                     ('>', '&gt;'),
                     ('"', '&quot;'))
REV_HTML_ESCAPE_CHARS = list(reversed(HTML_ESCAPE_CHARS))
XML_ESCAPE_CHARS = HTML_ESCAPE_CHARS + (("'", '&#39;'),)
REV_XML_ESCAPE_CHARS = list(reversed(XML_ESCAPE_CHARS))


def unescape(text, chars=REV_HTML_ESCAPE_CHARS):
    "Perform the exact opposite of 'escape'."
    for plain, entity in chars:
        text = text.replace(entity, plain)
    return text


def escape(text, chars=HTML_ESCAPE_CHARS):
    "Escape a few XML special chars with XML entities."
    for plain, entity in chars:
        text = text.replace(plain, entity)
    return text
class MismatchedTags(Exception):
    """
    Raised when a closing tag does not match the tag that was opened,
    carrying the file name plus the begin/end line and column positions.
    """

    def __init__(self, filename, expect, got, endLine, endCol, begLine, begCol):
        self.filename = filename
        self.expect = expect
        self.got = got
        self.begLine = begLine
        self.begCol = begCol
        self.endLine = endLine
        self.endCol = endCol

    def __str__(self):
        return ("expected </%s>, got </%s> line: %s col: %s, began line: %s col: %s"
                % (self.expect, self.got, self.endLine, self.endCol,
                   self.begLine, self.begCol))
class Node(object):
    """
    Base class for every node in the micro-DOM tree. Tracks a parent
    reference and an ordered list of children, and defines the child
    manipulation protocol shared by all node types.
    """

    nodeName = "Node"

    def __init__(self, parentNode=None):
        self.parentNode = parentNode
        self.childNodes = []

    def isEqualToNode(self, other):
        """
        Structural comparison against C{other}: equal when both have the
        same number of children and each pair of corresponding children
        compares equal.
        @type other: L{Node}
        @rtype: C{bool}
        """
        mine = self.childNodes
        theirs = other.childNodes
        if len(mine) != len(theirs):
            return False
        return all(a.isEqualToNode(b) for a, b in zip(mine, theirs))

    def writexml(self, stream, indent='', addindent='', newl='', strip=0,
                 nsprefixes={}, namespace=''):
        # Serialization is defined by each concrete subclass.
        raise NotImplementedError()

    def toxml(self, indent='', addindent='', newl='', strip=0, nsprefixes={},
              namespace=''):
        buf = StringIO()
        self.writexml(buf, indent, addindent, newl, strip, nsprefixes,
                      namespace)
        return buf.getvalue()

    def writeprettyxml(self, stream, indent='', addindent=' ', newl='\n', strip=0):
        return self.writexml(stream, indent, addindent, newl, strip)

    def toprettyxml(self, indent='', addindent=' ', newl='\n', strip=0):
        return self.toxml(indent, addindent, newl, strip)

    def cloneNode(self, deep=0, parent=None):
        raise NotImplementedError()

    def hasChildNodes(self):
        return 1 if self.childNodes else 0

    def appendChild(self, child):
        """
        Append C{child} as the last child of this node and reparent it.
        @raise TypeError: If C{child} is not a C{Node} instance.
        """
        if not isinstance(child, Node):
            raise TypeError("expected Node instance")
        self.childNodes.append(child)
        child.parentNode = self

    def insertBefore(self, new, ref):
        """
        Insert C{new} as a child of this node immediately before the
        existing child C{ref}, and return C{new}.
        @raise TypeError: If C{new} or C{ref} is not a C{Node} instance.
        """
        if not isinstance(new, Node) or not isinstance(ref, Node):
            raise TypeError("expected Node instance")
        position = self.childNodes.index(ref)
        new.parentNode = self
        self.childNodes.insert(position, new)
        return new

    def removeChild(self, child):
        """
        Detach C{child} from this node (a no-op when it is not actually a
        child) and return it.
        @raise TypeError: If C{child} is not a C{Node} instance.
        """
        if not isinstance(child, Node):
            raise TypeError("expected Node instance")
        try:
            self.childNodes.remove(child)
        except ValueError:
            # Not one of our children; leave it untouched, as before.
            pass
        else:
            child.parentNode = None
        return child

    def replaceChild(self, newChild, oldChild):
        """
        Replace the existing child C{oldChild} with C{newChild} in place.
        @raise TypeError: If either argument is not a C{Node} instance.
        @raise ValueError: If C{oldChild} is not a child of this node.
        """
        if not isinstance(newChild, Node) or not isinstance(oldChild, Node):
            raise TypeError("expected Node instance")
        if oldChild.parentNode is not self:
            raise ValueError("oldChild is not a child of this node")
        slot = self.childNodes.index(oldChild)
        self.childNodes[slot] = newChild
        oldChild.parentNode = None
        newChild.parentNode = self

    def lastChild(self):
        return self.childNodes[-1]

    def firstChild(self):
        if self.childNodes:
            return self.childNodes[0]
        return None
#def get_ownerDocument(self):
# """This doesn't really get the owner document; microdom nodes
# don't even have one necessarily. This gets the root node,
# which is usually what you really meant.
# *NOT DOM COMPLIANT.*
# """
# node=self
# while (node.parentNode): node=node.parentNode
# return node
#ownerDocument=node.get_ownerDocument()
# leaving commented for discussion; see also domhelpers.getParents(node)
class Document(Node):
    """
    The root of a microdom tree. Holds at most one child, the document
    element, plus an optional doctype string.
    """

    doctype = None

    def __init__(self, documentElement=None):
        Node.__init__(self)
        if documentElement:
            self.appendChild(documentElement)

    def cloneNode(self, deep=0, parent=None):
        clone = Document()
        clone.doctype = self.doctype
        if deep:
            element = self.documentElement.cloneNode(1, self)
        else:
            element = self.documentElement
        clone.appendChild(element)
        return clone

    def isEqualToDocument(self, n):
        return (self.doctype == n.doctype) and Node.isEqualToNode(self, n)
    isEqualToNode = isEqualToDocument

    def get_documentElement(self):
        return self.childNodes[0]
    documentElement = property(get_documentElement)

    def appendChild(self, child):
        """
        Make C{child} the I{document element} of this L{Document}.
        @raise ValueError: If this document already has a document element.
        """
        if self.childNodes:
            raise ValueError("Only one element per document.")
        Node.appendChild(self, child)

    def writexml(self, stream, indent='', addindent='', newl='', strip=0,
                 nsprefixes={}, namespace=''):
        stream.write('<?xml version="1.0"?>' + newl)
        if self.doctype:
            stream.write("<!DOCTYPE " + self.doctype + ">" + newl)
        self.documentElement.writexml(stream, indent, addindent, newl, strip,
                                      nsprefixes, namespace)

    # of dubious utility (?)
    def createElement(self, name, **kw):
        return Element(name, **kw)

    def createTextNode(self, text):
        return Text(text)

    def createComment(self, text):
        return Comment(text)

    def getElementsByTagName(self, name):
        # Defer to the case-insensitive search when the document element
        # was parsed case-insensitively.
        if self.documentElement.caseInsensitive:
            return getElementsByTagNameNoCase(self, name)
        return getElementsByTagName(self, name)

    def getElementById(self, id):
        # Breadth-ish scan of the whole tree for a matching "id" attribute;
        # returns None when nothing matches.
        queue = self.childNodes[:]
        while queue:
            node = queue.pop(0)
            if node.childNodes:
                queue.extend(node.childNodes)
            if hasattr(node, 'getAttribute') and node.getAttribute("id") == id:
                return node
class EntityReference(Node):
    """
    A node holding a single entity reference; its value is the literal
    C{&name;} text, written out verbatim.
    """

    def __init__(self, eref, parentNode=None):
        Node.__init__(self, parentNode)
        self.eref = eref
        reference_text = "&" + eref + ";"
        self.nodeValue = reference_text
        self.data = reference_text

    def isEqualToEntityReference(self, n):
        if not isinstance(n, EntityReference):
            return 0
        return (self.eref == n.eref) and (self.nodeValue == n.nodeValue)
    isEqualToNode = isEqualToEntityReference

    def writexml(self, stream, indent='', addindent='', newl='', strip=0,
                 nsprefixes={}, namespace=''):
        stream.write(self.nodeValue)

    def cloneNode(self, deep=0, parent=None):
        return EntityReference(self.eref, parent)
class CharacterData(Node):
    """
    Base class for nodes whose content is a flat string (text, comments,
    CDATA sections).
    """

    def __init__(self, data, parentNode=None):
        Node.__init__(self, parentNode)
        # value, data and nodeValue are three aliases for the same payload.
        self.value = data
        self.data = data
        self.nodeValue = data

    def isEqualToCharacterData(self, n):
        return self.value == n.value
    isEqualToNode = isEqualToCharacterData
class Comment(CharacterData):
    """A comment node; serialized as C{<!--data-->}."""

    def writexml(self, stream, indent='', addindent='', newl='', strip=0,
                 nsprefixes={}, namespace=''):
        payload = self.data
        # Py2: unicode payloads are written UTF-8 encoded.
        if isinstance(payload, UnicodeType):
            payload = payload.encode('utf8')
        stream.write("<!--%s-->" % payload)

    def cloneNode(self, deep=0, parent=None):
        return Comment(self.nodeValue, parent)
class Text(CharacterData):
    """
    A text node. When C{raw} is true the value is written verbatim;
    otherwise it is entity-escaped (and optionally whitespace-stripped).
    """

    def __init__(self, data, parentNode=None, raw=0):
        CharacterData.__init__(self, data, parentNode)
        self.raw = raw

    def isEqualToNode(self, other):
        """
        Equal when the underlying character data matches and both nodes
        have the same C{raw} flag.
        """
        if not CharacterData.isEqualToNode(self, other):
            return False
        return self.raw == other.raw

    def cloneNode(self, deep=0, parent=None):
        return Text(self.nodeValue, parent, self.raw)

    def writexml(self, stream, indent='', addindent='', newl='', strip=0,
                 nsprefixes={}, namespace=''):
        if self.raw:
            # Raw text: stringify if needed, but never escape.
            val = self.nodeValue
            if not isinstance(val, StringTypes):
                val = str(self.nodeValue)
        else:
            text = self.nodeValue
            if not isinstance(text, StringTypes):
                text = str(text)
            if strip:
                # Collapse all runs of whitespace to single spaces.
                text = ' '.join(text.split())
            val = escape(text)
        # Py2: unicode output is written UTF-8 encoded.
        if isinstance(val, UnicodeType):
            val = val.encode('utf8')
        stream.write(val)

    def __repr__(self):
        return "Text(%s" % repr(self.nodeValue) + ')'
class CDATASection(CharacterData):
    """A CDATA section; its content is written verbatim inside the markers."""

    def cloneNode(self, deep=0, parent=None):
        return CDATASection(self.nodeValue, parent)

    def writexml(self, stream, indent='', addindent='', newl='', strip=0,
                 nsprefixes={}, namespace=''):
        write = stream.write
        write("<![CDATA[")
        write(self.nodeValue)
        write("]]>")
def _genprefix():
i = 0
while True:
yield 'p' + str(i)
i = i + 1
genprefix = _genprefix().next
# Thin marker subclass with no behaviour of its own: getAttributeNode wraps
# an attribute's string value in this so callers receive a
# CharacterData-like node rather than a bare string.
class _Attr(CharacterData):
    "Support class for getAttributeNode."
class Element(Node):
preserveCase = 0
caseInsensitive = 1
nsprefixes = None
def __init__(self, tagName, attributes=None, parentNode=None,
filename=None, markpos=None,
caseInsensitive=1, preserveCase=0,
namespace=None):
Node.__init__(self, parentNode)
self.preserveCase = preserveCase or not caseInsensitive
self.caseInsensitive = caseInsensitive
if not preserveCase:
tagName = tagName.lower()
if attributes is None:
self.attributes = {}
else:
self.attributes = attributes
for k, v in self.attributes.items():
self.attributes[k] = unescape(v)
if caseInsensitive:
self.attributes = InsensitiveDict(self.attributes,
preserve=preserveCase)
self.endTagName = self.nodeName = self.tagName = tagName
self._filename = filename
self._markpos = markpos
self.namespace = namespace
def addPrefixes(self, pfxs):
if self.nsprefixes is None:
self.nsprefixes = pfxs
else:
self.nsprefixes.update(pfxs)
def endTag(self, endTagName):
if not self.preserveCase:
endTagName = endTagName.lower()
self.endTagName = endTagName
def isEqualToElement(self, n):
if self.caseInsensitive:
return ((self.attributes == n.attributes)
and (self.nodeName.lower() == n.nodeName.lower()))
return (self.attributes == n.attributes) and (self.nodeName == n.nodeName)
def isEqualToNode(self, other):
"""
Compare this element to C{other}. If the C{nodeName}, C{namespace},
C{attributes}, and C{childNodes} are all the same, return C{True},
otherwise return C{False}.
"""
return (
self.nodeName.lower() == other.nodeName.lower() and
self.namespace == other.namespace and
self.attributes == other.attributes and
Node.isEqualToNode(self, other))
def cloneNode(self, deep=0, parent=None):
clone = Element(
self.tagName, parentNode=parent, namespace=self.namespace,
preserveCase=self.preserveCase, caseInsensitive=self.caseInsensitive)
clone.attributes.update(self.attributes)
if deep:
clone.childNodes = [child.cloneNode(1, clone) for child in self.childNodes]
else:
clone.childNodes = []
return clone
def getElementsByTagName(self, name):
if self.caseInsensitive:
return getElementsByTagNameNoCase(self, name)
return getElementsByTagName(self, name)
    def hasAttributes(self):
        """DOM accessor; microdom elements always claim to have attributes."""
        return 1
    def getAttribute(self, name, default=None):
        """Return the value of attribute C{name}, or C{default} if unset."""
        return self.attributes.get(name, default)
def getAttributeNS(self, ns, name, default=None):
nsk = (ns, name)
if self.attributes.has_key(nsk):
return self.attributes[nsk]
if ns == self.namespace:
return self.attributes.get(name, default)
return default
    def getAttributeNode(self, name):
        """Return the named attribute's value wrapped in an C{_Attr} node."""
        return _Attr(self.getAttribute(name), self)
    def setAttribute(self, name, attr):
        """Set attribute C{name} to the value C{attr}."""
        self.attributes[name] = attr
def removeAttribute(self, name):
if name in self.attributes:
del self.attributes[name]
    def hasAttribute(self, name):
        """Return whether this element has an attribute named C{name}."""
        return name in self.attributes
def writexml(self, stream, indent='', addindent='', newl='', strip=0,
nsprefixes={}, namespace=''):
"""
Serialize this L{Element} to the given stream.
@param stream: A file-like object to which this L{Element} will be
written.
@param nsprefixes: A C{dict} mapping namespace URIs as C{str} to
prefixes as C{str}. This defines the prefixes which are already in
scope in the document at the point at which this L{Element} exists.
This is essentially an implementation detail for namespace support.
Applications should not try to use it.
@param namespace: The namespace URI as a C{str} which is the default at
the point in the document at which this L{Element} exists. This is
essentially an implementation detail for namespace support.
Applications should not try to use it.
"""
# write beginning
ALLOWSINGLETON = ('img', 'br', 'hr', 'base', 'meta', 'link', 'param',
'area', 'input', 'col', 'basefont', 'isindex',
'frame')
BLOCKELEMENTS = ('html', 'head', 'body', 'noscript', 'ins', 'del',
'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'script',
'ul', 'ol', 'dl', 'pre', 'hr', 'blockquote',
'address', 'p', 'div', 'fieldset', 'table', 'tr',
'form', 'object', 'fieldset', 'applet', 'map')
FORMATNICELY = ('tr', 'ul', 'ol', 'head')
# this should never be necessary unless people start
# changing .tagName on the fly(?)
if not self.preserveCase:
self.endTagName = self.tagName
w = stream.write
if self.nsprefixes:
newprefixes = self.nsprefixes.copy()
for ns in nsprefixes.keys():
if ns in newprefixes:
del newprefixes[ns]
else:
newprefixes = {}
begin = ['<']
if self.tagName in BLOCKELEMENTS:
begin = [newl, indent] + begin
bext = begin.extend
writeattr = lambda _atr, _val: bext((' ', _atr, '="', escape(_val), '"'))
# Make a local for tracking what end tag will be used. If namespace
# prefixes are involved, this will be changed to account for that
# before it's actually used.
endTagName = self.endTagName
if namespace != self.namespace and self.namespace is not None:
# If the current default namespace is not the namespace of this tag
# (and this tag has a namespace at all) then we'll write out
# something related to namespaces.
if self.namespace in nsprefixes:
# This tag's namespace already has a prefix bound to it. Use
# that prefix.
prefix = nsprefixes[self.namespace]
bext(prefix + ':' + self.tagName)
# Also make sure we use it for the end tag.
endTagName = prefix + ':' + self.endTagName
else:
# This tag's namespace has no prefix bound to it. Change the
# default namespace to this tag's namespace so we don't need
# prefixes. Alternatively, we could add a new prefix binding.
# I'm not sure why the code was written one way rather than the
# other. -exarkun
bext(self.tagName)
writeattr("xmlns", self.namespace)
# The default namespace just changed. Make sure any children
# know about this.
namespace = self.namespace
else:
# This tag has no namespace or its namespace is already the default
# namespace. Nothing extra to do here.
bext(self.tagName)
j = ''.join
for attr, val in self.attributes.iteritems():
if isinstance(attr, tuple):
ns, key = attr
if nsprefixes.has_key(ns):
prefix = nsprefixes[ns]
else:
prefix = genprefix()
newprefixes[ns] = prefix
assert val is not None
writeattr(prefix+':'+key,val)
else:
assert val is not None
writeattr(attr, val)
if newprefixes:
for ns, prefix in newprefixes.iteritems():
if prefix:
writeattr('xmlns:'+prefix, ns)
newprefixes.update(nsprefixes)
downprefixes = newprefixes
else:
downprefixes = nsprefixes
w(j(begin))
if self.childNodes:
w(">")
newindent = indent + addindent
for child in self.childNodes:
if self.tagName in BLOCKELEMENTS and \
self.tagName in FORMATNICELY:
w(j((newl, newindent)))
child.writexml(stream, newindent, addindent, newl, strip,
downprefixes, namespace)
if self.tagName in BLOCKELEMENTS:
w(j((newl, indent)))
w(j(('</', endTagName, '>')))
elif self.tagName.lower() not in ALLOWSINGLETON:
w(j(('></', endTagName, '>')))
else:
w(" />")
def __repr__(self):
rep = "Element(%s" % repr(self.nodeName)
if self.attributes:
rep += ", attributes=%r" % (self.attributes,)
if self._filename:
rep += ", filename=%r" % (self._filename,)
if self._markpos:
rep += ", markpos=%r" % (self._markpos,)
return rep + ')'
def __str__(self):
rep = "<" + self.nodeName
if self._filename or self._markpos:
rep += " ("
if self._filename:
rep += repr(self._filename)
if self._markpos:
rep += " line %s column %s" % self._markpos
if self._filename or self._markpos:
rep += ")"
for item in self.attributes.items():
rep += " %s=%r" % item
if self.hasChildNodes():
rep += " >...</%s>" % self.nodeName
else:
rep += " />"
return rep
def _unescapeDict(d):
    """Return a copy of *d* with every value entity-unescaped."""
    return dict((k, unescape(v)) for k, v in d.items())
def _reverseDict(d):
    """Return a mapping from the values of *d* back to its keys."""
    return dict((v, k) for k, v in d.items())
class MicroDOMParser(XMLParser):
    """
    SAX-style parser which builds a tree of microdom nodes.

    Completed top-level nodes accumulate in C{self.documents}.  With
    C{beExtremelyLenient} set, common sloppy-HTML constructs (unclosed
    tags, mismatched close tags, comments inside <script>) are repaired
    rather than rejected.
    """

    # <dash> glyph: a quick scan thru the DTD says BODY, AREA, LINK, IMG, HR,
    # P, DT, DD, LI, INPUT, OPTION, THEAD, TFOOT, TBODY, COLGROUP, COL, TR, TH,
    # TD, HEAD, BASE, META, HTML all have optional closing tags

    # Tags which are closed as soon as they are opened (lenient mode only).
    soonClosers = 'area link br img hr input base meta'.split()
    # Tags implicitly closed when one of the listed tags opens inside them.
    laterClosers = {'p': ['p', 'dt'],
                    'dt': ['dt','dd'],
                    'dd': ['dt', 'dd'],
                    'li': ['li'],
                    'tbody': ['thead', 'tfoot', 'tbody'],
                    'thead': ['thead', 'tfoot', 'tbody'],
                    'tfoot': ['thead', 'tfoot', 'tbody'],
                    'colgroup': ['colgroup'],
                    'col': ['col'],
                    'tr': ['tr'],
                    'td': ['td'],
                    'th': ['th'],
                    'head': ['body'],
                    'title': ['head', 'body'], # this looks wrong...
                    'option': ['option'],
                    }

    def __init__(self, beExtremelyLenient=0, caseInsensitive=1, preserveCase=0,
                 soonClosers=soonClosers, laterClosers=laterClosers):
        self.elementstack = []
        d = {'xmlns': 'xmlns', '': None}
        dr = _reverseDict(d)
        self.nsstack = [(d, None, dr)]
        self.documents = []
        self._mddoctype = None
        self.beExtremelyLenient = beExtremelyLenient
        self.caseInsensitive = caseInsensitive
        self.preserveCase = preserveCase or not caseInsensitive
        self.soonClosers = soonClosers
        self.laterClosers = laterClosers
        # self.indentlevel = 0

    def shouldPreserveSpace(self):
        """Whitespace is significant inside <pre> or xml:space='preserve'."""
        # Scan every open element; order does not matter for an any-match.
        # (The original indexed with [-edx], where -0 == 0 — it visited the
        # same set of elements, just in a confusing order.)
        for el in self.elementstack:
            if el.tagName == 'pre' or el.getAttribute("xml:space", '') == 'preserve':
                return 1
        return 0

    def _getparent(self):
        """Return the innermost open element, or None at top level."""
        if self.elementstack:
            return self.elementstack[-1]
        else:
            return None

    COMMENT = re.compile(r"\s*/[/*]\s*")

    def _fixScriptElement(self, el):
        # this deals with case where there is comment or CDATA inside
        # <script> tag and we want to do the right thing with it
        if not self.beExtremelyLenient or not len(el.childNodes) == 1:
            return
        c = el.firstChild()
        if isinstance(c, Text):
            # deal with nasty people who do stuff like:
            #   <script> // <!--
            #      x = 1;
            #   // --></script>
            # tidy does this, for example.
            prefix = ""
            oldvalue = c.value
            match = self.COMMENT.match(oldvalue)
            if match:
                prefix = match.group()
                oldvalue = oldvalue[len(prefix):]

            # now see if contents are actual node and comment or CDATA
            try:
                e = parseString("<a>%s</a>" % oldvalue).childNodes[0]
            except (ParseError, MismatchedTags):
                return
            if len(e.childNodes) != 1:
                return
            e = e.firstChild()
            if isinstance(e, (CDATASection, Comment)):
                el.childNodes = []
                if prefix:
                    el.childNodes.append(Text(prefix))
                el.childNodes.append(e)

    def gotDoctype(self, doctype):
        self._mddoctype = doctype

    def gotTagStart(self, name, attributes):
        """Handle an open tag: implicit closes, namespaces, soon-closers."""
        # print ' '*self.indentlevel, 'start tag',name
        # self.indentlevel += 1
        parent = self._getparent()
        if (self.beExtremelyLenient and isinstance(parent, Element)):
            parentName = parent.tagName
            myName = name
            if self.caseInsensitive:
                parentName = parentName.lower()
                myName = myName.lower()
            if myName in self.laterClosers.get(parentName, []):
                self.gotTagEnd(parent.tagName)
                parent = self._getparent()
        attributes = _unescapeDict(attributes)
        namespaces = self.nsstack[-1][0]
        newspaces = {}
        # Iterate over a snapshot: Python 3 forbids adding or removing keys
        # while iterating a live dict view (Python 2's items() was a list).
        for k, v in list(attributes.items()):
            if k.startswith('xmlns'):
                spacenames = k.split(':', 1)
                if len(spacenames) == 2:
                    newspaces[spacenames[1]] = v
                else:
                    newspaces[''] = v
                del attributes[k]
        if newspaces:
            namespaces = namespaces.copy()
            namespaces.update(newspaces)
        for k, v in list(attributes.items()):
            ksplit = k.split(':', 1)
            if len(ksplit) == 2:
                pfx, tv = ksplit
                if pfx != 'xml' and pfx in namespaces:
                    attributes[namespaces[pfx], tv] = v
                    del attributes[k]
        el = Element(name, attributes, parent,
                     self.filename, self.saveMark(),
                     caseInsensitive=self.caseInsensitive,
                     preserveCase=self.preserveCase,
                     namespace=namespaces.get(''))
        revspaces = _reverseDict(newspaces)
        el.addPrefixes(revspaces)
        if newspaces:
            rscopy = self.nsstack[-1][2].copy()
            rscopy.update(revspaces)
            self.nsstack.append((namespaces, el, rscopy))
        self.elementstack.append(el)
        if parent:
            parent.appendChild(el)
        if (self.beExtremelyLenient and el.tagName in self.soonClosers):
            self.gotTagEnd(name)

    def _gotStandalone(self, factory, data):
        """Attach a leaf node (text/comment/CDATA/entity) to the tree."""
        parent = self._getparent()
        te = factory(data, parent)
        if parent:
            parent.appendChild(te)
        elif self.beExtremelyLenient:
            self.documents.append(te)

    def gotText(self, data):
        if data.strip() or self.shouldPreserveSpace():
            self._gotStandalone(Text, data)

    def gotComment(self, data):
        self._gotStandalone(Comment, data)

    def gotEntityReference(self, entityRef):
        self._gotStandalone(EntityReference, entityRef)

    def gotCData(self, cdata):
        self._gotStandalone(CDATASection, cdata)

    def gotTagEnd(self, name):
        """Handle a close tag, repairing mismatches in lenient mode."""
        # print ' '*self.indentlevel, 'end tag',name
        # self.indentlevel -= 1
        if not self.elementstack:
            if self.beExtremelyLenient:
                return
            raise MismatchedTags(*((self.filename, "NOTHING", name)
                                   + self.saveMark() + (0, 0)))
        el = self.elementstack.pop()
        pfxdix = self.nsstack[-1][2]
        if self.nsstack[-1][1] is el:
            nstuple = self.nsstack.pop()
        else:
            nstuple = None
        if self.caseInsensitive:
            tn = el.tagName.lower()
            cname = name.lower()
        else:
            tn = el.tagName
            cname = name

        nsplit = name.split(':', 1)
        if len(nsplit) == 2:
            pfx, newname = nsplit
            ns = pfxdix.get(pfx, None)
            if ns is not None:
                if el.namespace != ns:
                    if not self.beExtremelyLenient:
                        raise MismatchedTags(*((self.filename, el.tagName, name)
                                               + self.saveMark() + el._markpos))
        if not (tn == cname):
            if self.beExtremelyLenient:
                if self.elementstack:
                    lastEl = self.elementstack[0]
                    # xrange() does not exist on Python 3; range() is
                    # behaviorally identical here.
                    for idx in range(len(self.elementstack)):
                        if self.elementstack[-(idx + 1)].tagName == cname:
                            self.elementstack[-(idx + 1)].endTag(name)
                            break
                    else:
                        # this was a garbage close tag; wait for a real one
                        self.elementstack.append(el)
                        if nstuple is not None:
                            self.nsstack.append(nstuple)
                        return
                    del self.elementstack[-(idx + 1):]
                    if not self.elementstack:
                        self.documents.append(lastEl)
                        return
            else:
                raise MismatchedTags(*((self.filename, el.tagName, name)
                                       + self.saveMark() + el._markpos))
        el.endTag(name)
        if not self.elementstack:
            self.documents.append(el)
        if self.beExtremelyLenient and el.tagName == "script":
            self._fixScriptElement(el)

    def connectionLost(self, reason):
        """Flush state at end of input; unclosed elements raise
        MismatchedTags unless extremely lenient."""
        XMLParser.connectionLost(self, reason)  # This can cause more events!
        if self.elementstack:
            if self.beExtremelyLenient:
                self.documents.append(self.elementstack[0])
            else:
                raise MismatchedTags(*((self.filename, self.elementstack[-1],
                                        "END_OF_FILE")
                                       + self.saveMark()
                                       + self.elementstack[-1]._markpos))
def parse(readable, *args, **kwargs):
    """Parse HTML or XML from a readable object or a file path.

    Extra positional/keyword arguments are forwarded to MicroDOMParser.
    Returns a Document wrapping the parsed tree; raises ParseError when
    no top-level nodes were produced.
    """
    toClose = None
    if not hasattr(readable, "read"):
        # A path was given; open it ourselves and remember to close it
        # (the original leaked this file handle).
        readable = toClose = open(readable, "rb")
    try:
        mdp = MicroDOMParser(*args, **kwargs)
        mdp.filename = getattr(readable, "name", "<xmlfile />")
        mdp.makeConnection(None)
        if hasattr(readable, "getvalue"):
            mdp.dataReceived(readable.getvalue())
        else:
            r = readable.read(1024)
            while r:
                mdp.dataReceived(r)
                r = readable.read(1024)
        mdp.connectionLost(None)
    finally:
        if toClose is not None:
            toClose.close()

    if not mdp.documents:
        raise ParseError(mdp.filename, 0, 0, "No top-level Nodes in document")

    if mdp.beExtremelyLenient:
        if len(mdp.documents) == 1:
            d = mdp.documents[0]
            if not isinstance(d, Element):
                # Wrap a lone non-element (e.g. bare text) in <html>.
                el = Element("html")
                el.appendChild(d)
                d = el
        else:
            # Multiple top-level nodes: collect them under one <html>.
            d = Element("html")
            for child in mdp.documents:
                d.appendChild(child)
    else:
        d = mdp.documents[0]
    doc = Document(d)
    doc.doctype = mdp._mddoctype
    return doc
def parseString(st, *args, **kw):
    """Parse a string containing HTML or XML."""
    if isinstance(st, UnicodeType):
        # Encoding first isn't particularly ideal, but it does work.
        st = st.encode('UTF-16')
    return parse(StringIO(st), *args, **kw)
def parseXML(readable):
    """Parse an XML readable object, case-sensitively and preserving case."""
    return parse(readable, caseInsensitive=0, preserveCase=1)
def parseXMLString(st):
    """Parse an XML document given as a string (not a readable object)."""
    return parseString(st, caseInsensitive=0, preserveCase=1)
# Utility
class lmx:
    """Easy creation of XML."""

    def __init__(self, node='div'):
        """Wrap an existing Element, or create one from a tag name."""
        if isinstance(node, StringTypes):
            node = Element(node)
        self.node = node

    def __getattr__(self, name):
        """Treat attribute access as a child factory: ``l.foo(attr=...)``."""
        if name.startswith('_'):
            raise AttributeError("no private attrs")
        return lambda **kw: self.add(name, **kw)

    def __setitem__(self, key, val):
        """Set an attribute on the wrapped node."""
        self.node.setAttribute(key, val)

    def __getitem__(self, key):
        """Read an attribute from the wrapped node."""
        return self.node.getAttribute(key)

    def text(self, txt, raw=0):
        """Append a text child; returns self for chaining."""
        self.node.appendChild(Text(txt, raw=raw))
        return self

    def add(self, tagName, **kw):
        """Append a child element; keyword arguments become attributes.

        A single leading underscore is stripped from attribute names so
        Python reserved words can be passed (e.g. ``_class``).
        """
        child = Element(tagName, caseInsensitive=0, preserveCase=0)
        self.node.appendChild(child)
        wrapped = lmx(child)
        for key, value in kw.items():
            if key.startswith('_'):
                key = key[1:]
            wrapped[key] = value
        return wrapped
| |
#!/usr/bin/env python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (C) 2012 Robert Yang
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os, logging, re, sys
import bb
logger = logging.getLogger("BitBake.Monitor")
def printErr(info):
    """Report a fatal disk-monitor setup problem via the BitBake logger."""
    msg = "%s\n Disk space monitor will NOT be enabled" % info
    logger.error(msg)
def convertGMK(unit):
    """Convert a size such as '4G', '512M', '64K' or '1024' to a byte count.

    The unit suffix is case-insensitive and may be followed by a single
    whitespace character.  Returns None when *unit* is not recognised
    (including zero or leading-zero values, which the pattern rejects).
    """
    # One raw-string pattern replaces the four near-identical non-raw ones;
    # '\s' in a plain literal is an invalid escape on modern Python.
    match = re.match(r'([1-9][0-9]*)([gGmMkK]?)\s?$', unit)
    if not match:
        return None
    factors = {'g': 1024 ** 3, 'm': 1024 ** 2, 'k': 1024, '': 1}
    return int(match.group(1)) * factors[match.group(2).lower()]
def getMountedDev(path):
    """ Get the device mounted at the path, uses /proc/mounts """
    # Walk up from path until the device ID (st_dev) changes; the last
    # directory still on the original device is the mount point.
    parentDev = currentDev = os.stat(path).st_dev
    mountPoint = path
    while parentDev == currentDev:
        mountPoint = path
        # Use dirname to get the parent's directory
        path = os.path.dirname(path)
        if path == mountPoint:
            # Reached "/"
            break
        parentDev = os.stat(path).st_dev
    # Look the mount point up in /proc/mounts to find its device name.
    try:
        with open("/proc/mounts", "r") as ifp:
            for line in ifp:
                fields = line.rstrip('\n').split()
                if fields[1] == mountPoint:
                    return fields[0]
    except EnvironmentError:
        pass
    return None
def getDiskData(BBDirs, configuration):
    """Prepare disk data for disk space monitor.

    Parses BB_DISKMON_DIRS entries of the form "action,dir,space,inode"
    and returns a dict mapping "<path>/<action>" to
    [device, min space in bytes, min inode count], or None on any error.
    """
    # Save the device IDs, need the ID to be unique (the dictionary's key is
    # unique), so that when more than one directory is located on the same
    # device, we just monitor it once
    devDict = {}
    for pathSpaceInode in BBDirs.split():
        # The input format is: "dir,space,inode", dir is a must, space
        # and inode are optional
        pathSpaceInodeRe = re.match('([^,]*),([^,]*),([^,]*),?(.*)', pathSpaceInode)
        if not pathSpaceInodeRe:
            printErr("Invalid value in BB_DISKMON_DIRS: %s" % pathSpaceInode)
            return None

        action = pathSpaceInodeRe.group(1)
        if action not in ("ABORT", "STOPTASKS", "WARN"):
            printErr("Unknown disk space monitor action: %s" % action)
            return None

        path = os.path.realpath(pathSpaceInodeRe.group(2))
        if not path:
            printErr("Invalid path value in BB_DISKMON_DIRS: %s" % pathSpaceInode)
            return None

        # The disk space or inode is optional, but it should have a correct
        # value once it is specified
        minSpace = pathSpaceInodeRe.group(3)
        if minSpace:
            minSpace = convertGMK(minSpace)
            if not minSpace:
                printErr("Invalid disk space value in BB_DISKMON_DIRS: %s" % pathSpaceInodeRe.group(3))
                return None
        else:
            # None means that it is not specified
            minSpace = None

        minInode = pathSpaceInodeRe.group(4)
        if minInode:
            minInode = convertGMK(minInode)
            if not minInode:
                printErr("Invalid inode value in BB_DISKMON_DIRS: %s" % pathSpaceInodeRe.group(4))
                return None
        else:
            # None means that it is not specified
            minInode = None

        if minSpace is None and minInode is None:
            # Fixed word order of this message ("in found" -> "found in").
            printErr("No disk space or inode value found in BB_DISKMON_DIRS: %s" % pathSpaceInode)
            return None
        # mkdir for the directory since it may not exist, for example the
        # DL_DIR may not exist at the very beginning
        if not os.path.exists(path):
            bb.utils.mkdirhier(path)
        dev = getMountedDev(path)
        # Use path/action as the key
        devDict[os.path.join(path, action)] = [dev, minSpace, minInode]

    return devDict
def getInterval(configuration):
    """Return the (space, inode) warning intervals for the disk monitor.

    Reads BB_DISKMON_WARNINTERVAL ("space,inode"); either part may be
    omitted, in which case the defaults (50M, 5K) apply.  Returns
    (None, None) after logging an error when a part is malformed.
    """
    # The default value is 50M and 5K.
    spaceDefault = 50 * 1024 * 1024
    inodeDefault = 5 * 1024

    interval = configuration.getVar("BB_DISKMON_WARNINTERVAL", True)
    if not interval:
        return spaceDefault, inodeDefault
    else:
        # The disk space or inode interval is optional, but it should
        # have a correct value once it is specified.
        # Raw string: '\s' in a plain literal is an invalid escape on
        # modern Python.
        intervalRe = re.match(r'([^,]*),?\s*(.*)', interval)
        if intervalRe:
            intervalSpace = intervalRe.group(1)
            if intervalSpace:
                intervalSpace = convertGMK(intervalSpace)
                if not intervalSpace:
                    printErr("Invalid disk space interval value in BB_DISKMON_WARNINTERVAL: %s" % intervalRe.group(1))
                    return None, None
            else:
                intervalSpace = spaceDefault
            intervalInode = intervalRe.group(2)
            if intervalInode:
                intervalInode = convertGMK(intervalInode)
                if not intervalInode:
                    printErr("Invalid disk inode interval value in BB_DISKMON_WARNINTERVAL: %s" % intervalRe.group(2))
                    return None, None
            else:
                intervalInode = inodeDefault
            return intervalSpace, intervalInode
        else:
            printErr("Invalid interval value in BB_DISKMON_WARNINTERVAL: %s" % interval)
            return None, None
class diskMonitor:
    """Disk space monitor: warns, stops tasks, or aborts the build when a
    configured directory runs low on free space or inodes."""

    def __init__(self, configuration):
        """Parse BB_DISKMON_DIRS / BB_DISKMON_WARNINTERVAL and enable the
        monitor only when both parse successfully."""
        self.enableMonitor = False
        self.configuration = configuration

        BBDirs = configuration.getVar("BB_DISKMON_DIRS", True) or None
        if BBDirs:
            self.devDict = getDiskData(BBDirs, configuration)
            if self.devDict:
                self.spaceInterval, self.inodeInterval = getInterval(configuration)
                if self.spaceInterval and self.inodeInterval:
                    self.enableMonitor = True
                    # These are for saving the previous disk free space and inode, we
                    # use them to avoid printing too many warning messages
                    self.preFreeS = {}
                    self.preFreeI = {}
                    # This is for STOPTASKS and ABORT, to avoid printing the message
                    # repeatedly while waiting for the tasks to finish
                    self.checked = {}
                    for k in self.devDict:
                        self.preFreeS[k] = 0
                        self.preFreeI[k] = 0
                        self.checked[k] = False
                if self.spaceInterval is None and self.inodeInterval is None:
                    self.enableMonitor = False

    def check(self, rq):
        """ Take action for the monitor """
        if self.enableMonitor:
            for k in self.devDict:
                path = os.path.dirname(k)
                action = os.path.basename(k)
                dev = self.devDict[k][0]
                minSpace = self.devDict[k][1]
                minInode = self.devDict[k][2]

                st = os.statvfs(path)

                # The free space, float point number
                freeSpace = st.f_bavail * st.f_frsize
                if minSpace and freeSpace < minSpace:
                    # Always show warning, the self.checked would always be False if the action is WARN
                    if self.preFreeS[k] == 0 or self.preFreeS[k] - freeSpace > self.spaceInterval and not self.checked[k]:
                        # Logger.warn() is a deprecated alias removed in
                        # Python 3.13; warning() is the identical call.
                        logger.warning("The free space of %s (%s) is running low (%.3fGB left)" % \
                                       (path, dev, freeSpace / 1024 / 1024 / 1024.0))
                        self.preFreeS[k] = freeSpace

                    if action == "STOPTASKS" and not self.checked[k]:
                        logger.error("No new tasks can be executed since the disk space monitor action is \"STOPTASKS\"!")
                        self.checked[k] = True
                        rq.finish_runqueue(False)
                        bb.event.fire(bb.event.DiskFull(dev, 'disk', freeSpace, path), self.configuration)
                    elif action == "ABORT" and not self.checked[k]:
                        logger.error("Immediately abort since the disk space monitor action is \"ABORT\"!")
                        self.checked[k] = True
                        rq.finish_runqueue(True)
                        bb.event.fire(bb.event.DiskFull(dev, 'disk', freeSpace, path), self.configuration)

                # The free inodes, float point number
                freeInode = st.f_favail
                if minInode and freeInode < minInode:
                    # Some filesystems use dynamic inodes so can't run out
                    # (e.g. btrfs). This is reported by the inode count being 0.
                    if st.f_files == 0:
                        self.devDict[k][2] = None
                        continue
                    # Always show warning, the self.checked would always be False if the action is WARN
                    if self.preFreeI[k] == 0 or self.preFreeI[k] - freeInode > self.inodeInterval and not self.checked[k]:
                        logger.warning("The free inode of %s (%s) is running low (%.3fK left)" % \
                                       (path, dev, freeInode / 1024.0))
                        self.preFreeI[k] = freeInode

                    if action == "STOPTASKS" and not self.checked[k]:
                        logger.error("No new tasks can be executed since the disk space monitor action is \"STOPTASKS\"!")
                        self.checked[k] = True
                        rq.finish_runqueue(False)
                        bb.event.fire(bb.event.DiskFull(dev, 'inode', freeInode, path), self.configuration)
                    elif action == "ABORT" and not self.checked[k]:
                        logger.error("Immediately abort since the disk space monitor action is \"ABORT\"!")
                        self.checked[k] = True
                        rq.finish_runqueue(True)
                        bb.event.fire(bb.event.DiskFull(dev, 'inode', freeInode, path), self.configuration)
        return
| |
""" Cisco_IOS_XR_ppp_ea_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR ppp\-ea package operational data.
This module contains definitions
for the following management objects\:
pppea\: PPPEA operational data
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class PppEaAdjStateEnum(Enum):
    """
    Ppp ea adj state: whether the adjacency has been installed in the
    Adjacency Information Base (AIB).

    .. data:: ppp_ea_adj_state_not_installed = 0

    	Ajacency not installed in AIB

    .. data:: ppp_ea_adj_state_installed = 1

    	Adjacency installed in AIB

    """

    ppp_ea_adj_state_not_installed = 0

    ppp_ea_adj_state_installed = 1


    @staticmethod
    def _meta_info():
        # Import lazily to avoid a circular import at module load time.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ppp_ea_oper as meta
        return meta._meta_table['PppEaAdjStateEnum']
class Pppea(object):
"""
PPPEA operational data
.. attribute:: nodes
Per node PPPEA operational data
**type**\: :py:class:`Nodes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ppp_ea_oper.Pppea.Nodes>`
"""
_prefix = 'ppp-ea-oper'
_revision = '2015-11-09'
    def __init__(self):
        # Root container: holds the per-node operational data list.
        self.nodes = Pppea.Nodes()
        self.nodes.parent = self
class Nodes(object):
"""
Per node PPPEA operational data
.. attribute:: node
The PPPEA operational data for a particular node
**type**\: list of :py:class:`Node <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ppp_ea_oper.Pppea.Nodes.Node>`
"""
_prefix = 'ppp-ea-oper'
_revision = '2015-11-09'
        def __init__(self):
            self.parent = None
            # YList of Pppea.Nodes.Node entries, keyed by node_name.
            self.node = YList()
            self.node.parent = self
            self.node.name = 'node'
class Node(object):
"""
The PPPEA operational data for a particular
node
.. attribute:: node_name <key>
The identifier for the node
**type**\: str
**pattern:** ([a\-zA\-Z0\-9\_]\*\\d+/){1,2}([a\-zA\-Z0\-9\_]\*\\d+)
.. attribute:: ea_interface_names
Show interface related information from the PPP EA
**type**\: :py:class:`EaInterfaceNames <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ppp_ea_oper.Pppea.Nodes.Node.EaInterfaceNames>`
"""
_prefix = 'ppp-ea-oper'
_revision = '2015-11-09'
            def __init__(self):
                self.parent = None
                # Key leaf: identifier of the node this data belongs to.
                self.node_name = None
                self.ea_interface_names = Pppea.Nodes.Node.EaInterfaceNames()
                self.ea_interface_names.parent = self
class EaInterfaceNames(object):
"""
Show interface related information from the
PPP EA
.. attribute:: ea_interface_name
Interface name
**type**\: list of :py:class:`EaInterfaceName <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ppp_ea_oper.Pppea.Nodes.Node.EaInterfaceNames.EaInterfaceName>`
"""
_prefix = 'ppp-ea-oper'
_revision = '2015-11-09'
                def __init__(self):
                    self.parent = None
                    # YList of EaInterfaceName entries, keyed by interface_name.
                    self.ea_interface_name = YList()
                    self.ea_interface_name.parent = self
                    self.ea_interface_name.name = 'ea_interface_name'
class EaInterfaceName(object):
"""
Interface name
.. attribute:: interface_name <key>
Interface running PPPEA
**type**\: str
**pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)
.. attribute:: echo_request_interval
Echo\-Request interval
**type**\: int
**range:** 0..4294967295
.. attribute:: echo_request_retry_count
Echo\-Request retry count
**type**\: int
**range:** 0..4294967295
.. attribute:: forwarding_enabled
Forwarding State
**type**\: bool
.. attribute:: interface
Interface
**type**\: str
**pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)
.. attribute:: interface_adjacency_state
Interface adjacency state
**type**\: :py:class:`PppEaAdjStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ppp_ea_oper.PppEaAdjStateEnum>`
.. attribute:: ipv4_adjacency_state
Ipv4 adjacency state
**type**\: :py:class:`PppEaAdjStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ppp_ea_oper.PppEaAdjStateEnum>`
.. attribute:: ipv6_adjacency_state
IPv6 adjacency state
**type**\: :py:class:`PppEaAdjStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ppp_ea_oper.PppEaAdjStateEnum>`
.. attribute:: ipv6vrf_table_id
IPv6CP VRF Table ID
**type**\: int
**range:** 0..4294967295
.. attribute:: is_ipcp_running
TRUE if IPCP is running in the dataplane for the interface
**type**\: bool
.. attribute:: is_ipv6cp_running
TRUE if IPV6CP is running in the dataplane for the interface
**type**\: bool
.. attribute:: is_lcp_running
TRUE if LCP is running in the dataplane for the interface
**type**\: bool
.. attribute:: is_mplscp_running
TRUE if MPLSCP is running in the dataplane for the interface
**type**\: bool
.. attribute:: is_multilink_bundle
TRUE if this is a Multilink bundle interface
**type**\: bool
.. attribute:: is_vpdn_tunneled
Is VPDN tunneled
**type**\: bool
.. attribute:: l2_adjacency_state
L2 adjacency state
**type**\: :py:class:`PppEaAdjStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ppp_ea_oper.PppEaAdjStateEnum>`
.. attribute:: l2_provisioned
L2 Provisioned State
**type**\: bool
.. attribute:: l2_tunnel_enabled
L2 Tunnel State
**type**\: bool
.. attribute:: l2ip_interworking_adjacency_state
L2 IP Interworking adjacency state
**type**\: :py:class:`PppEaAdjStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ppp_ea_oper.PppEaAdjStateEnum>`
.. attribute:: l2ip_interworking_enabled
L2 IP Interworking State
**type**\: bool
.. attribute:: lac_adjacency_state
LAC adjacency state
**type**\: :py:class:`PppEaAdjStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ppp_ea_oper.PppEaAdjStateEnum>`
.. attribute:: local_magic
Local magic number
**type**\: int
**range:** 0..4294967295
.. attribute:: local_mcmp_classes
Local number of MCMP Suspension classes
**type**\: int
**range:** 0..255
.. attribute:: local_mrru
Local MRRU
**type**\: int
**range:** 0..65535
.. attribute:: local_mtu
Local interface MTU
**type**\: int
**range:** 0..65535
.. attribute:: mpls_adjacency_state
MPLS adjacency state
**type**\: :py:class:`PppEaAdjStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ppp_ea_oper.PppEaAdjStateEnum>`
.. attribute:: multilink_interface
Multilink interface that this interface is a member of, if any
**type**\: str
**pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)
.. attribute:: parent_interface_handle
Parent Interface Handle
**type**\: str
**pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)
.. attribute:: peer_magic
Peer magic number
**type**\: int
**range:** 0..4294967295
.. attribute:: peer_mcmp_classes
Peer number of MCMP Suspension classes
**type**\: int
**range:** 0..255
.. attribute:: peer_mrru
Peer MRRU
**type**\: int
**range:** 0..65535
.. attribute:: synchronized
MA synchronization
**type**\: bool
.. attribute:: vrf_table_id
IPCP VRF Table ID
**type**\: int
**range:** 0..4294967295
.. attribute:: xconnect_id
XConnect ID
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'ppp-ea-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.interface_name = None
self.echo_request_interval = None
self.echo_request_retry_count = None
self.forwarding_enabled = None
self.interface = None
self.interface_adjacency_state = None
self.ipv4_adjacency_state = None
self.ipv6_adjacency_state = None
self.ipv6vrf_table_id = None
self.is_ipcp_running = None
self.is_ipv6cp_running = None
self.is_lcp_running = None
self.is_mplscp_running = None
self.is_multilink_bundle = None
self.is_vpdn_tunneled = None
self.l2_adjacency_state = None
self.l2_provisioned = None
self.l2_tunnel_enabled = None
self.l2ip_interworking_adjacency_state = None
self.l2ip_interworking_enabled = None
self.lac_adjacency_state = None
self.local_magic = None
self.local_mcmp_classes = None
self.local_mrru = None
self.local_mtu = None
self.mpls_adjacency_state = None
self.multilink_interface = None
self.parent_interface_handle = None
self.peer_magic = None
self.peer_mcmp_classes = None
self.peer_mrru = None
self.synchronized = None
self.vrf_table_id = None
self.xconnect_id = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.interface_name is None:
raise YPYModelError('Key property interface_name is None')
return self.parent._common_path +'/Cisco-IOS-XR-ppp-ea-oper:ea-interface-name[Cisco-IOS-XR-ppp-ea-oper:interface-name = ' + str(self.interface_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.interface_name is not None:
return True
if self.echo_request_interval is not None:
return True
if self.echo_request_retry_count is not None:
return True
if self.forwarding_enabled is not None:
return True
if self.interface is not None:
return True
if self.interface_adjacency_state is not None:
return True
if self.ipv4_adjacency_state is not None:
return True
if self.ipv6_adjacency_state is not None:
return True
if self.ipv6vrf_table_id is not None:
return True
if self.is_ipcp_running is not None:
return True
if self.is_ipv6cp_running is not None:
return True
if self.is_lcp_running is not None:
return True
if self.is_mplscp_running is not None:
return True
if self.is_multilink_bundle is not None:
return True
if self.is_vpdn_tunneled is not None:
return True
if self.l2_adjacency_state is not None:
return True
if self.l2_provisioned is not None:
return True
if self.l2_tunnel_enabled is not None:
return True
if self.l2ip_interworking_adjacency_state is not None:
return True
if self.l2ip_interworking_enabled is not None:
return True
if self.lac_adjacency_state is not None:
return True
if self.local_magic is not None:
return True
if self.local_mcmp_classes is not None:
return True
if self.local_mrru is not None:
return True
if self.local_mtu is not None:
return True
if self.mpls_adjacency_state is not None:
return True
if self.multilink_interface is not None:
return True
if self.parent_interface_handle is not None:
return True
if self.peer_magic is not None:
return True
if self.peer_mcmp_classes is not None:
return True
if self.peer_mrru is not None:
return True
if self.synchronized is not None:
return True
if self.vrf_table_id is not None:
return True
if self.xconnect_id is not None:
return True
return False
@staticmethod
def _meta_info():
    """Return generated YDK metadata for the EaInterfaceName list entry."""
    # Imported lazily: the generated _meta table is large and importing it
    # at module import time would be wasteful / circular.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ppp_ea_oper as meta
    return meta._meta_table['Pppea.Nodes.Node.EaInterfaceNames.EaInterfaceName']['meta_info']
@property
def _common_path(self):
    """XPath of the ea-interface-names container (parent path + segment)."""
    if self.parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    return self.parent._common_path +'/Cisco-IOS-XR-ppp-ea-oper:ea-interface-names'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # Operational-state (oper) model: never configuration data.
    return False
def _has_data(self):
if self.ea_interface_name is not None:
for child_ref in self.ea_interface_name:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
    """Return generated YDK metadata for the EaInterfaceNames container."""
    # Lazy import of the generated metadata table (see EaInterfaceName).
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ppp_ea_oper as meta
    return meta._meta_table['Pppea.Nodes.Node.EaInterfaceNames']['meta_info']
@property
def _common_path(self):
    """Absolute XPath of this node entry, keyed by node-name."""
    if self.node_name is None:
        raise YPYModelError('Key property node_name is None')
    # The node list hangs directly off the module root, so no parent is
    # required here (unlike the nested list entries).
    return '/Cisco-IOS-XR-ppp-ea-oper:pppea/Cisco-IOS-XR-ppp-ea-oper:nodes/Cisco-IOS-XR-ppp-ea-oper:node[Cisco-IOS-XR-ppp-ea-oper:node-name = ' + str(self.node_name) + ']'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # Operational-state (oper) model: never configuration data.
    return False
def _has_data(self):
if self.node_name is not None:
return True
if self.ea_interface_names is not None and self.ea_interface_names._has_data():
return True
return False
@staticmethod
def _meta_info():
    """Return generated YDK metadata for the Node list entry."""
    # Lazy import of the generated metadata table (see EaInterfaceName).
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ppp_ea_oper as meta
    return meta._meta_table['Pppea.Nodes.Node']['meta_info']
@property
def _common_path(self):
    """Fixed absolute XPath of the nodes container (no keys)."""
    return '/Cisco-IOS-XR-ppp-ea-oper:pppea/Cisco-IOS-XR-ppp-ea-oper:nodes'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # Operational-state (oper) model: never configuration data.
    return False
def _has_data(self):
if self.node is not None:
for child_ref in self.node:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
    """Return generated YDK metadata for the Nodes container."""
    # Lazy import of the generated metadata table (see EaInterfaceName).
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ppp_ea_oper as meta
    return meta._meta_table['Pppea.Nodes']['meta_info']
@property
def _common_path(self):
    """Fixed absolute XPath of the pppea module root."""
    return '/Cisco-IOS-XR-ppp-ea-oper:pppea'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # Operational-state (oper) model: never configuration data.
    return False
def _has_data(self):
if self.nodes is not None and self.nodes._has_data():
return True
return False
@staticmethod
def _meta_info():
    """Return generated YDK metadata for the Pppea root class."""
    # Lazy import of the generated metadata table (see EaInterfaceName).
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ppp_ea_oper as meta
    return meta._meta_table['Pppea']['meta_info']
| |
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python command line interface for converting TF models to TFLite models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import warnings
import six
from six.moves import zip
from tensorflow.lite.python import lite
from tensorflow.lite.python.convert import register_custom_opdefs
from tensorflow.lite.toco import toco_flags_pb2 as _toco_flags_pb2
from tensorflow.lite.toco.logging import gen_html
from tensorflow.python import tf2
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import app
from tensorflow.python.util import keras_deps
def _parse_array(values, type_fn=str):
if values is not None:
return [type_fn(val) for val in six.ensure_str(values).split(",") if val]
return None
def _parse_set(values):
if values is not None:
return set([item for item in six.ensure_str(values).split(",") if item])
return None
def _parse_inference_type(value, flag):
"""Converts the inference type to the value of the constant.
Args:
value: str representing the inference type.
flag: str representing the flag name.
Returns:
tf.dtype.
Raises:
ValueError: Unsupported value.
"""
if value == "FLOAT":
return dtypes.float32
if value == "INT8":
return dtypes.int8
if value == "UINT8" or value == "QUANTIZED_UINT8":
return dtypes.uint8
raise ValueError(
"Unsupported value for `{}` flag. Expected FLOAT, INT8 or UINT8, instead "
"got {}.".format(flag, value))
def _get_tflite_converter(flags):
  """Makes a TFLiteConverter object based on the flags provided.
  Args:
    flags: argparse.Namespace object containing TFLite flags.
  Returns:
    TFLiteConverter object.
  Raises:
    ValueError: Invalid flags.
  """
  # Parse input and output arrays.
  input_arrays = _parse_array(flags.input_arrays)
  input_shapes = None
  if flags.input_shapes:
    # Shapes are colon-separated per input; each is a comma-separated int
    # list. Names and shapes are paired positionally (_check_tf1_flags has
    # already verified the counts agree).
    input_shapes_list = [
        _parse_array(shape, type_fn=int)
        for shape in six.ensure_str(flags.input_shapes).split(":")
    ]
    input_shapes = dict(list(zip(input_arrays, input_shapes_list)))
  output_arrays = _parse_array(flags.output_arrays)
  converter_kwargs = {
      "input_arrays": input_arrays,
      "input_shapes": input_shapes,
      "output_arrays": output_arrays
  }
  # Create TFLiteConverter. Exactly one of the three source flags selects
  # the factory; SavedModel additionally forwards tag set / signature key.
  if flags.graph_def_file:
    converter_fn = lite.TFLiteConverter.from_frozen_graph
    converter_kwargs["graph_def_file"] = flags.graph_def_file
  elif flags.saved_model_dir:
    converter_fn = lite.TFLiteConverter.from_saved_model
    converter_kwargs["saved_model_dir"] = flags.saved_model_dir
    converter_kwargs["tag_set"] = _parse_set(flags.saved_model_tag_set)
    converter_kwargs["signature_key"] = flags.saved_model_signature_key
  elif flags.keras_model_file:
    converter_fn = lite.TFLiteConverter.from_keras_model_file
    converter_kwargs["model_file"] = flags.keras_model_file
  else:
    raise ValueError("--graph_def_file, --saved_model_dir, or "
                     "--keras_model_file must be specified.")
  return converter_fn(**converter_kwargs)
def _convert_tf1_model(flags):
  """Calls function to convert the TensorFlow 1.X model into a TFLite model.

  Applies every optional converter attribute the user set on the command
  line, then converts and writes the flatbuffer to flags.output_file.

  Args:
    flags: argparse.Namespace object.
  Raises:
    ValueError: Invalid flags.
  """
  # Register custom opdefs before converter object creation.
  if flags.custom_opdefs:
    register_custom_opdefs(_parse_array(flags.custom_opdefs))
  # Create converter.
  converter = _get_tflite_converter(flags)
  if flags.inference_type:
    converter.inference_type = _parse_inference_type(flags.inference_type,
                                                     "inference_type")
  if flags.inference_input_type:
    converter.inference_input_type = _parse_inference_type(
        flags.inference_input_type, "inference_input_type")
  if flags.output_format:
    converter.output_format = _toco_flags_pb2.FileFormat.Value(
        flags.output_format)
  if flags.mean_values and flags.std_dev_values:
    input_arrays = converter.get_input_arrays()
    std_dev_values = _parse_array(flags.std_dev_values, type_fn=float)
    # In quantized inference, mean_value has to be integer so that the real
    # value 0.0 is exactly representable.
    if converter.inference_type == dtypes.float32:
      mean_values = _parse_array(flags.mean_values, type_fn=float)
    else:
      mean_values = _parse_array(flags.mean_values, type_fn=int)
    quant_stats = list(zip(mean_values, std_dev_values))
    if ((not flags.input_arrays and len(input_arrays) > 1) or
        (len(input_arrays) != len(quant_stats))):
      raise ValueError("Mismatching --input_arrays, --std_dev_values, and "
                       "--mean_values. The flags must have the same number of "
                       "items. The current input arrays are '{0}'. "
                       "--input_arrays must be present when specifying "
                       "--std_dev_values and --mean_values with multiple input "
                       "tensors in order to map between names and "
                       "values.".format(",".join(input_arrays)))
    converter.quantized_input_stats = dict(list(zip(input_arrays, quant_stats)))
  if (flags.default_ranges_min is not None) and (flags.default_ranges_max is
                                                 not None):
    converter.default_ranges_stats = (flags.default_ranges_min,
                                      flags.default_ranges_max)
  if flags.drop_control_dependency:
    converter.drop_control_dependency = flags.drop_control_dependency
  if flags.reorder_across_fake_quant:
    converter.reorder_across_fake_quant = flags.reorder_across_fake_quant
  if flags.change_concat_input_ranges:
    converter.change_concat_input_ranges = (
        flags.change_concat_input_ranges == "TRUE")
  if flags.allow_custom_ops:
    converter.allow_custom_ops = flags.allow_custom_ops
  if flags.target_ops:
    ops_set_options = lite.OpsSet.get_options()
    converter.target_spec.supported_ops = set()
    for option in six.ensure_str(flags.target_ops).split(","):
      if option not in ops_set_options:
        raise ValueError("Invalid value for --target_ops. Options: "
                         "{0}".format(",".join(ops_set_options)))
      converter.target_spec.supported_ops.add(lite.OpsSet(option))
  if flags.post_training_quantize:
    converter.optimizations = [lite.Optimize.DEFAULT]
    if converter.inference_type != dtypes.float32:
      print("--post_training_quantize quantizes a graph of inference_type "
            "FLOAT. Overriding inference_type to FLOAT.")
      converter.inference_type = dtypes.float32
  if flags.quantize_to_float16:
    converter.target_spec.supported_types = [dtypes.float16]
    if not flags.post_training_quantize:
      print("--quantize_to_float16 will only take effect with the "
            "--post_training_quantize flag enabled.")
  if flags.dump_graphviz_dir:
    converter.dump_graphviz_dir = flags.dump_graphviz_dir
  if flags.dump_graphviz_video:
    # Bug fix: the attribute name was misspelled as `dump_graphviz_vode`,
    # so --dump_graphviz_video silently had no effect on the converter.
    converter.dump_graphviz_video = flags.dump_graphviz_video
  if flags.conversion_summary_dir:
    converter.conversion_summary_dir = flags.conversion_summary_dir
  if flags.experimental_new_converter is not None:
    converter.experimental_new_converter = flags.experimental_new_converter
  # Convert model.
  output_data = converter.convert()
  with open(flags.output_file, "wb") as f:
    f.write(six.ensure_binary(output_data))
def _convert_tf2_model(flags):
"""Calls function to convert the TensorFlow 2.0 model into a TFLite model.
Args:
flags: argparse.Namespace object.
Raises:
ValueError: Unsupported file format.
"""
# Load the model.
if flags.saved_model_dir:
converter = lite.TFLiteConverterV2.from_saved_model(flags.saved_model_dir)
elif flags.keras_model_file:
model = keras_deps.get_load_model_function()(flags.keras_model_file)
converter = lite.TFLiteConverterV2.from_keras_model(model)
if flags.experimental_new_converter is not None:
converter.experimental_new_converter = flags.experimental_new_converter
# Convert the model.
tflite_model = converter.convert()
with open(flags.output_file, "wb") as f:
f.write(six.ensure_binary(tflite_model))
def _check_tf1_flags(flags, unparsed):
"""Checks the parsed and unparsed flags to ensure they are valid in 1.X.
Raises an error if previously support unparsed flags are found. Raises an
error for parsed flags that don't meet the required conditions.
Args:
flags: argparse.Namespace object containing TFLite flags.
unparsed: List of unparsed flags.
Raises:
ValueError: Invalid flags.
"""
# Check unparsed flags for common mistakes based on previous TOCO.
def _get_message_unparsed(flag, orig_flag, new_flag):
if six.ensure_str(flag).startswith(orig_flag):
return "\n Use {0} instead of {1}".format(new_flag, orig_flag)
return ""
if unparsed:
output = ""
for flag in unparsed:
output += _get_message_unparsed(flag, "--input_file", "--graph_def_file")
output += _get_message_unparsed(flag, "--savedmodel_directory",
"--saved_model_dir")
output += _get_message_unparsed(flag, "--std_value", "--std_dev_values")
output += _get_message_unparsed(flag, "--batch_size", "--input_shapes")
output += _get_message_unparsed(flag, "--dump_graphviz",
"--dump_graphviz_dir")
if output:
raise ValueError(output)
# Check that flags are valid.
if flags.graph_def_file and (not flags.input_arrays or
not flags.output_arrays):
raise ValueError("--input_arrays and --output_arrays are required with "
"--graph_def_file")
if flags.input_shapes:
if not flags.input_arrays:
raise ValueError("--input_shapes must be used with --input_arrays")
if flags.input_shapes.count(":") != flags.input_arrays.count(","):
raise ValueError("--input_shapes and --input_arrays must have the same "
"number of items")
if flags.std_dev_values or flags.mean_values:
if bool(flags.std_dev_values) != bool(flags.mean_values):
raise ValueError("--std_dev_values and --mean_values must be used "
"together")
if flags.std_dev_values.count(",") != flags.mean_values.count(","):
raise ValueError("--std_dev_values, --mean_values must have the same "
"number of items")
if (flags.default_ranges_min is None) != (flags.default_ranges_max is None):
raise ValueError("--default_ranges_min and --default_ranges_max must be "
"used together")
if flags.dump_graphviz_video and not flags.dump_graphviz_dir:
raise ValueError("--dump_graphviz_video must be used with "
"--dump_graphviz_dir")
if flags.custom_opdefs and not flags.experimental_new_converter:
raise ValueError("--custom_opdefs must be used with "
"--experimental_new_converter")
if flags.custom_opdefs and not flags.allow_custom_ops:
raise ValueError("--custom_opdefs must be used with --allow_custom_ops")
def _check_tf2_flags(flags):
"""Checks the parsed and unparsed flags to ensure they are valid in 2.X.
Args:
flags: argparse.Namespace object containing TFLite flags.
Raises:
ValueError: Invalid flags.
"""
if not flags.keras_model_file and not flags.saved_model_dir:
raise ValueError("one of the arguments --saved_model_dir "
"--keras_model_file is required")
def _get_tf1_flags(parser):
  """Returns ArgumentParser for tflite_convert for TensorFlow 1.X.
  Args:
    parser: ArgumentParser
  """
  # Input file flags. Exactly one model source must be given.
  input_file_group = parser.add_mutually_exclusive_group(required=True)
  input_file_group.add_argument(
      "--graph_def_file",
      type=str,
      help="Full filepath of file containing frozen TensorFlow GraphDef.")
  input_file_group.add_argument(
      "--saved_model_dir",
      type=str,
      help="Full filepath of directory containing the SavedModel.")
  input_file_group.add_argument(
      "--keras_model_file",
      type=str,
      help="Full filepath of HDF5 file containing tf.Keras model.")
  # Model format flags.
  parser.add_argument(
      "--output_format",
      type=str.upper,
      choices=["TFLITE", "GRAPHVIZ_DOT"],
      help="Output file format.")
  parser.add_argument(
      "--inference_type",
      type=str.upper,
      default="FLOAT",
      help=("Target data type of real-number arrays in the output file. "
            "Must be either FLOAT, INT8 or UINT8."))
  parser.add_argument(
      "--inference_input_type",
      type=str.upper,
      help=("Target data type of real-number input arrays. Allows for a "
            "different type for input arrays in the case of quantization. "
            "Must be either FLOAT, INT8 or UINT8."))
  # Input and output arrays flags.
  parser.add_argument(
      "--input_arrays",
      type=str,
      help="Names of the input arrays, comma-separated.")
  parser.add_argument(
      "--input_shapes",
      type=str,
      help="Shapes corresponding to --input_arrays, colon-separated.")
  parser.add_argument(
      "--output_arrays",
      type=str,
      help="Names of the output arrays, comma-separated.")
  # SavedModel related flags.
  parser.add_argument(
      "--saved_model_tag_set",
      type=str,
      help=("Comma-separated set of tags identifying the MetaGraphDef within "
            "the SavedModel to analyze. All tags must be present. In order to "
            "pass in an empty tag set, pass in \"\". (default \"serve\")"))
  parser.add_argument(
      "--saved_model_signature_key",
      type=str,
      help=("Key identifying the SignatureDef containing inputs and outputs. "
            "(default DEFAULT_SERVING_SIGNATURE_DEF_KEY)"))
  # Quantization flags.
  parser.add_argument(
      "--std_dev_values",
      type=str,
      help=("Standard deviation of training data for each input tensor, "
            "comma-separated floats. Used for quantized input tensors. "
            "(default None)"))
  parser.add_argument(
      "--mean_values",
      type=str,
      help=("Mean of training data for each input tensor, comma-separated "
            "floats. Used for quantized input tensors. (default None)"))
  parser.add_argument(
      "--default_ranges_min",
      type=float,
      help=("Default value for min bound of min/max range values used for all "
            "arrays without a specified range, Intended for experimenting with "
            "quantization via \"dummy quantization\". (default None)"))
  parser.add_argument(
      "--default_ranges_max",
      type=float,
      help=("Default value for max bound of min/max range values used for all "
            "arrays without a specified range, Intended for experimenting with "
            "quantization via \"dummy quantization\". (default None)"))
  # quantize_weights is DEPRECATED. It shares a dest with
  # --post_training_quantize so the old spelling keeps working, hidden from
  # --help via argparse.SUPPRESS.
  parser.add_argument(
      "--quantize_weights",
      dest="post_training_quantize",
      action="store_true",
      help=argparse.SUPPRESS)
  parser.add_argument(
      "--post_training_quantize",
      dest="post_training_quantize",
      action="store_true",
      help=(
          "Boolean indicating whether to quantize the weights of the "
          "converted float model. Model size will be reduced and there will "
          "be latency improvements (at the cost of accuracy). (default False)"))
  parser.add_argument(
      "--quantize_to_float16",
      dest="quantize_to_float16",
      action="store_true",
      help=("Boolean indicating whether to quantize weights to fp16 instead of "
            "the default int8 when post-training quantization "
            "(--post_training_quantize) is enabled. (default False)"))
  # Graph manipulation flags.
  parser.add_argument(
      "--drop_control_dependency",
      action="store_true",
      help=("Boolean indicating whether to drop control dependencies silently. "
            "This is due to TensorFlow not supporting control dependencies. "
            "(default True)"))
  parser.add_argument(
      "--reorder_across_fake_quant",
      action="store_true",
      help=("Boolean indicating whether to reorder FakeQuant nodes in "
            "unexpected locations. Used when the location of the FakeQuant "
            "nodes is preventing graph transformations necessary to convert "
            "the graph. Results in a graph that differs from the quantized "
            "training graph, potentially causing differing arithmetic "
            "behavior. (default False)"))
  # Usage for this flag is --change_concat_input_ranges=true or
  # --change_concat_input_ranges=false in order to make it clear what the flag
  # is set to. This keeps the usage consistent with other usages of the flag
  # where the default is different. The default value here is False.
  parser.add_argument(
      "--change_concat_input_ranges",
      type=str.upper,
      choices=["TRUE", "FALSE"],
      help=("Boolean to change behavior of min/max ranges for inputs and "
            "outputs of the concat operator for quantized models. Changes the "
            "ranges of concat operator overlap when true. (default False)"))
  # Permitted ops flags.
  parser.add_argument(
      "--allow_custom_ops",
      action="store_true",
      help=("Boolean indicating whether to allow custom operations. When false "
            "any unknown operation is an error. When true, custom ops are "
            "created for any op that is unknown. The developer will need to "
            "provide these to the TensorFlow Lite runtime with a custom "
            "resolver. (default False)"))
  parser.add_argument(
      "--custom_opdefs",
      type=str,
      help=("String representing a list of custom ops OpDefs delineated with "
            "commas that are included in the GraphDef. Required when using "
            "custom operations with --experimental_new_converter."))
  # NOTE: the available OpsSet options are evaluated once, when the parser
  # is built, via lite.OpsSet.get_options().
  parser.add_argument(
      "--target_ops",
      type=str,
      help=("Experimental flag, subject to change. Set of OpsSet options "
            "indicating which converter to use. Options: {0}. One or more "
            "option may be specified. (default set([OpsSet.TFLITE_BUILTINS]))"
            "".format(",".join(lite.OpsSet.get_options()))))
  # Logging flags.
  parser.add_argument(
      "--dump_graphviz_dir",
      type=str,
      help=("Full filepath of folder to dump the graphs at various stages of "
            "processing GraphViz .dot files. Preferred over --output_format="
            "GRAPHVIZ_DOT in order to keep the requirements of the output "
            "file."))
  parser.add_argument(
      "--dump_graphviz_video",
      action="store_true",
      help=("Boolean indicating whether to dump the graph after every graph "
            "transformation"))
  parser.add_argument(
      "--conversion_summary_dir",
      type=str,
      help=("Full filepath to store the conversion logs, which includes "
            "graphviz of the model before/after the conversion, an HTML report "
            "and the conversion proto buffers. This will only be generated "
            "when passing --experimental_new_converter"))
def _get_tf2_flags(parser):
  """Returns ArgumentParser for tflite_convert for TensorFlow 2.0.
  Args:
    parser: ArgumentParser
  """
  # Input file flags. Not `required` here: _check_tf2_flags reports the
  # missing-source error after parsing instead.
  input_file_group = parser.add_mutually_exclusive_group()
  input_file_group.add_argument(
      "--saved_model_dir",
      type=str,
      help="Full path of the directory containing the SavedModel.")
  input_file_group.add_argument(
      "--keras_model_file",
      type=str,
      help="Full filepath of HDF5 file containing tf.Keras model.")
  # Enables 1.X converter in 2.X.
  parser.add_argument(
      "--enable_v1_converter",
      action="store_true",
      help=("Enables the TensorFlow V1 converter in 2.0"))
class _ParseExperimentalNewConverter(argparse.Action):
"""Helper class to parse --experimental_new_converter argument."""
def __init__(self, option_strings, dest, nargs=None, **kwargs):
if nargs != "?":
# This should never happen. This class is only used once below with
# nargs="?".
raise ValueError(
"This parser only supports nargs='?' (0 or 1 additional arguments)")
super(_ParseExperimentalNewConverter, self).__init__(
option_strings, dest, nargs=nargs, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
if values is None:
# Handling `--experimental_new_converter`.
# Without additional arguments, it implies enabling the new converter.
experimental_new_converter = True
elif values.lower() == "true":
# Handling `--experimental_new_converter=true`.
# (Case insensitive after the equal sign)
experimental_new_converter = True
elif values.lower() == "false":
# Handling `--experimental_new_converter=false`.
# (Case insensitive after the equal sign)
experimental_new_converter = False
else:
raise ValueError("Invalid --experimental_new_converter argument.")
setattr(namespace, self.dest, experimental_new_converter)
def _get_parser(use_v2_converter):
  """Returns an ArgumentParser for tflite_convert.
  Args:
    use_v2_converter: Indicates which converter to return.
  Return: ArgumentParser.
  """
  parser = argparse.ArgumentParser(
      description=("Command line tool to run TensorFlow Lite Converter."))

  # Output file flag.
  parser.add_argument(
      "--output_file",
      type=str,
      help="Full filepath of the output file.",
      required=True)

  # Version-specific flags: TF2 takes SavedModel/Keras only; TF1 exposes
  # the full TOCO flag set.
  if use_v2_converter:
    _get_tf2_flags(parser)
  else:
    _get_tf1_flags(parser)

  # nargs="?" plus the custom action accepts either the bare flag or an
  # explicit `=true`/`=false` value.
  parser.add_argument(
      "--experimental_new_converter",
      action=_ParseExperimentalNewConverter,
      nargs="?",
      help=("Experimental flag, subject to change. Enables MLIR-based "
            "conversion instead of TOCO conversion. (default True)"))
  return parser
def run_main(_):
  """Main in tflite_convert.py.

  Parses CLI flags with the converter matching the active TF major version,
  validates them, and dispatches to the 1.X or 2.X conversion path.
  """
  use_v2_converter = tf2.enabled()
  parser = _get_parser(use_v2_converter)
  tflite_flags, unparsed = parser.parse_known_args(args=sys.argv[1:])

  # If the user is running TensorFlow 2.X but has passed in enable_v1_converter
  # then parse the flags again with the 1.X converter flags.
  if tf2.enabled() and tflite_flags.enable_v1_converter:
    use_v2_converter = False
    parser = _get_parser(use_v2_converter)
    tflite_flags, unparsed = parser.parse_known_args(args=sys.argv[1:])

  # Checks if the flags are valid.
  try:
    if use_v2_converter:
      _check_tf2_flags(tflite_flags)
    else:
      _check_tf1_flags(tflite_flags, unparsed)
  except ValueError as e:
    # Mirror argparse's own error style, then exit non-zero.
    parser.print_usage()
    file_name = os.path.basename(sys.argv[0])
    sys.stderr.write("{0}: error: {1}\n".format(file_name, str(e)))
    sys.exit(1)

  # Convert the model according to the user provided flag.
  if use_v2_converter:
    _convert_tf2_model(tflite_flags)
  else:
    try:
      _convert_tf1_model(tflite_flags)
    finally:
      # Emit the HTML conversion report even when conversion fails, if the
      # user requested a summary directory and the new converter is active.
      if tflite_flags.conversion_summary_dir:
        if tflite_flags.experimental_new_converter:
          gen_html.gen_conversion_log_html(tflite_flags.conversion_summary_dir,
                                           tflite_flags.post_training_quantize,
                                           tflite_flags.output_file)
        else:
          warnings.warn(
              "Conversion summary will only be generated when enabling"
              " the new converter via --experimental_new_converter. ")
def main():
  # Delegate to run_main via TF's app wrapper; argv[:1] passes only the
  # program name because run_main re-reads sys.argv itself.
  app.run(main=run_main, argv=sys.argv[:1])


if __name__ == "__main__":
  main()
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db.migrations.operations.base import Operation
from pg_fts.fields import TSVectorField
# Public API of this module.
__all__ = ('CreateFTSIndexOperation', 'CreateFTSTriggerOperation',
           'DeleteFTSIndexOperation', 'DeleteFTSTriggerOperation',
           'UpdateVectorOperation')

# NOTE(review): because this string literal follows the imports and the
# __all__ assignment, it is a discarded expression, NOT the module
# docstring (module.__doc__ stays unset). Kept in place for reference.
"""
pg_fts.migrations
-----------------
Migrations module for `pg_fts.fields.TSVectorField`
@author: David Miguel
"""
class PgFtsSQL(object):
    """Builds the raw SQL for TSVectorField triggers, indexes and backfills.

    Table and column names come from Django model metadata and are spliced
    directly into the ``str.format`` templates below.
    """

    # Drops the per-table trigger and its companion function together.
    sql_delete_trigger = ("DROP TRIGGER {model}_{fts_name}_update ON \"{model}\";"
                          "DROP FUNCTION {model}_{fts_name}_update()")

    # Trigger function: (re)computes the vector on INSERT, and on UPDATE only
    # when one of the source columns changed; otherwise keeps the old vector.
    sql_create_trigger = """
CREATE FUNCTION {model}_{fts_name}_update() RETURNS TRIGGER AS $$
BEGIN
IF TG_OP = 'INSERT' THEN
new.{fts_name} = {vectors};
END IF;
IF TG_OP = 'UPDATE' THEN
IF {fts_fields} THEN
new.{fts_name} = {vectors};
ELSE
new.{fts_name} = old.{fts_name};
END IF;
END IF;
RETURN NEW;
END;
$$ LANGUAGE 'plpgsql';
CREATE TRIGGER {model}_{fts_name}_update BEFORE INSERT OR UPDATE ON \"{model}\"
FOR EACH ROW EXECUTE PROCEDURE {model}_{fts_name}_update()"""

    sql_create_index = ("CREATE INDEX {model}_{fts_name} ON \"{model}\" "
                        "USING {fts_index}({fts_name})")

    sql_delete_index = 'DROP INDEX {model}_{fts_name}'

    sql_update_vector = 'UPDATE \"{model}\" SET {vector} = {fields}'

    def delete_trigger(self, model, field):
        """Return SQL dropping the update trigger and function for `field`."""
        return self.sql_delete_trigger.format(
            model=model._meta.db_table,
            fts_name=field.get_attname_column()[1]
        )

    def create_fts_trigger(self, model, vector_field):
        """Return SQL creating the trigger that maintains `vector_field`.

        Raises:
            AttributeError: if `vector_field` is not a TSVectorField.
        """
        fields = []
        vectors = []
        if not isinstance(vector_field, TSVectorField):
            raise AttributeError('vector_field must be a TSVectorField')
        try:
            # Dictionary stored in a per-row column: resolve the column and
            # also re-vectorize when the dictionary itself changes.
            dict_field = model._meta.get_field(vector_field.dictionary)
            dictionary = "NEW.%s::regconfig" % (
                dict_field.get_attname_column()[1])
            fields.append('NEW.{0} <> OLD.{0}'.format(vector_field.dictionary))
        except Exception:
            # Bug fix: was a bare `except:` (caught SystemExit and
            # KeyboardInterrupt too). No such model field: the dictionary is
            # a literal text-search config name.
            dictionary = "'%s'" % vector_field.dictionary
        for field, rank in vector_field._get_fields_and_ranks():
            fields.append('NEW.{0} <> OLD.{0}'.format(
                field.get_attname_column()[1]))
            vectors.append(self._get_vector_for_field(field, rank, dictionary))
        return self.sql_create_trigger.format(
            model=model._meta.db_table,
            fts_name=vector_field.get_attname_column()[1],
            fts_fields=' OR '.join(fields),
            vectors=' || '.join(vectors)
        )

    def update_vector(self, model, vector_field):
        """Return SQL backfilling `vector_field` for all existing rows.

        Raises:
            AttributeError: if `vector_field` is not a TSVectorField.
        """
        vectors = []
        sql_fn = "setweight(to_tsvector(%s, COALESCE(%s, '')), '%s')"
        if not isinstance(vector_field, TSVectorField):
            raise AttributeError('vector_field must be a TSVectorField')
        try:
            dict_field = model._meta.get_field(vector_field.dictionary)
            dictionary = "%s::regconfig" % (
                dict_field.get_attname_column()[1])
        except Exception:
            # Bug fix: was a bare `except:`; see create_fts_trigger.
            dictionary = "'%s'" % vector_field.dictionary
        for field, rank in vector_field._get_fields_and_ranks():
            vectors.append(sql_fn % (
                dictionary, field.get_attname_column()[1], rank))
        return self.sql_update_vector.format(
            model=model._meta.db_table,
            vector=vector_field.get_attname_column()[1],
            fields=' || '.join(vectors)
        )

    def _get_vector_for_field(self, field, weight, dictionary):
        """Return one weighted setweight(to_tsvector(...)) term for a field."""
        return "setweight(to_tsvector(%s, COALESCE(NEW.%s, '')), '%s')" % (
            dictionary, field.get_attname_column()[1], weight
        )

    def create_index(self, model, vector_field, index):
        """Return SQL creating a `gin`/`gist` index on the vector column."""
        return self.sql_create_index.format(
            model=model._meta.db_table,
            fts_index=index,
            fts_name=vector_field.get_attname_column()[1]
        )

    def delete_index(self, model, vector_field):
        """Return SQL dropping the index created by create_index."""
        return self.sql_delete_index.format(
            model=model._meta.db_table,
            fts_name=vector_field.get_attname_column()[1]
        )
class BaseVectorOperation(Operation):
    """
    Base migrations class

    :param name: The Model name

    :param fts_vector: The :class:`~pg_fts.fields.TSVectorField` field name
    """

    # Pure-SQL operations; subclasses plug in the forward/backward SQL
    # builders from the shared PgFtsSQL instance.
    reduces_to_sql = True
    reversible = True
    sql_creator = PgFtsSQL()
    forward_fn = None
    backward_fn = None

    def __init__(self, name, fts_vector):
        self.name = name
        self.fts_vector = fts_vector

    def state_forwards(self, app_label, state):
        # Triggers/indexes are invisible to Django's in-memory model state,
        # so there is nothing to mutate here.
        pass

    def database_forwards(self, app_label, schema_editor, from_state,
                          to_state):
        # NOTE(review): from_state.render() predates the Django 1.8 apps
        # API (see the comment in CreateFTSIndexOperation in this file) —
        # confirm the supported Django versions.
        model = from_state.render().get_model(app_label, self.name)
        vector_field = model._meta.get_field(self.fts_vector)
        schema_editor.execute(self.forward_fn(
            model,
            vector_field
        ))

    def database_backwards(self, app_label, schema_editor, from_state,
                           to_state):
        model = from_state.render().get_model(app_label, self.name)
        vector_field = model._meta.get_field(self.fts_vector)
        schema_editor.execute(self.backward_fn(
            model,
            vector_field
        ))

    def describe(self):
        # NOTE(review): generic wording — subclasses that delete or update
        # should (and mostly do) override this message.
        return "Create trigger `%s` for model `%s`" % (
            self.fts_vector, self.name
        )
class UpdateVectorOperation(BaseVectorOperation):
    """
    Updates changes to :class:`~pg_fts.fields.TSVectorField` for existing
    models

    :param name: The Model name

    :param fts_vector: The :class:`~pg_fts.fields.TSVectorField` field name
    """

    def __init__(self, name, fts_vector):
        self.name = name
        self.fts_vector = fts_vector
        # Forward pass rewrites the stored vectors for all existing rows.
        self.forward_fn = self.sql_creator.update_vector

    def database_backwards(self, app_label, schema_editor, from_state,
                           to_state):
        # Recomputing vectors is a one-way data migration; nothing to undo.
        pass

    def describe(self):
        # Bug fix: previously said "Create trigger ..." (copy-paste from the
        # trigger operation); this operation updates the vector column.
        return "Update vector `%s` for model `%s`" % (
            self.fts_vector, self.name
        )
class CreateFTSTriggerOperation(BaseVectorOperation):
    """
    Creates a :pg_docs:`custom trigger <textsearch-features.html#TEXTSEARCH-UPDATE-TRIGGERS>`
    for updating the :class:`~pg_fts.fields.TSVectorField` with rank values

    :param name: The Model name

    :param fts_vector: The :class:`~pg_fts.fields.TSVectorField` field name
    """

    def __init__(self, name, fts_vector):
        self.name = name
        self.fts_vector = fts_vector
        # Forward creates the trigger + function; backward drops them both.
        self.forward_fn = self.sql_creator.create_fts_trigger
        self.backward_fn = self.sql_creator.delete_trigger

    def database_backwards(self, app_label, schema_editor, from_state,
                           to_state):
        # NOTE(review): byte-for-byte identical to the inherited
        # BaseVectorOperation.database_backwards; this override looks
        # redundant and could be removed.
        model = from_state.render().get_model(app_label, self.name)
        vector_field = model._meta.get_field(self.fts_vector)
        schema_editor.execute(self.backward_fn(
            model,
            vector_field
        ))

    def describe(self):
        return "Create trigger `%s` for model `%s`" % (
            self.fts_vector, self.name
        )
class DeleteFTSTriggerOperation(BaseVectorOperation):
    """
    Deletes trigger generated by :class:`~pg_fts.migrations.CreateFTSTriggerOperation`

    :param name: The Model name

    :param fts_vector: The :class:`~pg_fts.fields.TSVectorField` field name
    """

    def __init__(self, name, fts_vector):
        self.name = name
        self.fts_vector = fts_vector
        # Mirror image of CreateFTSTriggerOperation: forward drops the
        # trigger, backward re-creates it.
        self.forward_fn = self.sql_creator.delete_trigger
        self.backward_fn = self.sql_creator.create_fts_trigger

    def describe(self):
        return "Delete trigger `%s` for model `%s`" % (
            self.fts_vector, self.name
        )
class CreateFTSIndexOperation(BaseVectorOperation):
    """
    Creates a index for :class:`~pg_fts.fields.TSVectorField`

    :param name: The Model name
    :param fts_vector: The :class:`~pg_fts.fields.TSVectorField` field name
    :param index: The type of index 'gin' or 'gist' for more information go to
        :pg_docs:`PostgreSQL documentation 12.9. GiST and GIN Index Types
        <textsearch-indexes.html>`
    """

    # http://www.postgresql.org/docs/9.3/static/textsearch-indexes.html
    INDEXS = ('gin', 'gist')

    def __init__(self, name, fts_vector, index):
        assert index in self.INDEXS, "Invalid index '%s'. Options %s " % (
            index, ', '.join(self.INDEXS))
        self.name = name
        self.fts_vector = fts_vector
        self.index = index

    def state_forwards(self, app_label, state):
        # Creating a database index does not change Django's in-memory
        # model state.
        pass

    def database_forwards(self, app_label, schema_editor, from_state,
                          to_state):
        # django 1.8 doesn't have ProjectState.render()
        model = from_state.render().get_model(app_label, self.name)
        vector_field = model._meta.get_field(self.fts_vector)
        if not isinstance(vector_field, TSVectorField):
            # Fixed: attach a message instead of raising a bare
            # AttributeError, which gave no hint about what went wrong.
            raise AttributeError(
                "Field '%s' of model '%s' is not a TSVectorField" % (
                    self.fts_vector, self.name))
        schema_editor.execute(self.sql_creator.create_index(
            model, vector_field, self.index
        ))

    def database_backwards(self, app_label, schema_editor, from_state,
                           to_state):
        # Reverse migration: drop the index created by the forward step.
        model = from_state.render().get_model(app_label, self.name)
        vector_field = model._meta.get_field(self.fts_vector)
        schema_editor.execute(self.sql_creator.delete_index(
            model,
            vector_field
        ))

    def describe(self):
        return "Create %s index `%s` for model `%s`" % (
            self.index, self.fts_vector, self.name
        )
class DeleteFTSIndexOperation(CreateFTSIndexOperation):
    """
    Removes index created by :class:`~pg_fts.migrations.CreateFTSIndexOperation`

    :param name: The Model name
    :param fts_vector: The :class:`~pg_fts.fields.TSVectorField` field name
    :param index: The type of index 'gin' or 'gist' for more information go to
    """

    def database_forwards(self, app_label, schema_editor, from_state,
                          to_state):
        # Forward step drops the index (mirror image of the parent class).
        target = from_state.render().get_model(app_label, self.name)
        field = target._meta.get_field(self.fts_vector)
        schema_editor.execute(self.sql_creator.delete_index(target, field))

    def database_backwards(self, app_label, schema_editor, from_state,
                           to_state):
        # Backward step re-creates the index, validating the field type
        # first, just like the parent's forward step does.
        target = from_state.render().get_model(app_label, self.name)
        field = target._meta.get_field(self.fts_vector)
        if not isinstance(field, TSVectorField):
            raise AttributeError
        schema_editor.execute(
            self.sql_creator.create_index(target, field, self.index))

    def describe(self):
        return "Delete %s index `%s` for model `%s`" % (
            self.index, self.fts_vector, self.name
        )
| |
from __future__ import unicode_literals
from django import VERSION
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import IntegrityError, models, transaction
from django.db.models.query import QuerySet
from django.template.defaultfilters import slugify as default_slugify
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext
from taggit.utils import _get_field
try:
    from unidecode import unidecode
except ImportError:
    # Optional dependency: fall back to an identity function so slugify()
    # still works when unidecode is not installed.
    def unidecode(tag):
        return tag
@python_2_unicode_compatible
class TagBase(models.Model):
    """Abstract base model for tags: a unique name plus a unique slug."""

    name = models.CharField(verbose_name=_('Name'), unique=True, max_length=100)
    slug = models.SlugField(verbose_name=_('Slug'), unique=True, max_length=100)

    def __str__(self):
        return self.name

    class Meta:
        abstract = True

    def save(self, *args, **kwargs):
        # Auto-generate a slug only for unsaved tags without an explicit one.
        if not self.pk and not self.slug:
            self.slug = self.slugify(self.name)
            from django.db import router
            using = kwargs.get("using") or router.db_for_write(
                type(self), instance=self)
            # Make sure we write to the same db for all attempted writes,
            # with a multi-master setup, theoretically we could try to
            # write and rollback on different DBs
            kwargs["using"] = using
            # Be opportunistic and try to save the tag, this should work for
            # most cases ;)
            try:
                with transaction.atomic(using=using):
                    res = super(TagBase, self).save(*args, **kwargs)
                return res
            except IntegrityError:
                pass
            # Now try to find existing slugs with similar names
            slugs = set(
                self.__class__._default_manager
                .filter(slug__startswith=self.slug)
                .values_list('slug', flat=True)
            )
            # Probe "<slug>_1", "<slug>_2", ... until an unused slug is found.
            i = 1
            while True:
                slug = self.slugify(self.name, i)
                if slug not in slugs:
                    self.slug = slug
                    # We purposely ignore concurrency issues here for now.
                    # (That is, till we found a nice solution...)
                    return super(TagBase, self).save(*args, **kwargs)
                i += 1
        else:
            # Existing tag or caller-supplied slug: plain save.
            return super(TagBase, self).save(*args, **kwargs)

    def slugify(self, tag, i=None):
        # Transliterate, slugify, then append "_<i>" for de-duplication.
        slug = default_slugify(unidecode(tag))
        if i is not None:
            slug += "_%d" % i
        return slug
class Tag(TagBase):
    """Default concrete tag model."""

    class Meta:
        verbose_name = _("Tag")
        verbose_name_plural = _("Tags")
        app_label = 'taggit'
@python_2_unicode_compatible
class ItemBase(models.Model):
    """Abstract base for "through" models linking a tag to an object."""

    class Meta:
        abstract = True

    def __str__(self):
        parts = {
            "object": self.content_object,
            "tag": self.tag,
        }
        return ugettext("%(object)s tagged with %(tag)s") % parts

    @classmethod
    def tag_model(cls):
        # The model the 'tag' FK points at; API moved in Django 1.9.
        tag_field = _get_field(cls, 'tag')
        if VERSION >= (1, 9):
            return tag_field.remote_field.model
        return tag_field.rel.to

    @classmethod
    def tag_relname(cls):
        # related_name of the 'tag' FK; API moved in Django 1.9.
        tag_field = _get_field(cls, 'tag')
        if VERSION >= (1, 9):
            return tag_field.remote_field.related_name
        return tag_field.rel.related_name

    @classmethod
    def lookup_kwargs(cls, instance):
        return dict(content_object=instance)

    @classmethod
    def bulk_lookup_kwargs(cls, instances):
        return dict(content_object__in=instances)
class TaggedItemBase(ItemBase):
    """Through model base with a direct FK to the object being tagged."""

    tag = models.ForeignKey(Tag, related_name="%(app_label)s_%(class)s_items", on_delete=models.CASCADE)

    class Meta:
        abstract = True

    @classmethod
    def tags_for(cls, model, instance=None, **extra_filters):
        # Tags for one instance, or all tags in use for the model class.
        filters = extra_filters or {}
        relname = cls.tag_relname()
        if instance is None:
            filters['%s__content_object__isnull' % relname] = False
            return cls.tag_model().objects.filter(**filters).distinct()
        filters['%s__content_object' % relname] = instance
        return cls.tag_model().objects.filter(**filters)
class CommonGenericTaggedItemBase(ItemBase):
    """Through model base using contenttypes instead of a direct FK."""

    content_type = models.ForeignKey(
        ContentType,
        on_delete=models.CASCADE,
        verbose_name=_('Content type'),
        related_name="%(app_label)s_%(class)s_tagged_items"
    )
    content_object = GenericForeignKey()

    class Meta:
        abstract = True

    @classmethod
    def lookup_kwargs(cls, instance):
        # Generic relation: match on both pk and content type.
        return {
            'object_id': instance.pk,
            'content_type': ContentType.objects.get_for_model(instance),
        }

    @classmethod
    def bulk_lookup_kwargs(cls, instances):
        if not isinstance(instances, QuerySet):
            # TODO: instances[0], can we assume there are instances.
            return {
                "object_id__in": [obj.pk for obj in instances],
                "content_type": ContentType.objects.get_for_model(instances[0]),
            }
        # Can do a real object_id IN (SELECT ..) query.
        return {
            "object_id__in": instances,
            "content_type": ContentType.objects.get_for_model(instances.model),
        }

    @classmethod
    def tags_for(cls, model, instance=None, **extra_filters):
        relname = cls.tag_relname()
        lookups = {
            "%s__content_type" % relname: ContentType.objects.get_for_model(model),
        }
        if instance is not None:
            lookups["%s__object_id" % relname] = instance.pk
        lookups.update(extra_filters)
        return cls.tag_model().objects.filter(**lookups).distinct()
class GenericTaggedItemBase(CommonGenericTaggedItemBase):
    # Integer object_id: suitable for models with integer primary keys.
    object_id = models.IntegerField(verbose_name=_('Object id'), db_index=True)

    class Meta:
        abstract = True
# UUIDField only exists from Django 1.8, so guard the UUID variant.
if VERSION >= (1, 8):
    class GenericUUIDTaggedItemBase(CommonGenericTaggedItemBase):
        # UUID object_id: for models with UUID primary keys.
        object_id = models.UUIDField(verbose_name=_('Object id'), db_index=True)

        class Meta:
            abstract = True
class TaggedItem(GenericTaggedItemBase, TaggedItemBase):
    """Default concrete through model tagging any content object."""

    class Meta:
        verbose_name = _("Tagged Item")
        verbose_name_plural = _("Tagged Items")
        app_label = 'taggit'
        # Speed up (content_type, object_id) lookups used by the generic FK.
        index_together = [
            ["content_type", "object_id"],
        ]
| |
from boto.s3.connection import S3Connection
from boto.s3.key import Key
import os, re, jsonHelper
import os.path
import time
from time import mktime
import datetime
#ABS_PATH = '~/Desktop/Dev/Learning/tests/scrapWeb/hello-world/selenium'
# Credential file name; first line is the access key, second the secret.
CRED = '.s3'
ACCESS = 'accesskey'
SECRET = 'secret'
BUCKET_NAME = 'data.hotoppy.com'
# Local staging folder for images, relative to the project root.
IMAGE_PATH = '/imaging/images'
SECONDS_IN_A_DAY = 24 * 60 * 60
# NOTE: despite the name this is two full days, not 1.5.
OneAndHalfDay = SECONDS_IN_A_DAY * 2
#OneAndHalfDay = 1
# Local files are renamed with this suffix once pushed to S3.
UPLOAD_SUFFIX = 'uploaded'
#IMAGE_KEY_SUBFOLDER = "images/"
IMAGE_KEY_SUBFOLDER = "i/"
# Accepted image file size bounds in bytes.
MAX_IMAGE_SIZE = 60000
MIN_IMAGE_SIZE = 5000
#TEMP_PATH = 'guardian/1420592.json'
def absoluteImagePath():
    """Absolute path of the local image staging folder."""
    base = jsonHelper.getCompleteFilePath()
    return "{0}{1}".format(base, imagePath())
def imagePath():
    # Relative image folder used both for local staging and path checks.
    return IMAGE_PATH
def readCred():
    """Read the S3 credentials file.

    Returns a dict keyed by ACCESS and SECRET, or None when no credential
    file name is configured. The file's first line is the access key and
    the second line the secret.
    """
    if len(CRED) < 1:
        #print "please provide the credential file name"
        return
    cred_path = jsonHelper.getCompleteFilePath(CRED)
    with open(cred_path) as f:
        lines = f.read().splitlines()
    return {ACCESS: lines[0], SECRET: lines[1]}
def accessBucket(buckName=None):
    """Connect to S3 and return the bucket, creating it when missing.

    Returns None (after printing the error) when anything fails.
    """
    if not buckName or len(buckName) < 1:
        buckName = BUCKET_NAME
    try:
        cred = readCred()
        conn = S3Connection(cred[ACCESS], cred[SECRET])
        try:
            bucket = conn.get_bucket(buckName)
        except Exception:
            # Lookup failed (e.g. bucket does not exist) -- try to create it.
            bucket = conn.create_bucket(buckName)
        if bucket:
            return bucket
        print("Error: bucket cannot be nil")
        return
    except Exception as e:
        print("Access Bucket Error: {0}".format(e))
def getBucketList(pathToFileName=IMAGE_KEY_SUBFOLDER, bucket=None):
    """List the keys under the given prefix (non-recursive)."""
    target = bucket if bucket else accessBucket()
    return list(target.list(pathToFileName, "/"))
def getBuckNameList(pathToFileName=IMAGE_KEY_SUBFOLDER, bucket=None):
    """Return the key names under the given prefix.

    Fixed: ``pathToFileName`` was accepted but never forwarded, so the
    default prefix was always listed regardless of the argument.
    """
    keyList = getBucketList(pathToFileName, bucket=bucket)
    return [v.name for v in keyList]
def deleteProcedure(specifiedDuration=OneAndHalfDay):
    """Delete S3 image keys older than ``specifiedDuration`` seconds.

    :param specifiedDuration: maximum key age in seconds (default
        ``OneAndHalfDay``, which is actually two days).
    """
    dt = datetime.datetime.now()
    nowInSeconds = time.mktime(dt.timetuple())
    b = accessBucket()
    if not b:
        # Fixed: accessBucket() returns None on failure; bail out instead
        # of crashing on b.get_key below.
        return
    keyList = getBuckNameList(bucket=b)
    for keyName in keyList:
        imagekey = b.get_key(keyName)
        if imagekey is None:
            # Fixed: the key may have been deleted between listing and this
            # lookup; skip it instead of raising AttributeError.
            continue
        print("Not Uploading file name: {0} last-modified: {1}".format(keyName, imagekey.last_modified))
        # get_key() exposes an RFC-1123 style timestamp, matching the
        # format string below.
        modified = time.strptime(imagekey.last_modified, '%a, %d %b %Y %H:%M:%S %Z')
        mdt = datetime.datetime.fromtimestamp(mktime(modified))
        modifiedTimeInSeconds = time.mktime(mdt.timetuple())
        durationInSeconds = nowInSeconds - modifiedTimeInSeconds
        if durationInSeconds > specifiedDuration:
            try:
                print("LONGER THAN ONE DAY deleting {0}".format(imagekey))
                b.delete_key(imagekey)
            except Exception as e:
                print("Exception in deleting key: {0} - {1}".format(imagekey, e))
        else:
            print("WITHIN ONE DAY {0}".format(imagekey))
#NOTE: the S3 path will be lower case where local file name maybe upper case
#Parameters: forwardWrite default to 5 to ensure continuity of the data for upcoming event and expected the newer data will overwrite this
# localPath, consist of this pattern publicationName/timestamp.json
#
# minor error [Errno 1] Operation not permitted: '.../imaging/images'
# possible fix: http://stackoverflow.com/questions/10937806/oserror-error-1-operation-not-permitted
# chown -R username:groupname .
def sendData(localPath, buckName=None, forwardWrite=36):
    """Upload either the image staging folder or a publication JSON to S3.

    When ``localPath`` contains IMAGE_PATH, the local folder is walked and
    new .jpg files within the size bounds are uploaded (then renamed with
    UPLOAD_SUFFIX); files already on S3 are cleaned up once old enough.
    Otherwise ``localPath`` is treated as ``publicationName/timestamp.json``
    and is written ``forwardWrite`` times with incrementing timestamps so
    upcoming slots are pre-filled.

    :param localPath: path of the file/folder to upload
    :param buckName: target bucket name, defaults to BUCKET_NAME
    :param forwardWrite: number of future timestamp slots to copy the JSON to
    """
    if not buckName or len(buckName) < 1:
        buckName = BUCKET_NAME
    if len(localPath) < 1:
        return
    try:
        b = accessBucket()
        # Sanity check: localPath must live under the configured system path
        # (compared segment by segment, case-insensitively).
        systemPath = jsonHelper.getCompleteFilePath().lower().split('/')
        localPathArray = localPath.lower().split('/')
        print("systemPath: {0}, localPath: {1}".format(systemPath, localPathArray))
        for pathIndex in range(len(systemPath)):
            pathStr = systemPath[pathIndex]
            if pathStr.find(localPathArray[pathIndex]) < 0:
                print("NOT MATCH Path name s3Interface: {0}".format(localPathArray[pathIndex]))
                return
        if len(localPath) < 7 or len(localPathArray) < 2:
            # Fixed NameError: this previously referenced the undefined
            # name `localpath`.
            print("Error check localpath {0}".format(localPath))
            return
        if IMAGE_PATH in localPath:
            ## image operation
            topdir = '{0}'.format(localPath)
            exten = '.jpg'
            # Names already present under the S3 image prefix (prefix stripped).
            imageNameList = [v.name[len(IMAGE_KEY_SUBFOLDER):] for v in list(b.list(IMAGE_KEY_SUBFOLDER, "/"))]
            uploadSuffixSubstringHelper = -len(UPLOAD_SUFFIX)
            ## CONDITION
            ##  iterate the destination folders looking for jpg files:
            ##  - not on S3 and within the size bounds -> upload, then rename
            ##    the local file with the upload suffix
            ##  - already on S3 -> once older than OneAndHalfDay, delete from
            ##    S3 and the local machine; fresh jpgs are renamed with the
            ##    upload suffix; otherwise do nothing

            def step(ext, dirname, names):
                # os.path.walk callback: `ext` is the extension argument,
                # `dirname` the directory being visited, `names` its entries.
                ext = ext.lower()
                dt = datetime.datetime.now()
                nowInSeconds = time.mktime(dt.timetuple())
                for name in names[:]:
                    if len(name) < 2:
                        continue
                    # nameInTheList: whether this image (with or without the
                    # upload suffix) already exists on S3. This double-checks
                    # files that were renamed but failed to upload.
                    if name.lower().endswith(UPLOAD_SUFFIX) is True:
                        _name = name[:uploadSuffixSubstringHelper]
                    else:
                        _name = name
                    nameInTheList = _name in imageNameList
                    if name.lower().endswith(ext) is True and not nameInTheList or name.lower().endswith(UPLOAD_SUFFIX) is True and not nameInTheList:
                        # Not yet on S3: upload when within the size bounds.
                        if name.lower().endswith(UPLOAD_SUFFIX) is True:
                            thisName = name[:uploadSuffixSubstringHelper]
                        else:
                            thisName = name
                        keyName = "{0}{1}".format(IMAGE_KEY_SUBFOLDER, thisName)
                        imagekey = b.new_key(keyName)
                        print("Uploading file name: {0}".format(thisName))
                        imagekey.Content_Type = "image/jpeg"
                        try:
                            pathToImageFile = "{0}/{1}".format(localPath, name)
                            img_size = os.stat(pathToImageFile).st_size
                            if img_size > MAX_IMAGE_SIZE or MIN_IMAGE_SIZE > img_size:
                                print(" WARNING: improper image size {0}: {1}".format(img_size, name))
                                os.remove(pathToImageFile)
                                continue
                            imagekey.set_contents_from_filename(pathToImageFile)
                            imagekey.make_public()
                            if name.lower().endswith(ext) is True:
                                # Mark the local file as uploaded.
                                localPathExt = "{0}{1}".format(pathToImageFile, UPLOAD_SUFFIX)
                                os.rename(pathToImageFile, localPathExt)
                        except Exception as e:
                            print("Exception uploading image 0: {0} - {1}".format(name, e))
                    elif name.lower().endswith(UPLOAD_SUFFIX) is True and nameInTheList or name.lower().endswith(ext) is True and nameInTheList:
                        # Already on S3: delete remotely and locally once old
                        # enough; otherwise just mark fresh jpgs as uploaded.
                        if name.lower().endswith(UPLOAD_SUFFIX) is True:
                            _name = name[:uploadSuffixSubstringHelper]
                        else:
                            _name = name
                        keyName = "{0}{1}".format(IMAGE_KEY_SUBFOLDER, _name)
                        imagekey = b.get_key(keyName)
                        if imagekey is None:
                            # Fixed: the key may be gone by the time we look
                            # it up; skip instead of crashing below.
                            continue
                        print("Not Uploading file name: {0} last-modified: {1}".format(keyName, imagekey.last_modified))
                        modified = time.strptime(imagekey.last_modified, '%a, %d %b %Y %H:%M:%S %Z')
                        mdt = datetime.datetime.fromtimestamp(mktime(modified))
                        modifiedTimeInSeconds = time.mktime(mdt.timetuple())
                        durationInSeconds = nowInSeconds - modifiedTimeInSeconds
                        systemPath = jsonHelper.getCompleteFilePath()
                        print("should delete: {0}{1}/{2}".format(systemPath, dirname[1:], name))
                        # assume default dirname is "./xyz"
                        deleteFilePath = "{0}{1}/{2}".format(systemPath, dirname[1:], name)
                        if durationInSeconds > OneAndHalfDay and len(imageNameList) > 0:
                            try:
                                print("LONGER THAN ONE DAY deleting {0}".format(imagekey))
                                b.delete_key(imagekey)
                                os.remove(deleteFilePath)
                            except Exception as e:
                                print("Exception in deleting key: {0} - {1}".format(imagekey, e))
                        elif name.lower().endswith(ext) is True:
                            pathToImageFile = "{0}/{1}".format(localPath, name)
                            localPathExt = "{0}{1}".format(pathToImageFile, UPLOAD_SUFFIX)
                            try:
                                os.rename(pathToImageFile, localPathExt)
                            except Exception as e:
                                print("Exception in deleting key: {0} - {1}".format(pathToImageFile, e))
                        else:
                            print("WITHIN ONE DAY {0}".format(imagekey))

            # NOTE: os.path.walk is Python 2 only; this module targets
            # Python 2 (see the print-statement history).
            os.path.walk(topdir, step, exten)
        else:
            ## JSON operation: upload publicationName/timestamp.json and
            ## forward-copy it to the next `forwardWrite` timestamp slots.
            timeName = localPathArray[len(localPathArray) - 1]
            strippedPath = re.sub(r'\.json$', "", timeName.lower())
            timeStampStr = re.search(r'\d+$', strippedPath).group()
            timestamp = int(timeStampStr)
            # Fixed: was a Python 2 print statement; now a function call
            # consistent with the rest of the module.
            print('strippedPath ' + strippedPath)
            publicationName = localPathArray[len(localPathArray) - 2]
            print('publicationName {0}'.format(publicationName))
            if timestamp < 100 and len(publicationName) < 1:
                return
            k = Key(b)
            k.Content_Type = 'application/json; charset=utf-8'
            k.content_disposition = 'inline'
            for num in range(forwardWrite):
                if num == 0:
                    k.key = "%s/%d.json" % (publicationName, timestamp)
                    k.set_contents_from_filename(localPath)
                    k.make_public()
                else:
                    k.copy(buckName, "%s/%d.json" % (publicationName, timestamp)).make_public()
                timestamp = timestamp + 1
    except Exception as e:
        print(e)
def uploadImagesProcedure():
    """Push the local image staging folder to S3."""
    # Renamed local (was `imagePath`) so it no longer shadows the module
    # function of the same name.
    stagingFolder = jsonHelper.getCompleteFilePath('imaging', 'images')
    sendData(stagingFolder)
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import time
import os
import copy
import requests
import backoff
from requests.adapters import HTTPAdapter
import six
from .contract import KongAdminContract, APIAdminContract, ConsumerAdminContract, PluginAdminContract, \
APIPluginConfigurationAdminContract, BasicAuthAdminContract, KeyAuthAdminContract, OAuth2AdminContract
from .utils import add_url_params, assert_dict_keys_in, ensure_trailing_slash
from .compat import OK, CREATED, NO_CONTENT, NOT_FOUND, CONFLICT, INTERNAL_SERVER_ERROR, urljoin, utf8_or_str
from .exceptions import ConflictError, ServerError
# WTF: As this is CI/Test specific, maybe better to only have this piece of code in your tests directory?
########################################################################################################################
# BEGIN: CI fixes
#
# Because of memory/performance limitations in the CI, it often happened that connections to Kong got messed up
# during unittests. To prevent this from happening, we've implemented both throttling and connection dropping as
# optional measures during testing.
########################################################################################################################
# Both knobs are read from the environment once, at import time.
# Minimum interval between requests (measured in seconds)
KONG_MINIMUM_REQUEST_INTERVAL = float(os.getenv('KONG_MINIMUM_REQUEST_INTERVAL', 0))
# Whether or not to reuse connections after a request (1 = true, otherwise false)
KONG_REUSE_CONNECTIONS = int(os.getenv('KONG_REUSE_CONNECTIONS', '1')) == 1
def get_default_kong_headers():
    """Default headers for admin API requests.

    Adds ``Connection: close`` when connection reuse is disabled (CI aid).
    """
    if KONG_REUSE_CONNECTIONS:
        return {}
    return {'Connection': 'close'}
class ThrottlingHTTPAdapter(HTTPAdapter):
    """HTTPAdapter enforcing a minimum delay between requests (CI aid)."""

    def __init__(self, *args, **kwargs):
        super(ThrottlingHTTPAdapter, self).__init__(*args, **kwargs)
        # Timestamp of the previous request; None until the first send().
        self._last_request = None

    def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
        if KONG_MINIMUM_REQUEST_INTERVAL > 0 and self._last_request is not None:
            # Sleep off the remainder of the minimum interval, if any.
            wait = self._last_request + KONG_MINIMUM_REQUEST_INTERVAL - time.time()
            if wait > 0:
                time.sleep(wait)
        response = super(ThrottlingHTTPAdapter, self).send(request, stream, timeout, verify, cert, proxies)
        self._last_request = time.time()
        return response
# Create a singleton
# Shared by every RestClient session so throttling state is global.
THROTTLING_ADAPTER = ThrottlingHTTPAdapter()
########################################################################################################################
# END: CI fixes
########################################################################################################################
def raise_response_error(response, exception_class=None):
    """Raise ``exception_class`` (default ValueError) carrying the response body.

    :param response: response object whose ``content`` becomes the message.
    :param exception_class: optional exception type to raise instead of
        ``ValueError``.
    :raises TypeError: if ``exception_class`` is not an exception type.
    """
    exception_class = exception_class or ValueError
    # Fixed: was a bare `assert`, which is stripped under `python -O` and
    # raises an unhelpful AssertionError otherwise.
    if not issubclass(exception_class, BaseException):
        raise TypeError('exception_class must be an exception type, got %r' % (exception_class,))
    raise exception_class(response.content)

INVALID_FIELD_ERROR_TEMPLATE = '%r is not a valid field. Allowed fields: %r'
class RestClient(object):
    """Thin wrapper around a requests session bound to one API base URL."""

    def __init__(self, api_url, headers=None):
        self.api_url = api_url
        self.headers = headers
        self._session = None

    def destroy(self):
        """Close the session and drop all references."""
        self.api_url = None
        self.headers = None
        if self._session is not None:
            self._session.close()
            self._session = None

    @property
    def session(self):
        """Lazily-created requests session.

        With throttling enabled, the shared throttling adapter is mounted
        for this API URL. With connection reuse disabled (CI aid), the old
        session is closed and a fresh one is created on every access.
        """
        if self._session is None:
            self._session = requests.session()
            if KONG_MINIMUM_REQUEST_INTERVAL > 0:
                self._session.mount(self.api_url, THROTTLING_ADAPTER)
        elif not KONG_REUSE_CONNECTIONS:
            self._session.close()
            self._session = None
            return self.session  # recurse once to build a fresh session
        return self._session

    def get_headers(self, **headers):
        """Merge the instance's default headers with per-request overrides."""
        result = {}
        # Fixed: `self.headers` defaults to None, which made `dict.update`
        # raise TypeError; only merge when default headers were provided.
        if self.headers:
            result.update(self.headers)
        result.update(headers)
        return result

    def get_url(self, *path, **query_params):
        # WTF: Never use str, unless in some very specific cases, like in compatibility layers! Fixed for you.
        path = [six.text_type(p) for p in path]
        url = ensure_trailing_slash(urljoin(self.api_url, '/'.join(path)))
        return add_url_params(url, query_params)
class APIPluginConfigurationAdminClient(APIPluginConfigurationAdminContract, RestClient):
    """Admin client for the plugin configurations of a single Kong API."""

    def __init__(self, api_admin, api_name_or_id, api_url):
        super(APIPluginConfigurationAdminClient, self).__init__(api_url, headers=get_default_kong_headers())
        self.api_admin = api_admin
        self.api_name_or_id = api_name_or_id

    def destroy(self):
        super(APIPluginConfigurationAdminClient, self).destroy()
        self.api_admin = None
        self.api_name_or_id = None

    @staticmethod
    def _build_plugin_data(plugin_name, enabled, consumer_id, fields):
        # Shared by create() and create_or_update(): Kong expects plugin
        # settings as flattened `config.<key>` form fields.
        data = {
            'name': plugin_name,
            'consumer_id': consumer_id,
        }
        for key in fields:
            data['config.%s' % key] = fields[key]
        if enabled is not None and isinstance(enabled, bool):
            data['enabled'] = enabled
        return data

    def create(self, plugin_name, enabled=None, consumer_id=None, **fields):
        """Create a plugin configuration (POST); expects 201 Created."""
        data = self._build_plugin_data(plugin_name, enabled, consumer_id, fields)
        response = self.session.post(self.get_url('apis', self.api_name_or_id, 'plugins'), data=data,
                                     headers=self.get_headers())
        if response.status_code == CONFLICT:
            raise_response_error(response, ConflictError)
        elif response.status_code == INTERNAL_SERVER_ERROR:
            raise_response_error(response, ServerError)
        elif response.status_code != CREATED:
            raise_response_error(response, ValueError)
        return response.json()

    def create_or_update(self, plugin_name, plugin_configuration_id=None, enabled=None, consumer_id=None, **fields):
        """Upsert a plugin configuration (PUT); accepts 200 or 201."""
        data = self._build_plugin_data(plugin_name, enabled, consumer_id, fields)
        if plugin_configuration_id is not None:
            data['id'] = plugin_configuration_id
        response = self.session.put(self.get_url('apis', self.api_name_or_id, 'plugins'), data=data,
                                    headers=self.get_headers())
        if response.status_code == CONFLICT:
            raise_response_error(response, ConflictError)
        elif response.status_code == INTERNAL_SERVER_ERROR:
            raise_response_error(response, ServerError)
        elif response.status_code not in (CREATED, OK):
            raise_response_error(response, ValueError)
        return response.json()

    def update(self, plugin_id, enabled=None, consumer_id=None, **fields):
        """Partially update a plugin configuration (PATCH).

        Unlike create(), only explicitly supplied values are sent.
        """
        data = {}
        for key in fields:
            data['config.%s' % key] = fields[key]
        if consumer_id is not None:
            data['consumer_id'] = consumer_id
        if enabled is not None and isinstance(enabled, bool):
            data['enabled'] = enabled
        url = self.get_url('apis', self.api_name_or_id, 'plugins', plugin_id)
        response = self.session.patch(url, data=data, headers=self.get_headers())
        if response.status_code == INTERNAL_SERVER_ERROR:
            raise_response_error(response, ServerError)
        elif response.status_code != OK:
            raise_response_error(response, ValueError)
        return response.json()

    @backoff.on_exception(backoff.expo, ServerError, max_tries=3)
    def list(self, size=100, offset=None, **filter_fields):
        """List plugin configurations, optionally filtered and paginated."""
        assert_dict_keys_in(filter_fields, ['id', 'name', 'api_id', 'consumer_id'], INVALID_FIELD_ERROR_TEMPLATE)
        query_params = filter_fields
        query_params['size'] = size
        if offset is not None:
            query_params['offset'] = offset
        url = self.get_url('apis', self.api_name_or_id, 'plugins', **query_params)
        response = self.session.get(url, headers=self.get_headers())
        if response.status_code == INTERNAL_SERVER_ERROR:
            raise_response_error(response, ServerError)
        elif response.status_code != OK:
            raise_response_error(response, ValueError)
        return response.json()

    @backoff.on_exception(backoff.expo, ValueError, max_tries=3)
    def delete(self, plugin_id):
        """Delete a plugin configuration; 404 is treated as already gone."""
        response = self.session.delete(self.get_url('apis', self.api_name_or_id, 'plugins', plugin_id),
                                       headers=self.get_headers())
        if response.status_code not in (NO_CONTENT, NOT_FOUND):
            raise ValueError('Could not delete Plugin Configuration (status: %s): %s' % (
                response.status_code, plugin_id))

    @backoff.on_exception(backoff.expo, ServerError, max_tries=3)
    def retrieve(self, plugin_id):
        """Fetch a single plugin configuration by id."""
        response = self.session.get(self.get_url('apis', self.api_name_or_id, 'plugins', plugin_id),
                                    headers=self.get_headers())
        if response.status_code == INTERNAL_SERVER_ERROR:
            raise_response_error(response, ServerError)
        elif response.status_code != OK:
            raise_response_error(response, ValueError)
        return response.json()

    @backoff.on_exception(backoff.expo, ServerError, max_tries=3)
    def count(self):
        """Total number of plugin configurations for this API."""
        response = self.session.get(self.get_url('apis', self.api_name_or_id, 'plugins'), headers=self.get_headers())
        if response.status_code == INTERNAL_SERVER_ERROR:
            raise_response_error(response, ServerError)
        # Some Kong versions return 'total'; fall back to counting 'data'.
        result = response.json()
        amount = result.get('total', len(result.get('data')))
        return amount
class APIAdminClient(APIAdminContract, RestClient):
def __init__(self, api_url):
super(APIAdminClient, self).__init__(api_url, headers=get_default_kong_headers())
def destroy(self):
super(APIAdminClient, self).destroy()
@backoff.on_exception(backoff.expo, ServerError, max_tries=3)
def count(self):
response = self.session.get(self.get_url('apis'), headers=self.get_headers())
if response.status_code == INTERNAL_SERVER_ERROR:
raise_response_error(response, ServerError)
result = response.json()
amount = result.get('total', len(result.get('data')))
return amount
def create(self, upstream_url, name=None, request_host=None, request_path=None, strip_request_path=False,
preserve_host=False):
response = self.session.post(self.get_url('apis'), data={
'name': name,
'request_host': request_host or None, # Empty strings are not allowed
'request_path': request_path or None, # Empty strings are not allowed
'strip_request_path': strip_request_path,
'preserve_host': preserve_host,
'upstream_url': upstream_url
}, headers=self.get_headers())
if response.status_code == CONFLICT:
raise_response_error(response, ConflictError)
elif response.status_code == INTERNAL_SERVER_ERROR:
raise_response_error(response, ServerError)
elif response.status_code != CREATED:
raise_response_error(response, ValueError)
return response.json()
def create_or_update(self, upstream_url, api_id=None, name=None, request_host=None, request_path=None,
strip_request_path=False, preserve_host=False):
data = {
'name': name,
'request_host': request_host or None, # Empty strings are not allowed
'request_path': request_path or None, # Empty strings are not allowed
'strip_request_path': strip_request_path,
'preserve_host': preserve_host,
'upstream_url': upstream_url
}
if api_id is not None:
data['id'] = api_id
response = self.session.put(self.get_url('apis'), data=data, headers=self.get_headers())
if response.status_code == CONFLICT:
raise_response_error(response, ConflictError)
elif response.status_code == INTERNAL_SERVER_ERROR:
raise_response_error(response, ServerError)
elif response.status_code not in (CREATED, OK):
raise_response_error(response, ValueError)
return response.json()
def update(self, name_or_id, upstream_url, **fields):
assert_dict_keys_in(
fields, ['name', 'request_host', 'request_path', 'strip_request_path', 'preserve_host'],
INVALID_FIELD_ERROR_TEMPLATE)
# Explicitly encode on beforehand before passing to requests!
fields = dict((k, utf8_or_str(v)) if isinstance(v, six.text_type) else v for k, v in fields.items())
response = self.session.patch(self.get_url('apis', name_or_id), data=dict({
'upstream_url': upstream_url
}, **fields), headers=self.get_headers())
if response.status_code == INTERNAL_SERVER_ERROR:
raise_response_error(response, ServerError)
elif response.status_code != OK:
raise_response_error(response, ValueError)
return response.json()
@backoff.on_exception(backoff.expo, ValueError, max_tries=3)
def delete(self, name_or_id):
response = self.session.delete(self.get_url('apis', name_or_id), headers=self.get_headers())
if response.status_code not in (NO_CONTENT, NOT_FOUND):
raise ValueError('Could not delete API (status: %s): %s' % (response.status_code, name_or_id))
@backoff.on_exception(backoff.expo, ServerError, max_tries=3)
def retrieve(self, name_or_id):
response = self.session.get(self.get_url('apis', name_or_id), headers=self.get_headers())
if response.status_code == INTERNAL_SERVER_ERROR:
raise_response_error(response, ServerError)
elif response.status_code != OK:
raise_response_error(response, ValueError)
return response.json()
@backoff.on_exception(backoff.expo, ServerError, max_tries=3)
def list(self, size=100, offset=None, **filter_fields):
assert_dict_keys_in(filter_fields, ['id', 'name', 'request_host', 'request_path'], INVALID_FIELD_ERROR_TEMPLATE)
query_params = filter_fields
query_params['size'] = size
if offset:
query_params['offset'] = offset
url = self.get_url('apis', **query_params)
response = self.session.get(url, headers=self.get_headers())
if response.status_code == INTERNAL_SERVER_ERROR:
raise_response_error(response, ServerError)
elif response.status_code != OK:
raise_response_error(response, ValueError)
return response.json()
def plugins(self, name_or_id):
    # Scoped client for managing the plugin configurations of this API.
    return APIPluginConfigurationAdminClient(self, name_or_id, self.api_url)
class BasicAuthAdminClient(BasicAuthAdminContract, RestClient):
    """Admin client for the basic-auth credentials of a single consumer."""

    def __init__(self, consumer_admin, consumer_id, api_url):
        """Bind the client to one consumer on the given Kong admin API."""
        super(BasicAuthAdminClient, self).__init__(api_url, headers=get_default_kong_headers())
        self.consumer_admin = consumer_admin
        self.consumer_id = consumer_id

    def destroy(self):
        """Release the HTTP session and drop the consumer references."""
        super(BasicAuthAdminClient, self).destroy()
        self.consumer_admin = None
        self.consumer_id = None

    def create_or_update(self, basic_auth_id=None, username=None, password=None):
        """Idempotently create or replace a basic-auth credential (PUT)."""
        payload = {
            'username': utf8_or_str(username),
            'password': utf8_or_str(password),
        }
        if basic_auth_id is not None:
            payload['id'] = basic_auth_id
        response = self.session.put(
            self.get_url('consumers', self.consumer_id, 'basicauth'),
            data=payload, headers=self.get_headers())
        status = response.status_code
        if status == CONFLICT:
            raise_response_error(response, ConflictError)
        elif status == INTERNAL_SERVER_ERROR:
            raise_response_error(response, ServerError)
        elif status not in (CREATED, OK):
            raise_response_error(response, ValueError)
        return response.json()

    def create(self, username, password):
        """Create a new basic-auth credential (POST)."""
        payload = {
            'username': utf8_or_str(username),
            'password': utf8_or_str(password),
        }
        response = self.session.post(
            self.get_url('consumers', self.consumer_id, 'basicauth'),
            data=payload, headers=self.get_headers())
        status = response.status_code
        if status == CONFLICT:
            raise_response_error(response, ConflictError)
        elif status == INTERNAL_SERVER_ERROR:
            raise_response_error(response, ServerError)
        elif status != CREATED:
            raise_response_error(response, ValueError)
        return response.json()

    @backoff.on_exception(backoff.expo, ServerError, max_tries=3)
    def list(self, size=100, offset=None, **filter_fields):
        """List this consumer's credentials; retried on ServerError."""
        assert_dict_keys_in(filter_fields, ['id', 'username'], INVALID_FIELD_ERROR_TEMPLATE)
        params = dict(filter_fields, size=size)
        if offset:
            params['offset'] = offset
        response = self.session.get(
            self.get_url('consumers', self.consumer_id, 'basicauth', **params),
            headers=self.get_headers())
        if response.status_code == INTERNAL_SERVER_ERROR:
            raise_response_error(response, ServerError)
        elif response.status_code != OK:
            raise_response_error(response, ValueError)
        return response.json()

    @backoff.on_exception(backoff.expo, ValueError, max_tries=3)
    def delete(self, basic_auth_id):
        """Delete one credential; a 404 is treated as already gone."""
        response = self.session.delete(
            self.get_url('consumers', self.consumer_id, 'basicauth', basic_auth_id),
            headers=self.get_headers())
        if response.status_code not in (NO_CONTENT, NOT_FOUND):
            raise ValueError('Could not delete Basic Auth (status: %s): %s for Consumer: %s' % (
                response.status_code, basic_auth_id, self.consumer_id))

    @backoff.on_exception(backoff.expo, ServerError, max_tries=3)
    def retrieve(self, basic_auth_id):
        """Fetch one credential by id; retried on ServerError."""
        response = self.session.get(
            self.get_url('consumers', self.consumer_id, 'basicauth', basic_auth_id),
            headers=self.get_headers())
        if response.status_code == INTERNAL_SERVER_ERROR:
            raise_response_error(response, ServerError)
        elif response.status_code != OK:
            raise_response_error(response, ValueError)
        return response.json()

    @backoff.on_exception(backoff.expo, ServerError, max_tries=3)
    def count(self):
        """Return the number of basic-auth credentials for this consumer."""
        response = self.session.get(
            self.get_url('consumers', self.consumer_id, 'basicauth'),
            headers=self.get_headers())
        if response.status_code == INTERNAL_SERVER_ERROR:
            raise_response_error(response, ServerError)
        elif response.status_code != OK:
            raise_response_error(response, ValueError)
        body = response.json()
        # Prefer the API-reported total; fall back to the page length.
        return body.get('total', len(body.get('data')))

    def update(self, basic_auth_id, **fields):
        """Partially update one credential (PATCH)."""
        assert_dict_keys_in(fields, ['username', 'password'], INVALID_FIELD_ERROR_TEMPLATE)
        response = self.session.patch(
            self.get_url('consumers', self.consumer_id, 'basicauth', basic_auth_id),
            data=fields, headers=self.get_headers())
        if response.status_code == INTERNAL_SERVER_ERROR:
            raise_response_error(response, ServerError)
        elif response.status_code != OK:
            raise_response_error(response, ValueError)
        return response.json()
class KeyAuthAdminClient(KeyAuthAdminContract, RestClient):
    """Admin client for the key-auth credentials of a single consumer."""

    def __init__(self, consumer_admin, consumer_id, api_url):
        """Bind the client to one consumer on the given Kong admin API."""
        super(KeyAuthAdminClient, self).__init__(api_url, headers=get_default_kong_headers())
        self.consumer_admin = consumer_admin
        self.consumer_id = consumer_id

    def destroy(self):
        """Release the HTTP session and drop the consumer references."""
        super(KeyAuthAdminClient, self).destroy()
        self.consumer_admin = None
        self.consumer_id = None

    def create_or_update(self, key_auth_id=None, key=None):
        """Idempotently create or replace a key-auth credential (PUT)."""
        payload = {'key': key}
        if key_auth_id is not None:
            payload['id'] = key_auth_id
        response = self.session.put(
            self.get_url('consumers', self.consumer_id, 'keyauth'),
            data=payload, headers=self.get_headers())
        status = response.status_code
        if status == CONFLICT:
            raise_response_error(response, ConflictError)
        elif status == INTERNAL_SERVER_ERROR:
            raise_response_error(response, ServerError)
        elif status not in (CREATED, OK):
            raise_response_error(response, ValueError)
        return response.json()

    def create(self, key=None):
        """Create a new key-auth credential (POST); key may be omitted."""
        response = self.session.post(
            self.get_url('consumers', self.consumer_id, 'keyauth'),
            data={'key': key}, headers=self.get_headers())
        status = response.status_code
        if status == CONFLICT:
            raise_response_error(response, ConflictError)
        elif status == INTERNAL_SERVER_ERROR:
            raise_response_error(response, ServerError)
        elif status != CREATED:
            raise_response_error(response, ValueError)
        return response.json()

    @backoff.on_exception(backoff.expo, ServerError, max_tries=3)
    def list(self, size=100, offset=None, **filter_fields):
        """List this consumer's credentials; retried on ServerError."""
        assert_dict_keys_in(filter_fields, ['id', 'key'], INVALID_FIELD_ERROR_TEMPLATE)
        params = dict(filter_fields, size=size)
        if offset:
            params['offset'] = offset
        response = self.session.get(
            self.get_url('consumers', self.consumer_id, 'keyauth', **params),
            headers=self.get_headers())
        if response.status_code == INTERNAL_SERVER_ERROR:
            raise_response_error(response, ServerError)
        elif response.status_code != OK:
            raise_response_error(response, ValueError)
        return response.json()

    @backoff.on_exception(backoff.expo, ValueError, max_tries=3)
    def delete(self, key_auth_id):
        """Delete one credential; a 404 is treated as already gone."""
        response = self.session.delete(
            self.get_url('consumers', self.consumer_id, 'keyauth', key_auth_id),
            headers=self.get_headers())
        if response.status_code not in (NO_CONTENT, NOT_FOUND):
            raise ValueError('Could not delete Key Auth (status: %s): %s for Consumer: %s' % (
                response.status_code, key_auth_id, self.consumer_id))

    @backoff.on_exception(backoff.expo, ServerError, max_tries=3)
    def retrieve(self, key_auth_id):
        """Fetch one credential by id; retried on ServerError."""
        response = self.session.get(
            self.get_url('consumers', self.consumer_id, 'keyauth', key_auth_id),
            headers=self.get_headers())
        if response.status_code == INTERNAL_SERVER_ERROR:
            raise_response_error(response, ServerError)
        elif response.status_code != OK:
            raise_response_error(response, ValueError)
        return response.json()

    @backoff.on_exception(backoff.expo, ServerError, max_tries=3)
    def count(self):
        """Return the number of key-auth credentials for this consumer."""
        response = self.session.get(
            self.get_url('consumers', self.consumer_id, 'keyauth'),
            headers=self.get_headers())
        if response.status_code == INTERNAL_SERVER_ERROR:
            raise_response_error(response, ServerError)
        elif response.status_code != OK:
            raise_response_error(response, ValueError)
        body = response.json()
        # Prefer the API-reported total; fall back to the page length.
        return body.get('total', len(body.get('data')))

    def update(self, key_auth_id, **fields):
        """Partially update one credential (PATCH)."""
        assert_dict_keys_in(fields, ['key'], INVALID_FIELD_ERROR_TEMPLATE)
        response = self.session.patch(
            self.get_url('consumers', self.consumer_id, 'keyauth', key_auth_id),
            data=fields, headers=self.get_headers())
        if response.status_code == INTERNAL_SERVER_ERROR:
            raise_response_error(response, ServerError)
        elif response.status_code != OK:
            raise_response_error(response, ValueError)
        return response.json()
class OAuth2AdminClient(OAuth2AdminContract, RestClient):
    """Admin client for the OAuth2 applications of a single consumer."""

    def __init__(self, consumer_admin, consumer_id, api_url):
        """Bind the client to one consumer on the given Kong admin API."""
        super(OAuth2AdminClient, self).__init__(api_url, headers=get_default_kong_headers())
        self.consumer_admin = consumer_admin
        self.consumer_id = consumer_id

    def destroy(self):
        """Release the HTTP session and drop the consumer references."""
        super(OAuth2AdminClient, self).destroy()
        self.consumer_admin = None
        self.consumer_id = None

    def create_or_update(self, oauth2_id=None, name=None, redirect_uri=None, client_id=None, client_secret=None):
        """Idempotently create or replace an OAuth2 application (PUT)."""
        payload = {
            'name': name,
            'redirect_uri': redirect_uri,
            'client_id': client_id,
            'client_secret': client_secret
        }
        if oauth2_id is not None:
            payload['id'] = oauth2_id
        response = self.session.put(
            self.get_url('consumers', self.consumer_id, 'oauth2'),
            data=payload, headers=self.get_headers())
        status = response.status_code
        if status == CONFLICT:
            raise_response_error(response, ConflictError)
        elif status == INTERNAL_SERVER_ERROR:
            raise_response_error(response, ServerError)
        elif status not in (CREATED, OK):
            raise_response_error(response, ValueError)
        return response.json()

    def create(self, name, redirect_uri, client_id=None, client_secret=None):
        """Create a new OAuth2 application (POST)."""
        payload = {
            'name': name,
            'redirect_uri': redirect_uri,
            'client_id': client_id,
            'client_secret': client_secret
        }
        response = self.session.post(
            self.get_url('consumers', self.consumer_id, 'oauth2'),
            data=payload, headers=self.get_headers())
        status = response.status_code
        if status == CONFLICT:
            raise_response_error(response, ConflictError)
        elif status == INTERNAL_SERVER_ERROR:
            raise_response_error(response, ServerError)
        elif status != CREATED:
            raise_response_error(response, ValueError)
        return response.json()

    @backoff.on_exception(backoff.expo, ServerError, max_tries=3)
    def list(self, size=100, offset=None, **filter_fields):
        """List this consumer's OAuth2 applications; retried on ServerError."""
        assert_dict_keys_in(filter_fields, ['id', 'name', 'redirect_url', 'client_id'], INVALID_FIELD_ERROR_TEMPLATE)
        params = dict(filter_fields, size=size)
        if offset:
            params['offset'] = offset
        response = self.session.get(
            self.get_url('consumers', self.consumer_id, 'oauth2', **params),
            headers=self.get_headers())
        if response.status_code == INTERNAL_SERVER_ERROR:
            raise_response_error(response, ServerError)
        elif response.status_code != OK:
            raise_response_error(response, ValueError)
        return response.json()

    @backoff.on_exception(backoff.expo, ValueError, max_tries=3)
    def delete(self, oauth2_id):
        """Delete one application; a 404 is treated as already gone."""
        response = self.session.delete(
            self.get_url('consumers', self.consumer_id, 'oauth2', oauth2_id),
            headers=self.get_headers())
        if response.status_code not in (NO_CONTENT, NOT_FOUND):
            raise ValueError('Could not delete OAuth2 (status: %s): %s for Consumer: %s' % (
                response.status_code, oauth2_id, self.consumer_id))

    @backoff.on_exception(backoff.expo, ServerError, max_tries=3)
    def retrieve(self, oauth2_id):
        """Fetch one application by id; retried on ServerError."""
        response = self.session.get(
            self.get_url('consumers', self.consumer_id, 'oauth2', oauth2_id),
            headers=self.get_headers())
        if response.status_code == INTERNAL_SERVER_ERROR:
            raise_response_error(response, ServerError)
        elif response.status_code != OK:
            raise_response_error(response, ValueError)
        return response.json()

    @backoff.on_exception(backoff.expo, ServerError, max_tries=3)
    def count(self):
        """Return the number of OAuth2 applications for this consumer."""
        response = self.session.get(
            self.get_url('consumers', self.consumer_id, 'oauth2'),
            headers=self.get_headers())
        if response.status_code == INTERNAL_SERVER_ERROR:
            raise_response_error(response, ServerError)
        elif response.status_code != OK:
            raise_response_error(response, ValueError)
        body = response.json()
        # Prefer the API-reported total; fall back to the page length.
        return body.get('total', len(body.get('data')))

    def update(self, oauth2_id, **fields):
        """Partially update one application (PATCH)."""
        assert_dict_keys_in(
            fields, ['name', 'redirect_uri', 'client_id', 'client_secret'], INVALID_FIELD_ERROR_TEMPLATE)
        response = self.session.patch(
            self.get_url('consumers', self.consumer_id, 'oauth2', oauth2_id),
            data=fields, headers=self.get_headers())
        if response.status_code == INTERNAL_SERVER_ERROR:
            raise_response_error(response, ServerError)
        elif response.status_code != OK:
            raise_response_error(response, ValueError)
        return response.json()
class ConsumerAdminClient(ConsumerAdminContract, RestClient):
    """Admin client for Kong consumer objects and their credential clients."""

    def __init__(self, api_url):
        """Create the client against the given Kong admin API URL."""
        super(ConsumerAdminClient, self).__init__(api_url, headers=get_default_kong_headers())

    def destroy(self):
        """Release the underlying HTTP session."""
        super(ConsumerAdminClient, self).destroy()

    @backoff.on_exception(backoff.expo, ServerError, max_tries=3)
    def count(self):
        """Return the total number of consumers.

        :raises ServerError: on HTTP 500 (retried up to 3 times)
        :raises ValueError: on any other non-200 response
        """
        response = self.session.get(self.get_url('consumers'), headers=self.get_headers())
        if response.status_code == INTERNAL_SERVER_ERROR:
            raise_response_error(response, ServerError)
        elif response.status_code != OK:
            # Consistency fix: every other count() in this module rejects
            # unexpected statuses instead of blindly parsing the body.
            raise_response_error(response, ValueError)
        result = response.json()
        # Prefer the API-reported total; fall back to the page length.
        return result.get('total', len(result.get('data')))

    def create(self, username=None, custom_id=None):
        """Create a consumer (POST)."""
        response = self.session.post(self.get_url('consumers'), data={
            'username': username,
            'custom_id': custom_id,
        }, headers=self.get_headers())
        if response.status_code == CONFLICT:
            raise_response_error(response, ConflictError)
        elif response.status_code == INTERNAL_SERVER_ERROR:
            raise_response_error(response, ServerError)
        elif response.status_code != CREATED:
            raise_response_error(response, ValueError)
        return response.json()

    def create_or_update(self, consumer_id=None, username=None, custom_id=None):
        """Idempotently create or replace a consumer (PUT)."""
        data = {
            'username': username,
            'custom_id': custom_id,
        }
        if consumer_id is not None:
            data['id'] = consumer_id
        response = self.session.put(self.get_url('consumers'), data=data, headers=self.get_headers())
        if response.status_code == CONFLICT:
            raise_response_error(response, ConflictError)
        elif response.status_code == INTERNAL_SERVER_ERROR:
            raise_response_error(response, ServerError)
        elif response.status_code not in (CREATED, OK):
            raise_response_error(response, ValueError)
        return response.json()

    def update(self, username_or_id, **fields):
        """Partially update a consumer (PATCH)."""
        assert_dict_keys_in(fields, ['username', 'custom_id'], INVALID_FIELD_ERROR_TEMPLATE)
        response = self.session.patch(self.get_url('consumers', username_or_id), data=fields,
                                      headers=self.get_headers())
        if response.status_code == INTERNAL_SERVER_ERROR:
            raise_response_error(response, ServerError)
        elif response.status_code != OK:
            raise_response_error(response, ValueError)
        return response.json()

    @backoff.on_exception(backoff.expo, ServerError, max_tries=3)
    def list(self, size=100, offset=None, **filter_fields):
        """List consumers with optional filters and pagination."""
        assert_dict_keys_in(filter_fields, ['id', 'custom_id', 'username'], INVALID_FIELD_ERROR_TEMPLATE)
        query_params = filter_fields
        query_params['size'] = size
        if offset:
            query_params['offset'] = offset
        url = self.get_url('consumers', **query_params)
        response = self.session.get(url, headers=self.get_headers())
        if response.status_code == INTERNAL_SERVER_ERROR:
            raise_response_error(response, ServerError)
        elif response.status_code != OK:
            raise_response_error(response, ValueError)
        return response.json()

    @backoff.on_exception(backoff.expo, ValueError, max_tries=3)
    def delete(self, username_or_id):
        """Delete a consumer; a 404 is treated as already gone."""
        response = self.session.delete(self.get_url('consumers', username_or_id), headers=self.get_headers())
        if response.status_code not in (NO_CONTENT, NOT_FOUND):
            raise ValueError('Could not delete Consumer (status: %s): %s' % (response.status_code, username_or_id))

    @backoff.on_exception(backoff.expo, ServerError, max_tries=3)
    def retrieve(self, username_or_id):
        """Fetch one consumer by username or id; retried on ServerError."""
        response = self.session.get(self.get_url('consumers', username_or_id), headers=self.get_headers())
        if response.status_code == INTERNAL_SERVER_ERROR:
            raise_response_error(response, ServerError)
        elif response.status_code != OK:
            raise_response_error(response, ValueError)
        return response.json()

    def basic_auth(self, username_or_id):
        """Scoped client for the consumer's basic-auth credentials."""
        return BasicAuthAdminClient(self, username_or_id, self.api_url)

    def key_auth(self, username_or_id):
        """Scoped client for the consumer's key-auth credentials."""
        return KeyAuthAdminClient(self, username_or_id, self.api_url)

    def oauth2(self, username_or_id):
        """Scoped client for the consumer's OAuth2 applications."""
        return OAuth2AdminClient(self, username_or_id, self.api_url)
class PluginAdminClient(PluginAdminContract, RestClient):
    """Admin client for the globally installed Kong plugins."""

    def __init__(self, api_url):
        """Create the client against the given Kong admin API URL."""
        super(PluginAdminClient, self).__init__(api_url, headers=get_default_kong_headers())

    def destroy(self):
        """Release the underlying HTTP session."""
        super(PluginAdminClient, self).destroy()

    @backoff.on_exception(backoff.expo, ServerError, max_tries=3)
    def list(self):
        """List all available plugins; retried on ServerError."""
        response = self.session.get(self.get_url('plugins'),
                                    headers=self.get_headers())
        if response.status_code == INTERNAL_SERVER_ERROR:
            raise_response_error(response, ServerError)
        elif response.status_code != OK:
            raise_response_error(response, ValueError)
        return response.json()

    @backoff.on_exception(backoff.expo, ServerError, max_tries=3)
    def retrieve_schema(self, plugin_name):
        """Fetch the configuration schema of one plugin by name."""
        response = self.session.get(self.get_url('plugins', plugin_name, 'schema'),
                                    headers=self.get_headers())
        if response.status_code == INTERNAL_SERVER_ERROR:
            raise_response_error(response, ServerError)
        elif response.status_code != OK:
            raise_response_error(response, ValueError)
        return response.json()
class KongAdminClient(KongAdminContract):
    """Top-level Kong admin client aggregating the per-resource clients."""

    def __init__(self, api_url):
        """Create api/consumer/plugin admin clients for the given URL."""
        super(KongAdminClient, self).__init__(
            apis=APIAdminClient(api_url),
            consumers=ConsumerAdminClient(api_url),
            plugins=PluginAdminClient(api_url))

    def close(self):
        """Tear down every sub-client's HTTP session."""
        for client in (self.apis, self.consumers, self.plugins):
            client.destroy()
| |
#!/usr/bin/env python
from __future__ import print_function
from builtins import input
import argparse
import dateutil.parser
from datetime import datetime
import logging
import os
import subprocess
import sys
import airflow
from airflow import jobs, settings, utils
from airflow.configuration import conf
from airflow.executors import DEFAULT_EXECUTOR
from airflow.models import DagBag, TaskInstance, DagPickle
from airflow.utils import AirflowException
# Directory scanned for DAG definition files by default (from airflow config).
DAGS_FOLDER = os.path.expanduser(conf.get('core', 'DAGS_FOLDER'))
# Common help text across subcommands
mark_success_help = "Mark jobs as succeeded without running them"
subdir_help = "File location or directory from which to look for the dag"
def log_to_stdout():
    """Attach a stdout handler with the standard format to the root logger."""
    root = logging.getLogger()
    root.setLevel(settings.LOGGING_LEVEL)
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
    root.addHandler(handler)
def backfill(args):
    """Run (or dry-run) a DAG over a date range, optionally task-filtered.

    Mutates ``args`` in place: the date strings are replaced by parsed
    datetimes, and a missing start/end date is mirrored from the other.
    """
    logging.basicConfig(
        level=settings.LOGGING_LEVEL,
        format=settings.SIMPLE_LOG_FORMAT)
    dagbag = DagBag(args.subdir)
    if args.dag_id not in dagbag.dags:
        raise AirflowException('dag_id could not be found')
    dag = dagbag.dags[args.dag_id]
    if args.start_date:
        args.start_date = dateutil.parser.parse(args.start_date)
    if args.end_date:
        args.end_date = dateutil.parser.parse(args.end_date)
    # If only one date is passed, using same as start and end
    args.end_date = args.end_date or args.start_date
    args.start_date = args.start_date or args.end_date
    if args.task_regex:
        # Restrict the DAG to the matching tasks; -i skips their upstreams.
        dag = dag.sub_dag(
            task_regex=args.task_regex,
            include_upstream=not args.ignore_dependencies)
    if args.dry_run:
        # Render each task for the start date without touching any state.
        print("Dry run of DAG {0} on {1}".format(args.dag_id,
              args.start_date))
        for task in dag.tasks:
            print("Task {0}".format(task.task_id))
            ti = TaskInstance(task, args.start_date)
            ti.dry_run()
    else:
        dag.run(
            start_date=args.start_date,
            end_date=args.end_date,
            mark_success=args.mark_success,
            include_adhoc=args.include_adhoc,
            local=args.local,
            donot_pickle=(args.donot_pickle or conf.getboolean('core', 'donot_pickle')),
            ignore_dependencies=args.ignore_dependencies)
def run(args):
    """Run a single task instance: locally, raw, or through the executor."""
    utils.pessimistic_connection_handling()
    # Setting up logging: one log file per (dag_id, task_id, execution date).
    log = os.path.expanduser(conf.get('core', 'BASE_LOG_FOLDER'))
    directory = log + "/{args.dag_id}/{args.task_id}".format(args=args)
    if not os.path.exists(directory):
        os.makedirs(directory)
    args.execution_date = dateutil.parser.parse(args.execution_date)
    iso = args.execution_date.isoformat()
    filename = "{directory}/{iso}".format(**locals())
    subdir = None
    if args.subdir:
        # The literal token "DAGS_FOLDER" in --subdir stands for the
        # configured DAGs folder.
        subdir = args.subdir.replace(
            "DAGS_FOLDER", conf.get("core", "DAGS_FOLDER"))
        subdir = os.path.expanduser(subdir)
    logging.basicConfig(
        filename=filename,
        level=settings.LOGGING_LEVEL,
        format=settings.LOG_FORMAT)
    if not args.pickle:
        # Load the DAG from the filesystem.
        dagbag = DagBag(subdir)
        if args.dag_id not in dagbag.dags:
            msg = 'DAG [{0}] could not be found'.format(args.dag_id)
            logging.error(msg)
            raise AirflowException(msg)
        dag = dagbag.dags[args.dag_id]
        task = dag.get_task(task_id=args.task_id)
    else:
        # Load a previously pickled DAG from the metadata database.
        session = settings.Session()
        logging.info('Loading pickle id {args.pickle}'.format(**locals()))
        dag_pickle = session.query(
            DagPickle).filter(DagPickle.id == args.pickle).first()
        if not dag_pickle:
            raise AirflowException("Who hid the pickle!? [missing pickle]")
        dag = dag_pickle.pickle
        task = dag.get_task(task_id=args.task_id)
    task_start_date = None
    if args.task_start_date:
        # Internal flag: override the task's start_date for this run.
        task_start_date = dateutil.parser.parse(args.task_start_date)
        task.start_date = task_start_date
    ti = TaskInstance(task, args.execution_date)
    if args.local:
        # Wrap the task instance in a LocalTaskJob in this process tree.
        print("Logging into: " + filename)
        run_job = jobs.LocalTaskJob(
            task_instance=ti,
            mark_success=args.mark_success,
            force=args.force,
            pickle_id=args.pickle,
            task_start_date=task_start_date,
            ignore_dependencies=args.ignore_dependencies)
        run_job.run()
    elif args.raw:
        # Raw mode: execute the task instance directly, no job wrapper.
        ti.run(
            mark_success=args.mark_success,
            force=args.force,
            ignore_dependencies=args.ignore_dependencies,
            job_id=args.job_id,
        )
    else:
        # Default: hand the task instance over to the configured executor.
        pickle_id = None
        if args.ship_dag:
            try:
                # Running remotely, so pickling the DAG
                session = settings.Session()
                pickle = DagPickle(dag)
                session.add(pickle)
                session.commit()
                pickle_id = pickle.id
                print((
                    'Pickled dag {dag} '
                    'as pickle_id:{pickle_id}').format(**locals()))
            except Exception as e:
                print('Could not pickle the DAG')
                print(e)
                raise e
        executor = DEFAULT_EXECUTOR
        executor.start()
        print("Sending to executor.")
        executor.queue_task_instance(
            ti,
            mark_success=args.mark_success,
            pickle_id=pickle_id,
            ignore_dependencies=args.ignore_dependencies,
            force=args.force)
        executor.heartbeat()
        executor.end()
def task_state(args):
    """
    Returns the state of a TaskInstance at the command line.
    >>> airflow task_state tutorial sleep 2015-01-01
    success
    """
    args.execution_date = dateutil.parser.parse(args.execution_date)
    dagbag = DagBag(args.subdir)
    if args.dag_id not in dagbag.dags:
        raise AirflowException('dag_id could not be found')
    task = dagbag.dags[args.dag_id].get_task(task_id=args.task_id)
    instance = TaskInstance(task, args.execution_date)
    print(instance.current_state())
def list_dags(args):
    """Print the ids of all DAGs found under args.subdir, sorted."""
    dag_ids = sorted(DagBag(args.subdir).dags)
    print("\n".join(dag_ids))
def list_tasks(args):
    """Print the task ids of one DAG, or its tree view with --tree."""
    dagbag = DagBag(args.subdir)
    if args.dag_id not in dagbag.dags:
        raise AirflowException('dag_id could not be found')
    dag = dagbag.dags[args.dag_id]
    if args.tree:
        dag.tree_view()
    else:
        # Fix: the task ids were previously sorted twice; once is enough.
        print("\n".join(sorted(t.task_id for t in dag.tasks)))
def test(args):
    """Run a single task instance in test mode (no deps, no state check)."""
    log_to_stdout()
    args.execution_date = dateutil.parser.parse(args.execution_date)
    dagbag = DagBag(args.subdir)
    if args.dag_id not in dagbag.dags:
        raise AirflowException('dag_id could not be found')
    task = dagbag.dags[args.dag_id].get_task(task_id=args.task_id)
    ti = TaskInstance(task, args.execution_date)
    if args.dry_run:
        ti.dry_run()
    else:
        ti.run(force=True, ignore_dependencies=True, test_mode=True)
def clear(args):
    """Clear task instance state for a DAG over an optional date range.

    Mutates ``args`` in place: date strings become parsed datetimes.
    """
    logging.basicConfig(
        level=settings.LOGGING_LEVEL,
        format=settings.SIMPLE_LOG_FORMAT)
    dagbag = DagBag(args.subdir)
    if args.dag_id not in dagbag.dags:
        raise AirflowException('dag_id could not be found')
    dag = dagbag.dags[args.dag_id]
    if args.start_date:
        args.start_date = dateutil.parser.parse(args.start_date)
    if args.end_date:
        args.end_date = dateutil.parser.parse(args.end_date)
    if args.task_regex:
        # Restrict the DAG to matching tasks, plus up/downstream on request.
        dag = dag.sub_dag(
            task_regex=args.task_regex,
            include_downstream=args.downstream,
            include_upstream=args.upstream,
        )
    dag.clear(
        start_date=args.start_date,
        end_date=args.end_date,
        only_failed=args.only_failed,
        only_running=args.only_running,
        confirm_prompt=not args.no_confirm)
def webserver(args):
    """Start the Airflow webserver (Flask debug server or gunicorn)."""
    print(settings.HEADER)
    log_to_stdout()
    from airflow.www.app import app
    # Fall back to the configured thread count when not given on the CLI.
    threads = args.threads or conf.get('webserver', 'threads')
    if args.debug:
        print(
            "Starting the web server on port {0} and host {1}.".format(
                args.port, args.hostname))
        app.run(debug=True, port=args.port, host=args.hostname)
    else:
        # Fixes: the status message was missing a space ("...{threads}on
        # host..."), and the resolved ``threads`` value was computed but
        # never used (gunicorn got the raw args.threads instead).
        print(
            'Running the Gunicorn server with {threads} threads '
            'on host {args.hostname} and port '
            '{args.port}...'.format(**locals()))
        sp = subprocess.Popen([
            'gunicorn', '-w', str(threads), '-t', '120', '-b',
            args.hostname + ':' + str(args.port), 'airflow.www.app:app'])
        sp.wait()
def scheduler(args):
    """Start a SchedulerJob with the requested options and run it."""
    print(settings.HEADER)
    log_to_stdout()
    scheduler_job = jobs.SchedulerJob(
        dag_id=args.dag_id,
        subdir=args.subdir,
        num_runs=args.num_runs,
        do_pickle=args.do_pickle)
    scheduler_job.run()
def serve_logs(args):
    """Run a tiny flask app that serves task log files over HTTP."""
    print("Starting flask")
    import flask
    flask_app = flask.Flask(__name__)

    @flask_app.route('/log/<path:filename>')
    def serve_logs(filename):
        # NOTE(review): deliberately shadows the enclosing function name;
        # it is only registered as a flask view, never called by name.
        log = os.path.expanduser(conf.get('core', 'BASE_LOG_FOLDER'))
        return flask.send_from_directory(
            log,
            filename,
            mimetype="application/json",
            as_attachment=False)
    # Port the Celery workers expose their logs on.
    WORKER_LOG_SERVER_PORT = \
        int(conf.get('celery', 'WORKER_LOG_SERVER_PORT'))
    flask_app.run(
        host='0.0.0.0', port=WORKER_LOG_SERVER_PORT)
def worker(args):
    """Start a Celery worker plus a sidecar process serving its log files."""
    # Worker to serve static log files through this simple flask app
    env = os.environ.copy()
    env['AIRFLOW_HOME'] = settings.AIRFLOW_HOME
    log_server = subprocess.Popen(
        ['airflow', 'serve_logs'],
        env=env,
    )
    # Celery worker
    from airflow.executors.celery_executor import app as celery_app
    from celery.bin import worker as worker_bin
    celery_worker = worker_bin.worker(app=celery_app)
    celery_worker.run(
        optimization='fair',
        O='fair',
        queues=args.queues,
    )
    log_server.kill()
def initdb(args):
    """Create the Airflow metadata database tables."""
    print("DB: " + conf.get('core', 'SQL_ALCHEMY_CONN'))
    utils.initdb()
    print("Done.")
def resetdb(args):
    """Drop and recreate the metadata database, after confirmation."""
    print("DB: " + conf.get('core', 'SQL_ALCHEMY_CONN'))
    answer = input(
        "This will drop existing tables if they exist. "
        "Proceed? (y/n)")
    if answer.upper() == "Y":
        logging.basicConfig(level=settings.LOGGING_LEVEL,
                            format=settings.SIMPLE_LOG_FORMAT)
        utils.resetdb()
    else:
        print("Bail.")
def upgradedb(args):
    """Apply any pending migrations to the metadata database."""
    print("DB: " + conf.get('core', 'SQL_ALCHEMY_CONN'))
    utils.upgradedb()
def version(args):
    """Print the Airflow header together with the installed version."""
    print(settings.HEADER + " v" + airflow.__version__)
def flower(args):
    """Start Celery Flower against the configured broker.

    Fix: previously an empty string was always appended to the command
    line when no --broker_api was given; the argument is now added only
    when it is actually set.
    """
    broka = conf.get('celery', 'BROKER_URL')
    args.port = args.port or conf.get('celery', 'FLOWER_PORT')
    cmd = ['flower', '-b', broka, '--port=' + args.port]
    if args.broker_api:
        cmd.append('--broker_api=' + args.broker_api)
    sp = subprocess.Popen(cmd)
    sp.wait()
def get_parser():
    """Build the argparse parser: one subcommand per CLI action.

    Fix: the ``-c/--no_confirm`` option of ``clear`` previously reused the
    stale help text "Include downstream tasks" left over in ``ht``; it now
    gets its own accurate help string.
    """
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(help='sub-command help')

    # -- backfill ---------------------------------------------------------
    ht = "Run subsections of a DAG for a specified date range"
    parser_backfill = subparsers.add_parser('backfill', help=ht)
    parser_backfill.add_argument("dag_id", help="The id of the dag to run")
    parser_backfill.add_argument(
        "-t", "--task_regex",
        help="The regex to filter specific task_ids to backfill (optional)")
    parser_backfill.add_argument(
        "-s", "--start_date", help="Override start_date YYYY-MM-DD")
    parser_backfill.add_argument(
        "-e", "--end_date", help="Override end_date YYYY-MM-DD")
    parser_backfill.add_argument(
        "-m", "--mark_success",
        help=mark_success_help, action="store_true")
    parser_backfill.add_argument(
        "-l", "--local",
        help="Run the task using the LocalExecutor", action="store_true")
    parser_backfill.add_argument(
        "-x", "--donot_pickle",
        help=(
            "Do not attempt to pickle the DAG object to send over "
            "to the workers, just tell the workers to run their version "
            "of the code."),
        action="store_true")
    parser_backfill.add_argument(
        "-a", "--include_adhoc",
        help="Include dags with the adhoc parameter.", action="store_true")
    parser_backfill.add_argument(
        "-i", "--ignore_dependencies",
        help=(
            "Skip upstream tasks, run only the tasks "
            "matching the regexp. Only works in conjunction with task_regex"),
        action="store_true")
    parser_backfill.add_argument(
        "-sd", "--subdir", help=subdir_help,
        default=DAGS_FOLDER)
    parser_backfill.add_argument(
        "-dr", "--dry_run", help="Perform a dry run", action="store_true")
    parser_backfill.set_defaults(func=backfill)

    # -- clear ------------------------------------------------------------
    ht = "Clear a set of task instance, as if they never ran"
    parser_clear = subparsers.add_parser('clear', help=ht)
    parser_clear.add_argument("dag_id", help="The id of the dag to run")
    parser_clear.add_argument(
        "-t", "--task_regex",
        help="The regex to filter specific task_ids to clear (optional)")
    parser_clear.add_argument(
        "-s", "--start_date", help="Override start_date YYYY-MM-DD")
    parser_clear.add_argument(
        "-e", "--end_date", help="Override end_date YYYY-MM-DD")
    ht = "Include upstream tasks"
    parser_clear.add_argument(
        "-u", "--upstream", help=ht, action="store_true")
    ht = "Only failed jobs"
    parser_clear.add_argument(
        "-f", "--only_failed", help=ht, action="store_true")
    ht = "Only running jobs"
    parser_clear.add_argument(
        "-r", "--only_running", help=ht, action="store_true")
    ht = "Include downstream tasks"
    parser_clear.add_argument(
        "-d", "--downstream", help=ht, action="store_true")
    parser_clear.add_argument(
        "-sd", "--subdir", help=subdir_help,
        default=DAGS_FOLDER)
    # Fix: -c previously inherited the "Include downstream tasks" help text.
    ht = "Do not request confirmation"
    parser_clear.add_argument(
        "-c", "--no_confirm", help=ht, action="store_true")
    parser_clear.set_defaults(func=clear)

    # -- run --------------------------------------------------------------
    ht = "Run a single task instance"
    parser_run = subparsers.add_parser('run', help=ht)
    parser_run.add_argument("dag_id", help="The id of the dag to run")
    parser_run.add_argument("task_id", help="The task_id to run")
    parser_run.add_argument(
        "execution_date", help="The execution date to run")
    parser_run.add_argument(
        "-sd", "--subdir", help=subdir_help,
        default=DAGS_FOLDER)
    parser_run.add_argument(
        "-s", "--task_start_date",
        help="Override the tasks's start_date (used internally)",)
    parser_run.add_argument(
        "-m", "--mark_success", help=mark_success_help, action="store_true")
    parser_run.add_argument(
        "-f", "--force",
        help="Force a run regardless or previous success",
        action="store_true")
    parser_run.add_argument(
        "-l", "--local",
        help="Runs the task locally, don't use the executor",
        action="store_true")
    parser_run.add_argument(
        "-r", "--raw",
        help=argparse.SUPPRESS,
        action="store_true")
    parser_run.add_argument(
        "-i", "--ignore_dependencies",
        help="Ignore upstream and depends_on_past dependencies",
        action="store_true")
    parser_run.add_argument(
        "--ship_dag",
        help="Pickles (serializes) the DAG and ships it to the worker",
        action="store_true")
    parser_run.add_argument(
        "-p", "--pickle",
        help="Serialized pickle object of the entire dag (used internally)")
    parser_run.add_argument(
        "-j", "--job_id", help=argparse.SUPPRESS)
    parser_run.set_defaults(func=run)

    # -- test -------------------------------------------------------------
    ht = (
        "Test a task instance. This will run a task without checking for "
        "dependencies or recording it's state in the database."
    )
    parser_test = subparsers.add_parser('test', help=ht)
    parser_test.add_argument("dag_id", help="The id of the dag to run")
    parser_test.add_argument("task_id", help="The task_id to run")
    parser_test.add_argument(
        "execution_date", help="The execution date to run")
    parser_test.add_argument(
        "-sd", "--subdir", help=subdir_help,
        default=DAGS_FOLDER)
    parser_test.add_argument(
        "-dr", "--dry_run", help="Perform a dry run", action="store_true")
    parser_test.set_defaults(func=test)

    # -- task_state -------------------------------------------------------
    ht = "Get the status of a task instance."
    parser_task_state = subparsers.add_parser('task_state', help=ht)
    parser_task_state.add_argument("dag_id", help="The id of the dag to check")
    parser_task_state.add_argument("task_id", help="The task_id to check")
    parser_task_state.add_argument(
        "execution_date", help="The execution date to check")
    parser_task_state.add_argument(
        "-sd", "--subdir", help=subdir_help,
        default=DAGS_FOLDER)
    parser_task_state.set_defaults(func=task_state)

    # -- webserver --------------------------------------------------------
    ht = "Start a Airflow webserver instance"
    parser_webserver = subparsers.add_parser('webserver', help=ht)
    parser_webserver.add_argument(
        "-p", "--port",
        default=conf.get('webserver', 'WEB_SERVER_PORT'),
        type=int,
        help="Set the port on which to run the web server")
    parser_webserver.add_argument(
        "-w", "--threads",
        default=conf.get('webserver', 'THREADS'),
        type=int,
        help="Number of threads to run the webserver on")
    parser_webserver.add_argument(
        "-hn", "--hostname",
        default=conf.get('webserver', 'WEB_SERVER_HOST'),
        help="Set the hostname on which to run the web server")
    ht = "Use the server that ships with Flask in debug mode"
    parser_webserver.add_argument(
        "-d", "--debug", help=ht, action="store_true")
    parser_webserver.set_defaults(func=webserver)

    # -- scheduler --------------------------------------------------------
    ht = "Start a scheduler scheduler instance"
    parser_scheduler = subparsers.add_parser('scheduler', help=ht)
    parser_scheduler.add_argument(
        "-d", "--dag_id", help="The id of the dag to run")
    parser_scheduler.add_argument(
        "-sd", "--subdir", help=subdir_help,
        default=DAGS_FOLDER)
    parser_scheduler.add_argument(
        "-n", "--num_runs",
        default=None,
        type=int,
        help="Set the number of runs to execute before exiting")
    parser_scheduler.add_argument(
        "-p", "--do_pickle",
        default=False,
        help=(
            "Attempt to pickle the DAG object to send over "
            "to the workers, instead of letting workers run their version "
            "of the code."),
        action="store_true")
    parser_scheduler.set_defaults(func=scheduler)

    # -- database management ----------------------------------------------
    ht = "Initialize the metadata database"
    parser_initdb = subparsers.add_parser('initdb', help=ht)
    parser_initdb.set_defaults(func=initdb)
    ht = "Burn down and rebuild the metadata database"
    parser_resetdb = subparsers.add_parser('resetdb', help=ht)
    parser_resetdb.set_defaults(func=resetdb)
    ht = "Upgrade metadata database to latest version"
    parser_upgradedb = subparsers.add_parser('upgradedb', help=ht)
    parser_upgradedb.set_defaults(func=upgradedb)

    # -- listing ----------------------------------------------------------
    ht = "List the DAGs"
    parser_list_dags = subparsers.add_parser('list_dags', help=ht)
    parser_list_dags.add_argument(
        "-sd", "--subdir", help=subdir_help,
        default=DAGS_FOLDER)
    parser_list_dags.set_defaults(func=list_dags)
    ht = "List the tasks within a DAG"
    parser_list_tasks = subparsers.add_parser('list_tasks', help=ht)
    parser_list_tasks.add_argument(
        "-t", "--tree", help="Tree view", action="store_true")
    parser_list_tasks.add_argument(
        "dag_id", help="The id of the dag")
    parser_list_tasks.add_argument(
        "-sd", "--subdir", help=subdir_help,
        default=DAGS_FOLDER)
    parser_list_tasks.set_defaults(func=list_tasks)

    # -- celery -----------------------------------------------------------
    ht = "Start a Celery worker node"
    parser_worker = subparsers.add_parser('worker', help=ht)
    parser_worker.add_argument(
        "-q", "--queues",
        help="Comma delimited list of queues to serve",
        default=conf.get('celery', 'DEFAULT_QUEUE'))
    parser_worker.set_defaults(func=worker)
    ht = "Serve logs generate by worker"
    parser_logs = subparsers.add_parser('serve_logs', help=ht)
    parser_logs.set_defaults(func=serve_logs)
    ht = "Start a Celery Flower"
    parser_flower = subparsers.add_parser('flower', help=ht)
    parser_flower.add_argument(
        "-p", "--port", help="The port")
    parser_flower.add_argument(
        "-a", "--broker_api", help="Broker api")
    parser_flower.set_defaults(func=flower)

    # -- version ----------------------------------------------------------
    parser_version = subparsers.add_parser('version', help="Show version")
    parser_version.set_defaults(func=version)
    return parser
| |
#
# Copyright (c) 2019 - 2021 StorPool.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Tests for the storpool.spconfig.SPConfig class. """
import collections
import errno
import itertools
import os
import mock
import pytest
from storpool import spconfig
# A single simulated configuration file: its path, whether it exists on
# the fake filesystem, and the {section: {var: value}} data it contains.
ConfigData = collections.namedtuple('ConfigData', [
    'filename',
    'exists',
    'data',
])
# The canned configuration files served by the Fake* backends below.
CONFIG_DATA = (
    ConfigData(
        filename='/etc/storpool.conf',
        exists=True,
        data={'beleriand': {'SP_CACHE_SIZE': '8192'}},
    ),
    ConfigData(
        filename='/etc/storpool.conf.d/local.conf',
        exists=True,
        # The '' section's values also show up when querying the
        # 'beleriand' section (see test_success).
        data={'': {'c': '3'}, 'beleriand': {'a': '4'}},
    ),
    ConfigData(
        # Deliberately marked as missing: exercises the FakeINICheck
        # error path in test_file_not_found.
        filename='/etc/storpool.conf.d/storpool.conf',
        exists=False,
        data={},
    ),
)
# Convenience lookup: file path -> ConfigData record.
CONFIG_FILES = {
    item.filename: item for item in CONFIG_DATA
}
# Paths probed by get_config_files(); the value says whether the path
# should end up in the returned list of configuration files.
TEST_CONFIG_FILES = {
    "/etc/storpool.conf": True,
    "/etc/storpool-defaults.conf": False,
    "/etc/storpool.conf.d/.hidden.conf": False,
    "/etc/storpool.conf.d/local.conf": True,
    "/etc/storpool.conf.d/local.confx": False,
    "/etc/storpool.conf.d/server.conf.bak": False,
    "/etc/storpool.conf.d/storpool.conf": True,
    "/etc/storpool.conf.d/storpool.conf~": False,
    "/usr/lib/storpool/storpool.conf": False,
}
# Simulated os.listdir() output per directory: the base names from
# TEST_CONFIG_FILES plus a subdirectory and an extra *.conf entry that
# only listdir() reveals (it is not a key of TEST_CONFIG_FILES).
TEST_CONFIG_FILES_LISTDIR = dict(
    (
        dirname,
        ["subdir", "another-subdir.conf"]
        + sorted(filename for _, filename in files)
    )
    for dirname, files in itertools.groupby(
        sorted(os.path.split(fname) for fname in TEST_CONFIG_FILES),
        lambda item: item[0],
    )
)
def fake_get_config_files(_cls, missing_ok=False):
    """ Simulate looking for the StorPool configuration files. """
    # Reference the keyword argument so both call forms are exercised.
    assert missing_ok is not None
    paths = []
    for entry in CONFIG_DATA:
        paths.append(entry.filename)
    return paths
class FakeConfig(object):
    # pylint: disable=too-few-public-methods
    """ A minimal stand-in for a confget.Config settings holder. """
    def __init__(self, varnames, filename='(invalid)'):
        """ Validate the arguments and remember the file name. """
        assert filename in CONFIG_FILES
        assert varnames == []
        self.filename = filename
class FakeINI(object):
    # pylint: disable=too-few-public-methods
    """ A stand-in for the confget.backend.ini.INIBackend reader. """
    def __init__(self, config):
        """ Remember the fake Config object describing the file to read. """
        assert isinstance(config, FakeConfig)
        self.config = config
    def read_file(self):
        """ Pretend to parse the INI file: serve the canned data. """
        entry = CONFIG_FILES[self.config.filename]
        return entry.data
class FakeINICheck(object):
    # pylint: disable=too-few-public-methods
    """ A confget INI backend stand-in that honors the 'exists' flag. """
    def __init__(self, config):
        """ Remember the fake Config object describing the file to read. """
        assert isinstance(config, FakeConfig)
        self.config = config
    def read_file(self):
        """ Serve the canned data, or raise ENOENT for a missing file. """
        entry = CONFIG_FILES[self.config.filename]
        if entry.exists:
            return entry.data
        raise IOError(
            errno.ENOENT,
            "No such file or directory",
            self.config.filename,
        )
def _check_beleriand_config(cfg):
    """ Assert that *cfg* exposes the expected merged 'beleriand' settings.

    The expected values come from CONFIG_DATA: the defaults from
    spconfig.DEFAULTS, overridden/extended by SP_CACHE_SIZE, 'a' and 'c'.
    """
    assert cfg['SP_CACHE_SIZE'] == '8192'
    with pytest.raises(KeyError):
        assert cfg['d'] == 'we should never get here, right?'
    assert cfg.get('a', 42) == '4'
    assert cfg.get('d', 42) == 42
    assert dict(cfg.items()) == dict(
        set(
            item
            for item in spconfig.DEFAULTS.items()
            if item[0] != 'SP_CACHE_SIZE'
        )
        | set({'SP_CACHE_SIZE': '8192', 'a': '4', 'c': '3'}.items())
    )
    assert (
        sorted(set(cfg.keys()) - set(spconfig.DEFAULTS.keys()))
    ) == ['a', 'c']
    assert sorted(
        set(cfg.iteritems()) - set(spconfig.DEFAULTS.items())
    ) == [('SP_CACHE_SIZE', '8192'), ('a', '4'), ('c', '3')]
    assert sorted(
        set(cfg.iterkeys()) - set(spconfig.DEFAULTS.keys())
    ) == ['a', 'c']


@mock.patch('storpool.spconfig.SPConfig.get_config_files',
            new=fake_get_config_files)
@mock.patch('confget.Config', new=FakeConfig)
@mock.patch('confget.BACKENDS', new={'ini': FakeINI})
def test_success():
    """ Test that a SPConfig object behaves almost like a dictionary.

    The same checks are run with and without missing_ok; the previous
    version duplicated the whole assertion body for each case.
    """
    _check_beleriand_config(spconfig.SPConfig(section='beleriand', missing_ok=True))
    _check_beleriand_config(spconfig.SPConfig(section='beleriand'))
@mock.patch('storpool.spconfig.SPConfig.get_config_files',
            new=fake_get_config_files)
@mock.patch('confget.Config', new=FakeConfig)
@mock.patch('confget.BACKENDS', new={'ini': FakeINICheck})
def test_file_not_found():
    """ Verify that a missing configuration file makes SPConfig raise. """
    with pytest.raises(spconfig.SPConfigException) as exc_info:
        spconfig.SPConfig(section='beleriand', missing_ok=True)
    # The error message must identify the file marked as missing.
    message = str(exc_info.value)
    assert "/etc/storpool.conf.d/storpool.conf" in message
def test_get_config_files():
    """Test that SPConfig.get_config_files() works properly."""
    # Record which paths the implementation stats as dirs/files.
    dirs_checked = set()
    files_checked = set()
    def mock_listdir(dirname):
        """Mock os.listdir(), return our synthetic filesystem's contents."""
        return TEST_CONFIG_FILES_LISTDIR[dirname]
    def mock_is_dir(path):
        """Mock os.path.isdir(), check and record."""
        dirs_checked.add(path)
        return path in TEST_CONFIG_FILES_LISTDIR
    def mock_is_file(path):
        """Mock os.path.isfile(), check and record."""
        files_checked.add(path)
        return path in TEST_CONFIG_FILES
    # First pass: with missing_ok=True the candidate files are stat'ed
    # (see files_checked below) and non-files are filtered out.
    with mock.patch("os.listdir", new=mock_listdir), mock.patch(
        "os.path.isdir", new=mock_is_dir
    ), mock.patch(
        "os.path.isfile", new=mock_is_file
    ):
        res = set(spconfig.SPConfig.get_config_files(missing_ok=True))
    assert dirs_checked == set(["/etc/storpool.conf.d"])
    assert files_checked == set(
        [
            "/etc/storpool.conf",
            "/etc/storpool.conf.d/local.conf",
            "/etc/storpool.conf.d/storpool.conf",
            "/etc/storpool.conf.d/another-subdir.conf",
        ]
    )
    assert res == set(
        filename
        for filename, wanted in TEST_CONFIG_FILES.items()
        if wanted
    )
    dirs_checked.clear()
    files_checked.clear()
    # Second pass: without missing_ok no files are stat'ed at all, so the
    # listdir-only "another-subdir.conf" entry is reported as well.
    with mock.patch("os.listdir", new=mock_listdir), mock.patch(
        "os.path.isdir", new=mock_is_dir
    ), mock.patch(
        "os.path.isfile", new=mock_is_file
    ):
        res = set(spconfig.SPConfig.get_config_files())
    assert dirs_checked == set(["/etc/storpool.conf.d"])
    assert not files_checked
    assert res == set(
        filename
        for filename, wanted in TEST_CONFIG_FILES.items()
        if wanted
    ) | set(["/etc/storpool.conf.d/another-subdir.conf"])
| |
from PyQt4 import QtCore
from PyQt4.QtCore import QVariant, QObject, pyqtSignal
class TreeItem(object):
    """Base node of the ROI tree.

    Holds a list of children and a reference to the owning model; all
    structural changes go through the model's beginInsertRows /
    beginRemoveRows pairs so attached views stay in sync.
    """
    def __init__(self, model, parent=None):
        super(TreeItem, self).__init__()
        self.__children = []
        self.__parent = parent
        self.__model = model
    @property
    def model(self):
        """The treemodel where this node is attached."""
        return self.__model
    @property
    def mask(self):
        """ return the mask associated with the item, or None if there is no mask."""
        return None
    @property
    def name(self):
        """ the name of the group. if there is a mask attached return mask name, otherwise group name."""
        raise NotImplementedError("implement in derived class")
    def child(self, row):
        """Return the child item at the given row."""
        return self.children[row]
    def find(self, mask):
        """find the treeitem of the given mask"""
        raise NotImplementedError("implement in derived class")
    def add(self, children):
        """Append one item or an iterable of items; return the first new row."""
        children = children if hasattr(children, '__iter__') else [children]
        i = len(self)
        self.model.beginInsertRows(self.index, len(self), len(self) + len(children) - 1)
        self.__children.extend(children)
        self.model.endInsertRows()
        # NOTE(review): emitting dataChanged with two invalid indices looks
        # like a blanket "something changed" refresh hint -- confirm views
        # depend on it before changing.
        self.model.dataChanged.emit(QtCore.QModelIndex(), QtCore.QModelIndex())
        return i
    def remove(self, child=None, slice=None):
        """Remove a single *child*, or the children selected by *slice*.

        NOTE: the second parameter deliberately shadows the builtin
        ``slice``; callers pass e.g. ``slice=slice(None)`` (see
        RoiItem.on_mask_changed), so the name cannot be changed safely.
        """
        if child is not None:
            childi = self.row(child)
            self.model.beginRemoveRows(self.index, childi, childi)
            self.__children.remove(child)
            self.model.endRemoveRows()
        elif slice is not None:
            # indices == (start, stop, step); the removed rows are assumed
            # contiguous (step 1), so the last row is stop - 1.
            indices = slice.indices(len(self))
            self.model.beginRemoveRows(self.index, indices[0], indices[0] + indices[1] - indices[0] - 1)
            del self.__children[slice]
            self.model.endRemoveRows()
    @property
    def index(self):
        """Get the model index of this tree item"""
        # root does have default model index
        if self.__parent is None:
            return QtCore.QModelIndex()
        else:
            return self.model.index(self.parent.row(self), 0, self.parent.index)
    @property
    def parent(self):
        """The parent TreeItem, or None for the root."""
        return self.__parent
    @property
    def children(self):
        """The list of child TreeItems (mutate only via add/remove)."""
        return self.__children
    def row(self, child):
        """Return the row (list position) of *child* within this item."""
        return self.children.index(child)
    def __len__(self):
        """Number of direct children."""
        return len(self.__children)
class RootItem(TreeItem):
    """Invisible root of the tree; groups masks by their Python type."""
    def __init__(self, model):
        TreeItem.__init__(self, model=model)
        # keep track of type to child index mapping
        self.type2group = {}  # todo rename to better name
    def add(self, mask):
        """return the added item"""
        # check if we have a group for the type of the mask
        if type(mask) not in self.type2group:
            group = self.type2group[type(mask)] = RoiGroupItem(model=self.model, parent=self,
                                                              name=str(type(mask).__name__))
            TreeItem.add(self, group)
        # call on_added on child item
        index = self.row(self.type2group[type(mask)])
        self.child(index).add(mask)
    def find(self, mask):
        """Return the tree node that holds given mask."""
        # because the mask could be a child mask of any type we need to search all groups
        for child in self.children:
            node = child.find(mask)
            if node is not None:
                return node
        return None
    @TreeItem.mask.getter
    def mask(self):
        """The root node carries no mask."""
        return None
    @TreeItem.name.getter
    def name(self):
        """Fixed display name of the (invisible) root item."""
        return "Root"
    def remove(self, mask):
        """Remove *mask* from its type group; drop the group once empty."""
        # remove the mask from the respective child item
        index = self.row(self.type2group[type(mask)])
        self.child(index).remove(mask)
        # check if there are still other items within the group
        if len(self.child(index)) == 0:
            TreeItem.remove(self, self.child(index))
            del self.type2group[type(mask)]
class RoiGroupItem(TreeItem):
    """Groups all masks of one Python type under a common, named node."""
    def __init__(self, model, parent, name):
        TreeItem.__init__(self, model=model, parent=parent)
        self.__name = name
    @TreeItem.name.getter
    def name(self):
        """The group's display name (the mask type name)."""
        return self.__name
    def find(self, mask):
        """Return the descendant item holding *mask*, or None."""
        for child in self.children:
            node = child.find(mask)
            if node is not None:
                return node
        return None
    def add(self, mask):
        """Wrap *mask* in a RoiItem and insert it into this group."""
        child = RoiItem(model=self.model, parent=self)
        TreeItem.add(self, child)
        # set mask after insertion into tree, because setting the mask may require adding sub masks.
        # adding sub masks required the parent item to be in the tree already
        child.mask = mask
    def remove(self, mask):
        """Remove the direct child item that holds *mask*."""
        for node in self.children:
            if node.mask is mask:
                return TreeItem.remove(self, node)
        # mask needs to be one of the direct child nodes
        assert (False)
    def __repr__(self):
        return self.__name
    @TreeItem.mask.getter
    def mask(self):
        """Group nodes carry no mask of their own."""
        return None
class RoiItem(TreeItem):
    """Tree node holding one mask; mirrors the mask's sub-masks as children."""
    def __init__(self, parent, model):
        super(RoiItem, self).__init__(parent=parent, model=model)
        self.__mask = None
    @property
    def mask(self):
        """The mask held by this item (None until assigned)."""
        return self.__mask
    @mask.setter
    def mask(self, m):
        """Attach *m*, build child items for its sub-masks, hook its change event."""
        self.__mask = m
        if hasattr(self.mask, "children"):
            TreeItem.add(self, [RoiItem(parent=self, model=self.model) for child in self.mask.children])
            for item, mask in zip(self.children, self.mask.children):
                item.mask = mask
        if hasattr(self.mask, "changed"):
            self.mask.changed.append(self.on_mask_changed)
    def find(self, mask):
        """Return the item holding *mask* in this node or its subtree, else None."""
        if mask is self.__mask:
            return self
        for child in self.children:
            node = child.find(mask)
            if node is not None:
                return node
        return None
    @TreeItem.name.getter
    def name(self):
        """Expose the mask's own name as the item name."""
        return self.__mask.name
    def on_mask_changed(self, mask):
        """Rebuild the child items after the mask's structure changed."""
        assert (mask is self.mask)
        # remove all children
        TreeItem.remove(self, slice=slice(None))
        # add all children
        if (hasattr(self.mask, "children")):
            TreeItem.add(self, [RoiItem(parent=self, model=self.model) for child in self.mask.children])
            for item, mask in zip(self.children, self.mask.children):
                item.mask = mask
    def __repr__(self):
        # NOTE(review): the trailing '*' appears to flag masks whose
        # events.indices is non-empty -- confirm the intended meaning.
        if hasattr(self.mask, "events"):
            if len(self.mask.events.indices) > 0:
                return self.mask.name + "*"
        return self.mask.name
class RoiTreeModel(QtCore.QAbstractItemModel):
    """Qt item model exposing the mask/ROI collection as a two-level tree.

    Masks are grouped by type under RoiGroupItem nodes; changes to the
    underlying collection are forwarded through the mask_added /
    mask_removed signals into the tree.
    """
    mask_added = pyqtSignal(object)
    mask_removed = pyqtSignal(object)
    def __init__(self, rois, parent=None):
        super(RoiTreeModel, self).__init__(parent)
        self.root = RootItem(model=self)
        self.masks = rois
        # notify the data tree about changes, to do this, proxy the events into the qt event loop
        self.masks.added.append(self.mask_added.emit)
        self.masks.preremove.append(self.mask_removed.emit)
        # now connect to the own signals
        self.mask_added.connect(self.root.add)
        self.mask_removed.connect(self.root.remove)
    def flags(self, index):
        """Determines whether a field is editable, selectable checkable etc"""
        if not index.isValid():
            # Qt expects an ItemFlags value here; the previous code fell
            # through and reported invalid indices as selectable/enabled.
            return QtCore.Qt.NoItemFlags
        item = index.internalPointer()
        if item.mask is not None:
            # allow to change the name of a mask
            return QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsEditable
        # all other fields can't be edited
        return QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled
    def find(self, mask):
        """ find the tree index of the given mask"""
        return self.root.find(mask).index
    def setData(self, index, value, role=QtCore.Qt.DisplayRole):
        """Sets the role data for the item at index to value."""
        if not index.isValid():
            return False
        if role == QtCore.Qt.EditRole:
            # Editing a row renames the underlying mask.
            name = str(value.toPyObject())
            index.internalPointer().mask.name = name
            self.dataChanged.emit(index, index)
            return True
        return False
    def columnCount(self, parent):
        """Returns the number of columns for the children of the given parent index.
        Here the number of columns will always be one."""
        return 1
    def data(self, index, role):
        """Returns the data stored under the given role for the item referred to by the index."""
        if not index.isValid():
            return None
        if role != QtCore.Qt.DisplayRole and role != QtCore.Qt.EditRole:
            return None
        item = index.internalPointer()
        return QVariant(repr(item))
    def index(self, row, column, parent):
        """
        Returns the index of the item in the model specified by the given row,
        column and parent index.
        Calls base class createIndex() to generate model indexes that other
        components can use to refer to items in this model.
        """
        # convert parent index to parent item
        item = parent.internalPointer() if parent.isValid() else self.root
        # get the respective child from the parent item
        child = item.child(row)
        return self.createIndex(row, column, child)
    def parent(self, index):
        """Returns the parent of the model item with the given index. If the item has
        no parent, an invalid QModelIndex is returned."""
        item = index.internalPointer() if index.isValid() else self.root
        if item is self.root:
            return QtCore.QModelIndex()
        return item.parent.index
    def rowCount(self, parent):
        """Returns the number of rows under the given parent index. When the parent is valid
        it means that rowCount is returning the number of children of parent."""
        item = parent.internalPointer() if parent.isValid() else self.root
        return len(item)
| |
from datetime import datetime
from django.db import models
from django.contrib.auth.models import User, Group
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.db.models.signals import post_save, post_delete
from ckeditor import fields as ckedit_fields
from fields import AutoOneToOneField, JSONField
from util import smiles, convert_text_to_html
import settings as forum_settings
if 'south' in settings.INSTALLED_APPS:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ['^djangobb_forum\.fields\.AutoOneToOneField',
'^djangobb_forum\.fields\.JSONField',
'^djangobb_forum\.fields\.ExtendedImageField',
'^ckeditor\.fields\.RichTextField',])
class Category(models.Model):
    """Top-level grouping of forums, optionally restricted to user groups."""
    name = models.CharField(_('Name'), max_length=80)
    groups = models.ManyToManyField(Group,blank=True, null=True,
                    verbose_name=_('Groups'),
                    help_text=_('Only users from these groups can see this category'))
    position = models.IntegerField(_('Position'), blank=True, default=0)
    class Meta:
        ordering = ['position']
        verbose_name = _('Category')
        verbose_name_plural = _('Categories')
    def __unicode__(self):
        return self.name
    def forum_count(self):
        """Number of forums in this category."""
        return self.forums.all().count()
    @property
    def topics(self):
        """All topics in all forums of this category."""
        return Topic.objects.filter(forum__category__id=self.id).select_related()
    @property
    def posts(self):
        """All posts in all topics of this category."""
        return Post.objects.filter(topic__forum__category__id=self.id).select_related()
    def has_access(self, user):
        """Return True if *user* may see this category.

        A category without groups is public; otherwise the user must be
        authenticated and belong to one of the configured groups.
        """
        if self.groups.exists():
            if user.is_authenticated():
                if not self.groups.filter(user__pk=user.id).exists():
                    return False
            else:
                return False
        return True
class Forum(models.Model):
    """A forum inside a Category, holding discussion topics."""
    category = models.ForeignKey(Category, related_name='forums', verbose_name=_('Category'))
    name = models.CharField(_('Name'), max_length=80)
    position = models.IntegerField(_('Position'), blank=True, default=0)
    description = models.TextField(_('Description'), blank=True, default='')
    moderators = models.ManyToManyField(User, blank=True, null=True, verbose_name=_('Moderators'))
    updated = models.DateTimeField(_('Updated'), auto_now=True)
    class Meta:
        ordering = ['position']
        verbose_name = _('Forum')
        verbose_name_plural = _('Forums')
    def __unicode__(self):
        return self.name
    def _get_last_post(self):
        # Most recent post across all topics; cached per instance via
        # _get_cached_prop_val (defined at the bottom of this module).
        def _get_last_post_inner():
            topic_children = self.topics.values_list('id', flat=True)
            return Post.objects.filter(topic__id__in=topic_children).latest()
        return _get_cached_prop_val(self, '_last_post', _get_last_post_inner)
    last_post = property(_get_last_post)
    def _get_topic_count(self):
        # Cached per instance to avoid repeated COUNT queries.
        return _get_cached_prop_val(self, '_topic_count', lambda: self.topics.all().count())
    topic_count = property(_get_topic_count)
    def _get_post_count(self):
        # Sum of the (themselves cached) post counts of all topics.
        def _get_post_count_inner():
            cnt = 0
            for t in self.topics.all():
                cnt += t.post_count
            return cnt
        return _get_cached_prop_val(self, '_post_count', _get_post_count_inner)
    post_count = property(_get_post_count)
    @models.permalink
    def get_absolute_url(self):
        return ('djangobb:forum', [self.id])
    @property
    def posts(self):
        """All posts in all topics of this forum."""
        return Post.objects.filter(topic__forum__id=self.id).select_related()
class Topic(models.Model):
    """A discussion thread within a Forum."""
    forum = models.ForeignKey(Forum, related_name='topics', verbose_name=_('Forum'))
    name = models.CharField(_('Subject'), max_length=255)
    created = models.DateTimeField(_('Created'), auto_now_add=True)
    updated = models.DateTimeField(_('Updated'), null=True)
    user = models.ForeignKey(User, verbose_name=_('User'))
    views = models.IntegerField(_('Views count'), blank=True, default=0)
    sticky = models.BooleanField(_('Sticky'), blank=True, default=False)
    closed = models.BooleanField(_('Closed'), blank=True, default=False)
    subscribers = models.ManyToManyField(User, related_name='subscriptions',
                                         verbose_name=_('Subscribers'), blank=True)
    class Meta:
        ordering = ['-updated']
        get_latest_by = 'updated'
        verbose_name = _('Topic')
        verbose_name_plural = _('Topics')
    def __unicode__(self):
        return self.name
    def _get_post_count(self):
        # Cached per instance to avoid repeated COUNT queries.
        return _get_cached_prop_val(self, '_post_count', lambda: self.posts.all().count())
    post_count = property(_get_post_count)
    def _get_last_post(self):
        return _get_cached_prop_val(self, '_last_post', lambda: self.posts.all().latest())
    last_post = property(_get_last_post)
    @property
    def head(self):
        """The first (opening) post of the topic, or None if there are no posts."""
        try:
            return self.posts.select_related().order_by('created')[0]
        except IndexError:
            return None
    @property
    def reply_count(self):
        """Number of posts excluding the opening one."""
        return self.post_count - 1
    @models.permalink
    def get_absolute_url(self):
        return ('djangobb:topic', [self.id])
    def update_read(self, user):
        """Record in the user's PostTracking that this topic has been read."""
        tracking = user.posttracking
        # if last_read is newer than the topic's last post - nothing to do
        if tracking.last_read and (tracking.last_read > self.last_post.created):
            return
        if isinstance(tracking.topics, dict):
            # clear topics if len > 5Kb and set last_read to current time
            if len(tracking.topics) > 5120:
                tracking.topics = None
                tracking.last_read = datetime.now()
                tracking.save()
                # BUGFIX: tracking.topics is now None, so the .get() below
                # would raise AttributeError.  The fresh last_read already
                # marks everything as read, so we are done here.
                return
            # update topics if a new post exists or the id isn't tracked yet
            if self.last_post.id > tracking.topics.get(str(self.id), 0):
                tracking.topics[str(self.id)] = self.last_post.id
                tracking.save()
        else:
            # initialize topic tracking dict
            tracking.topics = {self.id: self.last_post.id}
            tracking.save()
class Post(models.Model):
    """A single message within a Topic."""
    topic = models.ForeignKey(Topic, related_name='posts', verbose_name=_('Topic'))
    user = models.ForeignKey(User, related_name='posts', verbose_name=_('User'))
    created = models.DateTimeField(_('Created'), auto_now_add=True)
    updated = models.DateTimeField(_('Updated'), blank=True, null=True)
    updated_by = models.ForeignKey(User, verbose_name=_('Updated by'), blank=True, null=True)
    body = ckedit_fields.RichTextField(config_name='bbcode', verbose_name=_('Message'))
    body_html = models.TextField(_('HTML version'))
    user_ip = models.IPAddressField(_('User IP'), blank=True, null=True)
    class Meta:
        ordering = ['created']
        get_latest_by = 'created'
        verbose_name = _('Post')
        verbose_name_plural = _('Posts')
    def save(self, *args, **kwargs):
        """Regenerate the cached HTML rendering of *body* on every save."""
        self.body_html = convert_text_to_html(self.body)
        if forum_settings.SMILES_SUPPORT:
            self.body_html = smiles(self.body_html)
        super(Post, self).save(*args, **kwargs)
    def title(self):
        """ Needed for searching """
        return self.topic
    @models.permalink
    def get_absolute_url(self):
        return ('djangobb:post', [self.id])
    def summary(self):
        """First 50 characters of the body, with '...' when truncated."""
        LIMIT = 50
        tail = len(self.body) > LIMIT and '...' or ''
        return self.body[:LIMIT] + tail
    __unicode__ = summary
class Profile(models.Model):
    """Per-user forum profile: contact details, preferences, counters."""
    user = AutoOneToOneField(User, related_name='forum_profile', verbose_name=_('User'))
    status = models.CharField(_('Status'), max_length=30, blank=True)
    # NOTE(review): verify_exists was removed in later Django versions --
    # confirm the Django version pinned for this project still supports it.
    site = models.URLField(_('Site'), verify_exists=False, blank=True)
    jabber = models.CharField(_('Jabber'), max_length=80, blank=True)
    icq = models.CharField(_('ICQ'), max_length=12, blank=True)
    msn = models.CharField(_('MSN'), max_length=80, blank=True)
    aim = models.CharField(_('AIM'), max_length=80, blank=True)
    yahoo = models.CharField(_('Yahoo'), max_length=80, blank=True)
    location = models.CharField(_('Location'), max_length=30, blank=True)
    signature = models.TextField(_('Signature'), blank=True, default='',
                                 max_length=64)
    language = models.CharField(_('Language'), max_length=5, default='',
                                choices=settings.LANGUAGES)
    show_signatures = models.BooleanField(_('Show signatures'),
                                          blank=True, default=True)
    post_count = models.IntegerField(_('Post count'), blank=True, default=0)
    class Meta:
        verbose_name = _('Profile')
        verbose_name_plural = _('Profiles')
    def last_post(self):
        """Return the creation time of the user's most recent post, or None."""
        posts = Post.objects.filter(user__id=self.user_id).order_by('-created')
        if posts:
            return posts[0].created
        else:
            return None
class PostTracking(models.Model):
    """
    Model for tracking read/unread posts.
    ``topics`` stores a dict mapping topic ids to the id of the last
    post read in each topic; ``last_read`` marks everything older as read.
    """
    user = AutoOneToOneField(User)
    topics = JSONField(null=True)
    last_read = models.DateTimeField(null=True)
    class Meta:
        verbose_name = _('Post tracking')
        verbose_name_plural = _('Post tracking')
    def __unicode__(self):
        return self.user.username
# Choices for Ban.reason: numeric code -> human-readable (translated) label.
BAN_REASON_CHOICES = (
    (1, _('spam')),
    (2, _('rude posts')),
)
class Ban(models.Model):
    """Marks a user as banned, with a coded reason (see BAN_REASON_CHOICES)."""
    user = models.OneToOneField(User, verbose_name=_('Banned user'),
                                related_name='ban_users')
    reason = models.IntegerField(_('Reason'),
                                 choices=BAN_REASON_CHOICES, default=1)
    class Meta:
        verbose_name = _('Ban')
        verbose_name_plural = _('Bans')
    def __unicode__(self):
        return self.user.username
class Report(models.Model):
    """A user's report flagging a post for moderator attention.

    ``zapped``/``zapped_by`` record that a moderator has handled the report.
    """
    reported_by = models.ForeignKey(User, related_name='reported_by',
                                    verbose_name=_('Reported by'))
    post = models.ForeignKey(Post, verbose_name=_('Post'))
    zapped = models.BooleanField(_('Zapped'), blank=True, default=False)
    zapped_by = models.ForeignKey(User, related_name='zapped_by', blank=True,
                                  null=True, verbose_name=_('Zapped by'))
    created = models.DateTimeField(_('Created'), blank=True)
    reason = models.TextField(_('Reason'), blank=True, default='',
                              max_length=1000)
    class Meta:
        verbose_name = _('Report')
        verbose_name_plural = _('Reports')
    def __unicode__(self):
        return u'%s %s' % (self.reported_by ,self.zapped)
# ------------------------- signals ----------------------------
from .signals import post_saved, topic_saved, ban_saved, ban_deleted, \
    forum_post_deleted
# dispatch_uid keeps each handler registered exactly once even if this
# module happens to be imported more than once.
post_save.connect(post_saved, sender=Post, dispatch_uid='forum_post_save')
post_save.connect(topic_saved, sender=Topic, dispatch_uid='forum_topic_save')
post_save.connect(ban_saved, sender=Ban, dispatch_uid='forum_ban_save')
post_delete.connect(ban_deleted, sender=Ban, dispatch_uid='forum_ban_deleted')
post_delete.connect(forum_post_deleted, sender=Post, dispatch_uid='forum_post_deleted')
# ------------------------- privates ------------------------
def _get_cached_prop_val(obj, prop_name, init_func):
""" Support function for getting cached property values from models """
try:
return getattr(obj, prop_name)
except AttributeError:
setattr(obj, prop_name, init_func())
return getattr(obj, prop_name)
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=no-member, invalid-name, protected-access, no-self-use
# pylint: disable=too-many-branches, too-many-arguments, no-self-use
# pylint: disable=too-many-lines, arguments-differ
"""Definition of various recurrent neural network layers."""
from __future__ import print_function
from ... import ndarray
from ..nn import Block
from . import rnn_cell
class _RNNLayer(Block):
    """Implementation of recurrent layers.

    Creates the fused parameter arrays for the cuDNN-style RNN operator
    and an equivalent unfused cell stack used on CPU.
    """
    def __init__(self, hidden_size, num_layers, layout,
                 dropout, bidirectional, input_size,
                 i2h_weight_initializer, h2h_weight_initializer,
                 i2h_bias_initializer, h2h_bias_initializer,
                 mode, **kwargs):
        super(_RNNLayer, self).__init__(**kwargs)
        assert layout == 'TNC' or layout == 'NTC', \
            "Invalid layout %s; must be one of ['TNC' or 'NTC']"%layout
        self._hidden_size = hidden_size
        self._num_layers = num_layers
        self._mode = mode
        self._layout = layout
        self._dropout = dropout
        # 2 directions when bidirectional, otherwise 1.
        self._dir = 2 if bidirectional else 1
        self._input_size = input_size
        self._i2h_weight_initializer = i2h_weight_initializer
        self._h2h_weight_initializer = h2h_weight_initializer
        self._i2h_bias_initializer = i2h_bias_initializer
        self._h2h_bias_initializer = h2h_bias_initializer
        # Gate groups per cell type (e.g. an LSTM cell has 4 gates).
        self._gates = {'rnn_relu': 1, 'rnn_tanh': 1, 'lstm': 4, 'gru': 3}[mode]
        self.i2h_weight = []
        self.h2h_weight = []
        self.i2h_bias = []
        self.h2h_bias = []
        ng, ni, nh = self._gates, input_size, hidden_size
        for i in range(num_layers):
            # 'l' = forward direction, 'r' = reverse direction.
            for j in (['l', 'r'] if self._dir == 2 else ['l']):
                self.i2h_weight.append(
                    self.params.get('%s%d_i2h_weight'%(j, i), shape=(ng*nh, ni),
                                    init=i2h_weight_initializer,
                                    allow_deferred_init=True))
                self.h2h_weight.append(
                    self.params.get('%s%d_h2h_weight'%(j, i), shape=(ng*nh, nh),
                                    init=h2h_weight_initializer,
                                    allow_deferred_init=True))
                self.i2h_bias.append(
                    self.params.get('%s%d_i2h_bias'%(j, i), shape=(ng*nh,),
                                    init=i2h_bias_initializer,
                                    allow_deferred_init=True))
                self.h2h_bias.append(
                    self.params.get('%s%d_h2h_bias'%(j, i), shape=(ng*nh,),
                                    init=h2h_bias_initializer,
                                    allow_deferred_init=True))
            # Subsequent layers consume the (possibly bidirectional) output.
            ni = nh * self._dir
        # Equivalent unfused cell stack, used by the CPU forward path.
        self._unfused = self._unfuse()
    def __repr__(self):
        """Summarize the layer configuration, e.g. ``LSTM(20 -> 100, TNC)``."""
        s = '{name}({mapping}, {_layout}'
        if self._num_layers != 1:
            s += ', num_layers={_num_layers}'
        if self._dropout != 0:
            s += ', dropout={_dropout}'
        if self._dir == 2:
            s += ', bidirectional'
        s += ')'
        mapping = ('{_input_size} -> {_hidden_size}'.format(**self.__dict__) if self._input_size
                   else self._hidden_size)
        return s.format(name=self.__class__.__name__,
                        mapping=mapping,
                        **self.__dict__)
    def state_info(self, batch_size=0):
        """Describe the shapes of the recurrent states; implemented by subclasses."""
        raise NotImplementedError
    def _unfuse(self):
        """Unfuses the fused RNN in to a stack of rnn cells."""
        get_cell = {'rnn_relu': lambda **kwargs: rnn_cell.RNNCell(self._hidden_size,
                                                                  activation='relu',
                                                                  **kwargs),
                    'rnn_tanh': lambda **kwargs: rnn_cell.RNNCell(self._hidden_size,
                                                                  activation='tanh',
                                                                  **kwargs),
                    'lstm': lambda **kwargs: rnn_cell.LSTMCell(self._hidden_size,
                                                               **kwargs),
                    'gru': lambda **kwargs: rnn_cell.GRUCell(self._hidden_size,
                                                             **kwargs)}[self._mode]
        # Share this layer's parameters with the unfused stack.
        stack = rnn_cell.SequentialRNNCell(prefix=self.prefix, params=self.params)
        with stack.name_scope():
            ni = self._input_size
            for i in range(self._num_layers):
                kwargs = {'input_size': ni,
                          'i2h_weight_initializer': self._i2h_weight_initializer,
                          'h2h_weight_initializer': self._h2h_weight_initializer,
                          'i2h_bias_initializer': self._i2h_bias_initializer,
                          'h2h_bias_initializer': self._h2h_bias_initializer}
                if self._dir == 2:
                    stack.add(rnn_cell.BidirectionalCell(
                        get_cell(prefix='l%d_'%i, **kwargs),
                        get_cell(prefix='r%d_'%i, **kwargs)))
                else:
                    stack.add(get_cell(prefix='l%d_'%i, **kwargs))
                # Dropout between layers only, never after the last one.
                if self._dropout > 0 and i != self._num_layers - 1:
                    stack.add(rnn_cell.DropoutCell(self._dropout))
                ni = self._hidden_size * self._dir
        return stack
    def begin_state(self, batch_size=0, func=ndarray.zeros, **kwargs):
        """Initial state for this cell.
        Parameters
        ----------
        batch_size: int
            Only required for `NDArray` API. Size of the batch ('N' in layout).
            Dimension of the input.
        func : callable, default `ndarray.zeros`
            Function for creating initial state.
            For Symbol API, func can be `symbol.zeros`, `symbol.uniform`,
            `symbol.var` etc. Use `symbol.var` if you want to directly
            feed input as states.
            For NDArray API, func can be `ndarray.zeros`, `ndarray.ones`, etc.
        **kwargs :
            Additional keyword arguments passed to func. For example
            `mean`, `std`, `dtype`, etc.
        Returns
        -------
        states : nested list of Symbol
            Starting states for the first RNN step.
        """
        states = []
        for i, info in enumerate(self.state_info(batch_size)):
            if info is not None:
                info.update(kwargs)
            else:
                info = kwargs
            states.append(func(name='%sh0_%d'%(self.prefix, i), **info))
        return states
    def forward(self, inputs, states=None):
        """Run the RNN over *inputs*, dispatching to the GPU or CPU path.

        When *states* is None, zero begin states are created internally and
        only the output (without the final states) is returned.
        """
        batch_size = inputs.shape[self._layout.find('N')]
        skip_states = states is None
        if skip_states:
            states = self.begin_state(batch_size)
        if isinstance(states, ndarray.NDArray):
            states = [states]
        # Validate the provided/created states against state_info().
        for state, info in zip(states, self.state_info(batch_size)):
            if state.shape != info['shape']:
                raise ValueError(
                    "Invalid recurrent state shape. Expecting %s, got %s."%(
                        str(info['shape']), str(state.shape)))
        if self._input_size == 0:
            # Input size was deferred: infer it from the data and finish
            # initializing the first layer's input-to-hidden weights.
            for i in range(self._dir):
                self.i2h_weight[i].shape = (self._gates*self._hidden_size, inputs.shape[2])
                self.i2h_weight[i]._finish_deferred_init()
        if inputs.context.device_type == 'gpu':
            out = self._forward_gpu(inputs, states)
        else:
            out = self._forward_cpu(inputs, states)
        # out is (output, state)
        return out[0] if skip_states else out
    def _forward_cpu(self, inputs, states):
        """Unroll the unfused cell stack step by step (CPU fallback)."""
        ns = len(states)
        axis = self._layout.find('T')
        # Interleave the per-layer states into the flat order the unfused
        # stack expects.
        states = sum(zip(*((j for j in i) for i in states)), ())
        outputs, states = self._unfused.unroll(
            inputs.shape[axis], inputs, states,
            layout=self._layout, merge_outputs=True)
        new_states = []
        for i in range(ns):
            # Re-stack each state kind along a new leading axis.
            state = ndarray.concat(*(j.reshape((1,)+j.shape) for j in states[i::ns]), dim=0)
            new_states.append(state)
        return outputs, new_states
    def _forward_gpu(self, inputs, states):
        """Run the fused RNN operator (GPU path)."""
        if self._layout == 'NTC':
            # The fused RNN operator consumes TNC input.
            inputs = ndarray.swapaxes(inputs, dim1=0, dim2=1)
        ctx = inputs.context
        # Flatten all parameters into the single vector the RNN op takes.
        params = sum(zip(self.i2h_weight, self.h2h_weight), ())
        params += sum(zip(self.i2h_bias, self.h2h_bias), ())
        params = (i.data(ctx).reshape((-1,)) for i in params)
        params = ndarray.concat(*params, dim=0)
        rnn = ndarray.RNN(inputs, params, *states, state_size=self._hidden_size,
                          num_layers=self._num_layers, bidirectional=self._dir == 2,
                          p=self._dropout, state_outputs=True, mode=self._mode)
        if self._mode == 'lstm':
            outputs, states = rnn[0], [rnn[1], rnn[2]]
        else:
            outputs, states = rnn[0], [rnn[1]]
        if self._layout == 'NTC':
            outputs = ndarray.swapaxes(outputs, dim1=0, dim2=1)
        return outputs, states
class RNN(_RNNLayer):
    r"""Applies a multi-layer Elman RNN with `tanh` or `ReLU` non-linearity to an input sequence.

    For each element in the input sequence, each layer computes the following
    function:

    .. math::
        h_t = \tanh(w_{ih} * x_t + b_{ih} + w_{hh} * h_{(t-1)} + b_{hh})

    where :math:`h_t` is the hidden state at time `t`, and :math:`x_t` is the hidden
    state of the previous layer at time `t` or :math:`input_t` for the first layer.
    If nonlinearity='relu', then `ReLU` is used instead of `tanh`.

    Parameters
    ----------
    hidden_size: int
        The number of features in the hidden state h.
    num_layers: int, default 1
        Number of recurrent layers.
    activation: {'relu' or 'tanh'}, default 'relu'
        The activation function to use.
    layout : str, default 'TNC'
        The format of input and output tensors. T, N and C stand for
        sequence length, batch size, and feature dimensions respectively.
    dropout: float, default 0
        If non-zero, introduces a dropout layer on the outputs of each
        RNN layer except the last layer.
    bidirectional: bool, default False
        If `True`, becomes a bidirectional RNN.
    i2h_weight_initializer : str or Initializer
        Initializer for the input weights matrix, used for the linear
        transformation of the inputs.
    h2h_weight_initializer : str or Initializer
        Initializer for the recurrent weights matrix, used for the linear
        transformation of the recurrent state.
    i2h_bias_initializer : str or Initializer
        Initializer for the bias vector.
    h2h_bias_initializer : str or Initializer
        Initializer for the bias vector.
    input_size: int, default 0
        The number of expected features in the input x.
        If not specified, it will be inferred from input.
    prefix : str or None
        Prefix of this `Block`.
    params : ParameterDict or None
        Shared Parameters for this `Block`.

    Input shapes:
        The input shape depends on `layout`. For `layout='TNC'`, the
        input has shape `(sequence_length, batch_size, input_size)`
    Output shape:
        The output shape depends on `layout`. For `layout='TNC'`, the
        output has shape `(sequence_length, batch_size, num_hidden)`.
        If `bidirectional` is True, output shape will instead be
        `(sequence_length, batch_size, 2*num_hidden)`
    Recurrent state:
        The recurrent state is an NDArray with shape `(num_layers, batch_size, num_hidden)`.
        If `bidirectional` is True, the recurrent state shape will instead be
        `(2*num_layers, batch_size, num_hidden)`
        If input recurrent state is None, zeros are used as default begin states,
        and the output recurrent state is omitted.

    Examples
    --------
    >>> layer = mx.gluon.rnn.RNN(100, 3)
    >>> layer.initialize()
    >>> input = mx.nd.random_uniform(shape=(5, 3, 10))
    >>> # by default zeros are used as begin state
    >>> output = layer(input)
    >>> # manually specify begin state.
    >>> h0 = mx.nd.random_uniform(shape=(3, 3, 100))
    >>> output, hn = layer(input, h0)
    """
    def __init__(self, hidden_size, num_layers=1, activation='relu',
                 layout='TNC', dropout=0, bidirectional=False,
                 i2h_weight_initializer=None, h2h_weight_initializer=None,
                 i2h_bias_initializer='zeros', h2h_bias_initializer='zeros',
                 input_size=0, **kwargs):
        super(RNN, self).__init__(hidden_size, num_layers, layout,
                                  dropout, bidirectional, input_size,
                                  i2h_weight_initializer, h2h_weight_initializer,
                                  i2h_bias_initializer, h2h_bias_initializer,
                                  'rnn_'+activation, **kwargs)

    def state_info(self, batch_size=0):
        # One hidden state stacked over layers (doubled when bidirectional).
        return [{'shape': (self._num_layers * self._dir, batch_size, self._hidden_size),
                 '__layout__': 'LNC'}]
class LSTM(_RNNLayer):
    r"""Applies a multi-layer long short-term memory (LSTM) RNN to an input sequence.

    For each element in the input sequence, each layer computes the following
    function:

    .. math::
        \begin{array}{ll}
        i_t = sigmoid(W_{ii} x_t + b_{ii} + W_{hi} h_{(t-1)} + b_{hi}) \\
        f_t = sigmoid(W_{if} x_t + b_{if} + W_{hf} h_{(t-1)} + b_{hf}) \\
        g_t = \tanh(W_{ig} x_t + b_{ig} + W_{hc} h_{(t-1)} + b_{hg}) \\
        o_t = sigmoid(W_{io} x_t + b_{io} + W_{ho} h_{(t-1)} + b_{ho}) \\
        c_t = f_t * c_{(t-1)} + i_t * g_t \\
        h_t = o_t * \tanh(c_t)
        \end{array}

    where :math:`h_t` is the hidden state at time `t`, :math:`c_t` is the
    cell state at time `t`, :math:`x_t` is the hidden state of the previous
    layer at time `t` or :math:`input_t` for the first layer, and :math:`i_t`,
    :math:`f_t`, :math:`g_t`, :math:`o_t` are the input, forget, cell, and
    out gates, respectively.

    Parameters
    ----------
    hidden_size: int
        The number of features in the hidden state h.
    num_layers: int, default 1
        Number of recurrent layers.
    layout : str, default 'TNC'
        The format of input and output tensors. T, N and C stand for
        sequence length, batch size, and feature dimensions respectively.
    dropout: float, default 0
        If non-zero, introduces a dropout layer on the outputs of each
        RNN layer except the last layer.
    bidirectional: bool, default False
        If `True`, becomes a bidirectional RNN.
    i2h_weight_initializer : str or Initializer
        Initializer for the input weights matrix, used for the linear
        transformation of the inputs.
    h2h_weight_initializer : str or Initializer
        Initializer for the recurrent weights matrix, used for the linear
        transformation of the recurrent state.
    i2h_bias_initializer : str or Initializer, default 'zeros'
        Initializer for the bias vector.
    h2h_bias_initializer : str or Initializer
        Initializer for the bias vector.
    input_size: int, default 0
        The number of expected features in the input x.
        If not specified, it will be inferred from input.
    prefix : str or None
        Prefix of this `Block`.
    params : `ParameterDict` or `None`
        Shared Parameters for this `Block`.

    Input shapes:
        The input shape depends on `layout`. For `layout='TNC'`, the
        input has shape `(sequence_length, batch_size, input_size)`
    Output shape:
        The output shape depends on `layout`. For `layout='TNC'`, the
        output has shape `(sequence_length, batch_size, num_hidden)`.
        If `bidirectional` is True, output shape will instead be
        `(sequence_length, batch_size, 2*num_hidden)`
    Recurrent state:
        The recurrent state is a list of two NDArrays. Both has shape
        `(num_layers, batch_size, num_hidden)`.
        If `bidirectional` is True, each recurrent state will instead have shape
        `(2*num_layers, batch_size, num_hidden)`.
        If input recurrent state is None, zeros are used as default begin states,
        and the output recurrent state is omitted.

    Examples
    --------
    >>> layer = mx.gluon.rnn.LSTM(100, 3)
    >>> layer.initialize()
    >>> input = mx.nd.random_uniform(shape=(5, 3, 10))
    >>> # by default zeros are used as begin state
    >>> output = layer(input)
    >>> # manually specify begin state.
    >>> h0 = mx.nd.random_uniform(shape=(3, 3, 100))
    >>> c0 = mx.nd.random_uniform(shape=(3, 3, 100))
    >>> output, hn = layer(input, [h0, c0])
    """
    def __init__(self, hidden_size, num_layers=1, layout='TNC',
                 dropout=0, bidirectional=False, input_size=0,
                 i2h_weight_initializer=None, h2h_weight_initializer=None,
                 i2h_bias_initializer='zeros', h2h_bias_initializer='zeros',
                 **kwargs):
        super(LSTM, self).__init__(hidden_size, num_layers, layout,
                                   dropout, bidirectional, input_size,
                                   i2h_weight_initializer, h2h_weight_initializer,
                                   i2h_bias_initializer, h2h_bias_initializer,
                                   'lstm', **kwargs)

    def state_info(self, batch_size=0):
        # Two states (hidden h and cell c), each stacked over layers.
        return [{'shape': (self._num_layers * self._dir, batch_size, self._hidden_size),
                 '__layout__': 'LNC'},
                {'shape': (self._num_layers * self._dir, batch_size, self._hidden_size),
                 '__layout__': 'LNC'}]
class GRU(_RNNLayer):
    r"""Multi-layer gated recurrent unit (GRU) RNN.

    For each element of the input sequence, every layer computes:

    .. math::
        \begin{array}{ll}
        r_t = sigmoid(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\
        i_t = sigmoid(W_{ii} x_t + b_{ii} + W_hi h_{(t-1)} + b_{hi}) \\
        n_t = \tanh(W_{in} x_t + b_{in} + r_t * (W_{hn} h_{(t-1)}+ b_{hn})) \\
        h_t = (1 - i_t) * n_t + i_t * h_{(t-1)} \\
        \end{array}

    where :math:`h_t` is the hidden state at time `t`, :math:`x_t` is the
    previous layer's hidden state at time `t` (or :math:`input_t` for the
    first layer), and :math:`r_t`, :math:`i_t`, :math:`n_t` are the reset,
    input, and new gates, respectively.

    Parameters
    ----------
    hidden_size: int
        Number of features in the hidden state h.
    num_layers: int, default 1
        Number of stacked recurrent layers.
    layout : str, default 'TNC'
        Format of input and output tensors; T, N and C stand for sequence
        length, batch size, and feature dimensions respectively.
    dropout: float, default 0
        If non-zero, applies dropout to the outputs of every RNN layer
        except the last one.
    bidirectional: bool, default False
        If True, becomes a bidirectional RNN.
    i2h_weight_initializer : str or Initializer
        Initializer for the input-to-hidden weight matrices.
    h2h_weight_initializer : str or Initializer
        Initializer for the hidden-to-hidden weight matrices.
    i2h_bias_initializer : str or Initializer
        Initializer for the input-to-hidden bias vectors.
    h2h_bias_initializer : str or Initializer
        Initializer for the hidden-to-hidden bias vectors.
    input_size: int, default 0
        Expected feature count of the input x; inferred from the first
        input when left at 0.
    prefix : str or None
        Prefix of this `Block`.
    params : ParameterDict or None
        Shared Parameters for this `Block`.

    Input shapes:
        Depends on `layout`. For `layout='TNC'` the input has shape
        `(sequence_length, batch_size, input_size)`.
    Output shape:
        Depends on `layout`. For `layout='TNC'` the output has shape
        `(sequence_length, batch_size, num_hidden)`, or
        `(sequence_length, batch_size, 2*num_hidden)` when bidirectional.
    Recurrent state:
        A single NDArray of shape `(num_layers, batch_size, num_hidden)`
        (`(2*num_layers, batch_size, num_hidden)` when bidirectional).
        Passing no state uses zeros as begin state and omits the state
        from the output.

    Examples
    --------
    >>> layer = mx.gluon.rnn.GRU(100, 3)
    >>> layer.initialize()
    >>> input = mx.nd.random_uniform(shape=(5, 3, 10))
    >>> # by default zeros are used as begin state
    >>> output = layer(input)
    >>> # manually specify begin state.
    >>> h0 = mx.nd.random_uniform(shape=(3, 3, 100))
    >>> output, hn = layer(input, h0)
    """
    def __init__(self, hidden_size, num_layers=1, layout='TNC',
                 dropout=0, bidirectional=False, input_size=0,
                 i2h_weight_initializer=None, h2h_weight_initializer=None,
                 i2h_bias_initializer='zeros', h2h_bias_initializer='zeros',
                 **kwargs):
        super(GRU, self).__init__(hidden_size, num_layers, layout,
                                  dropout, bidirectional, input_size,
                                  i2h_weight_initializer, h2h_weight_initializer,
                                  i2h_bias_initializer, h2h_bias_initializer,
                                  'gru', **kwargs)

    def state_info(self, batch_size=0):
        # One hidden state, stacked over layers (and directions).
        state_shape = (self._num_layers * self._dir, batch_size, self._hidden_size)
        return [{'shape': state_shape, '__layout__': 'LNC'}]
| |
"""Webhook tests for mobile_app."""
# pylint: disable=redefined-outer-name,unused-import
import logging
import pytest
from homeassistant.components.mobile_app.const import CONF_SECRET
from homeassistant.components.zone import DOMAIN as ZONE_DOMAIN
from homeassistant.const import CONF_WEBHOOK_ID
from homeassistant.core import callback
from homeassistant.setup import async_setup_component
from tests.common import async_mock_service
from .const import CALL_SERVICE, FIRE_EVENT, REGISTER_CLEARTEXT, RENDER_TEMPLATE, UPDATE
_LOGGER = logging.getLogger(__name__)
async def test_webhook_handle_render_template(create_registrations, webhook_client):
    """Test that we render templates properly."""
    webhook_id = create_registrations[1]["webhook_id"]
    resp = await webhook_client.post(
        "/api/webhook/{}".format(webhook_id), json=RENDER_TEMPLATE
    )
    assert resp.status == 200
    response_json = await resp.json()
    assert response_json == {"one": "Hello world"}
async def test_webhook_handle_call_services(
    hass, create_registrations, webhook_client
):  # noqa: E501 F811
    """Test that we call services properly."""
    calls = async_mock_service(hass, "test", "mobile_app")
    webhook_id = create_registrations[1]["webhook_id"]
    resp = await webhook_client.post(
        "/api/webhook/{}".format(webhook_id), json=CALL_SERVICE
    )
    assert resp.status == 200
    assert len(calls) == 1
async def test_webhook_handle_fire_event(hass, create_registrations, webhook_client):
    """Test that we can fire events."""
    events = []

    @callback
    def store_event(event):
        """Helper that records fired events."""
        events.append(event)

    hass.bus.async_listen("test_event", store_event)

    webhook_id = create_registrations[1]["webhook_id"]
    resp = await webhook_client.post(
        "/api/webhook/{}".format(webhook_id), json=FIRE_EVENT
    )
    assert resp.status == 200
    response_json = await resp.json()
    assert response_json == {}

    assert len(events) == 1
    assert events[0].data["hello"] == "yo world"
async def test_webhook_update_registration(
    webhook_client, hass_client
):  # noqa: E501 F811
    """Test that we can update an existing registration via webhook."""
    authed_api_client = await hass_client()
    register_resp = await authed_api_client.post(
        "/api/mobile_app/registrations", json=REGISTER_CLEARTEXT
    )
    assert register_resp.status == 201
    register_json = await register_resp.json()

    update_container = {"type": "update_registration", "data": UPDATE}
    update_resp = await webhook_client.post(
        "/api/webhook/{}".format(register_json[CONF_WEBHOOK_ID]),
        json=update_container,
    )
    assert update_resp.status == 200
    update_json = await update_resp.json()
    assert update_json["app_version"] == "2.0.0"
    # Sensitive fields must never be echoed back to the client.
    assert CONF_WEBHOOK_ID not in update_json
    assert CONF_SECRET not in update_json
async def test_webhook_handle_get_zones(hass, create_registrations, webhook_client):
    """Test that we can get zones properly."""
    zone_config = {
        "name": "test",
        "latitude": 32.880837,
        "longitude": -117.237561,
        "radius": 250,
    }
    await async_setup_component(hass, ZONE_DOMAIN, {ZONE_DOMAIN: zone_config})

    resp = await webhook_client.post(
        "/api/webhook/{}".format(create_registrations[1]["webhook_id"]),
        json={"type": "get_zones"},
    )
    assert resp.status == 200
    zones = await resp.json()
    assert len(zones) == 1
    assert zones[0]["entity_id"] == "zone.home"
async def test_webhook_handle_get_config(hass, create_registrations, webhook_client):
    """Test that we can get config properly."""
    resp = await webhook_client.post(
        "/api/webhook/{}".format(create_registrations[1]["webhook_id"]),
        json={"type": "get_config"},
    )
    assert resp.status == 200
    config_json = await resp.json()

    # Order is irrelevant for these collections, so compare them as sets.
    for set_key in ("components", "whitelist_external_dirs"):
        if set_key in config_json:
            config_json[set_key] = set(config_json[set_key])

    hass_config = hass.config.as_dict()
    expected_dict = {
        "latitude": hass_config["latitude"],
        "longitude": hass_config["longitude"],
        "elevation": hass_config["elevation"],
        "unit_system": hass_config["unit_system"],
        "location_name": hass_config["location_name"],
        "time_zone": hass_config["time_zone"],
        "components": hass_config["components"],
        "version": hass_config["version"],
        "theme_color": "#03A9F4",  # Default frontend theme color
    }
    assert expected_dict == config_json
async def test_webhook_returns_error_incorrect_json(
    webhook_client, create_registrations, caplog
):  # noqa: E501 F811
    """Test that an error is returned when JSON is invalid."""
    webhook_id = create_registrations[1]["webhook_id"]
    resp = await webhook_client.post(
        "/api/webhook/{}".format(webhook_id), data="not json"
    )
    assert resp.status == 400
    body = await resp.json()
    assert body == {}
    assert "invalid JSON" in caplog.text
async def test_webhook_handle_decryption(webhook_client, create_registrations):
    """Test that we can encrypt/decrypt properly."""
    try:
        # pylint: disable=unused-import
        from nacl.secret import SecretBox  # noqa: F401
        from nacl.encoding import Base64Encoder  # noqa: F401
    except (ImportError, OSError):
        pytest.skip("libnacl/libsodium is not installed")
        return
    import json
    # Registration 0 is the encrypted one. Derive the key the same way the
    # component does: UTF-8 secret, truncated then zero-padded to the
    # SecretBox key size.
    keylen = SecretBox.KEY_SIZE
    key = create_registrations[0]["secret"].encode("utf-8")
    key = key[:keylen]
    key = key.ljust(keylen, b"\0")
    # Encrypt the render_template payload as a client would.
    payload = json.dumps(RENDER_TEMPLATE["data"]).encode("utf-8")
    data = SecretBox(key).encrypt(payload, encoder=Base64Encoder).decode("utf-8")
    container = {"type": "render_template", "encrypted": True, "encrypted_data": data}
    resp = await webhook_client.post(
        "/api/webhook/{}".format(create_registrations[0]["webhook_id"]), json=container
    )
    assert resp.status == 200
    webhook_json = await resp.json()
    assert "encrypted_data" in webhook_json
    # The response comes back encrypted as well; decrypt it and verify the
    # template rendered correctly.
    decrypted_data = SecretBox(key).decrypt(
        webhook_json["encrypted_data"], encoder=Base64Encoder
    )
    decrypted_data = decrypted_data.decode("utf-8")
    assert json.loads(decrypted_data) == {"one": "Hello world"}
async def test_webhook_requires_encryption(webhook_client, create_registrations):
    """Test that encrypted registrations only accept encrypted data."""
    resp = await webhook_client.post(
        "/api/webhook/{}".format(create_registrations[0]["webhook_id"]),
        json=RENDER_TEMPLATE,
    )
    assert resp.status == 400
    error_json = await resp.json()
    assert error_json["success"] is False
    assert "error" in error_json
    assert error_json["error"]["code"] == "encryption_required"
| |
"""Parses text files downloaded by timeliness.crawler and
stores the pickled results for the analysis stage."""
import re
import os
import sys
import time
import cPickle as pickle
from datetime import date, datetime
from utils import flattened, recursive_listdir, pretty_bytes, pretty_seconds, Accumulator
from timeliness import (DATA_DIR, DOWNLOAD_DIR, FISCAL_YEARS)
RE_CFDA = re.compile('^[0-9]{2,2}.[0-9]{3,3}$')
class InvalidCFDAProgramNumber(ValueError):
    """Raised when a record's CFDA program number fails validation."""
    def __init__(self, *args, **kwargs):
        # Pull out our optional keyword before delegating, so that
        # ValueError never sees an unexpected kwarg.
        program_number = kwargs.pop('program_number', None)
        super(InvalidCFDAProgramNumber, self).__init__(*args, **kwargs)
        self.program_number = program_number
class UnparseableFile(Exception):
    """Raised when a downloaded file cannot be parsed at all."""
    def __init__(self, *args, **kwargs):
        # Pull out our optional keyword before delegating to Exception.
        path = kwargs.pop('path', None)
        super(UnparseableFile, self).__init__(*args, **kwargs)
        self.path = path
def get_fiscal_year(d):
    """Return the US federal fiscal year containing date `d`.

    Fiscal year N runs from October 1 of year N-1 through September 30 of
    year N, so October-December dates roll forward into the next year.
    """
    return d.year + 1 if d.month >= 10 else d.year
def find_date(haystack):
    """Extract the first date embedded in `haystack` (e.g. a filename).

    Tries, in order: separated YYYY-MM-DD style, compact YYYYMMDD, then
    compact MMDDYYYY. Raises ValueError when nothing matches.
    """
    patterns = (
        re.compile('(?P<year>20[0-9]{2,2})[-.\s\_]{1,1}(?P<month>[0-9]{1,2})[-.\s\_]{1,1}(?P<day>[0-9]{1,2})'),
        re.compile('(?P<year>20[0-9]{2,2})(?P<month>[0-9]{2,2})(?P<day>[0-9]{2,2})'),
        re.compile('(?P<month>[0-9]{2,2})(?P<day>[0-9]{2,2})(?P<year>20[0-9]{2,2})')
    )
    for pattern in patterns:
        match = pattern.search(haystack)
        if match is None:
            continue
        return date(year=int(match.group('year')),
                    month=int(match.group('month')),
                    day=int(match.group('day')))
    raise ValueError("{0} does not contain a date string.".format(haystack))
def parse_line(line, import_date):
    """Parse one fixed-width FAADS record into an award-transaction dict.

    `line` is a fixed-width text record; fields are sliced by column
    offset. `import_date` is the date the file was downloaded. Raises
    InvalidCFDAProgramNumber for a bad CFDA field; may raise
    ValueError/IndexError on otherwise malformed lines (callers treat any
    exception as a bad line).
    """
    award = {}
    award['cfda'] = line[0:7].strip()
    if not RE_CFDA.match(award['cfda']):
        raise InvalidCFDAProgramNumber(award['cfda'])
    award['action'] = line[135]
    award['award_id'] = line[142:158].strip()
    try:
        award['award_mod'] = int(line[158:162].strip())
    except ValueError:
        # Records without a numeric modification number get None so they
        # sort first (see parse_file's sort by award_mod).
        award['award_mod'] = None
    award['fed_amount'] = int(line[162:173])
    award['correction_indicator'] = line[223]
    # for aggregates obligation date is the last day of the quarter
    award['obligation_date'] = date(year=int(line[196:200]),
                                    month=int(line[200:202]),
                                    day=int(line[202:204]))
    award['import_date'] = import_date
    # Days between when the action was obligated and when it was reported.
    award['reporting_lag'] = (award['import_date'] - award['obligation_date']).days
    fiscal_year = get_fiscal_year(award['obligation_date'])
    # Days past the end (September 30) of the obligation's fiscal year.
    award['fiscal_year_lag'] = (import_date - date(year=fiscal_year,
                                                   month=9,
                                                   day=30)).days
    award['fiscal_year'] = fiscal_year
    return award
def parse_file(path, import_date, on_bad_line=None):
    """Parse a FAADS file and return the first positive-dollar transaction
    per award.

    Returns a list of (award_id, transaction) pairs: for each award, the
    earliest (lowest award_mod) action-type ('A') transaction that is not
    a correction/deletion ('D'/'C'/'L'), and only when fed_amount > 0.
    Unparseable lines are passed to `on_bad_line` when given; otherwise
    they are silently skipped (intentional best-effort parsing).
    """
    transactions = {}
    with file(path) as fil:
        for line in fil:
            # Skip blank or obviously truncated records.
            if line.strip() == '' or len(line) < 100:
                continue
            try:
                t = parse_line(line, import_date)
                # Group transactions by award id.
                ts = transactions.get(t['award_id'])
                if ts is None:
                    transactions[t['award_id']] = [t]
                else:
                    ts.append(t)
            except Exception, ex:
                # Deliberately broad: any parse failure marks a bad line.
                if not on_bad_line is None:
                    on_bad_line(line)
    first_transactions = []
    for (award_id, ts) in transactions.iteritems():
        # Lowest modification number first (None sorts before ints in py2).
        ts.sort(key=lambda t: t['award_mod'])
        for t in ts:
            if t['action'] == 'A' and t['correction_indicator'] not in('D', 'C', 'L'):
                first_transactions.append((award_id, t))
                break
    return [(award_id, t)
            for (award_id, t) in first_transactions
            if t['fed_amount'] > 0]
def find_files_to_process():
files_from_crawler = list(flattened(recursive_listdir(DOWNLOAD_DIR)))
files_to_process = []
files_to_ignore = []
for path in files_from_crawler:
try:
import_date = find_date(path)
size = os.path.getsize(path)
files_to_process.append((path,
import_date,
os.path.getsize(path)))
except ValueError:
files_to_ignore.append(path)
def _import_date((_1, import_date, _2)): return import_date
def _size((_1, _2, size)): return size
bytes_accumulator = Accumulator()
files_to_process.sort(key=_import_date)
files_to_process = [(f, bytes_accumulator(_size(f)))
for f in files_to_process]
bytes_to_process = bytes_accumulator.getvalue()
return (bytes_to_process, files_to_process, files_to_ignore)
def parser_main():
(bytes_to_process,
files_to_process,
files_to_ignore) = find_files_to_process()
for path in files_to_ignore:
print "Unparseable filename: {0}".format(os.path.basename(path))
print "Files to process: {0}".format(len(files_to_process))
print "Bytes to process: {0}".format(pretty_bytes(bytes_to_process))
print "Continue?"
user_input = raw_input()
if not 'yes'.startswith(user_input.lower()):
return
transactions = {}
failed_lines = file(os.path.join(DATA_DIR, 'failed_lines.out'), 'w')
failed_files = file(os.path.join(DATA_DIR, 'failed_files.out'), 'w')
begin_time = time.time()
for files_processed, ((filepath, import_date, filesize), bytes_processed) in enumerate(files_to_process, start=1):
try:
print
print "Parsing {0}".format(os.path.basename(filepath))
file_transactions = parse_file(filepath, import_date)
for (award_id, t) in file_transactions:
if award_id not in transactions:
transactions[award_id] = t
except UnicodeDecodeError, error:
log_error(db, filepath, "Unable to parse file: {0}".format(unicode(error)))
except KeyboardInterrupt:
break
now_time = time.time()
bytes_per_second = bytes_processed / max(now_time - begin_time, 1)
bytes_processed_pct = bytes_processed * 100 / bytes_to_process
eta_seconds = (bytes_to_process - bytes_processed) / max(bytes_per_second, 1)
print "{0}/{1} ({2}%), {3}/s, ETA {4}".format(
pretty_bytes(bytes_processed),
pretty_bytes(bytes_to_process),
bytes_processed_pct,
pretty_bytes(bytes_per_second),
pretty_seconds(eta_seconds))
failed_lines.close()
failed_files.close()
print "Dumping awards dictionary..."
with file(os.path.join(DATA_DIR, 'cfda_awards.out.bin'), 'wb') as outf:
pickle.dump(transactions, outf)
def fix_prefix(prefix):
    """Collapse an agency filename prefix to its canonical stem.

    Returns the first known stem that `prefix` starts with, or `prefix`
    unchanged when none matches.
    """
    stems = ['VA', 'DHS', 'HUD', 'USAID', 'DOJ', 'USTREAS', 'DOE', 'DOI', 'IMLS', 'DOC']
    matches = (stem for stem in stems if prefix.startswith(stem))
    return next(matches, prefix)
def show_prefixes():
    """Diagnostic helper: print a frequency table of agency filename
    prefixes found among dated files in DOWNLOAD_DIR."""
    def filename_has_date(filename):
        # Keep only files whose name carries a parseable date stamp.
        try:
            import_date = find_date(filename)
            return True
        except (ValueError, ImportError), err:
            return False
    # Leading digits followed by uppercase letters, e.g. "12DOE...".
    re_agency = re.compile('^[0-9]*[A-Z]+')
    def extract_prefix(filename):
        # Uppercase the name, take the agency-looking prefix, then
        # collapse it to a known stem; None when nothing matches.
        prefix_match = re_agency.match(filename.upper())
        if not prefix_match is None:
            prefix = prefix_match.group()
            return fix_prefix(prefix)
        else:
            return None
    files_to_process = filter(filename_has_date, map(os.path.basename, flattened(recursive_listdir(DOWNLOAD_DIR))))
    prefixes = map(extract_prefix, files_to_process)
    def unique(iterable):
        # Distinct items (unordered); kept for parity though unused below.
        def combine(accum, item):
            accum[item] = None
            return accum
        return reduce(combine, iterable, {}).keys()
    def frequency(iterable):
        # Map each item to its occurrence count.
        def combine(frequencies, item):
            cnt = frequencies.get(item, 0)
            frequencies[item] = cnt + 1
            return frequencies
        return reduce(combine, iterable, {})
    def print_freq(freq, indent=""):
        def value((k, v)):
            return v
        # Sorted by prefix for stable, readable output.
        for s, f in sorted(freq.iteritems()):
            print "{0}{1!s:15}: {2!s:>7}".format(indent, s, f)
    print_freq(frequency(prefixes))
# Entry point: run the parse stage when executed directly (not on import).
if __name__ == "__main__":
    parser_main()
| |
from __future__ import print_function
import re
import sqlparse
from collections import namedtuple
from sqlparse.sql import IdentifierList, Identifier, Function
from sqlparse.tokens import Keyword, DML, Punctuation, Token, Error
# Pre-compiled patterns used by last_word(); each one matches the trailing
# run of characters permitted by the named completion context.
cleanup_regex = {
    # This matches only alphanumerics and underscores.
    'alphanum_underscore': re.compile(r'(\w+)$'),
    # This matches everything except spaces, parens, colon, and comma
    'many_punctuations': re.compile(r'([^():,\s]+)$'),
    # This matches everything except spaces, parens, colon, comma, and period
    'most_punctuations': re.compile(r'([^\.():,\s]+)$'),
    # This matches everything except a space.
    'all_punctuations': re.compile('([^\s]+)$'),
}
def last_word(text, include='alphanum_underscore'):
    """
    Find the last word in a sentence.
    >>> last_word('abc')
    'abc'
    >>> last_word(' abc')
    'abc'
    >>> last_word('')
    ''
    >>> last_word(' ')
    ''
    >>> last_word('abc ')
    ''
    >>> last_word('abc def')
    'def'
    >>> last_word('abc def ')
    ''
    >>> last_word('abc def;')
    ''
    >>> last_word('bac $def')
    'def'
    >>> last_word('bac $def', include='most_punctuations')
    '$def'
    >>> last_word('bac \def', include='most_punctuations')
    '\\\\def'
    >>> last_word('bac \def;', include='most_punctuations')
    '\\\\def;'
    >>> last_word('bac::def', include='most_punctuations')
    'def'
    >>> last_word('"foo*bar', include='most_punctuations')
    '"foo*bar'
    """
    if not text:
        # Empty input has no last word.
        return ''
    if text[-1].isspace():
        # A trailing space means the previous word is already complete.
        return ''
    match = cleanup_regex[include].search(text)
    return match.group(0) if match else ''
# Lightweight descriptor for a table (or table-valued function) reference.
TableReference = namedtuple('TableReference', ['schema', 'name', 'alias',
                                               'is_function'])
# `ref` is the name a query can use to refer to the table: its alias when
# one was given, otherwise its real name.
TableReference.ref = property(lambda self: self.alias or self.name)
# This code is borrowed from sqlparse example script.
# <url>
def is_subselect(parsed):
    """Return True when `parsed` is a token group containing a DML keyword
    (i.e. it looks like a nested statement such as a sub-select)."""
    if not parsed.is_group():
        return False
    dml_keywords = ('SELECT', 'INSERT', 'UPDATE', 'CREATE', 'DELETE')
    return any(token.ttype is DML and token.value.upper() in dml_keywords
               for token in parsed.tokens)
def _identifier_is_function(identifier):
    """Return True when any child token of `identifier` is a Function."""
    for token in identifier.tokens:
        if isinstance(token, Function):
            return True
    return False
def extract_from_part(parsed, stop_at_punctuation=True):
    """Yield the tokens forming the table-list part of `parsed`.

    Emission starts after a table-introducing keyword (COPY, FROM, INTO,
    UPDATE, TABLE, or any JOIN variant) and stops at the next keyword that
    ends the table list (or at punctuation, when requested).
    """
    tbl_prefix_seen = False
    for item in parsed.tokens:
        if tbl_prefix_seen:
            if is_subselect(item):
                for x in extract_from_part(item, stop_at_punctuation):
                    yield x
            elif stop_at_punctuation and item.ttype is Punctuation:
                # Fixed: `raise StopIteration` inside a generator becomes a
                # RuntimeError under PEP 479 (Python 3.7+); a plain return
                # ends the generator cleanly with the same semantics.
                return
            # An incomplete nested select won't be recognized correctly as a
            # sub-select. eg: 'SELECT * FROM (SELECT id FROM user'. This causes
            # the second FROM to trigger this elif condition resulting in a
            # StopIteration. So we need to ignore the keyword if the keyword
            # FROM.
            # Also 'SELECT * FROM abc JOIN def' will trigger this elif
            # condition. So we need to ignore the keyword JOIN and its variants
            # INNER JOIN, FULL OUTER JOIN, etc.
            elif item.ttype is Keyword and (
                    not item.value.upper() == 'FROM') and (
                    not item.value.upper().endswith('JOIN')):
                tbl_prefix_seen = False
            else:
                yield item
        elif item.ttype is Keyword or item.ttype is Keyword.DML:
            item_val = item.value.upper()
            if (item_val in ('COPY', 'FROM', 'INTO', 'UPDATE', 'TABLE') or
                    item_val.endswith('JOIN')):
                tbl_prefix_seen = True
        # 'SELECT a, FROM abc' will detect FROM as part of the column list.
        # So this check here is necessary.
        elif isinstance(item, IdentifierList):
            for identifier in item.get_identifiers():
                if (identifier.ttype is Keyword and
                        identifier.value.upper() == 'FROM'):
                    tbl_prefix_seen = True
                    break
def extract_table_identifiers(token_stream, allow_functions=True):
    """yields tuples of TableReference namedtuples"""
    for item in token_stream:
        if isinstance(item, IdentifierList):
            # Comma-separated list: emit one reference per identifier.
            for identifier in item.get_identifiers():
                # Sometimes Keywords (such as FROM ) are classified as
                # identifiers which don't have the get_real_name() method.
                try:
                    schema_name = identifier.get_parent_name()
                    real_name = identifier.get_real_name()
                    is_function = (allow_functions and
                                   _identifier_is_function(identifier))
                except AttributeError:
                    continue
                if real_name:
                    yield TableReference(schema_name, real_name,
                                         identifier.get_alias(), is_function)
        elif isinstance(item, Identifier):
            real_name = item.get_real_name()
            schema_name = item.get_parent_name()
            is_function = allow_functions and _identifier_is_function(item)
            if real_name:
                yield TableReference(schema_name, real_name, item.get_alias(),
                                     is_function)
            else:
                # No resolvable real name: fall back to the raw name, using
                # it as the alias too when no alias was given.
                name = item.get_name()
                yield TableReference(None, name, item.get_alias() or name,
                                     is_function)
        elif isinstance(item, Function):
            # A bare function call in the FROM clause (table function).
            yield TableReference(None, item.get_real_name(), item.get_alias(),
                                 allow_functions)
# extract_tables is inspired from examples in the sqlparse lib.
def extract_tables(sql):
    """Extract the table names from an SQL statment.

    Returns a list of TableReference namedtuples.
    """
    statements = sqlparse.parse(sql)
    if not statements:
        return ()
    first_statement = statements[0]
    # INSERT statements must stop looking for tables at the first
    # punctuation token: in "INSERT INTO abc (col1, col2) VALUES (1, 2)"
    # only abc is a table, and without stopping at the lparen col1/col2
    # would be picked up as table names too.
    is_insert = first_statement.token_first().value.lower() == 'insert'
    token_stream = extract_from_part(first_statement,
                                     stop_at_punctuation=is_insert)
    # Kludge: sqlparse mistakenly reads "insert into foo (bar, baz)" as a
    # call to foo with arguments (bar, baz), so identifiers inside insert
    # statements must never be flagged as functions.
    return tuple(extract_table_identifiers(token_stream,
                                           allow_functions=not is_insert))
def find_prev_keyword(sql):
    """ Find the last sql keyword in an SQL statement
    Returns the value of the last keyword, and the text of the query with
    everything after the last keyword stripped
    """
    if not sql.strip():
        return None, ''

    parsed = sqlparse.parse(sql)[0]
    flattened = list(parsed.flatten())
    logical_operators = ('AND', 'OR', 'NOT', 'BETWEEN')

    for tok in reversed(flattened):
        is_candidate = tok.is_keyword and (
            tok.value.upper() not in logical_operators)
        if not (tok.value == '(' or is_candidate):
            continue
        # Locate tok inside the flattened list. parsed.token_index(tok)
        # can't be used here: tok may be nested inside a TokenList, in
        # which case token_index raises ValueError. Minimal example:
        #   p = sqlparse.parse('select * from foo where bar')
        #   t = list(p.flatten())[-3]  # The "Where" token
        #   p.token_index(t)           # Throws ValueError: not in list
        position = flattened.index(tok)
        # Rebuild the query text up to and including the keyword token,
        # dropping everything after it.
        text = ''.join(t.value for t in flattened[:position + 1])
        return tok, text

    return None, ''
# Postgresql dollar quote signs look like `$$` or `$tag$`
# (the tag itself may not contain a `$`).
dollar_quote_regex = re.compile(r'^\$[^$]*\$$')
def is_open_quote(sql):
    """Returns true if the query contains an unclosed quote"""
    # sqlparse may split `sql` into several semicolon-separated statements;
    # an open quote in any of them counts.
    for statement in sqlparse.parse(sql):
        if _parsed_is_open_quote(statement):
            return True
    return False
def _parsed_is_open_quote(parsed):
    """Return True when the parsed statement ends inside an unclosed
    single quote or dollar quote."""
    tokens = list(parsed.flatten())
    # Manual index loop because the dollar-quote branch needs to skip
    # ahead past the matching closing sign.
    i = 0
    while i < len(tokens):
        tok = tokens[i]
        if tok.match(Token.Error, "'"):
            # An unmatched single quote
            return True
        elif (tok.ttype in Token.Name.Builtin
              and dollar_quote_regex.match(tok.value)):
            # Find the matching closing dollar quote sign
            for (j, tok2) in enumerate(tokens[i+1:], i+1):
                if tok2.match(Token.Name.Builtin, tok.value):
                    # Found the matching closing quote - continue our scan for
                    # open quotes thereafter
                    i = j
                    break
            else:
                # No matching dollar sign quote
                return True
        i += 1
    return False
def parse_partial_identifier(word):
    """Attempt to parse a (partially typed) word as an identifier

    word may include a schema qualification, like `schema_name.partial_name`
    or `schema_name.` There may also be unclosed quotation marks, like
    `"schema`, or `schema."partial_name`

    :param word: string representing a (partially complete) identifier
    :return: sqlparse.sql.Identifier, or None
    """
    parsed = sqlparse.parse(word)[0]
    tokens = parsed.tokens
    if len(tokens) == 1 and isinstance(tokens[0], Identifier):
        return tokens[0]
    if parsed.token_next_match(0, Error, '"'):
        # An unmatched double quote, e.g. '"foo', 'foo."', or 'foo."bar':
        # close the quote and reparse (recurses at most once).
        return parse_partial_identifier(word + '"')
    return None
# Ad-hoc smoke test: print the tables found in a partial query.
if __name__ == '__main__':
    sql = 'select * from (select t. from tabl t'
    print (extract_tables(sql))
| |
##
# TRACK 1
# TWO TRAINS
# From Data-Driven DJ (datadrivendj.com) by Brian Foo (brianfoo.com)
# This file builds the sequence file for use with ChucK from the data supplied
##
# Library dependencies
import csv
import json
import math
import os
import random
import re
import time
# Config
BPM = 120 # Beats per minute, e.g. 60, 75, 100, 120, 150
METERS_PER_BEAT = 75 # Higher numbers creates shorter songs
DIVISIONS_PER_BEAT = 4 # e.g. 4 = quarter notes, 8 = eighth notes
VARIANCE_MS = 20 # +/- milliseconds an instrument note should be off by to give it a little more "natural" feel
VARIANCE_RATE = 0 # for adding variance to the playback rate
# Input files
INSTRUMENTS_INPUT_FILE = 'data/instruments.csv'
STATIONS_INPUT_FILE = 'data/stations.csv'
# Output files (reports, ChucK sequence, visualization JSON)
REPORT_SUMMARY_OUTPUT_FILE = 'data/report_summary.csv'
REPORT_SEQUENCE_OUTPUT_FILE = 'data/report_sequence.csv'
INSTRUMENTS_OUTPUT_FILE = 'data/ck_instruments.csv'
SEQUENCE_OUTPUT_FILE = 'data/ck_sequence.csv'
STATIONS_VISUALIZATION_OUTPUT_FILE = 'visualization/stations/data/stations.json'
MAP_VISUALIZATION_OUTPUT_FILE = 'visualization/map/data/stations.json'
INSTRUMENTS_DIR = 'instruments/'
WRITE_SEQUENCE = True
WRITE_REPORT = True
WRITE_JSON = True
# Calculations
BEAT_MS = round(60.0 / BPM * 1000) # duration of one beat in milliseconds
ROUND_TO_NEAREST = round(BEAT_MS/DIVISIONS_PER_BEAT) # quantization step in ms
print('Building sequence at '+str(BPM)+' BPM ('+str(BEAT_MS)+'ms per beat)')
# Initialize Variables
instruments = []
stations = []
sequence = []
hindex = 0 # Halton-sequence counter shared by addBeatsToSequence
# For creating pseudo-random numbers
def halton(index, base):
    """Return element *index* of the Halton low-discrepancy sequence for *base*."""
    value = 0.0
    fraction = 1.0 / base
    remaining = 1.0 * index
    while remaining > 0:
        value += fraction * (remaining % base)
        remaining = math.floor(remaining / base)
        fraction /= base
    return value
# Find index of first item that matches value
def findInList(list, key, value):
    """Return the index of the first item whose item[key] == value, else -1."""
    for position, entry in enumerate(list):
        if entry[key] == value:
            return position
    return -1
def roundToNearest(n, nearest):
    """Round n to the closest multiple of nearest (result is a float)."""
    quotient = round(1.0 * n / nearest)
    return 1.0 * quotient * nearest
# Read instruments from file
# NOTE(review): 'rb' with csv.reader is Python 2 style; on Python 3 this
# should be open(..., 'r', newline='') — confirm the target interpreter.
with open(INSTRUMENTS_INPUT_FILE, 'rb') as f:
    r = csv.reader(f, delimiter=',')
    next(r, None) # remove header
    for name,type,price,bracket_min,bracket_max,file,from_gain,to_gain,from_tempo,to_tempo,gain_phase,tempo_phase,tempo_offset,interval_phase,interval,interval_offset,active in r:
        # only keep rows that reference a sample file and are marked active
        if file and int(active):
            index = len(instruments)
            # build instrument object
            instrument = {
                'index': index,
                'name': name,
                'type': type.lower().replace(' ', '_'),
                'bracket_min': float(bracket_min),
                'bracket_max': float(bracket_max),
                'price': int(price),
                'file': INSTRUMENTS_DIR + file,
                'from_gain': round(float(from_gain), 2),
                'to_gain': round(float(to_gain), 2),
                'from_tempo': float(from_tempo),
                'to_tempo': float(to_tempo),
                'gain_phase': int(gain_phase),
                'tempo_phase': int(tempo_phase),
                # pre-computed per-beat durations at the two tempo endpoints
                'from_beat_ms': int(round(BEAT_MS/float(from_tempo))),
                'to_beat_ms': int(round(BEAT_MS/float(to_tempo))),
                'tempo_offset': float(tempo_offset),
                'interval_ms': int(int(interval_phase)*BEAT_MS),
                'interval': int(interval),
                'interval_offset': int(interval_offset)
            }
            # add instrument to instruments
            instruments.append(instrument)
# Read stations from file
with open(STATIONS_INPUT_FILE, 'rb') as f:
    r = csv.reader(f, delimiter=',')
    next(r, None) # remove header
    for name,lat,lng,income,borough in r:
        index = len(stations)
        stations.append({
            'index': index,
            'name': name,
            # NOTE(review): income is a csv string here, so income*52 is string
            # repetition, not arithmetic — likely meant int(income)*52; confirm
            # against upstream data pipeline before changing.
            'budget': float(int(income*52)/12),
            'percentile': 0.0,   # filled in by getIncomePercentile later
            'lat': float(lat),
            'lng': float(lng),
            'beats': 0,          # leg stats filled in by the pre-process loop
            'distance': 0,
            'duration': 0,
            'borough': borough,
            'borough_next': borough,
            'instruments': []    # filled in by buyInstruments later
        })
# For calculating distance between two coords(lat, lng)
def distBetweenCoords(lat1, lng1, lat2, lng2):
    """Haversine great-circle distance in meters between two (lat, lng) points."""
    earth_radius_m = 6371000  # mean Earth radius in meters
    phi1 = math.radians(lat1)
    phi2 = math.radians(lat2)
    d_phi = math.radians(lat2 - lat1)
    d_lambda = math.radians(lng2 - lng1)
    sin_dphi = math.sin(d_phi / 2)
    sin_dlam = math.sin(d_lambda / 2)
    a = sin_dphi * sin_dphi + math.cos(phi1) * math.cos(phi2) * sin_dlam * sin_dlam
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return float(earth_radius_m * c)
def getIncomePercentile(station, sorted_station_list):
    """Station's rank in the budget-sorted list, expressed as a 0-100 percentile."""
    rank = findInList(sorted_station_list, 'index', station['index'])
    if rank < 0:
        return 0.0
    return 1.0 * rank / len(sorted_station_list) * 100
# Buy instruments based on a specified budget
def buyInstruments(station, instruments_shelf):
    """Greedily purchase in-bracket instruments until one is unaffordable.

    Placeholder instruments consume budget but are not added to the cart.
    """
    remaining = station['budget']
    percentile = station['percentile']
    cart = []
    for item in instruments_shelf:
        in_bracket = item['bracket_min'] <= percentile < item['bracket_max']
        if not in_bracket:
            continue
        if item['price'] > remaining:
            # first unaffordable in-bracket item ends the shopping trip
            break
        remaining -= item['price']
        if item['type'] != 'placeholder':
            cart.append(item)
    return cart
# Pre-process stations
min_distance = 0
max_distance = 0
total_distance = 0
total_beats = 0
total_ms = 0
min_duration = 0
max_duration = 0
# Create a list of stations sorted by budget
sorted_stations = stations[:]
sorted_stations = sorted(sorted_stations, key=lambda k: k['budget'])
# Loop through stations
for index, station in enumerate(stations):
    # determine station's income percentile
    stations[index]['percentile'] = getIncomePercentile(station, sorted_stations)
    # determine the station's instruments based on budget
    stations[index]['instruments'] = buyInstruments(stations[index], instruments)
    if index > 0:
        # determine distance between last station
        distance = distBetweenCoords(station['lat'], station['lng'], stations[index-1]['lat'], stations[index-1]['lng'])
        beats = int(round(distance / METERS_PER_BEAT))
        duration = beats * BEAT_MS
        # the leg's stats are stored on the *previous* station (departure side)
        stations[index-1]['distance'] = distance
        stations[index-1]['beats'] = beats
        stations[index-1]['duration'] = duration
        stations[index-1]['borough_next'] = station['borough']
        total_distance += distance
        total_beats += beats
        total_ms += duration
        if distance > max_distance:
            max_distance = distance
            max_duration = duration
        # min_distance == 0 means "not yet initialized"
        if distance < min_distance or min_distance == 0:
            min_distance = distance
            min_duration = duration
# Calculate how many beats
station_count = len(stations)-1
total_seconds = int(1.0*total_ms/1000)
seconds_per_station = int(1.0*total_seconds/station_count)
print('Total distance in meters: '+str(round(total_distance)))
print('Distance range in meters: ['+str(min_distance)+','+str(max_distance)+']')
print('Average beats per station: '+str(1.0*total_beats/station_count))
print('Average time per station: '+time.strftime('%M:%S', time.gmtime(seconds_per_station)))
print('Main sequence beats: '+str(total_beats))
print('Main sequence time: '+time.strftime('%M:%S', time.gmtime(total_seconds)) + '(' + str(total_seconds) + 's)')
# Multiplier based on sine curve
def getMultiplier(percent_complete):
    """Map progress in [0, 1] to a sine envelope value in [0, 1].

    :param percent_complete: progress through the phase (0..1)
    :return: sin(progress * pi), clamped to the [0, 1] range
    """
    radians = percent_complete * math.pi
    multiplier = math.sin(radians)
    # Clamp to [0, 1] to guard against values just outside the range.
    if multiplier < 0:
        multiplier = 0.0
    elif multiplier > 1:
        # fixed: upper clamp previously assigned to misspelled 'multplier',
        # making the clamp a no-op
        multiplier = 1.0
    return multiplier
# Retrieve gain based on current beat
def getGain(instrument, beat):
    """Gain for *instrument* at *beat*: sine interpolation between the
    instrument's from_gain and to_gain, never below the smaller of the two."""
    phase_beats = instrument['gain_phase']
    progress = float(beat % phase_beats) / phase_beats
    envelope = getMultiplier(progress)
    start = instrument['from_gain']
    end = instrument['to_gain']
    floor_gain = min(start, end)
    interpolated = start + envelope * (end - start)
    return max(floor_gain, round(interpolated, 2))
# Get beat duration in ms based on current point in time
def getBeatMs(instrument, beat, round_to):
    """Per-beat duration in ms at *beat*: sine interpolation between the
    instrument's tempo endpoints, quantized to the nearest *round_to* ms."""
    start_ms = instrument['from_beat_ms']
    end_ms = instrument['to_beat_ms']
    phase_beats = instrument['tempo_phase']
    progress = float(beat % phase_beats) / phase_beats
    envelope = getMultiplier(progress)
    interpolated = start_ms + envelope * (end_ms - start_ms)
    return int(roundToNearest(interpolated, round_to))
# Return if the instrument should be played in the given interval
def isValidInterval(instrument, elapsed_ms):
    """True when the interval cycle at *elapsed_ms* matches the instrument's
    interval offset (plays every `interval`-th window of `interval_ms`)."""
    cycle = int(math.floor(1.0 * elapsed_ms / instrument['interval_ms']))
    return cycle % instrument['interval'] == instrument['interval_offset']
# Make sure there's no sudden drop in gain
def continueFromPrevious(instrument):
    """True for bracket-restricted instruments, whose beat counter must keep
    running across stations so the gain envelope doesn't restart abruptly."""
    covers_everyone = instrument['bracket_min'] <= 0 and instrument['bracket_max'] >= 100
    return not covers_everyone
# Add beats to sequence
def addBeatsToSequence(instrument, duration, ms, beat_ms, round_to):
    """Append notes for *instrument* covering *duration* ms starting at *ms*.

    Mutates the module-level `sequence` (note list) and `hindex`
    (Halton-sequence counter used for deterministic jitter).
    """
    global sequence
    global hindex
    # shift the start by the instrument's fractional per-beat tempo offset
    offset_ms = int(instrument['tempo_offset'] * beat_ms)
    ms += offset_ms
    previous_ms = int(ms)
    from_beat_ms = instrument['from_beat_ms']
    to_beat_ms = instrument['to_beat_ms']
    min_ms = min(from_beat_ms, to_beat_ms)
    remaining_duration = int(duration)
    elapsed_duration = offset_ms
    continue_from_prev = continueFromPrevious(instrument)
    while remaining_duration >= min_ms:
        elapsed_ms = int(ms)
        elapsed_beat = int((elapsed_ms-previous_ms) / beat_ms)
        # continue beat from previous
        if continue_from_prev:
            elapsed_beat = int(elapsed_ms / beat_ms)
        this_beat_ms = getBeatMs(instrument, elapsed_beat, round_to)
        # add to sequence if in valid interval
        if isValidInterval(instrument, elapsed_ms):
            # deterministic pseudo-random jitter from the Halton sequence
            h = halton(hindex, 3)
            variance = int(h * VARIANCE_MS * 2 - VARIANCE_MS)
            rate_variance = float(h * VARIANCE_RATE * 2 - VARIANCE_RATE)
            sequence.append({
                'instrument_index': instrument['index'],
                'instrument': instrument,
                'position': 0,
                'gain': getGain(instrument, elapsed_beat),
                'rate': 1.0 + rate_variance,
                'elapsed_ms': max([elapsed_ms + variance, 0])
            })
            hindex += 1
        remaining_duration -= this_beat_ms
        elapsed_duration += this_beat_ms
        ms += this_beat_ms
# Build main sequence
for instrument in instruments:
    ms = 0
    station_queue_duration = 0
    # misc instruments are not sequenced here
    if instrument['type'] == 'misc':
        continue
    # Each station in stations
    for station in stations:
        # Check if instrument is in this station
        instrument_index = findInList(station['instruments'], 'index', instrument['index'])
        # Instrument not here, just add the station duration and continue
        if instrument_index < 0 and station_queue_duration > 0:
            # flush the queued stretch where the instrument was present
            addBeatsToSequence(instrument, station_queue_duration, ms, BEAT_MS, ROUND_TO_NEAREST)
            ms += station_queue_duration + station['duration']
            station_queue_duration = 0
        elif instrument_index < 0:
            ms += station['duration']
        else:
            # instrument present: extend the queued stretch
            station_queue_duration += station['duration']
    # flush any stretch still queued at the end of the line
    if station_queue_duration > 0:
        addBeatsToSequence(instrument, station_queue_duration, ms, BEAT_MS, ROUND_TO_NEAREST)
# Calculate total time
total_seconds = int(1.0*total_ms/1000)
print('Total sequence time: '+time.strftime('%M:%S', time.gmtime(total_seconds)) + '(' + str(total_seconds) + 's)')
# Sort sequence
sequence = sorted(sequence, key=lambda k: k['elapsed_ms'])
# Add milliseconds to sequence
elapsed = 0
for index, step in enumerate(sequence):
    # store the delta from the previous note; ChucK consumes relative waits
    sequence[index]['milliseconds'] = step['elapsed_ms'] - elapsed
    elapsed = step['elapsed_ms']
# Write instruments to file
# NOTE(review): 'wb' with csv.writer is Python 2 style; Python 3 needs
# open(..., 'w', newline='') — confirm the target interpreter.
if WRITE_SEQUENCE:
    with open(INSTRUMENTS_OUTPUT_FILE, 'wb') as f:
        w = csv.writer(f)
        for index, instrument in enumerate(instruments):
            w.writerow([index])
            w.writerow([instrument['file']])
        # NOTE(review): seeking back 2 bytes assumes \r\n row endings — verify
        # this still trims exactly one newline on the deployment platform.
        f.seek(-2, os.SEEK_END) # remove newline
        f.truncate()
    print('Successfully wrote instruments to file: '+INSTRUMENTS_OUTPUT_FILE)
# Write sequence to file
if WRITE_SEQUENCE:
    with open(SEQUENCE_OUTPUT_FILE, 'wb') as f:
        w = csv.writer(f)
        # one value per row, five rows per step (ChucK-friendly flat layout)
        for step in sequence:
            w.writerow([step['instrument_index']])
            w.writerow([step['position']])
            w.writerow([step['gain']])
            w.writerow([step['rate']])
            w.writerow([step['milliseconds']])
        f.seek(-2, os.SEEK_END) # remove newline
        f.truncate()
    print('Successfully wrote sequence to file: '+SEQUENCE_OUTPUT_FILE)
# Write summary file
if WRITE_REPORT:
    with open(REPORT_SUMMARY_OUTPUT_FILE, 'wb') as f:
        w = csv.writer(f)
        w.writerow(['Time', 'Name', 'Distance', 'Duration', 'Beats', 'Instruments'])
        elapsed = 0
        for station in stations:
            duration = station['duration']
            duration_f = time.strftime('%M:%S', time.gmtime(int(duration/1000)))
            elapsed_f = time.strftime('%M:%S', time.gmtime(int(elapsed/1000)))
            elapsed += duration
            w.writerow([elapsed_f, station['name'], round(station['distance'], 2), duration_f, station['beats'], ' '.join([i['name'] for i in station['instruments']])])
    print('Successfully wrote summary file: '+REPORT_SUMMARY_OUTPUT_FILE)
# Write sequence report to file
if WRITE_REPORT:
    with open(REPORT_SEQUENCE_OUTPUT_FILE, 'wb') as f:
        w = csv.writer(f)
        w.writerow(['Time', 'Instrument', 'Gain'])
        for step in sequence:
            instrument = instruments[step['instrument_index']]
            elapsed = step['elapsed_ms']
            # format elapsed time as MM:SS.mmm
            elapsed_f = time.strftime('%M:%S', time.gmtime(int(elapsed/1000)))
            ms = elapsed % 1000
            elapsed_f += '.' + str(ms)
            w.writerow([elapsed_f, instrument['file'], step['gain']])
        f.seek(-2, os.SEEK_END) # remove newline
        f.truncate()
    print('Successfully wrote sequence report to file: '+REPORT_SEQUENCE_OUTPUT_FILE)
# Write JSON data for the visualization
if WRITE_JSON:
    json_data = []
    elapsed_duration = 0
    for station in stations:
        json_data.append({
            'name': station['name'],
            'borough': station['borough'].upper(),
            'borough_next': station['borough_next'].upper(),
            'duration': station['duration'],
            'elapsed_duration': elapsed_duration,
            'min_duration': min_duration,
            'lat': station['lat'],
            'lng': station['lng']
        })
        elapsed_duration += station['duration']
    # the same payload feeds both the stations and the map visualizations
    with open(STATIONS_VISUALIZATION_OUTPUT_FILE, 'w') as outfile:
        json.dump(json_data, outfile)
    print('Successfully wrote to JSON file: '+STATIONS_VISUALIZATION_OUTPUT_FILE)
    with open(MAP_VISUALIZATION_OUTPUT_FILE, 'w') as outfile:
        json.dump(json_data, outfile)
    print('Successfully wrote to JSON file: '+MAP_VISUALIZATION_OUTPUT_FILE)
| |
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''config.py: util functions for config, mainly for heron-cli'''
import argparse
import contextlib
import getpass
import os
import sys
import subprocess
import tarfile
import tempfile
import yaml
from heron.common.src.python.utils.log import Log
# pylint: disable=logging-not-lazy
# default environ tag, if not provided
ENVIRON = "default"
# directories for heron distribution
BIN_DIR = "bin"
CONF_DIR = "conf"
ETC_DIR = "etc"
LIB_DIR = "lib"
CLI_DIR = ".heron"
RELEASE_YAML = "release.yaml"
OVERRIDE_YAML = "override.yaml"
# directories for heron sandbox
SANDBOX_CONF_DIR = "./heron-conf"
# config file for heron cli
CLIENT_YAML = "client.yaml"
# cli configs for role and env
IS_ROLE_REQUIRED = "heron.config.is.role.required"
IS_ENV_REQUIRED = "heron.config.is.env.required"
def create_tar(tar_filename, files, config_dir, config_files):
    '''
    Create a gzipped tar archive containing the given files plus the config
    directory and extra config files (stored under the sandbox conf dir).
    Raises if any input path does not exist.
    '''
    with contextlib.closing(tarfile.open(tar_filename, 'w:gz')) as tar:
        for path in files:
            if not os.path.isfile(path):
                raise Exception("%s is not an existing file" % path)
            tar.add(path, arcname=os.path.basename(path))
        if not os.path.isdir(config_dir):
            raise Exception("%s is not an existing directory" % config_dir)
        tar.add(config_dir, arcname=get_heron_sandbox_conf_dir())
        for path in config_files:
            if not os.path.isfile(path):
                raise Exception("%s is not an existing file" % path)
            arcfile = os.path.join(get_heron_sandbox_conf_dir(), os.path.basename(path))
            tar.add(path, arcname=arcfile)
def get_subparser(parser, command):
    '''
    Return the subparser registered under *command*, or None if absent.
    '''
    # pylint: disable=protected-access
    subparser_actions = [
        action for action in parser._actions
        if isinstance(action, argparse._SubParsersAction)
    ]
    # there will probably only be one subparser action, but better safe than sorry
    for action in subparser_actions:
        for name, sub in action.choices.items():
            if name == command:
                return sub
    return None
def cygpath(x):
    '''
    Convert *x* to a Windows-style path via the cygwin `cygpath -wp` tool.
    '''
    proc = subprocess.Popen(['cygpath', '-wp', x], stdout=subprocess.PIPE)
    stdout_data = proc.communicate()[0]
    # keep only the first line of cygpath's output
    return stdout_data.split("\n")[0]
def identity(x):
    '''
    Return the argument unchanged (no-op path normalization).
    '''
    result = x
    return result
def normalized_class_path(x):
    '''
    Normalize a path for the current platform.
    '''
    # cygwin needs Windows-style paths for the JVM; everywhere else pass through
    return cygpath(x) if sys.platform == 'cygwin' else identity(x)
def get_classpath(jars):
    '''
    Join the normalized paths of all jars into a ':'-separated classpath.
    '''
    normalized = [normalized_class_path(jar) for jar in jars]
    return ':'.join(normalized)
def get_heron_dir():
    """
    This will extract heron directory from .pex file.

    For example,
    when __file__ is '/Users/heron-user/bin/heron/heron/tools/common/src/python/utils/config.pyc', and
    its real path is '/Users/heron-user/.heron/bin/heron/tools/common/src/python/utils/config.pyc',
    the internal variable ``path`` would be '/Users/heron-user/.heron', which is the heron directory

    :return: root location for heron-cli.
    """
    # drop the last 9 path segments of this module's real location
    segments = os.path.realpath(__file__).split('/')
    heron_root = "/".join(segments[:-9])
    return normalized_class_path(heron_root)
################################################################################
# Get the root of heron dir and various sub directories depending on platform
################################################################################
def get_heron_dir_explorer():
    """
    This will extract heron directory from .pex file.

    From heron-cli with modification since we need to reuse cli's conf
    :return: root location for heron-cli.
    """
    segments = os.path.realpath(__file__).split('/')[:-10]
    segments.append(CLI_DIR)
    return normalized_class_path("/".join(segments))
def get_heron_bin_dir():
    """
    This will provide heron bin directory from .pex file.
    :return: absolute path of heron bin directory
    """
    return os.path.join(get_heron_dir(), BIN_DIR)
def get_heron_conf_dir():
    """
    This will provide heron conf directory from .pex file.
    :return: absolute path of heron conf directory
    """
    return os.path.join(get_heron_dir(), CONF_DIR)
def get_heron_lib_dir():
    """
    This will provide heron lib directory from .pex file.
    :return: absolute path of heron lib directory
    """
    return os.path.join(get_heron_dir(), LIB_DIR)
def get_heron_release_file():
    """
    This will provide the path to heron release.yaml file
    :return: absolute path of heron release.yaml file
    """
    release_file = os.path.join(get_heron_dir(), RELEASE_YAML)
    return release_file
def get_heron_cluster_conf_dir(cluster, default_config_path):
    """
    This will provide heron cluster config directory, if config path is default
    :return: absolute path of heron cluster conf directory
    """
    cluster_conf = os.path.join(default_config_path, cluster)
    return cluster_conf
def get_heron_sandbox_conf_dir():
    """
    This will provide heron conf directory in the sandbox
    :return: relative path of heron sandbox conf directory
    """
    # fixed relative path used inside the sandbox tarball (see create_tar)
    return SANDBOX_CONF_DIR
def get_heron_libs(local_jars):
    """Get all the heron lib jars with the absolute paths"""
    lib_dir = get_heron_lib_dir()
    return [os.path.join(lib_dir, jar) for jar in local_jars]
def get_heron_cluster(cluster_role_env):
    """Get the cluster to which topology is submitted"""
    # everything before the first '/' of cluster/role/env
    cluster, _, _ = cluster_role_env.partition('/')
    return cluster
# pylint: disable=too-many-branches
def parse_cluster_role_env(cluster_role_env, config_path):
    """Parse cluster/[role]/[environ], supply default, if not provided, not required"""
    parts = cluster_role_env.split('/')[:3]
    Log.info("Using config file under %s" % config_path)
    if not os.path.isdir(config_path):
        Log.error("Config path cluster directory does not exist: %s" % config_path)
        raise Exception("Invalid config path")
    # if cluster/role/env is not completely provided, check further
    if len(parts) < 3:
        cli_conf_file = os.path.join(config_path, CLIENT_YAML)
        # if client conf doesn't exist, use default value
        if not os.path.isfile(cli_conf_file):
            if len(parts) == 1:
                # role defaults to the current OS user
                parts.append(getpass.getuser())
            if len(parts) == 2:
                parts.append(ENVIRON)
        else:
            cli_confs = {}
            with open(cli_conf_file, 'r') as conf_file:
                # NOTE(review): yaml.load without an explicit Loader is unsafe
                # on untrusted input; consider yaml.safe_load.
                tmp_confs = yaml.load(conf_file)
                # the return value of yaml.load can be None if conf_file is an empty file
                if tmp_confs is not None:
                    cli_confs = tmp_confs
                else:
                    print "Failed to read: %s due to it is empty" % (CLIENT_YAML)
            # if role is required but not provided, raise exception
            if len(parts) == 1:
                if (IS_ROLE_REQUIRED in cli_confs) and (cli_confs[IS_ROLE_REQUIRED] is True):
                    raise Exception("role required but not provided (cluster/role/env = %s). See %s in %s"
                                    % (cluster_role_env, IS_ROLE_REQUIRED, CLIENT_YAML))
                else:
                    parts.append(getpass.getuser())
            # if environ is required but not provided, raise exception
            if len(parts) == 2:
                if (IS_ENV_REQUIRED in cli_confs) and (cli_confs[IS_ENV_REQUIRED] is True):
                    raise Exception("environ required but not provided (cluster/role/env = %s). See %s in %s"
                                    % (cluster_role_env, IS_ENV_REQUIRED, CLIENT_YAML))
                else:
                    parts.append(ENVIRON)
    # if cluster or role or environ is empty, print
    if len(parts[0]) == 0 or len(parts[1]) == 0 or len(parts[2]) == 0:
        print "Failed to parse"
        sys.exit(1)
    return (parts[0], parts[1], parts[2])
################################################################################
# Parse the command line for overriding the defaults
################################################################################
def parse_override_config(namespace):
    """Write the cli-provided key=value overrides into a temp override.yaml
    file and return its path."""
    try:
        out_dir = tempfile.mkdtemp()
        out_file = os.path.join(out_dir, OVERRIDE_YAML)
        with open(out_file, 'w') as handle:
            for entry in namespace:
                # convert "key=value" into yaml "key: value"
                handle.write("%s\n" % entry.replace('=', ': '))
        return out_file
    except Exception as e:
        raise Exception("Failed to parse override config: %s" % str(e))
def get_java_path():
    """Get the path of java executable"""
    java_home = os.environ.get("JAVA_HOME")
    java_binary = os.path.join(java_home, BIN_DIR, "java")
    return java_binary
def check_java_home_set():
    """Check if the java home set"""
    # check if environ variable is set
    if "JAVA_HOME" not in os.environ:
        Log.error("JAVA_HOME not set")
        return False
    # check if the value set is correct
    java_path = get_java_path()
    if not (os.path.isfile(java_path) and os.access(java_path, os.X_OK)):
        Log.error("JAVA_HOME/bin/java either does not exist or not an executable")
        return False
    return True
def check_release_file_exists():
    """Check if the release.yaml file exists"""
    release_file = get_heron_release_file()
    # if the file does not exist and is not a file
    if os.path.isfile(release_file):
        return True
    Log.error("Required file not found: %s" % release_file)
    return False
def print_version():
    # Dump release.yaml verbatim to stdout; the trailing comma suppresses the
    # extra newline (Python 2 print statement).
    release_file = get_heron_release_file()
    with open(release_file) as release_info:
        for line in release_info:
            print line,
def insert_bool(param, command_args):
    '''
    Insert the literal 'True' right after *param* in command_args (in place),
    so a valueless boolean flag carries an explicit value.
    :param param: flag to look for, e.g. '--verbose'
    :param command_args: list of command line arguments (mutated)
    :return: the same list, with 'True' inserted after param if it was found
    '''
    position = 0
    matched = False
    for arg in command_args:
        if arg == '--' and not matched:
            # everything after a bare '--' is positional; stop searching
            break
        if arg == param:
            matched = True
            break
        position += 1
    if matched:
        command_args.insert(position + 1, 'True')
    return command_args
def insert_bool_values(command_line_args):
    '''
    Give each supported valueless boolean flag an explicit 'True' argument.
    :param command_line_args: list of command line arguments (mutated)
    :return: the updated list
    '''
    with_verbose = insert_bool('--verbose', command_line_args)
    return insert_bool('--deploy-deactivated', with_verbose)
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """Help formatter that hides the redundant {cmd1,cmd2,...} metavar line."""
    def _format_action(self, action):
        # pylint: disable=bad-super-call
        # Deliberately skips RawDescriptionHelpFormatter in the MRO so the
        # grandparent's _format_action output is used as the base.
        parts = super(argparse.RawDescriptionHelpFormatter, self)._format_action(action)
        if action.nargs == argparse.PARSER:
            # drop the first line (the subcommand metavar) from the help output
            parts = "\n".join(parts.split("\n")[1:])
        return parts
| |
# coding: utf-8
from __future__ import unicode_literals
from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import SomeClass, SomeOtherException, UTF8Class, setup
class FilterSyntaxTests(SimpleTestCase):
    """Template filter parsing/rendering syntax tests.

    Each test registers a one-template engine via the @setup decorator and
    renders it (or asserts that compilation raises TemplateSyntaxError).
    """

    @setup({'filter-syntax01': '{{ var|upper }}'})
    def test_filter_syntax01(self):
        """
        Basic filter usage
        """
        output = self.engine.render_to_string('filter-syntax01', {"var": "Django is the greatest!"})
        self.assertEqual(output, "DJANGO IS THE GREATEST!")

    @setup({'filter-syntax02': '{{ var|upper|lower }}'})
    def test_filter_syntax02(self):
        """
        Chained filters
        """
        output = self.engine.render_to_string('filter-syntax02', {"var": "Django is the greatest!"})
        self.assertEqual(output, "django is the greatest!")

    @setup({'filter-syntax03': '{{ var |upper }}'})
    def test_filter_syntax03(self):
        """
        Allow spaces before the filter pipe
        """
        output = self.engine.render_to_string('filter-syntax03', {'var': 'Django is the greatest!'})
        self.assertEqual(output, 'DJANGO IS THE GREATEST!')

    @setup({'filter-syntax04': '{{ var| upper }}'})
    def test_filter_syntax04(self):
        """
        Allow spaces after the filter pipe
        """
        output = self.engine.render_to_string('filter-syntax04', {'var': 'Django is the greatest!'})
        self.assertEqual(output, 'DJANGO IS THE GREATEST!')

    @setup({'filter-syntax05': '{{ var|does_not_exist }}'})
    def test_filter_syntax05(self):
        """
        Raise TemplateSyntaxError for a nonexistent filter
        """
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('filter-syntax05')

    @setup({'filter-syntax06': '{{ var|fil(ter) }}'})
    def test_filter_syntax06(self):
        """
        Raise TemplateSyntaxError when trying to access a filter containing
        an illegal character
        """
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('filter-syntax06')

    @setup({'filter-syntax07': "{% nothing_to_see_here %}"})
    def test_filter_syntax07(self):
        """
        Raise TemplateSyntaxError for invalid block tags
        """
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('filter-syntax07')

    @setup({'filter-syntax08': "{% %}"})
    def test_filter_syntax08(self):
        """
        Raise TemplateSyntaxError for empty block tags
        """
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('filter-syntax08')

    @setup({'filter-syntax09': '{{ var|cut:"o"|upper|lower }}'})
    def test_filter_syntax09(self):
        """
        Chained filters, with an argument to the first one
        """
        output = self.engine.render_to_string('filter-syntax09', {'var': 'Foo'})
        self.assertEqual(output, 'f')

    @setup({'filter-syntax10': r'{{ var|default_if_none:" endquote\" hah" }}'})
    def test_filter_syntax10(self):
        """
        Literal string as argument is always "safe" from auto-escaping.
        """
        output = self.engine.render_to_string('filter-syntax10', {"var": None})
        self.assertEqual(output, ' endquote" hah')

    @setup({'filter-syntax11': r'{{ var|default_if_none:var2 }}'})
    def test_filter_syntax11(self):
        """
        Variable as argument
        """
        output = self.engine.render_to_string('filter-syntax11', {"var": None, "var2": "happy"})
        self.assertEqual(output, 'happy')

    @setup({'filter-syntax12': r'{{ var|yesno:"yup,nup,mup" }} {{ var|yesno }}'})
    def test_filter_syntax12(self):
        """
        Default argument testing
        """
        output = self.engine.render_to_string('filter-syntax12', {"var": True})
        self.assertEqual(output, 'yup yes')

    @setup({'filter-syntax13': r'1{{ var.method3 }}2'})
    def test_filter_syntax13(self):
        """
        Fail silently for methods that raise an exception with a
        `silent_variable_failure` attribute
        """
        output = self.engine.render_to_string('filter-syntax13', {"var": SomeClass()})
        if self.engine.string_if_invalid:
            self.assertEqual(output, "1INVALID2")
        else:
            self.assertEqual(output, "12")

    @setup({'filter-syntax14': r'1{{ var.method4 }}2'})
    def test_filter_syntax14(self):
        """
        In methods that raise an exception without a
        `silent_variable_attribute` set to True, the exception propagates
        """
        with self.assertRaises(SomeOtherException):
            self.engine.render_to_string('filter-syntax14', {"var": SomeClass()})

    @setup({'filter-syntax15': r'{{ var|default_if_none:"foo\bar" }}'})
    def test_filter_syntax15(self):
        """
        Escaped backslash in argument
        """
        output = self.engine.render_to_string('filter-syntax15', {"var": None})
        self.assertEqual(output, r'foo\bar')

    @setup({'filter-syntax16': r'{{ var|default_if_none:"foo\now" }}'})
    def test_filter_syntax16(self):
        """
        Escaped backslash using known escape char
        """
        output = self.engine.render_to_string('filter-syntax16', {"var": None})
        self.assertEqual(output, r'foo\now')

    @setup({'filter-syntax17': r'{{ var|join:"" }}'})
    def test_filter_syntax17(self):
        """
        Empty strings can be passed as arguments to filters
        """
        output = self.engine.render_to_string('filter-syntax17', {'var': ['a', 'b', 'c']})
        self.assertEqual(output, 'abc')

    @setup({'filter-syntax18': r'{{ var }}'})
    def test_filter_syntax18(self):
        """
        Make sure that any unicode strings are converted to bytestrings
        in the final output.
        """
        output = self.engine.render_to_string('filter-syntax18', {'var': UTF8Class()})
        self.assertEqual(output, '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111')

    @setup({'filter-syntax19': '{{ var|truncatewords:1 }}'})
    def test_filter_syntax19(self):
        """
        Numbers as filter arguments should work
        """
        output = self.engine.render_to_string('filter-syntax19', {"var": "hello world"})
        self.assertEqual(output, "hello ...")

    @setup({'filter-syntax20': '{{ ""|default_if_none:"was none" }}'})
    def test_filter_syntax20(self):
        """
        Filters should accept empty string constants
        """
        output = self.engine.render_to_string('filter-syntax20')
        self.assertEqual(output, "")

    @setup({'filter-syntax21': r'1{{ var.silent_fail_key }}2'})
    def test_filter_syntax21(self):
        """
        Fail silently for non-callable attribute and dict lookups which
        raise an exception with a "silent_variable_failure" attribute
        """
        output = self.engine.render_to_string('filter-syntax21', {"var": SomeClass()})
        if self.engine.string_if_invalid:
            self.assertEqual(output, "1INVALID2")
        else:
            self.assertEqual(output, "12")

    @setup({'filter-syntax22': r'1{{ var.silent_fail_attribute }}2'})
    def test_filter_syntax22(self):
        """
        Fail silently for non-callable attribute and dict lookups which
        raise an exception with a `silent_variable_failure` attribute
        """
        output = self.engine.render_to_string('filter-syntax22', {"var": SomeClass()})
        if self.engine.string_if_invalid:
            self.assertEqual(output, "1INVALID2")
        else:
            self.assertEqual(output, "12")

    @setup({'filter-syntax23': r'1{{ var.noisy_fail_key }}2'})
    def test_filter_syntax23(self):
        """
        In attribute and dict lookups that raise an unexpected exception
        without a `silent_variable_attribute` set to True, the exception
        propagates
        """
        with self.assertRaises(SomeOtherException):
            self.engine.render_to_string('filter-syntax23', {"var": SomeClass()})

    @setup({'filter-syntax24': r'1{{ var.noisy_fail_attribute }}2'})
    def test_filter_syntax24(self):
        """
        In attribute and dict lookups that raise an unexpected exception
        without a `silent_variable_attribute` set to True, the exception
        propagates
        """
        with self.assertRaises(SomeOtherException):
            self.engine.render_to_string('filter-syntax24', {"var": SomeClass()})

    @setup({'filter-syntax25': '{{ var.attribute_error_attribute }}'})
    def test_filter_syntax25(self):
        """
        #16383 - Attribute errors from an @property value should be
        reraised.
        """
        with self.assertRaises(AttributeError):
            self.engine.render_to_string('filter-syntax25', {'var': SomeClass()})
| |
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for VHD related operations.
Based on the "root/virtualization/v2" namespace available starting with
Hyper-V Server / Windows Server 2012.
"""
import struct
import sys
if sys.platform == 'win32':
import wmi
from xml.etree import ElementTree
from oslo.utils import units
from nova.i18n import _
from nova.virt.hyperv import constants
from nova.virt.hyperv import vhdutils
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import vmutilsv2
# VHDX on-disk layout constants (sizes and offsets in bytes).
# NOTE(review): values presumably follow the Microsoft VHDX format
# specification — verify against the spec before changing any of them.
VHDX_BAT_ENTRY_SIZE = 8
VHDX_HEADER_OFFSETS = [64 * units.Ki, 128 * units.Ki]  # the two header copies
VHDX_HEADER_SECTION_SIZE = units.Mi
VHDX_LOG_LENGTH_OFFSET = 68
VHDX_METADATA_SIZE_OFFSET = 64
VHDX_REGION_TABLE_OFFSET = 192 * units.Ki
VHDX_BS_METADATA_ENTRY_OFFSET = 48
class VHDUtilsV2(vhdutils.VHDUtils):
    """VHD/VHDX image operations based on the v2 virtualization namespace.

    Uses the Msvm_ImageManagementService WMI class available starting with
    Hyper-V Server / Windows Server 2012, plus direct parsing of the VHDX
    binary format for size computations the Win32 API does not expose.
    """

    _VHD_TYPE_DYNAMIC = 3
    _VHD_TYPE_DIFFERENCING = 4

    # Maps the Nova disk format constants to the values expected by
    # Msvm_VirtualHardDiskSettingData.Format.
    _vhd_format_map = {
        constants.DISK_FORMAT_VHD: 2,
        constants.DISK_FORMAT_VHDX: 3,
    }

    def __init__(self):
        self._vmutils = vmutilsv2.VMUtilsV2()
        if sys.platform == 'win32':
            self._conn = wmi.WMI(moniker='//./root/virtualization/v2')

    def create_dynamic_vhd(self, path, max_internal_size, format):
        """Create a dynamically expanding VHD/VHDX image at `path`.

        :raises HyperVException: if `format` is not a supported disk format.
        """
        vhd_format = self._vhd_format_map.get(format)
        if not vhd_format:
            raise vmutils.HyperVException(_("Unsupported disk format: %s") %
                                          format)

        self._create_vhd(self._VHD_TYPE_DYNAMIC, vhd_format, path,
                         max_internal_size=max_internal_size)

    def create_differencing_vhd(self, path, parent_path):
        """Create a differencing disk at `path` backed by `parent_path`."""
        # Although this method can take a size argument in case of VHDX
        # images, avoid it as the underlying Win32 is currently not
        # resizing the disk properly. This can be reconsidered once the
        # Win32 issue is fixed.
        parent_vhd_info = self.get_vhd_info(parent_path)
        self._create_vhd(self._VHD_TYPE_DIFFERENCING,
                         parent_vhd_info["Format"],
                         path, parent_path=parent_path)

    def _create_vhd(self, vhd_type, format, path, max_internal_size=None,
                    parent_path=None):
        # Build a setting data object describing the new disk and hand it
        # to the image management service as embedded XML.
        vhd_info = self._conn.Msvm_VirtualHardDiskSettingData.new()

        vhd_info.Type = vhd_type
        vhd_info.Format = format
        vhd_info.Path = path
        vhd_info.ParentPath = parent_path

        if max_internal_size:
            vhd_info.MaxInternalSize = max_internal_size

        image_man_svc = self._conn.Msvm_ImageManagementService()[0]
        (job_path, ret_val) = image_man_svc.CreateVirtualHardDisk(
            VirtualDiskSettingData=vhd_info.GetText_(1))
        self._vmutils.check_ret_val(ret_val, job_path)

    def reconnect_parent_vhd(self, child_vhd_path, parent_vhd_path):
        """Set `parent_vhd_path` as the parent of a differencing disk."""
        image_man_svc = self._conn.Msvm_ImageManagementService()[0]
        vhd_info_xml = self._get_vhd_info_xml(image_man_svc, child_vhd_path)

        et = ElementTree.fromstring(vhd_info_xml)
        item = et.find(".//PROPERTY[@NAME='ParentPath']/VALUE")
        # BUG FIX: an ElementTree Element with no children evaluates as
        # False even when it was found, so the previous `if item:` check
        # silently skipped updating the parent path. Compare with None.
        if item is not None:
            item.text = parent_vhd_path
        vhd_info_xml = ElementTree.tostring(et)

        (job_path, ret_val) = image_man_svc.SetVirtualHardDiskSettingData(
            VirtualDiskSettingData=vhd_info_xml)
        self._vmutils.check_ret_val(ret_val, job_path)

    def _get_resize_method(self):
        # Returns the WMI method used by the base class to resize images.
        image_man_svc = self._conn.Msvm_ImageManagementService()[0]
        return image_man_svc.ResizeVirtualHardDisk

    def get_internal_vhd_size_by_file_size(self, vhd_path,
                                           new_vhd_file_size):
        """Return the usable internal size for a given on-disk file size.

        VHDX Size = Header (1 MB)
                    + Log
                    + Metadata Region
                    + BAT
                    + Payload Blocks
        Chunk size = maximum number of bytes described by a SB block
                   = 2 ** 23 * LogicalSectorSize
        """
        vhd_format = self.get_vhd_format(vhd_path)
        if vhd_format == constants.DISK_FORMAT_VHD:
            # Plain VHD sizing is handled by the V1 implementation.
            return super(VHDUtilsV2,
                         self).get_internal_vhd_size_by_file_size(
                             vhd_path, new_vhd_file_size)
        else:
            vhd_info = self.get_vhd_info(vhd_path)
            vhd_type = vhd_info['Type']
            if vhd_type == self._VHD_TYPE_DIFFERENCING:
                # A differencing image shares its geometry with its parent,
                # so compute the size against the parent instead.
                vhd_parent = self.get_vhd_parent_path(vhd_path)
                return self.get_internal_vhd_size_by_file_size(
                    vhd_parent, new_vhd_file_size)
            else:
                try:
                    with open(vhd_path, 'rb') as f:
                        hs = VHDX_HEADER_SECTION_SIZE
                        bes = VHDX_BAT_ENTRY_SIZE
                        lss = vhd_info['LogicalSectorSize']
                        bs = self._get_vhdx_block_size(f)
                        ls = self._get_vhdx_log_size(f)
                        ms = self._get_vhdx_metadata_size_and_offset(f)[0]

                        chunk_ratio = (1 << 23) * lss / bs
                        size = new_vhd_file_size

                        # Solve for the payload size given that headers,
                        # log, metadata and BAT all share the file budget.
                        max_internal_size = (bs * chunk_ratio * (size - hs -
                            ls - ms - bes - bes / chunk_ratio) / (bs *
                            chunk_ratio + bes * chunk_ratio + bes))

                        # Round down to a whole number of payload blocks.
                        return max_internal_size - (max_internal_size % bs)
                except IOError as ex:
                    raise vmutils.HyperVException(_("Unable to obtain "
                                                    "internal size from VHDX: "
                                                    "%(vhd_path)s. Exception: "
                                                    "%(ex)s") %
                                                  {"vhd_path": vhd_path,
                                                   "ex": ex})

    def _get_vhdx_current_header_offset(self, vhdx_file):
        # VHDX keeps two header copies; the current one is the copy with
        # the highest sequence number (8-byte LE integer at offset 8
        # within each header copy).
        sequence_numbers = []
        for offset in VHDX_HEADER_OFFSETS:
            vhdx_file.seek(offset + 8)
            sequence_numbers.append(struct.unpack('<Q',
                                                  vhdx_file.read(8))[0])
        current_header = sequence_numbers.index(max(sequence_numbers))
        return VHDX_HEADER_OFFSETS[current_header]

    def _get_vhdx_log_size(self, vhdx_file):
        # The log length is a 4-byte LE field inside the current header.
        current_header_offset = self._get_vhdx_current_header_offset(
            vhdx_file)
        offset = current_header_offset + VHDX_LOG_LENGTH_OFFSET
        vhdx_file.seek(offset)
        log_size = struct.unpack('<I', vhdx_file.read(4))[0]
        return log_size

    def _get_vhdx_metadata_size_and_offset(self, vhdx_file):
        """Return (size, offset) of the metadata region."""
        offset = VHDX_METADATA_SIZE_OFFSET + VHDX_REGION_TABLE_OFFSET
        vhdx_file.seek(offset)
        metadata_offset = struct.unpack('<Q', vhdx_file.read(8))[0]
        metadata_size = struct.unpack('<I', vhdx_file.read(4))[0]
        return metadata_size, metadata_offset

    def _get_vhdx_block_size(self, vhdx_file):
        # The block size lives in the "file parameters" metadata entry,
        # reached through an indirection stored in the metadata region.
        metadata_offset = self._get_vhdx_metadata_size_and_offset(
            vhdx_file)[1]
        offset = metadata_offset + VHDX_BS_METADATA_ENTRY_OFFSET
        vhdx_file.seek(offset)
        file_parameter_offset = struct.unpack('<I', vhdx_file.read(4))[0]

        vhdx_file.seek(file_parameter_offset + metadata_offset)
        block_size = struct.unpack('<I', vhdx_file.read(4))[0]
        return block_size

    def _get_vhd_info_xml(self, image_man_svc, vhd_path):
        (job_path,
         ret_val,
         vhd_info_xml) = image_man_svc.GetVirtualHardDiskSettingData(vhd_path)

        self._vmutils.check_ret_val(ret_val, job_path)
        # Encode so non-ASCII characters survive the XML round trip.
        return vhd_info_xml.encode('utf8', 'xmlcharrefreplace')

    def get_vhd_info(self, vhd_path):
        """Return a dict with the image's path, parent, sizes and format."""
        image_man_svc = self._conn.Msvm_ImageManagementService()[0]
        vhd_info_xml = self._get_vhd_info_xml(image_man_svc, vhd_path)

        vhd_info_dict = {}
        et = ElementTree.fromstring(vhd_info_xml)
        for item in et.findall("PROPERTY"):
            name = item.attrib["NAME"]
            value_item = item.find("VALUE")
            if value_item is None:
                value_text = None
            else:
                value_text = value_item.text
            if name in ["Path", "ParentPath"]:
                vhd_info_dict[name] = value_text
            elif name in ["BlockSize", "LogicalSectorSize",
                          "PhysicalSectorSize", "MaxInternalSize"]:
                # NOTE: `long` makes this module Python 2 only, matching
                # the rest of the codebase.
                vhd_info_dict[name] = long(value_text)
            elif name in ["Type", "Format"]:
                vhd_info_dict[name] = int(value_text)
        return vhd_info_dict

    def get_best_supported_vhd_format(self):
        return constants.DISK_FORMAT_VHDX
| |
"""Collection of custom Keras layers."""
# Imports
from keras import backend as K
from keras.layers.core import Dense, Reshape, RepeatVector, Lambda, Dropout
from keras.layers import Input, merge
from keras.layers.recurrent import LSTM
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l2
# Apply batch symmetrization (A + A.T)
def batch_symmetrize(input_matrix, batch_size, n_nodes):
    """
    Symmetrize each (n_nodes - 1) x n_nodes matrix in a batch.

    A row of zeros is stacked on top of every matrix to make it square,
    the transpose is added (A + A.T), and the padding row is stripped
    from the result.

    Parameters
    ----------
    input_matrix: theano tensor
        batch_size x n_nodes - 1 x n_nodes
    batch_size: int
        batch size
    n_nodes: int
        number of nodes of the matrix
    """
    padding = K.zeros(shape=[batch_size, 1, n_nodes])
    squared = K.concatenate([padding, input_matrix], axis=1)
    symmetrized, _ = K.theano.scan(
        fn=lambda idx: squared[idx, :, :] + squared[idx, :, :].T,
        sequences=K.arange(squared.shape[0]))
    return symmetrized[:, 1:, :]
# Masked softmax Lambda layer
def masked_softmax(input_layer, n_nodes, batch_size):
    """
    Row-wise softmax restricted to the lower-triangular part.

    The input is multiplied by a lower-triangular mask of ones while a
    large negative constant is added to the upper triangle, so that after
    the softmax each row sums to one with (near) zero weight above the
    diagonal.

    Parameters
    ----------
    input_layer: keras layer object
        (n x 1, n) matrix
    n_nodes: int
        number of nodes
    batch_size: int
        batch size

    Returns
    -------
    output_layer: keras layer object
        (n x 1, n) matrix
    """
    # input_layer = batch_symmetrize(input_layer, batch_size, n_nodes)
    keep = K.theano.tensor.tril(K.ones((n_nodes - 1, n_nodes)))
    suppress = K.theano.tensor.triu(
        -100. * K.ones((n_nodes - 1, n_nodes)), 1)
    masked = keep * input_layer + suppress
    # Adding 0 * eye keeps the original graph structure; presumably this
    # forces broadcasting/shape inference — kept as-is from the original.
    masked = masked + 0 * K.eye(n_nodes)[0:n_nodes - 1, 0:n_nodes]
    flattened = K.reshape(masked, (batch_size * (n_nodes - 1), n_nodes))
    normalized = K.softmax(flattened)
    return K.reshape(normalized, (batch_size, n_nodes - 1, n_nodes))
# Compute full adjacency matrix
def full_matrix(adjacency, n_nodes):
    """
    Return the full adjacency matrix of `adjacency`, i.e. (I - A)^-1.

    Parameters
    ----------
    adjacency: keras layer object
        (n , n) matrix

    Returns
    -------
    keras layer object
        (n , n) matrix
    """
    identity = K.eye(n_nodes)
    return K.theano.tensor.nlinalg.matrix_inverse(identity - adjacency)
def batch_full_matrix(adjacency, n_nodes, batch_size):
    """Apply full_matrix to every sample in the batch via a theano scan."""
    expanded, _ = K.theano.scan(
        fn=lambda idx: full_matrix(adjacency[idx, :, :], n_nodes),
        sequences=K.arange(batch_size))
    return expanded
# Masked softmax Lambda layer
def masked_softmax_full(input_layer, n_nodes, batch_size):
    """
    Lower-triangular softmax followed by full-adjacency expansion.

    First applies `masked_softmax`, pads the result back to a square
    matrix with a zero row, computes the full adjacency (I - A)^-1 per
    sample, and strips the padding row again.

    Parameters
    ----------
    input_layer: keras layer object
        (n x 1, n) matrix

    Returns
    -------
    output_layer: keras layer object
        (n x 1, n) matrix
    """
    softmaxed = masked_softmax(input_layer, n_nodes, batch_size)
    padded = K.concatenate(
        [K.zeros(shape=[batch_size, 1, n_nodes]), softmaxed], axis=1)
    expanded, _ = K.theano.scan(
        fn=lambda idx: full_matrix(padded[idx, :, :], n_nodes),
        sequences=K.arange(batch_size))
    return expanded[:, 1:, :]
def distance_from_parent(adjacency, locations, n_nodes, batch_size):
    """
    Compute (I - A) . locations for every sample in the batch, i.e. each
    node's location minus its parent's contribution.

    Parameters
    ----------
    adjacency: theano/keras tensor
        (batch_size x n_nodes - 1 x n_nodes) matrix
    locations: theano/keras tensor
        (batch_size x n_nodes x 3) matrix

    Returns
    -------
    result: keras layer object
        (batch_size x n_nodes - 1 x n_nodes) matrix
    """
    offsets, _ = K.theano.scan(
        fn=lambda idx: K.dot(K.eye(n_nodes) - adjacency[idx, :, :],
                             locations[idx, :, :]),
        sequences=K.arange(batch_size))
    return offsets
def locations_by_distance_from_parent(full_adjacency, distance_from_parent, batch_size):
    """
    Recover absolute locations from per-node parent offsets by applying
    the full adjacency matrix to each sample.

    Parameters
    ----------
    full_adjacency: theano/keras tensor
        (batch_size x n_nodes x n_nodes) matrix
    distance_from_parent: theano/keras tensor
        (batch_size x n_nodes x 3) matrix

    Returns
    -------
    result: keras layer object
        (batch_size x n_nodes - 1 x n_nodes) matrix
    """
    positions, _ = K.theano.scan(
        fn=lambda idx: K.dot(full_adjacency[idx, :, :],
                             distance_from_parent[idx, :, :]),
        sequences=K.arange(batch_size))
    return positions
def feature_extractor(inputs,
                      n_nodes,
                      batch_size):
    """
    Compute various features from the packed input and concatenate them.

    The input packs, per sample, the node locations in the first three
    channels and the (n_nodes - 1 x n_nodes) adjacency in the rest.

    Parameters
    ----------
    inputs: keras layer object
        (batch_size x n_nodes - 1 x 3 + n_nodes); locations in the first
        3 columns, adjacency in the remaining columns.
    n_nodes: int
        number of nodes
    batch_size: int
        batch size

    Returns
    -------
    features: keras layer object
        (batch_size x n_nodes x n_features)
        The features currently supported:
            - The adjacency
            - The full adjacency
            - locations
            - full adjacency filled with each location coordinate
    """
    locations = inputs[:, :, :3]
    morphology = inputs[:, :, 3:]

    # Pad both tensors with a zero row so they are square/aligned.
    adjacency = K.concatenate(
        [K.zeros(shape=(batch_size, 1, n_nodes)), morphology], axis=1)
    full_adjacency = batch_full_matrix(adjacency, n_nodes, batch_size)
    locations = K.concatenate(
        [K.zeros(shape=(batch_size, 1, 3)), locations], axis=1)

    def _fill(coordinate):
        # Broadcast one location coordinate across the full adjacency.
        column = K.expand_dims(locations[:, :, coordinate], 2)
        return full_adjacency * K.repeat_elements(column, n_nodes, axis=2)

    return K.concatenate([adjacency,
                          full_adjacency,
                          locations,
                          _fill(0),
                          _fill(1),
                          _fill(2)], axis=2)
| |
import os
from datetime import datetime, timedelta
from mimetypes import guess_type
try:
import socks
except ImportError:
socks = None
from . import TelegramBareClient
from . import helpers, utils
from .errors import (
RPCError, UnauthorizedError, InvalidParameterError, PhoneCodeEmptyError,
PhoneCodeExpiredError, PhoneCodeHashEmptyError, PhoneCodeInvalidError
)
from .network import ConnectionMode
from .tl import TLObject
from .tl.custom import Draft
from .tl.entity_database import EntityDatabase
from .tl.functions.account import (
GetPasswordRequest
)
from .tl.functions.auth import (
CheckPasswordRequest, LogOutRequest, SendCodeRequest, SignInRequest,
SignUpRequest, ImportBotAuthorizationRequest
)
from .tl.functions.contacts import (
GetContactsRequest, ResolveUsernameRequest
)
from .tl.functions.messages import (
GetDialogsRequest, GetHistoryRequest, ReadHistoryRequest, SendMediaRequest,
SendMessageRequest, GetChatsRequest,
GetAllDraftsRequest)
from .tl.functions import channels
from .tl.functions import messages
from .tl.functions.users import (
GetUsersRequest
)
from .tl.functions.channels import (
GetChannelsRequest
)
from .tl.types import (
DocumentAttributeAudio, DocumentAttributeFilename,
InputDocumentFileLocation, InputFileLocation,
InputMediaUploadedDocument, InputMediaUploadedPhoto, InputPeerEmpty,
Message, MessageMediaContact, MessageMediaDocument, MessageMediaPhoto,
InputUserSelf, UserProfilePhoto, ChatPhoto, UpdateMessageID,
UpdateNewChannelMessage, UpdateNewMessage, UpdateShortSentMessage,
PeerUser, InputPeerUser, InputPeerChat, InputPeerChannel)
from .tl.types.messages import DialogsSlice
class TelegramClient(TelegramBareClient):
"""Full featured TelegramClient meant to extend the basic functionality"""
# region Initialization
def __init__(self, session, api_id, api_hash,
             connection_mode=ConnectionMode.TCP_FULL,
             proxy=None,
             timeout=timedelta(seconds=5),
             loop=None,
             **kwargs):
    """Initializes the Telegram client with the specified API ID and Hash.

    `session` may be a filename string for the .session file, a `Session`
    instance (list_sessions() would probably not work then), or None for
    a throw-away session — remember to '.log_out()'!

    `connection_mode` takes any value under ConnectionMode and only
    affects how messages are encoded/sent over the network.

    Extra **kwargs update the Session instance; common settings include
    device_model, system_version, app_version, lang_code,
    system_lang_code and report_errors.
    """
    super().__init__(
        session, api_id, api_hash,
        connection_mode=connection_mode,
        proxy=proxy,
        timeout=timeout,
        loop=loop,
        **kwargs
    )

    # State shared between .send_code_request() and .sign_in()/.sign_up()
    self._phone_code_hash = None
    self._phone = None
# endregion
# region Telegram requests functions
# region Authorization requests
async def send_code_request(self, phone):
    """Sends a code request to the specified phone number.

    :param str | int phone: The phone to which the code will be sent.
    :return auth.SentCode: Information about the result of the request.
    """
    parsed = EntityDatabase.parse_phone(phone) or self._phone
    sent_code = await self(
        SendCodeRequest(parsed, self.api_id, self.api_hash))
    # Remember both pieces for the follow-up .sign_in() call.
    self._phone = parsed
    self._phone_code_hash = sent_code.phone_code_hash
    return sent_code
async def sign_in(self, phone=None, code=None,
                  password=None, bot_token=None, phone_code_hash=None):
    """
    Starts or completes the sign in process with the given phone number
    or code that Telegram sent.

    :param str | int phone:
        The phone to send the code to if no code was provided, or to
        override the phone that was previously used with these requests.
    :param str | int code:
        The code that Telegram sent.
    :param str password:
        2FA password, should be used if a previous call raised
        SessionPasswordNeededError.
    :param str bot_token:
        Used to sign in as a bot. Not all requests will be available.
        This should be the hash the @BotFather gave you.
    :param str phone_code_hash:
        The hash returned by .send_code_request. This can be set to None
        to use the last hash known.

    :return auth.SentCode | User | None:
        The signed in user, the information about .send_code_request(),
        or None if the provided code was empty/expired/invalid.
    """
    if phone and not code:
        return await self.send_code_request(phone)
    elif code:
        phone = EntityDatabase.parse_phone(phone) or self._phone
        phone_code_hash = phone_code_hash or self._phone_code_hash
        if not phone:
            raise ValueError(
                'Please make sure to call send_code_request first.'
            )
        if not phone_code_hash:
            raise ValueError('You also need to provide a phone_code_hash.')

        try:
            if isinstance(code, int):
                code = str(code)
            result = await self(SignInRequest(phone, phone_code_hash, code))
        except (PhoneCodeEmptyError, PhoneCodeExpiredError,
                PhoneCodeHashEmptyError, PhoneCodeInvalidError):
            return None
    elif password:
        # BUG FIX: the previous code did
        #   await self(GetPasswordRequest()).current_salt
        # which reads .current_salt off the *coroutine object* before
        # awaiting it, raising AttributeError at runtime. Await the
        # request first, then read the salt from the response.
        pwd = await self(GetPasswordRequest())
        result = await self(CheckPasswordRequest(
            helpers.get_password_hash(password, pwd.current_salt)
        ))
    elif bot_token:
        result = await self(ImportBotAuthorizationRequest(
            flags=0, bot_auth_token=bot_token,
            api_id=self.api_id, api_hash=self.api_hash
        ))
    else:
        raise ValueError(
            'You must provide a phone and a code the first time, '
            'and a password only if an RPCError was raised before.'
        )

    self._set_connected_and_authorized()
    return result.user
async def sign_up(self, code, first_name, last_name=''):
    """
    Signs up to Telegram if you don't have an account yet.
    You must call .send_code_request(phone) first.

    :param str | int code: The code sent by Telegram
    :param str first_name: The first name to be used by the new account.
    :param str last_name: Optional last name.
    :return User: The new created user.
    """
    request = SignUpRequest(
        phone_number=self._phone,
        phone_code_hash=self._phone_code_hash,
        phone_code=code,
        first_name=first_name,
        last_name=last_name
    )
    signed_up = await self(request)
    self._set_connected_and_authorized()
    return signed_up.user
async def log_out(self):
    """Logs out Telegram and deletes the current *.session file.

    :return bool: True if the operation was successful.
    """
    try:
        await self(LogOutRequest())
    except RPCError:
        return False
    else:
        # Only tear down the connection and session on a successful
        # server-side logout.
        self.disconnect()
        self.session.delete()
        self.session = None
        return True
async def get_me(self):
    """
    Gets "me" (the self user) which is currently authenticated,
    or None if the request fails (hence, not authenticated).

    :return User: Your own user.
    """
    try:
        users = await self(GetUsersRequest([InputUserSelf()]))
        return users[0]
    except UnauthorizedError:
        return None
# endregion
# region Dialogs ("chats") requests
async def get_dialogs(self,
                      limit=10,
                      offset_date=None,
                      offset_id=0,
                      offset_peer=InputPeerEmpty()):
    """
    Gets N "dialogs" (open "chats" or conversations with other people).

    :param limit:
        How many dialogs to be retrieved as maximum. Can be set to None
        to retrieve all dialogs. Note that this may take whole minutes
        if you have hundreds of dialogs, as Telegram will tell the library
        to slow down through a FloodWaitError.
    :param offset_date:
        The offset date to be used.
    :param offset_id:
        The message ID to be used as an offset.
    :param offset_peer:
        The peer to be used as an offset.

    :return: A tuple of lists ([dialogs], [entities]).
    """
    # None means "all dialogs"; float('inf') lets the `while` condition
    # and the final slice-guard below handle the unbounded case.
    if limit is None:
        limit = float('inf')

    dialogs = {}  # Use peer id as identifier to avoid dupes
    messages = {}  # Used later for sorting TODO also return these?
    entities = {}
    while len(dialogs) < limit:
        need = limit - len(dialogs)
        # limit=0 asks Telegram for a server-default page size when we
        # are fetching everything.
        r = await self(GetDialogsRequest(
            offset_date=offset_date,
            offset_id=offset_id,
            offset_peer=offset_peer,
            limit=need if need < float('inf') else 0
        ))
        if not r.dialogs:
            break
        for d in r.dialogs:
            dialogs[utils.get_peer_id(d.peer, True)] = d
        for m in r.messages:
            messages[m.id] = m
        # We assume users can't have the same ID as a chat
        for u in r.users:
            entities[u.id] = u
        for c in r.chats:
            entities[c.id] = c
        if not isinstance(r, DialogsSlice):
            # Don't enter next iteration if we already got all
            break
        # Advance the pagination cursor to the oldest item received.
        offset_date = r.messages[-1].date
        offset_peer = utils.find_user_or_chat(
            r.dialogs[-1].peer, entities, entities
        )
        offset_id = r.messages[-1].id & 4294967296  # Telegram/danog magic

    # Sort by message date. Windows will raise if timestamp is 0,
    # so we need to set at least one day ahead while still being
    # the smallest date possible.
    no_date = datetime.fromtimestamp(86400)
    ds = list(sorted(
        dialogs.values(),
        key=lambda d: getattr(messages[d.top_message], 'date', no_date)
    ))
    if limit < float('inf'):
        # Slicing with float('inf') would raise TypeError, hence the guard.
        ds = ds[:limit]
    return (
        ds,
        [utils.find_user_or_chat(d.peer, entities, entities) for d in ds]
    )
async def get_drafts(self):  # TODO: Ability to provide a `filter`
    """
    Gets all open draft messages.

    Returns a list of custom `Draft` objects that are easy to work with:
    You can call `draft.set_message('text')` to change the message,
    or delete it through `draft.delete()`.

    :return List[telethon.tl.custom.Draft]: A list of open drafts
    """
    response = await self(GetAllDraftsRequest())
    self.session.process_entities(response)
    self.session.generate_sequence(response.seq)
    return [Draft._from_update(self, update)
            for update in response.updates]
async def send_message(self,
                       entity,
                       message,
                       reply_to=None,
                       link_preview=True):
    """
    Sends the given message to the specified entity (user/chat/channel).

    :param str | int | User | Chat | Channel entity: To who will it be sent.
    :param str message: The message to be sent.
    :param int | Message reply_to: Whether to reply to a message or not.
    :param link_preview: Should the link preview be shown?

    :return Message: the sent message
    """
    peer = await self.get_input_entity(entity)
    request = SendMessageRequest(
        peer=peer,
        message=message,
        entities=[],
        no_webpage=not link_preview,
        reply_to_msg_id=self._get_reply_to(reply_to)
    )
    result = await self(request)

    # Short responses carry everything needed to build the Message.
    if isinstance(result, UpdateShortSentMessage):
        return Message(
            id=result.id,
            to_id=peer,
            message=message,
            date=result.date,
            out=result.out,
            media=result.media,
            entities=result.entities
        )

    # Telegram seems to send updateMessageID first, then updateNewMessage,
    # however let's not rely on that just in case: resolve our random_id
    # to the server-assigned message id first, then find that message.
    sent_id = next(
        (u.id for u in result.updates
         if isinstance(u, UpdateMessageID)
         and u.random_id == request.random_id),
        None
    )
    for update in result.updates:
        if isinstance(update, (UpdateNewChannelMessage, UpdateNewMessage)):
            if update.message.id == sent_id:
                return update.message

    return None  # Should not happen
async def delete_messages(self, entity, message_ids, revoke=True):
    """
    Deletes a message from a chat, optionally "for everyone" with argument
    `revoke` set to `True`.

    The `revoke` argument has no effect for Channels and Megagroups,
    where it inherently behaves as being `True`.

    Note: The `entity` argument can be `None` for normal chats, but it's
    mandatory to delete messages from Channels and Megagroups. It is also
    possible to supply a chat_id which will be automatically resolved to
    the right type of InputPeer.

    :param entity: ID or Entity of the chat
    :param list message_ids: ID(s) or `Message` object(s) of the message(s) to delete
    :param revoke: Delete the message for everyone or just this client
    :returns .messages.AffectedMessages: Messages affected by deletion.
    """
    if not isinstance(message_ids, list):
        message_ids = [message_ids]
    # Accept both raw ids and Message objects interchangeably.
    ids = [msg.id if isinstance(msg, Message) else int(msg)
           for msg in message_ids]

    if entity is None:
        return await self(
            messages.DeleteMessagesRequest(ids, revoke=revoke))

    peer = await self.get_input_entity(entity)
    if isinstance(peer, InputPeerChannel):
        # Channels/megagroups use their own request; revoke is implicit.
        return await self(channels.DeleteMessagesRequest(peer, ids))
    return await self(messages.DeleteMessagesRequest(ids, revoke=revoke))
async def get_message_history(self,
                              entity,
                              limit=20,
                              offset_date=None,
                              offset_id=0,
                              max_id=0,
                              min_id=0,
                              add_offset=0):
    """
    Gets the message history for the specified entity

    :param entity: The entity from whom to retrieve the message history
    :param limit: Number of messages to be retrieved
    :param offset_date: Offset date (messages *previous* to this date will be retrieved)
    :param offset_id: Offset message ID (only messages *previous* to the given ID will be retrieved)
    :param max_id: All the messages with a higher (newer) ID or equal to this will be excluded
    :param min_id: All the messages with a lower (older) ID or equal to this will be excluded
    :param add_offset: Additional message offset (all of the specified offsets + this offset = older messages)

    :return: A tuple containing total message count and two more lists ([messages], [senders]).
             Note that the sender can be null if it was not found!
    """
    result = await self(GetHistoryRequest(
        peer=await self.get_input_entity(entity),
        limit=limit,
        offset_date=offset_date,
        offset_id=offset_id,
        max_id=max_id,
        min_id=min_id,
        add_offset=add_offset
    ))

    # A messages slice carries a total 'count'; a plain messages object
    # does not, in which case everything was retrieved at once.
    total_messages = getattr(result, 'count', len(result.messages))

    # Resolve the sender of each message; fall back to the destination
    # peer when there is no originating user.
    senders = []
    for m in result.messages:
        who = m.from_id if m.from_id is not None else m.to_id
        senders.append(
            utils.find_user_or_chat(who, result.users, result.chats))

    return total_messages, result.messages, senders
async def send_read_acknowledge(self, entity, messages=None, max_id=None):
    """
    Sends a "read acknowledge" (i.e., notifying the given peer that we've
    read their messages, also known as the "double check").

    :param entity: The chat where these messages are located.
    :param messages: Either a list of messages or a single message.
    :param max_id: Overrides messages, until which message should the
                   acknowledge should be sent.
    :return:
    """
    if max_id is None:
        # Derive the highest message id from the given message(s).
        if not messages:
            raise InvalidParameterError(
                'Either a message list or a max_id must be provided.')
        if isinstance(messages, list):
            max_id = max(m.id for m in messages)
        else:
            max_id = messages.id

    peer = await self.get_input_entity(entity)
    return await self(ReadHistoryRequest(peer=peer, max_id=max_id))
@staticmethod
def _get_reply_to(reply_to):
    """Sanitizes the 'reply_to' parameter a user may send.

    Accepts None, a message id (int), or a Message-like TLObject, and
    returns the message id to reply to (or None).

    :raises ValueError: if `reply_to` is of any other type.
    """
    if reply_to is None:
        return None

    if isinstance(reply_to, int):
        return reply_to

    if isinstance(reply_to, TLObject) and \
            type(reply_to).SUBCLASS_OF_ID == 0x790009e3:
        # hex(crc32(b'Message')) = 0x790009e3
        return reply_to.id

    # BUG FIX: the original passed two positional arguments to
    # ValueError, producing a tuple-looking message; format one string.
    raise ValueError('Invalid reply_to type: {}'.format(type(reply_to)))
# endregion
# region Uploading files
async def send_file(self, entity, file, caption='',
                    force_document=False, progress_callback=None,
                    reply_to=None,
                    attributes=None,
                    **kwargs):
    """
    Sends a file to the specified entity.

    :param entity:
        Who will receive the file.
    :param file:
        The path of the file, byte array, or stream that will be sent.
        Note that if a byte array or a stream is given, a filename
        or its type won't be inferred, and it will be sent as an
        "unnamed application/octet-stream".

        Subsequent calls with the very same file will result in
        immediate uploads, unless .clear_file_cache() is called.
    :param caption:
        Optional caption for the sent media message.
    :param force_document:
        If left to False and the file is a path that ends with .png, .jpg
        and such, the file will be sent as a photo. Otherwise always as
        a document.
    :param progress_callback:
        A callback function accepting two parameters: (sent bytes, total)
    :param reply_to:
        Same as reply_to from .send_message().
    :param attributes:
        Optional attributes that override the inferred ones, like
        DocumentAttributeFilename and so on.
    :param kwargs:
        If "is_voice_note" in kwargs, despite its value, and the file is
        sent as a document, it will be sent as a voice note.
    :return: None — the server response is not returned to the caller.
    """
    # Only local paths with a known image extension qualify to be sent
    # as a photo (unless force_document overrides this below).
    as_photo = False
    if isinstance(file, str):
        lowercase_file = file.lower()
        as_photo = any(
            lowercase_file.endswith(ext)
            for ext in ('.png', '.jpg', '.gif', '.jpeg')
        )

    # Reuse the cached upload handle for an identical `file` value;
    # otherwise upload now and remember the handle for next time.
    file_hash = hash(file)
    if file_hash in self._upload_cache:
        file_handle = self._upload_cache[file_hash]
    else:
        self._upload_cache[file_hash] = file_handle = await self.upload_file(
            file, progress_callback=progress_callback
        )

    if as_photo and not force_document:
        media = InputMediaUploadedPhoto(file_handle, caption)
    else:
        mime_type = None
        if isinstance(file, str):
            # Determine mime-type and attributes
            # Take the first element by using [0] since it returns a tuple
            mime_type = guess_type(file)[0]
            attr_dict = {
                DocumentAttributeFilename:
                    DocumentAttributeFilename(os.path.basename(file))
                # TODO If the input file is an audio, find out:
                # Performer and song title and add DocumentAttributeAudio
            }
        else:
            attr_dict = {
                DocumentAttributeFilename:
                    DocumentAttributeFilename('unnamed')
            }

        # The mere presence of the key enables voice notes (any value).
        if 'is_voice_note' in kwargs:
            attr_dict[DocumentAttributeAudio] = \
                DocumentAttributeAudio(0, voice=True)

        # Now override the attributes if any. As we have a dict of
        # {cls: instance}, we can override any class with the list
        # of attributes provided by the user easily.
        if attributes:
            for a in attributes:
                attr_dict[type(a)] = a

        # Ensure we have a mime type, any; but it cannot be None
        # 'The "octet-stream" subtype is used to indicate that a body
        # contains arbitrary binary data.'
        if not mime_type:
            mime_type = 'application/octet-stream'

        media = InputMediaUploadedDocument(
            file=file_handle,
            mime_type=mime_type,
            attributes=list(attr_dict.values()),
            caption=caption
        )

    # Once the media type is properly specified and the file uploaded,
    # send the media message to the desired entity.
    await self(SendMediaRequest(
        peer=await self.get_input_entity(entity),
        media=media,
        reply_to_msg_id=self._get_reply_to(reply_to)
    ))
async def send_voice_note(self, entity, file, caption='', upload_progress=None,
                          reply_to=None):
    """Wrapper method around .send_file() that sends the file as a
    voice note (by passing the is_voice_note marker kwarg).
    """
    # BUG FIX: .send_file() accepts the callback as 'progress_callback';
    # forwarding it as 'upload_progress=' made it land in **kwargs and
    # be silently ignored, so the caller never received progress updates.
    return await self.send_file(entity, file, caption,
                                progress_callback=upload_progress,
                                reply_to=reply_to,
                                is_voice_note=())  # empty tuple is enough
def clear_file_cache(self):
    """Forget the cached remote locations of uploaded files.

    .send_file() remembers the remote location of every uploaded file
    so that sending the same path again is immediate; calling this
    method empties that cache in place so files get re-uploaded.
    """
    self._upload_cache.clear()
# endregion
# region Downloading media requests
async def download_profile_photo(self, entity, file=None, download_big=True):
    """
    Downloads the profile photo of the given entity (user/chat/channel).

    :param entity:
        From who the photo will be downloaded.
    :param file:
        The output file path, directory, or stream-like object.
        If the path exists and is a file, it will be overwritten.
    :param download_big:
        Whether to use the big version of the available photos.
    :return:
        None if no photo was provided, or if it was Empty. On success
        the file path is returned since it may differ from the one given.
    """
    possible_names = []
    if not isinstance(entity, TLObject) or type(entity).SUBCLASS_OF_ID in (
            0x2da17977, 0xc5af5d94, 0x1f4661b9, 0xd49a2697
    ):
        # Maybe it is an user or a chat? Or their full versions?
        #
        # The hexadecimal numbers above are simply:
        # hex(crc32(x.encode('ascii'))) for x in
        # ('User', 'Chat', 'UserFull', 'ChatFull')
        entity = await self.get_entity(entity)
        if not hasattr(entity, 'photo'):
            # Special case: may be a ChatFull with photo:Photo
            # This is different from a normal UserProfilePhoto and Chat
            if hasattr(entity, 'chat_photo'):
                return await self._download_photo(
                    entity.chat_photo, file,
                    date=None, progress_callback=None
                )
            else:
                # Give up
                return None

        # Collect the entity's display names as filename candidates for
        # _get_proper_filename below.
        for attr in ('username', 'first_name', 'title'):
            possible_names.append(getattr(entity, attr, None))

        entity = entity.photo

    # At this point `entity` must be the photo object itself.
    if not isinstance(entity, UserProfilePhoto) and \
            not isinstance(entity, ChatPhoto):
        return None

    if download_big:
        photo_location = entity.photo_big
    else:
        photo_location = entity.photo_small

    file = self._get_proper_filename(
        file, 'profile_photo', '.jpg',
        possible_names=possible_names
    )

    # Download the media with the largest size input file location
    await self.download_file(
        InputFileLocation(
            volume_id=photo_location.volume_id,
            local_id=photo_location.local_id,
            secret=photo_location.secret
        ),
        file
    )
    return file
async def download_media(self, message, file=None, progress_callback=None):
"""
Downloads the given media, or the media from a specified Message.
:param message:
The media or message containing the media that will be downloaded.
:param file:
The output file path, directory, or stream-like object.
If the path exists and is a file, it will be overwritten.
:param progress_callback:
A callback function accepting two parameters: (recv bytes, total)
:return:
"""
# TODO This won't work for messageService
if isinstance(message, Message):
date = message.date
media = message.media
else:
date = datetime.now()
media = message
if isinstance(media, MessageMediaPhoto):
return await self._download_photo(
media, file, date, progress_callback
)
elif isinstance(media, MessageMediaDocument):
return await self._download_document(
media, file, date, progress_callback
)
elif isinstance(media, MessageMediaContact):
return await self._download_contact(
media, file
)
async def _download_photo(self, mm_photo, file, date, progress_callback):
"""Specialized version of .download_media() for photos"""
# Determine the photo and its largest size
photo = mm_photo.photo
largest_size = photo.sizes[-1]
file_size = largest_size.size
largest_size = largest_size.location
file = self._get_proper_filename(file, 'photo', '.jpg', date=date)
# Download the media with the largest size input file location
await self.download_file(
InputFileLocation(
volume_id=largest_size.volume_id,
local_id=largest_size.local_id,
secret=largest_size.secret
),
file,
file_size=file_size,
progress_callback=progress_callback
)
return file
async def _download_document(self, mm_doc, file, date, progress_callback):
"""Specialized version of .download_media() for documents"""
document = mm_doc.document
file_size = document.size
possible_names = []
for attr in document.attributes:
if isinstance(attr, DocumentAttributeFilename):
possible_names.insert(0, attr.file_name)
elif isinstance(attr, DocumentAttributeAudio):
possible_names.append('{} - {}'.format(
attr.performer, attr.title
))
file = self._get_proper_filename(
file, 'document', utils.get_extension(mm_doc),
date=date, possible_names=possible_names
)
await self.download_file(
InputDocumentFileLocation(
id=document.id,
access_hash=document.access_hash,
version=document.version
),
file,
file_size=file_size,
progress_callback=progress_callback
)
return file
@staticmethod
def _download_contact(mm_contact, file):
"""Specialized version of .download_media() for contacts.
Will make use of the vCard 4.0 format
"""
first_name = mm_contact.first_name
last_name = mm_contact.last_name
phone_number = mm_contact.phone_number
if isinstance(file, str):
file = TelegramClient._get_proper_filename(
file, 'contact', '.vcard',
possible_names=[first_name, phone_number, last_name]
)
f = open(file, 'w', encoding='utf-8')
else:
f = file
try:
# Remove these pesky characters
first_name = first_name.replace(';', '')
last_name = (last_name or '').replace(';', '')
f.write('BEGIN:VCARD\n')
f.write('VERSION:4.0\n')
f.write('N:{};{};;;\n'.format(first_name, last_name))
f.write('FN:{} {}\n'.format(first_name, last_name))
f.write('TEL;TYPE=cell;VALUE=uri:tel:+{}\n'.format(phone_number))
f.write('END:VCARD\n')
finally:
# Only close the stream if we opened it
if isinstance(file, str):
f.close()
return file
@staticmethod
def _get_proper_filename(file, kind, extension,
date=None, possible_names=None):
"""Gets a proper filename for 'file', if this is a path.
'kind' should be the kind of the output file (photo, document...)
'extension' should be the extension to be added to the file if
the filename doesn't have any yet
'date' should be when this file was originally sent, if known
'possible_names' should be an ordered list of possible names
If no modification is made to the path, any existing file
will be overwritten.
If any modification is made to the path, this method will
ensure that no existing file will be overwritten.
"""
if file is not None and not isinstance(file, str):
# Probably a stream-like object, we cannot set a filename here
return file
if file is None:
file = ''
elif os.path.isfile(file):
# Make no modifications to valid existing paths
return file
if os.path.isdir(file) or not file:
try:
name = None if possible_names is None else next(
x for x in possible_names if x
)
except StopIteration:
name = None
if not name:
name = '{}_{}-{:02}-{:02}_{:02}-{:02}-{:02}'.format(
kind,
date.year, date.month, date.day,
date.hour, date.minute, date.second,
)
file = os.path.join(file, name)
directory, name = os.path.split(file)
name, ext = os.path.splitext(name)
if not ext:
ext = extension
result = os.path.join(directory, name + ext)
if not os.path.isfile(result):
return result
i = 1
while True:
result = os.path.join(directory, '{} ({}){}'.format(name, i, ext))
if not os.path.isfile(result):
return result
i += 1
# endregion
# endregion
# region Small utilities to make users' life easier
    async def get_entity(self, entity):
        """
        Turns the given entity into a valid Telegram user or chat.

        :param entity:
            The entity to be transformed.
            If it's a string which can be converted to an integer or starts
            with '+' it will be resolved as if it were a phone number.

            If it doesn't start with '+' or starts with a '@' it will be
            be resolved from the username. If no exact match is returned,
            an error will be raised.

            If the entity is an integer or a Peer, its information will be
            returned through a call to self.get_input_peer(entity).

            If the entity is neither, and it's not a TLObject, an
            error will be raised.
        :return:
            The matching user or chat object.
        :raises ValueError: if the entity cannot be resolved at all.
        """
        # Fast path: the session's entity cache may already know it.
        try:
            return self.session.entities[entity]
        except KeyError:
            pass
        if isinstance(entity, int) or (
                isinstance(entity, TLObject) and
                # crc32(b'InputPeer') and crc32(b'Peer')
                type(entity).SUBCLASS_OF_ID in (0xc91c90b6, 0x2d45687)):
            ie = await self.get_input_entity(entity)
            # Fetch the full object so the session caches its entity.
            if isinstance(ie, InputPeerUser):
                await self(GetUsersRequest([ie]))
            elif isinstance(ie, InputPeerChat):
                await self(GetChatsRequest([ie.chat_id]))
            elif isinstance(ie, InputPeerChannel):
                await self(GetChannelsRequest([ie]))
            try:
                # session.process_entities has been called in the MtProtoSender
                # with the result of these calls, so they should now be on the
                # entities database.
                return self.session.entities[ie]
            except KeyError:
                pass
        if isinstance(entity, str):
            return await self._get_entity_from_string(entity)
        raise ValueError(
            'Cannot turn "{}" into any entity (user or chat)'.format(entity)
        )
async def _get_entity_from_string(self, string):
"""Gets an entity from the given string, which may be a phone or
an username, and processes all the found entities on the session.
"""
phone = EntityDatabase.parse_phone(string)
if phone:
entity = phone
await self(GetContactsRequest(0))
else:
entity = string.strip('@').lower()
await self(ResolveUsernameRequest(entity))
# MtProtoSender will call .process_entities on the requests made
try:
return self.session.entities[entity]
except KeyError:
raise ValueError(
'Could not find user with username {}'.format(entity)
)
async def get_input_entity(self, peer):
"""
Turns the given peer into its input entity version. Most requests
use this kind of InputUser, InputChat and so on, so this is the
most suitable call to make for those cases.
:param peer:
The integer ID of an user or otherwise either of a
PeerUser, PeerChat or PeerChannel, for which to get its
Input* version.
If this Peer hasn't been seen before by the library, the top
dialogs will be loaded and their entities saved to the session
file (unless this feature was disabled explicitly).
If in the end the access hash required for the peer was not found,
a ValueError will be raised.
:return:
"""
try:
# First try to get the entity from cache, otherwise figure it out
return self.session.entities.get_input_entity(peer)
except KeyError:
pass
if isinstance(peer, str):
return utils.get_input_peer(await self._get_entity_from_string(peer))
is_peer = False
if isinstance(peer, int):
peer = PeerUser(peer)
is_peer = True
elif isinstance(peer, TLObject):
is_peer = type(peer).SUBCLASS_OF_ID == 0x2d45687 # crc32(b'Peer')
if not is_peer:
try:
return utils.get_input_peer(peer)
except ValueError:
pass
if not is_peer:
raise ValueError(
'Cannot turn "{}" into an input entity.'.format(peer)
)
if self.session.save_entities:
# Not found, look in the latest dialogs.
# This is useful if for instance someone just sent a message but
# the updates didn't specify who, as this person or chat should
# be in the latest dialogs.
await self(GetDialogsRequest(
offset_date=None,
offset_id=0,
offset_peer=InputPeerEmpty(),
limit=0,
exclude_pinned=True
))
try:
return self.session.entities.get_input_entity(peer)
except KeyError:
pass
raise ValueError(
'Could not find the input entity corresponding to "{}".'
'Make sure you have encountered this peer before.'.format(peer)
)
# endregion
| |
from __future__ import unicode_literals
import re
import datetime
from django.utils import six
from django.db import models
from django.conf import settings
from django.db.migrations import operations
from django.db.migrations.migration import Migration
from django.db.migrations.questioner import MigrationQuestioner
from django.db.migrations.optimizer import MigrationOptimizer
class MigrationAutodetector(object):
    """
    Takes a pair of ProjectStates, and compares them to see what the
    first would need doing to make it match the second (the second
    usually being the project's current state).

    Note that this naturally operates on entire projects at a time,
    as it's likely that changes interact (for example, you can't
    add a ForeignKey without having a migration to add the table it
    depends on first). A user interface may offer single-app usage
    if it wishes, with the caveat that it may not always be possible.
    """

    # Model options we want to compare and preserve in an AlterModelOptions op
    ALTER_OPTION_KEYS = [
        "get_latest_by",
        "ordering",
        "permissions",
        "default_permissions",
        "select_on_save",
        "verbose_name",
        "verbose_name_plural",
    ]

    def __init__(self, from_state, to_state, questioner=None):
        """
        from_state / to_state are the ProjectStates being compared; the
        questioner resolves ambiguities (renames, defaults), defaulting to
        the non-interactive MigrationQuestioner.
        """
        self.from_state = from_state
        self.to_state = to_state
        self.questioner = questioner or MigrationQuestioner()
def changes(self, graph, trim_to_apps=None, convert_apps=None):
"""
Main entry point to produce a list of appliable changes.
Takes a graph to base names on and an optional set of apps
to try and restrict to (restriction is not guaranteed)
"""
changes = self._detect_changes(convert_apps, graph)
changes = self.arrange_for_graph(changes, graph)
if trim_to_apps:
changes = self._trim_to_apps(changes, trim_to_apps)
return changes
def deep_deconstruct(self, obj):
"""
Recursive deconstruction for a field and its arguments.
Used for full comparison for rename/alter; sometimes a single-level
deconstruction will not compare correctly.
"""
if not hasattr(obj, 'deconstruct'):
return obj
deconstructed = obj.deconstruct()
if isinstance(obj, models.Field):
# we have a field which also returns a name
deconstructed = deconstructed[1:]
path, args, kwargs = deconstructed
return (
path,
[self.deep_deconstruct(value) for value in args],
dict(
(key, self.deep_deconstruct(value))
for key, value in kwargs.items()
),
)
def only_relation_agnostic_fields(self, fields):
"""
Return a definition of the fields that ignores field names and
what related fields actually relate to.
Used for detecting renames (as, of course, the related fields
change during renames)
"""
fields_def = []
for name, field in fields:
deconstruction = self.deep_deconstruct(field)
if field.rel and field.rel.to:
del deconstruction[2]['to']
fields_def.append(deconstruction)
return fields_def
    def _detect_changes(self, convert_apps=None, graph=None):
        """
        Returns a dict of migration plans which will achieve the
        change from from_state to to_state. The dict has app labels
        as keys and a list of migrations as values.

        The resulting migrations aren't specially named, but the names
        do matter for dependencies inside the set.

        convert_apps is the list of apps to convert to use migrations
        (i.e. to make initial migrations for, in the usual case)

        graph is an optional argument that, if provided, can help improve
        dependency generation and avoid potential circular dependencies.
        """
        # The first phase is generating all the operations for each app
        # and gathering them into a big per-app list.
        # We'll then go through that list later and order it and split
        # into migrations to resolve dependencies caused by M2Ms and FKs.
        self.generated_operations = {}
        # Prepare some old/new state and model lists, separating
        # proxy models and ignoring unmigrated apps.
        self.old_apps = self.from_state.render(ignore_swappable=True)
        self.new_apps = self.to_state.render()
        self.old_model_keys = []
        self.old_proxy_keys = []
        self.new_model_keys = []
        self.new_proxy_keys = []
        for al, mn in sorted(self.from_state.models.keys()):
            model = self.old_apps.get_model(al, mn)
            if model._meta.managed and al not in self.from_state.real_apps:
                if model._meta.proxy:
                    self.old_proxy_keys.append((al, mn))
                else:
                    self.old_model_keys.append((al, mn))
        for al, mn in sorted(self.to_state.models.keys()):
            model = self.new_apps.get_model(al, mn)
            # convert_apps entries are included even when already "real".
            if model._meta.managed and (
                al not in self.from_state.real_apps or
                (convert_apps and al in convert_apps)
            ):
                if model._meta.proxy:
                    self.new_proxy_keys.append((al, mn))
                else:
                    self.new_model_keys.append((al, mn))
        # Renames have to come first
        self.generate_renamed_models()
        # Prepare field lists, and prepare a list of the fields that used
        # through models in the old state so we can make dependencies
        # from the through model deletion to the field that uses it.
        self.kept_model_keys = set(self.old_model_keys).intersection(self.new_model_keys)
        self.through_users = {}
        self.old_field_keys = set()
        self.new_field_keys = set()
        for app_label, model_name in sorted(self.kept_model_keys):
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            old_model_state = self.from_state.models[app_label, old_model_name]
            new_model_state = self.to_state.models[app_label, model_name]
            self.old_field_keys.update((app_label, model_name, x) for x, y in old_model_state.fields)
            self.new_field_keys.update((app_label, model_name, x) for x, y in new_model_state.fields)
        # Through model map generation
        for app_label, model_name in sorted(self.old_model_keys):
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            old_model_state = self.from_state.models[app_label, old_model_name]
            for field_name, field in old_model_state.fields:
                old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field_by_name(field_name)[0]
                if hasattr(old_field, "rel") and getattr(old_field.rel, "through", None) and not old_field.rel.through._meta.auto_created:
                    through_key = (
                        old_field.rel.through._meta.app_label,
                        old_field.rel.through._meta.object_name.lower(),
                    )
                    self.through_users[through_key] = (app_label, old_model_name, field_name)
        # Generate non-rename model operations
        self.generate_created_models()
        self.generate_deleted_models()
        self.generate_created_proxies()
        self.generate_deleted_proxies()
        self.generate_altered_options()
        # Generate field operations
        self.generate_added_fields()
        self.generate_removed_fields()
        self.generate_altered_fields()
        self.generate_altered_unique_together()
        self.generate_altered_index_together()
        self.generate_altered_order_with_respect_to()
        # Now, reordering to make things possible. The order we have already
        # isn't bad, but we need to pull a few things around so FKs work nicely
        # inside the same app
        for app_label, ops in sorted(self.generated_operations.items()):
            # NOTE: the outer 'i' is only a loop bound (10000 passes max);
            # it is intentionally shadowed by the enumerate() below.
            for i in range(10000):
                found = False
                for i, op in enumerate(ops):
                    for dep in op._auto_deps:
                        if dep[0] == app_label:
                            # Alright, there's a dependency on the same app.
                            for j, op2 in enumerate(ops):
                                if self.check_dependency(op2, dep) and j > i:
                                    # Move 'op' to just after its dependency.
                                    ops = ops[:i] + ops[i + 1:j + 1] + [op] + ops[j + 1:]
                                    found = True
                                    break
                        if found:
                            break
                    if found:
                        break
                if not found:
                    break
            else:
                raise ValueError("Infinite loop caught in operation dependency resolution")
            self.generated_operations[app_label] = ops
        # Now, we need to chop the lists of operations up into migrations with
        # dependencies on each other.
        # We do this by stepping up an app's list of operations until we
        # find one that has an outgoing dependency that isn't in another app's
        # migration yet (hasn't been chopped off its list). We then chop off the
        # operations before it into a migration and move onto the next app.
        # If we loop back around without doing anything, there's a circular
        # dependency (which _should_ be impossible as the operations are all
        # split at this point so they can't depend and be depended on)
        self.migrations = {}
        num_ops = sum(len(x) for x in self.generated_operations.values())
        chop_mode = False
        while num_ops:
            # On every iteration, we step through all the apps and see if there
            # is a completed set of operations.
            # If we find that a subset of the operations are complete we can
            # try to chop it off from the rest and continue, but we only
            # do this if we've already been through the list once before
            # without any chopping and nothing has changed.
            for app_label in sorted(self.generated_operations.keys()):
                chopped = []
                dependencies = set()
                for operation in list(self.generated_operations[app_label]):
                    deps_satisfied = True
                    operation_dependencies = set()
                    for dep in operation._auto_deps:
                        if dep[0] == "__setting__":
                            # Swappable-model dependency on a settings value.
                            operation_dependencies.add((dep[0], dep[1]))
                        elif dep[0] != app_label:
                            # External app dependency. See if it's not yet
                            # satisfied.
                            for other_operation in self.generated_operations.get(dep[0], []):
                                if self.check_dependency(other_operation, dep):
                                    deps_satisfied = False
                                    break
                            if not deps_satisfied:
                                break
                            else:
                                if self.migrations.get(dep[0], None):
                                    operation_dependencies.add((dep[0], self.migrations[dep[0]][-1].name))
                                else:
                                    # If we can't find the other app, we add a first/last dependency,
                                    # but only if we've already been through once and checked everything
                                    if chop_mode:
                                        # If the app already exists, we add a dependency on the last migration,
                                        # as we don't know which migration contains the target field.
                                        # If it's not yet migrated or has no migrations, we use __first__
                                        if graph and graph.leaf_nodes(dep[0]):
                                            operation_dependencies.add(graph.leaf_nodes(dep[0])[0])
                                        else:
                                            operation_dependencies.add((dep[0], "__first__"))
                                    else:
                                        deps_satisfied = False
                    if deps_satisfied:
                        chopped.append(operation)
                        dependencies.update(operation_dependencies)
                        self.generated_operations[app_label] = self.generated_operations[app_label][1:]
                    else:
                        break
                # Make a migration! Well, only if there's stuff to put in it
                if dependencies or chopped:
                    if not self.generated_operations[app_label] or chop_mode:
                        subclass = type(str("Migration"), (Migration,), {"operations": [], "dependencies": []})
                        instance = subclass("auto_%i" % (len(self.migrations.get(app_label, [])) + 1), app_label)
                        instance.dependencies = list(dependencies)
                        instance.operations = chopped
                        self.migrations.setdefault(app_label, []).append(instance)
                        chop_mode = False
                    else:
                        self.generated_operations[app_label] = chopped + self.generated_operations[app_label]
            new_num_ops = sum(len(x) for x in self.generated_operations.values())
            if new_num_ops == num_ops:
                if not chop_mode:
                    chop_mode = True
                else:
                    raise ValueError("Cannot resolve operation dependencies")
            num_ops = new_num_ops
        # OK, add in internal dependencies among the migrations
        for app_label, migrations in self.migrations.items():
            for m1, m2 in zip(migrations, migrations[1:]):
                m2.dependencies.append((app_label, m1.name))
        # De-dupe dependencies
        for app_label, migrations in self.migrations.items():
            for migration in migrations:
                migration.dependencies = list(set(migration.dependencies))
        # Optimize migrations
        for app_label, migrations in self.migrations.items():
            for migration in migrations:
                migration.operations = MigrationOptimizer().optimize(migration.operations, app_label=app_label)
        return self.migrations
def check_dependency(self, operation, dependency):
"""
Checks if an operation dependency matches an operation.
"""
# Created model
if dependency[2] is None and dependency[3] is True:
return (
isinstance(operation, operations.CreateModel) and
operation.name.lower() == dependency[1].lower()
)
# Created field
elif dependency[2] is not None and dependency[3] is True:
return (
(
isinstance(operation, operations.CreateModel) and
operation.name.lower() == dependency[1].lower() and
any(dependency[2] == x for x, y in operation.fields)
) or
(
isinstance(operation, operations.AddField) and
operation.model_name.lower() == dependency[1].lower() and
operation.name.lower() == dependency[2].lower()
)
)
# Removed field
elif dependency[2] is not None and dependency[3] is False:
return (
isinstance(operation, operations.RemoveField) and
operation.model_name.lower() == dependency[1].lower() and
operation.name.lower() == dependency[2].lower()
)
# Removed model
elif dependency[2] is None and dependency[3] is False:
return (
isinstance(operation, operations.DeleteModel) and
operation.name.lower() == dependency[1].lower()
)
# order_with_respect_to being unset for a field
elif dependency[2] is not None and dependency[3] == "order_wrt_unset":
return (
isinstance(operation, operations.AlterOrderWithRespectTo) and
operation.name.lower() == dependency[1].lower() and
(operation.order_with_respect_to or "").lower() != dependency[2].lower()
)
# Unknown dependency. Raise an error.
else:
raise ValueError("Can't handle dependency %r" % (dependency, ))
def add_operation(self, app_label, operation, dependencies=None):
# Dependencies are (app_label, model_name, field_name, create/delete as True/False)
operation._auto_deps = dependencies or []
self.generated_operations.setdefault(app_label, []).append(operation)
def swappable_first_key(self, item):
"""
Sorting key function that places potential swappable models first in
lists of created models (only real way to solve #22783)
"""
try:
model = self.new_apps.get_model(item[0], item[1])
base_names = [base.__name__ for base in model.__bases__]
string_version = "%s.%s" % (item[0], item[1])
if (
model._meta.swappable or
"AbstractUser" in base_names or
"AbstractBaseUser" in base_names or
settings.AUTH_USER_MODEL.lower() == string_version.lower()
):
return ("___" + item[0], "___" + item[1])
except LookupError:
pass
return item
    def generate_renamed_models(self):
        """
        Finds any renamed models, and generates the operations for them,
        and removes the old entry from the model lists.
        Must be run before other model-level generation.
        """
        self.renamed_models = {}
        self.renamed_models_rel = {}
        added_models = set(self.new_model_keys) - set(self.old_model_keys)
        for app_label, model_name in sorted(added_models):
            model_state = self.to_state.models[app_label, model_name]
            # Compare definitions ignoring field names and relation targets,
            # so a rename (which changes both) still matches.
            model_fields_def = self.only_relation_agnostic_fields(model_state.fields)
            removed_models = set(self.old_model_keys) - set(self.new_model_keys)
            for rem_app_label, rem_model_name in removed_models:
                # Only consider renames within the same app.
                if rem_app_label == app_label:
                    rem_model_state = self.from_state.models[rem_app_label, rem_model_name]
                    rem_model_fields_def = self.only_relation_agnostic_fields(rem_model_state.fields)
                    if model_fields_def == rem_model_fields_def:
                        if self.questioner.ask_rename_model(rem_model_state, model_state):
                            self.add_operation(
                                app_label,
                                operations.RenameModel(
                                    old_name=rem_model_state.name,
                                    new_name=model_state.name,
                                )
                            )
                            # Record the rename both by key and as an
                            # "app.Model" string for relation remapping later.
                            self.renamed_models[app_label, model_name] = rem_model_name
                            self.renamed_models_rel['%s.%s' % (rem_model_state.app_label, rem_model_state.name)] = '%s.%s' % (model_state.app_label, model_state.name)
                            self.old_model_keys.remove((rem_app_label, rem_model_name))
                            self.old_model_keys.append((app_label, model_name))
                            break
    def generate_created_models(self):
        """
        Find all new models and make creation operations for them,
        and separate operations to create any foreign key or M2M relationships
        (we'll optimise these back in later if we can)

        We also defer any model options that refer to collections of fields
        that might be deferred (e.g. unique_together, index_together)
        """
        added_models = set(self.new_model_keys) - set(self.old_model_keys)
        # Swappable-candidate models are sorted first; see swappable_first_key.
        for app_label, model_name in sorted(added_models, key=self.swappable_first_key):
            model_state = self.to_state.models[app_label, model_name]
            # Gather related fields
            related_fields = {}
            primary_key_rel = None
            for field in self.new_apps.get_model(app_label, model_name)._meta.local_fields:
                if field.rel:
                    if field.rel.to:
                        if field.primary_key:
                            primary_key_rel = field.rel.to
                        else:
                            related_fields[field.name] = field
                    # through will be none on M2Ms on swapped-out models;
                    # we can treat lack of through as auto_created=True, though.
                    if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
                        related_fields[field.name] = field
            for field in self.new_apps.get_model(app_label, model_name)._meta.local_many_to_many:
                if field.rel.to:
                    related_fields[field.name] = field
                if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
                    related_fields[field.name] = field
            # Are there unique/index_together to defer?
            unique_together = model_state.options.pop('unique_together', None)
            index_together = model_state.options.pop('index_together', None)
            order_with_respect_to = model_state.options.pop('order_with_respect_to', None)
            # Depend on the deletion of any possible proxy version of us
            dependencies = [
                (app_label, model_name, None, False),
            ]
            # Depend on all bases
            for base in model_state.bases:
                if isinstance(base, six.string_types) and "." in base:
                    base_app_label, base_name = base.split(".", 1)
                    dependencies.append((base_app_label, base_name, None, True))
            # Depend on the other end of the primary key if it's a relation
            if primary_key_rel:
                dependencies.append((
                    primary_key_rel._meta.app_label,
                    primary_key_rel._meta.object_name,
                    None,
                    True
                ))
            # Generate creation operation (related fields are split out below)
            self.add_operation(
                app_label,
                operations.CreateModel(
                    name=model_state.name,
                    fields=[d for d in model_state.fields if d[0] not in related_fields],
                    options=model_state.options,
                    bases=model_state.bases,
                ),
                dependencies=dependencies,
            )
            # Generate operations for each related field
            for name, field in sorted(related_fields.items()):
                # Account for FKs to swappable models
                swappable_setting = getattr(field, 'swappable_setting', None)
                if swappable_setting is not None:
                    dep_app_label = "__setting__"
                    dep_object_name = swappable_setting
                else:
                    dep_app_label = field.rel.to._meta.app_label
                    dep_object_name = field.rel.to._meta.object_name
                dependencies = [(dep_app_label, dep_object_name, None, True)]
                if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
                    dependencies.append((
                        field.rel.through._meta.app_label,
                        field.rel.through._meta.object_name,
                        None,
                        True
                    ))
                # Depend on our own model being created
                dependencies.append((app_label, model_name, None, True))
                # Make operation
                self.add_operation(
                    app_label,
                    operations.AddField(
                        model_name=model_name,
                        name=name,
                        field=field,
                    ),
                    dependencies=list(set(dependencies)),
                )
            # Generate other opns
            related_dependencies = [
                (app_label, model_name, name, True)
                for name, field in sorted(related_fields.items())
            ]
            related_dependencies.append((app_label, model_name, None, True))
            if unique_together:
                self.add_operation(
                    app_label,
                    operations.AlterUniqueTogether(
                        name=model_name,
                        unique_together=unique_together,
                    ),
                    dependencies=related_dependencies
                )
            if index_together:
                self.add_operation(
                    app_label,
                    operations.AlterIndexTogether(
                        name=model_name,
                        index_together=index_together,
                    ),
                    dependencies=related_dependencies
                )
            if order_with_respect_to:
                self.add_operation(
                    app_label,
                    operations.AlterOrderWithRespectTo(
                        name=model_name,
                        order_with_respect_to=order_with_respect_to,
                    ),
                    dependencies=[
                        (app_label, model_name, order_with_respect_to, True),
                        (app_label, model_name, None, True),
                    ]
                )
def generate_created_proxies(self):
"""
Makes CreateModel statements for proxy models.
We use the same statements as that way there's less code duplication,
but of course for proxy models we can skip all that pointless field
stuff and just chuck out an operation.
"""
added_proxies = set(self.new_proxy_keys) - set(self.old_proxy_keys)
for app_label, model_name in sorted(added_proxies):
model_state = self.to_state.models[app_label, model_name]
assert model_state.options.get("proxy", False)
# Depend on the deletion of any possible non-proxy version of us
dependencies = [
(app_label, model_name, None, False),
]
# Depend on all bases
for base in model_state.bases:
if isinstance(base, six.string_types) and "." in base:
base_app_label, base_name = base.split(".", 1)
dependencies.append((base_app_label, base_name, None, True))
# Generate creation operation
self.add_operation(
app_label,
operations.CreateModel(
name=model_state.name,
fields=[],
options=model_state.options,
bases=model_state.bases,
),
# Depend on the deletion of any possible non-proxy version of us
dependencies=dependencies,
)
    def generate_deleted_models(self):
        """
        Find all deleted models and make deletion operations for them,
        and separate operations to delete any foreign key or M2M relationships
        (we'll optimise these back in later if we can)

        We also bring forward removal of any model options that refer to
        collections of fields - the inverse of generate_created_models.
        """
        deleted_models = set(self.old_model_keys) - set(self.new_model_keys)
        for app_label, model_name in sorted(deleted_models):
            model_state = self.from_state.models[app_label, model_name]
            model = self.old_apps.get_model(app_label, model_name)
            # Gather related fields
            related_fields = {}
            for field in model._meta.local_fields:
                if field.rel:
                    if field.rel.to:
                        related_fields[field.name] = field
                    # through will be none on M2Ms on swapped-out models;
                    # we can treat lack of through as auto_created=True, though.
                    if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
                        related_fields[field.name] = field
            for field in model._meta.local_many_to_many:
                if field.rel.to:
                    related_fields[field.name] = field
                if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
                    related_fields[field.name] = field
            # Generate option removal first
            unique_together = model_state.options.pop('unique_together', None)
            index_together = model_state.options.pop('index_together', None)
            if unique_together:
                self.add_operation(
                    app_label,
                    operations.AlterUniqueTogether(
                        name=model_name,
                        unique_together=None,
                    )
                )
            if index_together:
                self.add_operation(
                    app_label,
                    operations.AlterIndexTogether(
                        name=model_name,
                        index_together=None,
                    )
                )
            # Then remove each related field
            for name, field in sorted(related_fields.items()):
                self.add_operation(
                    app_label,
                    operations.RemoveField(
                        model_name=model_name,
                        name=name,
                    )
                )
            # Finally, remove the model.
            # This depends on both the removal of all incoming fields
            # and the removal of all its own related fields, and if it's
            # a through model the field that references it.
            dependencies = []
            for related_object in model._meta.get_all_related_objects():
                dependencies.append((
                    related_object.model._meta.app_label,
                    related_object.model._meta.object_name,
                    related_object.field.name,
                    False,
                ))
            for related_object in model._meta.get_all_related_many_to_many_objects():
                dependencies.append((
                    related_object.model._meta.app_label,
                    related_object.model._meta.object_name,
                    related_object.field.name,
                    False,
                ))
            for name, field in sorted(related_fields.items()):
                dependencies.append((app_label, model_name, name, False))
            # We're referenced in another field's through=
            through_user = self.through_users.get((app_label, model_state.name.lower()), None)
            if through_user:
                dependencies.append((through_user[0], through_user[1], through_user[2], False))
            # Finally, make the operation, deduping any dependencies
            self.add_operation(
                app_label,
                operations.DeleteModel(
                    name=model_state.name,
                ),
                dependencies=list(set(dependencies)),
            )
def generate_deleted_proxies(self):
"""
Makes DeleteModel statements for proxy models.
"""
deleted_proxies = set(self.old_proxy_keys) - set(self.new_proxy_keys)
for app_label, model_name in sorted(deleted_proxies):
model_state = self.from_state.models[app_label, model_name]
assert model_state.options.get("proxy", False)
self.add_operation(
app_label,
operations.DeleteModel(
name=model_state.name,
),
)
def generate_added_fields(self):
    """
    Makes AddField (or RenameField) operations for newly-appeared fields.

    A new field whose deconstruction matches a removed field on the same
    model is offered to the questioner as a rename; otherwise an AddField
    is emitted, prompting for a default when the field is NOT NULL with
    no default.
    """
    # New fields
    # Maps (app_label, model_name, new_field_name) -> old_field_name.
    self.renamed_fields = {}
    for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
        # Resolve model renames so we compare against the right old state.
        old_model_name = self.renamed_models.get((app_label, model_name), model_name)
        old_model_state = self.from_state.models[app_label, old_model_name]
        new_model_state = self.to_state.models[app_label, model_name]
        field = new_model_state.get_field_by_name(field_name)
        # Scan to see if this is actually a rename!
        field_dec = self.deep_deconstruct(field)
        found_rename = False
        for rem_app_label, rem_model_name, rem_field_name in sorted(self.old_field_keys - self.new_field_keys):
            if rem_app_label == app_label and rem_model_name == model_name:
                old_field_dec = self.deep_deconstruct(old_model_state.get_field_by_name(rem_field_name))
                # Apply detected model renames to the relation target so a
                # rename of the related model doesn't mask a field rename.
                if field.rel and field.rel.to and 'to' in old_field_dec[2]:
                    old_rel_to = old_field_dec[2]['to']
                    if old_rel_to in self.renamed_models_rel:
                        old_field_dec[2]['to'] = self.renamed_models_rel[old_rel_to]
                if old_field_dec == field_dec:
                    if self.questioner.ask_rename(model_name, rem_field_name, field_name, field):
                        self.add_operation(
                            app_label,
                            operations.RenameField(
                                model_name=model_name,
                                old_name=rem_field_name,
                                new_name=field_name,
                            )
                        )
                        # Rewrite the old key as the new one so later passes
                        # treat the field as kept rather than removed+added.
                        self.old_field_keys.remove((rem_app_label, rem_model_name, rem_field_name))
                        self.old_field_keys.add((app_label, model_name, field_name))
                        self.renamed_fields[app_label, model_name, field_name] = rem_field_name
                        found_rename = True
                        break
        if found_rename:
            continue
        # You can't just add NOT NULL fields with no default
        if not field.null and not field.has_default() and not isinstance(field, models.ManyToManyField):
            field = field.clone()
            field.default = self.questioner.ask_not_null_addition(field_name, model_name)
            self.add_operation(
                app_label,
                operations.AddField(
                    model_name=model_name,
                    name=field_name,
                    field=field,
                    preserve_default=False,
                )
            )
        else:
            self.add_operation(
                app_label,
                operations.AddField(
                    model_name=model_name,
                    name=field_name,
                    field=field,
                )
            )
def generate_removed_fields(self):
    """Emit RemoveField operations for fields that no longer exist."""
    removed = sorted(self.old_field_keys - self.new_field_keys)
    for app_label, model_name, field_name in removed:
        remove_op = operations.RemoveField(
            model_name=model_name,
            name=field_name,
        )
        # Depend on the removal of any order_with_respect_to pointing at
        # this field; the dependency is safely ignored when there isn't one.
        self.add_operation(
            app_label,
            remove_op,
            dependencies=[(app_label, model_name, field_name, "order_wrt_unset")],
        )
def generate_altered_fields(self):
    """
    Fields that have been altered.
    """
    for app_label, model_name, field_name in sorted(self.old_field_keys.intersection(self.new_field_keys)):
        # Did the field change?
        old_model_name = self.renamed_models.get((app_label, model_name), model_name)
        new_model_state = self.to_state.models[app_label, model_name]
        old_field_name = self.renamed_fields.get((app_label, model_name, field_name), field_name)
        old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field_by_name(old_field_name)[0]
        new_field = self.new_apps.get_model(app_label, model_name)._meta.get_field_by_name(field_name)[0]
        # Implement any model renames on relations; these are handled by RenameModel
        # so we need to exclude them from the comparison
        if hasattr(new_field, "rel") and getattr(new_field.rel, "to", None):
            rename_key = (
                new_field.rel.to._meta.app_label,
                new_field.rel.to._meta.object_name.lower(),
            )
            if rename_key in self.renamed_models:
                new_field.rel.to = old_field.rel.to
        # Compare canonical deconstructions; any difference means the field
        # definition changed and needs an AlterField.
        old_field_dec = self.deep_deconstruct(old_field)
        new_field_dec = self.deep_deconstruct(new_field)
        if old_field_dec != new_field_dec:
            self.add_operation(
                app_label,
                operations.AlterField(
                    model_name=model_name,
                    name=field_name,
                    field=new_model_state.get_field_by_name(field_name),
                )
            )
def _generate_altered_foo_together(self, operation):
    """
    Shared implementation for detecting unique_together / index_together
    changes.

    :param operation: the AlterFooTogether operation class to emit; its
        ``option_name`` attribute names the model option being compared.
    """
    option_name = operation.option_name
    for app_label, model_name in sorted(self.kept_model_keys):
        old_model_name = self.renamed_models.get((app_label, model_name), model_name)
        old_model_state = self.from_state.models[app_label, old_model_name]
        new_model_state = self.to_state.models[app_label, model_name]
        # Run the old value through the detected field renames so a pure
        # field rename does not register as an option change.
        if old_model_state.options.get(option_name) is None:
            old_value = None
        else:
            # Idiomatic set comprehension instead of set([generator]).
            old_value = {
                tuple(
                    self.renamed_fields.get((app_label, model_name, n), n)
                    for n in together
                )
                for together in old_model_state.options[option_name]
            }
        if old_value != new_model_state.options.get(option_name):
            self.add_operation(
                app_label,
                operation(
                    name=model_name,
                    **{option_name: new_model_state.options.get(option_name)}
                )
            )
def generate_altered_unique_together(self):
    """Emit AlterUniqueTogether operations for changed unique_together."""
    self._generate_altered_foo_together(operations.AlterUniqueTogether)
def generate_altered_index_together(self):
    """Emit AlterIndexTogether operations for changed index_together."""
    self._generate_altered_foo_together(operations.AlterIndexTogether)
def generate_altered_options(self):
    """Emit AlterModelOptions when non-schema-affecting model options differ.

    The operation only records the change in migration state so that
    Python code in migrations can see the current options.
    """
    models_to_check = self.kept_model_keys.union(set(self.new_proxy_keys).intersection(self.old_proxy_keys))
    for app_label, model_name in sorted(models_to_check):
        old_model_name = self.renamed_models.get((app_label, model_name), model_name)
        old_model_state = self.from_state.models[app_label, old_model_name]
        new_model_state = self.to_state.models[app_label, model_name]
        # Only compare options AlterModelOptions can actually carry.
        old_options = {
            key: value for key, value in old_model_state.options.items()
            if key in self.ALTER_OPTION_KEYS
        }
        new_options = {
            key: value for key, value in new_model_state.options.items()
            if key in self.ALTER_OPTION_KEYS
        }
        if old_options == new_options:
            continue
        self.add_operation(
            app_label,
            operations.AlterModelOptions(
                name=model_name,
                options=new_options,
            )
        )
def generate_altered_order_with_respect_to(self):
    """Emit AlterOrderWithRespectTo when the option changed on a kept model."""
    for app_label, model_name in sorted(self.kept_model_keys):
        old_model_name = self.renamed_models.get((app_label, model_name), model_name)
        old_model_state = self.from_state.models[app_label, old_model_name]
        new_model_state = self.to_state.models[app_label, model_name]
        if old_model_state.options.get("order_with_respect_to", None) != new_model_state.options.get("order_with_respect_to", None):
            # Make sure it comes second if we're adding
            # (removal dependency is part of RemoveField)
            dependencies = []
            if new_model_state.options.get("order_with_respect_to", None):
                # Depend on the referenced field being added first.
                dependencies.append((
                    app_label,
                    model_name,
                    new_model_state.options["order_with_respect_to"],
                    True,
                ))
            # Actually generate the operation
            self.add_operation(
                app_label,
                operations.AlterOrderWithRespectTo(
                    name=model_name,
                    order_with_respect_to=new_model_state.options.get('order_with_respect_to', None),
                ),
                dependencies=dependencies,
            )
def arrange_for_graph(self, changes, graph):
    """
    Takes in a result from changes() and a MigrationGraph,
    and fixes the names and dependencies of the changes so they
    extend the graph from the leaf nodes for each app.
    """
    leaves = graph.leaf_nodes()
    # Maps (app_label, old_name) -> (app_label, new_name) for the second,
    # dependency-fixing pass below.
    name_map = {}
    # Iterate over a snapshot since apps may be deleted from `changes`.
    for app_label, migrations in list(changes.items()):
        if not migrations:
            continue
        # Find the app label's current leaf node
        app_leaf = None
        for leaf in leaves:
            if leaf[0] == app_label:
                app_leaf = leaf
                break
        # Do they want an initial migration for this app?
        if app_leaf is None and not self.questioner.ask_initial(app_label):
            # They don't.
            for migration in migrations:
                name_map[(app_label, migration.name)] = (app_label, "__first__")
            del changes[app_label]
            continue
        # Work out the next number in the sequence
        if app_leaf is None:
            next_number = 1
        else:
            next_number = (self.parse_number(app_leaf[1]) or 0) + 1
        # Name each migration
        for i, migration in enumerate(migrations):
            if i == 0 and app_leaf:
                # Chain the first new migration onto the existing leaf.
                migration.dependencies.append(app_leaf)
            if i == 0 and not app_leaf:
                new_name = "0001_initial"
            else:
                new_name = "%04i_%s" % (
                    next_number,
                    self.suggest_name(migration.operations)[:100],
                )
            name_map[(app_label, migration.name)] = (app_label, new_name)
            next_number += 1
            migration.name = new_name
    # Now fix dependencies
    for app_label, migrations in changes.items():
        for migration in migrations:
            migration.dependencies = [name_map.get(d, d) for d in migration.dependencies]
    return changes
def _trim_to_apps(self, changes, app_labels):
"""
Takes changes from arrange_for_graph and set of app labels and
returns a modified set of changes which trims out as many migrations
that are not in app_labels as possible.
Note that some other migrations may still be present, as they may be
required dependencies.
"""
# Gather other app dependencies in a first pass
app_dependencies = {}
for app_label, migrations in changes.items():
for migration in migrations:
for dep_app_label, name in migration.dependencies:
app_dependencies.setdefault(app_label, set()).add(dep_app_label)
required_apps = set(app_labels)
# Keep resolving till there's no change
old_required_apps = None
while old_required_apps != required_apps:
old_required_apps = set(required_apps)
for app_label in list(required_apps):
required_apps.update(app_dependencies.get(app_label, set()))
# Remove all migrations that aren't needed
for app_label in list(changes.keys()):
if app_label not in required_apps:
del changes[app_label]
return changes
@classmethod
def suggest_name(cls, ops):
    """
    Given a set of operations, suggests a name for the migration
    they might represent. Names are not guaranteed to be unique,
    but we put some effort in to the fallback name to avoid VCS conflicts
    if we can.
    """
    if len(ops) == 1:
        op = ops[0]
        if isinstance(op, operations.CreateModel):
            return op.name.lower()
        if isinstance(op, operations.DeleteModel):
            return "delete_%s" % op.name.lower()
        if isinstance(op, operations.AddField):
            return "%s_%s" % (op.model_name.lower(), op.name.lower())
        if isinstance(op, operations.RemoveField):
            return "remove_%s_%s" % (op.model_name.lower(), op.name.lower())
    elif len(ops) > 1 and all(isinstance(o, operations.CreateModel) for o in ops):
        return "_".join(sorted(o.name.lower() for o in ops))
    # Timestamped fallback keeps concurrently-generated names distinct.
    return "auto_%s" % datetime.datetime.now().strftime("%Y%m%d_%H%M")
@classmethod
def parse_number(cls, name):
    """
    Given a migration name, tries to extract a number from the
    beginning of it. If no number found, returns None.
    """
    match = re.match(r"^\d+_", name)
    if match is None:
        return None
    return int(name.partition("_")[0])
| |
# Copyright 2012 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import functools
import eventlet
import netaddr
from neutron_lib.agent import constants as agent_consts
from neutron_lib.agent import topics
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants as lib_const
from neutron_lib import context as n_context
from neutron_lib.exceptions import l3 as l3_exc
from neutron_lib import rpc as n_rpc
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_context import context as common_context
from oslo_log import log as logging
import oslo_messaging
from oslo_serialization import jsonutils
from oslo_service import loopingcall
from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import netutils
from oslo_utils import timeutils
from osprofiler import profiler
from neutron.agent.common import resource_processing_queue as queue
from neutron.agent.common import utils as common_utils
from neutron.agent.l3 import dvr
from neutron.agent.l3 import dvr_edge_ha_router
from neutron.agent.l3 import dvr_edge_router as dvr_router
from neutron.agent.l3 import dvr_local_router
from neutron.agent.l3 import ha
from neutron.agent.l3 import ha_router
from neutron.agent.l3 import l3_agent_extension_api as l3_ext_api
from neutron.agent.l3 import l3_agent_extensions_manager as l3_ext_manager
from neutron.agent.l3 import legacy_router
from neutron.agent.l3 import namespace_manager
from neutron.agent.l3 import namespaces as l3_namespaces
from neutron.agent.linux import external_process
from neutron.agent.linux import pd
from neutron.agent.metadata import driver as metadata_driver
from neutron.agent import rpc as agent_rpc
from neutron.common import utils
from neutron import manager
# Module-level logger for the L3 agent.
LOG = logging.getLogger(__name__)
# Number of routers to fetch from server at a time on resync.
# Needed to reduce load on server side and to speed up resync on agent side.
SYNC_ROUTERS_MAX_CHUNK_SIZE = 256
SYNC_ROUTERS_MIN_CHUNK_SIZE = 32
# Priorities - lower value is higher priority
PRIORITY_RELATED_ROUTER = 0
PRIORITY_RPC = 1
PRIORITY_SYNC_ROUTERS_TASK = 2
PRIORITY_PD_UPDATE = 3
# Actions
DELETE_ROUTER = 1
DELETE_RELATED_ROUTER = 2
ADD_UPDATE_ROUTER = 3
ADD_UPDATE_RELATED_ROUTER = 4
PD_UPDATE = 5
UPDATE_NETWORK = 6
# Maps a router action to the action used for routers merely related to it.
RELATED_ACTION_MAP = {DELETE_ROUTER: DELETE_RELATED_ROUTER,
                      ADD_UPDATE_ROUTER: ADD_UPDATE_RELATED_ROUTER}
# Bounds for the router-processing green pool (see _resize_process_pool).
ROUTER_PROCESS_GREENLET_MAX = 32
ROUTER_PROCESS_GREENLET_MIN = 8
def log_verbose_exc(message, router_payload):
    """Log the current exception plus a lazily JSON-rendered router payload."""
    LOG.exception(message)
    # DelayedStringRenderer defers the (potentially large) dump until the
    # debug record is actually emitted.
    rendered = utils.DelayedStringRenderer(
        jsonutils.dumps, router_payload, indent=5)
    LOG.debug("Payload:\n%s", rendered)
class L3PluginApi(object):
    """Agent side of the l3 agent RPC API.
    API version history:
    1.0 - Initial version.
    1.1 - Floating IP operational status updates
    1.2 - DVR support: new L3 plugin methods added.
    - get_ports_by_subnet
    - get_agent_gateway_port
    Needed by the agent when operating in DVR/DVR_SNAT mode
    1.3 - Get the list of activated services
    1.4 - Added L3 HA update_router_state. This method was reworked in
    to update_ha_routers_states
    1.5 - Added update_ha_routers_states
    1.6 - Added process_prefix_update
    1.7 - DVR support: new L3 plugin methods added.
    - delete_agent_gateway_port
    1.8 - Added address scope information
    1.9 - Added get_router_ids
    1.10 Added update_all_ha_network_port_statuses
    1.11 Added get_host_ha_router_count
    1.12 Added get_networks
    1.13 Removed get_external_network_id
    """

    def __init__(self, topic, host):
        """Build an RPC client targeting the L3 plugin on `topic`.

        :param topic: RPC topic of the server-side L3 plugin.
        :param host: this agent's host name, passed along with most calls.
        """
        self.host = host
        target = oslo_messaging.Target(topic=topic, version='1.0')
        self.client = n_rpc.get_client(target)

    @utils.timecost
    def get_routers(self, context, router_ids=None):
        """Make a remote process call to retrieve the sync data for routers."""
        cctxt = self.client.prepare()
        return cctxt.call(context, 'sync_routers', host=self.host,
                          router_ids=router_ids)

    @utils.timecost
    def update_all_ha_network_port_statuses(self, context):
        """Make a remote process call to update HA network port status."""
        cctxt = self.client.prepare(version='1.10')
        return cctxt.call(context, 'update_all_ha_network_port_statuses',
                          host=self.host)

    @utils.timecost
    def get_router_ids(self, context):
        """Make a remote process call to retrieve scheduled routers ids."""
        cctxt = self.client.prepare(version='1.9')
        return cctxt.call(context, 'get_router_ids', host=self.host)

    @utils.timecost
    def update_floatingip_statuses(self, context, router_id, fip_statuses):
        """Call the plugin update floating IPs's operational status."""
        cctxt = self.client.prepare(version='1.1')
        return cctxt.call(context, 'update_floatingip_statuses',
                          router_id=router_id, fip_statuses=fip_statuses)

    @utils.timecost
    def get_ports_by_subnet(self, context, subnet_id):
        """Retrieve ports by subnet id."""
        cctxt = self.client.prepare(version='1.2')
        return cctxt.call(context, 'get_ports_by_subnet', host=self.host,
                          subnet_id=subnet_id)

    @utils.timecost
    def get_agent_gateway_port(self, context, fip_net):
        """Get or create an agent_gateway_port."""
        cctxt = self.client.prepare(version='1.2')
        return cctxt.call(context, 'get_agent_gateway_port',
                          network_id=fip_net, host=self.host)

    @utils.timecost
    def get_service_plugin_list(self, context):
        """Make a call to get the list of activated services."""
        cctxt = self.client.prepare(version='1.3')
        return cctxt.call(context, 'get_service_plugin_list')

    @utils.timecost
    def update_ha_routers_states(self, context, states):
        """Update HA routers states."""
        cctxt = self.client.prepare(version='1.5')
        # cast: fire-and-forget, no reply is awaited for state reports.
        return cctxt.cast(context, 'update_ha_routers_states',
                          host=self.host, states=states)

    @utils.timecost
    def process_prefix_update(self, context, prefix_update):
        """Process prefix update whenever prefixes get changed."""
        cctxt = self.client.prepare(version='1.6')
        return cctxt.call(context, 'process_prefix_update',
                          subnets=prefix_update)

    @utils.timecost
    def delete_agent_gateway_port(self, context, fip_net):
        """Delete Floatingip_agent_gateway_port."""
        cctxt = self.client.prepare(version='1.7')
        return cctxt.call(context, 'delete_agent_gateway_port',
                          host=self.host, network_id=fip_net)

    @utils.timecost
    def get_host_ha_router_count(self, context):
        """Make a call to get the count of HA router."""
        cctxt = self.client.prepare(version='1.11')
        return cctxt.call(context, 'get_host_ha_router_count', host=self.host)

    def get_networks(self, context, filters=None, fields=None):
        """Get networks.
        :param context: Security context
        :param filters: The filters to apply.
        E.g {"id" : ["<uuid of a network>", ...]}
        :param fields: A list of fields to collect, e.g ["id", "subnets"].
        :return: A list of dicts where each dict represent a network object.
        """
        cctxt = self.client.prepare(version='1.12')
        return cctxt.call(
            context, 'get_networks', filters=filters, fields=fields)
class RouterFactory(object):
    """Registry mapping feature sets to router implementation classes."""

    def __init__(self):
        # frozenset(features) -> router class implementing BaseRouterInfo.
        self._routers = {}

    def register(self, features, router_cls):
        """Register router class which implements BaseRouterInfo

        Features which is a list of strings converted to frozenset internally
        for key uniqueness.

        :param features: a list of strings of router's features
        :param router_cls: a router class which implements BaseRouterInfo
        """
        lookup_key = frozenset(features)
        self._routers[lookup_key] = router_cls

    def create(self, features, **kwargs):
        """Create router instance with registered router class

        :param features: a list of strings of router's features
        :param kwargs: arguments for router class
        :returns: a router instance which implements BaseRouterInfo
        :raises: n_exc.RouterNotFoundInRouterFactory
        """
        try:
            router_cls = self._routers[frozenset(features)]
            return router_cls(**kwargs)
        except KeyError:
            exc = l3_exc.RouterNotFoundInRouterFactory(
                router_id=kwargs['router_id'], features=features)
            LOG.exception(exc.msg)
            raise exc
@profiler.trace_cls("l3-agent")
class L3NATAgent(ha.AgentMixin,
dvr.AgentMixin,
manager.Manager):
"""Manager for L3NatAgent
API version history:
1.0 initial Version
1.1 changed the type of the routers parameter
to the routers_updated method.
It was previously a list of routers in dict format.
It is now a list of router IDs only.
Per rpc versioning rules, it is backwards compatible.
1.2 - DVR support: new L3 agent methods added.
- add_arp_entry
- del_arp_entry
1.3 - fipnamespace_delete_on_ext_net - to delete fipnamespace
after the external network is removed
Needed by the L3 service when dealing with DVR
1.4 - support network_update to get MTU updates
"""
target = oslo_messaging.Target(version='1.4')
def __init__(self, host, conf=None):
    """Initialize the L3 NAT agent.

    :param host: host name this agent runs on.
    :param conf: optional config object; defaults to the global cfg.CONF.
    """
    if conf:
        self.conf = conf
    else:
        self.conf = cfg.CONF
    self.check_config()
    # router_id -> RouterInfo cache of routers this agent hosts.
    self.router_info = {}
    self.router_factory = RouterFactory()
    self._register_router_cls(self.router_factory)
    self._check_config_params()
    self.process_monitor = external_process.ProcessMonitor(
        config=self.conf,
        resource_type='router')
    # NOTE(review): self.context is read below but only self._context is
    # assigned here — presumably a `context` property exists elsewhere in
    # this class/mixins; confirm before refactoring.
    self._context = n_context.get_admin_context_without_session()
    self.plugin_rpc = L3PluginApi(topics.L3PLUGIN, host)
    self.driver = common_utils.load_interface_driver(
        self.conf,
        get_networks_callback=functools.partial(
            self.plugin_rpc.get_networks, self.context))
    self.fullsync = True
    self.sync_routers_chunk_size = SYNC_ROUTERS_MAX_CHUNK_SIZE
    self._exiting = False
    # Get the HA router count from Neutron Server
    # This is the first place where we contact neutron-server on startup
    # so retry in case its not ready to respond.
    while True:
        try:
            self.ha_router_count = int(
                self.plugin_rpc.get_host_ha_router_count(self.context))
        except oslo_messaging.MessagingTimeout as e:
            LOG.warning('l3-agent cannot contact neutron server '
                        'to retrieve HA router count. '
                        'Check connectivity to neutron server. '
                        'Retrying... '
                        'Detailed message: %(msg)s.', {'msg': e})
            continue
        break
    LOG.info("Agent HA routers count %s", self.ha_router_count)
    self.init_extension_manager(self.plugin_rpc)
    self.metadata_driver = None
    if self.conf.enable_metadata_proxy:
        self.metadata_driver = metadata_driver.MetadataDriver(self)
    self.namespaces_manager = namespace_manager.NamespaceManager(
        self.conf,
        self.driver,
        self.metadata_driver)
    # L3 agent router processing green pool
    self._pool_size = ROUTER_PROCESS_GREENLET_MIN
    self._pool = eventlet.GreenPool(size=self._pool_size)
    self._queue = queue.ResourceProcessingQueue()
    super(L3NATAgent, self).__init__(host=self.conf.host)
    self.target_ex_net_id = None
    self.use_ipv6 = netutils.is_ipv6_enabled()
    self.pd = pd.PrefixDelegation(self.context, self.process_monitor,
                                  self.driver,
                                  self.plugin_rpc.process_prefix_update,
                                  self.create_pd_router_update,
                                  self.conf)
    # Consume network updates to trigger router resync
    consumers = [[topics.NETWORK, topics.UPDATE]]
    agent_rpc.create_consumers([self], topics.AGENT, consumers)
    self._check_ha_router_process_status()
def check_config(self):
    """Warn about configured options whose effects may surprise operators."""
    if not self.conf.cleanup_on_shutdown:
        return
    LOG.warning("cleanup_on_shutdown is set to True, so L3 agent will "
                "cleanup all its routers when exiting, "
                "data-plane will be affected.")
def _check_ha_router_process_status(self):
    """Check HA router VRRP process status in network node.

    Check if the HA router HA routers VRRP (keepalived) process count
    and state change python monitor process count meet the expected
    quantity. If so, l3-agent will not call neutron to set all related
    HA port to down state, this can prevent some unexpected VRRP
    re-election. If not, a physical host may have down and just
    restarted, set HA network port status to DOWN.
    """
    # Only relevant for agent modes that actually host VRRP instances.
    if (self.conf.agent_mode not in [lib_const.L3_AGENT_MODE_DVR_SNAT,
                                     lib_const.L3_AGENT_MODE_LEGACY]):
        return
    if self.ha_router_count <= 0:
        return
    # Only set HA ports down when host was rebooted so no net
    # namespaces were still created.
    if any(ns.startswith(l3_namespaces.NS_PREFIX) for ns in
           self.namespaces_manager.list_all()):
        LOG.debug("Network configuration already done. Skipping"
                  " set HA port to DOWN state.")
        return
    LOG.debug("Call neutron server to set HA port to DOWN state.")
    try:
        # We set HA network port status to DOWN to let l2 agent
        # update it to ACTIVE after wiring. This allows us to spawn
        # keepalived only when l2 agent finished wiring the port.
        self.plugin_rpc.update_all_ha_network_port_statuses(
            self.context)
    except Exception:
        # Best-effort: startup proceeds even if the RPC fails.
        LOG.exception('update_all_ha_network_port_statuses failed')
def _register_router_cls(self, factory):
    """Register the router classes matching this agent's operating mode."""
    factory.register([], legacy_router.LegacyRouter)
    factory.register(['ha'], ha_router.HaRouter)
    if self.conf.agent_mode == lib_const.L3_AGENT_MODE_DVR_SNAT:
        dvr_cls = dvr_router.DvrEdgeRouter
        dvr_ha_cls = dvr_edge_ha_router.DvrEdgeHaRouter
    else:
        # Non-SNAT agents host only the local (east-west/FIP) side of a
        # distributed router, even when it is HA.
        dvr_cls = dvr_local_router.DvrLocalRouter
        dvr_ha_cls = dvr_local_router.DvrLocalRouter
    factory.register(['distributed'], dvr_cls)
    factory.register(['ha', 'distributed'], dvr_ha_cls)
def _check_config_params(self):
    """Check items in configuration files.

    Check for required and invalid configuration items.
    The actual values are not verified for correctness.
    """
    if not self.conf.interface_driver:
        msg = 'An interface driver must be specified'
        LOG.error(msg)
        raise SystemExit(1)
    if not self.conf.ipv6_gateway:
        return
    # ipv6_gateway configured. Check for valid v6 link-local address.
    msg = ("%s used in config as ipv6_gateway is not a valid "
           "IPv6 link-local address.")
    try:
        ip_addr = netaddr.IPAddress(self.conf.ipv6_gateway)
    except netaddr.AddrFormatError:
        LOG.error(msg, self.conf.ipv6_gateway)
        raise SystemExit(1)
    if ip_addr.version != 6 or not ip_addr.is_link_local():
        LOG.error(msg, self.conf.ipv6_gateway)
        raise SystemExit(1)
def _create_router(self, router_id, router):
    """Instantiate the RouterInfo subclass matching the router's features.

    :param router_id: ID of the router.
    :param router: router dict as received from the plugin.
    :returns: a router object created by the registered RouterFactory.
    """
    kwargs = {
        'agent': self,
        'router_id': router_id,
        'router': router,
        'use_ipv6': self.use_ipv6,
        'agent_conf': self.conf,
        'interface_driver': self.driver,
    }
    features = []
    if router.get('distributed'):
        features.append('distributed')
        kwargs['host'] = self.host
    if router.get('ha'):
        features.append('ha')
    if router.get('distributed') and router.get('ha'):
        # Case 1: If the router contains information about the HA interface
        # and if the requesting agent is a DVR_SNAT agent then go ahead
        # and create a HA router.
        # Case 2: If the router does not contain information about the HA
        # interface this means that this DVR+HA router needs to host only
        # the edge side of it, typically because it's landing on a node
        # that needs to provision a router namespace because of a DVR
        # service port (e.g. DHCP). So go ahead and create a regular DVR
        # edge router.
        if (not router.get(lib_const.HA_INTERFACE_KEY) or
                self.conf.agent_mode != lib_const.L3_AGENT_MODE_DVR_SNAT):
            features.remove('ha')
    return self.router_factory.create(features, **kwargs)
@lockutils.synchronized('resize_greenpool')
def _resize_process_pool(self):
    """Grow/shrink the router-processing green pool with the router count."""
    # Clamp the desired size between the configured min and max bounds.
    desired = min(ROUTER_PROCESS_GREENLET_MAX,
                  max(ROUTER_PROCESS_GREENLET_MIN, len(self.router_info)))
    if desired == self._pool_size:
        return
    LOG.info("Resizing router processing queue green pool size to: %d",
             desired)
    self._pool.resize(desired)
    self._pool_size = desired
def _router_added(self, router_id, router):
    """Create and cache the RouterInfo, then initialize it.

    Publishes BEFORE_CREATE before initialization so subscribers can veto
    or prepare; on initialization failure the router is cleaned up and the
    exception re-raised (triggering a full resync upstream).
    """
    ri = self._create_router(router_id, router)
    registry.publish(resources.ROUTER, events.BEFORE_CREATE, self,
                     payload=events.DBEventPayload(
                         self.context,
                         resource_id=router_id,
                         states=(ri,)))
    self.router_info[router_id] = ri
    # If initialize() fails, cleanup and retrigger complete sync
    try:
        ri.initialize(self.process_monitor)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception('Error while initializing router %s',
                          router_id)
            self._cleanup_failed_router(router_id, delete_router_info=True)
    self._resize_process_pool()
def _cleanup_failed_router(self, router_id, delete_router_info):
    """Drop a failed router from the cache and tear down its namespace.

    :param delete_router_info: when True, also call delete() on the cached
        RouterInfo (best-effort; failures are only logged).
    """
    router_info = self.router_info.pop(router_id)
    self.namespaces_manager.ensure_router_cleanup(router_id)
    if not delete_router_info:
        return
    try:
        router_info.delete()
    except Exception:
        LOG.exception('Error while deleting router %s',
                      router_id)
def _safe_router_removed(self, router_id):
    """Try to delete a router and return True if successful."""
    # The l3_ext_manager API expects a router dict, look it up
    ri = self.router_info.get(router_id)
    try:
        if ri:
            self.l3_ext_manager.delete_router(self.context, ri.router)
        # _router_removed handles ri being None (namespace-only cleanup).
        self._router_removed(ri, router_id)
    except Exception:
        LOG.exception('Error while deleting router %s', router_id)
        return False
    # Shrink the processing pool now that one fewer router is hosted.
    self._resize_process_pool()
    return True
def _router_removed(self, ri, router_id):
    """Delete the router and stop the auxiliary processes

    This stops the auxiliary processes (keepalived, keepvalived-state-
    change, radvd, etc) and deletes the router ports and the namespace.
    The "router_info" cache is updated too at the beginning of the process,
    to avoid any other concurrent process to handle the router being
    deleted. If an exception is raised, the "router_info" cache is
    restored.
    """
    if ri is None:
        LOG.warning("Info for router %s was not found. "
                    "Performing router cleanup", router_id)
        self.namespaces_manager.ensure_router_cleanup(router_id)
        return
    registry.publish(resources.ROUTER, events.BEFORE_DELETE, self,
                     payload=events.DBEventPayload(
                         self.context, states=(ri,),
                         resource_id=router_id))
    # Remove from the cache first so concurrent updates skip this router.
    del self.router_info[router_id]
    try:
        ri.delete()
    except Exception:
        # Restore the cache entry so a later retry can find the router.
        with excutils.save_and_reraise_exception():
            self.router_info[router_id] = ri
    LOG.debug("Router info %s delete action done, "
              "and it was removed from cache.", router_id)
    registry.publish(resources.ROUTER, events.AFTER_DELETE, self,
                     payload=events.DBEventPayload(
                         self.context,
                         resource_id=router_id,
                         states=(ri,)))
def init_extension_manager(self, connection):
    """Create and initialize the l3 agent extensions manager."""
    l3_ext_manager.register_opts(self.conf)
    self.agent_api = l3_ext_api.L3AgentExtensionAPI(self.router_info,
                                                    self.router_factory)
    self.l3_ext_manager = l3_ext_manager.L3AgentExtensionsManager(self.conf)
    self.l3_ext_manager.initialize(
        connection, lib_const.L3_AGENT_MODE,
        self.agent_api)
def router_deleted(self, context, router_id):
    """Deal with router deletion RPC message."""
    LOG.debug('Got router deleted notification for %s', router_id)
    self._queue.add(
        queue.ResourceUpdate(router_id,
                             PRIORITY_RPC,
                             action=DELETE_ROUTER))
def routers_updated(self, context, routers):
    """Deal with routers modification and creation RPC message.

    :param routers: a list of router IDs, or (legacy callers) a list of
        router dicts carrying an 'id' key.
    """
    LOG.debug('Got routers updated notification :%s', routers)
    if not routers:
        return
    # This is needed for backward compatibility
    if isinstance(routers[0], dict):
        routers = [router['id'] for router in routers]
    # Renamed loop variable: the original shadowed the builtin `id`.
    for router_id in routers:
        update = queue.ResourceUpdate(
            router_id, PRIORITY_RPC, action=ADD_UPDATE_ROUTER)
        self._queue.add(update)
def router_removed_from_agent(self, context, payload):
    """Handle notification that a router was unscheduled from this agent."""
    LOG.debug('Got router removed from agent :%r', payload)
    self._queue.add(
        queue.ResourceUpdate(payload['router_id'],
                             PRIORITY_RPC,
                             action=DELETE_ROUTER))
def router_added_to_agent(self, context, payload):
    """Handle router-added notification by treating it as an update."""
    LOG.debug('Got router added to agent :%r', payload)
    # The payload is a list of router IDs; reuse the update path.
    self.routers_updated(context, payload)
def network_update(self, context, **kwargs):
    """Queue an UPDATE_NETWORK check for every hosted router."""
    network_id = kwargs['network']['id']
    LOG.debug("Got network %s update", network_id)
    for router_info in self.router_info.values():
        self._queue.add(
            queue.ResourceUpdate(router_info.router_id,
                                 PRIORITY_RPC,
                                 action=UPDATE_NETWORK,
                                 resource=network_id))
def _process_network_update(self, router_id, network_id):
    """Resync a cached router if it has a port on the updated network."""
    ri = self.router_info.get(router_id)
    if not ri:
        return
    LOG.debug("Checking if router %s is plugged to the network %s",
              ri, network_id)
    ports = list(ri.internal_ports)
    if ri.ex_gw_port:
        ports.append(ri.ex_gw_port)
    # PEP 8 (E731): use a direct generator expression instead of binding
    # a lambda to a name.
    if any(p['network_id'] == network_id for p in ports):
        update = queue.ResourceUpdate(
            ri.router_id, PRIORITY_SYNC_ROUTERS_TASK)
        self._resync_router(update)
def _process_router_if_compatible(self, router):
    """Add or update a router, rejecting ones this agent cannot host."""
    # Either ex_net_id or handle_internal_only_routers must be set
    gw_info = router['external_gateway_info'] or {}
    ex_net_id = gw_info.get('network_id')
    if not ex_net_id and not self.conf.handle_internal_only_routers:
        raise l3_exc.RouterNotCompatibleWithAgent(router_id=router['id'])
    if router['id'] in self.router_info:
        LOG.debug("Router %s info in cache, "
                  "will do the router update action.", router['id'])
        self._process_updated_router(router)
    else:
        LOG.debug("Router %s info not in cache, "
                  "will do the router add action.", router['id'])
        self._process_added_router(router)
def _process_added_router(self, router):
    """Add a router to this agent and run its first processing pass.

    On failure the router is removed from the namespace manager but kept
    in the cache so the next sync can retry; the exception is re-raised.
    """
    self._router_added(router['id'], router)
    ri = self.router_info[router['id']]
    ri.router = router
    try:
        ri.process()
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception('Error while processing router %s',
                          router['id'])
            # NOTE(slaweq): deleting of the router info in the
            # _cleanup_failed_router is avoided as in case of error,
            # processing of the router will be retried on next call and
            # that may lead to some race conditions e.g. with
            # configuration of the DVR router's FIP gateway
            self._cleanup_failed_router(router['id'],
                                        delete_router_info=False)
    registry.publish(resources.ROUTER, events.AFTER_CREATE, self,
                     payload=events.DBEventPayload(
                         self.context,
                         resource_id=router['id'],
                         states=(ri,)))
    self.l3_ext_manager.add_router(self.context, router)
    def _process_updated_router(self, router):
        """Refresh an already-hosted router with new server-side data.

        If the router's type (HA / distributed) changed on the server, the
        local RouterInfo is rebuilt; a DVR+HA router whose HA interface
        binding changed is removed and re-added from scratch.
        """
        ri = self.router_info[router['id']]
        # Rebuild the RouterInfo when the router's type changed server-side
        # so the local object matches the new type.
        router_ha = router.get('ha')
        router_distributed = router.get('distributed')
        if ((router_ha is not None and ri.router.get('ha') != router_ha) or
            (router_distributed is not None and
             ri.router.get('distributed') != router_distributed)):
            LOG.warning('Type of the router %(id)s changed. '
                        'Old type: ha=%(old_ha)s; distributed=%(old_dvr)s; '
                        'New type: ha=%(new_ha)s; distributed=%(new_dvr)s',
                        {'id': router['id'],
                         'old_ha': ri.router.get('ha'),
                         'old_dvr': ri.router.get('distributed'),
                         'new_ha': router.get('ha'),
                         'new_dvr': router.get('distributed')})
            ri = self._create_router(router['id'], router)
            self.router_info[router['id']] = ri
        is_dvr_snat_agent = (self.conf.agent_mode ==
                             lib_const.L3_AGENT_MODE_DVR_SNAT)
        is_dvr_only_agent = (self.conf.agent_mode in
                             [lib_const.L3_AGENT_MODE_DVR,
                              lib_const.L3_AGENT_MODE_DVR_NO_EXTERNAL])
        # True when the HA interface appeared or disappeared between the
        # cached copy and the fresh server data.
        old_router_ha_interface = ri.router.get(lib_const.HA_INTERFACE_KEY)
        current_router_ha_interface = router.get(lib_const.HA_INTERFACE_KEY)
        ha_interface_change = ((old_router_ha_interface is None and
                                current_router_ha_interface is not None) or
                               (old_router_ha_interface is not None and
                                current_router_ha_interface is None))
        is_dvr_ha_router = router.get('distributed') and router.get('ha')
        if is_dvr_snat_agent and is_dvr_ha_router and ha_interface_change:
            LOG.debug("Removing HA router %s, since it is not bound to "
                      "the current agent, and recreating regular DVR router "
                      "based on service port requirements.",
                      router['id'])
            if self._safe_router_removed(router['id']):
                self._process_added_router(router)
        else:
            is_ha_router = getattr(ri, 'ha_state', False)
            # For HA routers check that DB state matches actual state
            if router.get('ha') and not is_dvr_only_agent and is_ha_router:
                self.check_ha_state_for_router(
                    router['id'], router.get(lib_const.HA_ROUTER_STATE_KEY))
            ri.router = router
            registry.publish(resources.ROUTER, events.BEFORE_UPDATE, self,
                             payload=events.DBEventPayload(
                                 self.context,
                                 resource_id=router['id'],
                                 states=(ri,)))
            ri.process()
            registry.publish(resources.ROUTER, events.AFTER_UPDATE, self,
                             payload=events.DBEventPayload(
                                 self.context,
                                 resource_id=router['id'],
                                 states=(None, ri)))
            self.l3_ext_manager.update_router(self.context, router)
def _resync_router(self, router_update,
priority=PRIORITY_SYNC_ROUTERS_TASK):
# Don't keep trying to resync if it's failing
if router_update.hit_retry_limit():
LOG.warning("Hit retry limit with router update for %s, action %s",
router_update.id, router_update.action)
return
router_update.timestamp = timeutils.utcnow()
router_update.priority = priority
router_update.resource = None # Force the agent to resync the router
self._queue.add(router_update)
    def _process_update(self):
        """Drain queued updates, dispatching each to the proper handler."""
        # Bail out early when the agent is shutting down.
        if self._exiting:
            return
        for rp, update in self._queue.each_update_to_next_resource():
            LOG.info("Starting processing update %s, action %s, priority %s, "
                     "update_id %s. Wait time elapsed: %.3f",
                     update.id, update.action, update.priority,
                     update.update_id,
                     update.time_elapsed_since_create)
            if update.action == UPDATE_NETWORK:
                # Network events carry the network id in update.resource.
                self._process_network_update(
                    router_id=update.id,
                    network_id=update.resource)
            else:
                self._process_router_update(rp, update)
    def _process_router_update(self, rp, update):
        """Handle a single router event pulled from the update queue.

        Fetches fresh router data when needed, removes routers that no
        longer exist, and resyncs on failure.
        """
        LOG.info("Starting router update for %s, action %s, priority %s, "
                 "update_id %s. Wait time elapsed: %.3f",
                 update.id, update.action, update.priority,
                 update.update_id,
                 update.time_elapsed_since_create)
        if update.action == PD_UPDATE:
            # Prefix-delegation events are agent-wide, not per-router.
            self.pd.process_prefix_update()
            LOG.info("Finished a router update for %s IPv6 PD, "
                     "update_id. %s. Time elapsed: %.3f",
                     update.id, update.update_id,
                     update.time_elapsed_since_start)
            return
        routers = [update.resource] if update.resource else []
        # Fetch from the server when the event carried no payload (and is
        # not a delete), or when it is a related-router event.
        not_delete_no_routers = (update.action != DELETE_ROUTER and
                                 not routers)
        related_action = update.action in (DELETE_RELATED_ROUTER,
                                           ADD_UPDATE_RELATED_ROUTER)
        if not_delete_no_routers or related_action:
            try:
                update.timestamp = timeutils.utcnow()
                routers = self.plugin_rpc.get_routers(self.context,
                                                      [update.id])
            except Exception:
                msg = "Failed to fetch router information for '%s'"
                LOG.exception(msg, update.id)
                self._resync_router(update)
                return
            # For a related action, verify the router is still hosted here,
            # since it could have just been deleted and we don't want to
            # add it back.
            if related_action:
                routers = [r for r in routers if r['id'] == update.id]
        if not routers:
            # Router is gone server-side: tear down local state.
            removed = self._safe_router_removed(update.id)
            if not removed:
                self._resync_router(update)
            else:
                # need to update timestamp of removed router in case
                # there are older events for the same router in the
                # processing queue (like events from fullsync) in order to
                # prevent deleted router re-creation
                rp.fetched_and_processed(update.timestamp)
            LOG.info("Finished a router delete for %s, update_id %s. "
                     "Time elapsed: %.3f",
                     update.id, update.update_id,
                     update.time_elapsed_since_start)
            return
        if not self._process_routers_if_compatible(routers, update):
            self._resync_router(update)
            return
        rp.fetched_and_processed(update.timestamp)
        LOG.info("Finished a router update for %s, update_id %s. "
                 "Time elapsed: %.3f",
                 update.id, update.update_id,
                 update.time_elapsed_since_start)
    def _process_routers_if_compatible(self, routers, update):
        """Process fetched routers for ``update``.

        Routers whose id differs from ``update.id`` (related routers) are
        re-queued instead of being processed inline.

        :returns: False if any router failed and a resync is needed.
        """
        process_result = True
        for router in routers:
            if router['id'] != update.id:
                # Don't do the work here, instead create a new update and
                # enqueue it, since there could be another thread working
                # on it already and we don't want to race.
                new_action = RELATED_ACTION_MAP.get(
                    update.action, ADD_UPDATE_RELATED_ROUTER)
                new_update = queue.ResourceUpdate(
                    router['id'],
                    priority=PRIORITY_RELATED_ROUTER,
                    action=new_action)
                self._queue.add(new_update)
                LOG.debug('Queued a router update for %(router_id)s '
                          '(related router %(related_router_id)s). '
                          'Original event action %(action)s, '
                          'priority %(priority)s. '
                          'New event action %(new_action)s, '
                          'priority %(new_priority)s',
                          {'router_id': router['id'],
                           'related_router_id': update.id,
                           'action': update.action,
                           'priority': update.priority,
                           'new_action': new_update.action,
                           'new_priority': new_update.priority})
                continue
            try:
                self._process_router_if_compatible(router)
            except l3_exc.RouterNotCompatibleWithAgent as e:
                log_verbose_exc(e.msg, router)
                # Was the router previously handled by this agent?
                if router['id'] in self.router_info:
                    LOG.error("Removing incompatible router '%s'",
                              router['id'])
                    self._safe_router_removed(router['id'])
            except Exception:
                log_verbose_exc(
                    "Failed to process compatible router: %s" % update.id,
                    router)
                process_result = False
        return process_result
    def _process_routers_loop(self):
        """Worker loop: spawn green threads that drain the update queue."""
        LOG.debug("Starting _process_routers_loop")
        while not self._exiting:
            self._pool.spawn_n(self._process_update)
    # NOTE(kevinbenton): this is set to 1 second because the actual interval
    # is controlled by a FixedIntervalLoopingCall in neutron/service.py that
    # is responsible for task execution.
    @periodic_task.periodic_task(spacing=1, run_immediately=True)
    def periodic_sync_routers_task(self, context):
        """Run a full router resync whenever ``self.fullsync`` is set."""
        if not self.fullsync:
            return
        LOG.debug("Starting fullsync periodic_sync_routers_task")
        # self.fullsync is True at this point. If an exception -- caught or
        # uncaught -- prevents setting it to False below then the next call
        # to periodic_sync_routers_task will re-enter this code and try again.
        # Context manager self.namespaces_manager captures a picture of
        # namespaces *before* fetch_and_sync_all_routers fetches the full list
        # of routers from the database. This is important to correctly
        # identify stale ones.
        try:
            with self.namespaces_manager as ns_manager:
                self.fetch_and_sync_all_routers(context, ns_manager)
        except l3_exc.AbortSyncRouters:
            # Keep fullsync set so the next periodic run retries.
            self.fullsync = True
    def fetch_and_sync_all_routers(self, context, ns_manager):
        """Fetch every router bound to this agent and queue sync updates.

        Routers are fetched in chunks; the chunk size is halved on RPC
        timeouts (down to SYNC_ROUTERS_MIN_CHUNK_SIZE) and grows back
        after successful syncs. Routers that disappeared since the last
        sync get DELETE_ROUTER updates.

        :raises l3_exc.AbortSyncRouters: on non-timeout RPC errors.
        """
        prev_router_ids = set(self.router_info)
        curr_router_ids = set()
        timestamp = timeutils.utcnow()
        router_ids = []
        chunk = []
        is_snat_agent = (self.conf.agent_mode ==
                         lib_const.L3_AGENT_MODE_DVR_SNAT)
        try:
            router_ids = self.plugin_rpc.get_router_ids(context)
            # fetch routers by chunks to reduce the load on server and to
            # start router processing earlier
            for i in range(0, len(router_ids), self.sync_routers_chunk_size):
                chunk = router_ids[i:i + self.sync_routers_chunk_size]
                routers = self.plugin_rpc.get_routers(context, chunk)
                LOG.debug('Processing :%r', routers)
                for r in routers:
                    curr_router_ids.add(r['id'])
                    ns_manager.keep_router(r['id'])
                    if r.get('distributed'):
                        # need to keep fip namespaces as well
                        ext_net_id = (r['external_gateway_info'] or {}).get(
                            'network_id')
                        if ext_net_id:
                            ns_manager.keep_ext_net(ext_net_id)
                    elif is_snat_agent and not r.get('ha'):
                        ns_manager.ensure_snat_cleanup(r['id'])
                    update = queue.ResourceUpdate(
                        r['id'],
                        PRIORITY_SYNC_ROUTERS_TASK,
                        resource=r,
                        action=ADD_UPDATE_ROUTER,
                        timestamp=timestamp)
                    self._queue.add(update)
        except oslo_messaging.MessagingTimeout:
            # Back off: shrink the chunk size and let the caller retry.
            if self.sync_routers_chunk_size > SYNC_ROUTERS_MIN_CHUNK_SIZE:
                self.sync_routers_chunk_size = max(
                    self.sync_routers_chunk_size // 2,
                    SYNC_ROUTERS_MIN_CHUNK_SIZE)
                LOG.error('Server failed to return info for routers in '
                          'required time, decreasing chunk size to: %s',
                          self.sync_routers_chunk_size)
            else:
                LOG.error('Server failed to return info for routers in '
                          'required time even with min chunk size: %s. '
                          'It might be under very high load or '
                          'just inoperable',
                          self.sync_routers_chunk_size)
            raise
        except oslo_messaging.MessagingException:
            failed_routers = chunk or router_ids
            LOG.exception("Failed synchronizing routers '%s' "
                          "due to RPC error", failed_routers)
            raise l3_exc.AbortSyncRouters()
        self.fullsync = False
        LOG.debug("periodic_sync_routers_task successfully completed")
        # adjust chunk size after successful sync
        if self.sync_routers_chunk_size < SYNC_ROUTERS_MAX_CHUNK_SIZE:
            self.sync_routers_chunk_size = min(
                self.sync_routers_chunk_size + SYNC_ROUTERS_MIN_CHUNK_SIZE,
                SYNC_ROUTERS_MAX_CHUNK_SIZE)
        # Delete routers that have disappeared since the last sync
        for router_id in prev_router_ids - curr_router_ids:
            ns_manager.keep_router(router_id)
            update = queue.ResourceUpdate(router_id,
                                          PRIORITY_SYNC_ROUTERS_TASK,
                                          timestamp=timestamp,
                                          action=DELETE_ROUTER)
            self._queue.add(update)
    @property
    def context(self):
        """Admin context carrying a fresh request-id on every access."""
        # generate a new request-id on each call to make server side tracking
        # of RPC calls easier.
        self._context.request_id = common_context.generate_request_id()
        return self._context
    def after_start(self):
        """Spawn the router-processing loop once the agent is running."""
        # Note: the FWaaS' vArmourL3NATAgent is a subclass of L3NATAgent. It
        # calls this method here. So removing this after_start() would break
        # vArmourL3NATAgent. We need to find out whether vArmourL3NATAgent
        # can have L3NATAgentWithStateReport as its base class instead of
        # L3NATAgent.
        eventlet.spawn_n(self._process_routers_loop)
        LOG.info("L3 agent started")
    def stop(self):
        """Stop the agent, optionally tearing down all hosted routers."""
        LOG.info("Stopping L3 agent")
        if self.conf.cleanup_on_shutdown:
            # Stop accepting new updates, then delete every router this
            # agent manages.
            self._exiting = True
            for router in self.router_info.values():
                router.delete()
def create_pd_router_update(self):
router_id = None
update = queue.ResourceUpdate(router_id,
PRIORITY_PD_UPDATE,
timestamp=timeutils.utcnow(),
action=PD_UPDATE)
self._queue.add(update)
class L3NATAgentWithStateReport(L3NATAgent):
    """L3 agent that periodically reports its state to the Neutron server."""

    def __init__(self, host, conf=None):
        super(L3NATAgentWithStateReport, self).__init__(host=host, conf=conf)
        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS)
        # Tracks whether the previous report attempt failed, so that
        # recovery can be logged explicitly.
        self.failed_report_state = False
        self.agent_state = {
            'binary': lib_const.AGENT_PROCESS_L3,
            'host': host,
            'availability_zone': self.conf.AGENT.availability_zone,
            'topic': topics.L3_AGENT,
            'configurations': {
                'agent_mode': self.conf.agent_mode,
                'handle_internal_only_routers':
                self.conf.handle_internal_only_routers,
                'interface_driver': self.conf.interface_driver,
                'log_agent_heartbeats': self.conf.AGENT.log_agent_heartbeats,
                'extensions': self.l3_ext_manager.names()},
            'start_flag': True,
            'agent_type': lib_const.AGENT_TYPE_L3}
        report_interval = self.conf.AGENT.report_interval
        if report_interval:
            # Periodically push agent_state to the server.
            self.heartbeat = loopingcall.FixedIntervalLoopingCall(
                self._report_state)
            self.heartbeat.start(interval=report_interval)

    def _report_state(self):
        """Collect per-router statistics and report them to the server."""
        num_ex_gw_ports = 0
        num_interfaces = 0
        num_floating_ips = 0
        router_infos = self.router_info.values()
        num_routers = len(router_infos)
        for ri in router_infos:
            ex_gw_port = ri.get_ex_gw_port()
            if ex_gw_port:
                num_ex_gw_ports += 1
            num_interfaces += len(ri.router.get(lib_const.INTERFACE_KEY,
                                                []))
            num_floating_ips += len(ri.router.get(lib_const.FLOATINGIP_KEY,
                                                  []))
        configurations = self.agent_state['configurations']
        configurations['routers'] = num_routers
        configurations['ex_gw_ports'] = num_ex_gw_ports
        configurations['interfaces'] = num_interfaces
        configurations['floating_ips'] = num_floating_ips
        try:
            agent_status = self.state_rpc.report_state(self.context,
                                                       self.agent_state,
                                                       True)
            if agent_status == agent_consts.AGENT_REVIVED:
                LOG.info('Agent has just been revived. '
                         'Doing a full sync.')
                self.fullsync = True
            # start_flag is only meaningful on the first report.
            self.agent_state.pop('start_flag', None)
        except AttributeError:
            # This means the server does not support report_state
            # NOTE(review): if report_interval was falsy, self.heartbeat was
            # never created and this stop() would itself raise -- confirm
            # this path is only reached with an active heartbeat.
            LOG.warning("Neutron server does not support state report. "
                        "State report for this agent will be disabled.")
            self.heartbeat.stop()
            return
        except Exception:
            self.failed_report_state = True
            LOG.exception("Failed reporting state!")
            return
        if self.failed_report_state:
            self.failed_report_state = False
            LOG.info("Successfully reported state after a previous failure.")

    def after_start(self):
        """Start router processing and report state before the first sync."""
        eventlet.spawn_n(self._process_routers_loop)
        LOG.info("L3 agent started")
        # Do the report state before we do the first full sync.
        self._report_state()
        self.pd.after_start()

    def agent_updated(self, context, payload):
        """Handle the agent_updated notification event."""
        self.fullsync = True
        LOG.info("agent_updated by server side %s!", payload)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for slice op."""
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
class SliceTest(test.TestCase):
  """Functional tests for array_ops.slice and Tensor.__getitem__ slicing."""

  def testEmpty(self):
    inp = np.random.rand(4, 4).astype("f")
    for k in range(4):
      with self.cached_session():
        a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.float32)
        slice_t = a[2, k:k]
        slice_val = self.evaluate(slice_t)
        self.assertAllEqual(slice_val, inp[2, k:k])

  def testInt32(self):
    inp = np.random.rand(4, 4).astype("i")
    for k in range(4):
      with self.cached_session():
        a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.int32)
        slice_t = a[2, k:k]
        slice_val = self.evaluate(slice_t)
        self.assertAllEqual(slice_val, inp[2, k:k])

  def testSlicingWithInt64Index(self):
    with self.cached_session(force_gpu=test.is_gpu_available()):
      a = constant_op.constant([0, 1, 2], dtype=dtypes.int32)

      # Slice using int64 Tensor.
      i = constant_op.constant(1, dtype=dtypes.int64)
      slice_t = a[i]
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual(1, slice_val)
      slice_t = a[i:i+1]
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual([1], slice_val)

      # Slice using int64 integer.
      i = np.asarray(1).astype(np.int64)
      slice_t = a[i]
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual(1, slice_val)
      slice_t = a[i:i+1]
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual([1], slice_val)

      # array_ops.slice also accepts int64 begin/size arguments.
      a_int32 = constant_op.constant([0, 1, 2], dtype=dtypes.int32)
      slice_t = array_ops.slice(a_int32,
                                np.asarray([1]).astype(np.int64),
                                np.asarray([2]).astype(np.int64))
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual([1, 2], slice_val)
      a_float32 = constant_op.constant([0, 1, 2], dtype=dtypes.float32)
      slice_t = array_ops.slice(a_float32,
                                np.asarray([1]).astype(np.int64),
                                np.asarray([2]).astype(np.int64))
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual([1, 2], slice_val)

  def testSlicingInt64Tensor(self):
    with self.cached_session(force_gpu=test.is_gpu_available()):
      a = constant_op.constant([0, 1, 2], dtype=dtypes.int64)

      # Slice using int32 Tensor.
      i = constant_op.constant(1, dtype=dtypes.int32)
      slice_t = a[i]
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual(1, slice_val)
      slice_t = a[i:i + 1]
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual([1], slice_val)

      # Slice using int32 integer.
      i = np.asarray(1).astype(np.int32)
      slice_t = a[i]
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual(1, slice_val)
      slice_t = a[i:i + 1]
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual([1], slice_val)

      slice_t = array_ops.slice(a, [1], [2])
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual([1, 2], slice_val)

  def testSelectAll(self):
    for _ in range(10):
      with self.cached_session():
        inp = np.random.rand(4, 4, 4, 4).astype("f")
        a = constant_op.constant(inp, shape=[4, 4, 4, 4], dtype=dtypes.float32)

        # A size of -1 selects everything from the begin index onward.
        slice_explicit_t = array_ops.slice(a, [0, 0, 0, 0], [-1, -1, -1, -1])
        slice_implicit_t = a[:, :, :, :]

        self.assertAllEqual(inp, self.evaluate(slice_explicit_t))
        self.assertAllEqual(inp, self.evaluate(slice_implicit_t))
        self.assertEqual(inp.shape, slice_explicit_t.get_shape())
        self.assertEqual(inp.shape, slice_implicit_t.get_shape())

  def testSingleDimension(self):
    for _ in range(10):
      with self.cached_session():
        inp = np.random.rand(10).astype("f")
        a = constant_op.constant(inp, shape=[10], dtype=dtypes.float32)

        hi = np.random.randint(0, 9)
        scalar_t = a[hi]
        scalar_val = self.evaluate(scalar_t)
        self.assertAllEqual(scalar_val, inp[hi])

        if hi > 0:
          lo = np.random.randint(0, hi)
        else:
          lo = 0
        slice_t = a[lo:hi]
        slice_val = self.evaluate(slice_t)
        self.assertAllEqual(slice_val, inp[lo:hi])

  def test3Dimension(self):
    with self.cached_session():
      input_shape = [8, 16, 16, 16, 8]
      total_input_size = 1
      for s in input_shape:
        total_input_size *= s
      inputs = [
          i * 1.0 / total_input_size for i in range(1, total_input_size + 1)
      ]
      a = constant_op.constant(inputs, shape=input_shape, dtype=dtypes.float32)

      filter_shape = [1, 1, 1, 8, 8]
      total_filter_size = 1
      for s in filter_shape:
        total_filter_size *= s
      filters = [
          i * 1.0 / total_filter_size for i in range(1, total_filter_size + 1)
      ]
      f = constant_op.constant(
          filters, shape=filter_shape, dtype=dtypes.float32)

      # Slice a single output voxel out of a conv3d result.
      conv_t = nn_ops.conv3d(
          a, filter=f, strides=[1, 1, 1, 1, 1], padding="VALID")
      slice_t = array_ops.slice(conv_t, [0, 1, 1, 1, 0], [1, 1, 1, 1, 8])
      result = self.evaluate(slice_t)
      expected = [
          0.03028321, 0.03132677, 0.03237033, 0.03341389, 0.03445745, 0.035501,
          0.03654456, 0.03758812
      ]
      self.assertAllClose(expected, result.flatten(), rtol=1e-6)

  def testScalarInput(self):
    input_val = 0
    # Test with constant input; shape inference fails.
    with self.assertRaisesWithPredicateMatch(
        (ValueError, errors_impl.InvalidArgumentError), "out of range"):
      constant_op.constant(input_val)[:].get_shape()

    # Test evaluating with non-constant input; kernel execution fails.
    @def_function.function
    def func(input_t):
      slice_t = input_t[:]
      return slice_t

    with self.assertRaisesWithPredicateMatch(TypeError, "not subscriptable"):
      self.evaluate(func(input_val))

  def testInvalidIndex(self):
    input_val = [1, 2]
    # Test with constant input; shape inference fails.
    with self.assertRaisesWithPredicateMatch(
        (ValueError, errors_impl.InvalidArgumentError), "out of range"):
      constant_op.constant(input_val)[1:, 1:].get_shape()

    # Test evaluating with non-constant input; kernel execution fails.
    @def_function.function
    def func(input_t):
      slice_t = input_t[1:, 1:]
      return slice_t

    with self.assertRaisesWithPredicateMatch(
        TypeError, "must be integers or slices, not tuple"):
      self.evaluate(func(input_val))

  def _testSliceMatrixDim0(self, x, begin, size):
    """Slices `size` rows starting at row `begin` and checks against numpy."""
    tf_ans = self.evaluate(array_ops.slice(x, [begin, 0], [size, x.shape[1]]))
    np_ans = x[begin:begin + size, :]
    self.assertAllEqual(tf_ans, np_ans)

  def testSliceMatrixDim0(self):
    x = np.random.rand(8, 4).astype("f")
    self._testSliceMatrixDim0(x, 1, 2)
    self._testSliceMatrixDim0(x, 3, 3)
    y = np.random.rand(8, 7).astype("f")  # 7 * sizeof(float) is not aligned
    self._testSliceMatrixDim0(y, 1, 2)
    self._testSliceMatrixDim0(y, 3, 3)

  def testSingleElementAll(self):
    for _ in range(10):
      with self.cached_session():
        inp = np.random.rand(4, 4).astype("f")
        a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.float32)

        x, y = np.random.randint(0, 3, size=2).tolist()
        slice_t = a[x, 0:y]
        slice_val = self.evaluate(slice_t)
        self.assertAllEqual(slice_val, inp[x, 0:y])

  def testSimple(self):
    with test_util.use_gpu():
      for dtype in [
          np.uint8,
          np.int8,
          np.uint16,
          np.int16,
          np.int32,
          np.int64,
          np.bool_,
          np.float16,
          np.float32,
          np.float64,
          np.complex64,
          np.complex128,
      ]:
        inp = np.random.rand(4, 4).astype(dtype)
        a = constant_op.constant(
            [float(x) for x in inp.ravel(order="C")],
            shape=[4, 4],
            dtype=dtypes.float32)
        slice_t = array_ops.slice(a, [0, 0], [2, 2])
        slice2_t = a[:2, :2]
        slice_val, slice2_val = self.evaluate([slice_t, slice2_t])
        self.assertAllEqual(slice_val, np.array(inp[:2, :2], dtype=np.float32))
        self.assertAllEqual(slice2_val, np.array(inp[:2, :2], dtype=np.float32))
        self.assertEqual(slice_val.shape, slice_t.get_shape())
        self.assertEqual(slice2_val.shape, slice2_t.get_shape())

  def testComplex(self):
    inp = np.random.rand(4, 10, 10, 4).astype("f")
    a = constant_op.constant(inp, dtype=dtypes.float32)

    x = np.random.randint(0, 9)
    z = np.random.randint(0, 9)
    if z > 0:
      y = np.random.randint(0, z)
    else:
      y = 0
    slice_t = a[:, x, y:z, :]
    self.assertAllEqual(slice_t, inp[:, x, y:z, :])

  def testRandom(self):
    # Random dims of rank 6
    input_shape = np.random.randint(0, 20, size=6)
    inp = np.random.rand(*input_shape).astype("f")
    a = constant_op.constant([float(x) for x in inp.ravel(order="C")],
                             shape=input_shape,
                             dtype=dtypes.float32)
    indices = [0 if x == 0 else np.random.randint(x) for x in input_shape]
    sizes = [
        np.random.randint(0, input_shape[i] - indices[i] + 1) for i in range(6)
    ]
    slice_t = array_ops.slice(a, indices, sizes)
    slice2_t = a[indices[0]:indices[0] + sizes[0],
                 indices[1]:indices[1] + sizes[1],
                 indices[2]:indices[2] + sizes[2],
                 indices[3]:indices[3] + sizes[3],
                 indices[4]:indices[4] + sizes[4],
                 indices[5]:indices[5] + sizes[5]]

    slice_val, slice2_val = self.evaluate([slice_t, slice2_t])

    expected_val = inp[indices[0]:indices[0] + sizes[0],
                       indices[1]:indices[1] + sizes[1],
                       indices[2]:indices[2] + sizes[2],
                       indices[3]:indices[3] + sizes[3],
                       indices[4]:indices[4] + sizes[4],
                       indices[5]:indices[5] + sizes[5]]
    self.assertAllEqual(slice_val, expected_val)
    self.assertAllEqual(slice2_val, expected_val)
    self.assertEqual(expected_val.shape, slice_t.get_shape())
    self.assertEqual(expected_val.shape, slice2_t.get_shape())

  def testPartialShapeInference(self):
    z = array_ops.zeros((1, 2, 3))
    self.assertAllEqual(z.get_shape().as_list(), [1, 2, 3])

    m1 = array_ops.slice(z, [0, 0, 0], [-1, -1, -1])
    self.assertAllEqual(m1.get_shape().as_list(), [1, 2, 3])

    m2 = array_ops.slice(z, [0, 0, 0], [constant_op.constant(1) + 0, 2, -1])
    self.assertAllEqual(m2.get_shape().as_list(), [1, 2, 3])

  def _testGradientSlice(self, input_shape, slice_begin, slice_size):
    """Checks the gradient of array_ops.slice via gradients_impl."""
    with self.cached_session():
      num_inputs = np.prod(input_shape)
      num_grads = np.prod(slice_size)
      inp = np.random.rand(num_inputs).astype("f").reshape(input_shape)
      a = constant_op.constant(
          [float(x) for x in inp.ravel(order="C")],
          shape=input_shape,
          dtype=dtypes.float32)
      slice_t = array_ops.slice(a, slice_begin, slice_size)
      grads = np.random.rand(num_grads).astype("f").reshape(slice_size)
      grad_tensor = constant_op.constant(grads)
      grad = gradients_impl.gradients(slice_t, [a], grad_tensor)[0]
      result = self.evaluate(grad)

      # Create a zero tensor of the input shape and place
      # the grads into the right location to compare against TensorFlow.
      np_ans = np.zeros(input_shape)
      slices = []
      for i in range(len(input_shape)):
        slices.append(slice(slice_begin[i], slice_begin[i] + slice_size[i]))
      # Index with a tuple: indexing an ndarray with a *list* of slices is
      # an error in modern NumPy (deprecated since 1.15).
      np_ans[tuple(slices)] = grads

      self.assertAllClose(np_ans, result)

  def _testGradientSliceTape(self, input_shape, slice_begin, slice_size):
    """Checks the gradient of array_ops.slice via GradientTape."""
    with backprop.GradientTape() as tape:
      num_inputs = np.prod(input_shape)
      num_grads = np.prod(slice_size)
      inp = np.random.rand(num_inputs).astype("f").reshape(input_shape)
      a = constant_op.constant([float(x) for x in inp.ravel(order="C")],
                               shape=input_shape,
                               dtype=dtypes.float32)
      tape.watch(a)
      slice_t = array_ops.slice(a, slice_begin, slice_size)
    grads = np.random.rand(num_grads).astype("f").reshape(slice_size)
    grad_tensor = constant_op.constant(grads)
    grad = tape.gradient(slice_t, [a], grad_tensor)[0]
    result = self.evaluate(grad)

    # Create a zero tensor of the input shape and place
    # the grads into the right location to compare against TensorFlow.
    np_ans = np.zeros(input_shape)
    slices = []
    for i in range(len(input_shape)):
      slices.append(slice(slice_begin[i], slice_begin[i] + slice_size[i]))
    # Index with a tuple: indexing an ndarray with a *list* of slices is
    # an error in modern NumPy (deprecated since 1.15).
    np_ans[tuple(slices)] = grads

    self.assertAllClose(np_ans, result)

  def _testGradientVariableSize(self):
    with self.cached_session():
      inp = constant_op.constant([1.0, 2.0, 3.0], name="in")
      out = array_ops.slice(inp, [1], [-1])
      grad_actual = self.evaluate(gradients_impl.gradients(out, inp)[0])
    self.assertAllClose([0., 1., 1.], grad_actual)

  def _testGradientVariableSizeTape(self):
    with backprop.GradientTape() as tape:
      inp = constant_op.constant([1.0, 2.0, 3.0], name="in")
      tape.watch(inp)
      out = array_ops.slice(inp, [1], [-1])
    grad_actual = self.evaluate(tape.gradient(out, inp))
    self.assertAllClose([0., 1., 1.], grad_actual)

  def _testGradientVariableSize2D(self):
    # Regression test for bug in slice. A low-level bug in Eigen was causing
    # incorrect results for negative indices in multi-dimensional tensors.
    # See b/114318298.
    with self.cached_session():
      x = constant_op.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 7]])
      loss1 = math_ops.reduce_sum(x[:-1, :-1] * 1.0)
      loss2 = math_ops.reduce_sum(x[:-1][:, :-1])

      g1 = gradients_impl.gradients(loss1, x)[0]
      g2 = gradients_impl.gradients(loss2, x)[0]

      g1_val, g2_val = self.evaluate([g1, g2])
    self.assertAllEqual(g1_val, g2_val)

  def _testGradientVariableSize2DTape(self):
    # Regression test for bug in slice. A low-level bug in Eigen was causing
    # incorrect results for negative indices in multi-dimensional tensors.
    # See b/114318298.
    with backprop.GradientTape(persistent=True) as tape:
      x = constant_op.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 7]])
      tape.watch(x)
      loss1 = math_ops.reduce_sum(x[:-1, :-1] * 1.0)
      loss2 = math_ops.reduce_sum(x[:-1][:, :-1])

    g1 = tape.gradient(loss1, x)
    g2 = tape.gradient(loss2, x)

    g1_val, g2_val = self.evaluate([g1, g2])
    self.assertAllEqual(g1_val, g2_val)

  def testGradientsAll(self):
    with ops.Graph().as_default():
      # Slice the middle square out of a 4x4 input
      self._testGradientSlice([4, 4], [1, 1], [2, 2])

      # Slice the upper left square out of a 4x4 input
      self._testGradientSlice([4, 4], [0, 0], [2, 2])

      # Slice a non-square input starting from (2,1)
      self._testGradientSlice([4, 4], [2, 1], [1, 2])

      # Slice a 3D tensor
      self._testGradientSlice([3, 3, 3], [0, 1, 0], [2, 1, 1])

      # Use -1 as a slice dimension.
      self._testGradientVariableSize()

      # Use -1 as a slice dimension on a 2D tensor.
      self._testGradientVariableSize2D()

  def testGradientsAllTape(self):
    # Slice the middle square out of a 4x4 input
    self._testGradientSliceTape([4, 4], [1, 1], [2, 2])

    # Slice the upper left square out of a 4x4 input
    self._testGradientSliceTape([4, 4], [0, 0], [2, 2])

    # Slice a non-square input starting from (2,1)
    self._testGradientSliceTape([4, 4], [2, 1], [1, 2])

    # Slice a 3D tensor
    self._testGradientSliceTape([3, 3, 3], [0, 1, 0], [2, 1, 1])

    # Use -1 as a slice dimension.
    self._testGradientVariableSizeTape()

    # Use -1 as a slice dimension on a 2D tensor.
    self._testGradientVariableSize2DTape()

  def testNotIterable(self):
    # Tensor iteration is disabled explicitly for only graph mode.
    with ops.Graph().as_default():
      # NOTE(mrry): If we register __getitem__ as an overloaded
      # operator, Python will valiantly attempt to iterate over the
      # Tensor from 0 to infinity.  This test ensures that this
      # unintended behavior is prevented.
      c = constant_op.constant(5.0)
      with self.assertRaisesRegex(errors_impl.OperatorNotAllowedInGraphError,
                                  "Iterating over a symbolic `tf.Tensor`"):
        for _ in c:
          pass

  def testComputedShape(self):
    # NOTE(mrry): We cannot currently handle partially-known values,
    # because `tf.slice()` uses -1 to specify a wildcard size, and
    # this can't be handled using the
    # `tensor_util.constant_value_as_shape()` trick.
    a = constant_op.constant([[1, 2, 3], [4, 5, 6]])
    begin = constant_op.constant(0)
    size = constant_op.constant(1)
    b = array_ops.slice(a, [begin, 0], [size, 2])
    self.assertEqual([1, 2], b.get_shape())

    # placeholders only make sense in a graph.
    with ops.Graph().as_default():
      a = constant_op.constant([[1, 2, 3], [4, 5, 6]])
      begin = array_ops.placeholder(dtypes.int32, shape=())
      c = array_ops.slice(a, [begin, 0], [-1, 2])
      self.assertEqual([None, 2], c.get_shape().as_list())

  def testSliceOfSlice(self):
    with self.session():
      a = constant_op.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
      b = a[1:, :]
      c = b[:-1, :]
      d = c[1, :]
      res = 2 * d - c[1, :] + a[2, :] - 2 * b[-2, :]
      self.assertAllEqual([0, 0, 0], self.evaluate(res))
# Run the test suite when executed as a script.
if __name__ == "__main__":
  test.main()
| |
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Tests for ``flocker.control._persistence``.
"""
from uuid import uuid4
from eliot.testing import validate_logging, assertHasMessage, assertHasAction
from twisted.internet import reactor
from twisted.trial.unittest import TestCase, SynchronousTestCase
from twisted.python.filepath import FilePath
from pyrsistent import PRecord
from .._persistence import (
ConfigurationPersistenceService, wire_decode, wire_encode,
_LOG_SAVE, _LOG_STARTUP, LeaseService,
)
from .._model import (
Deployment, Application, DockerImage, Node, Dataset, Manifestation,
AttachedVolume, SERIALIZABLE_CLASSES, NodeState)
# NOTE: this module is Python 2 code (``unicode`` builtin, byte-string paths).
# A dataset with a random id, used by the fixtures below.
DATASET = Dataset(dataset_id=unicode(uuid4()),
                  metadata={u"name": u"myapp"})
# The primary manifestation of DATASET.
MANIFESTATION = Manifestation(dataset=DATASET, primary=True)
# A single-node deployment with one application whose volume is attached
# to MANIFESTATION; shared by the persistence tests below.
TEST_DEPLOYMENT = Deployment(
    nodes=[Node(uuid=uuid4(),
                applications=[
                    Application(
                        name=u'myapp',
                        image=DockerImage.from_string(u'postgresql:7.6'),
                        volume=AttachedVolume(
                            manifestation=MANIFESTATION,
                            mountpoint=FilePath(b"/xxx/yyy"))
                    )],
                manifestations={DATASET.dataset_id: MANIFESTATION})])
class FakePersistenceService(object):
    """
    A very simple fake persistence service that does nothing.
    """
    def __init__(self):
        # Start out with an empty deployment, mirroring the real service.
        self._deployment = Deployment(nodes=frozenset())

    def save(self, deployment):
        """Remember ``deployment`` as the current configuration."""
        self._deployment = deployment

    def get(self):
        """Return the most recently saved deployment."""
        return self._deployment
class LeaseServiceTests(TestCase):
    """
    Tests for ``LeaseService``.
    """
    def service(self):
        """
        Start a lease service and schedule it to stop.

        :return: Started ``LeaseService``.
        """
        service = LeaseService(reactor, FakePersistenceService())
        service.startService()
        self.addCleanup(service.stopService)
        return service

    def test_expired_lease_removed(self):
        """
        A lease that has expired is removed from the persisted
        configuration.

        XXX Leases cannot be manipulated in this branch. See FLOC-2375.
        This is a skeletal test that merely ensures the call to
        ``update_leases`` takes place when ``_expire`` is called and should
        be rewritten to test the updated configuration once the configuration
        is aware of Leases.
        """
        service = self.service()
        d = service._expire()

        def check_expired(updated):
            # update_leases currently yields None; see the XXX above.
            self.assertIsNone(updated)
        d.addCallback(check_expired)
        return d
class ConfigurationPersistenceServiceTests(TestCase):
    """
    Tests for ``ConfigurationPersistenceService``.
    """
    def service(self, path, logger=None):
        """
        Start a service, schedule its stop.
        :param FilePath path: Where to store data.
        :param logger: Optional eliot ``Logger`` to set before startup.
        :return: Started ``ConfigurationPersistenceService``.
        """
        service = ConfigurationPersistenceService(reactor, path)
        if logger is not None:
            # Swap in the test logger before startService() so startup
            # log messages are captured by validate_logging.
            self.patch(service, "logger", logger)
        service.startService()
        self.addCleanup(service.stopService)
        return service
    def test_empty_on_start(self):
        """
        If no configuration was previously saved, starting a service results
        in an empty ``Deployment``.
        """
        service = self.service(FilePath(self.mktemp()))
        self.assertEqual(service.get(), Deployment(nodes=frozenset()))
    def test_directory_is_created(self):
        """
        If a directory does not exist in given path, it is created.
        """
        path = FilePath(self.mktemp())
        self.service(path)
        self.assertTrue(path.isdir())
    def test_file_is_created(self):
        """
        If no configuration file exists in the given path, it is created.
        """
        path = FilePath(self.mktemp())
        self.service(path)
        self.assertTrue(path.child(b"current_configuration.v1.json").exists())
    @validate_logging(assertHasAction, _LOG_SAVE, succeeded=True,
                      startFields=dict(configuration=TEST_DEPLOYMENT))
    def test_save_then_get(self, logger):
        """
        A configuration that was saved can subsequently be retrieved.
        """
        service = self.service(FilePath(self.mktemp()), logger)
        d = service.save(TEST_DEPLOYMENT)
        d.addCallback(lambda _: service.get())
        d.addCallback(self.assertEqual, TEST_DEPLOYMENT)
        return d
    @validate_logging(assertHasMessage, _LOG_STARTUP,
                      fields=dict(configuration=TEST_DEPLOYMENT))
    def test_persist_across_restarts(self, logger):
        """
        A configuration that was saved can be loaded from a new service.
        """
        path = FilePath(self.mktemp())
        # First service is started and stopped manually (not via
        # self.service()) so it is fully shut down before the second
        # service starts on the same path.
        service = ConfigurationPersistenceService(reactor, path)
        service.startService()
        d = service.save(TEST_DEPLOYMENT)
        d.addCallback(lambda _: service.stopService())
        def retrieve_in_new_service(_):
            new_service = self.service(path, logger)
            self.assertEqual(new_service.get(), TEST_DEPLOYMENT)
        d.addCallback(retrieve_in_new_service)
        return d
    def test_register_for_callback(self):
        """
        Callbacks can be registered that are called every time there is a
        change saved.
        """
        service = self.service(FilePath(self.mktemp()))
        callbacks = []
        callbacks2 = []
        service.register(lambda: callbacks.append(1))
        d = service.save(TEST_DEPLOYMENT)
        def saved(_):
            # The second callback is registered after the first save, so
            # it should only observe the second save.
            service.register(lambda: callbacks2.append(1))
            return service.save(TEST_DEPLOYMENT)
        d.addCallback(saved)
        def saved_again(_):
            self.assertEqual((callbacks, callbacks2), ([1, 1], [1]))
        d.addCallback(saved_again)
        return d
    @validate_logging(
        lambda test, logger:
            test.assertEqual(len(logger.flush_tracebacks(ZeroDivisionError)), 1))
    def test_register_for_callback_failure(self, logger):
        """
        Failed callbacks don't prevent later callbacks from being called.
        """
        service = self.service(FilePath(self.mktemp()), logger)
        callbacks = []
        # First callback raises ZeroDivisionError; the decorator above
        # asserts exactly one traceback was logged for it.
        service.register(lambda: 1/0)
        service.register(lambda: callbacks.append(1))
        d = service.save(TEST_DEPLOYMENT)
        def saved(_):
            self.assertEqual(callbacks, [1])
        d.addCallback(saved)
        return d
class WireEncodeDecodeTests(SynchronousTestCase):
    """
    Tests for ``wire_encode`` and ``wire_decode``.
    """
    def test_encode_to_bytes(self):
        """
        ``wire_encode`` converts the given object to ``bytes``.
        """
        self.assertIsInstance(wire_encode(TEST_DEPLOYMENT), bytes)
    def test_roundtrip(self):
        """
        ``wire_decode`` returns the object passed to ``wire_encode``.
        """
        self.assertEqual(TEST_DEPLOYMENT,
                         wire_decode(wire_encode(TEST_DEPLOYMENT)))
    def test_no_arbitrary_decoding(self):
        """
        ``wire_decode`` will not decode classes that are not in
        ``SERIALIZABLE_CLASSES``.
        """
        class Temp(PRecord):
            """A class."""
        SERIALIZABLE_CLASSES.append(Temp)
        def cleanup():
            # The test body removes Temp itself below; guard so the
            # cleanup does not fail when that already happened.
            if Temp in SERIALIZABLE_CLASSES:
                SERIALIZABLE_CLASSES.remove(Temp)
        self.addCleanup(cleanup)
        data = wire_encode(Temp())
        SERIALIZABLE_CLASSES.remove(Temp)
        # Possibly future versions might throw exception, the key point is
        # that the returned object is not a Temp instance.
        self.assertFalse(isinstance(wire_decode(data), Temp))
    def test_complex_keys(self):
        """
        Objects with attributes that are ``PMap`` instances with complex
        keys (i.e. not strings) can be roundtripped.
        """
        node_state = NodeState(hostname=u'127.0.0.1', uuid=uuid4(),
                               manifestations={}, paths={},
                               devices={uuid4(): FilePath(b"/tmp")})
        self.assertEqual(node_state, wire_decode(wire_encode(node_state)))
| |
import io
import logging
import shutil
import subprocess
from collections import namedtuple
from pathlib import Path
from subprocess import Popen
from tempfile import TemporaryDirectory
from textwrap import dedent
import pytest
import Pegasus
from Pegasus import yaml
from Pegasus.braindump import Braindump
from Pegasus.client._client import (
Client,
PegasusClientError,
Result,
Workflow,
WorkflowInstanceError,
from_env,
)
def test_PegasusClientError():
    """PegasusClientError joins stdout/stderr into output and keeps the result."""
    FakeResult = namedtuple("return_value", ["stdout", "stderr"])
    rv = FakeResult(stdout="stdout", stderr="stderr")
    with pytest.raises(PegasusClientError) as exc_info:
        raise PegasusClientError("pegasus command failed", rv)
    err = exc_info.value
    assert err.output == "stdout\nstderr"
    assert err.result == rv
def test_from_env(mocker):
    """from_env() locates the pegasus-version executable via shutil.which."""
    mocker.patch("shutil.which", return_value="/usr/bin/pegasus-version")
    try:
        from_env()
    except ValueError as err:
        pytest.fail("should not have thrown {}".format(err))
    shutil.which.assert_called_once_with("pegasus-version")
def test_from_env_no_pegasus_home(monkeypatch):
    """from_env() raises ValueError when pegasus-version is not on PATH.

    PATH is pointed at a directory that does not contain the
    pegasus-version executable, so the lookup must fail.
    """
    monkeypatch.setenv("PATH", "/tmp")
    with pytest.raises(ValueError) as e:
        from_env()
    # Assert against the exception itself, not the ExceptionInfo
    # wrapper: str() on ExceptionInfo is deprecated in modern pytest
    # and only worked by accident (it included the repr).
    assert "PEGASUS_HOME not found" in str(e.value)
@pytest.fixture(scope="function")
def mock_subprocess(mocker):
    """Patch subprocess.Popen with a stub whose streams are in-memory."""

    class FakePopen:
        """Minimal Popen stand-in: canned stdout/stderr, already finished."""

        def __init__(self):
            self.stdout = io.BytesIO(b'{"key":"value"}')
            self.stderr = io.BytesIO(b"some initial binary data: \x00\x01\n")
            self.returncode = 0

        def poll(self):
            # The fake process has always "already exited" successfully.
            return 0

        def __del__(self):
            self.stdout.close()
            self.stderr.close()

    # Renamed from ``Popen`` so the stub no longer shadows the
    # ``subprocess.Popen`` name imported at module level.
    mocker.patch("subprocess.Popen", return_value=FakePopen())
@pytest.fixture(scope="function")
def client():
    # A Client rooted at a fake install prefix; commands resolve to
    # /path/bin/pegasus-* in the assertions below.
    return Client("/path")
class TestClient:
    """Tests for the Pegasus ``Client`` command wrappers.

    The ``client`` fixture supplies a Client rooted at ``/path``;
    ``mock_subprocess`` replaces ``subprocess.Popen`` with an in-memory
    stub, so the command-construction tests assert on the argv that
    would have been executed.  The ``_handle_stream`` tests fork a real
    python3 child process.
    """
    @pytest.mark.parametrize("log_lvl", [(logging.INFO), (logging.ERROR)])
    def test__handle_stream(self, client, caplog, log_lvl):
        """_handle_stream copies the stream into dst and logs each line at log_lvl."""
        test_logger = logging.getLogger("handle_stream_test")
        caplog.set_level(log_lvl)
        # fork process to print 0\n1\n..4\n"
        proc = Popen(
            ["python3", "-c", 'exec("for i in range(5):\\n\\tprint(i)\\n")'],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        stuff = []
        # invoke stream handler
        Client._handle_stream(
            proc=proc,
            stream=proc.stdout,
            dst=stuff,
            logger=test_logger,
            log_lvl=log_lvl,
        )
        assert stuff == [b"0\n", b"1\n", b"2\n", b"3\n", b"4\n"]
        for t in caplog.record_tuples:
            if t[0] == "handle_stream_test":
                assert t[1] == log_lvl
    def test__handle_stream_no_logging(self, client, caplog):
        """Without a logger argument, _handle_stream logs nothing."""
        logging.getLogger("handle_stream_test")
        caplog.set_level(logging.DEBUG)
        # fork process to print 0\n1\n..4\n"
        proc = Popen(
            ["python3", "-c", 'exec("for i in range(5):\\n\\tprint(i)\\n")'],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        stuff = []
        # invoke stream handler
        Client._handle_stream(proc=proc, stream=proc.stdout, dst=stuff)
        assert stuff == [b"0\n", b"1\n", b"2\n", b"3\n", b"4\n"]
        for t in caplog.record_tuples:
            if t[0] == "handle_stream_test":
                pytest.fail(
                    "nothing should have been logged under logger: handle_stream_test"
                )
    def test__handle_stream_invalid_log_lvl(self, client):
        """A non-numeric log level makes _handle_stream raise ValueError."""
        test_logger = logging.getLogger("handle_stream_test")
        # fork process to print 0\n1\n..4\n"
        proc = Popen(
            ["python3", "-c", 'exec("for i in range(5):\\n\\tprint(i)\\n")'],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        stuff = []
        # invoke stream handler
        with pytest.raises(ValueError) as e:
            Client._handle_stream(
                proc=proc,
                stream=proc.stdout,
                dst=stuff,
                logger=test_logger,
                log_lvl="INVALID_LOG_LEVEL",
            )
        assert "invalid log_lvl: INVALID_LOG_LEVEL" in str(e)
        # for good measure
        proc.kill()
    def test_plan(self, mocker, mock_subprocess, client):
        """plan() assembles the full pegasus-plan argv from its keyword args."""
        mocker.patch(
            "Pegasus.client._client.Result.json",
            return_value={"submit_dir": "/submit_dir"},
        )
        mocker.patch(
            "Pegasus.client._client.Workflow._get_braindump",
            return_value=Braindump(user="ryan"),
        )
        wf_instance = client.plan(
            abstract_workflow="wf.yml",
            basename="test_basename",
            cache=["/cache_file1", "/cache_file2"],
            cleanup="leaf",
            cluster=["horizontal", "label"],
            conf="pegasus.conf",
            dir="/dir",
            force=True,
            force_replan=True,
            forward=["arg1", "opt1=value"],
            inherited_rc_files=["f1", "f2"],
            input_dirs=["/input_dir1", "/input_dir2"],
            java_options=["mx1024m", "ms512m"],
            job_prefix="job_pref",
            output_dir="/output_dir",
            output_sites=["local", "other_site"],
            quiet=3,
            random_dir="/random/dir",
            relative_dir="/relative_dir",
            relative_submit_dir="rsd",
            reuse=["/submit_dir1", "/submit_dir2"],
            sites=["site1", "site2"],
            staging_sites={"es1": "ss1", "es2": "ss2"},
            submit=True,
            verbose=3,
            **{"pegasus.mode": "development"},
        )
        subprocess.Popen.assert_called_once_with(
            [
                "/path/bin/pegasus-plan",
                "-Dpegasus.mode=development",
                "--basename",
                "test_basename",
                "--job-prefix",
                "job_pref",
                "--conf",
                "pegasus.conf",
                "--cluster",
                "horizontal,label",
                "--sites",
                "site1,site2",
                "--output-sites",
                "local,other_site",
                "--staging-site",
                "es1=ss1,es2=ss2",
                "--cache",
                "/cache_file1,/cache_file2",
                "--input-dir",
                "/input_dir1,/input_dir2",
                "--output-dir",
                "/output_dir",
                "--dir",
                "/dir",
                "--relative-dir",
                "/relative_dir",
                "--relative-submit-dir",
                "rsd",
                "--randomdir=/random/dir",
                "--inherited-rc-files",
                "f1,f2",
                "--cleanup",
                "leaf",
                "--reuse",
                "/submit_dir1,/submit_dir2",
                "-vvv",
                "-qqq",
                "--force",
                "--force-replan",
                "--forward",
                "arg1",
                "--forward",
                "opt1=value",
                "--submit",
                "-Xmx1024m",
                "-Xms512m",
                "wf.yml",
                "--json",
            ],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        assert wf_instance.braindump.user == "ryan"
    # The following tests verify that plan() type-checks the arguments
    # that must be collections (lists/dicts), rejecting plain strings.
    def test_plan_invalid_cluster(self, client):
        with pytest.raises(TypeError) as e:
            client.plan("wf.yml", cluster="cluster")
        assert "invalid cluster: cluster" in str(e)
    def test_plan_invalid_sites(self, client):
        with pytest.raises(TypeError) as e:
            client.plan("wf.yml", sites="local")
        assert "invalid sites: local" in str(e)
    def test_plan_invalid_staging_sites(self, client):
        with pytest.raises(TypeError) as e:
            client.plan("wf.yml", staging_sites="condorpool=origin")
        assert "invalid staging_sites: condorpool=origin" in str(e)
    def test_plan_invalid_cache(self, client):
        with pytest.raises(TypeError) as e:
            client.plan("wf.yml", cache="cache")
        assert "invalid cache: cache" in str(e)
    def test_plan_invalid_output_sites(self, client):
        with pytest.raises(TypeError) as e:
            client.plan("wf.yml", output_sites="site1,site2")
        assert "invalid output_sites: site1,site2" in str(e)
    def test_plan_invalid_input_dirs(self, client):
        with pytest.raises(TypeError) as e:
            client.plan("wf.yml", input_dirs="/input_dir")
        assert "invalid input_dirs: /input_dir" in str(e)
    def test_plan_invalid_inherited_rc_files(self, client):
        with pytest.raises(TypeError) as e:
            client.plan("wf.yml", inherited_rc_files="files")
        assert "invalid inherited_rc_files: files" in str(e)
    def test_plan_invalid_forward(self, client):
        with pytest.raises(TypeError) as e:
            client.plan("wf.yml", forward="forward")
        assert "invalid forward: forward" in str(e)
    def test_plan_invalid_java_options(self, client):
        with pytest.raises(TypeError) as e:
            client.plan("wf.yml", java_options="opts")
        assert "invalid java_options: opts" in str(e)
    def test_run(self, mock_subprocess, client):
        """run() builds the pegasus-run argv including --grid."""
        client.run("submit_dir", verbose=3, grid=True)
        subprocess.Popen.assert_called_once_with(
            ["/path/bin/pegasus-run", "-vvv", "--grid", "--json", "submit_dir"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
    def test_run_no_grid(self, mock_subprocess, client):
        """run() omits --grid when grid is not requested."""
        client.run("submit_dir", verbose=3)
        subprocess.Popen.assert_called_once_with(
            ["/path/bin/pegasus-run", "-vvv", "--json", "submit_dir"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
    def test_status(self, mock_subprocess, client):
        """status() builds the pegasus-status argv."""
        client.status("submit_dir", long=True, verbose=3)
        subprocess.Popen.assert_called_once_with(
            ["/path/bin/pegasus-status", "--long", "-vvv", "submit_dir"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
    @pytest.mark.parametrize(
        "pegasus_status_out, expected_dict",
        [
            (
                dedent(
                    """
                    (no matching jobs found in Condor Q)
                    UNRDY READY PRE IN_Q POST DONE FAIL %DONE STATE DAGNAME
                        1 2 3 4 5 1,000 1,000 100.0 Success *wf-name-0.dag
                    Summary: 1 DAG total (Success:1)
                    """
                ),
                {
                    "totals": {
                        "unready": 1,
                        "ready": 2,
                        "pre": 3,
                        "queued": 4,
                        "post": 5,
                        "succeeded": 1000,
                        "failed": 1000,
                        "percent_done": 100.0,
                        "total": 2015,
                    },
                    "dags": {
                        "root": {
                            "unready": 1,
                            "ready": 2,
                            "pre": 3,
                            "queued": 4,
                            "post": 5,
                            "succeeded": 1000,
                            "failed": 1000,
                            "percent_done": 100.0,
                            "state": "Success",
                            "dagname": "wf-name-0.dag",
                        }
                    },
                },
            ),
            (
                dedent(
                    """
                    STAT IN_STATE JOB
                    Run 01:10 appends-0 ( /nas/home/tanaka/workflows/test-workflow-1583372721 )
                    Summary: 1 Condor job total (R:1)
                    UNRDY READY PRE IN_Q POST DONE FAIL %DONE STATE DAGNAME
                        1 2 3 4 5 6 7 37.5 Failure *wf-name-0.dag
                    Summary: 1 DAG total (Failure:1)
                    """
                ),
                {
                    "totals": {
                        "unready": 1,
                        "ready": 2,
                        "pre": 3,
                        "queued": 4,
                        "post": 5,
                        "succeeded": 6,
                        "failed": 7,
                        "percent_done": 37.5,
                        "total": 28,
                    },
                    "dags": {
                        "root": {
                            "unready": 1,
                            "ready": 2,
                            "pre": 3,
                            "queued": 4,
                            "post": 5,
                            "succeeded": 6,
                            "failed": 7,
                            "percent_done": 37.5,
                            "state": "Failure",
                            "dagname": "wf-name-0.dag",
                        }
                    },
                },
            ),
            (
                dedent(
                    """
                    UNRDY READY PRE IN_Q POST DONE FAIL %DONE STATE DAGNAME
                        0 0 0 0 0 6 0 100.0 Success 00/00/analysis-wf_ID0000001/analysis-wf-0.dag
                        0 0 0 0 0 3 0 100.0 Success 00/00/sleep-wf_ID0000002/sleep-wf-0.dag
                        1 2 3 4 5 6 7 100.0 Success *wf-name-0.dag
                        0 0 0 0 0 20 0 100.0 TOTALS (20 jobs)
                    Summary: 3 DAGs total (Success:3)
                    """
                ),
                {
                    "totals": {
                        "unready": 1,
                        "ready": 2,
                        "pre": 3,
                        "queued": 4,
                        "post": 5,
                        "succeeded": 6,
                        "failed": 7,
                        "percent_done": 100.0,
                        "total": 28,
                    },
                    "dags": {
                        "root": {
                            "unready": 1,
                            "ready": 2,
                            "pre": 3,
                            "queued": 4,
                            "post": 5,
                            "succeeded": 6,
                            "failed": 7,
                            "percent_done": 100.0,
                            "state": "Success",
                            "dagname": "wf-name-0.dag",
                        }
                    },
                },
            ),
            (
                "get status should return None because this is invalid/unexpected status output",
                None,
            ),
        ],
    )
    def test__parse_status_output(self, pegasus_status_out, expected_dict):
        """_parse_status_output extracts the totals and root-DAG row; None on junk."""
        assert (
            Client._parse_status_output(pegasus_status_out, "wf-name") == expected_dict
        )
    @pytest.mark.parametrize(
        "status_output_str, expected_dict",
        [
            (
                dedent(
                    """
                    (no matching jobs found in Condor Q)
                    UNRDY READY PRE IN_Q POST DONE FAIL %DONE STATE DAGNAME
                        1 2 3 4 5 1,000 1,000 100.0 Success *wf-name-0.dag
                    Summary: 1 DAG total (Success:1)
                    """
                ).encode("utf-8"),
                {
                    "totals": {
                        "unready": 1,
                        "ready": 2,
                        "pre": 3,
                        "queued": 4,
                        "post": 5,
                        "succeeded": 1000,
                        "failed": 1000,
                        "percent_done": 100.0,
                        "total": 2015,
                    },
                    "dags": {
                        "root": {
                            "unready": 1,
                            "ready": 2,
                            "pre": 3,
                            "queued": 4,
                            "post": 5,
                            "succeeded": 1000,
                            "failed": 1000,
                            "percent_done": 100.0,
                            "state": "Success",
                            "dagname": "wf-name-0.dag",
                        }
                    },
                },
            ),
            (
                b"get status should return None because this is invalid/unexpected status output",
                None,
            ),
        ],
    )
    def test_get_status(self, mocker, client, status_output_str, expected_dict):
        """get_status() parses pegasus-status stdout bytes into the status dict."""
        mocker.patch(
            "Pegasus.client._client.Client._exec",
            return_value=Result(
                cmd=["/path/bin/pegasus-status", "--long", "submit_dir"],
                exit_code=0,
                stdout_bytes=status_output_str,
                stderr_bytes=b"",
            ),
        )
        assert client.get_status("wf-name", "submit_dir") == expected_dict
    @pytest.mark.parametrize(
        "status_output, expected_progress_bar",
        [
            (
                {
                    "totals": {
                        "unready": 0,
                        "ready": 0,
                        "pre": 0,
                        "queued": 0,
                        "post": 0,
                        "succeeded": 1000,
                        "failed": 1000,
                        "percent_done": 100.0,
                        "total": 2000,
                    },
                    "dags": {
                        "root": {
                            "unready": 0,
                            "ready": 0,
                            "pre": 0,
                            "queued": 0,
                            "post": 0,
                            "succeeded": 1000,
                            "failed": 1000,
                            "percent_done": 100.0,
                            "state": "Success",
                            "dagname": "wf-name-0.dag",
                        }
                    },
                },
                "\r[\x1b[1;32m#########################\x1b[0m] 100.0% ..Success (\x1b[1;34mUnready: 0\x1b[0m, \x1b[1;32mCompleted: 1000\x1b[0m, \x1b[1;33mQueued: 0\x1b[0m, \x1b[1;36mRunning: 0\x1b[0m, \x1b[1;31mFailed: 1000\x1b[0m)\n",
            ),
            (
                {
                    "totals": {
                        "unready": 4,
                        "ready": 0,
                        "pre": 0,
                        "queued": 0,
                        "post": 0,
                        "succeeded": 3,
                        "failed": 1,
                        "percent_done": 37.5,
                        "total": 8,
                    },
                    "dags": {
                        "root": {
                            "unready": 4,
                            "ready": 0,
                            "pre": 0,
                            "queued": 0,
                            "post": 0,
                            "succeeded": 3,
                            "failed": 1,
                            "percent_done": 37.5,
                            "state": "Failure",
                            "dagname": "wf-name-0.dag",
                        }
                    },
                },
                "\r[\x1b[1;32m#########\x1b[0m----------------] 37.5% ..Failure (\x1b[1;34mUnready: 4\x1b[0m, \x1b[1;32mCompleted: 3\x1b[0m, \x1b[1;33mQueued: 0\x1b[0m, \x1b[1;36mRunning: 0\x1b[0m, \x1b[1;31mFailed: 1\x1b[0m)\n",
            ),
        ],
    )
    def test_wait(self, mocker, capsys, client, status_output, expected_progress_bar):
        """wait() polls get_status and renders an ANSI progress bar to stdout."""
        mocker.patch(
            "Pegasus.client._client.Client.get_status", return_value=status_output
        )
        client.wait(root_wf_name="wf-name", submit_dir="submit_dir")
        Pegasus.client._client.Client.get_status.assert_called_once_with(
            root_wf_name="wf-name", submit_dir="submit_dir"
        )
        out, _ = capsys.readouterr()
        assert out == expected_progress_bar
    def test_remove(self, mock_subprocess, client):
        """remove() builds the pegasus-remove argv."""
        client.remove("submit_dir", verbose=3)
        subprocess.Popen.assert_called_once_with(
            ["/path/bin/pegasus-remove", "-vvv", "submit_dir"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
    def test_analyzer(self, mock_subprocess, client):
        """analyzer() builds the pegasus-analyzer argv."""
        client.analyzer("submit_dir", verbose=3)
        subprocess.Popen.assert_called_once_with(
            ["/path/bin/pegasus-analyzer", "-vvv", "submit_dir"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
    def test_statistics(self, mock_subprocess, client):
        """statistics() builds the pegasus-statistics argv."""
        client.statistics("submit_dir", verbose=3)
        subprocess.Popen.assert_called_once_with(
            ["/path/bin/pegasus-statistics", "-vvv", "submit_dir"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
    def test_graph(self, mock_subprocess, client):
        """graph() maps its keyword arguments onto pegasus-graphviz flags."""
        client.graph(
            workflow_file="workflow.yml",
            include_files=True,
            no_simplify=False,
            label="label",
            output="wf.dot",
            remove=["tr1", "tr2"],
            width=256,
            height=256,
        )
        subprocess.Popen.assert_called_once_with(
            [
                "/path/bin/pegasus-graphviz",
                "workflow.yml",
                "--files",
                "--nosimplify",
                "--label=label",
                "--output=wf.dot",
                "--remove=tr1",
                "--remove=tr2",
                "--width=256",
                "--height=256",
            ],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
    def test__exec(self, mock_subprocess, client):
        """_exec runs a command and rejects a missing cmd with ValueError."""
        client._exec("ls")
        with pytest.raises(ValueError) as e:
            client._exec(None)
        assert str(e.value) == "cmd is required"
@pytest.fixture(scope="function")
def make_result():
    """Factory fixture producing ``Result`` objects with overridable fields."""

    def _make_result(cmd="command", exit_code=0, stdout=b"", stderr=b""):
        return Result(cmd, exit_code, stdout, stderr)

    return _make_result
def test_raise_exit_code(make_result):
    """raise_exit_code is a no-op on success and raises on a non-zero code."""
    ok = make_result()
    assert ok.raise_exit_code() is None
    with pytest.raises(ValueError) as excinfo:
        failed = make_result(exit_code=1)
        failed.raise_exit_code()
    # The failing Result is attached as the second exception argument.
    assert excinfo.value.args[1] == failed
def test_empty(make_result):
    """A Result with empty streams yields empty strings and no parsed payloads."""
    r = make_result()
    assert (r.output, r.stdout, r.stderr) == ("", "", "")
    assert r.json is None
    assert r.yaml is None
    assert r.yaml_all is None
def test_output(make_result):
    """output decodes the captured stdout bytes to text."""
    assert make_result(stdout=b"test").output == "test"
def test_output_fail(make_result):
    """output raises ValueError when stdout was not captured.

    Previously this test accessed ``r.stdout``, making it an exact
    duplicate of ``test_stdout_fail`` and leaving the failure path of
    ``Result.output`` untested.
    """
    r = make_result(stdout=None)
    with pytest.raises(ValueError) as e:
        r.output
    # output is built from stdout (see test_PegasusClientError), so the
    # underlying stdout error surfaces -- confirm message if output ever
    # gains its own error text.
    assert str(e.value) == "stdout not captured"
def test_stdout(make_result):
    """stdout decodes the captured bytes to text."""
    assert make_result(stdout=b"test").stdout == "test"
def test_stdout_fail(make_result):
    """Accessing stdout raises ValueError when it was not captured."""
    uncaptured = make_result(stdout=None)
    with pytest.raises(ValueError) as excinfo:
        uncaptured.stdout
    assert str(excinfo.value) == "stdout not captured"
def test_stderr(make_result):
    """stderr decodes the captured bytes to text."""
    assert make_result(stderr=b"test").stderr == "test"
def test_stderr_fail(make_result):
    """Accessing stderr raises ValueError when it was not captured."""
    uncaptured = make_result(stderr=None)
    with pytest.raises(ValueError) as excinfo:
        uncaptured.stderr
    assert str(excinfo.value) == "stderr not captured"
def test_json(make_result):
    """json parses stdout as a JSON document."""
    parsed = make_result(stdout=b'{"a": 1}').json
    assert isinstance(parsed, dict)
    assert parsed["a"] == 1
def test_yaml(make_result):
    """yaml parses stdout as a single YAML document."""
    parsed = make_result(stdout=b"a: 1").yaml
    assert isinstance(parsed, dict)
    assert parsed["a"] == 1
def test_yaml_all(make_result):
    # yaml_all parses a multi-document YAML stream into an iterable of
    # dicts.  The literal's internal indentation is part of the payload.
    r = make_result(
        stdout=b"""---
        a: 1
        ---
        b: 2
        """
    )
    d = [y for y in r.yaml_all]
    assert isinstance(d, list)
    assert len(d) == 2
    assert d[0]["a"] == 1
    assert d[1]["b"] == 2
class TestWorkflow:
    """Tests for ``Workflow``'s braindump-file handling."""

    def test__get_braindump(self):
        """A braindump.yml in the submit dir is parsed into a Braindump."""
        # create a fake temporary submit dir and braindump.yml file
        with TemporaryDirectory() as tmp_dir:
            braindump_file = Path(tmp_dir) / "braindump.yml"
            with braindump_file.open("w+") as fh:
                yaml.dump({"user": "ryan", "submit_dir": "/submit_dir"}, fh)
                fh.seek(0)
            loaded = Workflow._get_braindump(braindump_file.parent)
        assert loaded.user == "ryan"
        assert loaded.submit_dir == Path("/submit_dir")

    def test_try_get_missing_braindump(self):
        """A submit dir without braindump.yml raises WorkflowInstanceError."""
        with TemporaryDirectory() as tmp_dir:
            with pytest.raises(WorkflowInstanceError) as excinfo:
                Workflow._get_braindump(tmp_dir)
            assert "Unable to load braindump file" in str(excinfo)
| |
import datetime
from deform.exception import ValidationFailure
from pyramid.httpexceptions import HTTPSeeOther
from pyramid.renderers import render_to_response
from brouz import accounting
from brouz import enums
from brouz.forms import make_add_form
from brouz.forms import make_edit_form
from brouz.i18n import _
from brouz.models import CATEGORY_EXPENDITURE_BANKING_CHARGES
from brouz.models import CATEGORY_EXPENDITURE_CET
from brouz.models import CATEGORY_EXPENDITURE_CONFERENCES
from brouz.models import CATEGORY_EXPENDITURE_DEDUCTIBLE_CSG
from brouz.models import CATEGORY_EXPENDITURE_FEES_NO_RETROCESSION
from brouz.models import CATEGORY_EXPENDITURE_FIXED_ASSETS
from brouz.models import CATEGORY_EXPENDITURE_HARDWARE_FURNITURE_RENTAL
from brouz.models import CATEGORY_EXPENDITURE_INSURANCE_PREMIUM
from brouz.models import CATEGORY_EXPENDITURE_OBLIGATORY_SOCIAL_CHARGES
from brouz.models import CATEGORY_EXPENDITURE_OFFICE_FURNITURE
from brouz.models import CATEGORY_EXPENDITURE_OPTIONAL_SOCIAL_CHARGES
from brouz.models import CATEGORY_EXPENDITURE_OTHER_MISC_EXPENSES
from brouz.models import CATEGORY_EXPENDITURE_OTHER_TAXES
from brouz.models import CATEGORY_EXPENDITURE_PURCHASE
from brouz.models import CATEGORY_EXPENDITURE_SMALL_FURNITURE
from brouz.models import CATEGORY_EXPENDITURE_TRAVEL_EXPENSES
from brouz.models import CATEGORY_EXPENDITURE_VAT
from brouz.models import CATEGORY_INCOME_MISC
from brouz.models import CATEGORY_REMUNERATION
from brouz.models import DBSession
from brouz.models import Transaction
from brouz.utils import TemplateAPI
def home(request):
    """Render the home page: the transaction ledger with running balances."""
    session = DBSession()
    # Show stand-alone transactions plus composite parents; the lines
    # belonging to a composite (part_of set) are represented by their
    # parent row.
    lines = session.query(Transaction).filter_by(part_of=None, composite=False)
    lines = lines.union(session.query(Transaction).filter_by(composite=True))
    lines = lines.order_by(Transaction.date,
                           Transaction.party,
                           Transaction.title)
    # Running balance after each line.  The hard-coded seed is in
    # eurocents -- presumably the account balance predating the recorded
    # transactions; TODO confirm and move it to configuration.
    balances = [accounting.Price(2494978)]
    for line in lines:
        balances.append(balances[-1] + line.signed_amount)
    # Net movement over the listed lines (excludes the seed amount).
    balance = sum((line.signed_amount for line in lines), accounting.Price(0))
    api = TemplateAPI(request, 'home')
    bindings = {'api': api,
                'lines': lines,
                'balance': balance,
                'balances': balances}
    return render_to_response('templates/home.pt', bindings)
def add_unique_form(request, form=None):
    """Render the page for adding a single (non-composite) transaction.

    ``form`` may be a failed validation result to re-render; otherwise a
    fresh add form is built.
    """
    if form is None:
        form = make_add_form(request, composite=False)
    bindings = {'api': TemplateAPI(request, 'add'),
                'form': form}
    return render_to_response('templates/add.pt', bindings)
def add_unique(request):
    """Handle submission of the single-transaction form.

    On validation failure the add form is re-rendered with the failing
    form state; on success the transaction is stored and the user is
    redirected to the home page.
    """
    form = make_add_form(request, composite=False)
    try:
        data = form.validate(request.POST.items())
    except ValidationFailure, e:
        return add_unique_form(request, e)
    data['composite'] = False
    data['part_of'] = None
    # Amounts are stored in the database as integer eurocents.
    data['net_amount'] = data['net_amount'].eurocents
    data['vat'] = data['vat'].eurocents
    transaction = Transaction(**data)
    session = DBSession()
    session.add(transaction)
    request.session.flash(_('The transaction has been added.'), 'success')
    return HTTPSeeOther(request.route_url('home'))
def add_composite_form(request, form=None):
    """Render the page for adding a composite (multi-line) transaction.

    ``form`` may be a failed validation result to re-render; otherwise a
    fresh composite add form is built.
    """
    if form is None:
        form = make_add_form(request, composite=True)
    bindings = {'api': TemplateAPI(request, 'add'),
                'form': form}
    return render_to_response('templates/add.pt', bindings)
def add_composite(request):
    """Handle submission of the composite-transaction form.

    Stores one parent ``Transaction`` (``composite=True``) carrying the
    summed amounts, plus one child transaction per line, each linked to
    the parent via ``part_of``.
    """
    form = make_add_form(request, composite=True)
    try:
        data = form.validate(request.POST.items())
    except ValidationFailure as e:
        # BUG FIX: this used to call add_unique_form(), re-rendering the
        # *unique*-transaction form after a failed composite submission.
        return add_composite_form(request, e)
    total_net_amount = sum(l['net_amount'] for l in data['lines'])
    total_vat = sum(l['vat'] for l in data['lines'])
    composite = Transaction(party=data['party'],
                            title=data['title'],
                            date=data['date'],
                            # The first line's category stands in for the
                            # whole composite transaction.
                            category=data['lines'][0]['category'],
                            is_meal=False,
                            net_amount=total_net_amount.eurocents,
                            vat=total_vat.eurocents,
                            mean=data['mean'],
                            invoice=data['invoice'],
                            composite=True,
                            part_of=None)
    session = DBSession()
    session.add(composite)
    session.flush()  # set transaction.id
    for line in data['lines']:
        t = Transaction(party=data['party'],
                        title=line['title'],
                        date=data['date'],
                        category=line['category'],
                        is_meal=line['is_meal'],
                        net_amount=line['net_amount'].eurocents,
                        vat=line['vat'].eurocents,
                        mean=data['mean'],
                        invoice=data['invoice'],
                        composite=False,
                        part_of=composite.id)
        session.add(t)
    request.session.flash(_('The transaction has been added.'), 'success')
    return HTTPSeeOther(request.route_url('home'))
def edit_form(request, form=None):
    """Render the edit form for an existing transaction.

    :param form: optional form (e.g. a ``ValidationFailure`` renderable
        from a failed POST); when given it is rendered instead of a
        freshly built edit form.
    """
    session = DBSession()
    transaction_id = request.matchdict['transaction_id']
    transaction = session.query(Transaction).\
        filter_by(id=transaction_id).one()
    if form is None:
        form = make_edit_form(request, transaction)
    # BUG FIX: 'data' used to be computed only when form was None, so
    # re-rendering after a validation failure raised NameError.
    data = transaction.__dict__
    if transaction.composite:
        # NOTE(review): mutating transaction.__dict__ adds a 'lines' key
        # to the mapped instance dict -- confirm this has no SQLAlchemy
        # persistence side effects.
        data['lines'] = []
        for txn in session.query(Transaction).\
                filter_by(part_of=transaction_id).all():
            data['lines'].append(txn.__dict__)
    api = TemplateAPI(request, 'add')
    bindings = {'api': api,
                'form': form,
                'data': data}
    return render_to_response('templates/edit.pt', bindings)
def edit(request):
    """Handle submission of the edit form for an existing transaction.

    Editing composite transactions is not implemented yet and raises
    ``NotImplementedError``.
    """
    session = DBSession()
    transaction_id = request.matchdict['transaction_id']
    transaction = session.query(Transaction).\
        filter_by(id=transaction_id).one()
    form = make_edit_form(request, transaction)
    try:
        data = form.validate(request.POST.items())
    except ValidationFailure, e:
        return edit_form(request, e)
    if transaction.composite:
        raise NotImplementedError
    else:
        data['composite'] = False
        data['part_of'] = None
        # Amounts are stored in the database as integer eurocents.
        data['net_amount'] = data['net_amount'].eurocents
        data['vat'] = data['vat'].eurocents
        transaction.update(**data)
    request.session.flash(_('The transaction has been modified.'), 'success')
    return HTTPSeeOther(request.route_url('home'))
def delete(request):
    """Delete a transaction after an explicit confirmation.

    Without the 'confirm' POST flag the user is sent back to the edit
    page with an error flash; otherwise the transaction is removed and
    the user is redirected home.
    """
    transaction_id = request.matchdict['transaction_id']
    if request.POST.get('confirm') != '1':
        msg = _('You did not confirm that you want to delete '
                'this transaction.')
        request.session.flash(msg, 'error')
        url = request.route_url('edit', transaction_id=transaction_id)
        return HTTPSeeOther(url)
    session = DBSession()
    transaction = session.query(Transaction).\
        filter_by(id=transaction_id).one()
    # FIXME: If transaction is composite, we should delete the related
    # transactions: filter_by(part_of=transaction_id)
    session.delete(transaction)
    request.session.flash(_('The transaction has been deleted.'), 'success')
    return HTTPSeeOther(request.route_url('home'))
def reports(request):
    """Render the yearly accounting report page.

    The year comes from the route match dict; when absent, the current
    year is used.  Previous/next-year links are only offered when
    transactions exist for those years.
    """
    try:
        year = int(request.matchdict['year'])
    except KeyError: # no matchdict
        year = datetime.datetime.now().year
    session = DBSession()
    if session.query(Transaction.date).\
       filter_by(year=year - 1).first() is None:
        previous_year = None
    else:
        previous_year = year - 1
    if session.query(Transaction.date).\
       filter_by(year=year + 1).first() is None:
        next_year = None
    else:
        next_year = year + 1
    lines = session.query(Transaction).\
        filter_by(year=year, composite=False).all()
    report = _calculate_report(lines)
    _update_report_with_fixed_assets(report, year, session)
    _update_report_with_vat(report, lines)
    report['vat_installments'] = accounting.calculate_vat_installments(
        session.query(Transaction), year)
    # FIXME: move these to separate function(s)?
    # The raw SQL below interpolates only integer constants (category
    # ids and the int() year), so there is no injection risk here.
    report['clients'] = ((party, accounting.Price(net_amount))
                         for party, net_amount in session.execute(
                             'SELECT party, SUM(net_amount) AS sum '
                             'FROM transactions '
                             'WHERE category=%d '
                             'AND year=%d '
                             'GROUP BY party '
                             'ORDER BY sum DESC' % (CATEGORY_INCOME_MISC, year)))
    report['providers'] = ((party, accounting.Price(net_amount))
                           for party, net_amount, in session.execute(
                               'SELECT party, SUM(net_amount) AS sum '
                               'FROM transactions '
                               'WHERE category NOT IN (%s, %s) '
                               'AND composite = 0 '
                               'AND part_of IS NULL '
                               'AND year=%d '
                               'GROUP BY party '
                               'ORDER BY sum DESC' % (
                                   CATEGORY_INCOME_MISC, CATEGORY_REMUNERATION, year)))
    report['remuneration'] = [(date, accounting.Price(net_amount))
                              for date, net_amount in session.query(
                                  Transaction.date, Transaction.net_amount).\
                              filter_by(category=CATEGORY_REMUNERATION, year=year).\
                              order_by(Transaction.date)]
    report['total_remuneration'] = sum(
        r[1] for r in report['remuneration'])
    bindings = {'api': TemplateAPI(request, 'reports'),
                'year': year,
                'previous_year': previous_year,
                'next_year': next_year,
                'report': report}
    return render_to_response('templates/reports.pt', bindings)
def _calculate_report(lines):
    """Build the yearly report dict for the French 2035-1K (2012) declaration.

    The comments below keep the declaration's exact French field labels
    ("intitules"); the short keys (aa, ba, bd, ...) are the form's box
    codes.
    """
    def _get_sum_of_lines(lines, category, compute=None):
        # Sum compute(line) (default: the net amount) over lines of the
        # given category, rounded for the declaration.
        total = 0
        if compute is None:
            compute = lambda line: line.net_amount
        for line in lines:
            if line.category == category:
                total += compute(line)
        return accounting.RoundedPrice(total)
    # AA: "Recettes encaissees y compris les remboursements de frais"
    # (income received, including expense reimbursements)
    aa = _get_sum_of_lines(lines, CATEGORY_INCOME_MISC)
    total_income = aa
    # BA: "Achats" (purchases)
    ba = _get_sum_of_lines(lines, CATEGORY_EXPENDITURE_PURCHASE)
    # BD: "TVA" (VAT)
    bd = _get_sum_of_lines(lines, CATEGORY_EXPENDITURE_VAT)
    # JY: "Contribution economique territoriale (CET)"
    # = "Contribution fonciere des entreprises (CFE)"
    # + CVAE (zero when revenue < ~150,000 EUR)
    jy = _get_sum_of_lines(lines, CATEGORY_EXPENDITURE_CET)
    # BS: "Autres impots" (other taxes)
    bs = _get_sum_of_lines(lines, CATEGORY_EXPENDITURE_OTHER_TAXES)
    # BV: "CSG deductible" (deductible CSG)
    bv = _get_sum_of_lines(lines, CATEGORY_EXPENDITURE_DEDUCTIBLE_CSG)
    # BG: "Location de materiel et de mobilier" (equipment/furniture rental)
    bg = _get_sum_of_lines(
        lines, CATEGORY_EXPENDITURE_HARDWARE_FURNITURE_RENTAL)
    # "Petit outillage" (small tools)
    bh_small_furniture = _get_sum_of_lines(
        lines, CATEGORY_EXPENDITURE_SMALL_FURNITURE)
    # "Primes d'assurance" (insurance premiums)
    bh_insurance_premium = _get_sum_of_lines(
        lines, CATEGORY_EXPENDITURE_INSURANCE_PREMIUM)
    # "Honoraires ne constituant pas des retrocessions"
    # (fees that are not retrocessions)
    bh_fees_no_retrocession = _get_sum_of_lines(
        lines, CATEGORY_EXPENDITURE_FEES_NO_RETROCESSION)
    # BH total: "travaux, fournitures et services exterieurs"
    # (external works, supplies and services)
    bh = accounting.sum_rounded_prices(
        bh_small_furniture, bh_insurance_premium, bh_fees_no_retrocession)
    # "Autres frais de deplacements (voyages...)" (other travel expenses);
    # meals are only partially deductible, hence the custom compute.
    bj_travel_expenses = _get_sum_of_lines(
        lines, CATEGORY_EXPENDITURE_TRAVEL_EXPENSES, accounting.get_meal_deductible)
    # BJ total: "transports et deplacements" (transport and travel)
    bj = accounting.sum_rounded_prices(bj_travel_expenses)
    # BT: "Charges sociales personnelles obligatoires"
    # (compulsory personal social charges)
    bt = _get_sum_of_lines(
        lines, CATEGORY_EXPENDITURE_OBLIGATORY_SOCIAL_CHARGES)
    # BU: "Charges sociales personnelles facultatives"
    # (optional personal social charges)
    bu = _get_sum_of_lines(lines, CATEGORY_EXPENDITURE_OPTIONAL_SOCIAL_CHARGES)
    # BK: total personal social charges
    bk = accounting.sum_rounded_prices(bt, bu)
    # "Frais de reception, de representation et de congres"
    # (reception, representation and conference expenses)
    bm_conferences = _get_sum_of_lines(lines, CATEGORY_EXPENDITURE_CONFERENCES)
    # "Fournitures de bureau, frais de documentation, de correspondance
    # et de telephone" (office supplies, documentation, post, telephone)
    bm_office = _get_sum_of_lines(lines, CATEGORY_EXPENDITURE_OFFICE_FURNITURE)
    # "Autres frais divers de gestion" (other misc management expenses)
    bm_other = _get_sum_of_lines(
        lines, CATEGORY_EXPENDITURE_OTHER_MISC_EXPENSES)
    # BM total: "frais divers de gestion" (misc management expenses)
    bm = accounting.sum_rounded_prices(bm_conferences, bm_office, bm_other)
    # BN: "Frais financiers" (financial costs)
    bn = _get_sum_of_lines(lines, CATEGORY_EXPENDITURE_BANKING_CHARGES)
    # BR: total expenditure
    total_expenditure = accounting.sum_rounded_prices(
        ba, bd, jy, bs, bv, bg, bh, bj, bk, bm, bn)
    report = {'aa': aa,
              'total_income': total_income,
              'ba': ba,
              'bd': bd,
              'jy': jy,
              'bs': bs,
              'bv': bv,
              'bh_small_furniture': bh_small_furniture,
              'bh_fees_no_retrocession': bh_fees_no_retrocession,
              'bh_insurance_premium': bh_insurance_premium,
              'bg': bg,
              'bh': bh,
              'bj_travel_expenses': bj_travel_expenses,
              'bj': bj,
              'bt': bt,
              'bu': bu,
              'bk': bk,
              'bm_conferences': bm_conferences,
              'bm_office': bm_office,
              'bm_other': bm_other,
              'bm': bm,
              'bn': bn,
              'total_expenditure': total_expenditure}
    return report
def _update_report_with_fixed_assets(report, year, session):
    """Update report with fixed assets ("immobilisations et
    amortissements").
    """
    query = session.query(Transaction).filter_by(
        category=CATEGORY_EXPENDITURE_FIXED_ASSETS, composite=False)
    assets = []
    for txn in query:
        schedule = accounting.calculate_amortization(txn)
        index = year - txn.year
        already_amortized = accounting.RoundedPrice(sum(schedule[:index]))
        try:
            current = schedule[index]
        except IndexError:
            current = 0  # already amortized
        assets.append({
            'title': txn.title,
            'date': txn.date,
            'amount': accounting.RoundedPrice(abs(txn.signed_amount)),
            'vat': accounting.RoundedPrice(txn.vat),
            'base': accounting.RoundedPrice(abs(txn.signed_amount) - txn.vat),
            'past': already_amortized,
            'this_year': current})
    report['fixed_assets'] = {
        'assets': assets,
        'total_amount': accounting.RoundedPrice(sum(a['amount'] for a in assets)),
        'total_base': accounting.RoundedPrice(sum(a['base'] for a in assets)),
        'total_past': accounting.RoundedPrice(sum(a['past'] for a in assets)),
        'total_this_year': accounting.RoundedPrice(
            sum(a['this_year'] for a in assets))}
# FIXME: is this really needed? Should we not use 'calculate_vat_installments()' instead?
def _update_report_with_vat(report, lines):
    """Update report with total VAT for incomes and expenditures (not
    including fixed assets.
    """
    def vat_total(predicate):
        # Sum the VAT of every line matching the predicate, then round.
        return accounting.RoundedPrice(
            sum(accounting.Price(line.vat) for line in lines
                if predicate(line)))

    report['vat_incomes'] = vat_total(
        lambda line: line.type == enums.TYPE_INCOME)
    report['vat_expenditures'] = vat_total(
        lambda line: line.type == enums.TYPE_EXPENDITURE)
    report['vat_expenditures_fixed_assets'] = vat_total(
        lambda line: line.category == CATEGORY_EXPENDITURE_FIXED_ASSETS)
    report['vat_expenditures_without_fixed_assets'] = vat_total(
        lambda line: line.type == enums.TYPE_EXPENDITURE
        and line.category != CATEGORY_EXPENDITURE_FIXED_ASSETS)
def autocomplete(request):
    """Return the distinct stored values of a Transaction column that
    contain the requested term (used by UI autocompletion widgets).
    """
    column = getattr(Transaction, request.matchdict['field'])
    session = DBSession()
    pattern = '%%%s%%' % request.GET['term']
    query = session.query(column).filter(column.like(pattern)).distinct()
    return [value for (value,) in query]
| |
# -*- coding: utf-8 -*-
"""
The MIT License
===============
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
'Software'), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import collections
import scipy.sparse
import numpy as np
from scipy import zeros,arange,mat,amin,amax
from scipy.sparse import vstack,hstack,csr_matrix,coo_matrix,lil_matrix,triu
from scipy.spatial import Delaunay
from scipy.linalg import *
from pyplasm import *
from matrixutil_no_accel import *
import time as tm
# ------------------------------------------------------------
# Logging & Timer
# ------------------------------------------------------------
# Verbosity threshold for log(); higher values produce more output.
logging_level = 0;
# 0 = no_logging
# 1 = few details
# 2 = many details
# 3 = many many details
def log(n, l):
    # Print each message in l when running as a script and the requested
    # level n does not exceed logging_level.
    # (Python 2 print statement: this module targets Python 2.)
    if __name__=="__main__" and n <= logging_level:
        for s in l:
            print "Log:", s;
# Simple wall-clock timer used to instrument sections of the pipeline.
timer = 1;  # 1 enables timing output, 0 disables it
timer_last = tm.time()  # timestamp of the most recent timer_start() call
def timer_start(s):
    # Record the current time and log the timer label s (at log level 3).
    global timer_last;
    if __name__=="__main__" and timer == 1:
        log(3, ["Timer start:" + s]);
        timer_last = tm.time();
def timer_stop():
    # Log the elapsed seconds since the last timer_start().
    global timer_last;
    if __name__=="__main__" and timer == 1:
        log(3, ["Timer stop :" + str(tm.time() - timer_last)]);
# ------------------------------------------------------------
self_test=False  # when True, modules may run their own demo code
#------------------------------------------------------------------
#--geometry layer (using PyPlasm)----------------------------------
#------------------------------------------------------------------
def View (model):
    """Center a HPC model on the origin and show it from a tilted viewpoint."""
    dims = range(1, 1 + RN(model))
    centroid = MED(dims)(model)
    centered = T(dims)(SCALARVECTPROD([-1, centroid]))(model)
    rotated = R([1, 2])(-PI / 4)(centered)
    VIEW(ROTN([-PI / 3, [1, -1, 0]])(rotated))
def bezier(points):
    """
    To create a Bezier curve of degree n from a list of n+1 d-points.
    Each point is given as a list of coordinates.
    Return a geometric object of HPC (Hierarchical Polyhedral Complex) type.
    """
    sampling = INTERVALS(1)(20)  # 20 sub-intervals of the [0,1] domain
    curve = BEZIERCURVE(points)
    return MAP(curve)(sampling)
def CCOMB(vectors):
    """
    To create the convex combination of a list of vectors.
    Each vector is given as a list of coordinates.
    Return a vector.
    """
    # FL-style combinator pipeline: sum the vectors (VECTSUM), compute
    # 1/len (DIV applied to the pair [1, LEN]), and scale the sum by that
    # factor -- i.e. the centroid of the input vectors.
    return (COMP([ SCALARVECTPROD,CONS([ COMP([ DIV, CONS([K(1),LEN]) ]), VECTSUM ]) ]))(vectors)
def EXPLODE (sx,sy,sz):
    """
    To explode a HPC scene, given three real scaling parameters.
    sx,sy,sz >= 1.0
    Return a function to be applied to a list of HPC (Hierarchical Polyhedral Complex) objects.
    """
    def explode0 (scene):
        """
        To explode a HPC scene, given as a list of HPC objects.
        Dimension-independent function (can be applied to points, edges, faces, cells, even mixed).
        Compute the centroid of each object, and apply to each of them a translation equal
        to the difference betwwen the scaled and the initial positions of its centroid.
        Return a single HPC object (the assembly of input objects, properly translated).
        """
        # centroid of each object: convex combination of its vertices
        centers = [CCOMB(S1(UKPOL(obj))) for obj in scene]
        scalings = len(centers) * [S([1,2,3])([sx,sy,sz])]
        # position of each centroid after scaling
        scaledCenters = [UK(APPLY(pair)) for pair in
                         zip(scalings, [MK(p) for p in centers])]
        # translation vector = scaled position - original position
        translVectors = [ VECTDIFF((p,q)) for (p,q) in zip(scaledCenters, centers) ]
        translations = [ T([1,2,3])(v) for v in translVectors ]
        return STRUCT([ t(obj) for (t,obj) in zip(translations,scene) ])
    return explode0
def MKPOLS (model):
    """
    To MaKe a list of HPC objects from a LAR model.
    A LAR model is a pair, i.e. a Python tuple (V, FV), where
    - V is the list of vertices, given as lists of coordinates;
    - FV is the face-vertex relation, given as a list of faces,
    where each face is given as a list of vertex indices.
    Return a list of HPC objects.
    """
    V, FV = model
    pols = []
    for face in FV:
        faceVerts = [V[v] for v in face]
        # one polyhedral cell spanning all face vertices (1-based indices)
        pols.append(MKPOL([faceVerts, [range(1, len(face) + 1)], None]))
    return pols
def LAR2PLASM (topology):
    """
    To transform a topological relation from LAR format (base-index = 0, like C or python)
    to PyPLASM format (base-index = 1, like fortran or matlab).
    topology stands for any LAR d_cell-vertex relation (es: EV, FV, CV, etc.)
    represented as a list of lists of integers (vertex indices in 0-basis).
    Return a list of lists of integers (vertex indices in 1-basis).
    """
    # shift every vertex index by one (plain comprehension form of
    # AA(AA(lambda k: k+1)))
    return [[vertex + 1 for vertex in cell] for cell in topology]
def VERTS(geoms):
    """
    To generate the vertices of a grid of points from a list of d lists (of equal length) of numbers.
    geoms is the list of xcoods, ycoords, zcoords, etc., where xcoods, etc. is an increasing list of numbers.
    returns a properly ordered list of d-vertices, each given a list of numbers (vertex coordinates).
    """
    # Cartesian product (CART) of the coordinate lists; the two REVERSEs
    # presumably adjust the enumeration order of the grid points -- verify
    # against the callers' expected ordering.
    return COMP([AA(REVERSE),CART,REVERSE])(geoms)
def VERTEXTRUDE((V,coords)):
    """
    Utility function to generate the output model vertices in a multiple extrusion of a LAR model.
    V is a list of d-vertices (each given as a list of d coordinates).
    coords is a list of absolute translation parameters to be applied to V in order
    to generate the output vertices.
    Return a new list of (d+1)-vertices.
    """
    # For every value in coords, append it as an extra last coordinate (AR)
    # to each vertex of V, then concatenate the resulting vertex lists.
    # (Python 2 only: tuple-unpacking parameters were removed in Python 3.)
    return CAT(AA(COMP([AA(AR),DISTR]))(DISTL([V,coords])))
def format(cmat,shape="csr"):
    """ Transform from list of triples (row,column,vale)
    to scipy.sparse corresponding formats.
    Return by default a csr format of a scipy sparse matrix.

    NOTE: this intentionally shadows the builtin format() inside this
    module, following the original API.
    """
    n = len(cmat)
    # integer work arrays, filled in place from the triples
    rows = arange(n)
    cols = arange(n)
    vals = arange(n)
    for k, (r, c, v) in enumerate(cmat):
        rows[k], cols[k], vals[k] = r, c, v
    return scipy.sparse.coo_matrix((vals, (rows, cols))).asformat(shape)
###################################################################
#------------------------------------------------------------------
#-- basic LAR software layer --------------------------------------
#------------------------------------------------------------------
#--coo is the standard rep using non-ordered triples of numbers----
#--coo := (row::integer, column::integer, value::float)------------
#------------------------------------------------------------------
def cooCreateFromBrc(ListOfListOfInt):
    """Convert a BRC table (list of per-row column-index lists) to COO
    triples [row, column, 1], one per stored entry, in row-major order."""
    triples = []
    for rowIndex, columns in enumerate(ListOfListOfInt):
        for columnIndex in columns:
            triples.append([rowIndex, columnIndex, 1])
    return triples
#------------------------------------------------------------------
def csrCreateFromCoo(COOm):
    """Convert a list of (row, column, value) triples to a CSR sparse matrix."""
    return format(COOm, "csr")
#------------------------------------------------------------------
def csrCreate(BRCm,shape=(0,0)):
    """Build a CSR matrix from a BRC table (list of per-row column lists).

    With the default shape the dimensions are inferred from the data;
    otherwise the entries are set one by one into a matrix of the given
    shape.
    """
    if shape != (0, 0):
        CSRm = scipy.sparse.csr_matrix(shape)
        for row, col, value in cooCreateFromBrc(BRCm):
            CSRm[row, col] = value
        return CSRm
    return csrCreateFromCoo(cooCreateFromBrc(BRCm))
#------------------------------------------------------------------
def csrGetNumberOfRows(CSRm):
    """Return the number of rows of a scipy sparse matrix."""
    return CSRm.shape[0]
#------------------------------------------------------------------
def csrGetNumberOfColumns(CSRm):
    """Return the number of columns of a scipy sparse matrix."""
    return CSRm.shape[1]
#------------------------------------------------------------------
def csrToMatrixRepresentation(CSRm):
    """Return a dense integer array copy of the sparse matrix CSRm."""
    dense = zeros(CSRm.shape, int)
    C = CSRm.tocoo()
    for r, c, v in zip(C.row, C.col, C.data):
        dense[r, c] = v
    return dense
#------------------------------------------------------------------
def csrToBrc(CSRm):
    """Return the BRC form of CSRm: one list of stored column indices per row."""
    C = CSRm.tocoo()
    out = [[] for _ in range(CSRm.shape[0])]
    for r, c in zip(C.row, C.col):
        out[r].append(c)
    return out
#------------------------------------------------------------------
#--matrix utility layer--------------------------------------------
#------------------------------------------------------------------
#------------------------------------------------------------------
def csrIsA(CSRm):
    """Return True when CSRm passes scipy's full CSR format validation
    (check_format returns None on success)."""
    return CSRm.check_format(True) is None
#------------------------------------------------------------------
def csrGet(CSRm,row,column):
    """Return the entry of CSRm stored at (row, column)."""
    return CSRm[row, column]
#------------------------------------------------------------------
def csrSet(CSRm,row,column,value):
    """Store value at (row, column), mutating CSRm in place; return None."""
    CSRm[row, column] = value
    return None
#------------------------------------------------------------------
def csrAppendByRow(CSRm1,CSRm2):
    """Stack CSRm2 below CSRm1 (row-wise concatenation)."""
    return vstack([CSRm1, CSRm2])
#------------------------------------------------------------------
def csrAppendByColumn(CSRm1,CSRm2):
    """Place CSRm2 to the right of CSRm1 (column-wise concatenation)."""
    return hstack([CSRm1, CSRm2])
#------------------------------------------------------------------
def csrSplitByRow(CSRm,k):
    """Split CSRm into its first k rows and the remaining rows."""
    top, bottom = CSRm[:k], CSRm[k:]
    return top, bottom
#------------------------------------------------------------------
def csrSplitByColumn(CSRm,k):
    """Split CSRm into its first k columns and the remaining columns."""
    transposed = CSRm.T
    left, right = transposed[:k], transposed[k:]
    return left.T, right.T
#------------------------------------------------------------------
#--sparse matrix operations layer----------------------------------
#------------------------------------------------------------------
def csrBoundaryFilter(CSRm, facetLengths):
    """Keep, in each row of CSRm, only the entries equal to that row's
    maximum, replacing them with 1; all other entries become zero.
    Return the result as a CSR matrix of the same shape.

    NOTE(review): the facetLengths parameter is unused here; the per-row
    maximum is used as the threshold instead -- confirm this is intended.
    """
    # per-row maximum of the stored values (Python 2 'xrange')
    maxs = [max(CSRm[k].data) for k in xrange(CSRm.shape[0])]
    inputShape = CSRm.shape
    coo = CSRm.tocoo()
    row = [] # np.array([]).astype(np.int32);
    col = [] # np.array([]).astype(np.int32);
    # data = [] # np.array([]).astype(np.int32);
    k = 0
    while (k < len(coo.data)):
        # keep only entries reaching their row's maximum
        if coo.data[k] == maxs[coo.row[k]]:
            row.append(coo.row[k])
            col.append(coo.col[k])
        k += 1
    # surviving entries are normalized to 1
    data = np.ones(len(col),dtype=np.int32);
    mtx = coo_matrix( (data, ( np.array(row).astype(np.int32), np.array(col).astype(np.int32) )), shape=inputShape)
    out = mtx.tocsr()
    return out
#------------------------------------------------------------------
def csrBinFilter(CSRm):
    """Reduce every stored entry of CSRm modulo 2, keeping its sign:
    entries with value % 2 == 1 become +/-1, the rest become 0.
    Return the result as a CSR matrix.
    """
    # can be done in parallel (by rows)
    inputShape = CSRm.shape
    coo = CSRm.tocoo()
    k = 0
    while (k < len(coo.data)):
        # sg = 1 #
        # NOTE(review): 'math' is not imported explicitly in this module;
        # presumably provided by the star imports above -- confirm.
        sg = math.copysign(1, coo.data[k])
        if (coo.data[k] % 2 == 1):
            coo.data[k] = 1 * sg
        else:
            coo.data[k] = 0
        k += 1
    #mtx = coo_matrix((coo.data, (coo.row, coo.col)), shape=inputShape)
    #out = mtx.tocsr()
    #return out
    return coo.tocsr()
#------------------------------------------------------------------
def csrPredFilter(CSRm, pred):
    """Keep only the entries of CSRm whose value satisfies pred; the rest
    are dropped.  The shape is preserved and the result is CSR.
    Can be parallelized by rows.
    """
    coo = CSRm.tocoo()
    kept = [[r, c, v] for r, c, v in zip(coo.row, coo.col, coo.data)
            if pred(v)]
    i, j, data = TRANS(kept)
    return scipy.sparse.coo_matrix((data, (i, j)), CSRm.shape).tocsr()
#------------------------------------------------------------------
#--topology interface layer----------------------------------------
#------------------------------------------------------------------
#------------------------------------------------------------------
def csrCreateTotalChain(kn):
    """Return the total chain of length kn: a kn x 1 CSR column of ones."""
    column = TRANS([kn * [0]])  # kn rows, each selecting column 0
    return csrCreateFromCoo(cooCreateFromBrc(column))
#------------------------------------------------------------------
def csrCreateUnitChain(kn,k):
    """Return the unit k-chain of length kn: a kn x 1 CSR column with a
    single 1 in row k."""
    chain = lil_matrix((kn, 1))
    chain[k, 0] = 1
    return chain.tocsr()
#------------------------------------------------------------------
def csrExtractAllGenerators(CSRm):
    """Return, for each column of CSRm, the list of row indices of its
    stored entries (the generators of the corresponding cell)."""
    transposed = csrTranspose(CSRm)
    return [transposed[h].tocoo().col.tolist()
            for h in range(CSRm.shape[1])]
#------------------------------------------------------------------
def csrChainToCellList(CSRm):
    """Return the row indices of the chain entries whose coefficient is 1."""
    coo = CSRm.tocoo()
    return [r for r, v in zip(coo.row, coo.data) if v == 1]
#------------------------------------------------------------------
#--topology query layer--------------------------------------------
#------------------------------------------------------------------
#------------------------------------------------------------------
def larCellAdjacencies(CSRm):
    """Return the cell-cell adjacency matrix CSRm * CSRm^T."""
    transposed = csrTranspose(CSRm)
    return matrixProduct(CSRm, transposed)
#------------------------------------------------------------------
def larCellIncidences(CSRm1,CSRm2):
    """Return the incidence matrix CSRm1 * CSRm2^T between two cell sets."""
    transposed2 = csrTranspose(CSRm2)
    return matrixProduct(CSRm1, transposed2)
#------------------------------------------------------------------
# FV = d-chain; EV = (d-1)-chain
def larBoundary(EV,FV):
    """Build the boundary operator matrix between the (d-1)-cells EV and
    the d-cells FV of a LAR complex, as a sparse CSR matrix."""
    e = len(EV)  # number of (d-1)-cells (currently unused)
    f = len(FV)  # number of d-cells (currently unused)
    v = max(AA(max)(FV))+1  # number of vertices
    #v = FV[-1][-1]+1 # at least with images ...
    csrFV = csrCreate(FV)#,shape=(f,v))
    csrEV = csrCreate(EV)#,shape=(e,v))
    # number of stored vertices per facet row
    facetLengths = [csrCell.getnnz() for csrCell in csrEV]
    temp = larCellIncidences(csrEV,csrFV)
    # keep only full facet/cell incidences (see csrBoundaryFilter)
    csrBoundary_2 = csrBoundaryFilter(temp,facetLengths)
    return csrBoundary_2
#------------------------------------------------------------------
def larBoundaryChain(csrBoundaryMat,brcCellList):
    """Apply the boundary operator to the chain spanned by brcCellList.

    Return the resulting (d-1)-chain as a CSR column vector, with
    coefficients reduced mod 2 by csrBinFilter.
    """
    n,m = csrBoundaryMat.shape
    # characteristic m x 1 column vector of the selected cells
    data = scipy.ones(len(brcCellList))
    i = brcCellList
    j = scipy.zeros(len(brcCellList))
    csrChain = coo_matrix((data,(i,j)),shape=(m,1)).tocsr()
    csrmat = matrixProduct(csrBoundaryMat,csrChain)
    out = csrBinFilter(csrmat)
    return out
#------------------------------------------------------------------
def larCoboundaryChain(csrCoBoundaryMat,brcCellList):
    """Apply the coboundary operator to the chain spanned by brcCellList,
    reducing the result mod 2."""
    m = csrCoBoundaryMat.shape[1]
    chain = sum([csrCreateUnitChain(m, cell) for cell in brcCellList])
    return csrBinFilter(matrixProduct(csrCoBoundaryMat, chain))
#------------------------------------------------------------------
#--model geometry layer--------------------------------------------
#--larOp : model -> model------------------------------------------
#------------------------------------------------------------------
# model = (vertices, topology)
#------------------------------------------------------------------
# binary product of cell complexes
def larProduct(models):
    """Cartesian (binary) product of two cell complexes.

    models is a pair of LAR models (V, cells1) and (W, cells2).  Output
    vertices are the concatenations v+w of input vertex pairs, numbered in
    order of first appearance; each output cell is the (sorted) product of
    an input cell pair.  Return the product LAR model.
    """
    model1, model2 = models
    V, cells1 = model1
    W, cells2 = model2
    # Number the product vertices in order of first appearance.
    verts = collections.OrderedDict()
    k = 0
    for v in V:
        for w in W:
            vertex = tuple(v + w)
            # 'in' replaces the Python-2-only dict.has_key(); identical behavior.
            if vertex not in verts:
                verts[vertex] = k
                k += 1
    cells = [sorted([verts[tuple(V[v] + W[w])] for v in c1 for w in c2])
             for c1 in cells1 for c2 in cells2]
    model = AA(list)(verts.keys()), sorted(cells)
    return model
#------------------------------------------------------------------
# extrusion of simplicial complexes
# combinatorial algorithm
def cumsum(iterable):
    """Yield the running (cumulative) sums of iterable.

    cumulative addition: list(cumsum(range(4))) => [0, 1, 3, 6].
    Uses the next() builtin instead of the Python-2-only .next() method,
    so it works on both Python 2.6+ and Python 3.
    """
    iterator = iter(iterable)
    total = next(iterator)
    yield total
    for value in iterator:
        total = total + value
        yield total
def larExtrude(model,pattern):
    """Multiple linear extrusion of a simplicial LAR model (combinatorial
    algorithm).

    model is a pair (V, FV); pattern is a list of integer steps whose
    absolute values give the step sizes, and whose sign selects whether
    the corresponding slab of output cells is kept (>0) or discarded.
    Return the extruded LAR model.
    """
    V,FV = model
    d = len(FV[0])  # number of vertices per input cell
    offset = len(V)
    m = len(pattern)
    outcells = []
    for cell in FV:
        # create the indices of vertices in the cell "tube"
        tube = [v + k*offset for k in xrange(m+1) for v in cell]
        # take groups of d+1 elements, via shifting by one
        rangelimit = len(tube)-d
        cellTube = [tube[k:k+d+1] for k in xrange(rangelimit)]
        outcells += [scipy.reshape(cellTube,newshape=(m,d,d+1)).tolist()]
    outcells = AA(CAT)(TRANS(outcells))
    # keep only the slabs marked positive in the pattern
    outcells = [group for k,group in enumerate(outcells) if pattern[k]>0 ]
    # absolute coordinates of the extrusion levels
    coords = list(cumsum([0]+(AA(ABS)(pattern))))
    outVerts = VERTEXTRUDE((V,coords))
    newModel = outVerts, CAT(outcells)
    return newModel
def EXTRUDE(args):
    """Build a subdivided unit d-cuboid complex by repeated extrusion.

    args is a list of integers, one per dimension, giving the number of
    subdivision steps along each axis.  Vertex coordinates are scaled
    into [0,1]; the returned topology uses 1-based (PLaSM) indices.
    """
    model = ([[]],[[0]])  # the 0-dimensional seed model
    for k,steps in enumerate(args):
        model = larExtrude(model,steps*[1])
    V,cells = model
    # normalize each coordinate by the step count of its axis
    verts = AA(list)(scipy.array(V) / AA(float)(args))
    return [verts, AA(AA(lambda h:h+1))(cells)]
#------------------------------------------------------------------
# extraction of facets of a cell complex
def setup(model,dim):
    """Build the characteristic CSR matrix of the model's cells, plus the
    cell adjacency matrix filtered on entries >= dim."""
    V, cells = model
    csr = csrCreate(cells)
    adjacencies = larCellAdjacencies(csr)
    adjacencies = csrPredFilter(adjacencies, GE(dim))  # ? HOWTODO ?
    return V, cells, csr, adjacencies
def larFacets(model,dim=3):
    """
    Estraction of (d-1)-cellFacets from model := (V,d-cells)
    Return (V, (d-1)-cellFacets)
    """
    V,cells,csr,csrAdjSquareMat = setup(model,dim)
    cellFacets = []
    # for each input cell i
    for i in xrange(len(cells)):
        adjCells = csrAdjSquareMat[i].tocoo()
        cell1 = csr[i].tocoo().col
        pairs = zip(adjCells.col,adjCells.data)
        for j,v in pairs:
            # consider each unordered adjacent pair (i, j) only once
            if (i<j):
                cell2 = csr[j].tocoo().col
                # a facet is the set of vertices shared by the two cells
                cell = list(set(cell1).intersection(cell2))
                cellFacets.append(sorted(cell))
    # sort and remove duplicates
    cellFacets = sorted(AA(list)(set(AA(tuple)(cellFacets))))
    return V,cellFacets
#------------------------------------------------------------------
# extraction of skeletons of a (grid) cell complex
def larSkeletons (model,dim=3):
    """
    Estraction of all skeletons from model := (V,d-cells)
    Return (V, [d-cells, (d-1)-cells, ..., 1-cells]) where p-cells is a list_of_lists_of_integers
    """
    faces = [model[1]]
    # repeatedly extract facets, from dimension dim down to 1
    for level in range(dim, 0, -1):
        model = larFacets(model, dim=level)
        faces.append(model[1])
    return model[0], REVERSE(faces)
def boundarGrid(model,minPoint,maxPoint):
    """
    Build the set of the outerCells of a cuboidal model.
    Return a list of (degenerate) d-cells

    model is a pair whose first element is the vertex list; minPoint and
    maxPoint give the extreme coordinates per axis.  The result holds one
    bucket per boundary hyperplane: the dim "min" faces followed by the
    dim "max" faces, each listing the vertex indices lying on that face.
    (Uses range() instead of the Python-2-only xrange(); behavior is
    identical and the code becomes portable.)
    """
    dim = len(minPoint)
    # boundary points extraction
    outerCells = [[] for _ in range(2 * dim)]
    for n, point in enumerate(model[0]):
        for h, coord in enumerate(point):
            if coord == minPoint[h]:
                outerCells[h].append(n)
            if coord == maxPoint[h]:
                outerCells[dim + h].append(n)
    return outerCells
def outerVertexTest (bounds):
    """
    Look whether v is on the boundary of a unconnected (multi-dim) interval [vmin_0,vmax_0, ... ,vmin_n,vmax_n]
    Return a Boolean value
    """
    def test0 (v):
        # True when any coordinate of v equals the matching bound value;
        # the pairwise comparisons are built with FL combinators
        # (DISTR/TRANS/EQ) and OR-reduced.
        return OR(AA(EQ)(CAT(AA(TRANS)(DISTR([bounds,v])))))
    return test0
| |
import re
from decimal import Decimal
import pytz
from zope.interface import implementer
from .. import handhistory as hh
from ..hand import Combo, Card
from ..constants import Limit, Game, GameType, MoneyType, Currency, Action
__all__ = ["PKRHandHistory"]
@implementer(hh.IStreet)
class _Street(hh._BaseStreet):
    """One street of a PKR hand: board cards, player actions and pot size."""

    def _parse_cards(self, boardline):
        # Board cards sit at fixed offsets in the line; each card is two
        # characters separated by a space (e.g. "[A h]"), hence the
        # stride-2 slices.
        self.cards = (
            Card(boardline[6:9:2]),
            Card(boardline[11:14:2]),
            Card(boardline[16:19:2]),
        )

    def _parse_actions(self, actionlines):
        """Split the street's lines into the pot-size line and player actions."""
        actions = []
        for line in actionlines:
            if line.startswith("Pot sizes:"):
                self._parse_pot(line)
            elif " " in line:
                actions.append(hh._PlayerAction(*self._parse_player_action(line)))
            else:
                # NOTE(review): bare 'raise' with no active exception raises
                # RuntimeError; presumably intended to fail loudly on an
                # unrecognized line -- consider an explicit ValueError.
                raise
        # None (not an empty tuple) when the street had no actions
        self.actions = tuple(actions) if actions else None

    def _parse_pot(self, line):
        """Parse a 'Pot sizes: $<amount>' line into a Decimal pot size."""
        amount_start_index = 12  # len('Pot sizes: $')
        amount = line[amount_start_index:]
        self.pot = Decimal(amount)

    def _parse_player_action(self, line):
        """Return (name, Action, amount-or-None) parsed from an action line."""
        space_index = line.find(" ")
        name = line[:space_index]
        end_action_index = line.find(" ", space_index + 1)
        # -1 means not found
        if end_action_index == -1:
            end_action_index = None  # until the end
        action = Action(line[space_index + 1 : end_action_index])
        # a second space implies a trailing "$amount" part
        if end_action_index:
            amount_start_index = line.find("$") + 1
            amount = line[amount_start_index:]
            return name, action, Decimal(amount)
        else:
            return name, action, None
@implementer(hh.IHandHistory)
class PKRHandHistory(hh._SplittableHandHistoryMixin, hh._BaseHandHistory):
    """Parses PKR hand histories."""

    # PKR hands handled here are USD cash games; no tournament metadata.
    currency = Currency.USD
    tournament_ident = None
    tournament_name = None
    tournament_level = None
    _DATE_FORMAT = "%d %b %Y %H:%M:%S"
    _TZ = pytz.UTC
    # Cards appear as "X Y"; this slice drops the middle space.
    _SPLIT_CARD_SPACE = slice(0, 3, 2)
    # Index of each street's section within self._sections.
    _STREET_SECTIONS = {"flop": 2, "turn": 3, "river": 4}
    _split_re = re.compile(r"Dealing |\nDealing Cards\n|Taking |Moving |\n")
    _blinds_re = re.compile(r"^Blinds are now \$([\d.]*) / \$([\d.]*)$")
    _hero_re = re.compile(r"^\[(. .)\]\[(. .)\] to (?P<hero_name>.*)$")
    _seat_re = re.compile(r"^Seat (\d\d?): (.*) - \$([\d.]*) ?(.*)$")
    _sizes_re = re.compile(r"^Pot sizes: \$([\d.]*)$")
    _card_re = re.compile(r"\[(. .)\]")
    _rake_re = re.compile(r"Rake of \$([\d.]*) from pot \d$")
    _win_re = re.compile(r"^(.*) wins \$([\d.]*) with: ")

    def parse_header(self):
        """Parse table name, hand id, date, game/limit/table types and blinds."""
        # sections[1] is after blinds, before preflop
        # section[2] is before flop
        # sections[-1] is before showdown
        self._split_raw()
        self.table_name = self._splitted[0][6:]  # cut off "Table "
        self.ident = self._splitted[1][15:]  # cut off "Starting Hand #"
        self._parse_date(self._splitted[2][20:])  # cut off "Start time of hand: "
        self.game = Game(self._splitted[4][11:])  # cut off "Game Type: "
        self.limit = Limit(self._splitted[5][12:])  # cut off "Limit Type: "
        self.game_type = GameType(self._splitted[6][12:])  # cut off "Table Type: "
        match = self._blinds_re.match(self._splitted[8])
        self.sb = Decimal(match.group(1))
        self.bb = Decimal(match.group(2))
        # derived value: buy-in taken as 100 big blinds
        self.buyin = self.bb * 100

    def parse(self):
        """Parses the body of the hand history, but first parse header if not yet parsed."""
        if not self.header_parsed:
            self.parse_header()
        self._parse_players()
        self._parse_button()
        self._parse_hero()
        self._parse_preflop()
        self._parse_flop()
        self._parse_street("turn")
        self._parse_street("river")
        self._parse_showdown()
        self._parse_extra()
        self._del_split_vars()
        self.parsed = True

    def _parse_players(self):
        """Parse the seat lines into players; trim to the last occupied seat."""
        # In hh there is no indication of max_players,
        # so init for 10, as there are 10 player tables on PKR.
        players = self._init_seats(10)
        for line in self._splitted[10:]:
            match = self._seat_re.match(line)
            if not match:
                break
            seat_number = int(match.group(1))
            players[seat_number - 1] = hh._Player(
                name=match.group(2),
                stack=Decimal(match.group(3)),
                seat=seat_number,
                combo=None,
            )
        # the highest seat seen determines the table size
        self.max_players = seat_number
        self.players = players[: self.max_players]

    def _parse_button(self):
        """Identify the button player from the seat number ending the button row."""
        button_row = self._splitted[self._sections[0] + 1]
        # cut last two because there can be 10 seats also
        # in case of one digit, the first char will be a space
        # but int() can convert it without hiccups :)
        button_seat = int(button_row[-2:])
        self.button = self.players[button_seat - 1]

    def _parse_hero(self):
        """Parse the dealt hole cards and attach them to the hero player."""
        dealt_row = self._splitted[self._sections[1] + 1]
        match = self._hero_re.match(dealt_row)
        first = match.group(1)[self._SPLIT_CARD_SPACE]
        second = match.group(2)[self._SPLIT_CARD_SPACE]
        hero, hero_index = self._get_hero_from_players(match.group("hero_name"))
        hero.combo = Combo(first + second)
        self.hero = self.players[hero_index] = hero
        # keep button and hero referring to the same object
        if self.button.name == self.hero.name:
            self.button = self.hero

    def _parse_preflop(self):
        """Collect the preflop action lines (up to the next empty line)."""
        start = self._sections[1] + 2
        stop = self._splitted.index("", start + 1) - 1
        self.preflop_actions = tuple(self._splitted[start:stop])

    def _parse_flop(self):
        """Parse the flop section into a _Street instance."""
        flop_section = self._STREET_SECTIONS["flop"]
        start = self._sections[flop_section] + 1
        stop = next(v for v in self._sections if v > start)
        floplines = self._splitted[start:stop]
        self.flop = _Street(floplines)

    def _parse_street(self, street):
        """Parse the turn or river: card, action lines and pot size.

        Sets <street>, <street>_actions and <street>_pot attributes;
        all three become None when the street was not reached.
        """
        section = self._STREET_SECTIONS[street]
        try:
            start = self._sections[section] + 1
            street_line = self._splitted[start]
            cards = [
                x[self._SPLIT_CARD_SPACE] for x in self._card_re.findall(street_line)
            ]
            setattr(self, street, Card(cards[0]))
            stop = next(v for v in self._sections if v > start) - 1
            setattr(self, f"{street}_actions", tuple(self._splitted[start + 1 : stop]))
            sizes_line = self._splitted[start - 2]
            pot = Decimal(self._sizes_re.match(sizes_line).group(1))
            setattr(self, f"{street}_pot", pot)
        except IndexError:
            setattr(self, street, None)
            setattr(self, f"{street}_actions", None)
            setattr(self, f"{street}_pot", None)

    def _parse_showdown(self):
        """Parse rake, winners and total pot from the showdown section."""
        start = self._sections[-1] + 1
        rake_line = self._splitted[start]
        match = self._rake_re.match(rake_line)
        self.rake = Decimal(match.group(1))
        winners = []
        # total pot = rake plus every amount won
        total_pot = self.rake
        for line in self._splitted[start:]:
            if "shows" in line:
                self.show_down = True
            elif "wins" in line:
                match = self._win_re.match(line)
                winners.append(match.group(1))
                total_pot += Decimal(match.group(2))
        self.winners = tuple(winners)
        self.total_pot = total_pot

    def _parse_extra(self):
        """Collect PKR-specific fields that have no generic attribute."""
        self.extra = dict()
        self.extra["last_ident"] = self._splitted[3][11:]  # cut off "Last Hand #"
        self.extra["money_type"] = MoneyType(
            self._splitted[7][12:]
        )  # cut off "Money Type: "
"""Test ZHA Gateway."""
import asyncio
import time
from unittest.mock import patch
import pytest
import zigpy.profiles.zha as zha
import zigpy.zcl.clusters.general as general
import zigpy.zcl.clusters.lighting as lighting
from homeassistant.components.light import DOMAIN as LIGHT_DOMAIN
from homeassistant.components.zha.core.group import GroupMember
from homeassistant.components.zha.core.store import TOMBSTONE_LIFETIME
from .common import async_enable_traffic, async_find_group_entity_id, get_zha_gateway
IEEE_GROUPABLE_DEVICE = "01:2d:6f:00:0a:90:69:e8"
IEEE_GROUPABLE_DEVICE2 = "02:2d:6f:00:0a:90:69:e8"
@pytest.fixture
def zigpy_dev_basic(zigpy_device_mock):
    """Zigpy device with just a basic cluster."""
    endpoints = {
        1: {
            "in_clusters": [general.Basic.cluster_id],
            "out_clusters": [],
            "device_type": zha.DeviceType.ON_OFF_SWITCH,
        }
    }
    return zigpy_device_mock(endpoints)
@pytest.fixture
async def zha_dev_basic(hass, zha_device_restored, zigpy_dev_basic):
    """ZHA device with just a basic cluster."""
    return await zha_device_restored(zigpy_dev_basic)
@pytest.fixture
async def coordinator(hass, zigpy_device_mock, zha_device_joined):
    """Test zha light platform."""
    # nwk 0x0000 marks this mock device as the network coordinator.
    zigpy_device = zigpy_device_mock(
        {
            1: {
                "in_clusters": [],
                "out_clusters": [],
                "device_type": zha.DeviceType.COLOR_DIMMABLE_LIGHT,
            }
        },
        ieee="00:15:8d:00:02:32:4f:32",
        nwk=0x0000,
        # raw node-descriptor bytes -- presumably a fully-capable device
        # descriptor; confirm against zigpy's NodeDescriptor layout.
        node_descriptor=b"\xf8\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff",
    )
    zha_device = await zha_device_joined(zigpy_device)
    zha_device.available = True
    return zha_device
@pytest.fixture
async def device_light_1(hass, zigpy_device_mock, zha_device_joined):
    """Test zha light platform."""
    # Color-dimmable light exposing the Groups cluster so it can be
    # added to zigpy groups in the tests below.
    zigpy_device = zigpy_device_mock(
        {
            1: {
                "in_clusters": [
                    general.OnOff.cluster_id,
                    general.LevelControl.cluster_id,
                    lighting.Color.cluster_id,
                    general.Groups.cluster_id,
                ],
                "out_clusters": [],
                "device_type": zha.DeviceType.COLOR_DIMMABLE_LIGHT,
            }
        },
        ieee=IEEE_GROUPABLE_DEVICE,
    )
    zha_device = await zha_device_joined(zigpy_device)
    zha_device.available = True
    return zha_device
@pytest.fixture
async def device_light_2(hass, zigpy_device_mock, zha_device_joined):
    """Test zha light platform."""
    # Second groupable light, identical to device_light_1 except for the IEEE.
    zigpy_device = zigpy_device_mock(
        {
            1: {
                "in_clusters": [
                    general.OnOff.cluster_id,
                    general.LevelControl.cluster_id,
                    lighting.Color.cluster_id,
                    general.Groups.cluster_id,
                ],
                "out_clusters": [],
                "device_type": zha.DeviceType.COLOR_DIMMABLE_LIGHT,
            }
        },
        ieee=IEEE_GROUPABLE_DEVICE2,
    )
    zha_device = await zha_device_joined(zigpy_device)
    zha_device.available = True
    return zha_device
async def test_device_left(hass, zigpy_dev_basic, zha_dev_basic):
    """Device leaving the network should become unavailable."""
    assert zha_dev_basic.available is True
    gateway = get_zha_gateway(hass)
    gateway.device_left(zigpy_dev_basic)
    await hass.async_block_till_done()
    assert zha_dev_basic.available is False
async def test_gateway_group_methods(hass, device_light_1, device_light_2, coordinator):
    """Test creating a group with 2 members."""
    zha_gateway = get_zha_gateway(hass)
    assert zha_gateway is not None
    # wire the mock coordinator and lights to this gateway instance
    zha_gateway.coordinator_zha_device = coordinator
    coordinator._zha_gateway = zha_gateway
    device_light_1._zha_gateway = zha_gateway
    device_light_2._zha_gateway = zha_gateway
    member_ieee_addresses = [device_light_1.ieee, device_light_2.ieee]
    members = [GroupMember(device_light_1.ieee, 1), GroupMember(device_light_2.ieee, 1)]
    # test creating a group with 2 members
    zha_group = await zha_gateway.async_create_zigpy_group("Test Group", members)
    await hass.async_block_till_done()
    assert zha_group is not None
    assert len(zha_group.members) == 2
    for member in zha_group.members:
        assert member.device.ieee in member_ieee_addresses
    # a light group entity is created for the new group
    entity_id = async_find_group_entity_id(hass, LIGHT_DOMAIN, zha_group)
    assert hass.states.get(entity_id) is not None
    # test get group by name
    assert zha_group == zha_gateway.async_get_group_by_name(zha_group.name)
    # test removing a group
    await zha_gateway.async_remove_zigpy_group(zha_group.group_id)
    await hass.async_block_till_done()
    # we shouldn't have the group anymore
    assert zha_gateway.async_get_group_by_name(zha_group.name) is None
    # the group entity should be cleaned up
    assert entity_id not in hass.states.async_entity_ids(LIGHT_DOMAIN)
    # test creating a group with 1 member
    zha_group = await zha_gateway.async_create_zigpy_group(
        "Test Group", [GroupMember(device_light_1.ieee, 1)]
    )
    await hass.async_block_till_done()
    assert zha_group is not None
    assert len(zha_group.members) == 1
    for member in zha_group.members:
        assert member.device.ieee in [device_light_1.ieee]
    # the group entity should not have been cleaned up
    assert entity_id not in hass.states.async_entity_ids(LIGHT_DOMAIN)
    # a timeout while leaving the zigbee group must keep ZHA membership intact
    with patch("zigpy.zcl.Cluster.request", side_effect=asyncio.TimeoutError):
        await zha_group.members[0].async_remove_from_group()
        assert len(zha_group.members) == 1
        for member in zha_group.members:
            assert member.device.ieee in [device_light_1.ieee]
async def test_gateway_create_group_with_id(hass, device_light_1, coordinator):
    """Test creating a group with a specific ID."""
    zha_gateway = get_zha_gateway(hass)
    assert zha_gateway is not None
    # wire the mock coordinator and light to this gateway instance
    zha_gateway.coordinator_zha_device = coordinator
    coordinator._zha_gateway = zha_gateway
    device_light_1._zha_gateway = zha_gateway
    members = [GroupMember(device_light_1.ieee, 1)]
    zha_group = await zha_gateway.async_create_zigpy_group(
        "Test Group", members, group_id=0x1234
    )
    await hass.async_block_till_done()
    assert len(zha_group.members) == 1
    assert zha_group.members[0].device is device_light_1
    # the requested group id must be honored
    assert zha_group.group_id == 0x1234
async def test_updating_device_store(hass, zigpy_dev_basic, zha_dev_basic):
    """Test saving data after a delay."""
    zha_gateway = get_zha_gateway(hass)
    assert zha_gateway is not None
    await async_enable_traffic(hass, [zha_dev_basic])
    assert zha_dev_basic.last_seen is not None
    entry = zha_gateway.zha_storage.async_get_or_create_device(zha_dev_basic)
    assert entry.last_seen == zha_dev_basic.last_seen
    assert zha_dev_basic.last_seen is not None
    last_seen = zha_dev_basic.last_seen
    # test that we can't set None as last seen any more
    zha_dev_basic.async_update_last_seen(None)
    assert last_seen == zha_dev_basic.last_seen
    # test that we won't put None in storage
    zigpy_dev_basic.last_seen = None
    assert zha_dev_basic.last_seen is None
    await zha_gateway.async_update_device_storage()
    await hass.async_block_till_done()
    # the stored entry keeps the previous good timestamp
    entry = zha_gateway.zha_storage.async_get_or_create_device(zha_dev_basic)
    assert entry.last_seen == last_seen
    # test that we can still set a good last_seen
    last_seen = time.time()
    zha_dev_basic.async_update_last_seen(last_seen)
    assert last_seen == zha_dev_basic.last_seen
    # test that we still put good values in storage
    await zha_gateway.async_update_device_storage()
    await hass.async_block_till_done()
    entry = zha_gateway.zha_storage.async_get_or_create_device(zha_dev_basic)
    assert entry.last_seen == last_seen
async def test_cleaning_up_storage(hass, zigpy_dev_basic, zha_dev_basic, hass_storage):
    """Test cleaning up zha storage and remove stale devices."""
    gateway = get_zha_gateway(hass)
    assert gateway is not None
    await async_enable_traffic(hass, [zha_dev_basic])
    assert zha_dev_basic.last_seen is not None

    await gateway.zha_storage.async_save()
    await hass.async_block_till_done()
    assert hass_storage["zha.storage"]["data"]["devices"]
    stored_device = hass_storage["zha.storage"]["data"]["devices"][0]
    assert stored_device["ieee"] == str(zha_dev_basic.ieee)

    # Age the device past the tombstone lifetime; the next prune + save
    # cycle must drop it from storage.
    zha_dev_basic.device.last_seen = time.time() - TOMBSTONE_LIFETIME - 1
    await gateway.async_update_device_storage()
    await hass.async_block_till_done()
    await gateway.zha_storage.async_save()
    await hass.async_block_till_done()
    assert not hass_storage["zha.storage"]["data"]["devices"]
| |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cProfile
import functools
import json
import operator
import time
import flask
from oslo_config import cfg
from oslo_log import log as logging
import six
from werkzeug import exceptions
from stackalytics.dashboard import helpers
from stackalytics.dashboard import parameters
from stackalytics.dashboard import vault
from stackalytics.processor import utils
from stackalytics import version as stackalytics_version
LOG = logging.getLogger(__name__)
def _check_param_in(params, name, collection, allow_all=False):
for single in (params.get(name) or []):
single = single.lower()
if allow_all and single == 'all':
continue
if single not in collection:
params[name] = []
flask.abort(404)
def _validate_params(params):
    """Validate every filter parameter against the matching vault
    collection; any invalid value aborts the request with 404.
    """
    vault_inst = vault.get_vault()
    memory_storage_inst = vault.get_memory_storage()
    # (parameter name, allowed values, whether 'all' is accepted).
    checks = (
        ('release', vault_inst['releases'], True),
        ('project_type', vault_inst['project_types_index'], False),
        ('module', vault_inst['module_id_index'], False),
        ('company', memory_storage_inst.get_companies_lower(), False),
        ('user_id', memory_storage_inst.get_user_ids(), False),
        ('metric', parameters.METRIC_TO_RECORD_TYPE, True),
    )
    for name, collection, allow_all in checks:
        _check_param_in(params, name, collection, allow_all)
def _get_single(params):
if params:
return params[0]
return None
def _prepare_params(kwargs, ignore):
    """Collect, validate and cache the request's filter parameters.

    The validated dict is memoized in kwargs['_params'] so repeated
    decorator calls reuse it.  Keys listed in ignore are blanked in the
    returned copy (the cached original is left intact).
    """
    params = kwargs.get('_params')
    if not params:
        params = {'action': flask.request.path}
        for key in parameters.FILTER_PARAMETERS:
            params[key] = parameters.get_parameter(kwargs, key, key)
        # Date boundaries are normalized to day precision.
        for boundary in ('start_date', 'end_date'):
            if params[boundary]:
                params[boundary] = [utils.round_timestamp_to_day(
                    params[boundary][0])]
        _validate_params(params)
        kwargs['_params'] = params

    if not ignore:
        return params
    return dict([(k, [] if k in ignore else v)
                 for k, v in six.iteritems(params)])
def cached(ignore=None):
    """Decorator that memoizes view results in the vault-wide cache.

    The cache key is the JSON dump of the prepared request parameters;
    ``ignore`` lists parameter names excluded from the key (and thus
    shared between requests differing only in those parameters).
    """
    def decorator(func):
        @functools.wraps(func)
        def prepare_params_decorated_function(*args, **kwargs):
            params = _prepare_params(kwargs, ignore)
            cache_inst = vault.get_vault()['cache']
            key = json.dumps(params)
            value = cache_inst.get(key)
            # Compare against None explicitly: falsy results (empty
            # strings/collections) are valid cache entries and must not
            # be recomputed — and re-counted in cache_size — per request.
            if value is None:
                value = func(*args, **kwargs)
                cache_inst[key] = value
                vault.get_vault()['cache_size'] += len(key) + len(value)
                LOG.debug('Cache size: %(size)d, entries: %(len)d',
                          {'size': vault.get_vault()['cache_size'],
                           'len': len(cache_inst.keys())})
            return value
        return prepare_params_decorated_function
    return decorator
def record_filter(ignore=None):
    """Decorator that resolves the request's filters into record ids.

    Fills kwargs['record_ids'] (a set, or None when no filter applied)
    and kwargs['records'] before calling the wrapped view.  ``ignore``
    lists parameter names excluded from filtering.
    """
    def decorator(f):

        def _filter_records_by_days(start_date, end_date, memory_storage_inst):
            # Missing boundaries default to the first recorded day / now.
            if start_date:
                start_date = utils.date_to_timestamp_ext(start_date[0])
            else:
                start_date = memory_storage_inst.get_first_record_day()
            if end_date:
                end_date = utils.date_to_timestamp_ext(end_date[0])
            else:
                end_date = utils.date_to_timestamp_ext('now')
            start_day = utils.timestamp_to_day(start_date)
            end_day = utils.timestamp_to_day(end_date)
            return memory_storage_inst.get_record_ids_by_days(
                six.moves.range(start_day, end_day + 1))

        def _filter_records_by_modules(memory_storage_inst, mr):
            # mr holds (module, release) pairs; release None means
            # "any release of that module".
            selected = set([])
            for m, r in mr:
                if r is None:
                    selected |= memory_storage_inst.get_record_ids_by_modules(
                        [m])
                else:
                    selected |= (
                        memory_storage_inst.get_record_ids_by_module_release(
                            m, r))
            return selected

        def _intersect(first, second):
            # None means "no filter applied yet", not an empty result.
            if first is not None:
                return first & second
            return second

        @functools.wraps(f)
        def record_filter_decorated_function(*args, **kwargs):
            memory_storage_inst = vault.get_memory_storage()
            record_ids = None

            params = _prepare_params(kwargs, ignore)

            release = params['release']
            if release:
                if 'all' not in release:
                    record_ids = (
                        memory_storage_inst.get_record_ids_by_releases(
                            c.lower() for c in release))

            project_type = params['project_type']
            mr = None
            if project_type:
                mr = set(vault.resolve_modules(vault.resolve_project_types(
                    project_type), release))

            module = params['module']
            if module:
                mr = _intersect(mr, set(vault.resolve_modules(
                    module, release)))

            if mr is not None:
                record_ids = _intersect(
                    record_ids, _filter_records_by_modules(
                        memory_storage_inst, mr))

            # Keep only user ids that actually exist in runtime storage.
            user_id = params['user_id']
            user_id = [u for u in user_id
                       if vault.get_user_from_runtime_storage(u)]
            if user_id:
                record_ids = _intersect(
                    record_ids,
                    memory_storage_inst.get_record_ids_by_user_ids(user_id))

            company = params['company']
            if company:
                record_ids = _intersect(
                    record_ids,
                    memory_storage_inst.get_record_ids_by_companies(company))

            # NOTE: previously the loop variable shadowed the list it
            # iterated ('for metric in metric'); renamed for clarity.
            metrics = params['metric']
            if 'all' not in metrics:
                for metric in metrics:
                    if metric in parameters.METRIC_TO_RECORD_TYPE:
                        record_ids = _intersect(
                            record_ids,
                            memory_storage_inst.get_record_ids_by_types(
                                parameters.METRIC_TO_RECORD_TYPE[metric]))

            blueprint_id = params['blueprint_id']
            if blueprint_id:
                record_ids = _intersect(
                    record_ids,
                    memory_storage_inst.get_record_ids_by_blueprint_ids(
                        blueprint_id))

            start_date = params['start_date']
            end_date = params['end_date']
            if start_date or end_date:
                record_ids = _intersect(
                    record_ids, _filter_records_by_days(start_date, end_date,
                                                        memory_storage_inst))

            kwargs['record_ids'] = record_ids
            kwargs['records'] = memory_storage_inst.get_records(record_ids)
            return f(*args, **kwargs)

        return record_filter_decorated_function
    return decorator
def incremental_filter(result, record, param_id, context):
    """Count one unit in the bucket keyed by the record's param_id value."""
    bucket = result[getattr(record, param_id)]
    bucket['metric'] += 1
def loc_filter(result, record, param_id, context):
    """Accumulate the record's lines-of-code into its param_id bucket."""
    bucket = result[getattr(record, param_id)]
    bucket['metric'] += record.loc
def mark_filter(result, record, param_id, context):
    """Tally review marks and workflow votes per param_id bucket.

    Code-Review records increment the metric and count their numeric
    vote; Workflow records count as 'A' (approve, value 1) or 'WIP'.
    Disagreements are tracked separately.
    """
    bucket = result[getattr(record, param_id)]
    value = 0
    if record.type == 'Code-Review':
        bucket['metric'] += 1
        value = record.value
    elif record.type == 'Workflow':
        value = 'A' if record.value == 1 else 'WIP'
    bucket[value] = bucket.get(value, 0) + 1
    if record.disagreement:
        bucket['disagreements'] = bucket.get('disagreements', 0) + 1
def mark_finalize(record):
    """Derive mark-distribution and ratio fields from raw mark counts.

    Returns a copy of record extended with 'mark_ratio',
    'positive_ratio', 'disagreement_ratio' and zero-filled entries for
    absent mark keys.
    """
    finalized = record.copy()
    positive = 0
    numeric = 0
    distribution = []
    for key in (-2, -1, 1, 2, 'A'):
        if key not in record:
            distribution.append('0')
            finalized[key] = 0
            continue
        count = record[key]
        if key in (1, 2):
            positive += count
        if key != 'A':
            # 'A' (approvals) is shown but excluded from the ratio base.
            numeric += count
        distribution.append(str(count))

    finalized['disagreements'] = record.get('disagreements', 0)

    if numeric:
        positive_ratio = '%.1f%%' % (
            (positive * 100.0) / numeric)
        finalized['disagreement_ratio'] = '%.1f%%' % (
            (record.get('disagreements', 0) * 100.0) / numeric)
    else:
        positive_ratio = helpers.INFINITY_HTML
        finalized['disagreement_ratio'] = helpers.INFINITY_HTML

    finalized['mark_ratio'] = (
        '|'.join(distribution) + ' (' + positive_ratio + ')')
    finalized['positive_ratio'] = positive_ratio
    return finalized
def person_day_filter(result, record, param_id, context):
    """Count each user at most once per day in the param_id bucket."""
    day = utils.timestamp_to_day(record.date)
    # Records arrive grouped by day (see generate_records_for_person_day),
    # so a day change means the per-day user set can simply be reset.
    if context.get('last_processed_day') != day:
        context['last_processed_day'] = day
        context['counted_user_ids'] = set()

    user_id = record.user_id
    bucket_key = getattr(record, param_id)
    if user_id not in context['counted_user_ids']:
        context['counted_user_ids'].add(user_id)
        result[bucket_key]['metric'] += 1
def generate_records_for_person_day(record_ids):
    """Yield the records ordered by date, as person_day_filter requires."""
    memory_storage_inst = vault.get_memory_storage()
    dated_ids = sorted(
        ((record.date, record.record_id)
         for record in memory_storage_inst.get_records(record_ids)),
        key=operator.itemgetter(0))
    for record in memory_storage_inst.get_records(
            record_id for _date, record_id in dated_ids):
        yield record
def aggregate_filter():
    """Decorator that selects per-metric aggregation handlers.

    Injects kwargs['metric_filter'] and kwargs['finalize_handler'], and
    for the person-day metric replaces kwargs['records'] with a
    date-ordered generator.
    """
    def decorator(f):
        @functools.wraps(f)
        def aggregate_filter_decorated_function(*args, **kwargs):
            requested = (flask.request.args.get('metric') or
                         parameters.get_default('metric'))
            metric = requested.lower()

            # metric name -> (record filter, finalize handler).
            handlers = {
                'commits': (None, None),
                'loc': (loc_filter, None),
                'marks': (mark_filter, mark_finalize),
                'emails': (incremental_filter, None),
                'bpd': (incremental_filter, None),
                'bpc': (incremental_filter, None),
                'filed-bugs': (incremental_filter, None),
                'resolved-bugs': (incremental_filter, None),
                'members': (incremental_filter, None),
                'person-day': (person_day_filter, None),
                'ci': (None, None),
                'patches': (None, None),
            }
            if metric not in handlers:
                metric = parameters.get_default('metric')

            metric_filter, finalize_handler = handlers[metric]
            kwargs['metric_filter'] = metric_filter
            kwargs['finalize_handler'] = finalize_handler

            if metric == 'person-day':
                kwargs['records'] = generate_records_for_person_day(
                    kwargs['record_ids'])
            return f(*args, **kwargs)
        return aggregate_filter_decorated_function
    return decorator
def exception_handler():
    """Decorator mapping unexpected view errors to HTTP 404.

    HTTP exceptions raised by the view (e.g. flask.abort) already carry
    a status and are re-raised untouched.
    """
    def decorator(f):
        @functools.wraps(f)
        def exception_handler_decorated_function(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except exceptions.HTTPException:
                raise  # already a proper HTTP response
            except Exception as e:
                LOG.exception(e)
                flask.abort(404)
        return exception_handler_decorated_function
    return decorator
def templated(template=None, return_code=200):
    """Decorator that renders the view's context dict into a template.

    The template name defaults to the endpoint path (dots replaced with
    slashes) plus '.html'.  Common request parameters (metric, project
    type, release, company, module, user) are injected into the context
    before rendering, together with version and update-time metadata.
    """
    def decorator(f):
        @functools.wraps(f)
        def templated_decorated_function(*args, **kwargs):
            vault_inst = vault.get_vault()
            template_name = template
            if template_name is None:
                # Derive the template path from the Flask endpoint name.
                template_name = (flask.request.endpoint.replace('.', '/') +
                                 '.html')
            ctx = f(*args, **kwargs)
            if ctx is None:
                ctx = {}
            try:
                _prepare_params(kwargs, [])
            except Exception:
                # Parameter validation failures are fatal only for
                # regular pages; error pages render without them.
                if return_code == 200:
                    raise  # do not re-raise on error page
            # put parameters into template
            ctx['metric'] = parameters.get_single_parameter(
                kwargs, 'metric', use_default=True)
            ctx['metric_label'] = parameters.METRIC_LABELS.get(ctx['metric'])
            project_type = parameters.get_single_parameter(
                kwargs, 'project_type', use_default=True)
            ctx['project_type'] = project_type
            ctx['project_type_inst'] = vault.get_project_type(project_type)
            ctx['release'] = parameters.get_single_parameter(
                kwargs, 'release', use_default=True)
            company = parameters.get_single_parameter(kwargs, 'company')
            ctx['company'] = company
            if company:
                ctx['company_original'] = (
                    vault.get_memory_storage().get_original_company_name(
                        ctx['company']))
            module = parameters.get_single_parameter(kwargs, 'module')
            ctx['module'] = module
            if module and module in vault_inst['module_id_index']:
                ctx['module_inst'] = vault_inst['module_id_index'][module]
            ctx['user_id'] = parameters.get_single_parameter(kwargs, 'user_id')
            if ctx['user_id']:
                ctx['user_inst'] = vault.get_user_from_runtime_storage(
                    ctx['user_id'])
            ctx['page_title'] = helpers.make_page_title(
                ctx['project_type_inst'],
                ctx.get('release'), ctx.get('module_inst'),
                ctx.get('company_original'), ctx.get('user_inst'))
            ctx['stackalytics_version'] = (
                stackalytics_version.version_info.version_string())
            ctx['stackalytics_release'] = (
                stackalytics_version.version_info.release_string())
            update_time = vault_inst['runtime_storage_update_time']
            ctx['runtime_storage_update_time'] = update_time
            ctx['runtime_storage_update_time_str'] = helpers.format_datetime(
                update_time) if update_time else None
            # deprecated -- top mentor report
            ctx['review_nth'] = parameters.get_single_parameter(
                kwargs, 'review_nth')
            return flask.render_template(template_name, **ctx), return_code
        return templated_decorated_function
    return decorator
def jsonify(root='data'):
    """Decorator serializing a view's return value to a JSON document.

    A tuple return value is paired element-wise with the keys in root
    (extra elements on either side are dropped); any other value is
    stored under the single root key.
    """
    def decorator(func):
        @functools.wraps(func)
        def jsonify_decorated_function(*args, **kwargs):
            value = func(*args, **kwargs)
            if isinstance(value, tuple):
                # zip truncates to the shorter of keys/values, matching
                # the previous min(len(...)) indexing.
                result = dict(zip(root, value))
            else:
                result = {root: value}
            return json.dumps(result)
        return jsonify_decorated_function
    return decorator
def profiler_decorator(func):
    """Wrap func with cProfile when collect_profiler_stats is configured.

    Stats are dumped to the configured file after every call; when the
    option is unset the wrapper is a plain pass-through.
    """
    @functools.wraps(func)
    def profiler_decorated_function(*args, **kwargs):
        profile_filename = cfg.CONF.collect_profiler_stats
        profiler = cProfile.Profile() if profile_filename else None
        if profiler is not None:
            LOG.debug('Profiler is enabled')
            profiler.enable()
        result = func(*args, **kwargs)
        if profiler is not None:
            profiler.disable()
            profiler.dump_stats(profile_filename)
            LOG.debug('Profiler stats is written to file %s', profile_filename)
        return result
    return profiler_decorated_function
def response():
    """Decorator building the JSON/JSONP HTTP response with cache headers.

    Cache lifetime is derived from the vault's next scheduled update so
    clients revalidate only once fresh data can exist.
    """
    def decorator(func):
        @functools.wraps(func)
        @profiler_decorator
        def response_decorated_function(*args, **kwargs):
            callback = flask.app.request.args.get('callback', False)
            data = func(*args, **kwargs)
            if callback:
                # JSONP: wrap the payload in the requested callback.
                data = str(callback) + '(' + data + ')'
                mimetype = 'application/javascript'
            else:
                mimetype = 'application/json'
            resp = flask.current_app.response_class(data, mimetype=mimetype)

            update_time = vault.get_vault()['vault_next_update_time']
            now = utils.date_to_timestamp('now')
            max_age = (update_time - now) if now < update_time else 0
            resp.headers['cache-control'] = 'public, max-age=%d' % (max_age,)
            resp.headers['expires'] = time.strftime(
                '%a, %d %b %Y %H:%M:%S GMT',
                time.gmtime(vault.get_vault()['vault_next_update_time']))
            resp.headers['access-control-allow-origin'] = '*'
            return resp
        return response_decorated_function
    return decorator
def query_filter(query_param='query'):
    """Decorator injecting a case-insensitive substring predicate.

    kwargs['query_filter'] matches candidates containing the request's
    query string; with no query every candidate matches.
    """
    def decorator(f):
        @functools.wraps(f)
        def query_filter_decorated_function(*args, **kwargs):
            query = flask.request.args.get(query_param)
            if query:
                def matches(candidate):
                    return query in candidate.lower()
                kwargs['query_filter'] = matches
            else:
                kwargs['query_filter'] = lambda x: True
            return f(*args, **kwargs)
        return query_filter_decorated_function
    return decorator
| |
"""
SQLite database backend
Store traces from tallyable objects in individual SQL tables.
Implementation Notes
--------------------
For each object, a table is created with the following format:
key (INT), trace (INT), v1 (FLOAT), v2 (FLOAT), v3 (FLOAT) ...
For multidimensional objects, ndim>1, eg (2,2) the table has the following format:
key (INT), trace (INT), v1_1 (FLOAT), v1_2 (FLOAT), v2_1 (FLOAT), v2_2 (FLOAT)
The key is autoincremented each time a new row is added to the table.
trace denotes the chain index, and starts at 0.
Additional Dependencies
-----------------------
sqlite3 <http://www.sqlite.org>
Changeset
---------
Created by Chris Fonnesbeck on 2007-02-01.
Updated by DH on 2007-04-04.
DB API changes, October 2008, DH.
Added support for multidimensional arrays, DH Oct. 2009
"""
# TODO: Add support for integer valued objects.
import numpy as np
from numpy import zeros, shape, squeeze, transpose
import sqlite3
import base, pickle, ram, pymc
import pdb,os
from pymc.database import base
__all__ = ['Trace', 'Database', 'load']
class Trace(base.Trace):
    """SQLite Trace class.

    Each tallied object gets its own table; a row stores the chain index
    in the ``trace`` column and the flattened values in FLOAT columns
    named by var_str() (v1, v2, ... or v1_1, v1_2, ... for ndim > 1).
    """
    def _initialize(self, chain, length):
        """Create an SQL table for this object.

        :Parameters:
        chain : int
            Chain index; the table is created only for chain 0.
        length : int
            Expected number of samples (unused: SQL rows are appended).
        """
        if self._getfunc is None:
            self._getfunc = self.db.model._funs_to_tally[self.name]
        # Determine size
        try:
            self._shape = np.shape(self._getfunc())
        except TypeError:
            # Value has no array shape (opaque object); store as scalar.
            self._shape = None
        # Column-name fragment reused by tally()'s INSERT statements.
        self._vstr = ', '.join(var_str(self._shape))
        # If the table already exists, exit now.
        if chain != 0:
            return
        # Create the variable name strings.
        vstr = ', '.join(v + ' FLOAT' for v in var_str(self._shape))
        query = "create table %s (recid INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, trace int(5), %s )" % (self.name, vstr)
        self.db.cur.execute(query)
    def tally(self, chain):
        """Adds current value to trace."""
        try:
            # I changed str(x) to '%f'%x to solve a bug appearing due to
            # locale settings. In french for instance, str prints a comma
            # instead of a colon to indicate the decimal, which confuses
            # the database into thinking that there are more values than there
            # is. A better solution would be to use another delimiter than the
            # comma. -DH
            valstring = ', '.join(['%f'%x for x in np.ravel(self._getfunc())])
        except:
            # Fall back to the plain string for values '%f' cannot format.
            valstring = str(self._getfunc())
        # Add value to database
        query = "INSERT INTO %s (recid, trace, %s) values (NULL, %s, %s)" % \
            (self.name, self._vstr, chain, valstring)
        self.db.cur.execute(query)
    def gettrace(self, burn=0, thin=1, chain=-1, slicing=None):
        """Return the trace (last by default).

        Input:
          - burn (int): The number of transient steps to skip.
          - thin (int): Keep one in thin.
          - chain (int): The index of the chain to fetch. If None, return all
            chains. By default, the last chain is returned.
          - slicing: A slice, overriding burn and thin assignment.
        """
        if not slicing:
            slicing = slice(burn, None, thin)
        # If chain is None, get the data from all chains.
        if chain is None:
            self.db.cur.execute('SELECT * FROM %s' % self.name)
            trace = self.db.cur.fetchall()
        else:
            # Deal with negative chains (starting from the end)
            if chain < 0:
                chain = range(self.db.chains)[chain]
            self.db.cur.execute('SELECT * FROM %s WHERE trace=%s' % (self.name, chain))
            trace = self.db.cur.fetchall()
        # Drop the recid and trace columns; keep only the tallied values.
        trace = np.array(trace)[:,2:]
        if len(self._shape) > 1:
            # Restore each sample's original multidimensional shape.
            trace = trace.reshape(-1, *self._shape)
        return squeeze(trace[slicing])
    __call__ = gettrace
    # def nchains(self):
    #     """Return the number of existing chains, completed or not."""
    #     try:
    #         self.db.cur.execute('SELECT MAX(trace) FROM %s'%self.name)
    #         trace = self.db.cur.fetchall()[0][0]
    #
    #         if trace is None:
    #             return 0
    #         else:
    #             return trace + 1
    #     except:
    #         return 0
    def length(self, chain=-1):
        """Return the sample length of given chain. If chain is None,
        return the total length of all chains."""
        return len(self.gettrace(chain=chain))
class Database(base.Database):
    """SQLite database backend.

    One table per tallied object; see the module docstring for the
    table layout.
    """
    def __init__(self, dbname, dbmode='a'):
        """Open or create an SQL database.

        :Parameters:
        dbname : string
            The name of the database file.
        dbmode : {'a', 'w'}
            File mode.  Use `a` to append values, and `w` to overwrite
            an existing file.
        """
        self.__name__ = 'sqlite'
        self.dbname = dbname
        self.__Trace__ = Trace
        # One sequence of tallied-object names per chain.
        self.trace_names = []
        # Maps object name -> Trace instance.
        self._traces = {}
        self.chains = 0
        # Write mode starts from scratch: discard any existing file.
        if dbmode == 'w' and os.path.exists(dbname):
            os.remove(dbname)
        self.DB = sqlite3.connect(dbname, check_same_thread=False)
        self.cur = self.DB.cursor()
    def commit(self):
        """Commit pending updates to the database file."""
        self.DB.commit()
    def close(self, *args, **kwds):
        """Commit outstanding work, then close cursor and connection."""
        self.cur.close()
        self.commit()
        self.DB.close()
    # NOTE: savestate/getstate are placeholders.  Persisting the
    # sampler state (current iteration, step-method state, ...) would
    # require generic dict <-> SQL (de)serialization, which is not
    # implemented here.
    def savestate(self, state):
        """Store a dictionary containing the state of the Sampler and its
        StepMethods."""
        pass
    def getstate(self):
        """Return a dictionary containing the state of the Sampler and its
        StepMethods."""
        return {}
def load(dbname):
    """Load an existing SQLite database.

    Return a Database instance with one Trace per stored table and the
    chain count derived from the maximum ``trace`` value found.
    """
    db = Database(dbname)
    # Get the name of the objects
    tables = get_table_list(db.cur)
    # Create a Trace instance for each object
    chains = 0
    for name in tables:
        db._traces[name] = Trace(name=name, db=db)
        db._traces[name]._shape = get_shape(db.cur, name)
        setattr(db, name, db._traces[name])
        db.cur.execute('SELECT MAX(trace) FROM %s'%name)
        max_trace = db.cur.fetchall()[0][0]
        # MAX(trace) is NULL for an empty table; skip it instead of
        # failing on None + 1.
        if max_trace is not None:
            chains = max(chains, max_trace + 1)
    db.chains=chains
    db.trace_names = chains * [tables,]
    db._state_ = {}
    return db
# Copied from Django.
def get_table_list(cursor):
    """Returns a list of table names in the current database."""
    # Skip the sqlite_sequence system table used for autoincrement key
    # generation.
    cursor.execute("""
    SELECT name FROM sqlite_master
    WHERE type='table' AND NOT name='sqlite_sequence'
    ORDER BY name""")
    rows = cursor.fetchall()
    return [name for (name,) in rows]
def get_shape(cursor, name):
    """Return the shape of the table ``name``."""
    # The last column is named 'v<i>_<j>_...' (see var_str); its indices
    # are the per-dimension sizes of the tallied object.
    cursor.execute('select * from %s'% name)
    last_column_name = cursor.description[-1][0]
    return tuple(int(i) for i in last_column_name[1:].split('_'))
def var_str(shape):
    """Return a sequence of strings naming the element of the tallyable object.

    :Examples:
        >>> var_str((5,))
        ['v1', 'v2', 'v3', 'v4', 'v5']
        >>> var_str((2,2))
        ['v1_1', 'v1_2', 'v2_1', 'v2_2']
    """
    if shape in [None, ()]:
        # Scalars get a single value column.
        return ['v1',]
    # Row d of `flat` holds the 1-based index along dimension d for each
    # element, in C (row-major) order; each column names one element.
    flat = (np.indices(shape) + 1).reshape(len(shape), -1)
    names = []
    for column in flat.T:
        names.append('v' + '_'.join(str(index) for index in column))
    return names
| |
import sys
if sys.version_info < (3,):
range = xrange
import numpy as np
import pandas as pd
import scipy.stats as ss
import scipy.special as sp
from .. import families as fam
from .. import tsm as tsm
from .. import data_check as dc
from .scores import *
from .gas_core_recursions import gas_recursion
class GASRank(tsm.TSM):
""" Inherits time series methods from TSM class.
**** GENERALIZED AUTOREGRESSIVE SCORE (GAS) RANK MODELS ****
Parameters
----------
data : pd.DataFrame
Field to specify the univariate time series data that will be used.
team_1 : str (pd.DataFrame)
Specifies which column contains the home team
team_2 : str (pd.DataFrame)
Specifies which column contains the away team
family : GAS family object
Which distribution to use, e.g. GASNormal()
score_diff : str (pd.DataFrame)
Specifies which column contains the score
gradient_only : Boolean (default: True)
If true, will only use gradient rather than second-order terms
to construct the modified score.
"""
def __init__(self, data, team_1, team_2, family, score_diff, gradient_only=False):
    """Build a GASRank model from match-level data.

    See the class docstring for parameter descriptions.
    """
    # Initialize TSM object
    super(GASRank,self).__init__('GASRank')

    self.gradient_only = gradient_only
    self.z_no = 2
    self.max_lag = 0
    self._z_hide = 0  # Whether to cutoff variance latent variables from results
    self.supported_methods = ["MLE","PML","Laplace","M-H","BBVI"]
    self.default_method = "MLE"
    self.multivariate_model = False

    # Integer codes for the teams plus per-match appearance counts.
    self.home_id, self.away_id = self._create_ids(data[team_1].values,data[team_2].values)
    self.team_strings = sorted(list(set(np.append(data[team_1].values,data[team_2].values))))
    self.team_dict = dict(zip(self.team_strings, range(len(self.team_strings))))
    self.home_count, self.away_count = self._match_count()

    self.max_team = max(np.max(self.home_id),np.max(self.away_id))
    self.original_dataframe = data
    self.data, self.data_name, self.is_pandas, self.index = dc.data_check(data, score_diff)
    # np.float was removed in NumPy 1.24; the builtin float is the
    # identical type the alias referred to.
    self.data = self.data.astype(float)
    self.data_original = self.data.copy()
    self.data_length = self.data.shape[0]
    self._create_latent_variables()

    self.family = family
    self.model_name2, self.link, self.scale, self.shape, self.skewness, self.mean_transform, _ = self.family.setup()
    self.model_name = self.model_name2 + "GAS Rank "

    # Family-specific latent variables follow the two base ones
    # (constant and ability scale) in z_list.
    for no, i in enumerate(self.family.build_latent_variables()):
        self.latent_variables.add_z(i[0],i[1],i[2])
        self.latent_variables.z_list[2+no].start = i[3]

    self.latent_variables.z_list[0].start = self.mean_transform(np.mean(self.data))

    # Default to the single-component model variants.
    self._model = self._model_one_components
    self._model_abilities = self._model_abilities_one_components
    self.plot_abilities = self.plot_abilities_one_components
    self.predict = self.predict_one_component

    self.family_z_no = len(self.family.build_latent_variables())
    self.z_no = len(self.latent_variables.z_list)
def _create_ids(self, home_teams, away_teams):
    """
    Creates IDs for both players/teams

    The concatenated home+away names share one categorical encoding, so
    the same team maps to the same integer code in both halves.
    """
    combined = pd.Categorical(np.append(home_teams, away_teams))
    half = int(len(combined) / 2)
    home_id = combined.codes[0:half]
    away_id = combined.codes[half:]
    return home_id, away_id
def _match_count(self):
    """Return per-match appearance counts for the home and away teams.

    Element t counts how many of the first t+1 matches the team playing
    at home (resp. away) in match t has taken part in, home or away.
    """
    home_count = np.zeros(len(self.home_id))
    away_count = np.zeros(len(self.away_id))
    # Single pass with running totals; the previous implementation
    # rescanned both id arrays for every match (O(n^2)).
    appearances = {}
    for t in range(0, len(home_count)):
        home = self.home_id[t]
        away = self.away_id[t]
        appearances[home] = appearances.get(home, 0) + 1
        appearances[away] = appearances.get(away, 0) + 1
        home_count[t] = appearances[home]
        away_count[t] = appearances[away]
    return home_count, away_count
def _match_count_2(self):
    """Return per-match appearance counts for the secondary ids.

    Same computation as _match_count but over home_2_id/away_2_id.
    """
    home_count = np.zeros(len(self.home_2_id))
    away_count = np.zeros(len(self.away_2_id))
    # Single pass with running totals; the previous implementation
    # rescanned both id arrays for every match (O(n^2)).
    appearances = {}
    for t in range(0, len(home_count)):
        home = self.home_2_id[t]
        away = self.away_2_id[t]
        appearances[home] = appearances.get(home, 0) + 1
        appearances[away] = appearances.get(away, 0) + 1
        home_count[t] = appearances[home]
        away_count[t] = appearances[away]
    return home_count, away_count
def _create_latent_variables(self):
    """ Creates model latent variables

    Returns
    ----------
    None (changes model attributes)
    """
    # z 0: intercept added to every match prediction
    # (see _model_one_components).
    self.latent_variables.add_z('Constant', fam.Normal(0,10,transform=None), fam.Normal(0,3))
    # z 1: scaling applied to the score-driven ability updates.
    self.latent_variables.add_z('Ability Scale', fam.Normal(0,1,transform=None), fam.Normal(0,3))
def _get_scale_and_shape(self, parm):
    """ Obtains appropriate model scale and shape latent variables

    Parameters
    ----------
    parm : np.array
        Transformed latent variable vector

    Returns
    ----------
    tuple : (scale, shape, skewness) values, taken from the tail of
        parm according to which features the family declares; absent
        features are returned as 0.
    """
    model_scale = 0
    model_shape = 0
    model_skewness = 0
    if self.scale is True and self.shape is True:
        model_shape = parm[-1]
        model_scale = parm[-2]
    elif self.scale is True:
        model_scale = parm[-1]
    if self.skewness is True:
        model_skewness = parm[-3]
    return model_scale, model_shape, model_skewness
def _get_scale_and_shape_sim(self, transformed_lvs):
    """ Obtains model scale, shape, skewness latent variables for
    a 2d array of simulations.

    Parameters
    ----------
    transformed_lvs : np.array
        Transformed latent variable vector (2d - with draws of each variable)

    Returns
    ----------
    - Tuple of np.arrays (each being scale, shape and skewness draws)
    """
    n_draws = transformed_lvs.shape[1]
    z_list = self.latent_variables.z_list
    # Features the family does not declare come back as zero draws.
    model_scale = np.zeros(n_draws)
    model_shape = np.zeros(n_draws)
    model_skewness = np.zeros(n_draws)
    if self.scale is True:
        if self.shape is True:
            model_shape = z_list[-1].prior.transform(transformed_lvs[-1, :])
            model_scale = z_list[-2].prior.transform(transformed_lvs[-2, :])
        else:
            model_scale = z_list[-1].prior.transform(transformed_lvs[-1, :])
    if self.skewness is True:
        model_skewness = z_list[-3].prior.transform(transformed_lvs[-3, :])
    return model_scale, model_shape, model_skewness
def _model_one_components(self,beta):
    """ Creates the structure of the model

    Parameters
    ----------
    beta : np.array
        Contains untransformed starting values for latent variables

    Returns
    ----------
    theta : np.array
        Contains the predicted values for the time series

    Y : np.array
        The observed score-difference series (self.data)

    state_vectors : np.array
        Final ability state per team after processing every match
    """
    # Transform latent variables into their constrained space.
    parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])])
    scale, shape, skewness = self._get_scale_and_shape(parm)
    # One ability state per team id, updated sequentially per match.
    state_vectors = np.zeros(shape=(self.max_team+1))
    theta = np.zeros(shape=(self.data.shape[0]))
    for t in range(0,self.data.shape[0]):
        # Prediction: constant + home ability - away ability.
        theta[t] = parm[0] + state_vectors[self.home_id[t]] - state_vectors[self.away_id[t]]
        # Score-driven update scaled by parm[1]; home and away abilities
        # move by equal and opposite amounts.
        state_vectors[self.home_id[t]] += parm[1]*self.family.score_function(self.data[t], self.link(theta[t]), scale, shape, skewness)
        state_vectors[self.away_id[t]] += -parm[1]*self.family.score_function(self.data[t], self.link(theta[t]), scale, shape, skewness)
    return theta, self.data, state_vectors
def _model_two_components(self,beta):
    """ Creates the structure of the model

    Two-component variant: predictions combine a primary ability state
    (home_id/away_id) and a secondary one (home_2_id/away_2_id), each
    with its own update scale (parm[1] and parm[2]).

    Parameters
    ----------
    beta : np.array
        Contains untransformed starting values for latent variables

    Returns
    ----------
    theta : np.array
        Contains the predicted values for the time series

    Y : np.array
        The observed score-difference series (self.data)

    state_vectors_1 : np.array
        Final primary-component ability state per team
    """
    # Transform latent variables into their constrained space.
    parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])])
    scale, shape, skewness = self._get_scale_and_shape(parm)
    state_vectors_1 = np.zeros(shape=(self.max_team+1))
    state_vectors_2 = np.zeros(shape=(self.max_team_2+1))
    theta = np.zeros(shape=(self.data.shape[0]))
    for t in range(0,self.data.shape[0]):
        # Prediction sums the constant and both components' home-minus-away
        # ability differences.
        theta[t] = parm[0] + state_vectors_2[self.home_2_id[t]] - state_vectors_2[self.away_2_id[t]] + state_vectors_1[self.home_id[t]] - state_vectors_1[self.away_id[t]]
        # Each component updates by equal and opposite amounts for the
        # home and away sides, with its own scaling parameter.
        state_vectors_1[self.home_id[t]] += parm[1]*self.family.score_function(self.data[t], self.link(theta[t]), scale, shape, skewness)
        state_vectors_1[self.away_id[t]] += -parm[1]*self.family.score_function(self.data[t], self.link(theta[t]), scale, shape, skewness)
        state_vectors_2[self.home_2_id[t]] += parm[2]*self.family.score_function(self.data[t], self.link(theta[t]), scale, shape, skewness)
        state_vectors_2[self.away_2_id[t]] += -parm[2]*self.family.score_function(self.data[t], self.link(theta[t]), scale, shape, skewness)
    return theta, self.data, state_vectors_1
def _model_abilities_one_components(self,beta):
    """ Creates the structure of the model - store abilities

    Same recursion as _model_one_components, but additionally records
    each team's ability after each of its appearances.

    Parameters
    ----------
    beta : np.array
        Contains untransformed starting values for latent variables

    Returns
    ----------
    state_vectors_store : np.array
        2d array (appearance index x team id) of per-appearance ability
        values for every team
    """
    # Transform latent variables into their constrained space.
    parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])])
    scale, shape, skewness = self._get_scale_and_shape(parm)
    state_vectors = np.zeros(shape=(self.max_team+1))
    # Rows are indexed by each team's running appearance count
    # (home_count/away_count); +50 leaves headroom beyond the maximum.
    state_vectors_store = np.zeros(shape=(int(np.max(self.home_count)+50),int(self.max_team+1)))
    theta = np.zeros(shape=(self.data.shape[0]))
    for t in range(0,self.data.shape[0]):
        theta[t] = parm[0] + state_vectors[self.home_id[t]] - state_vectors[self.away_id[t]]
        state_vectors[self.home_id[t]] += parm[1]*self.family.score_function(self.data[t], self.link(theta[t]), scale, shape, skewness)
        state_vectors[self.away_id[t]] += -parm[1]*self.family.score_function(self.data[t], self.link(theta[t]), scale, shape, skewness)
        # Record the post-update ability at this appearance, chaining
        # from the team's previous stored value.
        state_vectors_store[int(self.home_count[t]), self.home_id[t]] = state_vectors_store[max(0,int(self.home_count[t])-1), self.home_id[t]] + parm[1]*self.family.score_function(self.data[t], self.link(theta[t]), scale, shape, skewness)
        state_vectors_store[int(self.away_count[t]), self.away_id[t]] = state_vectors_store[max(0,int(self.away_count[t])-1), self.away_id[t]] -parm[1]*self.family.score_function(self.data[t], self.link(theta[t]), scale, shape, skewness)
    return state_vectors_store
def _model_abilities_two_components(self, beta):
    """ Creates the structure of the model - store abilities

    Parameters
    ----------
    beta : np.array
        Contains untransformed starting values for latent variables

    Returns
    ----------
    state_vectors_store_1, state_vectors_store_2 : np.array
        Ability histories for the first and second component, each indexed
        by (match number, team id)
    """
    parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])])
    scale, shape, skewness = self._get_scale_and_shape(parm)
    state_vectors = np.zeros(shape=(self.max_team+1))
    state_vectors_2 = np.zeros(shape=(self.max_team_2+1))
    state_vectors_store_1 = np.zeros(shape=(int(np.max(self.home_count)+50), int(self.max_team+1)))
    state_vectors_store_2 = np.zeros(shape=(int(np.max(self.home_2_count)+50), int(self.max_team_2+1)))
    theta = np.zeros(shape=(self.data.shape[0]))

    for t in range(0, self.data.shape[0]):
        home, away = self.home_id[t], self.away_id[t]
        home_2, away_2 = self.home_2_id[t], self.away_2_id[t]
        theta[t] = parm[0] + state_vectors_2[home_2] - state_vectors_2[away_2] + state_vectors[home] - state_vectors[away]
        # Compute the raw score once per match; scale it per component.
        raw_score = self.family.score_function(self.data[t], self.link(theta[t]), scale, shape, skewness)
        score_1 = parm[1]*raw_score
        score_2 = parm[2]*raw_score
        state_vectors[home] += score_1
        state_vectors[away] -= score_1
        state_vectors_2[home_2] += score_2
        state_vectors_2[away_2] -= score_2
        state_vectors_store_1[int(self.home_count[t]), home] = state_vectors_store_1[max(0, int(self.home_count[t])-1), home] + score_1
        state_vectors_store_1[int(self.away_count[t]), away] = state_vectors_store_1[max(0, int(self.away_count[t])-1), away] - score_1
        # BUG FIX: the stored second-component history previously used the
        # first component's scale (parm[1]) even though the running state
        # vector above uses parm[2]; the store now mirrors the running state.
        state_vectors_store_2[int(self.home_2_count[t]), home_2] = state_vectors_store_2[max(0, int(self.home_2_count[t])-1), home_2] + score_2
        state_vectors_store_2[int(self.away_2_count[t]), away_2] = state_vectors_store_2[max(0, int(self.away_2_count[t])-1), away_2] - score_2

    return state_vectors_store_1, state_vectors_store_2
def add_second_component(self, team_1, team_2):
    """Switch the model to two-component mode using the given columns.

    Builds ids/counts for the second component from the original dataframe,
    rebuilds the latent-variable list with the three-scale layout, and
    rebinds the model, plotting and prediction entry points to their
    two-component variants.
    """
    home_vals = self.original_dataframe[team_1].values
    away_vals = self.original_dataframe[team_2].values
    self.home_2_id, self.away_2_id = self._create_ids(home_vals, away_vals)
    self.team_strings_2 = sorted(list(set(np.append(home_vals, away_vals))))
    self.team_dict_2 = {name: idx for idx, name in enumerate(self.team_strings_2)}
    self.home_2_count, self.away_2_count = self._match_count_2()
    self.max_team_2 = max(np.max(self.home_2_id), np.max(self.away_2_id))
    self.z_no += 1
    # Rebuild the latent-variable list from scratch.
    self.latent_variables.z_list = []
    self.latent_variables.add_z('Constant', fam.Normal(0, 10, transform=None), fam.Normal(0, 3))
    for label in ('Ability Scale 1', 'Ability Scale 2'):
        self.latent_variables.add_z(label, fam.Normal(0, 1, transform=None), fam.Normal(0, 3))
    for offset, spec in enumerate(self.family.build_latent_variables()):
        self.latent_variables.add_z(spec[0], spec[1], spec[2])
        self.latent_variables.z_list[3 + offset].start = spec[3]
    self.latent_variables.z_list[0].start = self.mean_transform(np.mean(self.data))
    self._model = self._model_two_components
    self._model_abilities = self._model_abilities_two_components
    self.plot_abilities = self.plot_abilities_two_components
    self.predict = self.predict_two_components
def neg_loglik(self, beta):
    """Return the negative log-likelihood of the data under *beta*."""
    theta, observed, _ = self._model(beta)
    transformed = np.array([self.latent_variables.z_list[k].prior.transform(value)
                            for k, value in enumerate(beta)])
    scale, shape, skewness = self._get_scale_and_shape(transformed)
    return self.family.neg_loglikelihood(observed, self.link(theta), scale, shape, skewness)
def plot_abilities_one_components(self, team_ids, **kwargs):
    """Plot the ability trajectory of one or more teams.

    Parameters
    ----------
    team_ids : str, int or list
        Team name(s) (looked up in team_dict) or integer team id(s).

    figsize : tuple, optional (keyword)
        Figure size; defaults to (15, 5).

    Raises
    ------
    Exception
        If the latent variables have not been estimated yet.
    """
    import matplotlib.pyplot as plt
    import seaborn as sns  # noqa: kept for its plot-styling side effect
    figsize = kwargs.get('figsize', (15, 5))
    if self.latent_variables.estimated is False:
        raise Exception("No latent variables estimated!")
    # Compute the ability history ONCE; the original re-ran the whole
    # _model_abilities pass for every plotted team.
    abilities = self._model_abilities(self.latent_variables.get_z_values()).T
    plt.figure(figsize=figsize)
    if not isinstance(team_ids, list):
        team_ids = [team_ids]
    for team_id in team_ids:
        index = self.team_dict[team_id] if isinstance(team_id, str) else team_id
        # trim='b' drops the zero padding after the team's last match.
        plt.plot(np.trim_zeros(abilities[index], trim='b'),
                 label=self.team_strings[index])
    plt.legend()
    plt.ylabel("Power")
    plt.xlabel("Games")
    plt.show()
def plot_abilities_two_components(self, team_ids, component_id=0, **kwargs):
    """Plot ability trajectories for the selected component.

    Parameters
    ----------
    team_ids : str, int or list
        Team name(s) or integer team id(s) to plot.

    component_id : int
        0 selects the first component; any other value the second.

    figsize : tuple, optional (keyword)
        Figure size; defaults to (15, 5).

    Raises
    ------
    Exception
        If the latent variables have not been estimated yet.
    """
    import matplotlib.pyplot as plt
    import seaborn as sns  # noqa: kept for its plot-styling side effect
    figsize = kwargs.get('figsize', (15, 5))
    if component_id == 0:
        name_strings = self.team_strings
        name_dict = self.team_dict
    else:
        name_strings = self.team_strings_2
        name_dict = self.team_dict_2
    if self.latent_variables.estimated is False:
        raise Exception("No latent variables estimated!")
    # Compute the ability history ONCE; the original re-ran the whole
    # _model_abilities pass for every plotted team.
    abilities = self._model_abilities(self.latent_variables.get_z_values())[component_id].T
    plt.figure(figsize=figsize)
    if not isinstance(team_ids, list):
        team_ids = [team_ids]
    for team_id in team_ids:
        index = name_dict[team_id] if isinstance(team_id, str) else team_id
        plt.plot(np.trim_zeros(abilities[index], trim='b'),
                 label=name_strings[index])
    plt.legend()
    plt.ylabel("Power")
    plt.xlabel("Games")
    plt.show()
def predict_one_component(self, team_1, team_2, neutral=False):
    """
    Returns team 1's probability of winning

    Parameters
    ----------
    team_1, team_2 : str or int
        Team names (looked up in team_dict) or integer team ids.

    neutral : bool
        If True, drop the home-advantage constant.

    Raises
    ------
    Exception
        If the latent variables have not been estimated yet.
    """
    if self.latent_variables.estimated is False:
        raise Exception("No latent variables estimated!")
    # Compute the ability history once; the original ran _model_abilities
    # twice (once per team).
    abilities = self._model_abilities(self.latent_variables.get_z_values()).T
    if isinstance(team_1, str):
        team_1 = self.team_dict[team_1]
        team_2 = self.team_dict[team_2]
    # Latest (last non-zero-padded) ability entry for each team.
    team_1_ability = np.trim_zeros(abilities[team_1], trim='b')[-1]
    team_2_ability = np.trim_zeros(abilities[team_2], trim='b')[-1]
    t_z = self.transform_z()
    if neutral is False:
        return self.link(t_z[0] + team_1_ability - team_2_ability)
    return self.link(team_1_ability - team_2_ability)
def predict_two_components(self, team_1, team_2, team_1b, team_2b, neutral=False):
    """
    Returns team 1's probability of winning

    Parameters
    ----------
    team_1, team_2 : str or int
        First-component team names/ids.

    team_1b, team_2b : str or int
        Second-component team names/ids.

    neutral : bool
        If True, drop the home-advantage constant.

    Raises
    ------
    Exception
        If the latent variables have not been estimated yet.
    """
    if self.latent_variables.estimated is False:
        raise Exception("No latent variables estimated!")
    # Compute both ability histories once.
    abilities_1, abilities_2 = self._model_abilities(self.latent_variables.get_z_values())
    abilities_1 = abilities_1.T
    abilities_2 = abilities_2.T
    if isinstance(team_1, str):
        team_1 = self.team_dict[team_1]
        team_2 = self.team_dict[team_2]
    if isinstance(team_1b, str):
        # BUG FIX: the original looked the second-component abilities up with
        # the FIRST component's names (team_1/team_2) and team_dict; the
        # second component's ids are built from team_dict_2 (see
        # add_second_component).
        team_1b = self.team_dict_2[team_1b]
        team_2b = self.team_dict_2[team_2b]
    team_1_ability = np.trim_zeros(abilities_1[team_1], trim='b')[-1]
    team_2_ability = np.trim_zeros(abilities_1[team_2], trim='b')[-1]
    # BUG FIX: the original referenced undefined names team_1_b/team_2_b in
    # the integer-id branch, raising NameError.
    team_1_b_ability = np.trim_zeros(abilities_2[team_1b], trim='b')[-1]
    team_2_b_ability = np.trim_zeros(abilities_2[team_2b], trim='b')[-1]
    t_z = self.transform_z()
    if neutral is False:
        return self.link(t_z[0] + team_1_ability - team_2_ability + team_1_b_ability - team_2_b_ability)
    return self.link(team_1_ability - team_2_ability + team_1_b_ability - team_2_b_ability)
| |
# Copyright (c) 2014 Andrew Kerr. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for the NetApp NFS storage driver
"""
import os
import copy
import ddt
import mock
from os_brick.remotefs import remotefs as remotefs_brick
from oslo_utils import units
from cinder import exception
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake
from cinder import utils
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume.drivers import nfs
@ddt.ddt
class NetAppNfsDriverTestCase(test.TestCase):
    """Unit tests for the NetApp NFS base driver (nfs_base.NetAppNfsDriver)."""

    def setUp(self):
        # Build the driver against a mocked configuration; the root-helper
        # lookup and the remotefs client are patched out so no host access
        # happens while instantiating the driver.
        super(NetAppNfsDriverTestCase, self).setUp()
        configuration = mock.Mock()
        configuration.reserved_percentage = 0
        configuration.nfs_mount_point_base = '/mnt/test'
        # NOTE(review): duplicates the reserved_percentage assignment above;
        # harmless, but one of the two copies could be removed.
        configuration.reserved_percentage = 0
        configuration.max_over_subscription_ratio = 1.1
        kwargs = {'configuration': configuration}
        with mock.patch.object(utils, 'get_root_helper',
                               return_value=mock.Mock()):
            with mock.patch.object(remotefs_brick, 'RemoteFsClient',
                                   return_value=mock.Mock()):
                self.driver = nfs_base.NetAppNfsDriver(**kwargs)
                self.driver.db = mock.Mock()

    @mock.patch.object(nfs.NfsDriver, 'do_setup')
    @mock.patch.object(na_utils, 'check_flags')
    def test_do_setup(self, mock_check_flags, mock_super_do_setup):
        self.driver.do_setup(mock.Mock())
        self.assertTrue(mock_check_flags.called)
        self.assertTrue(mock_super_do_setup.called)

    def test_get_share_capacity_info(self):
        mock_get_capacity = self.mock_object(self.driver, '_get_capacity_info')
        mock_get_capacity.return_value = fake.CAPACITY_VALUES
        expected_total_capacity_gb = na_utils.round_down(
            fake.TOTAL_BYTES / units.Gi, '0.01')
        expected_free_capacity_gb = (na_utils.round_down(
            fake.AVAILABLE_BYTES / units.Gi, '0.01'))
        expected_reserved_percentage = round(
            self.driver.configuration.reserved_percentage)
        result = self.driver._get_share_capacity_info(fake.NFS_SHARE)
        self.assertEqual(expected_total_capacity_gb,
                         result['total_capacity_gb'])
        self.assertEqual(expected_free_capacity_gb,
                         result['free_capacity_gb'])
        self.assertEqual(expected_reserved_percentage,
                         round(result['reserved_percentage']))

    def test_get_capacity_info_ipv4_share(self):
        expected = fake.CAPACITY_VALUES
        self.driver.zapi_client = mock.Mock()
        get_capacity = self.driver.zapi_client.get_flexvol_capacity
        get_capacity.return_value = fake.CAPACITIES
        result = self.driver._get_capacity_info(fake.NFS_SHARE_IPV4)
        self.assertEqual(expected, result)
        get_capacity.assert_has_calls([
            mock.call(flexvol_path=fake.EXPORT_PATH)])

    def test_get_capacity_info_ipv6_share(self):
        expected = fake.CAPACITY_VALUES
        self.driver.zapi_client = mock.Mock()
        get_capacity = self.driver.zapi_client.get_flexvol_capacity
        get_capacity.return_value = fake.CAPACITIES
        result = self.driver._get_capacity_info(fake.NFS_SHARE_IPV6)
        self.assertEqual(expected, result)
        get_capacity.assert_has_calls([
            mock.call(flexvol_path=fake.EXPORT_PATH)])

    def test_create_volume(self):
        self.mock_object(self.driver, '_ensure_shares_mounted')
        self.mock_object(na_utils, 'get_volume_extra_specs')
        self.mock_object(self.driver, '_do_create_volume')
        self.mock_object(self.driver, '_do_qos_for_volume')
        expected = {'provider_location': fake.NFS_SHARE}
        result = self.driver.create_volume(fake.NFS_VOLUME)
        self.assertEqual(expected, result)

    def test_create_volume_no_pool(self):
        # A host string without a pool suffix must be rejected.
        volume = copy.deepcopy(fake.NFS_VOLUME)
        volume['host'] = '%s@%s' % (fake.HOST_NAME, fake.BACKEND_NAME)
        self.mock_object(self.driver, '_ensure_shares_mounted')
        self.assertRaises(exception.InvalidHost,
                          self.driver.create_volume,
                          volume)

    def test_create_volume_exception(self):
        self.mock_object(self.driver, '_ensure_shares_mounted')
        self.mock_object(na_utils, 'get_volume_extra_specs')
        mock_create = self.mock_object(self.driver, '_do_create_volume')
        mock_create.side_effect = Exception
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume,
                          fake.NFS_VOLUME)

    def test_create_volume_from_snapshot(self):
        # NOTE(review): despite its name this exercises create_cloned_volume,
        # not create_volume_from_snapshot, duplicating
        # test_create_cloned_volume below -- confirm the intended target.
        provider_location = fake.POOL_NAME
        snapshot = fake.CLONE_SOURCE
        self.mock_object(self.driver, '_clone_source_to_destination_volume',
                         mock.Mock(return_value=provider_location))
        result = self.driver.create_cloned_volume(fake.NFS_VOLUME,
                                                  snapshot)
        self.assertEqual(provider_location, result)

    def test_clone_source_to_destination_volume(self):
        self.mock_object(self.driver, '_get_volume_location', mock.Mock(
            return_value=fake.POOL_NAME))
        self.mock_object(na_utils, 'get_volume_extra_specs', mock.Mock(
            return_value=fake.EXTRA_SPECS))
        self.mock_object(
            self.driver,
            '_clone_with_extension_check')
        self.mock_object(self.driver, '_do_qos_for_volume')
        expected = {'provider_location': fake.POOL_NAME}
        result = self.driver._clone_source_to_destination_volume(
            fake.CLONE_SOURCE, fake.CLONE_DESTINATION)
        self.assertEqual(expected, result)

    def test_clone_source_to_destination_volume_with_do_qos_exception(self):
        self.mock_object(self.driver, '_get_volume_location', mock.Mock(
            return_value=fake.POOL_NAME))
        self.mock_object(na_utils, 'get_volume_extra_specs', mock.Mock(
            return_value=fake.EXTRA_SPECS))
        self.mock_object(
            self.driver,
            '_clone_with_extension_check')
        self.mock_object(self.driver, '_do_qos_for_volume', mock.Mock(
            side_effect=Exception))
        self.assertRaises(
            exception.VolumeBackendAPIException,
            self.driver._clone_source_to_destination_volume,
            fake.CLONE_SOURCE,
            fake.CLONE_DESTINATION)

    def test_clone_with_extension_check_equal_sizes(self):
        # Equal sizes: no resize of the clone should occur.
        clone_source = copy.deepcopy(fake.CLONE_SOURCE)
        clone_source['size'] = fake.VOLUME['size']
        self.mock_object(self.driver, '_clone_backing_file_for_volume')
        self.mock_object(self.driver, 'local_path')
        mock_discover = self.mock_object(self.driver,
                                         '_discover_file_till_timeout')
        mock_discover.return_value = True
        self.mock_object(self.driver, '_set_rw_permissions')
        mock_extend_volume = self.mock_object(self.driver, 'extend_volume')
        self.driver._clone_with_extension_check(clone_source, fake.NFS_VOLUME)
        self.assertEqual(0, mock_extend_volume.call_count)

    def test_clone_with_extension_check_unequal_sizes(self):
        # Source larger than destination: the clone must be extended once.
        clone_source = copy.deepcopy(fake.CLONE_SOURCE)
        clone_source['size'] = fake.VOLUME['size'] + 1
        self.mock_object(self.driver, '_clone_backing_file_for_volume')
        self.mock_object(self.driver, 'local_path')
        mock_discover = self.mock_object(self.driver,
                                         '_discover_file_till_timeout')
        mock_discover.return_value = True
        self.mock_object(self.driver, '_set_rw_permissions')
        mock_extend_volume = self.mock_object(self.driver, 'extend_volume')
        self.driver._clone_with_extension_check(clone_source, fake.NFS_VOLUME)
        self.assertEqual(1, mock_extend_volume.call_count)

    def test_clone_with_extension_check_extend_exception(self):
        # A failed extend must trigger cleanup of the cloned file.
        clone_source = copy.deepcopy(fake.CLONE_SOURCE)
        clone_source['size'] = fake.VOLUME['size'] + 1
        self.mock_object(self.driver, '_clone_backing_file_for_volume')
        self.mock_object(self.driver, 'local_path')
        mock_discover = self.mock_object(self.driver,
                                         '_discover_file_till_timeout')
        mock_discover.return_value = True
        self.mock_object(self.driver, '_set_rw_permissions')
        mock_extend_volume = self.mock_object(self.driver, 'extend_volume')
        mock_extend_volume.side_effect = Exception
        mock_cleanup = self.mock_object(self.driver,
                                        '_cleanup_volume_on_failure')
        self.assertRaises(exception.CinderException,
                          self.driver._clone_with_extension_check,
                          clone_source,
                          fake.NFS_VOLUME)
        self.assertEqual(1, mock_cleanup.call_count)

    def test_clone_with_extension_check_no_discovery(self):
        # If the cloned file never shows up on the mount, the clone fails.
        self.mock_object(self.driver, '_clone_backing_file_for_volume')
        self.mock_object(self.driver, 'local_path')
        self.mock_object(self.driver, '_set_rw_permissions')
        mock_discover = self.mock_object(self.driver,
                                         '_discover_file_till_timeout')
        mock_discover.return_value = False
        self.assertRaises(exception.CinderException,
                          self.driver._clone_with_extension_check,
                          fake.CLONE_SOURCE,
                          fake.NFS_VOLUME)

    def test_create_cloned_volume(self):
        provider_location = fake.POOL_NAME
        src_vref = fake.CLONE_SOURCE
        self.mock_object(self.driver, '_clone_source_to_destination_volume',
                         mock.Mock(return_value=provider_location))
        result = self.driver.create_cloned_volume(fake.NFS_VOLUME,
                                                  src_vref)
        self.assertEqual(provider_location, result)

    def test_do_qos_for_volume(self):
        # The base class defers QoS handling to its subclasses.
        self.assertRaises(NotImplementedError,
                          self.driver._do_qos_for_volume,
                          fake.NFS_VOLUME,
                          fake.EXTRA_SPECS)

    def test_create_snapshot(self):
        mock_clone_backing_file_for_volume = self.mock_object(
            self.driver, '_clone_backing_file_for_volume')
        self.driver.create_snapshot(fake.SNAPSHOT)
        mock_clone_backing_file_for_volume.assert_called_once_with(
            fake.SNAPSHOT['volume_name'], fake.SNAPSHOT['name'],
            fake.SNAPSHOT['volume_id'], is_snapshot=True)

    def test_cleanup_volume_on_failure(self):
        path = '%s/%s' % (fake.NFS_SHARE, fake.NFS_VOLUME['name'])
        mock_local_path = self.mock_object(self.driver, 'local_path')
        mock_local_path.return_value = path
        mock_exists_check = self.mock_object(os.path, 'exists')
        mock_exists_check.return_value = True
        mock_delete = self.mock_object(self.driver, '_delete_file_at_path')
        self.driver._cleanup_volume_on_failure(fake.NFS_VOLUME)
        mock_delete.assert_has_calls([mock.call(path)])

    def test_cleanup_volume_on_failure_no_path(self):
        # Nothing on disk: cleanup must not attempt a delete.
        self.mock_object(self.driver, 'local_path')
        mock_exists_check = self.mock_object(os.path, 'exists')
        mock_exists_check.return_value = False
        mock_delete = self.mock_object(self.driver, '_delete_file_at_path')
        self.driver._cleanup_volume_on_failure(fake.NFS_VOLUME)
        self.assertEqual(0, mock_delete.call_count)

    def test_get_export_ip_path_volume_id_provided(self):
        mock_get_host_ip = self.mock_object(self.driver, '_get_host_ip')
        mock_get_host_ip.return_value = fake.IPV4_ADDRESS
        mock_get_export_path = self.mock_object(
            self.driver, '_get_export_path')
        mock_get_export_path.return_value = fake.EXPORT_PATH
        expected = (fake.IPV4_ADDRESS, fake.EXPORT_PATH)
        result = self.driver._get_export_ip_path(fake.VOLUME_ID)
        self.assertEqual(expected, result)

    def test_get_export_ip_path_share_provided(self):
        expected = (fake.SHARE_IP, fake.EXPORT_PATH)
        result = self.driver._get_export_ip_path(share=fake.NFS_SHARE)
        self.assertEqual(expected, result)

    def test_get_export_ip_path_volume_id_and_share_provided(self):
        # When both arguments are supplied, the volume id takes precedence.
        mock_get_host_ip = self.mock_object(self.driver, '_get_host_ip')
        mock_get_host_ip.return_value = fake.IPV4_ADDRESS
        mock_get_export_path = self.mock_object(
            self.driver, '_get_export_path')
        mock_get_export_path.return_value = fake.EXPORT_PATH
        expected = (fake.IPV4_ADDRESS, fake.EXPORT_PATH)
        result = self.driver._get_export_ip_path(
            fake.VOLUME_ID, fake.NFS_SHARE)
        self.assertEqual(expected, result)

    def test_get_export_ip_path_no_args(self):
        self.assertRaises(exception.InvalidInput,
                          self.driver._get_export_ip_path)

    def test_get_host_ip(self):
        mock_get_provider_location = self.mock_object(
            self.driver, '_get_provider_location')
        mock_get_provider_location.return_value = fake.NFS_SHARE
        expected = fake.SHARE_IP
        result = self.driver._get_host_ip(fake.VOLUME_ID)
        self.assertEqual(expected, result)

    def test_get_export_path(self):
        mock_get_provider_location = self.mock_object(
            self.driver, '_get_provider_location')
        mock_get_provider_location.return_value = fake.NFS_SHARE
        expected = fake.EXPORT_PATH
        result = self.driver._get_export_path(fake.VOLUME_ID)
        self.assertEqual(expected, result)

    def test_extend_volume(self):
        new_size = 100
        volume_copy = copy.copy(fake.VOLUME)
        volume_copy['size'] = new_size
        path = '%s/%s' % (fake.NFS_SHARE, fake.NFS_VOLUME['name'])
        self.mock_object(self.driver,
                         'local_path',
                         mock.Mock(return_value=path))
        mock_resize_image_file = self.mock_object(self.driver,
                                                  '_resize_image_file')
        mock_get_volume_extra_specs = self.mock_object(
            na_utils, 'get_volume_extra_specs',
            mock.Mock(return_value=fake.EXTRA_SPECS))
        mock_do_qos_for_volume = self.mock_object(self.driver,
                                                  '_do_qos_for_volume')
        self.driver.extend_volume(fake.VOLUME, new_size)
        mock_resize_image_file.assert_called_once_with(path, new_size)
        mock_get_volume_extra_specs.assert_called_once_with(fake.VOLUME)
        mock_do_qos_for_volume.assert_called_once_with(volume_copy,
                                                       fake.EXTRA_SPECS,
                                                       cleanup=False)

    def test_extend_volume_resize_error(self):
        # Resize failure: no QoS work should be attempted afterwards.
        new_size = 100
        volume_copy = copy.copy(fake.VOLUME)
        volume_copy['size'] = new_size
        path = '%s/%s' % (fake.NFS_SHARE, fake.NFS_VOLUME['name'])
        self.mock_object(self.driver,
                         'local_path',
                         mock.Mock(return_value=path))
        mock_resize_image_file = self.mock_object(
            self.driver, '_resize_image_file',
            mock.Mock(side_effect=netapp_api.NaApiError))
        mock_get_volume_extra_specs = self.mock_object(
            na_utils, 'get_volume_extra_specs',
            mock.Mock(return_value=fake.EXTRA_SPECS))
        mock_do_qos_for_volume = self.mock_object(self.driver,
                                                  '_do_qos_for_volume')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.extend_volume,
                          fake.VOLUME,
                          new_size)
        mock_resize_image_file.assert_called_once_with(path, new_size)
        self.assertFalse(mock_get_volume_extra_specs.called)
        self.assertFalse(mock_do_qos_for_volume.called)

    def test_extend_volume_qos_error(self):
        # QoS failure after a successful resize still surfaces as a backend
        # API exception.
        new_size = 100
        volume_copy = copy.copy(fake.VOLUME)
        volume_copy['size'] = new_size
        path = '%s/%s' % (fake.NFS_SHARE, fake.NFS_VOLUME['name'])
        self.mock_object(self.driver,
                         'local_path',
                         mock.Mock(return_value=path))
        mock_resize_image_file = self.mock_object(self.driver,
                                                  '_resize_image_file')
        mock_get_volume_extra_specs = self.mock_object(
            na_utils, 'get_volume_extra_specs',
            mock.Mock(return_value=fake.EXTRA_SPECS))
        mock_do_qos_for_volume = self.mock_object(
            self.driver, '_do_qos_for_volume',
            mock.Mock(side_effect=netapp_api.NaApiError))
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.extend_volume,
                          fake.VOLUME,
                          new_size)
        mock_resize_image_file.assert_called_once_with(path, new_size)
        mock_get_volume_extra_specs.assert_called_once_with(fake.VOLUME)
        mock_do_qos_for_volume.assert_called_once_with(volume_copy,
                                                       fake.EXTRA_SPECS,
                                                       cleanup=False)

    def test_is_share_clone_compatible(self):
        self.assertRaises(NotImplementedError,
                          self.driver._is_share_clone_compatible,
                          fake.NFS_VOLUME,
                          fake.NFS_SHARE)

    # The clone-capacity scenarios below assume a 20 GiB share with
    # 12 GiB available.
    @ddt.data(
        {'size': 12, 'thin': False, 'over': 1.0, 'res': 0, 'expected': True},
        {'size': 12, 'thin': False, 'over': 1.0, 'res': 5, 'expected': False},
        {'size': 12, 'thin': True, 'over': 1.0, 'res': 5, 'expected': False},
        {'size': 12, 'thin': True, 'over': 1.1, 'res': 5, 'expected': True},
        {'size': 240, 'thin': True, 'over': 20.0, 'res': 0, 'expected': True},
        {'size': 241, 'thin': True, 'over': 20.0, 'res': 0, 'expected': False},
    )
    @ddt.unpack
    def test_share_has_space_for_clone(self, size, thin, over, res, expected):
        total_bytes = 20 * units.Gi
        available_bytes = 12 * units.Gi
        with mock.patch.object(self.driver,
                               '_get_capacity_info',
                               return_value=(
                                   total_bytes, available_bytes)):
            with mock.patch.object(self.driver,
                                   'max_over_subscription_ratio',
                                   over):
                with mock.patch.object(self.driver,
                                       'reserved_percentage',
                                       res):
                    result = self.driver._share_has_space_for_clone(
                        fake.NFS_SHARE,
                        size,
                        thin=thin)
        self.assertEqual(expected, result)

    @ddt.data(
        {'size': 12, 'thin': False, 'over': 1.0, 'res': 0, 'expected': True},
        {'size': 12, 'thin': False, 'over': 1.0, 'res': 5, 'expected': False},
        {'size': 12, 'thin': True, 'over': 1.0, 'res': 5, 'expected': False},
        {'size': 12, 'thin': True, 'over': 1.1, 'res': 5, 'expected': True},
        {'size': 240, 'thin': True, 'over': 20.0, 'res': 0, 'expected': True},
        {'size': 241, 'thin': True, 'over': 20.0, 'res': 0, 'expected': False},
    )
    @ddt.unpack
    @mock.patch.object(nfs_base.NetAppNfsDriver, '_get_capacity_info')
    def test_share_has_space_for_clone2(self,
                                        mock_get_capacity,
                                        size, thin, over, res, expected):
        # Same scenarios as above, but patching at the class level via
        # the decorator instead of on the instance.
        total_bytes = 20 * units.Gi
        available_bytes = 12 * units.Gi
        mock_get_capacity.return_value = (total_bytes, available_bytes)
        with mock.patch.object(self.driver,
                               'max_over_subscription_ratio',
                               over):
            with mock.patch.object(self.driver,
                                   'reserved_percentage',
                                   res):
                result = self.driver._share_has_space_for_clone(
                    fake.NFS_SHARE,
                    size,
                    thin=thin)
        self.assertEqual(expected, result)
| |
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2011 Eldar Nugaev
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from lxml import etree
import webob
from nova.api.openstack.compute.contrib import floating_ips
from nova.api.openstack import extensions
from nova import compute
from nova.compute import utils as compute_utils
from nova import context
from nova import db
from nova import exception
from nova import network
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_network
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'


def network_api_get_floating_ip(self, context, id):
    """Stub: return a canned, unassociated floating ip record."""
    return {
        'id': 1,
        'address': '10.10.10.10',
        'pool': 'nova',
        'fixed_ip_id': None,
    }
def network_api_get_floating_ip_by_address(self, context, address):
    """Stub: return a canned floating ip record tied to fixed ip 10."""
    return {
        'id': 1,
        'address': '10.10.10.10',
        'pool': 'nova',
        'fixed_ip_id': 10,
    }
def network_api_get_floating_ips_by_project(self, context):
    """Stub: one associated and one unassociated floating ip."""
    associated = {
        'id': 1,
        'address': '10.10.10.10',
        'pool': 'nova',
        'fixed_ip': {'address': '10.0.0.1',
                     'instance': {'uuid': FAKE_UUID}},
    }
    unassociated = {
        'id': 2,
        'pool': 'nova',
        'interface': 'eth0',
        'address': '10.10.10.11',
        'fixed_ip': None,
    }
    return [associated, unassociated]
def compute_api_get(self, context, instance_id):
    """Stub: minimal instance record echoing the requested id."""
    return {
        'uuid': FAKE_UUID,
        'id': instance_id,
        'instance_type_id': 1,
        'host': 'bob',
    }
def network_api_allocate(self, context):
    """Stub: always hand out the same floating ip address."""
    return '10.10.10.10'
def network_api_release(self, context, address):
    """Stub: releasing a floating ip is a no-op."""
def compute_api_associate(self, context, instance_id, address):
    """Stub: associating an address with an instance is a no-op."""
def network_api_associate(self, context, floating_address, fixed_address):
    """Stub: associating floating and fixed addresses is a no-op."""
def network_api_disassociate(self, context, instance, floating_address):
    """Stub: disassociating a floating address is a no-op."""
def fake_instance_get(context, instance_id):
    """Stub db.instance_get: fixed record with a fresh uuid per call."""
    return {
        "id": 1,
        "uuid": uuid.uuid4(),
        "name": 'fake',
        "user_id": 'fakeuser',
        "project_id": '123',
    }
def stub_nw_info(stubs):
    """Build a replacement for compute_utils.get_nw_info_for_instance."""
    def _get_nw_info(instance):
        return fake_network.fake_get_instance_nw_info(stubs)
    return _get_nw_info
def get_instance_by_floating_ip_addr(self, context, address):
    """Stub: no instance is associated with any floating address."""
    return None
class FloatingIpTest(test.TestCase):
floating_ip = "10.10.10.10"
floating_ip_2 = "10.10.10.11"
def _create_floating_ips(self, floating_ips=None):
"""Create a floating ip object."""
if floating_ips is None:
floating_ips = [self.floating_ip]
elif not isinstance(floating_ips, (list, tuple)):
floating_ips = [floating_ips]
def make_ip_dict(ip):
"""Shortcut for creating floating ip dict."""
return
dict_ = {'pool': 'nova', 'host': 'fake_host'}
return db.floating_ip_bulk_create(
self.context, [dict(address=ip, **dict_) for ip in floating_ips],
)
    def _delete_floating_ip(self):
        """Remove the floating ip created in setUp from the db."""
        db.floating_ip_destroy(self.context, self.floating_ip)
    def setUp(self):
        """Stub out compute/network APIs and build the controllers under test."""
        super(FloatingIpTest, self).setUp()
        self.stubs.Set(compute.api.API, "get",
                       compute_api_get)
        self.stubs.Set(network.api.API, "get_floating_ip",
                       network_api_get_floating_ip)
        self.stubs.Set(network.api.API, "get_floating_ip_by_address",
                       network_api_get_floating_ip_by_address)
        self.stubs.Set(network.api.API, "get_floating_ips_by_project",
                       network_api_get_floating_ips_by_project)
        self.stubs.Set(network.api.API, "release_floating_ip",
                       network_api_release)
        self.stubs.Set(network.api.API, "disassociate_floating_ip",
                       network_api_disassociate)
        self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
                       get_instance_by_floating_ip_addr)
        self.stubs.Set(compute_utils, "get_nw_info_for_instance",
                       stub_nw_info(self.stubs))
        # Load only the Floating_ips extension into the test wsgi app.
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Floating_ips'])
        fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
        self.stubs.Set(db, 'instance_get',
                       fake_instance_get)
        self.context = context.get_admin_context()
        # Real db records back several tests; removed again in tearDown.
        self._create_floating_ips()
        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        self.controller = floating_ips.FloatingIPController()
        self.manager = floating_ips.FloatingIPActionController(self.ext_mgr)
    def tearDown(self):
        """Remove the db records created in setUp."""
        self._delete_floating_ip()
        super(FloatingIpTest, self).tearDown()
    def test_translate_floating_ip_view(self):
        """Translate a real db-backed floating ip into the API view."""
        floating_ip_address = self.floating_ip
        floating_ip = db.floating_ip_get_by_address(self.context,
                                                    floating_ip_address)
        # NOTE(vish): network_get uses the id not the address
        floating_ip = db.floating_ip_get(self.context, floating_ip['id'])
        self.controller._normalize_ip(floating_ip)
        view = floating_ips._translate_floating_ip_view(floating_ip)
        self.assertIn('floating_ip', view)
        self.assertTrue(view['floating_ip']['id'])
        self.assertEqual(view['floating_ip']['ip'], self.floating_ip)
        self.assertEqual(view['floating_ip']['fixed_ip'], None)
        self.assertEqual(view['floating_ip']['instance_id'], None)
def test_translate_floating_ip_view_dict(self):
floating_ip = {'id': 0, 'address': '10.0.0.10', 'pool': 'nova',
'fixed_ip': None}
self.controller._normalize_ip(floating_ip)
view = floating_ips._translate_floating_ip_view(floating_ip)
self.assertIn('floating_ip', view)
    def test_floating_ips_list(self):
        """index() renders both project ips from the stubbed network API."""
        req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips')
        res_dict = self.controller.index(req)
        # Mirrors network_api_get_floating_ips_by_project: ip 1 is
        # associated with an instance, ip 2 is not.
        response = {'floating_ips': [{'instance_id': FAKE_UUID,
                                      'ip': '10.10.10.10',
                                      'pool': 'nova',
                                      'fixed_ip': '10.0.0.1',
                                      'id': 1},
                                     {'instance_id': None,
                                      'ip': '10.10.10.11',
                                      'pool': 'nova',
                                      'fixed_ip': None,
                                      'id': 2}]}
        self.assertEqual(res_dict, response)
    def test_floating_ip_release_nonexisting(self):
        """DELETE on an unknown id yields a 404 with an itemNotFound body."""
        def fake_get_floating_ip(*args, **kwargs):
            # NOTE(review): ``id`` here is the builtin, not the requested
            # id -- presumably the API layer formats the 404 body from the
            # URL id; confirm against the controller.
            raise exception.FloatingIpNotFound(id=id)
        self.stubs.Set(network.api.API, "get_floating_ip",
                       fake_get_floating_ip)
        req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips/9876')
        req.method = 'DELETE'
        res = req.get_response(fakes.wsgi_app(init_only=('os-floating-ips',)))
        self.assertEqual(res.status_int, 404)
        expected_msg = ('{"itemNotFound": {"message": "Floating ip not found '
                        'for id 9876", "code": 404}}')
        self.assertEqual(res.body, expected_msg)
def test_floating_ip_release_race_cond(self):
    """Release still returns 202 when the ip is disassociated concurrently.

    The disassociate stub raises FloatingIpNotAssociated to simulate
    another request winning the race; delete must swallow it.
    """
    def fake_get_floating_ip(*args, **kwargs):
        return {'fixed_ip_id': 1, 'address': self.floating_ip}

    def fake_get_instance_by_floating_ip_addr(*args, **kwargs):
        return 'test-inst'

    def fake_disassociate_floating_ip(*args, **kwargs):
        raise exception.FloatingIpNotAssociated(args[3])

    self.stubs.Set(network.api.API, "get_floating_ip",
                   fake_get_floating_ip)
    self.stubs.Set(floating_ips, "get_instance_by_floating_ip_addr",
                   fake_get_instance_by_floating_ip_addr)
    self.stubs.Set(floating_ips, "disassociate_floating_ip",
                   fake_disassociate_floating_ip)
    req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips/1')
    req.method = 'DELETE'
    res = req.get_response(fakes.wsgi_app(init_only=('os-floating-ips',)))
    self.assertEqual(res.status_int, 202)
def test_floating_ip_show(self):
    """Show returns the translated view for an unassociated floating ip."""
    req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips/1')
    res_dict = self.controller.show(req, 1)
    self.assertEqual(res_dict['floating_ip']['id'], 1)
    self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10')
    # assertIsNone is clearer than assertEqual(..., None).
    self.assertIsNone(res_dict['floating_ip']['instance_id'])
def test_floating_ip_show_not_found(self):
    """GET on an unknown id maps FloatingIpNotFound to HTTP 404."""
    def fake_get_floating_ip(*args, **kwargs):
        raise exception.FloatingIpNotFound(id='fake')

    self.stubs.Set(network.api.API, "get_floating_ip",
                   fake_get_floating_ip)
    req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips/9876')
    res = req.get_response(fakes.wsgi_app(init_only=('os-floating-ips',)))
    self.assertEqual(res.status_int, 404)
    # The message is built from the URL id, not the fake's 'fake' id.
    expected_msg = ('{"itemNotFound": {"message": "Floating ip not found '
                    'for id 9876", "code": 404}}')
    self.assertEqual(res.body, expected_msg)
def test_show_associated_floating_ip(self):
    """Show exposes the fixed ip and instance uuid of an associated ip."""
    def get_floating_ip(self, context, id):
        # Fake an ip already associated with a fixed ip on an instance.
        return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
                'fixed_ip': {'address': '10.0.0.1',
                             'instance': {'uuid': FAKE_UUID}}}

    self.stubs.Set(network.api.API, "get_floating_ip", get_floating_ip)
    req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips/1')
    res_dict = self.controller.show(req, 1)
    self.assertEqual(res_dict['floating_ip']['id'], 1)
    self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10')
    self.assertEqual(res_dict['floating_ip']['fixed_ip'], '10.0.0.1')
    self.assertEqual(res_dict['floating_ip']['instance_id'], FAKE_UUID)
def test_recreation_of_floating_ip(self):
    """An address can be created again after it has been destroyed."""
    self._delete_floating_ip()
    self._create_floating_ips()
def test_floating_ip_in_bulk_creation(self):
    """All addresses passed to bulk create end up in the database."""
    self._delete_floating_ip()
    addresses = [self.floating_ip, self.floating_ip_2]
    self._create_floating_ips(addresses)
    stored = [ip['address'] for ip in db.floating_ip_get_all(self.context)]
    for address in addresses:
        self.assertIn(address, stored)
def test_fail_floating_ip_in_bulk_creation(self):
    """Bulk create fails atomically on a duplicate of the setUp address.

    floating_ip already exists (created in setUp), so the bulk create
    raises and floating_ip_2 must NOT have been inserted.
    """
    self.assertRaises(exception.FloatingIpExists,
                      self._create_floating_ips,
                      [self.floating_ip, self.floating_ip_2])
    all_ips = db.floating_ip_get_all(self.context)
    ip_list = [ip['address'] for ip in all_ips]
    self.assertIn(self.floating_ip, ip_list)
    self.assertNotIn(self.floating_ip_2, ip_list)
def test_floating_ip_allocate_no_free_ips(self):
    """Pool exhaustion maps NoMoreFloatingIps to HTTPNotFound."""
    def fake_allocate(*args, **kwargs):
        raise exception.NoMoreFloatingIps()

    self.stubs.Set(network.api.API, "allocate_floating_ip", fake_allocate)
    req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips')
    ex = self.assertRaises(webob.exc.HTTPNotFound,
                           self.controller.create, req)
    self.assertIn('No more floating ips', ex.explanation)
def test_floating_ip_allocate_no_free_ips_pool(self):
    """Exhaustion of a named pool includes the pool name in the 404."""
    def fake_allocate(*args, **kwargs):
        raise exception.NoMoreFloatingIps()

    self.stubs.Set(network.api.API, "allocate_floating_ip", fake_allocate)
    req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips')
    ex = self.assertRaises(webob.exc.HTTPNotFound,
                           self.controller.create, req,
                           {'pool': 'non_existant_pool'})
    self.assertIn('No more floating ips in pool non_existant_pool',
                  ex.explanation)
def test_floating_ip_allocate(self):
    """POST with no body allocates an ip and returns its translated view."""
    def fake_allocate(*args, **kwargs):
        pass

    def fake_get_by_address(*args, **kwargs):
        return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova'}

    self.stubs.Set(network.api.API, "allocate_floating_ip",
                   fake_allocate)
    self.stubs.Set(network.api.API, "get_floating_ip_by_address",
                   fake_get_by_address)
    req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips')
    res_dict = self.controller.create(req)
    expected = {
        "id": 1,
        "instance_id": None,
        "ip": "10.10.10.10",
        "fixed_ip": None,
        "pool": 'nova'}
    self.assertEqual(res_dict['floating_ip'], expected)
def test_floating_ip_release(self):
    """Delete of an existing ip completes without raising."""
    req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips/1')
    self.controller.delete(req, 1)
def test_floating_ip_associate(self):
    """addFloatingIp associates the ip and returns 202."""
    fixed_address = '192.168.1.100'

    def fake_associate_floating_ip(*args, **kwargs):
        self.assertEqual(fixed_address, kwargs['fixed_address'])

    self.stubs.Set(network.api.API, "associate_floating_ip",
                   fake_associate_floating_ip)
    body = dict(addFloatingIp=dict(address=self.floating_ip))
    req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
    rsp = self.manager._add_floating_ip(req, 'test_inst', body)
    # assertEqual reports the actual status on failure, unlike the old
    # assertTrue(rsp.status_int == 202) which only says "False is not true".
    self.assertEqual(202, rsp.status_int)
def test_not_extended_floating_ip_associate_fixed(self):
    """fixed_address in the body is ignored without the extended extension.

    The stub asserts the network API receives the allocated address,
    not the one requested in the body.
    """
    fixed_address_requested = '192.168.1.101'
    fixed_address_allocated = '192.168.1.100'

    def fake_associate_floating_ip(*args, **kwargs):
        self.assertEqual(fixed_address_allocated,
                         kwargs['fixed_address'])

    self.stubs.Set(network.api.API, "associate_floating_ip",
                   fake_associate_floating_ip)
    body = dict(addFloatingIp=dict(address=self.floating_ip,
                                   fixed_address=fixed_address_requested))
    req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
    rsp = self.manager._add_floating_ip(req, 'test_inst', body)
    # assertEqual gives the actual status in the failure message.
    self.assertEqual(202, rsp.status_int)
def test_associate_not_allocated_floating_ip_to_instance(self):
    """Associating an unallocated ip maps NotAuthorized to a 404 response."""
    def fake_associate_floating_ip(self, context, instance,
                                   floating_address, fixed_address,
                                   affect_auto_assigned=False):
        raise exception.NotAuthorized()

    self.stubs.Set(network.api.API, "associate_floating_ip",
                   fake_associate_floating_ip)
    floating_ip = '10.10.10.11'
    body = dict(addFloatingIp=dict(address=floating_ip))
    # Goes through the full WSGI stack so error translation is exercised.
    req = webob.Request.blank('/v2/fake/servers/test_inst/action')
    req.method = "POST"
    req.body = jsonutils.dumps(body)
    req.headers["content-type"] = "application/json"
    resp = req.get_response(fakes.wsgi_app(init_only=('servers',)))
    res_dict = jsonutils.loads(resp.body)
    self.assertEqual(resp.status_int, 404)
    self.assertEqual(res_dict['itemNotFound']['message'],
                     "floating ip not found")
def test_floating_ip_disassociate(self):
    """removeFloatingIp disassociates the ip and returns 202."""
    def get_instance_by_floating_ip_addr(self, context, address):
        if address == '10.10.10.10':
            return 'test_inst'

    self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
                   get_instance_by_floating_ip_addr)
    body = dict(removeFloatingIp=dict(address='10.10.10.10'))
    req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
    rsp = self.manager._remove_floating_ip(req, 'test_inst', body)
    # assertEqual reports the actual status on failure, unlike
    # assertTrue(rsp.status_int == 202).
    self.assertEqual(202, rsp.status_int)
def test_floating_ip_disassociate_missing(self):
    """Removing an ip that is not associated is a 422."""
    body = dict(removeFloatingIp=dict(address='10.10.10.10'))
    req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
    self.assertRaises(webob.exc.HTTPUnprocessableEntity,
                      self.manager._remove_floating_ip,
                      req, 'test_inst', body)
def test_floating_ip_associate_non_existent_ip(self):
    """Associating an unknown address maps NotFoundForAddress to a 404."""
    def fake_network_api_associate(self, context, instance,
                                   floating_address=None,
                                   fixed_address=None):
        # Renamed from `floating_ips`, which shadowed the module under
        # test inside this fake.
        known_addresses = ["10.10.10.10", "10.10.10.11"]
        if floating_address not in known_addresses:
            raise exception.FloatingIpNotFoundForAddress(
                address=floating_address)

    self.stubs.Set(network.api.API, "associate_floating_ip",
                   fake_network_api_associate)
    body = dict(addFloatingIp=dict(address='1.1.1.1'))
    req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.manager._add_floating_ip,
                      req, 'test_inst', body)
def test_floating_ip_disassociate_non_existent_ip(self):
    """Disassociating an unknown address maps NotFoundForAddress to 404."""
    def network_api_get_floating_ip_by_address(self, context,
                                               floating_address):
        # Renamed from `floating_ips`, which shadowed the module under
        # test inside this fake.
        known_addresses = ["10.10.10.10", "10.10.10.11"]
        if floating_address not in known_addresses:
            raise exception.FloatingIpNotFoundForAddress(
                address=floating_address)

    self.stubs.Set(network.api.API, "get_floating_ip_by_address",
                   network_api_get_floating_ip_by_address)
    body = dict(removeFloatingIp=dict(address='1.1.1.1'))
    req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.manager._remove_floating_ip,
                      req, 'test_inst', body)
def test_floating_ip_disassociate_wrong_instance_uuid(self):
    """Removing an ip from a server that does not own it is a 422."""
    def get_instance_by_floating_ip_addr(self, context, address):
        if address == '10.10.10.10':
            return 'test_inst'

    self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
                   get_instance_by_floating_ip_addr)
    # The URL targets a different server than the ip's owner.
    wrong_uuid = 'aaaaaaaa-ffff-ffff-ffff-aaaaaaaaaaaa'
    body = dict(removeFloatingIp=dict(address='10.10.10.10'))
    req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
    self.assertRaises(webob.exc.HTTPUnprocessableEntity,
                      self.manager._remove_floating_ip,
                      req, wrong_uuid, body)
def test_floating_ip_disassociate_wrong_instance_id(self):
    """Removal fails with 422 when the ip belongs to another instance."""
    def get_instance_by_floating_ip_addr(self, context, address):
        # The ip is owned by 'wrong_inst', not the 'test_inst' in the URL.
        if address == '10.10.10.10':
            return 'wrong_inst'

    self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
                   get_instance_by_floating_ip_addr)
    body = dict(removeFloatingIp=dict(address='10.10.10.10'))
    req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
    self.assertRaises(webob.exc.HTTPUnprocessableEntity,
                      self.manager._remove_floating_ip,
                      req, 'test_inst', body)
def test_floating_ip_disassociate_auto_assigned(self):
    """Disassociating an auto-assigned ip is forbidden (403)."""
    def fake_get_floating_ip_addr_auto_assigned(self, context, address):
        return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
                'fixed_ip_id': 10, 'auto_assigned': 1}

    def get_instance_by_floating_ip_addr(self, context, address):
        if address == '10.10.10.10':
            return 'test_inst'

    def network_api_disassociate(self, context, instance,
                                 floating_address):
        raise exception.CannotDisassociateAutoAssignedFloatingIP()

    self.stubs.Set(network.api.API, "get_floating_ip_by_address",
                   fake_get_floating_ip_addr_auto_assigned)
    self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
                   get_instance_by_floating_ip_addr)
    self.stubs.Set(network.api.API, "disassociate_floating_ip",
                   network_api_disassociate)
    body = dict(removeFloatingIp=dict(address='10.10.10.10'))
    req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
    self.assertRaises(webob.exc.HTTPForbidden,
                      self.manager._remove_floating_ip,
                      req, 'test_inst', body)
def test_floating_ip_disassociate_map_authorization_exc(self):
    """NotAuthorized from the network API maps to HTTPForbidden."""
    def fake_get_floating_ip_addr_auto_assigned(self, context, address):
        return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
                'fixed_ip_id': 10, 'auto_assigned': 1}

    def get_instance_by_floating_ip_addr(self, context, address):
        if address == '10.10.10.10':
            return 'test_inst'

    def network_api_disassociate(self, context, instance, address):
        raise exception.NotAuthorized()

    self.stubs.Set(network.api.API, "get_floating_ip_by_address",
                   fake_get_floating_ip_addr_auto_assigned)
    self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
                   get_instance_by_floating_ip_addr)
    self.stubs.Set(network.api.API, "disassociate_floating_ip",
                   network_api_disassociate)
    body = dict(removeFloatingIp=dict(address='10.10.10.10'))
    req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
    self.assertRaises(webob.exc.HTTPForbidden,
                      self.manager._remove_floating_ip,
                      req, 'test_inst', body)
# The following tests exercise malformed request bodies: wrong or missing
# parameters must produce HTTPBadRequest rather than a server error.
def test_bad_address_param_in_remove_floating_ip(self):
    """A removeFloatingIp body without 'address' is a 400."""
    body = dict(removeFloatingIp=dict(badparam='11.0.0.1'))
    req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.manager._remove_floating_ip, req, 'test_inst',
                      body)
def test_missing_dict_param_in_remove_floating_ip(self):
    """A removeFloatingIp value that is not a dict is a 400."""
    body = dict(removeFloatingIp='11.0.0.1')
    req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.manager._remove_floating_ip, req, 'test_inst',
                      body)
def test_missing_dict_param_in_add_floating_ip(self):
    """An addFloatingIp value that is not a dict is a 400."""
    body = dict(addFloatingIp='11.0.0.1')
    req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.manager._add_floating_ip, req, 'test_inst',
                      body)
class ExtendedFloatingIpTest(test.TestCase):
    """os-floating-ips behaviour with os-extended-floating-ips loaded.

    With the extension enabled, addFloatingIp honours the optional
    fixed_address argument instead of ignoring it.
    """

    floating_ip = "10.10.10.10"
    floating_ip_2 = "10.10.10.11"

    def _create_floating_ips(self, floating_ips=None):
        """Create floating ip records directly in the database."""
        if floating_ips is None:
            floating_ips = [self.floating_ip]
        elif not isinstance(floating_ips, (list, tuple)):
            floating_ips = [floating_ips]
        # The dead `make_ip_dict` helper (its body was a bare `return` and
        # it was never called) has been removed; the bulk-create call
        # below builds the record dicts directly.
        dict_ = {'pool': 'nova', 'host': 'fake_host'}
        return db.floating_ip_bulk_create(
            self.context, [dict(address=ip, **dict_) for ip in floating_ips],
        )

    def _delete_floating_ip(self):
        """Remove the default floating ip created by setUp()."""
        db.floating_ip_destroy(self.context, self.floating_ip)

    def setUp(self):
        super(ExtendedFloatingIpTest, self).setUp()
        # Stub every compute/network API entry point the controllers touch.
        self.stubs.Set(compute.api.API, "get",
                       compute_api_get)
        self.stubs.Set(network.api.API, "get_floating_ip",
                       network_api_get_floating_ip)
        self.stubs.Set(network.api.API, "get_floating_ip_by_address",
                       network_api_get_floating_ip_by_address)
        self.stubs.Set(network.api.API, "get_floating_ips_by_project",
                       network_api_get_floating_ips_by_project)
        self.stubs.Set(network.api.API, "release_floating_ip",
                       network_api_release)
        self.stubs.Set(network.api.API, "disassociate_floating_ip",
                       network_api_disassociate)
        self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
                       get_instance_by_floating_ip_addr)
        self.stubs.Set(compute_utils, "get_nw_info_for_instance",
                       stub_nw_info(self.stubs))
        # Load both the base and the extended floating ip extensions.
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Floating_ips', 'Extended_floating_ips'])
        fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
        self.stubs.Set(db, 'instance_get',
                       fake_instance_get)
        self.context = context.get_admin_context()
        self._create_floating_ips()
        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        self.ext_mgr.extensions['os-floating-ips'] = True
        self.ext_mgr.extensions['os-extended-floating-ips'] = True
        self.controller = floating_ips.FloatingIPController()
        self.manager = floating_ips.FloatingIPActionController(self.ext_mgr)

    def tearDown(self):
        self._delete_floating_ip()
        super(ExtendedFloatingIpTest, self).tearDown()

    def test_extended_floating_ip_associate_fixed(self):
        """The requested fixed_address is passed through to the network API."""
        fixed_address = '192.168.1.101'

        def fake_associate_floating_ip(*args, **kwargs):
            self.assertEqual(fixed_address, kwargs['fixed_address'])

        self.stubs.Set(network.api.API, "associate_floating_ip",
                       fake_associate_floating_ip)
        body = dict(addFloatingIp=dict(address=self.floating_ip,
                                       fixed_address=fixed_address))
        req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
        rsp = self.manager._add_floating_ip(req, 'test_inst', body)
        # assertEqual reports the actual status on failure.
        self.assertEqual(202, rsp.status_int)

    def test_extended_floating_ip_associate_fixed_not_allocated(self):
        """Requesting a fixed address the instance does not have is a 400."""
        def fake_associate_floating_ip(*args, **kwargs):
            pass

        self.stubs.Set(network.api.API, "associate_floating_ip",
                       fake_associate_floating_ip)
        body = dict(addFloatingIp=dict(address=self.floating_ip,
                                       fixed_address='11.11.11.11'))
        req = webob.Request.blank('/v2/fake/servers/test_inst/action')
        req.method = "POST"
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        resp = req.get_response(fakes.wsgi_app(init_only=('servers',)))
        res_dict = jsonutils.loads(resp.body)
        self.assertEqual(resp.status_int, 400)
        self.assertEqual(res_dict['badRequest']['message'],
                         "Specified fixed address not assigned to instance")
class FloatingIpSerializerTest(test.TestCase):
    """XML serialization of single and list floating ip views."""

    def test_default_serializer(self):
        """A single view serializes to one node with string attributes."""
        ip_view = dict(instance_id=1,
                       ip='10.10.10.10',
                       fixed_ip='10.0.0.1',
                       id=1)
        serializer = floating_ips.FloatingIPTemplate()
        tree = etree.fromstring(
            serializer.serialize(dict(floating_ip=ip_view)))
        self.assertEqual('floating_ip', tree.tag)
        # Every value becomes a string attribute on the root node.
        for attr, want in [('instance_id', '1'),
                           ('ip', '10.10.10.10'),
                           ('fixed_ip', '10.0.0.1'),
                           ('id', '1')]:
            self.assertEqual(want, tree.get(attr))

    def test_index_serializer(self):
        """A list of views serializes to one child node per floating ip."""
        ip_views = [dict(instance_id=1,
                         ip='10.10.10.10',
                         fixed_ip='10.0.0.1',
                         id=1),
                    dict(instance_id=None,
                         ip='10.10.10.11',
                         fixed_ip=None,
                         id=2)]
        serializer = floating_ips.FloatingIPsTemplate()
        tree = etree.fromstring(
            serializer.serialize(dict(floating_ips=ip_views)))
        self.assertEqual('floating_ips', tree.tag)
        self.assertEqual(2, len(tree))
        # None values serialize to the literal string 'None'.
        expected = [('1', '10.10.10.10', '10.0.0.1', '1'),
                    ('None', '10.10.10.11', 'None', '2')]
        for node, (inst, ip, fixed, id_) in zip(tree, expected):
            self.assertEqual('floating_ip', node.tag)
            self.assertEqual(inst, node.get('instance_id'))
            self.assertEqual(ip, node.get('ip'))
            self.assertEqual(fixed, node.get('fixed_ip'))
            self.assertEqual(id_, node.get('id'))
| |
#!/usr/bin/env python3
import argparse
import datetime
import glob
import inspect
import json
import os
import pandas
import subprocess
import sys
import codespeed_upload
def get_script_dir():
    """Return the absolute directory containing this script file."""
    # inspect.getabsfile on one of our own functions resolves the path of
    # the module that defines it, even when invoked via a relative path.
    this_file = inspect.getabsfile(get_script_dir)
    return os.path.dirname(this_file)
# Script-level configuration and CLI argument parsing.  Defaults assume an
# FStar checkout next to this script and a local codespeed instance.
SCRIPTDIR = get_script_dir()
DEFAULT_REPO = os.path.join(SCRIPTDIR, 'FStar') ## TODO: what should this be
DEFAULT_BENCHMARK_RUN_SCRIPT = './bin/run_benchmark.sh -c' ## TODO: what should this be
DEFAULT_BRANCH = 'master'
ENVIRONMENT = 'bench_machine'
CODESPEED_URL = 'http://localhost:8070/'
parser = argparse.ArgumentParser(description='Run FStar benchmarks across multiple git commits')
parser.add_argument('outdir', type=str, help='directory of output')
parser.add_argument('--benchmark_run_script', type=str, help='benchmark run script', default=DEFAULT_BENCHMARK_RUN_SCRIPT)
parser.add_argument('--repo', type=str, help='local location of Fstar repo (default: %s)'%DEFAULT_REPO, default=DEFAULT_REPO)
parser.add_argument('--repo_branch', type=str, help='git branch for the compiler (default: %s)'%DEFAULT_BRANCH, default=DEFAULT_BRANCH)
parser.add_argument('--repo_pull', action='store_true', help="do a pull on the git repo before selecting hashes", default=False)
parser.add_argument('--repo_reset_hard', action='store_true', help="after pulling a branch, reset it hard to the origin. Can need this for remote branches where they have been force pushed", default=False)
parser.add_argument('--commit_after', type=str, help='select commits after the specified date (e.g. 2017-10-02)', default=None)
parser.add_argument('--commit_before', type=str, help='select commits before the specified date (e.g. 2017-10-02)', default=None)
parser.add_argument('--max_hashes', type=int, help='maximum_number of hashes to process', default=16)
parser.add_argument('--benchmark_hook_patch', type=str, help='Patch to try if we don\'t have the benchmark hooks', default=None)
parser.add_argument('--benchmark_no_cleanup', action='store_true', default=False)
# Comma-separated subset of {bench, upload}; see the main loop below.
parser.add_argument('--run_stages', type=str, help='stages to run', default='bench,upload')
parser.add_argument('--environment', type=str, help='environment tag for run (default: %s)'%ENVIRONMENT, default=ENVIRONMENT)
parser.add_argument('--codespeed_url', type=str, help='codespeed URL for upload', default=CODESPEED_URL)
parser.add_argument('-v', '--verbose', action='store_true', default=False)
# Parsed once at import time; several functions below read this global.
args = parser.parse_args()
if args.verbose:
    print('executing: %s'%' '.join(sys.argv))
def shell_exec(cmd, verbose=args.verbose, check=False, stdout=None, stderr=None):
    """Run *cmd* through the shell, echoing it first when verbose is set.

    Returns the CompletedProcess; raises only if check=True and the
    command fails.
    """
    if verbose:
        print('+ %s' % cmd)
    completed = subprocess.run(cmd, shell=True, check=check,
                               stdout=stdout, stderr=stderr)
    return completed
def get_git_hashes(args):
    """Return up to args.max_hashes snapshot-commit hashes, oldest first.

    Checks out args.repo_branch in args.repo (optionally pulling /
    hard-resetting first) and selects commits whose subject contains
    '[CI] regenerate hints + ocaml snapshot', within the optional
    commit_after/commit_before date window.  Restores the caller's cwd.
    """
    old_cwd = os.getcwd()
    repo_path = os.path.abspath(args.repo)
    if args.verbose: print('using repo: %s'%repo_path)
    os.chdir(repo_path)
    shell_exec('git checkout %s'%args.repo_branch)
    if args.repo_pull:
        if args.repo_reset_hard:
            shell_exec('git fetch')
            shell_exec('git reset --hard origin/%s'%args.repo_branch)
        shell_exec('git pull')
    # git date notes:
    # https://docs.microsoft.com/en-us/azure/devops/repos/git/git-dates?view=azure-devops
    commit_xtra_args = ' --date=local'
    if args.commit_after:
        commit_xtra_args += ' --after %s'%args.commit_after
    if args.commit_before:
        commit_xtra_args += ' --before %s'%args.commit_before
    proc_output = shell_exec('git log %s --pretty=format:\'%%H %%s\' %s | grep "\\[CI\\] regenerate hints + ocaml snapshot"'%(commit_xtra_args, args.repo_branch), stdout=subprocess.PIPE)
    # git log is newest-first; [::-1] flips to chronological order.
    hash_comments = proc_output.stdout.decode('utf-8').split('\n')[::-1]
    hash_comments = filter(bool, hash_comments) # remove empty strings
    hashes = [hc.split(' ')[0] for hc in hash_comments]
    if args.verbose:
        for hc in hash_comments:
            print(hc)
    hashes = [ h for h in hashes if h ] # filter any null hashes
    # Keep only the most recent max_hashes commits.
    hashes = hashes[-args.max_hashes:]
    os.chdir(old_cwd)
    return hashes
def parse_and_format_results_for_upload(fname, bench_name_prefix='',
                                        commit_hash=None, branch=None,
                                        environment=None):
    """Aggregate one .bench file (a JSON record per line) into codespeed rows.

    Each line must carry name / time_secs / user_time_secs and a gc dict.
    Repeated runs of the same benchmark name are aggregated to
    mean/min/max/std of time_secs.

    commit_hash, branch and environment previously came from the module
    globals ``h`` and ``args``; they are now parameters that default to
    those globals, so the existing call site keeps working while the
    hidden dependency can be avoided.

    Returns a list of dicts ready for codespeed_upload (empty on no data).
    """
    if commit_hash is None:
        commit_hash = h                      # global set by the main loop
    if branch is None:
        branch = args.repo_branch
    if environment is None:
        environment = args.environment
    bench_data = []
    with open(fname) as f:
        for l in f:
            raw_data = json.loads(l)
            bench_data.append({
                'name': raw_data['name'],
                'time_secs': raw_data['time_secs'],
                'user_time_secs': raw_data['user_time_secs'],
                'gc.minor_collections': raw_data['gc']['minor_collections'],
                'gc.major_collections': raw_data['gc']['major_collections'],
                # compactions is optional in the raw record
                'gc.compactions': raw_data['gc'].get('compactions', 0),
            })
    if not bench_data:
        print('WARN: Failed to find any data in %s'%fname)
        return []
    bench_data = pandas.DataFrame(bench_data)
    # describe().T yields one row per numeric metric with
    # count/mean/std/min/max columns; the result is a (name, metric)
    # MultiIndex frame.
    aggregated_data = bench_data.groupby('name').apply(lambda x: x.describe().T)
    aggregated_data.index.set_names(['bench_name', 'bench_metric'], inplace=True)
    upload_data = []
    for bench_name in aggregated_data.index.levels[0]:
        # TODO: how to make the uploaded metric configurable
        metric_name, metric_units, metric_units_title = (
            'time_secs', 'seconds', 'Time')
        # Use the named metric instead of repeating the literal (the old
        # code bound metric_name and then never used it).
        results = aggregated_data.loc[(bench_name, metric_name)]
        upload_data.append({
            'commitid': commit_hash[:7],
            'commitid_long': commit_hash,
            'project': 'fstar_%s'%branch,
            'branch': branch,
            'executable': 'fstar',
            'executable_description': 'fstar (%s)'%branch,
            'environment': environment,
            'benchmark': bench_name_prefix + bench_name,
            'units': metric_units,
            'units_title': metric_units_title,
            'result_value': results['mean'],
            'min': results['min'],
            'max': results['max'],
            'std_dev': results['std'],
        })
    return upload_data
# Main driver: for each selected commit, optionally clone+benchmark
# ('bench' stage) and/or upload collected results ('upload' stage).
run_timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
run_stages = args.run_stages.split(',')
if args.verbose: print('will run stages: %s'%run_stages)
## setup directory
outdir = os.path.abspath(args.outdir)
if args.verbose: print('making directory: %s'%outdir)
shell_exec('mkdir -p %s'%outdir)
## generate list of hash commits
hashes = get_git_hashes(args)
if args.verbose:
    print('Found %d hashes'%len(hashes))
verbose_args = ' -v' if args.verbose else ''
for h in hashes:
    # One working directory per commit hash under outdir.
    os.chdir(outdir)
    hashdir = os.path.join(outdir, h)
    if args.verbose: print('processing to %s'%hashdir)
    shell_exec('mkdir -p %s'%hashdir)
    fstar_dir = os.path.join(hashdir, 'fstar')
    if 'bench' in run_stages:
        if os.path.exists(fstar_dir):
            # An existing clone means this hash was already benchmarked.
            print('Skipping fstar setup for %s as directory there'%h)
        else:
            ## setup fstar (make a clone and change the hash)
            os.chdir(hashdir)
            # --reference against the local repo keeps the clone cheap.
            shell_exec('git clone --reference %s %s %s'%(args.repo, args.repo, fstar_dir))
            os.chdir(fstar_dir)
            shell_exec('git checkout %s'%h)
            shell_exec('git clean -f -d -x')
            # HACK: dynamically patch the fstar repo to have the benchmark stubs
            if args.benchmark_hook_patch:
                shell_exec('grep BENCHMARK_PRE ulib/gmake/fstar.mk || git apply %s'%args.benchmark_hook_patch)
            completed_proc = shell_exec(args.benchmark_run_script)
            if completed_proc.returncode != 0:
                # Best-effort: report and continue with whatever was produced.
                print('ERROR[%d] in fstar bench run for %s'%(completed_proc.returncode, h))
            ## collate benchmark output
            shell_exec('mkdir -p ../bench_results; cp -r bench_results/*/* ../bench_results')
            ## clean directory after use
            if not args.benchmark_no_cleanup:
                os.chdir(hashdir)
                shell_exec('rm -rf %s'%fstar_dir)
    if 'upload' in run_stages:
        os.chdir(hashdir)
        resultdir = os.path.join(hashdir, 'bench_results')
        glob_pat = '%s/*.bench'%resultdir
        fnames = sorted(glob.glob(glob_pat))
        if not fnames:
            print('ERROR: could not find any results of form %s to upload'%glob_pat)
            continue
        for fname in fnames:
            print('Uploading data from %s'%fname)
            # Benchmark names are prefixed with the result file's basename.
            prefix = fname.split('/')[-1].split('.bench')[0] + '/'
            upload_data = parse_and_format_results_for_upload(fname, prefix)
            ## upload this stuff into the codespeed server
            if upload_data:
                codespeed_upload.post_data_to_server(args.codespeed_url, upload_data, verbose=args.verbose)
| |
# Copyright (C) 2012-2013 Andy Balaam and The Pepper Developers
# Released under the MIT License. See the file COPYING.txt for details.
from nose.tools import *
from libpepper.builtins import add_builtins
from libpepper.environment import PepEnvironment
from libpepper.cpp.cppvalues import *
from libpepper.cpp.cpprenderer import PepCppRenderer
from libpepper.vals.all_values import *
from libpepper.usererrorexception import PepUserErrorException
from pepasserts import assert_contains
from pepasserts import assert_multiline_equal
def render_evald( val, env ):
    """Evaluate *val* in *env*, then render the result with the same env."""
    evaluated = val.evaluate( env )
    return evaluated.render( env )
def test_Static_variable_can_be_read():
    """A class-level init creates a static member readable via dotted name."""
    env = PepEnvironment( PepCppRenderer() )
    decl = PepClass(
        name=PepSymbol( "MyClass" ),
        base_classes=(),
        body_stmts=(
            PepInit( PepType( PepInt ), PepSymbol( "i" ), PepInt( "7" ) ),
        )
    )
    # The class declaration itself renders to nothing.
    assert_equal( render_evald( decl, env ), "" )
    value = PepSymbol( "MyClass.i" )
    assert_equal( render_evald( value, env ), "7" )
def test_Member_function_can_be_executed():
    """
    Calling a member function through the class renders its return value.

    Note this test may turn out to be incorrect. Python would respond with:
    TypeError: unbound method myfunc() must be called with X instance as
    first argument (got int instance instead)
    """
    env = PepEnvironment( PepCppRenderer() )
    decl = PepClass(
        name=PepSymbol( "MyClass" ),
        base_classes=(),
        body_stmts=(
            PepDef(
                PepType( PepInt ),
                PepSymbol( "myfunc" ),
                (
                    ( PepType( PepInt ), PepSymbol( "x" ) ),
                ),
                (
                    PepReturn( PepSymbol( "x" ) ),
                )
            ),
        )
    )
    assert_equal( render_evald( decl, env ), "" )
    # An unused `value3` PepFunctionCall used to be built here; it was
    # dead code (never evaluated or asserted on) and has been removed.
    value5 = PepFunctionCall(
        PepSymbol( "MyClass.myfunc" ),
        (
            PepInt( "5" ),
        )
    )
    assert_equal( render_evald( value5, env ), "5" )
def test_Init_returns_a_new_instance():
    """Calling MyClass.init yields a known instance of that class."""
    env = PepEnvironment( PepCppRenderer() )
    decl = PepClass(
        name=PepSymbol( "MyClass" ),
        base_classes=(),
        body_stmts=(
            PepPass(),
        )
    )
    assert_equal( render_evald( decl, env ), "" )
    value = PepFunctionCall( PepSymbol( "MyClass.init" ), () )
    ev_value = value.evaluate( env )
    # The evaluated call is a compile-time-known instance of MyClass.
    assert_equal( PepKnownInstance, ev_value.__class__ )
    assert_equal( "MyClass", ev_value.clazz.name )
def test_Init_with_arg_returns_new_instance_constructed_with_arg():
    """A constructor argument is stored on the instance via self.x."""
    env = PepEnvironment( PepCppRenderer() )
    add_builtins( env )
    decl = PepClass(
        name=PepSymbol( "MyClass" ),
        base_classes=(),
        body_stmts=(
            # init(self, a) does: var int self.x = a
            PepDefInit(
                (
                    ( PepSymbol( "MyClass" ), PepSymbol( 'self' ) ),
                    ( PepSymbol( "int" ), PepSymbol( 'a' ) ),
                ),
                (
                    (
                        PepVar(
                            (
                                PepInit(
                                    PepSymbol( "int" ),
                                    PepSymbol( "self.x" ),
                                    PepSymbol( "a" )
                                ),
                            )
                        ),
                    )
                ),
            ),
        )
    )
    assert_equal( "", render_evald( decl, env ) )
    make_instance = PepInit(
        PepSymbol( "MyClass" ),
        PepSymbol( "my_instance" ),
        PepFunctionCall(
            PepSymbol( "MyClass.init" ), ( PepInt( "3" ), )
        )
    )
    assert_equal( "", render_evald( make_instance, env ) )
    # The constructor argument is now readable as my_instance.x.
    value = PepSymbol( "my_instance.x" )
    assert_equal( "3", render_evald( value, env ) )
def test_Can_get_names_of_member_variables_from_def_init():
    """get_member_variables lists (type, name) pairs declared on self."""
    env = PepEnvironment( PepCppRenderer() )
    add_builtins( env )
    # Note the self parameter may be named anything ('fooself' here).
    definit = PepDefInit(
        ( ( PepSymbol( "MyClass" ), PepSymbol( 'fooself' ) ), ),
        (
            (
                PepVar(
                    (
                        PepInit(
                            PepSymbol( "int" ),
                            PepSymbol( "fooself.member_one" ),
                            PepInt( 0 )
                        ),
                        PepInit(
                            PepSymbol( "float" ),
                            PepSymbol( "fooself.member_two" ),
                            PepFloat( 0.1 )
                        ),
                    )
                ),
            )
        ),
    ).evaluate( env )
    # Compare via str() to sidestep equality semantics of the value types.
    assert_equal(
        str( [
            ( PepSymbol( "int" ).evaluate( env ), "member_one" ),
            ( PepSymbol( "float" ).evaluate( env ), "member_two" )
        ] ),
        str( definit.get_member_variables( env ) )
    )
def test_Not_allowed_non_self_inits_in_var():
    """A var block in init rejects names not prefixed with the self name."""
    env = PepEnvironment( PepCppRenderer() )
    add_builtins( env )
    definit = PepDefInit(
        ( ( PepSymbol( "MyClass" ), PepSymbol( 'barself' ) ), ),
        (
            (
                PepVar(
                    (
                        # 'my_var' lacks the required 'barself.' prefix.
                        PepInit(
                            PepSymbol( "int" ),
                            PepSymbol( "my_var" ),
                            PepInt( 0 )
                        ),
                    )
                ),
            )
        ),
    )
    exception_caught = False
    try:
        definit.get_member_variables( env )
    except PepUserErrorException, e:
        exception_caught = True
        assert_contains( str( e ), "'my_var' does not start with 'barself.'" )
    assert( exception_caught )
def test_Must_provide_nonempty_variable_name_in_var():
    """A bare 'self.' with no variable name is a user error."""
    env = PepEnvironment( PepCppRenderer() )
    add_builtins( env )
    definit = PepDefInit(
        ( ( PepSymbol( "MyClass" ), PepSymbol( 'self' ) ), ),
        (
            (
                PepVar(
                    (
                        # The member name after 'self.' is empty.
                        PepInit(
                            PepSymbol( "int" ),
                            PepSymbol( "self." ),
                            PepInt( 0 )
                        ),
                    )
                ),
            )
        ),
    )
    exception_caught = False
    try:
        definit.get_member_variables( env )
    except PepUserErrorException, e:
        exception_caught = True
        assert_contains(
            str( e ),
            "You must provide a variable name, not just 'self.'"
        )
    assert( exception_caught )
def Cannot_overwrite_method_with_member_variable__test():
    """Instantiating a class whose init shadows a method name must fail.

    NOTE(review): the unusual trailing '__test' naming (instead of the
    'test_' prefix used elsewhere) looks deliberate — possibly to change
    how the test runner picks it up; confirm before renaming.
    """
    env = PepEnvironment( PepCppRenderer() )
    add_builtins( env )
    decl = PepClass(
        name=PepSymbol( "MyClass" ),
        base_classes=(),
        body_stmts=(
            # init assigns self.my_meth, clashing with the method below.
            PepDefInit(
                (
                    ( PepSymbol( "MyClass" ), PepSymbol( 'self' ) ),
                ),
                (
                    (
                        PepVar(
                            (
                                PepInit(
                                    PepSymbol( "int" ),
                                    PepSymbol( "self.my_meth" ),
                                    PepInt( "3" )
                                ),
                            )
                        ),
                    )
                ),
            ),
            PepDef(
                PepSymbol( "void" ),
                PepSymbol( "my_meth" ),
                (
                    ( PepSymbol( "MyClass" ), PepSymbol( 'self' ) ),
                ),
                (
                    PepPass(),
                ),
            ),
        )
    )
    assert_equal( "", render_evald( decl, env ) )
    exception_caught = False
    try:
        # The clash is only detected when an instance is constructed.
        make_instance = PepInit(
            PepSymbol( "MyClass" ),
            PepSymbol( "my_instance" ),
            PepFunctionCall(
                PepSymbol( "MyClass.init" ), ()
            )
        )
        render_evald( make_instance, env )
    except PepUserErrorException, e:
        exception_caught = True
        assert_contains(
            str( e ),
            "Namespace already contains the name 'my_meth'"
        )
    assert( exception_caught )
def test_Can_get_names_of_member_variables_from_class():
    """An evaluated user class exposes init's members as member_variables."""
    env = PepEnvironment( PepCppRenderer() )
    add_builtins( env )
    cls = PepUserClass(
        name="MyClass",
        base_classes=(),
        body_stmts=(
            PepDefInit(
                ( ( PepSymbol( "MyClass" ), PepSymbol( 'self' ) ), ),
                (
                    (
                        PepVar(
                            (
                                PepInit(
                                    PepSymbol( "int" ),
                                    PepSymbol( "self.member_one" ),
                                    PepInt( 0 )
                                ),
                                PepInit(
                                    PepSymbol( "float" ),
                                    PepSymbol( "self.member_two" ),
                                    PepFloat( 0.1 )
                                ),
                            )
                        ),
                    )
                ),
            ),
        )
    ).evaluate( env )
    # Compare via str() to sidestep equality semantics of the value types.
    assert_equal(
        str( [
            ( PepSymbol( "int" ).evaluate( env ), "member_one" ),
            ( PepSymbol( "float" ).evaluate( env ), "member_two" )
        ] ),
        str( cls.member_variables )
    )
def test_Class_reports_methods_available():
    """The namespace of an evaluated class lists its methods by name."""
    env = PepEnvironment( PepCppRenderer() )
    add_builtins( env )
    evald_class = PepUserClass(
        name="MyClass",
        base_classes=(),
        body_stmts=(
            PepDef(
                PepType( PepInt ),
                PepSymbol( "myfunc" ),
                (
                    ( PepSymbol( "MyClass" ), PepSymbol( "self" ) ),
                ),
                (
                    PepReturn( PepInt( "3" ) ),
                )
            ),
        )
    ).evaluate( env )
    # The defined method is present; an undefined name is not.
    assert_true( "myfunc" in evald_class.get_namespace() )
    assert_true( "foo" not in evald_class.get_namespace() )
class FakeFn( object ):
    """Stand-in for a Pepper function object.

    Ignores its arguments and returns a fixed marker string, so tests can
    detect that the underlying function was actually invoked.
    """
    def call( self, args, env ):
        return "FakeFn ret val"
class FakeClass( object ):
    """Minimal class stand-in with a fixed name and an empty namespace.

    Note get_namespace returns a *fresh* empty dict on every call, so
    anything a caller stores in it is discarded.
    """
    def __init__( self ):
        self.name = "FakeClass"

    def get_namespace( self ):
        return {}

    def get_name( self ):
        return self.name
def create_method():
    # Build an instance method whose underlying function is a FakeFn,
    # bound to a known instance of a FakeClass.
    clazz = FakeClass()
    instance = PepKnownInstance( clazz )
    fn = FakeFn()
    return PepInstanceMethod( instance, fn )
def test_Calling_a_method_with_known_args_returns_the_answer():
    """With all arguments known, a method call delegates to its function."""
    # Create a method on an instance, which uses a function we expect
    # to be called
    meth = create_method()
    # This is what we are testing: the underlying function was called
    assert_equal(
        "FakeFn ret val",
        meth.call( ( PepInt( "3" ), PepInt( "4" ) ), "env" )
    )
def test_Calling_a_method_with_unknown_args_returns_a_runtime_function():
    """An unknown (runtime) argument defers the call to runtime."""
    # Create a method on an instance
    meth = create_method()
    # This is what we are testing: we returned an PepRuntimeUserFunction
    # because an argument was unknown
    assert_equal(
        PepRuntimeUserFunction,
        meth.call(
            (
                PepInt( "3" ),
                PepVariable( PepType( PepInt ), "x" ) ),
            "env"
        ).__class__
    )
def test_Calling_a_method_with_unknown_instance_returns_a_runtime_function():
    """An unknown (runtime) receiver also defers the call to runtime."""
    class FakeType( PepTypeMatcher ):
        # Minimal PepTypeMatcher: every hook is stubbed out.
        def __init__( self ):
            self.name = ""
        def get_name( self ): pass
        def runtime_namespace( self, instance, insert_placeholders ):
            pass
        def get_namespace(): pass
        def matches(): pass
        def underlying_class(): pass
    # Create a method on a variable holding a class type
    clazz = FakeType()
    instance = PepVariable( clazz, "inst" )
    fn = FakeFn()
    meth = PepInstanceMethod( instance, fn )
    # This is what we are testing: we returned an PepRuntimeUserFunction
    # because the instance was unknown
    assert_equal(
        PepRuntimeUserFunction,
        meth.call(
            ( PepInt( "3" ), PepInt( "3" ) ), "env" ).__class__
    )
class MyInstance( PepInstance ):
    """Concrete PepInstance used by the tests below."""
    def construction_args( self ):
        # Abstract hook from PepInstance - not exercised by these tests.
        pass
def test_Instances_return_their_own_values_overriding_class_values():
    """An instance's own namespace entry shadows the class's entry."""
    fake_class = FakeClass()
    instance = MyInstance(fake_class)
    # Populate instance first, then class (instance first to avoid errors
    # if overriding in a subnamespace is ever made an error).
    instance.get_namespace()["a"] = "return_me"
    fake_class.get_namespace()["a"] = "dont_return_me"
    # The instance-level value is the one returned.
    assert_equal("return_me", instance.get_namespace()["a"])
def test_Instances_return_class_values_where_they_have_nothing():
    """Lookups fall through to the class when the instance has no entry."""

    class MyClass(object):
        def __init__(self):
            self.namespace = PepNamespace()

        def get_namespace(self):
            return self.namespace

    owner = MyClass()
    instance = MyInstance(owner)
    # Only the class namespace holds a value for "a".
    owner.get_namespace()["a"] = "class_value"
    # The lookup through the instance finds the class value.
    assert_equal("class_value", instance.get_namespace()["a"])
def test_Instance_returns_a_method_when_class_holds_a_function():
    """Functions fetched through an instance come back wrapped as methods."""

    class MyClass(object):
        def __init__(self):
            self.namespace = PepNamespace()

        def get_namespace(self):
            return self.namespace

    owner = MyClass()
    instance = MyInstance(owner)
    fake_fn = "fake_fn"
    # The class namespace holds an overload list containing our function.
    owner.get_namespace()["a"] = PepFunctionOverloadList([fake_fn])
    # Fetch it back through the instance.
    fetched = instance.get_namespace()["a"]
    # The overload entry was wrapped as a method bound to this instance.
    wrapped = fetched._list[0]
    assert_equal(PepInstanceMethod, wrapped.__class__)
    assert_equal(instance, wrapped.instance)
    assert_equal(fake_fn, wrapped.fn)
def create_instance_variable_and_method_call( env ):
    """Define a class with an init and a no-op method, construct an
    instance of it from an unknown (variable) argument, and return the
    unevaluated PepFunctionCall for mc.my_meth().

    Side effects on env: defines 'a' (unknown int), 'MyClass' and 'mc'.
    """
    # 'a' is an unknown int - this makes the constructed instance unknown.
    env.namespace['a'] = PepVariable( PepType( PepInt ), "a" )
    PepClass(
        PepSymbol( 'MyClass' ),
        (),
        (
            PepDefInit(
                (
                    ( PepSymbol('MyClass'), PepSymbol('self') ),
                    ( PepType( PepInt ), PepSymbol('x') ),
                ),
                ( PepPass(), )
            ),
            PepDef(
                PepType( PepVoid ),
                PepSymbol('my_meth'),
                ( ( PepSymbol('MyClass'), PepSymbol('self') ), ),
                ( PepPass(), )
            )
        )
    ).evaluate( env )
    # mc = MyClass.init( a ) - constructed from the unknown variable.
    PepInit(
        PepSymbol( 'MyClass' ),
        PepSymbol( 'mc' ),
        PepFunctionCall( PepSymbol( 'MyClass.init' ), ( PepSymbol( "a" ), ) )
    ).evaluate( env )
    meth = PepFunctionCall( PepSymbol( "mc.my_meth" ), () )
    return meth
def Runtime_instance_has_evaluated_type_of_class___test():
    """A runtime instance's evaluated type is the evaluated class itself."""
    env = PepEnvironment(None)
    # Sets up MyClass and the unknown instance 'mc' in env (call is kept
    # for its side effects; the returned call expression is unused here).
    meth = create_instance_variable_and_method_call(env)
    expected_type = PepSymbol("MyClass").evaluate(env)
    assert_equal(expected_type, PepSymbol("mc").evaluated_type(env))
def Runtime_instance_allows_access_to_methods___test():
    """Methods are reachable through an unknown instance, but not known."""
    env = PepEnvironment(None)
    meth = create_instance_variable_and_method_call(env)
    # my_meth is a callable taking no arguments and returning void ...
    assert_equal(
        PepType(PepVoid),
        PepSymbol("mc.my_meth").evaluate(env).return_type((), env)
    )
    # ... but it is not known, because the instance itself is unknown.
    assert_false(PepSymbol("mc.my_meth").evaluate(env).is_known(env))
def Method_calls_of_instance_variables_are_unknown___test():
    """A method call on an unknown instance stays unknown after evaluation."""
    env = PepEnvironment(None)
    call = create_instance_variable_and_method_call(env)
    assert_false(call.is_known(env))
    evaluated_call = call.evaluate(env)
    assert_false(evaluated_call.is_known(env))
def Evaluated_types_of_method_calls_of_instance_variables_are_correct___test():
    """The call's evaluated type is void both before and after evaluation."""
    env = PepEnvironment(None)
    call = create_instance_variable_and_method_call(env)
    assert_equal(PepType(PepVoid), call.evaluated_type(env))
    evaluated_call = call.evaluate(env)
    assert_equal(PepType(PepVoid), evaluated_call.evaluated_type(env))
@raises(PepUserErrorException)
def No_method_called_init_allowed__test():
    """Defining a method literally named "init" is a user error."""
    env = PepEnvironment( None )
    PepUserClass(
        name=PepSymbol( "MyClass" ),
        base_classes=(),
        body_stmts=(
            PepDef( PepType( PepInt ), PepSymbol( "init" ), (), (PepPass(),) ),
        )
    ).evaluate( env )
@raises(PepUserErrorException)
def No_method_called_implements_allowed__test():
    """Defining a method literally named "implements" is a user error."""
    env = PepEnvironment( None )
    PepUserClass(
        name=PepSymbol( "MyClass" ),
        base_classes=(),
        body_stmts=(
            PepDef(
                PepType( PepInt ),
                PepSymbol( "implements" ),
                (),
                (PepPass(),)
            ),
        )
    ).evaluate( env )
def Builtin_implements_method_does_not_break_implements_check__test():
    """A builtin named "implements" must not trip the class-body check."""
    # At one point checking for an "implements" method was actually failing
    # because a global "implements" function existed - this test fails if
    # that happens.
    env = PepEnvironment( None )
    add_builtins( env )
    # Evaluating an empty class must not raise despite the global builtin.
    PepUserClass(
        name=PepSymbol( "MyClass" ),
        base_classes=(),
        body_stmts=(
            PepPass(),
        )
    ).evaluate( env )
| |
"""
.. A wrapper for the Analyzer. It runs it as an XML-RPC server to isolate from
lengthy grammar-building times.
.. moduleauthor:: Luca Gilardi <lucag@icsi.berkeley.edu>
------
See LICENSE.txt for licensing information.
------
"""
import sys
from utils import Struct, update, display # @UnusedImport
from xmlrpclib import ServerProxy # @UnresolvedImport
from SimpleXMLRPCServer import SimpleXMLRPCServer # @UnresolvedImport
from utils import interpreter
from pprint import pprint
from xmlrpclib import Fault
import time
from threading import Thread
# Per-platform path (relative to JAVA_HOME) of the JVM shared library.
# Possibly change this for your system
dll = {'linux': '/jre/lib/amd64/server/libjvm.so',
       'darwin': '/jre/lib/server/libjvm.dylib',
       'win32': '/jre/bin/server/jvm.dll'}
try:
    # Preferred path: drive the Java compling.core.jar through jpype from
    # CPython.
    import jpype, os  # @UnresolvedImport
    jpype.startJVM(os.environ['JAVA_HOME'] + dll[sys.platform],
                   '-ea', '-Xmx5g', '-Djava.class.path=lib/compling.core.jar')
    compling = jpype.JPackage('compling')
    # SlotChain is a Java inner class, so it must be fetched under its
    # mangled runtime name 'UnificationGrammar$SlotChain'.
    SlotChain = getattr(compling.grammar.unificationgrammar, 'UnificationGrammar$SlotChain')
    getParses = compling.gui.util.Utils.getParses
    ParserException = jpype.JException(compling.parser.ParserException)  # @UnusedVariable
    ECGAnalyzer = compling.parser.ecgparser.ECGAnalyzer
    getDfs = compling.grammar.unificationgrammar.FeatureStructureUtilities.getDfs  # @UnusedVariable
except ImportError:
    # Fallback: the compling packages are importable directly - presumably
    # running under Jython; confirm before relying on this branch.
    from compling.grammar.unificationgrammar.UnificationGrammar import SlotChain
    from compling.gui.util.Utils import getParses
    from compling.parser import ParserException  # @UnusedImport
    from compling.parser.ecgparser import ECGAnalyzer
    from compling.grammar.unificationgrammar.FeatureStructureUtilities import getDfs  # @UnusedImport
    from compling.gui import AnalyzerPrefs
    from compling.grammar.ecg.Prefs import Property
    from compling.gui.AnalyzerPrefs import AP
class Analyzer(object):
    """Wrapper around the Java ECG Analyzer, suitable for XML-RPC export."""
    def __init__(self, prefs):
        # prefs is the path to a grammar preference (.prefs) file.
        self.analyzer = ECGAnalyzer(prefs)
        self.grammar = self.analyzer.grammar
        # Filled in by main() after the XML-RPC server is built; see close().
        self.server = None
    def get_mappings(self):
        """Return the analyzer's mappings as a plain dict (from a Java Map)."""
        mappings = self.analyzer.getMappings()
        m = dict()
        for entry in mappings.entrySet():
            m[entry.key] = entry.value
        return m
    def get_lexicon(self):
        """Return the lexicon as a Python list."""
        lexes = self.analyzer.getLexicon()
        return list(lexes)
    def get_utterances(self):
        """Return the analyzer's utterances as a Python list."""
        utterances = self.analyzer.getUtterances()
        return list(utterances)
    def get_parses(self, sentence):
        """Parse *sentence*, converting parser failures to XML-RPC Faults."""
        try:
            return getParses(sentence, self.analyzer)
        except ParserException, p:
            print(p.message)
            # A Fault propagates a readable message back to RPC clients.
            raise Fault(-1, p.message)
    def test_analysis(self, analysis):
        """ For testing how to output spans, etc. In development.
        Mostly just a way to store info about methods. """
        featureStruct = analysis.featureStructure
        slots = featureStruct.slots.toArray()  # Puts slots into array
        first = slots[0]  # Get first slot, e.g. ROOT
        entries = first.features.entrySet().toArray()  # Puts features into entry set
        value = entries[0].value  # Gets actual value, e.g. EventDescriptor[2]
    def get_mapping(self):
        """Return the MAPPING_PATH setting from the analyzer preferences."""
        # NOTE(review): AP is only bound in the non-jpype import branch
        # above - confirm this method is reachable under jpype.
        v = AP.valueOf("MAPPING_PATH")
        return self.analyzer.getPrefs().getSetting(v)
    def parse(self, sentence):
        """Parse *sentence*; return a JSON-friendly dict with keys
        'parse' (slot sequences), 'costs' and 'spans'."""
        def root(parse):
            # Main root slot of the first analysis.
            return parse.analyses[0].featureStructure.mainRoot
        def as_sequence(parse):
            def desc(slot):
                # (type name, index, typesystem name, atomic value) tuple.
                return (slot_type(slot), slot_index(slot), slot_typesystem(slot), slot_value(slot))
            slots = dict()
            root_ = root(parse)
            # dfs fills `slots` as a side effect while yielding the edges.
            seq = [(parent, role) + desc(slots[s_id]) for parent, role, s_id in dfs('<ROOT>', root_, None, slots) if parent != -1]
            return (-1, '<ROOT>') + desc(root_), seq
        def convert_span(span, fs):
            """ Return span, like (0, 4), name of cxn, and slot ID for cxn. """
            name = span.getType().getName() if span.getType() else "None"
            identity = fs.getSlot(span.slotID).slotIndex if fs.getSlot(span.slotID) else "None"
            return {'span': (span.left, span.right), 'type': name, 'id': identity}
        def get_spans(parses):
            # One list of span dicts per parse, taken from its first analysis.
            all_spans = []
            for parse in parses:
                parse_spans = []
                analysis = parse.getAnalyses()[0]
                spans = list(analysis.getSpans())
                for span in spans:
                    parse_spans.append(convert_span(span, analysis.featureStructure))
                all_spans.append(parse_spans)
            return all_spans
        parses = self.get_parses(sentence)
        return {'parse': [as_sequence(p) for p in parses], 'costs': [p.cost for p in parses], 'spans': get_spans(parses)}
    def getConstructionSize(self):
        """Number of constructions in the loaded grammar."""
        return len(self.analyzer.getGrammar().getAllConstructions())
    def getSchemaSize(self):
        """Number of schemas in the loaded grammar."""
        return len(self.analyzer.getGrammar().getAllSchemas())
    def reload(self, prefs):
        """ Reloads grammar according to prefs file. """
        self.analyzer = ECGAnalyzer(prefs)
        self.grammar = self.analyzer.grammar
    def issubtype(self, typesystem, child, parent):
        """Is <child> a child of <parent>?

        typesystem is one of 'CONSTRUCTION', 'SCHEMA' or 'ONTOLOGY'.
        """
        _ts = dict(CONSTRUCTION=self.grammar.cxnTypeSystem,
                   SCHEMA=self.grammar.schemaTypeSystem,
                   ONTOLOGY=self.grammar.ontologyTypeSystem)
        ts = _ts[typesystem]
        return ts.subtype(ts.getInternedString(child), ts.getInternedString(parent))
    def close(self):
        """Shut down the XML-RPC server that serves this analyzer."""
        self.server.shutdown()
    def __json__(self):
        return self.__dict__
def slot_index(slot):
    """Return the numeric index identifying *slot* in its feature structure."""
    return slot.slotIndex
def slot_type(slot):
    """Name of the slot's type constraint, or None when slot or constraint
    is missing."""
    if slot and slot.typeConstraint:
        return slot.typeConstraint.type
    return None
def slot_typesystem(slot):
    """Name of the type system owning the slot's type constraint, or None."""
    if slot and slot.typeConstraint:
        return slot.typeConstraint.typeSystem.name
    return None
def slot_value(slot):
    """Return the slot's atomic value with the surrounding quote characters
    stripped, or None when there is no slot or no atom.

    Guards against a None slot for consistency with slot_type() and
    slot_typesystem() above; the original raised AttributeError on None.
    """
    return slot.atom[1:-1] if slot and slot.atom else None
def slot(semspec, path, relative=None):
    """Returns the slot at the end of <path>, a slot chain (a dot-separated
    list of role names), optionally resolved relative to the slot *relative*."""
    chain = SlotChain(path)
    if relative:
        return semspec.getSlot(relative, chain)
    return semspec.getSlot(chain)
def test(args):
    """Just test the analyzer.

    args is a (prefs_path, sentence) pair.
    """
    prefs, sent = args
    display('Creating analyzer with grammar %s ... ', prefs, term=' ')
    analyzer = Analyzer(prefs)
    display('done.')
    # NOTE(review): Analyzer.parse returns a dict, so this loop iterates
    # and pretty-prints its keys ('parse', 'costs', 'spans') - confirm
    # whether pprint(analyzer.parse(sent)) was intended.
    for p in analyzer.parse(sent):
        pprint(p)
def atom(slot):
    "Does slot contain an atomic type? Return it sans quotes, or ''."
    if slot.atom:
        return slot.atom[1:-1]
    return ''
def dfs(name, slot, parent, seen):
    """Depth-first walk over a feature structure.

    Yields (parent_slot_index, role_name, slot_index) triples, children
    before their parent (post-order).  *seen* maps slot index -> slot and
    doubles as the cycle guard for reentrant structures.
    """
    slotIndex = slot.slotIndex
    seen[slotIndex] = slot
    if slot.features:
        for e in slot.features.entrySet():
            # <name, slot> pairs; '-' is not valid in Python identifiers.
            n, s = unicode(e.key).replace('-', '_'), e.value
            if s.slotIndex not in seen:
                for x in dfs(n, s, slot, seen):
                    yield x
            else:
                # Already-visited slot: emit the back-reference edge only.
                yield slotIndex, n, s.slotIndex
    yield parent.slotIndex if parent else -1, name, slot.slotIndex
def server(obj, host='localhost', port=8090):
    """Expose *obj* over XML-RPC on host:port; blocks in serve_forever()."""
    rpc_server = SimpleXMLRPCServer(
        (host, port), allow_none=True, logRequests=False, encoding='utf-8')
    rpc_server.register_instance(obj)
    # display('server ready (listening to http://%s:%d/).', host, port)
    rpc_server.serve_forever()
    # Only reached after shutdown() is invoked from another thread.
    return rpc_server  # Added
def usage_time(start, end, analyzer):
    """Print grammar-inversion timing plus grammar-size statistics.

    start/end are time.time() stamps around analyzer construction.
    The grammar-size getters are called once each (the original queried
    each of them twice, once for the line and once for the total).
    """
    print("Inversion time:")
    print(end - start)
    num_constructions = analyzer.getConstructionSize()
    num_schemas = analyzer.getSchemaSize()
    print("Num constructions: ")
    print(num_constructions)
    print("Num schemas: ")
    print(num_schemas)
    print("Total: ")
    print(num_constructions + num_schemas)
def main(args):
    """Build an Analyzer from args[1] (a .prefs path) and serve it over
    XML-RPC; blocks until the server is shut down."""
    display(interpreter())
    #display('Starting up Analyzer ... ', term='')
    start = time.time()
    analyzer = Analyzer(args[1])
    end = time.time()
    print("Analyzer ready...")
    #usage_time(start, end, analyzer)
    try:
        #server_thread = Thread(target=server, kwargs={'obj': analyzer, 'host': host, 'port': port})
        #serve = server_thread.start()
        # server() blocks in serve_forever(); the assignment below only
        # happens once the server has been shut down.
        serve = server(analyzer)
        analyzer.server = serve
    except Exception, e:
        # Typically 'address already in use' when another analyzer runs.
        print(e)
        #print "Address " + host + ":" + str(port) + " is already in use. Using Analyzer on existing server. Kill that process to restart with a new Analyzer."
def main2(args):
    """Build and return an Analyzer without serving it; args[1] is the
    .prefs path."""
    display(interpreter())
    #display('Starting up Analyzer ... ', term='')
    t0 = time.time()
    analyzer = Analyzer(args[1])
    t1 = time.time()  # t0/t1 retained for the (disabled) usage_time report
    print("Analyzer ready...")
    return analyzer
def test_remote(sentence='Robot1, move to location 1 2!'):
    """Parse *sentence* through a running XML-RPC analyzer and return the
    result as a feature structure."""
    from feature import as_featurestruct
    proxy = ServerProxy('http://localhost:8090')
    parsed = proxy.parse(sentence)
    return as_featurestruct(parsed[0])
# TODO: update this
def test_local(sentence='Robot1, move to location 1 2!'):
    """Build a local Analyzer from the robots grammar, parse *sentence*,
    pretty-print and return the raw parse dict."""
    from feature import as_featurestruct
    display('Starting up Analyzer ... ', term='')
    analyzer = Analyzer('grammar/robots.prefs')
    display('done.\n', 'analyzing', sentence)
    parsed = analyzer.parse(sentence)
    pprint(parsed)
    # s = as_featurestruct(parsed[0])
    # return s
    return parsed
def usage():
    """Print command-line usage and exit with a failure status."""
    display('Usage: analyzer.py <preference file>')
    sys.exit(-1)
if __name__ == '__main__':
    # -t <prefs> <sentence>: one-shot parse test; -l [sentence]: local
    # test with the robots grammar; otherwise start the XML-RPC server.
    if '-t' in sys.argv:
        test(sys.argv[2:])
    elif '-l' in sys.argv:
        test_local(*sys.argv[2:3])
    else:
        if len(sys.argv) != 2:
            usage()
        main(sys.argv)
        #analyzer = main2(sys.argv)
| |
import _init_paths
import inspect
import os
import shutil
import time
import numpy as np
import tensorflow as tf
from PIL import Image
from image_pylib import IMGLIB
import data_engine
import random
# Per-channel means subtracted from the input image - order is BGR
# (they are subtracted from blue/green/red respectively in d_net.build).
VGG_MEAN = [103.939, 116.779, 123.68]
# Fixed network input resolution.
image_height = 288
image_width = 384
# Spatial size of the conv feature map at 1/16 of the input resolution.
feature_height = int(np.ceil(image_height / 16.))
feature_width = int(np.ceil(image_width / 16.))
def checkFile(fileName):
    """Return True if fileName is an existing file; otherwise print an
    error and terminate the process."""
    if os.path.isfile(fileName):
        return True
    else:
        print fileName, 'is not found!'
        exit()
def checkDir(fileName, creat=False):
    """Ensure directory fileName exists.

    With creat=True the directory is recreated empty (removed first when
    already present).  With creat=False a missing directory is fatal.
    """
    if os.path.isdir(fileName):
        if creat:
            # Wipe and recreate so the directory starts out empty.
            shutil.rmtree(fileName)
            os.mkdir(fileName)
    else:
        if creat:
            os.mkdir(fileName)
        else:
            print fileName, 'is not found!'
            exit()
def getAllFiles(dirName, houzhui):
    """List regular files in dirName whose extension equals *houzhui*
    (e.g. '.jpg').

    Returns [full_path, basename_without_extension] pairs in os.listdir
    order.  ("houzhui" is pinyin for "suffix".)
    """
    matches = []
    for entry in os.listdir(dirName):
        full_path = os.path.join(dirName, entry)
        if not os.path.isfile(full_path):
            continue
        stem, ext = os.path.splitext(entry)
        if ext == houzhui:
            matches.append([full_path, stem])
    return matches
class d_net:
    """Discriminator network: a VGG16 conv backbone plus two fc layers.

    Weights come from two numpy dumps: vgg16_npy_path supplies conv1/conv2
    filters loaded as frozen tf.constant tensors, d_net_path supplies the
    remaining layers (conv3..conv5 as tf.Variable, fc6/fc7 as tf.constant).
    Uses the legacy pre-1.0 TensorFlow API (tf.split/tf.concat take the
    axis as the first argument).
    """
    def __init__(self, vgg16_npy_path=None, d_net_path=None):
        # Both weight files are required; bail out of the process otherwise.
        if vgg16_npy_path is None:
            print vgg16_npy_path, ' not found!'
            exit()
        if d_net_path is None:
            exit()
        # .item() unwraps the 0-d object array into the underlying dict
        # mapping layer name -> [weights, biases].
        self.vgg16_params = np.load(vgg16_npy_path, encoding='latin1').item()
        self.d_net_params = np.load(d_net_path, encoding='latin1').item()
        print('npy file loaded')
    def build(self, rgb):
        '''
        load variable from npy to build the VGG
        :param rgb: rgb image [batch, height, width, 3] values scaled [0, 1]
        '''
        start_time = time.time()
        print('build model started')
        # Convert RGB to BGR and subtract the per-channel means.
        red, green, blue = tf.split(3, 3, rgb)
        assert red.get_shape().as_list()[1:] == [image_height, image_width, 1]
        assert green.get_shape().as_list()[1:] == [image_height, image_width, 1]
        assert blue.get_shape().as_list()[1:] == [image_height, image_width, 1]
        bgr = tf.concat(3, [
            blue - VGG_MEAN[0],
            green - VGG_MEAN[1],
            red - VGG_MEAN[2],
        ])
        assert bgr.get_shape().as_list()[1:] == [image_height, image_width, 3]
        # Conv layer 1 (frozen VGG16 weights)
        self.conv1_1 = self.conv_layer_const(bgr, 'conv1_1')
        self.conv1_2 = self.conv_layer_const(self.conv1_1, 'conv1_2')
        self.pool1 = self.max_pool(self.conv1_2, 'pool1')
        # Conv layer 2 (frozen VGG16 weights)
        self.conv2_1 = self.conv_layer_const(self.pool1, 'conv2_1')
        self.conv2_2 = self.conv_layer_const(self.conv2_1, 'conv2_2')
        self.pool2 = self.max_pool(self.conv2_2, 'pool2')
        # Conv layer 3 (d_net weights, trainable variables)
        self.conv3_1 = self.conv_layer(self.pool2, 'conv3_1')
        self.conv3_2 = self.conv_layer(self.conv3_1, 'conv3_2')
        self.conv3_3 = self.conv_layer(self.conv3_2, 'conv3_3')
        self.pool3 = self.max_pool(self.conv3_3, 'pool3')
        # Conv layer 4
        self.conv4_1 = self.conv_layer(self.pool3, 'conv4_1')
        self.conv4_2 = self.conv_layer(self.conv4_1, 'conv4_2')
        self.conv4_3 = self.conv_layer(self.conv4_2, 'conv4_3')
        self.pool4 = self.max_pool(self.conv4_3, 'pool4')
        # Conv layer 5
        self.conv5_1 = self.conv_layer(self.pool4, 'conv5_1')
        self.conv5_2 = self.conv_layer(self.conv5_1, 'conv5_2')
        self.conv5_3 = self.conv_layer(self.conv5_2, 'conv5_3')
        # Classifier head: fc6 -> relu -> fc7 -> softmax.
        self.fc6 = self.fc_layer(self.conv5_3, "fc6")
        self.relu6 = tf.nn.relu(self.fc6)
        self.fc7 = self.fc_layer(self.relu6, "fc7")
        # self.relu7 = tf.nn.relu(self.fc7)
        #
        # self.fc8 = self.fc_layer(self.relu7, "fc8")
        self.prob = tf.nn.softmax(self.fc7, name="prob")
        self.data_dict = None
        print('build model finished: %ds' % (time.time() - start_time))
    def fc_layer(self, bottom, name):
        """Fully connected layer over the flattened input, weights from
        the d_net dump (as constants)."""
        with tf.variable_scope(name):
            shape = bottom.get_shape().as_list()
            dim = 1
            for d in shape[1:]:
                dim *= d
            x = tf.reshape(bottom, [-1, dim])
            weights = self.get_fc_weight(name)
            biases = self.get_fc_bias(name)
            # Fully connected layer. Note that the '+' operation automatically
            # broadcasts the biases.
            fc = tf.nn.bias_add(tf.matmul(x, weights), biases)
            return fc
    def get_fc_bias(self, name):
        # d_net_params[name] is [weights, biases].
        return tf.constant(self.d_net_params[name][1], name="biases")
    def get_fc_weight(self, name):
        return tf.constant(self.d_net_params[name][0], name="weights")
    def avg_pool(self, bottom, name):
        """2x2 average pooling, stride 2."""
        return tf.nn.avg_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)
    def max_pool(self, bottom, name):
        """2x2 max pooling, stride 2."""
        return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)
    def conv_layer(self, bottom, name):
        """3x3 conv + bias + relu with trainable d_net weights."""
        with tf.variable_scope(name):
            filt = self.get_conv_filter(name)
            conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
            conv_biases = self.get_bias(name)
            bias = tf.nn.bias_add(conv, conv_biases)
            relu = tf.nn.relu(bias)
            return relu
    def conv_layer_const(self, bottom, name):
        """Conv + bias + relu with frozen (constant) VGG16 weights."""
        with tf.variable_scope(name):
            filt = self.get_conv_filter_const(name)
            conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
            conv_biases = self.get_bias_const(name)
            bias = tf.nn.bias_add(conv, conv_biases)
            relu = tf.nn.relu(bias)
            return relu
    def conv_layer_new(self, bottom, name):
        """Conv + bias with no activation (linear output)."""
        with tf.variable_scope(name):
            filt = self.get_conv_filter(name)
            conv_biases = self.get_bias(name)
            conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
            bias = tf.nn.bias_add(conv, conv_biases)
            return bias
    def get_conv_filter(self, name):
        # Trainable: loaded as a Variable from the d_net dump.
        return tf.Variable(self.d_net_params[name][0], name='filter')
    def get_bias(self, name):
        return tf.Variable(self.d_net_params[name][1], name='biases')
    def get_conv_filter_const(self, name):
        # Frozen: loaded as a constant from the VGG16 dump.
        return tf.constant(self.vgg16_params[name][0], name='filter')
    def get_bias_const(self, name):
        return tf.constant(self.vgg16_params[name][1], name='biases')
class Result:
    """A classification score paired with the image it was computed for."""

    def __init__(self, cls, imageName):
        # cls arrives triple-nested (sess.run list -> batch -> scores);
        # keep only the first scalar score.
        self.cls = cls[0][0][0]
        self.imageName = imageName
def saveResults(results, saveFilePath):
    """Write one '<imageName> <cls>' line per result to saveFilePath.

    Uses a context manager so the file handle is closed even if a write
    fails (the original leaked the handle on error).
    """
    with open(saveFilePath, 'w') as f:
        for r in results:
            f.write('%s %f\n' % (r.imageName, r.cls))
if __name__ == '__main__':
    # Pretrained VGG16 weights and the directory of d_net snapshots.
    vggModelPath = '../models/rpn_tf/vgg16.npy'
    dNetModelPath = '../models/d_net2/'
    saveImgDir = '../results/d_net/result2/'
    checkDir(saveImgDir, True)
    testImgDir = '../../DATA/syntheticData/new/d_netData'
    batchSize = 1
    # Training-iteration snapshots to evaluate.
    testIndex = [20,60,100,800] #22.4144 14.6109 10.955 10.397
    start_time = time.time()
    with tf.device('/gpu:0'):
        sess = tf.Session()
        for modelIndex in testIndex:
            # Rebuild the whole graph for each snapshot's weights.
            dNetModelPathNow = dNetModelPath+'params_'+str(modelIndex)+'.npy'
            cnn = d_net(vggModelPath, dNetModelPathNow)
            image = tf.placeholder(tf.float32, [batchSize, image_height, image_width, 3])
            with tf.name_scope('content_rpn'):
                cnn.build(image)
            sess.run(tf.initialize_all_variables())
            saveFilePath = saveImgDir+str(modelIndex)+'.txt'
            imageNames = getAllFiles(testImgDir,'.jpg')
            results = []
            for imageName in imageNames:
                # Score every test image with this snapshot.
                im = Image.open(imageName[0])
                imageNow = np.array(im.getdata()).reshape(1, image_height, image_width, 3).astype(np.float32)
                (cls) = sess.run([cnn.prob], feed_dict={image: imageNow})
                #cls = [[random.random(), 0]]
                r = Result(cls, imageName[0])
                print str(modelIndex) + ' ' + imageName[0] + ' ' + str(r.cls) + ' ' + 'time :', time.time() - start_time
                results.append(r)
            # Sort ascending by score before writing the per-snapshot file.
            results.sort(lambda x, y: cmp(x.cls,y.cls))
            saveResults(results,saveFilePath)
    print 'total use time', time.time()-start_time
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Dataflow operators."""
import copy
import re
import warnings
from contextlib import ExitStack
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union
from airflow.models import BaseOperator
from airflow.providers.apache.beam.hooks.beam import BeamHook, BeamRunnerType
from airflow.providers.google.cloud.hooks.dataflow import (
DEFAULT_DATAFLOW_LOCATION,
DataflowHook,
process_line_and_extract_dataflow_job_id_callback,
)
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.google.cloud.links.dataflow import DataflowJobLink
from airflow.version import version
if TYPE_CHECKING:
from airflow.utils.context import Context
class CheckJobRunning(Enum):
    """
    Helper enum for choosing what to do if job is already running
    IgnoreJob - do not check if running
    FinishIfRunning - finish current dag run with no action
    WaitForRun - wait for job to finish and then continue with new job
    """

    # Values are arbitrary; members are compared by identity only.
    IgnoreJob = 1
    FinishIfRunning = 2
    WaitForRun = 3
class DataflowConfiguration:
    """Dataflow configuration that can be passed to
    :py:class:`~airflow.providers.apache.beam.operators.beam.BeamRunJavaPipelineOperator` and
    :py:class:`~airflow.providers.apache.beam.operators.beam.BeamRunPythonPipelineOperator`.

    :param job_name: The 'jobName' to use when executing the Dataflow job
        (templated). This ends up being set in the pipeline options, so any entry
        with key ``'jobName'`` or ``'job_name'``in ``options`` will be overwritten.
    :param append_job_name: True if unique suffix has to be appended to job name.
    :param project_id: Optional, the Google Cloud project ID in which to start a job.
        If set to None or missing, the default project_id from the Google Cloud connection is used.
    :param location: Job location.
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
    :param delegate_to: The account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :param poll_sleep: The time in seconds to sleep between polling Google
        Cloud Platform for the dataflow job status while the job is in the
        JOB_STATE_RUNNING state.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :param drain_pipeline: Optional, set to True if want to stop streaming job by draining it
        instead of canceling during killing task instance. See:
        https://cloud.google.com/dataflow/docs/guides/stopping-a-pipeline
    :param cancel_timeout: How long (in seconds) operator should wait for the pipeline to be
        successfully cancelled when task is being killed. (optional) default to 300s
    :param wait_until_finished: (Optional)
        If True, wait for the end of pipeline execution before exiting.
        If False, only submits job.
        If None, default behavior.

        The default behavior depends on the type of pipeline:

        * for the streaming pipeline, wait for jobs to start,
        * for the batch pipeline, wait for the jobs to complete.

        .. warning::
            You cannot call ``PipelineResult.wait_until_finish`` method in your pipeline code for the operator
            to work properly. i. e. you must use asynchronous execution. Otherwise, your pipeline will
            always wait until finished. For more information, look at:
            `Asynchronous execution
            <https://cloud.google.com/dataflow/docs/guides/specifying-exec-params#python_10>`__

        The process of starting the Dataflow job in Airflow consists of two steps:

        * running a subprocess and reading the stderr/stderr log for the job id.
        * loop waiting for the end of the job ID from the previous step.
          This loop checks the status of the job.

        Step two is started just after step one has finished, so if you have wait_until_finished in your
        pipeline code, step two will not start until the process stops. When this process stops,
        steps two will run, but it will only execute one iteration as the job will be in a terminal state.

        If you in your pipeline do not call the wait_for_pipeline method but pass wait_until_finish=True
        to the operator, the second loop will wait for the job's terminal state.

        If you in your pipeline do not call the wait_for_pipeline method, and pass wait_until_finish=False
        to the operator, the second loop will check once is job not in terminal state and exit the loop.
    :param multiple_jobs: If pipeline creates multiple jobs then monitor all jobs. Supported only by
        :py:class:`~airflow.providers.apache.beam.operators.beam.BeamRunJavaPipelineOperator`
    :param check_if_running: Before running job, validate that a previous run is not in process.
        IgnoreJob = do not check if running.
        FinishIfRunning = if job is running finish with nothing.
        WaitForRun = wait until job finished and the run job.
        Supported only by:
        :py:class:`~airflow.providers.apache.beam.operators.beam.BeamRunJavaPipelineOperator`
    """

    # Rendered by Airflow's templating engine before execution.
    template_fields: Sequence[str] = ("job_name", "location")

    def __init__(
        self,
        *,
        job_name: str = "{{task.task_id}}",
        append_job_name: bool = True,
        project_id: Optional[str] = None,
        location: Optional[str] = DEFAULT_DATAFLOW_LOCATION,
        gcp_conn_id: str = "google_cloud_default",
        delegate_to: Optional[str] = None,
        poll_sleep: int = 10,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        drain_pipeline: bool = False,
        cancel_timeout: Optional[int] = 5 * 60,
        wait_until_finished: Optional[bool] = None,
        multiple_jobs: Optional[bool] = None,
        check_if_running: CheckJobRunning = CheckJobRunning.WaitForRun,
    ) -> None:
        # Plain attribute storage only - validation happens in the Beam
        # operators that consume this configuration object.
        self.job_name = job_name
        self.append_job_name = append_job_name
        self.project_id = project_id
        self.location = location
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.poll_sleep = poll_sleep
        self.impersonation_chain = impersonation_chain
        self.drain_pipeline = drain_pipeline
        self.cancel_timeout = cancel_timeout
        self.wait_until_finished = wait_until_finished
        self.multiple_jobs = multiple_jobs
        self.check_if_running = check_if_running
class DataflowCreateJavaJobOperator(BaseOperator):
"""
Start a Java Cloud Dataflow batch job. The parameters of the operation
will be passed to the job.
This class is deprecated.
Please use `providers.apache.beam.operators.beam.BeamRunJavaPipelineOperator`.
**Example**: ::
default_args = {
"owner": "airflow",
"depends_on_past": False,
"start_date": (2016, 8, 1),
"email": ["alex@vanboxel.be"],
"email_on_failure": False,
"email_on_retry": False,
"retries": 1,
"retry_delay": timedelta(minutes=30),
"dataflow_default_options": {
"project": "my-gcp-project",
"zone": "us-central1-f",
"stagingLocation": "gs://bucket/tmp/dataflow/staging/",
},
}
dag = DAG("test-dag", default_args=default_args)
task = DataflowCreateJavaJobOperator(
gcp_conn_id="gcp_default",
task_id="normalize-cal",
jar="{{var.value.gcp_dataflow_base}}pipeline-ingress-cal-normalize-1.0.jar",
options={
"autoscalingAlgorithm": "BASIC",
"maxNumWorkers": "50",
"start": "{{ds}}",
"partitionType": "DAY",
},
dag=dag,
)
.. seealso::
For more detail on job submission have a look at the reference:
https://cloud.google.com/dataflow/pipelines/specifying-exec-params
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataflowCreateJavaJobOperator`
:param jar: The reference to a self executing Dataflow jar (templated).
:param job_name: The 'jobName' to use when executing the Dataflow job
(templated). This ends up being set in the pipeline options, so any entry
with key ``'jobName'`` in ``options`` will be overwritten.
:param dataflow_default_options: Map of default job options.
:param options: Map of job specific options.The key must be a dictionary.
The value can contain different types:
* If the value is None, the single option - ``--key`` (without value) will be added.
* If the value is False, this option will be skipped
* If the value is True, the single option - ``--key`` (without value) will be added.
* If the value is list, the many options will be added for each key.
If the value is ``['A', 'B']`` and the key is ``key`` then the ``--key=A --key=B`` options
will be left
* Other value types will be replaced with the Python textual representation.
When defining labels (``labels`` option), you can also provide a dictionary.
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param location: Job location.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:param poll_sleep: The time in seconds to sleep between polling Google
Cloud Platform for the dataflow job status while the job is in the
JOB_STATE_RUNNING state.
:param job_class: The name of the dataflow job class to be executed, it
is often not the main class configured in the dataflow jar file.
:param multiple_jobs: If pipeline creates multiple jobs then monitor all jobs
:param check_if_running: before running job, validate that a previous run is not in process
if job is running finish with nothing, WaitForRun= wait until job finished and the run job)
``jar``, ``options``, and ``job_name`` are templated so you can use variables in them.
:param cancel_timeout: How long (in seconds) operator should wait for the pipeline to be
successfully cancelled when task is being killed.
:param wait_until_finished: (Optional)
If True, wait for the end of pipeline execution before exiting.
If False, only submits job.
If None, default behavior.
The default behavior depends on the type of pipeline:
* for the streaming pipeline, wait for jobs to start,
* for the batch pipeline, wait for the jobs to complete.
.. warning::
You cannot call ``PipelineResult.wait_until_finish`` method in your pipeline code for the operator
to work properly. i. e. you must use asynchronous execution. Otherwise, your pipeline will
always wait until finished. For more information, look at:
`Asynchronous execution
<https://cloud.google.com/dataflow/docs/guides/specifying-exec-params#python_10>`__
The process of starting the Dataflow job in Airflow consists of two steps:
* running a subprocess and reading the stderr/stderr log for the job id.
* loop waiting for the end of the job ID from the previous step.
This loop checks the status of the job.
Step two is started just after step one has finished, so if you have wait_until_finished in your
pipeline code, step two will not start until the process stops. When this process stops,
steps two will run, but it will only execute one iteration as the job will be in a terminal state.
If you in your pipeline do not call the wait_for_pipeline method but pass wait_until_finish=True
to the operator, the second loop will wait for the job's terminal state.
If you in your pipeline do not call the wait_for_pipeline method, and pass wait_until_finish=False
to the operator, the second loop will check once is job not in terminal state and exit the loop.
Note that both
``dataflow_default_options`` and ``options`` will be merged to specify pipeline
execution parameter, and ``dataflow_default_options`` is expected to save
high-level options, for instances, project and zone information, which
apply to all dataflow operators in the DAG.
It's a good practice to define dataflow_* parameters in the default_args of the dag
like the project, zone and staging location.
.. code-block:: python
default_args = {
"dataflow_default_options": {
"zone": "europe-west1-d",
"stagingLocation": "gs://my-staging-bucket/staging/",
}
}
You need to pass the path to your dataflow as a file reference with the ``jar``
parameter, the jar needs to be a self executing jar (see documentation here:
https://beam.apache.org/documentation/runners/dataflow/#self-executing-jar).
Use ``options`` to pass on options to your job.
.. code-block:: python
t1 = DataflowCreateJavaJobOperator(
task_id="dataflow_example",
jar="{{var.value.gcp_dataflow_base}}pipeline/build/libs/pipeline-example-1.0.jar",
options={
"autoscalingAlgorithm": "BASIC",
"maxNumWorkers": "50",
"start": "{{ds}}",
"partitionType": "DAY",
"labels": {"foo": "bar"},
},
gcp_conn_id="airflow-conn-id",
dag=my_dag,
)
"""
template_fields: Sequence[str] = ("options", "jar", "job_name")
ui_color = "#0273d4"
def __init__(
self,
*,
jar: str,
job_name: str = "{{task.task_id}}",
dataflow_default_options: Optional[dict] = None,
options: Optional[dict] = None,
project_id: Optional[str] = None,
location: str = DEFAULT_DATAFLOW_LOCATION,
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
poll_sleep: int = 10,
job_class: Optional[str] = None,
check_if_running: CheckJobRunning = CheckJobRunning.WaitForRun,
multiple_jobs: bool = False,
cancel_timeout: Optional[int] = 10 * 60,
wait_until_finished: Optional[bool] = None,
**kwargs,
) -> None:
# TODO: Remove one day
warnings.warn(
f"The `{self.__class__.__name__}` operator is deprecated, "
f"please use `providers.apache.beam.operators.beam.BeamRunJavaPipelineOperator` instead.",
DeprecationWarning,
stacklevel=2,
)
super().__init__(**kwargs)
dataflow_default_options = dataflow_default_options or {}
options = options or {}
options.setdefault("labels", {}).update(
{"airflow-version": "v" + version.replace(".", "-").replace("+", "-")}
)
self.project_id = project_id
self.location = location
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.jar = jar
self.multiple_jobs = multiple_jobs
self.job_name = job_name
self.dataflow_default_options = dataflow_default_options
self.options = options
self.poll_sleep = poll_sleep
self.job_class = job_class
self.check_if_running = check_if_running
self.cancel_timeout = cancel_timeout
self.wait_until_finished = wait_until_finished
self.job_id = None
self.beam_hook: Optional[BeamHook] = None
self.dataflow_hook: Optional[DataflowHook] = None
def execute(self, context: 'Context'):
"""Execute the Apache Beam Pipeline."""
self.beam_hook = BeamHook(runner=BeamRunnerType.DataflowRunner)
self.dataflow_hook = DataflowHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
poll_sleep=self.poll_sleep,
cancel_timeout=self.cancel_timeout,
wait_until_finished=self.wait_until_finished,
)
job_name = self.dataflow_hook.build_dataflow_job_name(job_name=self.job_name)
pipeline_options = copy.deepcopy(self.dataflow_default_options)
pipeline_options["jobName"] = self.job_name
pipeline_options["project"] = self.project_id or self.dataflow_hook.project_id
pipeline_options["region"] = self.location
pipeline_options.update(self.options)
pipeline_options.setdefault("labels", {}).update(
{"airflow-version": "v" + version.replace(".", "-").replace("+", "-")}
)
pipeline_options.update(self.options)
def set_current_job_id(job_id):
self.job_id = job_id
process_line_callback = process_line_and_extract_dataflow_job_id_callback(
on_new_job_id_callback=set_current_job_id
)
with ExitStack() as exit_stack:
if self.jar.lower().startswith("gs://"):
gcs_hook = GCSHook(self.gcp_conn_id, self.delegate_to)
tmp_gcs_file = exit_stack.enter_context(gcs_hook.provide_file(object_url=self.jar))
self.jar = tmp_gcs_file.name
is_running = False
if self.check_if_running != CheckJobRunning.IgnoreJob:
is_running = self.dataflow_hook.is_job_dataflow_running(
name=self.job_name,
variables=pipeline_options,
)
while is_running and self.check_if_running == CheckJobRunning.WaitForRun:
is_running = self.dataflow_hook.is_job_dataflow_running(
name=self.job_name,
variables=pipeline_options,
)
if not is_running:
pipeline_options["jobName"] = job_name
with self.dataflow_hook.provide_authorized_gcloud():
self.beam_hook.start_java_pipeline(
variables=pipeline_options,
jar=self.jar,
job_class=self.job_class,
process_line_callback=process_line_callback,
)
self.dataflow_hook.wait_for_done(
job_name=job_name,
location=self.location,
job_id=self.job_id,
multiple_jobs=self.multiple_jobs,
)
return {"job_id": self.job_id}
def on_kill(self) -> None:
self.log.info("On kill.")
if self.job_id:
self.dataflow_hook.cancel_job(
job_id=self.job_id, project_id=self.project_id or self.dataflow_hook.project_id
)
class DataflowTemplatedJobStartOperator(BaseOperator):
    """
    Start a Templated Cloud Dataflow job. The parameters of the operation
    will be passed to the job.
    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:DataflowTemplatedJobStartOperator`
    :param template: The reference to the Dataflow template.
    :param job_name: The 'jobName' to use when executing the Dataflow template
        (templated).
    :param options: Map of job runtime environment options.
        It will update environment argument if passed.
        .. seealso::
            For more information on possible configurations, look at the API documentation
            `https://cloud.google.com/dataflow/pipelines/specifying-exec-params
            <https://cloud.google.com/dataflow/docs/reference/rest/v1b3/RuntimeEnvironment>`__
    :param dataflow_default_options: Map of default job environment options.
    :param parameters: Map of job specific parameters for the template.
    :param project_id: Optional, the Google Cloud project ID in which to start a job.
        If set to None or missing, the default project_id from the Google Cloud connection is used.
    :param location: Job location.
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
    :param delegate_to: The account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :param poll_sleep: The time in seconds to sleep between polling Google
        Cloud Platform for the dataflow job status while the job is in the
        JOB_STATE_RUNNING state.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :param environment: Optional, Map of job runtime environment options.
        .. seealso::
            For more information on possible configurations, look at the API documentation
            `https://cloud.google.com/dataflow/pipelines/specifying-exec-params
            <https://cloud.google.com/dataflow/docs/reference/rest/v1b3/RuntimeEnvironment>`__
    :param cancel_timeout: How long (in seconds) operator should wait for the pipeline to be
        successfully cancelled when task is being killed.
    :param wait_until_finished: (Optional)
        If True, wait for the end of pipeline execution before exiting.
        If False, only submits job.
        If None, default behavior.
        The default behavior depends on the type of pipeline:
        * for the streaming pipeline, wait for jobs to start,
        * for the batch pipeline, wait for the jobs to complete.
        .. warning::
            You cannot call ``PipelineResult.wait_until_finish`` method in your pipeline code for the operator
            to work properly. i. e. you must use asynchronous execution. Otherwise, your pipeline will
            always wait until finished. For more information, look at:
            `Asynchronous execution
            <https://cloud.google.com/dataflow/docs/guides/specifying-exec-params#python_10>`__
        The process of starting the Dataflow job in Airflow consists of two steps:
        * running a subprocess and reading the stdout/stderr log for the job id.
        * loop waiting for the end of the job ID from the previous step.
          This loop checks the status of the job.
        Step two is started just after step one has finished, so if you have wait_until_finished in your
        pipeline code, step two will not start until the process stops. When this process stops,
        step two will run, but it will only execute one iteration as the job will be in a terminal state.
        If you in your pipeline do not call the wait_for_pipeline method but pass wait_until_finish=True
        to the operator, the second loop will wait for the job's terminal state.
        If you in your pipeline do not call the wait_for_pipeline method, and pass wait_until_finish=False
        to the operator, the second loop will check once whether the job is not in a terminal state and
        exit the loop.
    It's a good practice to define dataflow_* parameters in the default_args of the dag
    like the project, zone and staging location.
    .. seealso::
        https://cloud.google.com/dataflow/docs/reference/rest/v1b3/LaunchTemplateParameters
        https://cloud.google.com/dataflow/docs/reference/rest/v1b3/RuntimeEnvironment
    .. code-block:: python
       default_args = {
           "dataflow_default_options": {
               "zone": "europe-west1-d",
               "tempLocation": "gs://my-staging-bucket/staging/",
           }
       }
    You need to pass the path to your dataflow template as a file reference with the
    ``template`` parameter. Use ``parameters`` to pass on parameters to your job.
    Use ``environment`` to pass on runtime environment variables to your job.
    .. code-block:: python
       t1 = DataflowTemplatedJobStartOperator(
           task_id="dataflow_example",
           template="{{var.value.gcp_dataflow_base}}",
           parameters={
               "inputFile": "gs://bucket/input/my_input.txt",
               "outputFile": "gs://bucket/output/my_output.txt",
           },
           gcp_conn_id="airflow-conn-id",
           dag=my_dag,
       )
    ``template``, ``dataflow_default_options``, ``parameters``, and ``job_name`` are
    templated so you can use variables in them.
    Note that ``dataflow_default_options`` is expected to save high-level options
    for project information, which apply to all dataflow operators in the DAG.
    .. seealso::
        https://cloud.google.com/dataflow/docs/reference/rest/v1b3
        /LaunchTemplateParameters
        https://cloud.google.com/dataflow/docs/reference/rest/v1b3/RuntimeEnvironment
        For more detail on job template execution have a look at the reference:
        https://cloud.google.com/dataflow/docs/templates/executing-templates
    """

    template_fields: Sequence[str] = (
        "template",
        "job_name",
        "options",
        "parameters",
        "project_id",
        "location",
        "gcp_conn_id",
        "impersonation_chain",
        "environment",
    )
    ui_color = "#0273d4"
    operator_extra_links = (DataflowJobLink(),)

    def __init__(
        self,
        *,
        template: str,
        job_name: str = "{{task.task_id}}",
        options: Optional[Dict[str, Any]] = None,
        dataflow_default_options: Optional[Dict[str, Any]] = None,
        parameters: Optional[Dict[str, str]] = None,
        project_id: Optional[str] = None,
        location: str = DEFAULT_DATAFLOW_LOCATION,
        gcp_conn_id: str = "google_cloud_default",
        delegate_to: Optional[str] = None,
        poll_sleep: int = 10,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        environment: Optional[Dict] = None,
        cancel_timeout: Optional[int] = 10 * 60,
        wait_until_finished: Optional[bool] = None,
        **kwargs,
    ) -> None:
        """Store the template launch configuration."""
        super().__init__(**kwargs)
        self.template = template
        self.job_name = job_name
        self.options = options or {}
        self.dataflow_default_options = dataflow_default_options or {}
        self.parameters = parameters or {}
        self.project_id = project_id
        self.location = location
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.poll_sleep = poll_sleep
        # Populated by the on_new_job_callback during execute(); used by on_kill().
        self.job = None
        self.hook: Optional[DataflowHook] = None
        self.impersonation_chain = impersonation_chain
        self.environment = environment
        self.cancel_timeout = cancel_timeout
        self.wait_until_finished = wait_until_finished

    def execute(self, context: 'Context') -> dict:
        """Launch the Dataflow template and return the resulting job resource."""
        self.hook = DataflowHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            poll_sleep=self.poll_sleep,
            impersonation_chain=self.impersonation_chain,
            cancel_timeout=self.cancel_timeout,
            wait_until_finished=self.wait_until_finished,
        )

        def set_current_job(current_job):
            # Remember the job and expose a console link as soon as it is known.
            self.job = current_job
            DataflowJobLink.persist(self, context, self.project_id, self.location, self.job.get("id"))

        # Merge into a fresh dict: the previous in-place
        # ``self.dataflow_default_options.update(self.options)`` mutated the
        # operator's own attribute, leaking per-run options into operator state
        # across retries/reschedules.
        options = {**self.dataflow_default_options, **self.options}

        job = self.hook.start_template_dataflow(
            job_name=self.job_name,
            variables=options,
            parameters=self.parameters,
            dataflow_template=self.template,
            on_new_job_callback=set_current_job,
            project_id=self.project_id,
            location=self.location,
            environment=self.environment,
        )
        return job

    def on_kill(self) -> None:
        """Cancel the launched Dataflow job, if any, when the task is killed."""
        self.log.info("On kill.")
        if self.job:
            self.hook.cancel_job(
                job_id=self.job.get("id"),
                project_id=self.job.get("projectId"),
                location=self.job.get("location"),
            )
class DataflowStartFlexTemplateOperator(BaseOperator):
    """
    Starts flex templates with the Dataflow pipeline.
    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:DataflowStartFlexTemplateOperator`
    :param body: The request body. See:
        https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.locations.flexTemplates/launch#request-body
    :param location: The location of the Dataflow job (for example europe-west1)
    :param project_id: The ID of the GCP project that owns the job.
        If set to ``None`` or missing, the default project_id from the GCP connection is used.
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud
        Platform.
    :param delegate_to: The account to impersonate, if any.
        For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :param drain_pipeline: Optional, set to True if want to stop streaming job by draining it
        instead of canceling during killing task instance. See:
        https://cloud.google.com/dataflow/docs/guides/stopping-a-pipeline
    :param cancel_timeout: How long (in seconds) operator should wait for the pipeline to be
        successfully cancelled when task is being killed.
    :param wait_until_finished: (Optional)
        If True, wait for the end of pipeline execution before exiting.
        If False, only submits job.
        If None, default behavior.
        The default behavior depends on the type of pipeline:
        * for the streaming pipeline, wait for jobs to start,
        * for the batch pipeline, wait for the jobs to complete.
        .. warning::
            You cannot call ``PipelineResult.wait_until_finish`` method in your pipeline code for the operator
            to work properly. i. e. you must use asynchronous execution. Otherwise, your pipeline will
            always wait until finished. For more information, look at:
            `Asynchronous execution
            <https://cloud.google.com/dataflow/docs/guides/specifying-exec-params#python_10>`__
        The process of starting the Dataflow job in Airflow consists of two steps:
        * running a subprocess and reading the stdout/stderr log for the job id.
        * loop waiting for the end of the job ID from the previous step.
          This loop checks the status of the job.
        Step two is started just after step one has finished, so if you have wait_until_finished in your
        pipeline code, step two will not start until the process stops. When this process stops,
        step two will run, but it will only execute one iteration as the job will be in a terminal state.
        If you in your pipeline do not call the wait_for_pipeline method but pass wait_until_finish=True
        to the operator, the second loop will wait for the job's terminal state.
        If you in your pipeline do not call the wait_for_pipeline method, and pass wait_until_finish=False
        to the operator, the second loop will check once whether the job is not in a terminal state and
        exit the loop.
    """

    template_fields: Sequence[str] = ("body", "location", "project_id", "gcp_conn_id")
    operator_extra_links = (DataflowJobLink(),)

    def __init__(
        self,
        body: Dict,
        location: str,
        project_id: Optional[str] = None,
        gcp_conn_id: str = "google_cloud_default",
        delegate_to: Optional[str] = None,
        drain_pipeline: bool = False,
        cancel_timeout: Optional[int] = 10 * 60,
        wait_until_finished: Optional[bool] = None,
        *args,
        **kwargs,
    ) -> None:
        """Store the flex-template launch configuration."""
        super().__init__(*args, **kwargs)
        self.body = body
        self.location = location
        self.project_id = project_id
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.drain_pipeline = drain_pipeline
        self.cancel_timeout = cancel_timeout
        self.wait_until_finished = wait_until_finished
        # Populated by the on_new_job_callback during execute(); used by on_kill().
        self.job = None
        self.hook: Optional[DataflowHook] = None

    def execute(self, context: 'Context'):
        """Launch the flex template and return the resulting job resource."""
        self.hook = DataflowHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            drain_pipeline=self.drain_pipeline,
            cancel_timeout=self.cancel_timeout,
            wait_until_finished=self.wait_until_finished,
        )

        def set_current_job(current_job):
            # Remember the job and expose a console link as soon as it is known.
            self.job = current_job
            DataflowJobLink.persist(self, context, self.project_id, self.location, self.job.get("id"))

        job = self.hook.start_flex_template(
            body=self.body,
            location=self.location,
            project_id=self.project_id,
            on_new_job_callback=set_current_job,
        )
        return job

    def on_kill(self) -> None:
        """Cancel the launched Dataflow job, if any, when the task is killed."""
        self.log.info("On kill.")
        if self.job:
            self.hook.cancel_job(
                job_id=self.job.get("id"),
                project_id=self.job.get("projectId"),
                location=self.job.get("location"),
            )
class DataflowStartSqlJobOperator(BaseOperator):
    """
    Starts Dataflow SQL query.
    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:DataflowStartSqlJobOperator`
    .. warning::
        This operator requires ``gcloud`` command (Google Cloud SDK) must be installed on the Airflow worker
        <https://cloud.google.com/sdk/docs/install>`__
    :param job_name: The unique name to assign to the Cloud Dataflow job.
    :param query: The SQL query to execute.
    :param options: Job parameters to be executed. It can be a dictionary with the following keys.
        For more information, look at:
        `gcloud beta dataflow sql query
        <https://cloud.google.com/sdk/gcloud/reference/beta/dataflow/sql/query>`__
        command reference
    :param location: The location of the Dataflow job (for example europe-west1)
    :param project_id: The ID of the GCP project that owns the job.
        If set to ``None`` or missing, the default project_id from the GCP connection is used.
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud
        Platform.
    :param delegate_to: The account to impersonate, if any.
        For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :param drain_pipeline: Optional, set to True if want to stop streaming job by draining it
        instead of canceling during killing task instance. See:
        https://cloud.google.com/dataflow/docs/guides/stopping-a-pipeline
    """

    template_fields: Sequence[str] = (
        "job_name",
        "query",
        "options",
        "location",
        "project_id",
        "gcp_conn_id",
    )
    # Render the query field with SQL syntax highlighting in the UI.
    template_fields_renderers = {"query": "sql"}

    def __init__(
        self,
        job_name: str,
        query: str,
        options: Dict[str, Any],
        location: str = DEFAULT_DATAFLOW_LOCATION,
        project_id: Optional[str] = None,
        gcp_conn_id: str = "google_cloud_default",
        delegate_to: Optional[str] = None,
        drain_pipeline: bool = False,
        *args,
        **kwargs,
    ) -> None:
        """Store the SQL job configuration."""
        super().__init__(*args, **kwargs)
        self.job_name = job_name
        self.query = query
        self.options = options
        self.location = location
        self.project_id = project_id
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.drain_pipeline = drain_pipeline
        # Populated by the on_new_job_callback during execute(); used by on_kill().
        self.job = None
        self.hook: Optional[DataflowHook] = None

    def execute(self, context: 'Context'):
        """Start the Dataflow SQL query and return the resulting job resource."""
        self.hook = DataflowHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            drain_pipeline=self.drain_pipeline,
        )

        def set_current_job(current_job):
            # Remember the job so on_kill() can cancel it.
            self.job = current_job

        job = self.hook.start_sql_job(
            job_name=self.job_name,
            query=self.query,
            options=self.options,
            location=self.location,
            project_id=self.project_id,
            on_new_job_callback=set_current_job,
        )
        return job

    def on_kill(self) -> None:
        """Cancel the launched Dataflow job, if any, when the task is killed."""
        self.log.info("On kill.")
        if self.job:
            self.hook.cancel_job(
                job_id=self.job.get("id"),
                project_id=self.job.get("projectId"),
                location=self.job.get("location"),
            )
class DataflowCreatePythonJobOperator(BaseOperator):
    """
    Launching Cloud Dataflow jobs written in python. Note that both
    dataflow_default_options and options will be merged to specify pipeline
    execution parameter, and dataflow_default_options is expected to save
    high-level options, for instances, project and zone information, which
    apply to all dataflow operators in the DAG.
    This class is deprecated.
    Please use `providers.apache.beam.operators.beam.BeamRunPythonPipelineOperator`.
    .. seealso::
        For more detail on job submission have a look at the reference:
        https://cloud.google.com/dataflow/pipelines/specifying-exec-params
    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:DataflowCreatePythonJobOperator`
    :param py_file: Reference to the python dataflow pipeline file.py, e.g.,
        /some/local/file/path/to/your/python/pipeline/file. (templated)
    :param job_name: The 'job_name' to use when executing the Dataflow job
        (templated). This ends up being set in the pipeline options, so any entry
        with key ``'jobName'`` or ``'job_name'`` in ``options`` will be overwritten.
    :param py_options: Additional python options, e.g., ["-m", "-v"].
    :param dataflow_default_options: Map of default job options.
    :param options: Map of job specific options.The key must be a dictionary.
        The value can contain different types:
        * If the value is None, the single option - ``--key`` (without value) will be added.
        * If the value is False, this option will be skipped
        * If the value is True, the single option - ``--key`` (without value) will be added.
        * If the value is list, the many options will be added for each key.
          If the value is ``['A', 'B']`` and the key is ``key`` then the ``--key=A --key=B`` options
          will be left
        * Other value types will be replaced with the Python textual representation.
        When defining labels (``labels`` option), you can also provide a dictionary.
    :param py_interpreter: Python version of the beam pipeline.
        If None, this defaults to the python3.
        To track python versions supported by beam and related
        issues check: https://issues.apache.org/jira/browse/BEAM-1251
    :param py_requirements: Additional python package(s) to install.
        If a value is passed to this parameter, a new virtual environment has been created with
        additional packages installed.
        You could also install the apache_beam package if it is not installed on your system or you want
        to use a different version.
    :param py_system_site_packages: Whether to include system_site_packages in your virtualenv.
        See virtualenv documentation for more information.
        This option is only relevant if the ``py_requirements`` parameter is not None.
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
    :param project_id: Optional, the Google Cloud project ID in which to start a job.
        If set to None or missing, the default project_id from the Google Cloud connection is used.
    :param location: Job location.
    :param delegate_to: The account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :param poll_sleep: The time in seconds to sleep between polling Google
        Cloud Platform for the dataflow job status while the job is in the
        JOB_STATE_RUNNING state.
    :param drain_pipeline: Optional, set to True if want to stop streaming job by draining it
        instead of canceling during killing task instance. See:
        https://cloud.google.com/dataflow/docs/guides/stopping-a-pipeline
    :param cancel_timeout: How long (in seconds) operator should wait for the pipeline to be
        successfully cancelled when task is being killed.
    :param wait_until_finished: (Optional)
        If True, wait for the end of pipeline execution before exiting.
        If False, only submits job.
        If None, default behavior.
        The default behavior depends on the type of pipeline:
        * for the streaming pipeline, wait for jobs to start,
        * for the batch pipeline, wait for the jobs to complete.
        .. warning::
            You cannot call ``PipelineResult.wait_until_finish`` method in your pipeline code for the operator
            to work properly. i. e. you must use asynchronous execution. Otherwise, your pipeline will
            always wait until finished. For more information, look at:
            `Asynchronous execution
            <https://cloud.google.com/dataflow/docs/guides/specifying-exec-params#python_10>`__
        The process of starting the Dataflow job in Airflow consists of two steps:
        * running a subprocess and reading the stdout/stderr log for the job id.
        * loop waiting for the end of the job ID from the previous step.
          This loop checks the status of the job.
        Step two is started just after step one has finished, so if you have wait_until_finished in your
        pipeline code, step two will not start until the process stops. When this process stops,
        step two will run, but it will only execute one iteration as the job will be in a terminal state.
        If you in your pipeline do not call the wait_for_pipeline method but pass wait_until_finish=True
        to the operator, the second loop will wait for the job's terminal state.
        If you in your pipeline do not call the wait_for_pipeline method, and pass wait_until_finish=False
        to the operator, the second loop will check once whether the job is not in a terminal state and
        exit the loop.
    """

    template_fields: Sequence[str] = ("options", "dataflow_default_options", "job_name", "py_file")

    def __init__(
        self,
        *,
        py_file: str,
        job_name: str = "{{task.task_id}}",
        dataflow_default_options: Optional[dict] = None,
        options: Optional[dict] = None,
        py_interpreter: str = "python3",
        py_options: Optional[List[str]] = None,
        py_requirements: Optional[List[str]] = None,
        py_system_site_packages: bool = False,
        project_id: Optional[str] = None,
        location: str = DEFAULT_DATAFLOW_LOCATION,
        gcp_conn_id: str = "google_cloud_default",
        delegate_to: Optional[str] = None,
        poll_sleep: int = 10,
        drain_pipeline: bool = False,
        cancel_timeout: Optional[int] = 10 * 60,
        wait_until_finished: Optional[bool] = None,
        **kwargs,
    ) -> None:
        """Store the job configuration and emit the deprecation warning."""
        # TODO: Remove one day
        warnings.warn(
            f"The `{self.__class__.__name__}` operator is deprecated, "
            "please use `providers.apache.beam.operators.beam.BeamRunPythonPipelineOperator` instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        super().__init__(**kwargs)
        self.py_file = py_file
        self.job_name = job_name
        self.py_options = py_options or []
        self.dataflow_default_options = dataflow_default_options or {}
        self.options = options or {}
        # Tag every job with the Airflow version; dots/pluses are not valid label characters.
        self.options.setdefault("labels", {}).update(
            {"airflow-version": "v" + version.replace(".", "-").replace("+", "-")}
        )
        self.py_interpreter = py_interpreter
        self.py_requirements = py_requirements
        self.py_system_site_packages = py_system_site_packages
        self.project_id = project_id
        self.location = location
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.poll_sleep = poll_sleep
        self.drain_pipeline = drain_pipeline
        self.cancel_timeout = cancel_timeout
        self.wait_until_finished = wait_until_finished
        # Populated during execute().
        self.job_id = None
        self.beam_hook: Optional[BeamHook] = None
        self.dataflow_hook: Optional[DataflowHook] = None

    def execute(self, context: 'Context'):
        """Execute the python dataflow job."""
        self.beam_hook = BeamHook(runner=BeamRunnerType.DataflowRunner)
        self.dataflow_hook = DataflowHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            poll_sleep=self.poll_sleep,
            impersonation_chain=None,
            drain_pipeline=self.drain_pipeline,
            cancel_timeout=self.cancel_timeout,
            wait_until_finished=self.wait_until_finished,
        )
        job_name = self.dataflow_hook.build_dataflow_job_name(job_name=self.job_name)
        pipeline_options = self.dataflow_default_options.copy()
        pipeline_options["job_name"] = job_name
        pipeline_options["project"] = self.project_id or self.dataflow_hook.project_id
        pipeline_options["region"] = self.location
        pipeline_options.update(self.options)

        # Convert argument names from lowerCamelCase to snake_case.
        # (Was a lambda assignment; PEP 8 E731 prefers a named def.)
        def camel_to_snake(name: str) -> str:
            return re.sub(r"[A-Z]", lambda x: "_" + x.group(0).lower(), name)

        formatted_pipeline_options = {
            camel_to_snake(key): value for key, value in pipeline_options.items()
        }

        def set_current_job_id(job_id):
            # Captured by the log-line callback so on_kill() can cancel the job.
            self.job_id = job_id

        process_line_callback = process_line_and_extract_dataflow_job_id_callback(
            on_new_job_id_callback=set_current_job_id
        )

        with ExitStack() as exit_stack:
            if self.py_file.lower().startswith("gs://"):
                # Download the pipeline file from GCS into a temporary local file.
                gcs_hook = GCSHook(self.gcp_conn_id, self.delegate_to)
                tmp_gcs_file = exit_stack.enter_context(gcs_hook.provide_file(object_url=self.py_file))
                self.py_file = tmp_gcs_file.name

            with self.dataflow_hook.provide_authorized_gcloud():
                self.beam_hook.start_python_pipeline(
                    variables=formatted_pipeline_options,
                    py_file=self.py_file,
                    py_options=self.py_options,
                    py_interpreter=self.py_interpreter,
                    py_requirements=self.py_requirements,
                    py_system_site_packages=self.py_system_site_packages,
                    process_line_callback=process_line_callback,
                )

            self.dataflow_hook.wait_for_done(
                job_name=job_name,
                location=self.location,
                job_id=self.job_id,
                multiple_jobs=False,
            )

        return {"job_id": self.job_id}

    def on_kill(self) -> None:
        """Cancel the submitted Dataflow job, if any, when the task is killed."""
        self.log.info("On kill.")
        if self.job_id:
            self.dataflow_hook.cancel_job(
                job_id=self.job_id, project_id=self.project_id or self.dataflow_hook.project_id
            )
| |
import hashlib
import json
import logging
import os.path
import re
from io import BytesIO
import asyncio
import mitmproxy.flow
import tornado.escape
import tornado.web
import tornado.websocket
from mitmproxy import contentviews
from mitmproxy import exceptions
from mitmproxy import flowfilter
from mitmproxy import http
from mitmproxy import io
from mitmproxy import log
from mitmproxy import version
from mitmproxy import optmanager
import mitmproxy.tools.web.master # noqa
def flow_to_json(flow: mitmproxy.flow.Flow) -> dict:
    """
    Serialize a flow for the web UI, omitting message bodies and certificates
    to save transmission space.

    Args:
        flow: The original flow.
    """

    def _content_meta(message):
        # (length, sha256 hex digest) of the raw body, or (None, None) when empty.
        raw = message.raw_content
        if raw:
            return len(raw), hashlib.sha256(raw).hexdigest()
        return None, None

    f = {
        "id": flow.id,
        "intercepted": flow.intercepted,
        "client_conn": flow.client_conn.get_state(),
        "server_conn": flow.server_conn.get_state(),
        "type": flow.type,
        "modified": flow.modified(),
        "marked": flow.marked,
    }

    # .alpn_proto_negotiated is bytes, we need to decode that.
    for side in ("client_conn", "server_conn"):
        alpn = f[side]["alpn_proto_negotiated"]
        if alpn is not None:
            f[side]["alpn_proto_negotiated"] = alpn.decode(errors="backslashreplace")

    # There are some bytes in here as well, let's skip it until we have them in the UI.
    f["client_conn"].pop("tls_extensions", None)

    if flow.error:
        f["error"] = flow.error.get_state()

    if isinstance(flow, http.HTTPFlow):
        if flow.request:
            content_length, content_hash = _content_meta(flow.request)
            f["request"] = {
                "method": flow.request.method,
                "scheme": flow.request.scheme,
                "host": flow.request.host,
                "port": flow.request.port,
                "path": flow.request.path,
                "http_version": flow.request.http_version,
                "headers": tuple(flow.request.headers.items(True)),
                "contentLength": content_length,
                "contentHash": content_hash,
                "timestamp_start": flow.request.timestamp_start,
                "timestamp_end": flow.request.timestamp_end,
                "is_replay": flow.request.is_replay,
                "pretty_host": flow.request.pretty_host,
            }
        if flow.response:
            content_length, content_hash = _content_meta(flow.response)
            f["response"] = {
                "http_version": flow.response.http_version,
                "status_code": flow.response.status_code,
                "reason": flow.response.reason,
                "headers": tuple(flow.response.headers.items(True)),
                "contentLength": content_length,
                "contentHash": content_hash,
                "timestamp_start": flow.response.timestamp_start,
                "timestamp_end": flow.response.timestamp_end,
                "is_replay": flow.response.is_replay,
            }

    # Certificates are binary blobs the UI does not use; drop them.
    f.get("server_conn", {}).pop("cert", None)
    f.get("client_conn", {}).pop("mitmcert", None)
    return f
def logentry_to_json(e: log.LogEntry) -> dict:
    """Serialize a log entry for transmission to the web client."""
    # The object's identity is used as a stable, unique id for the UI.
    return {"id": id(e), "message": e.msg, "level": e.level}
class APIError(tornado.web.HTTPError):
    """An HTTP error whose log message is returned verbatim to the API client."""
    pass
class RequestHandler(tornado.web.RequestHandler):
    """Base handler: JSON request/response helpers, security headers and
    API-error rendering shared by all mitmweb endpoints."""

    def write(self, chunk):
        """Write a response chunk, JSON-encoding top-level lists."""
        # Writing arrays on the top level is ok nowadays.
        # http://flask.pocoo.org/docs/0.11/security/#json-security
        if isinstance(chunk, list):
            chunk = tornado.escape.json_encode(chunk)
            self.set_header("Content-Type", "application/json; charset=UTF-8")
        # Py3 zero-argument super() instead of the legacy super(RequestHandler, self).
        super().write(chunk)

    def set_default_headers(self):
        """Attach server identification and browser hardening headers."""
        super().set_default_headers()
        self.set_header("Server", version.MITMPROXY)
        self.set_header("X-Frame-Options", "DENY")
        self.add_header("X-XSS-Protection", "1; mode=block")
        self.add_header("X-Content-Type-Options", "nosniff")
        self.add_header(
            "Content-Security-Policy",
            "default-src 'self'; "
            "connect-src 'self' ws:; "
            "style-src 'self' 'unsafe-inline'"
        )

    @property
    def json(self):
        """Parsed JSON request body.

        Raises:
            APIError: if the Content-Type is not application/json or the
                body is not valid JSON.
        """
        if not self.request.headers.get("Content-Type", "").startswith("application/json"):
            raise APIError(400, "Invalid Content-Type, expected application/json.")
        try:
            return json.loads(self.request.body.decode())
        except Exception as e:
            raise APIError(400, "Malformed JSON: {}".format(str(e)))

    @property
    def filecontents(self):
        """
        Accept either a multipart/form file upload or just take the plain request body.
        """
        if self.request.files:
            return next(iter(self.request.files.values()))[0].body
        else:
            return self.request.body

    @property
    def view(self) -> "mitmproxy.addons.view.View":
        return self.application.master.view

    @property
    def master(self) -> "mitmproxy.tools.web.master.WebMaster":
        return self.application.master

    @property
    def flow(self) -> mitmproxy.flow.Flow:
        """The flow addressed by the ``flow_id`` path argument.

        Raises:
            APIError: with 404 if no such flow exists.
        """
        flow_id = str(self.path_kwargs["flow_id"])
        # FIXME: Add a facility to addon.view to safely access the store
        flow = self.view.get_by_id(flow_id)
        if flow:
            return flow
        else:
            raise APIError(404, "Flow not found.")

    def write_error(self, status_code: int, **kwargs):
        """Render APIError log messages verbatim; defer everything else to tornado."""
        if "exc_info" in kwargs and isinstance(kwargs["exc_info"][1], APIError):
            self.finish(kwargs["exc_info"][1].log_message)
        else:
            super().write_error(status_code, **kwargs)
class IndexHandler(RequestHandler):
    """Serve the single-page web UI."""
    def get(self):
        # Touch xsrf_token so the cookie is set for subsequent API calls.
        token = self.xsrf_token  # https://github.com/tornadoweb/tornado/issues/645
        assert token
        self.render("index.html")
class FilterHelp(RequestHandler):
    """Expose the flow-filter expression help as JSON."""
    def get(self):
        self.write(dict(
            commands=flowfilter.help
        ))
class WebSocketEventBroadcaster(tornado.websocket.WebSocketHandler):
    """Base class for websocket channels that broadcast JSON events.

    Subclasses must define their own class-level ``connections`` set so each
    channel tracks its own clients.
    """
    # raise an error if inherited class doesn't specify its own instance.
    connections: set = None
    def open(self):
        self.connections.add(self)
    def on_close(self):
        self.connections.remove(self)
    @classmethod
    def broadcast(cls, **kwargs):
        """Send *kwargs* as one JSON message to every open connection."""
        message = json.dumps(kwargs, ensure_ascii=False).encode("utf8", "surrogateescape")
        # Iterate over a copy: a failing write may close a connection, which
        # removes it from cls.connections and would otherwise raise
        # "set changed size during iteration".
        for conn in cls.connections.copy():
            try:
                conn.write_message(message)
            except Exception:  # pragma: no cover
                logging.error("Error sending message", exc_info=True)
class ClientConnection(WebSocketEventBroadcaster):
    """Websocket channel pushing state updates to connected UIs."""
    connections: set = set()
class Flows(RequestHandler):
    """List all flows currently held by the view."""
    def get(self):
        self.write([flow_to_json(f) for f in self.view])
class DumpFlows(RequestHandler):
    """Download or replace the whole flow store in mitmproxy's dump format."""
    def get(self):
        self.set_header("Content-Disposition", "attachment; filename=flows")
        self.set_header("Content-Type", "application/octet-stream")
        bio = BytesIO()
        fw = io.FlowWriter(bio)
        for f in self.view:
            fw.add(f)
        self.write(bio.getvalue())
        bio.close()
    def post(self):
        # Replace the current store with the uploaded dump file.
        self.view.clear()
        bio = BytesIO(self.filecontents)
        for i in io.FlowReader(bio).stream():
            asyncio.ensure_future(self.master.load_flow(i))
        bio.close()
class ClearAll(RequestHandler):
    """Remove all flows and all event-log entries."""
    def post(self):
        self.view.clear()
        self.master.events.clear()
class ResumeFlows(RequestHandler):
    """Resume every intercepted flow."""
    def post(self):
        for f in self.view:
            f.resume()
            self.view.update([f])
class KillFlows(RequestHandler):
    """Kill every flow that can still be killed."""
    def post(self):
        for f in self.view:
            if f.killable:
                f.kill()
                self.view.update([f])
class ResumeFlow(RequestHandler):
    """Resume a single intercepted flow."""
    def post(self, flow_id):
        self.flow.resume()
        self.view.update([self.flow])
class KillFlow(RequestHandler):
    """Kill a single flow if it is still killable."""
    def post(self, flow_id):
        if self.flow.killable:
            self.flow.kill()
            self.view.update([self.flow])
class FlowHandler(RequestHandler):
    """Delete or edit a single flow."""
    def delete(self, flow_id):
        # Kill first so an intercepted flow doesn't hang after removal.
        if self.flow.killable:
            self.flow.kill()
        self.view.remove([self.flow])
    def put(self, flow_id):
        """Apply a JSON patch of request/response fields to the flow.

        Raises APIError(400) for any unknown top-level or nested key; the
        flow is reverted to its backup in that case.
        """
        flow = self.flow
        flow.backup()  # allow revert if the update turns out invalid
        try:
            for a, b in self.json.items():
                if a == "request" and hasattr(flow, "request"):
                    request = flow.request
                    for k, v in b.items():
                        if k in ["method", "scheme", "host", "path", "http_version"]:
                            setattr(request, k, str(v))
                        elif k == "port":
                            request.port = int(v)
                        elif k == "headers":
                            # Replace all headers with the submitted list.
                            request.headers.clear()
                            for header in v:
                                request.headers.add(*header)
                        elif k == "content":
                            request.text = v
                        else:
                            raise APIError(400, "Unknown update request.{}: {}".format(k, v))
                elif a == "response" and hasattr(flow, "response"):
                    response = flow.response
                    for k, v in b.items():
                        if k in ["msg", "http_version"]:
                            setattr(response, k, str(v))
                        elif k == "code":
                            response.status_code = int(v)
                        elif k == "headers":
                            response.headers.clear()
                            for header in v:
                                response.headers.add(*header)
                        elif k == "content":
                            response.text = v
                        else:
                            raise APIError(400, "Unknown update response.{}: {}".format(k, v))
                else:
                    raise APIError(400, "Unknown update {}: {}".format(a, b))
        except APIError:
            # Roll back partial edits so the flow stays consistent.
            flow.revert()
            raise
        self.view.update([flow])
class DuplicateFlow(RequestHandler):
    """Copy a flow and return the new flow's id."""
    def post(self, flow_id):
        f = self.flow.copy()
        self.view.add([f])
        self.write(f.id)
class RevertFlow(RequestHandler):
    """Undo all modifications made to a flow since its last backup."""
    def post(self, flow_id):
        if self.flow.modified():
            self.flow.revert()
            self.view.update([self.flow])
class ReplayFlow(RequestHandler):
    """Replay a flow's request against the upstream server."""
    def post(self, flow_id):
        self.flow.backup()
        self.flow.response = None  # drop the old response before replaying
        self.view.update([self.flow])
        try:
            self.master.commands.call("replay.client", [self.flow])
        except exceptions.ReplayException as e:
            raise APIError(400, str(e))
class FlowContent(RequestHandler):
    """Upload or download the raw body of a flow's request or response."""
    def post(self, flow_id, message):
        """Replace the message body with the uploaded file / request body."""
        self.flow.backup()
        message = getattr(self.flow, message)
        message.content = self.filecontents
        self.view.update([self.flow])
    def get(self, flow_id, message):
        """Serve the raw message body as a file attachment."""
        message = getattr(self.flow, message)
        if not message.raw_content:
            raise APIError(400, "No content.")
        content_encoding = message.headers.get("Content-Encoding", None)
        if content_encoding:
            content_encoding = re.sub(r"[^\w]", "", content_encoding)
            self.set_header("Content-Encoding", content_encoding)
        original_cd = message.headers.get("Content-Disposition", None)
        filename = None
        if original_cd:
            # Bug fix: use a raw string — '\w' in a non-raw literal is an
            # invalid escape sequence (DeprecationWarning today, a
            # SyntaxError in future Python versions).
            filename = re.search(r'filename=([-\w" .()]+)', original_cd)
            if filename:
                filename = filename.group(1)
        if not filename:
            # Fall back to the last path segment of the request URL.
            filename = self.flow.request.path.split("?")[0].split("/")[-1]
        # Strip characters outside the safe filename alphabet.
        filename = re.sub(r'[^-\w" .()]', "", filename)
        cd = "attachment; filename={}".format(filename)
        self.set_header("Content-Disposition", cd)
        self.set_header("Content-Type", "application/text")
        self.set_header("X-Content-Type-Options", "nosniff")
        self.set_header("X-Frame-Options", "DENY")
        self.write(message.raw_content)
class FlowContentView(RequestHandler):
    """Render a message body through one of mitmproxy's content views."""
    def get(self, flow_id, message, content_view):
        message = getattr(self.flow, message)
        # URL segment uses underscores; view names use spaces.
        description, lines, error = contentviews.get_message_content_view(
            content_view.replace('_', ' '), message
        )
        # if error:
        #     add event log
        self.write(dict(
            lines=list(lines),
            description=description
        ))
class Events(RequestHandler):
    """Return the buffered event log as JSON."""
    def get(self):
        self.write([logentry_to_json(e) for e in self.master.events.data])
class Settings(RequestHandler):
    """Read, and partially update, the master's options."""
    def get(self):
        self.write(dict(
            version=version.VERSION,
            mode=str(self.master.options.mode),
            intercept_active=self.master.options.intercept_active,
            intercept=self.master.options.intercept,
            showhost=self.master.options.showhost,
            upstream_cert=self.master.options.upstream_cert,
            rawtcp=self.master.options.rawtcp,
            http2=self.master.options.http2,
            websocket=self.master.options.websocket,
            anticache=self.master.options.anticache,
            anticomp=self.master.options.anticomp,
            stickyauth=self.master.options.stickyauth,
            stickycookie=self.master.options.stickycookie,
            stream=self.master.options.stream_large_bodies,
            contentViews=[v.name.replace(' ', '_') for v in contentviews.views],
            listen_host=self.master.options.listen_host,
            listen_port=self.master.options.listen_port,
            server=self.master.options.server,
        ))
    def put(self):
        update = self.json
        # Only a safe subset of options may be changed through this endpoint;
        # everything else must go through /options.
        option_whitelist = {
            "intercept", "showhost", "upstream_cert",
            "rawtcp", "http2", "websocket", "anticache", "anticomp",
            "stickycookie", "stickyauth", "stream_large_bodies"
        }
        for k in update:
            if k not in option_whitelist:
                raise APIError(400, "Unknown setting {}".format(k))
        self.master.options.update(**update)
class Options(RequestHandler):
    """Dump or update the full option set."""
    def get(self):
        self.write(optmanager.dump_dicts(self.master.options))
    def put(self):
        update = self.json
        try:
            self.master.options.update(**update)
        except Exception as err:
            raise APIError(400, "{}".format(err))
class SaveOptions(RequestHandler):
    """Persist options to disk (currently a no-op; see commented code)."""
    def post(self):
        # try:
        #     optmanager.save(self.master.options, CONFIG_PATH, True)
        # except Exception as err:
        #     raise APIError(400, "{}".format(err))
        pass
class DnsRebind(RequestHandler):
    """Reject requests whose Host header is not an IP or localhost."""
    def get(self):
        raise tornado.web.HTTPError(
            403,
            reason="To protect against DNS rebinding, mitmweb can only be accessed by IP at the moment. "
            "(https://github.com/mitmproxy/mitmproxy/issues/3234)"
        )
class Application(tornado.web.Application):
    """Tornado application wiring all mitmweb UI and API routes."""
    def __init__(self, master, debug):
        self.master = master
        super().__init__(
            default_host="dns-rebind-protection",
            template_path=os.path.join(os.path.dirname(__file__), "templates"),
            static_path=os.path.join(os.path.dirname(__file__), "static"),
            xsrf_cookies=True,
            cookie_secret=os.urandom(256),
            debug=debug,
            autoreload=False,
        )
        # Any unexpected Host header falls through to the DNS-rebind guard.
        self.add_handlers("dns-rebind-protection", [(r"/.*", DnsRebind)])
        self.add_handlers(
            # make mitmweb accessible by IP only to prevent DNS rebinding.
            r'^(localhost|[0-9.:\[\]]+)$',
            [
                (r"/", IndexHandler),
                (r"/filter-help(?:\.json)?", FilterHelp),
                (r"/updates", ClientConnection),
                (r"/events(?:\.json)?", Events),
                (r"/flows(?:\.json)?", Flows),
                (r"/flows/dump", DumpFlows),
                (r"/flows/resume", ResumeFlows),
                (r"/flows/kill", KillFlows),
                (r"/flows/(?P<flow_id>[0-9a-f\-]+)", FlowHandler),
                (r"/flows/(?P<flow_id>[0-9a-f\-]+)/resume", ResumeFlow),
                (r"/flows/(?P<flow_id>[0-9a-f\-]+)/kill", KillFlow),
                (r"/flows/(?P<flow_id>[0-9a-f\-]+)/duplicate", DuplicateFlow),
                (r"/flows/(?P<flow_id>[0-9a-f\-]+)/replay", ReplayFlow),
                (r"/flows/(?P<flow_id>[0-9a-f\-]+)/revert", RevertFlow),
                (r"/flows/(?P<flow_id>[0-9a-f\-]+)/(?P<message>request|response)/content.data", FlowContent),
                (
                    r"/flows/(?P<flow_id>[0-9a-f\-]+)/(?P<message>request|response)/content/(?P<content_view>[0-9a-zA-Z\-\_]+)(?:\.json)?",
                    FlowContentView),
                (r"/settings(?:\.json)?", Settings),
                (r"/clear", ClearAll),
                (r"/options(?:\.json)?", Options),
                (r"/options/save", SaveOptions)
            ]
        )
| |
# -*- coding: utf-8 -*-
import datetime
from time import sleep
from lettuce import *
from django.utils.datastructures import SortedDict
from rapidsms.contrib.locations.models import *
from survey.features.page_objects.aggregates import AggregateStatusPage, DownloadExcelPage, InvestigatorReportPage
from survey.features.page_objects.survey_completion_rates import SurveyCompletionRatesPage
from survey.models import Survey, EnumerationArea, HouseholdMemberGroup
from survey.models.batch import Batch
from survey.models.households import Household, HouseholdMember
from survey.models.investigator import Investigator
from survey import investigator_configs
# --- Aggregate status page: fixtures and basic page assertions. ---
# These lettuce steps mutate the shared `world` object; fixture steps must
# run before the assertion steps that read the attributes they set.
@step(u'And I have 2 batches with one open')
def and_i_have_2_batches_with_one_open(step):
    world.batch_1 = Batch.objects.create(order=1, name="Batch A", survey=world.survey_1)
    world.batch_2 = Batch.objects.create(order=2, name="Batch B", survey=world.survey_2)
    world.kampala_county = Location.objects.get(name="Kampala County")
    world.someother_county = Location.objects.create(name="Some County", tree_parent=world.kampala_county.tree_parent)
    # Only batch_1 is opened; batch_2 stays closed for these scenarios.
    world.batch_1.open_for_location(world.kampala_county.tree_parent)
@step(u'And I have eas in the lowest location')
def and_i_have_eas_in_the_lowest_location(step):
    world.ea = EnumerationArea.objects.create(name="EA", survey=world.survey_1)
    world.ea.locations.add(world.kampala_village)
@step(u'And one household has completed that open batch')
def and_one_household_has_completed_that_open_batch(step):
    world.household_1.completed_batches.get_or_create(batch=world.batch_1)
@step(u'And I visit aggregate status page')
def and_i_visit_aggregate_status_page(step):
    world.page = AggregateStatusPage(world.browser)
    world.page.visit()
@step(u'Then I should see an option to select location hierarchically')
def then_i_should_see_an_option_to_select_location_hierarchically(step):
    world.page.choose_location({'district': 'Kampala', 'county': 'Kampala County'})
@step(u'And I should see an option to select batch')
def and_i_should_see_an_option_to_select_batch(step):
    world.page.check_if_batches_present([world.batch_1])
@step(u'And I should see a get status button')
def and_i_should_see_a_get_status_button(step):
    world.page.check_get_status_button_presence()
# --- Status counts and investigator listings. ---
@step(u'And I have 2 investigators with households')
def and_i_have_2_investigators_with_households(step):
    investigator = Investigator.objects.create(name="Rajini", mobile_number="123", location=world.kampala_county)
    investigator_2 = Investigator.objects.create(name="Batman", mobile_number="1234", location=world.someother_county)
    uid_counter = 0
    # Interleave uids so both investigators' households get unique ids.
    for index in range(investigator_configs.NUMBER_OF_HOUSEHOLD_PER_INVESTIGATOR):
        Household.objects.create(investigator = investigator, uid=uid_counter+index)
        Household.objects.create(investigator = investigator_2, uid=uid_counter+1+index)
        uid_counter = uid_counter + 2
    world.investigator = investigator
    world.investigator_2 = investigator_2
@step(u'And I choose a location and an open batch')
def and_i_choose_a_location_and_an_open_batch(step):
    # SortedDict preserves insertion order so the hierarchy is selected
    # top-down (district before county).
    locations = SortedDict()
    locations['district'] = 'Kampala'
    locations['county'] = 'Kampala County'
    world.page.choose_location(locations)
    world.page.choose_batch(world.batch_1)
@step(u'And I change my mind to select all districts')
def and_i_change_my_mind_to_select_all_districts(step):
    world.page.select_all_district()
@step(u'And I click get status button')
def and_i_click_get_status_button(step):
    world.page.submit()
@step(u'And I should see all districts as location selected')
def and_i_should_see_all_districts_location_selected(step):
    world.page.see_all_districts_location_selected()
@step(u'Then I should see number of households and clusters completed and pending')
def then_i_should_see_number_of_households_and_clusters_completed_and_pending(step):
    # NOTE(review): "completed_housesholds" is misspelled but must match the
    # page-object's keyword argument, so it is left unchanged here.
    world.page.assert_status_count(pending_households=20, completed_housesholds=0, pending_clusters=2, completed_clusters=0)
@step(u'And I should see a list of investigators with corresponding phone numbers and pending households')
def and_i_should_see_a_list_of_investigators_with_corresponding_phone_numbers_and_pending_households(step):
    world.page.check_presence_of_investigators(world.investigator, world.investigator_2)
@step(u'And I choose a location and a closed batch')
def and_i_choose_a_location_and_a_closed_batch(step):
    world.page.choose_location({'district': 'Kampala'})
    world.page.choose_batch(world.batch_2)
@step(u'And I should see a message that says that this batch is currently closed')
def and_i_should_see_a_message_that_says_that_this_batch_is_currently_closed(step):
    world.page.assert_presence_of_batch_is_closed_message()
# --- Download / completion-rate page navigation and assertions. ---
@step(u'And I visit download excel page')
def and_i_visit_download_excel_page(step):
    world.page = DownloadExcelPage(world.browser)
    world.page.visit()
@step(u'And I visit district aggregate page')
def and_i_visit_district_aggregate_page(step):
    world.page = SurveyCompletionRatesPage(world.browser)
    world.page.visit()
@step(u'Then I should see a table for completion rates')
def then_i_should_see_a_table_for_completion_rates(step):
    world.page.see_completion_rates_table()
@step(u'And I should see descendants in the table')
def and_i_should_see_descendants_in_the_table(step):
    world.page.is_text_present(world.kampala_subcounty.name)
@step(u'When I click on descendant name')
def when_i_click_on_descendant_name(step):
    world.page.click_link_by_text(world.kampala_subcounty.name)
@step(u'Then I should see status page for that location')
def then_i_should_see_status_page_for_that_location(step):
    world.page.see_completion_rates_table()
    world.page.is_text_present(world.kampala_parish.name)
@step(u'And I choose ea and an open batch')
def and_i_choose_ea_and_an_open_batch(step):
    # Fill the full location hierarchy before picking batch and EA.
    locations = SortedDict()
    locations['district'] = world.kampala_district.name
    locations['county'] = world.kampala_county.name
    locations['subcounty'] = world.kampala_subcounty.name
    locations['parish'] = world.kampala_parish.name
    world.page.choose_location(locations)
    world.page.choose_batch(world.batch_1)
    world.page.choose_ea(world.ea)
@step(u'Then I should see a table for household completion rates')
def then_i_should_see_a_table_for_household_completion_rates(step):
    # NOTE(review): "houdehold" typo matches the page-object method name.
    world.page.see_houdehold_completion_table()
@step(u'And I should see household details text')
def and_i_should_see_household_details_text(step):
    world.page.is_text_present("Survey Completion by household in %s EA" % world.ea.name)
    world.page.is_text_present("%s" % world.household_1.uid)
    world.page.is_text_present("%s" % world.household_1.household_member.all().count())
@step(u'And I should see investigator details text')
def and_i_should_see_investigator_details_text(step):
    world.page.is_text_present('Investigator: %s(%s)' % (world.investigator.name, world.investigator.mobile_number))
@step(u'And I have an investigator and households')
def and_i_have_an_investigator_and_households(step):
    """Create one investigator with two households; only household_2 gets a member."""
    world.batch = Batch.objects.create(survey=world.survey_1, name="Haha")
    world.investigator = Investigator.objects.create(name="some_investigator", mobile_number="123456784", ea=world.ea)
    world.household_1 = Household.objects.create(investigator=world.investigator, uid=101, ea=world.ea, survey=world.survey_1)
    world.household_2 = Household.objects.create(investigator=world.investigator, uid=102, ea=world.ea, survey=world.survey_1)
    # Bug fix: the original wrote datetime.datetime(2000, 02, 02). Leading-zero
    # integer literals are legacy octal syntax — a SyntaxError on Python 3.
    # Plain 2, 2 denotes the exact same date.
    world.member_2 = HouseholdMember.objects.create(household=world.household_2,
                                                    date_of_birth=datetime.datetime(2000, 2, 2))
# --- Survey/batch selection and investigator report steps. ---
@step(u'And I should see percent completion')
def and_i_should_see_percent_completion(step):
    # One of the two households has a member, hence 50 percent.
    world.page.is_text_present('Percent Completion: 50')
@step(u'And I have 2 surveys with one batch each')
def and_i_have_2_surveys_with_one_batch_each(step):
    world.batch_1 = Batch.objects.create(name='batch1', order=1, survey=world.survey_1)
    world.batch_2 = Batch.objects.create(name='batch2', order=1, survey=world.survey_2)
@step(u'When I select survey 2 from survey list')
def when_i_select_survey_2_from_survey_list(step):
    world.page.select('survey',[world.survey_2.id])
@step(u'Then I should see batch2 in batch list')
def then_i_should_see_batch2_in_batch_list(step):
    world.page.see_select_option([world.batch_2.name], 'batch')
@step(u'And I should not see batch1 in batch list')
def and_i_should_not_see_batch1_in_batch_list(step):
    world.page.option_not_present([world.batch_1.name], 'batch')
@step(u'When I select survey 1 from survey list')
def when_i_select_survey_1_from_survey_list(step):
    world.page.select('survey', [world.survey_1.id])
@step(u'Then I should see batch1 in batch list')
def then_i_should_see_batch1_in_batch_list(step):
    world.page.see_select_option([world.batch_1.name], 'batch')
@step(u'And I should not see batch2 in batch list')
def and_i_should_not_see_batch2_in_batch_list(step):
    world.page.option_not_present([world.batch_2.name], 'batch')
@step(u'And I should see title message')
def and_i_should_see_title_message(step):
    world.page.is_text_present('Survey Completion by Region/District')
@step(u'When I visit investigator report page')
def when_i_visit_investigator_report_page(step):
    world.page = InvestigatorReportPage(world.browser)
    world.page.visit()
@step(u'Then I should see title-text message')
def then_i_should_see_title_text_message(step):
    world.page.is_text_present('Choose survey to get investigators who completed the survey')
@step(u'And I should see dropdown with two surveys')
def and_i_should_see_dropdown_with_two_surveys(step):
    world.page.see_select_option([world.survey_1.name, world.survey_2.name], 'survey')
@step(u'And I should see generate report button')
def and_i_should_see_generate_report_button(step):
    assert world.browser.find_by_css("#download-investigator-form")[0].find_by_tag('button')[0].text == "Generate Report"
@step(u'And I have 100 locations')
def and_i_have_100_locations(step):
    country = LocationType.objects.create(name="Country", slug="country")
    district = LocationType.objects.create(name="District", slug="district")
    world.uganda = Location.objects.create(name="uganda", type=country)
    # xrange: this suite targets Python 2.
    for i in xrange(100):
        Location.objects.create(name="name"+str(i), tree_parent=world.uganda, type=district)
@step(u'Then I should see district completion table paginated')
def then_i_should_see_district_completion_table_paginated(step):
    world.page.validate_pagination()
@step(u'And I have one batch open in those locations')
def and_i_have_one_batch_open_in_those_locations(step):
    world.batch_12 = Batch.objects.create(order=12, name="Batch A", survey=world.survey_1)
    world.batch_12.open_for_location(world.uganda)
@step(u'When I select one of the survey')
def when_i_select_one_of_the_survey(step):
    world.page.see_select_option([world.survey_1.name, world.survey_2.name], 'survey')
@step(u'Then I should batches in that survey')
def then_i_should_batches_in_that_survey(step):
    world.page.validate_select_option(world.batch_1)
@step(u'And I click generate report button')
def and_i_click_generate_report_button(step):
    world.page.find_by_css("#generate_report", "Generate Report")
@step(u'And I have three surveys')
def and_i_have_three_surveys(step):
    # NOTE(review): despite the step name, only two surveys are created.
    world.survey_1 = Survey.objects.create(name="Haha Survey")
    world.survey_2 = Survey.objects.create(name="Hoho Survey")
@step(u'And I have batches in those surveys')
def and_i_have_batches_in_those_surveys(step):
    world.batch_1 = Batch.objects.create(order=1, name="Batch A haha", survey=world.survey_1)
    world.batch_2 = Batch.objects.create(order=2, name="Batch A hoho", survey=world.survey_2)
@step(u'Then I should only see the batches in that survey')
def then_i_should_only_see_the_batches_in_that_survey(step):
    world.page.see_select_option(['All', str(world.batch_2.name)], 'batch')
@step(u'When I choose a batch in that survey')
def when_i_choose_a_batch_in_that_survey(step):
    world.page.select('batch', [world.batch_2.id])
@step(u'Then I should be able to export the responses for that batch')
def then_i_should_be_able_to_export_the_responses_for_that_batch(step):
    world.page.find_by_css("#export_excel", "Export to spreadsheet")
@step(u'When I select one of the two surveys')
def when_i_select_one_of_the_two_surveys(step):
    world.page.select('survey', [str(world.survey_2.id)])
@step(u'And I have general member group')
def and_i_have_general_member_group(step):
    HouseholdMemberGroup.objects.create(order=1, name="GENERAL")
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for reduction operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
@parameterized.named_parameters(('32_bit_index', dtypes.int32),
                                ('64_bit_index', dtypes.int64))
class ReduceOpsTest(xla_test.XLATestCase, parameterized.TestCase):
  """Compares XLA reduction ops against numpy over several fixture arrays.

  Parameterized over the index dtype used for the reduction axes.
  NOTE(review): the fixtures use `np.bool`, an alias removed in
  numpy >= 1.24 — presumably fine for the numpy version this tree pins,
  but worth confirming before a numpy upgrade.
  """

  def _testReduction(self,
                     tf_reduce_fn,
                     np_reduce_fn,
                     dtype,
                     test_inputs,
                     index_dtype,
                     rtol=1e-4,
                     atol=1e-4):
    """Tests that the output of 'tf_reduce_fn' matches numpy's output."""
    for test_input in test_inputs:
      with self.session() as sess:
        with self.test_scope():
          a = array_ops.placeholder(dtype)
          index = array_ops.placeholder(index_dtype)
          out = tf_reduce_fn(a, index)
        # Reduce over axis 0, axis 1, and -1 (same as 1 for rank-2 inputs).
        result = sess.run(out, {a: test_input, index: [0]})
        self.assertAllClose(
            result, np_reduce_fn(test_input, axis=0), rtol=rtol, atol=atol)
        result = sess.run(out, {a: test_input, index: [1]})
        self.assertAllClose(
            result, np_reduce_fn(test_input, axis=1), rtol=rtol, atol=atol)
        result = sess.run(out, {a: test_input, index: [-1]})
        self.assertAllClose(
            result, np_reduce_fn(test_input, axis=1), rtol=rtol, atol=atol)
        # Out-of-range axes must be rejected.
        with self.assertRaisesWithPredicateMatch(
            errors_impl.InvalidArgumentError, 'Invalid reduction dim'):
          sess.run(out, {a: test_input, index: [-33]})
        with self.assertRaisesWithPredicateMatch(
            errors_impl.InvalidArgumentError, 'Invalid reduction dim'):
          sess.run(out, {a: test_input, index: [2]})

  # Fixture data: includes empty shapes and negative ranges.
  REAL_DATA = [
      np.zeros(shape=(2, 0)),
      np.zeros(shape=(0, 30)),
      np.arange(1, 7).reshape(2, 3),
      np.arange(-10, -4).reshape(2, 3),
      np.arange(-4, 2).reshape(2, 3),
  ]
  COMPLEX_DATA = [
      np.zeros(shape=(2, 0)).astype(np.complex64),
      np.zeros(shape=(0, 30)).astype(np.complex64),
      np.arange(1, 13, dtype=np.float32).view(np.complex64).reshape(2, 3),
      np.arange(-14, -2, dtype=np.float32).view(np.complex64).reshape(2, 3),
      np.arange(-4, 8, dtype=np.float32).view(np.complex64).reshape(2, 3),
  ]
  NONEMPTY_REAL_DATA = [x for x in REAL_DATA if np.size(x) > 0]
  NONEMPTY_COMPLEX_DATA = [x for x in COMPLEX_DATA if np.size(x) > 0]
  BOOL_DATA = [
      np.array([], dtype=np.bool).reshape(2, 0),
      np.array([], dtype=np.bool).reshape(0, 3),
      np.array([[False, True, False], [True, True, False]]),
  ]
  ONES = [np.ones([34000, 2])]

  def testReduceSumF32(self, index_dtype):
    self._testReduction(math_ops.reduce_sum, np.sum, np.float32, self.REAL_DATA,
                        index_dtype)

  def testReduceSumC64(self, index_dtype):
    self._testReduction(math_ops.reduce_sum, np.sum, np.complex64,
                        self.COMPLEX_DATA, index_dtype)

  def testReduceProdF32(self, index_dtype):
    self._testReduction(math_ops.reduce_prod, np.prod, np.float32,
                        self.REAL_DATA, index_dtype)

  def testReduceProdC64(self, index_dtype):
    self._testReduction(math_ops.reduce_prod, np.prod, np.complex64,
                        self.COMPLEX_DATA, index_dtype)

  def testReduceMin(self, index_dtype):

    def reference_min(dtype, inp, axis):
      """Wrapper around np.amin that returns +infinity for an empty input."""
      if inp.shape[axis] == 0:
        if np.issubdtype(dtype, np.floating):
          return np.full(inp.shape[0:axis] + inp.shape[axis + 1:], float('inf'))
        return np.full(inp.shape[0:axis] + inp.shape[axis + 1:],
                       np.iinfo(dtype).max)
      return np.amin(inp, axis)

    for dtype in set(self.all_types).intersection(
        [np.float32, np.int32, np.int64]):
      self._testReduction(math_ops.reduce_min,
                          functools.partial(reference_min, dtype), dtype,
                          self.REAL_DATA, index_dtype)

  def testReduceMax(self, index_dtype):

    def reference_max(dtype, inp, axis):
      """Wrapper around np.amax that returns -infinity for an empty input."""
      if inp.shape[axis] == 0:
        if np.issubdtype(dtype, np.floating):
          return np.full(inp.shape[0:axis] + inp.shape[axis + 1:],
                         float('-inf'))
        return np.full(inp.shape[0:axis] + inp.shape[axis + 1:],
                       np.iinfo(dtype).min)
      return np.amax(inp, axis)

    for dtype in set(self.all_types).intersection(
        [np.float32, np.int32, np.int64]):
      self._testReduction(math_ops.reduce_max,
                          functools.partial(reference_max, dtype), dtype,
                          self.REAL_DATA, index_dtype)

  def testReduceMeanF32(self, index_dtype):
    # TODO(phawkins): mean on XLA currently returns 0 instead of NaN when
    # reducing across zero inputs.
    self._testReduction(math_ops.reduce_mean, np.mean, np.float32,
                        self.NONEMPTY_REAL_DATA, index_dtype)

  def testReduceMeanF16(self, index_dtype):
    if np.float16 in self.all_types:
      self._testReduction(math_ops.reduce_mean, np.mean, np.float16, self.ONES,
                          index_dtype)

  def testReduceMeanC64(self, index_dtype):
    self._testReduction(math_ops.reduce_mean, np.mean, np.complex64,
                        self.NONEMPTY_COMPLEX_DATA, index_dtype)

  def testReduceAll(self, index_dtype):
    self._testReduction(math_ops.reduce_all, np.all, np.bool, self.BOOL_DATA,
                        index_dtype)

  def testReduceAny(self, index_dtype):
    self._testReduction(math_ops.reduce_any, np.any, np.bool, self.BOOL_DATA,
                        index_dtype)
class ReduceOpPrecisionTest(xla_test.XLATestCase):
  """Checks that low-precision (f16/bf16) reduce_sum does not lose accuracy."""

  def _testReduceSum(self,
                     expected_result,
                     dtype,
                     test_inputs,
                     rtol=1e-3,
                     atol=1e-4):
    """Tests reduce sum on a list of input arrays.

    For each array in test_inputs, check that performing reduce sum on the array
    produces a value that is close to the expected result.

    Args:
      expected_result: the expected result.
      dtype: the data type of the reduce sum operation.
      test_inputs: a list of input arrays for the reduce sum operation.
      rtol: the relative error.
      atol: the absolute error.
    """
    for test_input in test_inputs:
      with self.session() as sess:
        with self.test_scope():
          a = array_ops.placeholder(dtype)
          index = array_ops.placeholder(dtypes.int32)
          out = math_ops.reduce_sum(a, index)
        result = sess.run(out, {
            a: np.array(test_input, dtype=dtype),
            index: [0]
        })
        # Compare the results using float32 type.
        self.assertAllClose(
            np.float32(result),
            np.float32(expected_result),
            rtol=rtol,
            atol=atol)

  def testReduceSumF16(self):
    """Tests the reduce sum of float16 doesn't lose too much precision."""
    if np.float16 not in self.all_types:
      return
    # All orderings of (max, max, -max) must sum to max without overflow.
    f16_max = np.finfo(np.float16).max
    self._testReduceSum(
        f16_max, np.float16,
        itertools.permutations([f16_max, f16_max, f16_max * (-1.0)], 3))

  def testReduceSumBF16(self):
    """Tests the reduce sum of bfloat16 doesn't lose too much precision."""
    if dtypes.bfloat16.as_numpy_dtype not in self.all_types:
      return
    bf16_max = np.float32(dtypes.bfloat16.max)
    f32_max = dtypes.float32.max
    value = min(bf16_max, f32_max - bf16_max) / 2
    self._testReduceSum(
        dtypes.bfloat16.as_numpy_dtype(value), dtypes.bfloat16.as_numpy_dtype,
        itertools.permutations([bf16_max, value, bf16_max * (-1.0)], 3))
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  googletest.main()
| |
from __future__ import unicode_literals
import random
import re
import six
# Canonical AWS id prefix for each EC2 resource type (e.g. 'image' -> 'ami').
EC2_RESOURCE_TO_PREFIX = {
    'customer-gateway': 'cgw',
    'dhcp-options': 'dopt',
    'image': 'ami',
    'instance': 'i',
    'internet-gateway': 'igw',
    'network-acl': 'acl',
    'network-interface': 'eni',
    'network-interface-attachment': 'eni-attach',
    'reserved-instance': 'uuid4',
    'route-table': 'rtb',
    'route-table-association': 'rtbassoc',
    'security-group': 'sg',
    'snapshot': 'snap',
    'spot-instance-request': 'sir',
    'subnet': 'subnet',
    'reservation': 'r',
    'volume': 'vol',
    'vpc': 'vpc',
    'vpc-elastic-ip': 'eipalloc',
    'vpc-elastic-ip-association': 'eipassoc',
    'vpc-peering-connection': 'pcx',
    'vpn-connection': 'vpn',
    'vpn-gateway': 'vgw'}
# Reverse lookup: prefix -> resource type.
EC2_PREFIX_TO_RESOURCE = dict((v, k) for (k, v) in EC2_RESOURCE_TO_PREFIX.items())
def random_id(prefix=''):
    """Return an EC2-style id: ``<prefix>-`` followed by 8 random hex chars."""
    size = 8
    # A plain hex alphabet: the original built a mixed list of ints and
    # letters and converted each pick with six.text_type — same output,
    # needless indirection and a third-party dependency.
    chars = '0123456789abcdef'
    resource_id = ''.join(random.choice(chars) for x in range(size))
    return '{0}-{1}'.format(prefix, resource_id)
# Convenience wrappers: one id generator per EC2 resource type, each using
# the canonical prefix from EC2_RESOURCE_TO_PREFIX.
def random_ami_id():
    return random_id(prefix=EC2_RESOURCE_TO_PREFIX['image'])
def random_instance_id():
    return random_id(prefix=EC2_RESOURCE_TO_PREFIX['instance'])
def random_reservation_id():
    return random_id(prefix=EC2_RESOURCE_TO_PREFIX['reservation'])
def random_security_group_id():
    return random_id(prefix=EC2_RESOURCE_TO_PREFIX['security-group'])
def random_snapshot_id():
    return random_id(prefix=EC2_RESOURCE_TO_PREFIX['snapshot'])
def random_spot_request_id():
    return random_id(prefix=EC2_RESOURCE_TO_PREFIX['spot-instance-request'])
def random_subnet_id():
    return random_id(prefix=EC2_RESOURCE_TO_PREFIX['subnet'])
def random_subnet_association_id():
    return random_id(prefix=EC2_RESOURCE_TO_PREFIX['route-table-association'])
def random_volume_id():
    return random_id(prefix=EC2_RESOURCE_TO_PREFIX['volume'])
def random_vpc_id():
    return random_id(prefix=EC2_RESOURCE_TO_PREFIX['vpc'])
def random_vpc_peering_connection_id():
    return random_id(prefix=EC2_RESOURCE_TO_PREFIX['vpc-peering-connection'])
def random_eip_association_id():
    return random_id(prefix=EC2_RESOURCE_TO_PREFIX['vpc-elastic-ip-association'])
def random_internet_gateway_id():
    return random_id(prefix=EC2_RESOURCE_TO_PREFIX['internet-gateway'])
def random_route_table_id():
    return random_id(prefix=EC2_RESOURCE_TO_PREFIX['route-table'])
def random_eip_allocation_id():
    return random_id(prefix=EC2_RESOURCE_TO_PREFIX['vpc-elastic-ip'])
def random_dhcp_option_id():
    return random_id(prefix=EC2_RESOURCE_TO_PREFIX['dhcp-options'])
def random_eni_id():
    return random_id(prefix=EC2_RESOURCE_TO_PREFIX['network-interface'])
def random_eni_attach_id():
    return random_id(prefix=EC2_RESOURCE_TO_PREFIX['network-interface-attachment'])
def random_public_ip():
    """Return a random public-looking IPv4 address in 54.214.0.0/16."""
    # randint includes 255; the original random.choice(range(255))
    # silently excluded it and differed from random_ip() below.
    return '54.214.{0}.{1}'.format(random.randint(0, 255),
                                   random.randint(0, 255))
def random_ip():
    """Return a random loopback-range IPv4 address ('127.x.y.z')."""
    octets = [random.randint(0, 255) for _ in range(3)]
    return "127.{0}.{1}.{2}".format(*octets)
def generate_route_id(route_table_id, cidr_block):
    """Compose the synthetic id moto uses for a route: '<rtb id>~<cidr>'."""
    return "{0}~{1}".format(route_table_id, cidr_block)
def split_route_id(route_id):
    """Split a '<route_table_id>~<cidr>' route id into its two parts.

    Uses the str method: the original ``string.split(route_id, '~')``
    is Python 2 only — that function was removed from the ``string``
    module in Python 3.
    """
    values = route_id.split('~')
    return values[0], values[1]
def instance_ids_from_querystring(querystring_dict):
    """Extract every 'InstanceId*' parameter value from a parsed querystring."""
    return [value[0] for key, value in querystring_dict.items()
            if 'InstanceId' in key]
def image_ids_from_querystring(querystring_dict):
    """Extract every 'ImageId*' parameter value from a parsed querystring."""
    return [value[0] for key, value in querystring_dict.items()
            if 'ImageId' in key]
def route_table_ids_from_querystring(querystring_dict):
    """Extract every 'RouteTableId*' parameter value from a parsed querystring."""
    return [value[0] for key, value in querystring_dict.items()
            if 'RouteTableId' in key]
def vpc_ids_from_querystring(querystring_dict):
    """Extract every 'VpcId*' parameter value from a parsed querystring."""
    return [value[0] for key, value in querystring_dict.items()
            if 'VpcId' in key]
def sequence_from_querystring(parameter, querystring_dict):
    """Collect every value whose key contains *parameter* (e.g. 'SubnetId.1')."""
    return [value[0] for key, value in querystring_dict.items()
            if parameter in key]
def tags_from_query_string(querystring_dict):
    """Build {tag_key: tag_value} from 'Tag.N.Key'/'Tag.N.Value' parameters.

    A 'Tag.N.Key' with no matching 'Tag.N.Value' maps to None.
    """
    tags = {}
    for qs_key in querystring_dict:
        if not (qs_key.startswith('Tag') and qs_key.endswith('Key')):
            continue
        tag_index = qs_key.replace('Tag.', '').replace('.Key', '')
        tag_key = querystring_dict.get('Tag.{0}.Key'.format(tag_index))[0]
        tag_value_key = 'Tag.{0}.Value'.format(tag_index)
        if tag_value_key in querystring_dict:
            tags[tag_key] = querystring_dict.get(tag_value_key)[0]
        else:
            tags[tag_key] = None
    return tags
def dhcp_configuration_from_querystring(querystring, option=u'DhcpConfiguration'):
    """
    turn:
        {u'AWSAccessKeyId': [u'the_key'],
        u'Action': [u'CreateDhcpOptions'],
        u'DhcpConfiguration.1.Key': [u'domain-name'],
        u'DhcpConfiguration.1.Value.1': [u'example.com'],
        u'DhcpConfiguration.2.Key': [u'domain-name-servers'],
        u'DhcpConfiguration.2.Value.1': [u'10.0.0.6'],
        u'DhcpConfiguration.2.Value.2': [u'10.0.0.7'],
        u'Signature': [u'uUMHYOoLM6r+sT4fhYjdNT6MHw22Wj1mafUpe0P0bY4='],
        u'SignatureMethod': [u'HmacSHA256'],
        u'SignatureVersion': [u'2'],
        u'Timestamp': [u'2014-03-18T21:54:01Z'],
        u'Version': [u'2013-10-15']}
    into:
        {u'domain-name': [u'example.com'], u'domain-name-servers': [u'10.0.0.6', u'10.0.0.7']}
    """
    # re.escape keeps the literal dots (and any regex metacharacters in
    # a caller-supplied *option*) from matching arbitrary characters, as
    # the original unescaped pattern did.
    key_needle = re.compile(
        u'{0}\\.[0-9]+\\.Key'.format(re.escape(option)), re.UNICODE)
    response_values = {}
    for key, value in querystring.items():
        if key_needle.match(key):
            values = []
            key_index = key.split(".")[1]
            value_index = 1
            # Values are numbered 1..n; stop at the first gap.
            while True:
                value_key = u'{0}.{1}.Value.{2}'.format(option, key_index,
                                                        value_index)
                if value_key in querystring:
                    values.extend(querystring[value_key])
                else:
                    break
                value_index += 1
            response_values[value[0]] = values
    return response_values
def optional_from_querystring(parameter, querystring):
    """Return the first value of *parameter*, or None when absent or empty."""
    values = querystring.get(parameter)
    if not values:
        return None
    return values[0]
def filters_from_querystring(querystring_dict):
    """Parse 'Filter.N.Name'/'Filter.N.Value.M' params into {name: [values]}.

    The index group matches one *or more* digits — the original single
    digit group broke for the tenth filter and beyond — and the literal
    dots are escaped so they cannot match arbitrary characters.
    """
    response_values = {}
    for key, value in querystring_dict.items():
        match = re.search(r"Filter\.(\d+)\.Name", key)
        if match:
            filter_index = match.groups()[0]
            value_prefix = "Filter.{0}.Value".format(filter_index)
            filter_values = [
                filter_value[0]
                for filter_key, filter_value in querystring_dict.items()
                if filter_key.startswith(value_prefix)]
            response_values[value[0]] = filter_values
    return response_values
def dict_from_querystring(parameter, querystring_dict):
    """Parse '<parameter>.N.<prop>' params into {index: {prop: value}}.

    *parameter* and the literal dots are escaped, and the index group
    accepts multiple digits (the original pattern matched one digit
    only, so index 10+ keyed everything under the first digit).
    """
    use_dict = {}
    needle = re.compile(r"{0}\.(\d+)\.(\w+)".format(re.escape(parameter)))
    for key, value in querystring_dict.items():
        match = needle.search(key)
        if match:
            use_dict_index, use_dict_element_property = match.groups()
            use_dict.setdefault(use_dict_index, {})[
                use_dict_element_property] = value[0]
    return use_dict
def keypair_names_from_querystring(querystring_dict):
    """Extract every 'KeyName*' parameter value from a parsed querystring."""
    return [value[0] for key, value in querystring_dict.items()
            if 'KeyName' in key]
# Maps EC2 API filter names to dotted attribute paths on moto's instance
# model; consumed by passes_filter_dict() via get_instance_value().
filter_dict_attribute_mapping = {
    'instance-state-name': 'state',
    'instance-id': 'id',
    'state-reason-code': '_state_reason.code',
}
def get_instance_value(instance, instance_attr):
    """Resolve the dotted path *instance_attr* against *instance*.

    Each path segment is looked up first as an attribute, then as a
    dict key.  Returns None when a segment matches neither.
    """
    current = instance
    for part in instance_attr.split('.'):
        if hasattr(current, part):
            current = getattr(current, part)
        elif isinstance(current, dict):
            current = current[part]
        else:
            return None
    return current
def passes_filter_dict(instance, filter_dict):
    """Return True when *instance* matches every filter in *filter_dict*.

    Supports the attribute filters listed in
    ``filter_dict_attribute_mapping`` and ``tag:<name>`` filters; any
    other filter name raises NotImplementedError.
    """
    for filter_name, filter_values in filter_dict.items():
        if filter_name in filter_dict_attribute_mapping:
            instance_attr = filter_dict_attribute_mapping[filter_name]
            instance_value = get_instance_value(instance, instance_attr)
            if instance_value not in filter_values:
                return False
        elif filter_name.startswith('tag:'):
            tags = dict((tag['key'], tag['value'])
                        for tag in instance.get_tags())
            tag_name = filter_name.replace('tag:', '', 1)
            tag_value = tags.get(tag_name)
            if tag_value not in filter_values:
                return False
        else:
            # The original passed filter_name as a second constructor
            # argument (logging style), so the '%s' placeholder was
            # never interpolated into the message; format it explicitly.
            raise NotImplementedError(
                "Filter dicts have not been implemented in Moto for '%s' yet. Feel free to open an issue at https://github.com/spulec/moto/issues"
                % filter_name)
    return True
def filter_reservations(reservations, filter_dict):
    """Return reservations with at least one matching instance.

    Each kept reservation has its ``instances`` list pruned in place to
    only the instances that pass *filter_dict*.
    """
    filtered = []
    for reservation in reservations:
        matching = [instance for instance in reservation.instances
                    if passes_filter_dict(instance, filter_dict)]
        if matching:
            reservation.instances = matching
            filtered.append(reservation)
    return filtered
def is_filter_matching(obj, filter, filter_value):
    """Return True when obj's value for *filter* matches *filter_value*.

    Strings are tested by plain membership in filter_value.  Other
    iterables are converted to a set and match when they are a
    non-empty subset, or a superset, of filter_value.  Values that
    set() rejects (TypeError) fall back to a membership test.
    """
    value = obj.get_filter_value(filter)
    if isinstance(value, six.string_types):
        return value in filter_value
    try:
        # NOTE: `value` is rebound to the set here; if issubset/
        # issuperset raise TypeError, the fallback below tests
        # membership of the *set*, not the original value.
        value = set(value)
        return (value and value.issubset(filter_value)) or value.issuperset(filter_value)
    except TypeError:
        return value in filter_value
def generic_filter(filters, objects):
    """Filter *objects* with AWS-style *filters*; no filters means no-op."""
    if not filters:
        return objects
    for name, allowed_values in filters.items():
        objects = [obj for obj in objects
                   if is_filter_matching(obj, name, allowed_values)]
    return objects
def simple_aws_filter_to_re(filter_string):
    """Translate an AWS filter glob into a regex pattern string.

    Backslash-escaped wildcards (``\\?``, ``\\*``) are bracketed so
    fnmatch treats them literally before translation.
    """
    import fnmatch
    # Raw strings: the original '\?' / '\*' literals only worked because
    # they are not recognized string escapes (a SyntaxWarning on modern
    # Python).
    tmp_filter = filter_string.replace(r'\?', '[?]')
    tmp_filter = tmp_filter.replace(r'\*', '[*]')
    tmp_filter = fnmatch.translate(tmp_filter)
    return tmp_filter
# not really random ( http://xkcd.com/221/ )
def random_key_pair():
    """Return a canned SSH key pair (fixed fingerprint + PEM material).

    The material is deliberately static, EXAMPLE-style test data — it is
    not a usable private key.
    """
    return {
        'fingerprint': ('1f:51:ae:28:bf:89:e9:d8:1f:25:5d:37:2d:'
                        '7d:b8:ca:9f:f5:f1:6f'),
        'material': """---- BEGIN RSA PRIVATE KEY ----
MIICiTCCAfICCQD6m7oRw0uXOjANBgkqhkiG9w0BAQUFADCBiDELMAkGA1UEBhMC
VVMxCzAJBgNVBAgTAldBMRAwDgYDVQQHEwdTZWF0dGxlMQ8wDQYDVQQKEwZBbWF6
b24xFDASBgNVBAsTC0lBTSBDb25zb2xlMRIwEAYDVQQDEwlUZXN0Q2lsYWMxHzAd
BgkqhkiG9w0BCQEWEG5vb25lQGFtYXpvbi5jb20wHhcNMTEwNDI1MjA0NTIxWhcN
MTIwNDI0MjA0NTIxWjCBiDELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAldBMRAwDgYD
VQQHEwdTZWF0dGxlMQ8wDQYDVQQKEwZBbWF6b24xFDASBgNVBAsTC0lBTSBDb25z
b2xlMRIwEAYDVQQDEwlUZXN0Q2lsYWMxHzAdBgkqhkiG9w0BCQEWEG5vb25lQGFt
YXpvbi5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMaK0dn+a4GmWIWJ
21uUSfwfEvySWtC2XADZ4nB+BLYgVIk60CpiwsZ3G93vUEIO3IyNoH/f0wYK8m9T
rDHudUZg3qX4waLG5M43q7Wgc/MbQITxOUSQv7c7ugFFDzQGBzZswY6786m86gpE
Ibb3OhjZnzcvQAaRHhdlQWIMm2nrAgMBAAEwDQYJKoZIhvcNAQEFBQADgYEAtCu4
nUhVVxYUntneD9+h8Mg9q6q+auNKyExzyLwaxlAoo7TJHidbtS4J5iNmZgXL0Fkb
FFBjvSfpJIlJ00zbhNYS5f6GuoEDmFJl0ZxBHjJnyp378OD8uTs7fLvjx79LjSTb
NYiytVbZPQUQ5Yaxu2jXnimvw3rrszlaEXAMPLE
-----END RSA PRIVATE KEY-----"""
    }
def get_prefix(resource_id):
    """Return the type prefix of *resource_id* (e.g. 'i' for 'i-1234abcd').

    Network-interface attachment ids share the 'eni' prefix, so they
    are special-cased on their 'attach' suffix.  A bare UUID4-style hex
    string is treated as a reserved-instance id.  Returns None when
    nothing matches.
    """
    resource_id_prefix, separator, after = resource_id.partition('-')
    if resource_id_prefix == EC2_RESOURCE_TO_PREFIX['network-interface']:
        if after.startswith('attach'):
            resource_id_prefix = EC2_RESOURCE_TO_PREFIX['network-interface-attachment']
    if resource_id_prefix not in EC2_RESOURCE_TO_PREFIX.values():
        # Raw string: '\Z' is not a valid string escape and only worked
        # by accident in the original literal.
        uuid4hex = re.compile(r'[0-9a-f]{12}4[0-9a-f]{3}[89ab][0-9a-f]{15}\Z',
                              re.I)
        if uuid4hex.match(resource_id) is not None:
            resource_id_prefix = EC2_RESOURCE_TO_PREFIX['reserved-instance']
        else:
            return None
    return resource_id_prefix
def is_valid_resource_id(resource_id):
    """True when *resource_id* has a known prefix and an 8-hex-digit suffix."""
    valid_prefixes = EC2_RESOURCE_TO_PREFIX.values()
    prefix = get_prefix(resource_id)
    if prefix not in valid_prefixes:
        return False
    pattern = re.compile(prefix + '-[0-9a-f]{8}')
    return pattern.match(resource_id) is not None
def is_valid_cidr(cird):
    """Return True when *cird* (sic) is a valid IPv4 CIDR like '10.0.0.0/16'.

    Octets must be 0-255 and the prefix length 0-32; the '/<len>' part
    is mandatory.  (The parameter name 'cird' is a historical typo kept
    for interface compatibility.)
    """
    # Raw string avoids the invalid '\.' / '\/' / '\d' string escapes of
    # the original literal.
    cidr_pattern = r'^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(\d|[1-2]\d|3[0-2]))$'
    cidr_pattern_re = re.compile(cidr_pattern)
    return cidr_pattern_re.match(cird) is not None
| |
# Copyright (c) 2014 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import uuid
from cinder import context
from cinder import exception
from cinder import test
from cinder.volume.drivers.dell import dell_storagecenter_api
from cinder.volume.drivers.dell import dell_storagecenter_common
from cinder.volume.drivers.dell import dell_storagecenter_iscsi
from cinder.volume import volume_types
# We patch these here as they are used by every test to keep
# from trying to contact a Dell Storage Center.
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'__init__',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'open_connection')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'close_connection')
class DellSCSanISCSIDriverTestCase(test.TestCase):
VOLUME = {u'instanceId': u'64702.3494',
u'scSerialNumber': 64702,
u'replicationSource': False,
u'liveVolume': False,
u'vpdId': 3496,
u'objectType': u'ScVolume',
u'index': 3494,
u'volumeFolderPath': u'devstackvol/fcvm/',
u'hostCacheEnabled': False,
u'usedByLegacyFluidFsNasVolume': False,
u'inRecycleBin': False,
u'volumeFolderIndex': 17,
u'instanceName': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea',
u'statusMessage': u'',
u'status': u'Up',
u'storageType': {u'instanceId': u'64702.1',
u'instanceName': u'Assigned - Redundant - 2 MB',
u'objectType': u'ScStorageType'},
u'cmmDestination': False,
u'replicationDestination': False,
u'volumeFolder': {u'instanceId': u'64702.17',
u'instanceName': u'fcvm',
u'objectType': u'ScVolumeFolder'},
u'deviceId': u'6000d31000fcbe000000000000000da8',
u'active': True,
u'portableVolumeDestination': False,
u'deleteAllowed': True,
u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea',
u'scName': u'Storage Center 64702',
u'secureDataUsed': False,
u'serialNumber': u'0000fcbe-00000da8',
u'replayAllowed': True,
u'flashOptimized': False,
u'configuredSize': u'1.073741824E9 Bytes',
u'mapped': False,
u'cmmSource': False}
SCSERVER = {u'scName': u'Storage Center 64702',
u'volumeCount': 0,
u'removeHbasAllowed': True,
u'legacyFluidFs': False,
u'serverFolderIndex': 4,
u'alertOnConnectivity': True,
u'objectType': u'ScPhysicalServer',
u'instanceName': u'Server_21000024ff30441d',
u'instanceId': u'64702.47',
u'serverFolderPath': u'devstacksrv/',
u'portType': [u'FibreChannel'],
u'type': u'Physical',
u'statusMessage': u'Only 5 of 6 expected paths are up',
u'status': u'Degraded',
u'scSerialNumber': 64702,
u'serverFolder': {u'instanceId': u'64702.4',
u'instanceName': u'devstacksrv',
u'objectType': u'ScServerFolder'},
u'parentIndex': 0,
u'connectivity': u'Partial',
u'hostCacheIndex': 0,
u'deleteAllowed': True,
u'pathCount': 5,
u'name': u'Server_21000024ff30441d',
u'hbaPresent': True,
u'hbaCount': 2,
u'notes': u'Created by Dell Cinder Driver',
u'mapped': False,
u'operatingSystem': {u'instanceId': u'64702.38',
u'instanceName': u'Red Hat Linux 6.x',
u'objectType': u'ScServerOperatingSystem'}
}
MAPPINGS = [{u'profile': {u'instanceId': u'64702.104',
u'instanceName': u'92-30',
u'objectType': u'ScMappingProfile'},
u'status': u'Down',
u'statusMessage': u'',
u'instanceId': u'64702.969.64702',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64702',
u'instanceName': u'SN 64702',
u'objectType': u'ScController'},
u'server': {u'instanceId': u'64702.30',
u'instanceName':
u'Server_iqn.1993-08.org.debian:01:3776df826e4f',
u'objectType': u'ScPhysicalServer'},
u'volume': {u'instanceId': u'64702.92',
u'instanceName':
u'volume-74a21934-60ad-4cf2-b89b-1f0dda309ddf',
u'objectType': u'ScVolume'},
u'readOnly': False,
u'lun': 1,
u'lunUsed': [1],
u'serverHba': {u'instanceId': u'64702.3454975614',
u'instanceName':
u'iqn.1993-08.org.debian:01:3776df826e4f',
u'objectType': u'ScServerHba'},
u'path': {u'instanceId': u'64702.64702.64702.31.8',
u'instanceName':
u'iqn.1993-08.org.debian:'
'01:3776df826e4f-5000D31000FCBE43',
u'objectType': u'ScServerHbaPath'},
u'controllerPort': {u'instanceId':
u'64702.5764839588723736131.91',
u'instanceName': u'5000D31000FCBE43',
u'objectType': u'ScControllerPort'},
u'instanceName': u'64702-969',
u'transport': u'Iscsi',
u'objectType': u'ScMapping'}]
RPLAY = {u'scSerialNumber': 64702,
u'globalIndex': u'64702-46-250',
u'description': u'Cinder Clone Replay',
u'parent': {u'instanceId': u'64702.46.249',
u'instanceName': u'64702-46-249',
u'objectType': u'ScReplay'},
u'instanceId': u'64702.46.250',
u'scName': u'Storage Center 64702',
u'consistent': False,
u'expires': True,
u'freezeTime': u'12/09/2014 03:52:08 PM',
u'createVolume': {u'instanceId': u'64702.46',
u'instanceName':
u'volume-ff9589d3-2d41-48d5-9ef5-2713a875e85b',
u'objectType': u'ScVolume'},
u'expireTime': u'12/09/2014 04:52:08 PM',
u'source': u'Manual',
u'spaceRecovery': False,
u'writesHeldDuration': 7910,
u'active': False,
u'markedForExpiration': False,
u'objectType': u'ScReplay',
u'instanceName': u'12/09/2014 03:52:08 PM',
u'size': u'0.0 Bytes'
}
SCRPLAYPROFILE = {u'ruleCount': 0,
u'name': u'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
u'volumeCount': 0,
u'scName': u'Storage Center 64702',
u'notes': u'Created by Dell Cinder Driver',
u'scSerialNumber': 64702,
u'userCreated': True,
u'instanceName': u'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
u'instanceId': u'64702.11',
u'enforceReplayCreationTimeout': False,
u'replayCreationTimeout': 20,
u'objectType': u'ScReplayProfile',
u'type': u'Consistent',
u'expireIncompleteReplaySets': True}
IQN = 'iqn.2002-03.com.compellent:5000D31000000001'
ISCSI_PROPERTIES = {'access_mode': 'rw',
'target_discovered': False,
'target_iqn':
u'iqn.2002-03.com.compellent:5000d31000fcbe43',
'target_iqns':
[u'iqn.2002-03.com.compellent:5000d31000fcbe43',
u'iqn.2002-03.com.compellent:5000d31000fcbe44'],
'target_lun': 1,
'target_luns': [1, 1],
'target_portal': u'192.168.0.21:3260',
'target_portals': [u'192.168.0.21:3260',
u'192.168.0.22:3260']}
    def setUp(self):
        """Create a mocked configuration and a DellStorageCenterISCSIDriver.

        Also prepares canned connector dicts (single- and multipath), a
        fake access-record listing and a fake provider model update for
        the individual tests.
        """
        super(DellSCSanISCSIDriverTestCase, self).setUp()
        # configuration is a mock. A mock is pretty much a blank
        # slate. I believe mock's done in setup are not happy time
        # mocks. So we just do a few things like driver config here.
        self.configuration = mock.Mock()
        self.configuration.san_is_local = False
        self.configuration.san_ip = "192.168.0.1"
        self.configuration.san_login = "admin"
        self.configuration.san_password = "mmm"
        self.configuration.dell_sc_ssn = 12345
        self.configuration.dell_sc_server_folder = 'opnstktst'
        self.configuration.dell_sc_volume_folder = 'opnstktst'
        self.configuration.dell_sc_api_port = 3033
        self.configuration.iscsi_ip_address = '192.168.1.1'
        self.configuration.iscsi_port = 3260
        self._context = context.get_admin_context()
        self.driver = dell_storagecenter_iscsi.DellStorageCenterISCSIDriver(
            configuration=self.configuration)
        self.driver.do_setup(None)
        self.driver._stats = {'QoS_support': False,
                              'volume_backend_name': 'dell-1',
                              'free_capacity_gb': 12123,
                              'driver_version': '1.0.1',
                              'total_capacity_gb': 12388,
                              'reserved_percentage': 0,
                              'vendor_name': 'Dell',
                              'storage_protocol': 'iSCSI'}
        self.volid = str(uuid.uuid4())
        self.volume_name = "volume" + self.volid
        self.connector = {
            'ip': '10.0.0.2',
            'initiator': 'iqn.1993-08.org.debian:01:2227dab76162',
            'host': 'fakehost'}
        self.connector_multipath = {
            'ip': '10.0.0.2',
            'initiator': 'iqn.1993-08.org.debian:01:2227dab76162',
            'host': 'fakehost',
            'multipath': True}
        self.access_record_output = [
            "ID  Initiator       Ipaddress     AuthMethod UserName   Apply-To",
            "--- --------------- ------------- ---------- ---------- --------",
            "1   iqn.1993-08.org.debian:01:222 *.*.*.*       none        both",
            "       7dab76162"]
        self.fake_iqn = 'iqn.2002-03.com.compellent:5000D31000000001'
        self.properties = {
            # NOTE(review): 'target_discoverd' looks like a typo for
            # 'target_discovered' -- confirm nothing keys off the
            # misspelling before renaming.
            'target_discoverd': True,
            'target_portal': '%s:3260'
            % self.driver.configuration.dell_sc_iscsi_ip,
            'target_iqn': self.fake_iqn,
            'volume_id': 1}
        self._model_update = {
            'provider_location': "%s:3260,1 %s 0"
            % (self.driver.configuration.dell_sc_iscsi_ip,
               self.fake_iqn)
            #                              ,
            # 'provider_auth': 'CHAP %s %s' % (
            #    self.configuration.eqlx_chap_login,
            #    self.configuration.eqlx_chap_password)
        }
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    def test_create_volume(self,
                           mock_find_sc,
                           mock_create_volume,
                           mock_close_connection,
                           mock_open_connection,
                           mock_init):
        """create_volume delegates to the SC API with (name, size, None).

        Mock args are injected bottom-up: method decorators first, then
        the three class-level connection patches.
        """
        volume = {'id': self.volume_name, 'size': 1}
        self.driver.create_volume(volume)
        mock_create_volume.assert_called_once_with(self.volume_name,
                                                   1,
                                                   None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay_profile',
                       return_value='fake')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'update_cg_volumes')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    def test_create_volume_consistency_group(self,
                                             mock_find_sc,
                                             mock_create_volume,
                                             mock_update_cg_volumes,
                                             mock_find_replay_profile,
                                             mock_close_connection,
                                             mock_open_connection,
                                             mock_init):
        """A consistencygroup_id also triggers replay-profile lookup and CG update."""
        volume = {'id': self.volume_name, 'size': 1,
                  'consistencygroup_id': 'guid'}
        self.driver.create_volume(volume)
        mock_create_volume.assert_called_once_with(self.volume_name,
                                                   1,
                                                   None)
        self.assertTrue(mock_find_replay_profile.called)
        self.assertTrue(mock_update_cg_volumes.called)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'storagetype:storageprofile': 'HighPriority'})
    def test_create_volume_storage_profile(self,
                                           mock_extra,
                                           mock_find_sc,
                                           mock_create_volume,
                                           mock_close_connection,
                                           mock_open_connection,
                                           mock_init):
        """A volume type's storagetype:storageprofile is passed to create_volume."""
        volume = {'id': self.volume_name, 'size': 1, 'volume_type_id': 'abc'}
        self.driver.create_volume(volume)
        mock_create_volume.assert_called_once_with(self.volume_name,
                                                   1,
                                                   "HighPriority")
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_volume',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    def test_create_volume_failure(self,
                                   mock_find_sc,
                                   mock_create_volume,
                                   mock_close_connection,
                                   mock_open_connection,
                                   mock_init):
        """create_volume raises VolumeBackendAPIException when the API returns None."""
        volume = {'id': self.volume_name, 'size': 1}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume, volume)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'delete_volume',
                       return_value=True)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    def test_delete_volume(self,
                           mock_find_sc,
                           mock_delete_volume,
                           mock_close_connection,
                           mock_open_connection,
                           mock_init):
        """delete_volume delegates to the SC API by volume name."""
        volume = {'id': self.volume_name, 'size': 1}
        self.driver.delete_volume(volume)
        mock_delete_volume.assert_called_once_with(self.volume_name)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'delete_volume',
                       return_value=False)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    def test_delete_volume_failure(self,
                                   mock_find_sc,
                                   mock_delete_volume,
                                   mock_close_connection,
                                   mock_open_connection,
                                   mock_init):
        """delete_volume raises VolumeIsBusy when the API reports failure."""
        volume = {'id': self.volume_name, 'size': 1}
        self.assertRaises(exception.VolumeIsBusy,
                          self.driver.delete_volume,
                          volume)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_server',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_server',
                       return_value=SCSERVER)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'map_volume',
                       return_value=MAPPINGS[0])
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_iscsi_properties',
                       return_value=ISCSI_PROPERTIES)
    def test_initialize_connection(self,
                                   mock_find_iscsi_props,
                                   mock_map_volume,
                                   mock_find_volume,
                                   mock_create_server,
                                   mock_find_server,
                                   mock_find_sc,
                                   mock_close_connection,
                                   mock_open_connection,
                                   mock_init):
        """Happy path: server created, volume mapped, iSCSI properties returned."""
        volume = {'id': self.volume_name}
        connector = self.connector
        data = self.driver.initialize_connection(volume, connector)
        self.assertEqual('iscsi', data['driver_volume_type'])
        # verify find_volume has been called and that it has been called twice
        mock_find_volume.assert_any_call(self.volume_name)
        assert mock_find_volume.call_count == 2
        expected = {'data': self.ISCSI_PROPERTIES,
                    'driver_volume_type': 'iscsi'}
        self.assertEqual(expected, data, 'Unexpected return value')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_server',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_server',
                       return_value=SCSERVER)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'map_volume',
                       return_value=MAPPINGS[0])
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_iscsi_properties',
                       return_value=ISCSI_PROPERTIES)
    def test_initialize_connection_multi_path(self,
                                              mock_find_iscsi_props,
                                              mock_map_volume,
                                              mock_find_volume,
                                              mock_create_server,
                                              mock_find_server,
                                              mock_find_sc,
                                              mock_close_connection,
                                              mock_open_connection,
                                              mock_init):
        """Same happy path as above but with a multipath connector."""
        # Test case where connection is multipath
        volume = {'id': self.volume_name}
        connector = self.connector_multipath
        data = self.driver.initialize_connection(volume, connector)
        self.assertEqual('iscsi', data['driver_volume_type'])
        # verify find_volume has been called and that it has been called twice
        mock_find_volume.assert_any_call(self.volume_name)
        assert mock_find_volume.call_count == 2
        props = self.ISCSI_PROPERTIES
        expected = {'data': props,
                    'driver_volume_type': 'iscsi'}
        self.assertEqual(expected, data, 'Unexpected return value')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_server',
                       return_value=SCSERVER)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'map_volume',
                       return_value=MAPPINGS)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_iscsi_properties',
                       return_value=None)
    def test_initialize_connection_no_iqn(self,
                                          mock_find_iscsi_properties,
                                          mock_map_volume,
                                          mock_find_volume,
                                          mock_find_server,
                                          mock_find_sc,
                                          mock_close_connection,
                                          mock_open_connection,
                                          mock_init):
        """find_iscsi_properties raising surfaces as VolumeBackendAPIException."""
        volume = {'id': self.volume_name}
        connector = {}
        mock_find_iscsi_properties.side_effect = Exception('abc')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection,
                          volume,
                          connector)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_server',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_server',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'map_volume',
                       return_value=MAPPINGS)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_iscsi_properties',
                       return_value=None)
    def test_initialize_connection_no_server(self,
                                             mock_find_iscsi_properties,
                                             mock_map_volume,
                                             mock_find_volume,
                                             mock_create_server,
                                             mock_find_server,
                                             mock_find_sc,
                                             mock_close_connection,
                                             mock_open_connection,
                                             mock_init):
        """Server lookup and creation both failing raises VolumeBackendAPIException."""
        volume = {'id': self.volume_name}
        connector = {}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection,
                          volume,
                          connector)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_server',
                       return_value=SCSERVER)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'map_volume',
                       return_value=MAPPINGS)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_iscsi_properties',
                       return_value=None)
    def test_initialize_connection_vol_not_found(self,
                                                 mock_find_iscsi_properties,
                                                 mock_map_volume,
                                                 mock_find_volume,
                                                 mock_find_server,
                                                 mock_find_sc,
                                                 mock_close_connection,
                                                 mock_open_connection,
                                                 mock_init):
        """An unknown volume surfaces as VolumeBackendAPIException."""
        # NOTE(review): this fixture uses a 'name' key where the other
        # initialize tests use 'id' -- confirm intentional.
        volume = {'name': self.volume_name}
        connector = {}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection,
                          volume,
                          connector)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_server',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_server',
                       return_value=SCSERVER)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'map_volume',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_iscsi_properties',
                       return_value=ISCSI_PROPERTIES)
    def test_initialize_connection_map_vol_fail(self,
                                                mock_find_iscsi_props,
                                                mock_map_volume,
                                                mock_find_volume,
                                                mock_create_server,
                                                mock_find_server,
                                                mock_find_sc,
                                                mock_close_connection,
                                                mock_open_connection,
                                                mock_init):
        """map_volume returning None surfaces as VolumeBackendAPIException."""
        # Test case where map_volume returns None (no mappings)
        volume = {'id': self.volume_name}
        connector = self.connector
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection,
                          volume,
                          connector)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_server',
                       return_value=SCSERVER)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'unmap_volume',
                       return_value=True)
    def test_terminate_connection(self,
                                  mock_unmap_volume,
                                  mock_find_volume,
                                  mock_find_server,
                                  mock_find_sc,
                                  mock_close_connection,
                                  mock_open_connection,
                                  mock_init):
        """terminate_connection unmaps the volume from the server and returns None."""
        volume = {'id': self.volume_name}
        connector = self.connector
        res = self.driver.terminate_connection(volume, connector)
        mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER)
        self.assertIsNone(res, 'None expected')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_server',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'unmap_volume',
                       return_value=True)
    def test_terminate_connection_no_server(self,
                                            mock_unmap_volume,
                                            mock_find_volume,
                                            mock_find_server,
                                            mock_find_sc,
                                            mock_close_connection,
                                            mock_open_connection,
                                            mock_init):
        """A missing server raises VolumeBackendAPIException on terminate."""
        volume = {'name': self.volume_name}
        connector = {'initiator': ''}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.terminate_connection,
                          volume,
                          connector)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_server',
                       return_value=SCSERVER)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'unmap_volume',
                       return_value=True)
    def test_terminate_connection_no_volume(self,
                                            mock_unmap_volume,
                                            mock_find_volume,
                                            mock_find_server,
                                            mock_find_sc,
                                            mock_close_connection,
                                            mock_open_connection,
                                            mock_init):
        """A missing volume raises VolumeBackendAPIException on terminate."""
        volume = {'name': self.volume_name}
        connector = {'initiator': ''}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.terminate_connection,
                          volume,
                          connector)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_server',
                       return_value=SCSERVER)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'unmap_volume',
                       return_value=False)
    def test_terminate_connection_failure(self,
                                          mock_unmap_volume,
                                          mock_find_volume,
                                          mock_find_server,
                                          mock_find_sc,
                                          mock_close_connection,
                                          mock_open_connection,
                                          mock_init):
        """unmap_volume reporting failure raises VolumeBackendAPIException."""
        volume = {'name': self.volume_name}
        connector = {'initiator': ''}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.terminate_connection,
                          volume,
                          connector)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_replay',
                       return_value='fake')
    def test_create_snapshot(self,
                             mock_create_replay,
                             mock_find_volume,
                             mock_find_sc,
                             mock_close_connection,
                             mock_open_connection,
                             mock_init):
        """create_snapshot marks the snapshot 'available' on success."""
        snapshot = {'volume_id': self.volume_name,
                    'id': self.volume_name}
        self.driver.create_snapshot(snapshot)
        self.assertEqual('available', snapshot['status'])
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_replay',
                       return_value=None)
    def test_create_snapshot_no_volume(self,
                                       mock_create_replay,
                                       mock_find_volume,
                                       mock_find_sc,
                                       mock_close_connection,
                                       mock_open_connection,
                                       mock_init):
        """A missing source volume raises VolumeBackendAPIException."""
        snapshot = {'volume_id': self.volume_name,
                    'id': self.volume_name}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_snapshot,
                          snapshot)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_replay',
                       return_value=None)
    def test_create_snapshot_failure(self,
                                     mock_create_replay,
                                     mock_find_volume,
                                     mock_find_sc,
                                     mock_close_connection,
                                     mock_open_connection,
                                     mock_init):
        """create_replay returning None raises VolumeBackendAPIException."""
        snapshot = {'volume_id': self.volume_name,
                    'id': self.volume_name}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_snapshot,
                          snapshot)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay_profile')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay',
                       return_value='fake')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_view_volume',
                       return_value=VOLUME)
    def test_create_volume_from_snapshot(self,
                                         mock_create_view_volume,
                                         mock_find_replay,
                                         mock_find_volume,
                                         mock_find_sc,
                                         mock_find_replay_profile,
                                         mock_close_connection,
                                         mock_open_connection,
                                         mock_init):
        """A view volume is created from the replay; no CG work without a group id."""
        volume = {'id': 'fake'}
        snapshot = {'id': 'fake', 'volume_id': 'fake'}
        self.driver.create_volume_from_snapshot(volume, snapshot)
        mock_create_view_volume.assert_called_once_with('fake',
                                                        'fake')
        self.assertTrue(mock_find_replay.called)
        self.assertTrue(mock_find_volume.called)
        self.assertFalse(mock_find_replay_profile.called)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay_profile',
                       return_value='fake')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'update_cg_volumes')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay',
                       return_value='fake')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_view_volume',
                       return_value=VOLUME)
    def test_create_volume_from_snapshot_cg(self,
                                            mock_create_view_volume,
                                            mock_find_replay,
                                            mock_find_volume,
                                            mock_find_sc,
                                            mock_update_cg_volumes,
                                            mock_find_replay_profile,
                                            mock_close_connection,
                                            mock_open_connection,
                                            mock_init):
        """When the new volume carries a consistencygroup_id, the CG
        profile is looked up and the CG membership updated as well."""
        volume = {'id': 'fake', 'consistencygroup_id': 'guid'}
        snapshot = {'id': 'fake', 'volume_id': 'fake'}
        self.driver.create_volume_from_snapshot(volume, snapshot)
        mock_create_view_volume.assert_called_once_with('fake',
                                                        'fake')
        self.assertTrue(mock_find_replay.called)
        self.assertTrue(mock_find_volume.called)
        self.assertTrue(mock_find_replay_profile.called)
        self.assertTrue(mock_update_cg_volumes.called)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay',
                       return_value='fake')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_view_volume',
                       return_value=None)
    def test_create_volume_from_snapshot_failed(self,
                                                mock_create_view_volume,
                                                mock_find_replay,
                                                mock_find_volume,
                                                mock_find_sc,
                                                mock_close_connection,
                                                mock_open_connection,
                                                mock_init):
        """Must raise when create_view_volume returns None."""
        volume = {'id': 'fake'}
        snapshot = {'id': 'fake', 'volume_id': 'fake'}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume_from_snapshot,
                          volume, snapshot)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_view_volume',
                       return_value=VOLUME)
    def test_create_volume_from_snapshot_no_replay(self,
                                                   mock_create_view_volume,
                                                   mock_find_replay,
                                                   mock_find_volume,
                                                   mock_find_sc,
                                                   mock_close_connection,
                                                   mock_open_connection,
                                                   mock_init):
        """Must raise when the replay is missing; the view volume must
        never be created in that case."""
        volume = {'id': 'fake'}
        snapshot = {'id': 'fake', 'volume_id': 'fake'}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume_from_snapshot,
                          volume, snapshot)
        self.assertTrue(mock_find_volume.called)
        self.assertTrue(mock_find_replay.called)
        self.assertFalse(mock_create_view_volume.called)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_cloned_volume',
                       return_value=VOLUME)
    def test_create_cloned_volume(self,
                                  mock_create_cloned_volume,
                                  mock_find_volume,
                                  mock_find_sc,
                                  mock_close_connection,
                                  mock_open_connection,
                                  mock_init):
        """The clone is created under the new name from the source
        volume found on the backend."""
        volume = {'id': self.volume_name + '_clone'}
        src_vref = {'id': self.volume_name}
        self.driver.create_cloned_volume(volume, src_vref)
        mock_create_cloned_volume.assert_called_once_with(
            self.volume_name + '_clone',
            self.VOLUME)
        self.assertTrue(mock_find_volume.called)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay_profile',
                       return_value='fake')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'update_cg_volumes')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_cloned_volume',
                       return_value=VOLUME)
    def test_create_cloned_volume_consistency_group(self,
                                                    mock_create_cloned_volume,
                                                    mock_find_volume,
                                                    mock_find_sc,
                                                    mock_update_cg_volumes,
                                                    mock_find_replay_profile,
                                                    mock_close_connection,
                                                    mock_open_connection,
                                                    mock_init):
        """Cloning into a consistency group also updates the CG
        membership via the replay profile."""
        volume = {'id': self.volume_name + '_clone',
                  'consistencygroup_id': 'guid'}
        src_vref = {'id': self.volume_name}
        self.driver.create_cloned_volume(volume, src_vref)
        mock_create_cloned_volume.assert_called_once_with(
            self.volume_name + '_clone',
            self.VOLUME)
        self.assertTrue(mock_find_volume.called)
        self.assertTrue(mock_find_replay_profile.called)
        self.assertTrue(mock_update_cg_volumes.called)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_cloned_volume',
                       return_value=VOLUME)
    def test_create_cloned_volume_no_volume(self,
                                            mock_create_cloned_volume,
                                            mock_find_volume,
                                            mock_find_sc,
                                            mock_close_connection,
                                            mock_open_connection,
                                            mock_init):
        """Must raise when the source volume is missing; the clone
        call must never be made."""
        volume = {'id': self.volume_name + '_clone'}
        src_vref = {'id': self.volume_name}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_cloned_volume,
                          volume, src_vref)
        self.assertTrue(mock_find_volume.called)
        self.assertFalse(mock_create_cloned_volume.called)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'delete_replay',
                       return_value=True)
    def test_delete_snapshot(self,
                             mock_delete_replay,
                             mock_find_volume,
                             mock_find_sc,
                             mock_close_connection,
                             mock_open_connection,
                             mock_init):
        """delete_snapshot deletes the replay identified by the
        snapshot id on the found volume."""
        snapshot = {'volume_id': self.volume_name,
                    'id': self.volume_name}
        self.driver.delete_snapshot(snapshot)
        mock_delete_replay.assert_called_once_with(
            self.VOLUME, self.volume_name)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'delete_replay',
                       return_value=True)
    def test_delete_snapshot_no_volume(self,
                                       mock_delete_replay,
                                       mock_find_volume,
                                       mock_find_sc,
                                       mock_close_connection,
                                       mock_open_connection,
                                       mock_init):
        """delete_snapshot must raise when the owning volume cannot be
        found."""
        snapshot = {'volume_id': self.volume_name,
                    'id': self.volume_name}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.delete_snapshot,
                          snapshot)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    def test_ensure_export(self,
                           mock_find_volume,
                           mock_find_sc,
                           mock_close_connection,
                           mock_open_connection,
                           mock_init):
        """ensure_export looks the volume up by its name exactly once."""
        context = {}
        volume = {'id': self.VOLUME.get(u'name')}
        self.driver.ensure_export(context, volume)
        mock_find_volume.assert_called_once_with(
            self.VOLUME.get(u'name'))
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=None)
    def test_ensure_export_failed(self,
                                  mock_find_volume,
                                  mock_find_sc,
                                  mock_close_connection,
                                  mock_open_connection,
                                  mock_init):
        """ensure_export must raise when the volume lookup fails."""
        context = {}
        volume = {'id': self.VOLUME.get(u'name')}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.ensure_export,
                          context, volume)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=None)
    def test_ensure_export_no_volume(self,
                                     mock_find_volume,
                                     mock_find_sc,
                                     mock_close_connection,
                                     mock_open_connection,
                                     mock_init):
        """Same as test_ensure_export_failed: a missing volume raises.

        NOTE(review): this duplicates the scenario above; kept for
        coverage of the not-found path.
        """
        context = {}
        volume = {'id': self.VOLUME.get(u'name')}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.ensure_export,
                          context,
                          volume)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'expand_volume',
                       return_value=VOLUME)
    def test_extend_volume(self,
                           mock_expand_volume,
                           mock_find_volume,
                           mock_find_sc,
                           mock_close_connection,
                           mock_open_connection,
                           mock_init):
        """extend_volume expands the found backend volume to new_size."""
        volume = {'name': self.volume_name, 'size': 1}
        new_size = 2
        self.driver.extend_volume(volume, new_size)
        mock_expand_volume.assert_called_once_with(self.VOLUME, new_size)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'expand_volume',
                       return_value=None)
    def test_extend_volume_no_volume(self,
                                     mock_expand_volume,
                                     mock_find_volume,
                                     mock_find_sc,
                                     mock_close_connection,
                                     mock_open_connection,
                                     mock_init):
        """extend_volume must raise when the volume is not found."""
        volume = {'name': self.volume_name, 'size': 1}
        new_size = 2
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.extend_volume,
                          volume, new_size)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=64702)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_storage_usage',
return_value={'availableSpace': 100, 'freeSpace': 50})
def test_update_volume_stats_with_refresh(self,
mock_get_storage_usage,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
stats = self.driver.get_volume_stats(True)
self.assertEqual('iSCSI', stats['storage_protocol'])
mock_get_storage_usage.called_once_with(64702)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=64702)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_storage_usage',
return_value={'availableSpace': 100, 'freeSpace': 50})
def test_get_volume_stats_no_refresh(self,
mock_get_storage_usage,
mock_find_sc,
mock_close_connection,
mock_open_connection,
mock_init):
stats = self.driver.get_volume_stats(False)
self.assertEqual('iSCSI', stats['storage_protocol'])
assert mock_get_storage_usage.called is False
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'rename_volume',
                       return_value=True)
    def test_update_migrated_volume(self,
                                    mock_rename_volume,
                                    mock_find_volume,
                                    mock_find_sc,
                                    mock_close_connection,
                                    mock_open_connection,
                                    mock_init):
        """A successful rename of the migrated backend volume clears
        _name_id in the returned model update."""
        volume = {'id': 111}
        backend_volume = {'id': 112}
        model_update = {'_name_id': None}
        rt = self.driver.update_migrated_volume(None, volume, backend_volume,
                                                'available')
        mock_rename_volume.assert_called_once_with(self.VOLUME,
                                                   volume['id'])
        self.assertEqual(model_update, rt)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'rename_volume',
                       return_value=False)
    def test_update_migrated_volume_rename_fail(self,
                                                mock_rename_volume,
                                                mock_find_volume,
                                                mock_find_sc,
                                                mock_close_connection,
                                                mock_open_connection,
                                                mock_init):
        """A failed rename keeps the backend volume's _name_id in the
        returned model update."""
        volume = {'id': 111}
        backend_volume = {'id': 112, '_name_id': 113}
        rt = self.driver.update_migrated_volume(None, volume, backend_volume,
                                                'available')
        mock_rename_volume.assert_called_once_with(self.VOLUME,
                                                   volume['id'])
        self.assertEqual({'_name_id': 113}, rt)
def test_update_migrated_volume_no_volume_id(self,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': None}
backend_volume = {'id': 112, '_name_id': 113}
rt = self.driver.update_migrated_volume(None, volume, backend_volume,
'available')
self.assertEqual({'_name_id': 113}, rt)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=None)
    def test_update_migrated_volume_no_backend_id(self,
                                                  mock_find_volume,
                                                  mock_find_sc,
                                                  mock_close_connection,
                                                  mock_open_connection,
                                                  mock_init):
        """With no backend id the lookup is made with None and the
        model update leaves _name_id as None."""
        volume = {'id': 111}
        backend_volume = {'id': None, '_name_id': None}
        rt = self.driver.update_migrated_volume(None, volume, backend_volume,
                                                'available')
        mock_find_sc.assert_called_once_with()
        mock_find_volume.assert_called_once_with(None)
        self.assertEqual({'_name_id': None}, rt)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_replay_profile',
                       return_value=SCRPLAYPROFILE)
    def test_create_consistencygroup(self,
                                     mock_create_replay_profile,
                                     mock_close_connection,
                                     mock_open_connection,
                                     mock_init):
        """CG creation delegates to create_replay_profile(group id)."""
        context = {}
        group = {'id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3'}
        self.driver.create_consistencygroup(context, group)
        mock_create_replay_profile.assert_called_once_with(group['id'])
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_replay_profile',
                       return_value=None)
    def test_create_consistencygroup_fail(self,
                                          mock_create_replay_profile,
                                          mock_close_connection,
                                          mock_open_connection,
                                          mock_init):
        """Must raise when create_replay_profile returns None."""
        context = {}
        group = {'id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3'}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_consistencygroup, context, group)
        mock_create_replay_profile.assert_called_once_with(group['id'])
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'delete_replay_profile')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay_profile',
                       return_value=SCRPLAYPROFILE)
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       'delete_volume')
    def test_delete_consistencygroup(self,
                                     mock_delete_volume,
                                     mock_find_replay_profile,
                                     mock_delete_replay_profile,
                                     mock_close_connection,
                                     mock_open_connection,
                                     mock_init):
        """Deleting a CG removes its replay profile and each member
        volume, echoing the group status in the model update."""
        self.driver.db = mock.Mock()
        mock_volume = mock.MagicMock()
        expected_volumes = [mock_volume]
        self.driver.db.volume_get_all_by_group.return_value = expected_volumes
        context = {}
        group = {'id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
                 'status': 'deleted'}
        model_update, volumes = self.driver.delete_consistencygroup(context,
                                                                    group)
        mock_find_replay_profile.assert_called_once_with(group['id'])
        mock_delete_replay_profile.assert_called_once_with(self.SCRPLAYPROFILE)
        mock_delete_volume.assert_called_once_with(mock_volume)
        self.assertEqual(group['status'], model_update['status'])
        self.assertEqual(expected_volumes, volumes)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'delete_replay_profile')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay_profile',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       'delete_volume')
    def test_delete_consistencygroup_not_found(self,
                                               mock_delete_volume,
                                               mock_find_replay_profile,
                                               mock_delete_replay_profile,
                                               mock_close_connection,
                                               mock_open_connection,
                                               mock_init):
        """A missing replay profile skips profile deletion but the
        member volumes are still deleted."""
        self.driver.db = mock.Mock()
        mock_volume = mock.MagicMock()
        expected_volumes = [mock_volume]
        self.driver.db.volume_get_all_by_group.return_value = expected_volumes
        context = {}
        group = {'id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
                 'status': 'deleted'}
        model_update, volumes = self.driver.delete_consistencygroup(context,
                                                                    group)
        mock_find_replay_profile.assert_called_once_with(group['id'])
        self.assertFalse(mock_delete_replay_profile.called)
        mock_delete_volume.assert_called_once_with(mock_volume)
        self.assertEqual(group['status'], model_update['status'])
        self.assertEqual(expected_volumes, volumes)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'update_cg_volumes',
                       return_value=True)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay_profile',
                       return_value=SCRPLAYPROFILE)
    def test_update_consistencygroup(self,
                                     mock_find_replay_profile,
                                     mock_update_cg_volumes,
                                     mock_close_connection,
                                     mock_open_connection,
                                     mock_init):
        """CG update forwards the add/remove lists to update_cg_volumes
        and returns (None, None, None) on success."""
        context = {}
        group = {'id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3'}
        add_volumes = [{'id': '101'}]
        remove_volumes = [{'id': '102'}]
        rt1, rt2, rt3 = self.driver.update_consistencygroup(context,
                                                            group,
                                                            add_volumes,
                                                            remove_volumes)
        mock_update_cg_volumes.assert_called_once_with(self.SCRPLAYPROFILE,
                                                       add_volumes,
                                                       remove_volumes)
        mock_find_replay_profile.assert_called_once_with(group['id'])
        self.assertIsNone(rt1)
        self.assertIsNone(rt2)
        self.assertIsNone(rt3)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay_profile',
                       return_value=None)
    def test_update_consistencygroup_not_found(self,
                                               mock_find_replay_profile,
                                               mock_close_connection,
                                               mock_open_connection,
                                               mock_init):
        """CG update must raise when the replay profile is missing."""
        context = {}
        group = {'id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3'}
        add_volumes = [{'id': '101'}]
        remove_volumes = [{'id': '102'}]
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.update_consistencygroup,
                          context,
                          group,
                          add_volumes,
                          remove_volumes)
        mock_find_replay_profile.assert_called_once_with(group['id'])
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'update_cg_volumes',
                       return_value=False)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay_profile',
                       return_value=SCRPLAYPROFILE)
    def test_update_consistencygroup_error(self,
                                           mock_find_replay_profile,
                                           mock_update_cg_volumes,
                                           mock_close_connection,
                                           mock_open_connection,
                                           mock_init):
        """CG update must raise when update_cg_volumes reports failure."""
        context = {}
        group = {'id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3'}
        add_volumes = [{'id': '101'}]
        remove_volumes = [{'id': '102'}]
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.update_consistencygroup,
                          context,
                          group,
                          add_volumes,
                          remove_volumes)
        mock_find_replay_profile.assert_called_once_with(group['id'])
        mock_update_cg_volumes.assert_called_once_with(self.SCRPLAYPROFILE,
                                                       add_volumes,
                                                       remove_volumes)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'snap_cg_replay',
                       return_value={'instanceId': '100'})
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay_profile',
                       return_value=SCRPLAYPROFILE)
    @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot')
    def test_create_cgsnapshot(self,
                               mock_get_all_for_cgsnapshot,
                               mock_find_replay_profile,
                               mock_snap_cg_replay,
                               mock_close_connection,
                               mock_open_connection,
                               mock_init):
        """CG snapshot snaps the replay profile (expire time 0) and
        marks the model update 'available'."""
        mock_snapshot = mock.MagicMock()
        expected_snapshots = [mock_snapshot]
        mock_get_all_for_cgsnapshot.return_value = (expected_snapshots)
        context = {}
        cggrp = {'consistencygroup_id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
                 'id': '100'}
        model_update, snapshots = self.driver.create_cgsnapshot(context, cggrp)
        mock_find_replay_profile.assert_called_once_with(
            cggrp['consistencygroup_id'])
        mock_snap_cg_replay.assert_called_once_with(self.SCRPLAYPROFILE,
                                                    cggrp['id'],
                                                    0)
        self.assertEqual('available', model_update['status'])
        self.assertEqual(expected_snapshots, snapshots)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay_profile',
                       return_value=None)
    def test_create_cgsnapshot_profile_not_found(self,
                                                 mock_find_replay_profile,
                                                 mock_close_connection,
                                                 mock_open_connection,
                                                 mock_init):
        """CG snapshot must raise when the replay profile is missing."""
        context = {}
        cggrp = {'consistencygroup_id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
                 'id': '100'}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_cgsnapshot,
                          context,
                          cggrp)
        mock_find_replay_profile.assert_called_once_with(
            cggrp['consistencygroup_id'])
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'snap_cg_replay',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay_profile',
                       return_value=SCRPLAYPROFILE)
    def test_create_cgsnapshot_fail(self,
                                    mock_find_replay_profile,
                                    mock_snap_cg_replay,
                                    mock_close_connection,
                                    mock_open_connection,
                                    mock_init):
        """CG snapshot must raise when snap_cg_replay returns None."""
        context = {}
        cggrp = {'consistencygroup_id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
                 'id': '100'}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_cgsnapshot,
                          context,
                          cggrp)
        mock_find_replay_profile.assert_called_once_with(
            cggrp['consistencygroup_id'])
        mock_snap_cg_replay.assert_called_once_with(self.SCRPLAYPROFILE,
                                                    cggrp['id'],
                                                    0)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'delete_cg_replay',
                       return_value=True)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay_profile',
                       return_value=SCRPLAYPROFILE)
    @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot')
    def test_delete_cgsnapshot(self,
                               mock_get_all_for_cgsnapshot,
                               mock_find_replay_profile,
                               mock_delete_cg_replay,
                               mock_close_connection,
                               mock_open_connection,
                               mock_init):
        """Deleting a CG snapshot removes the CG replay and echoes the
        snapshot status in the model update."""
        mock_snapshot = mock.MagicMock()
        expected_snapshots = [mock_snapshot]
        mock_get_all_for_cgsnapshot.return_value = (expected_snapshots)
        context = {}
        cgsnap = {'consistencygroup_id':
                  'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
                  'id': '100',
                  'status': 'deleted'}
        model_update, snapshots = self.driver.delete_cgsnapshot(context,
                                                                cgsnap)
        mock_find_replay_profile.assert_called_once_with(
            cgsnap['consistencygroup_id'])
        mock_delete_cg_replay.assert_called_once_with(self.SCRPLAYPROFILE,
                                                      cgsnap['id'])
        self.assertEqual({'status': cgsnap['status']}, model_update)
        self.assertEqual(expected_snapshots, snapshots)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'delete_cg_replay')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay_profile',
                       return_value=None)
    @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot')
    def test_delete_cgsnapshot_profile_not_found(self,
                                                 mock_get_all_for_cgsnapshot,
                                                 mock_find_replay_profile,
                                                 mock_delete_cg_replay,
                                                 mock_close_connection,
                                                 mock_open_connection,
                                                 mock_init):
        """A missing replay profile makes the delete a no-op on the
        backend but the model update is still returned."""
        mock_snapshot = mock.MagicMock()
        expected_snapshots = [mock_snapshot]
        mock_get_all_for_cgsnapshot.return_value = (expected_snapshots)
        context = {}
        cgsnap = {'consistencygroup_id':
                  'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
                  'id': '100',
                  'status': 'deleted'}
        model_update, snapshots = self.driver.delete_cgsnapshot(context,
                                                                cgsnap)
        mock_find_replay_profile.assert_called_once_with(
            cgsnap['consistencygroup_id'])
        self.assertFalse(mock_delete_cg_replay.called)
        self.assertEqual({'status': cgsnap['status']}, model_update)
        self.assertEqual(expected_snapshots, snapshots)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'delete_cg_replay',
                       return_value=False)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay_profile',
                       return_value=SCRPLAYPROFILE)
    def test_delete_cgsnapshot_profile_failed_delete(self,
                                                     mock_find_replay_profile,
                                                     mock_delete_cg_replay,
                                                     mock_close_connection,
                                                     mock_open_connection,
                                                     mock_init):
        """Must raise when delete_cg_replay reports failure."""
        context = {}
        cgsnap = {'consistencygroup_id':
                  'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
                  'id': '100',
                  'status': 'available'}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.delete_cgsnapshot,
                          context,
                          cgsnap)
        mock_find_replay_profile.assert_called_once_with(
            cgsnap['consistencygroup_id'])
        mock_delete_cg_replay.assert_called_once_with(self.SCRPLAYPROFILE,
                                                      cgsnap['id'])
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'manage_existing')
    def test_manage_existing(self,
                             mock_manage_existing,
                             mock_close_connection,
                             mock_open_connection,
                             mock_init):
        """manage_existing with a source-name ref passes straight
        through to the API layer."""
        # Very little to do in this one.  The call is sent
        # straight down.
        volume = {'id': 'guid'}
        existing_ref = {'source-name': 'imavolumename'}
        self.driver.manage_existing(volume, existing_ref)
        mock_manage_existing.assert_called_once_with(volume['id'],
                                                     existing_ref)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'manage_existing')
    def test_manage_existing_id(self,
                                mock_manage_existing,
                                mock_close_connection,
                                mock_open_connection,
                                mock_init):
        """manage_existing with a source-id ref also passes straight
        through to the API layer."""
        # Very little to do in this one.  The call is sent
        # straight down.
        volume = {'id': 'guid'}
        existing_ref = {'source-id': 'imadeviceid'}
        self.driver.manage_existing(volume, existing_ref)
        mock_manage_existing.assert_called_once_with(volume['id'],
                                                     existing_ref)
def test_manage_existing_bad_ref(self,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': 'guid'}
existing_ref = {'banana-name': 'imavolumename'}
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing,
volume,
existing_ref)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'get_unmanaged_volume_size',
                       return_value=4)
    def test_manage_existing_get_size(self,
                                      mock_get_unmanaged_volume_size,
                                      mock_close_connection,
                                      mock_open_connection,
                                      mock_init):
        """Size query with a source-name ref returns the backend size."""
        # Almost nothing to test here.  Just that we call our function.
        volume = {'id': 'guid'}
        existing_ref = {'source-name': 'imavolumename'}
        res = self.driver.manage_existing_get_size(volume, existing_ref)
        mock_get_unmanaged_volume_size.assert_called_once_with(existing_ref)
        # The above is 4GB and change.
        self.assertEqual(4, res)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'get_unmanaged_volume_size',
                       return_value=4)
    def test_manage_existing_get_size_id(self,
                                         mock_get_unmanaged_volume_size,
                                         mock_close_connection,
                                         mock_open_connection,
                                         mock_init):
        """Size query with a source-id ref returns the backend size."""
        # Almost nothing to test here.  Just that we call our function.
        volume = {'id': 'guid'}
        existing_ref = {'source-id': 'imadeviceid'}
        res = self.driver.manage_existing_get_size(volume, existing_ref)
        mock_get_unmanaged_volume_size.assert_called_once_with(existing_ref)
        # The above is 4GB and change.
        self.assertEqual(4, res)
def test_manage_existing_get_size_bad_ref(self,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': 'guid'}
existing_ref = {'banana-name': 'imavolumename'}
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size,
volume,
existing_ref)
def test_retype_not_extra_specs(self,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.driver.retype(
None, None, None, {'extra_specs': None}, None)
self.assertFalse(res)
def test_retype_not_storage_profile(self,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.driver.retype(
None, None, None, {'extra_specs': {'something': 'else'}}, None)
self.assertFalse(res)
def test_retype_same(self,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.driver.retype(
None, None, None,
{'extra_specs': {'storagetype:storageprofile': ['A', 'A']}},
None)
self.assertTrue(res)
    def test_retype_malformed(self,
                              mock_close_connection,
                              mock_open_connection,
                              mock_init):
        """A storageprofile diff that is not a two-element pair is
        rejected with a warning logged."""
        LOG = self.mock_object(dell_storagecenter_common, "LOG")
        res = self.driver.retype(
            None, None, None,
            {'extra_specs': {
                'storagetype:storageprofile': ['something',
                                               'not',
                                               'right']}},
            None)
        self.assertFalse(res)
        self.assertEqual(1, LOG.warning.call_count)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'unmanage')
    def test_unmanage(self,
                      mock_unmanage,
                      mock_find_volume,
                      mock_close_connection,
                      mock_open_connection,
                      mock_init):
        """unmanage looks the volume up and hands it to the API."""
        volume = {'id': 'guid'}
        self.driver.unmanage(volume)
        mock_find_volume.assert_called_once_with(volume['id'])
        mock_unmanage.assert_called_once_with(self.VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'unmanage')
    def test_unmanage_volume_not_found(self,
                                       mock_unmanage,
                                       mock_find_volume,
                                       mock_close_connection,
                                       mock_open_connection,
                                       mock_init):
        """unmanage is a silent no-op when the volume is not found."""
        volume = {'id': 'guid'}
        self.driver.unmanage(volume)
        mock_find_volume.assert_called_once_with(volume['id'])
        self.assertFalse(mock_unmanage.called)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'update_storage_profile')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc',
return_value=12345)
def test_retype(self,
mock_find_sc,
mock_find_volume,
mock_update_storage_profile,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.driver.retype(
None, {'id': 'volid'}, None,
{'extra_specs': {'storagetype:storageprofile': ['A', 'B']}},
None)
mock_update_storage_profile.ssert_called_once_with(
self.VOLUME, 'B')
self.assertTrue(res)
# ---- (extraction artifact: boundary between two concatenated source files) ----
import copy
import time
import inspect
from JumpScale import j
from JPackageStateObject import JPackageStateObject
#from JumpScale.core.sync.Sync import SyncLocal
from ActionManager import ActionManager
from CodeManagementRecipe import CodeManagementRecipe
from JumpScale.core.system.fs import FileLock
def JPLock(func):
    """Method decorator: serialize *func* under a per-jpackage file lock.

    The lock name is derived from the instance's ``domain`` and ``name``,
    so concurrent operations on the same jpackage are mutually excluded
    while operations on different packages can proceed in parallel.
    """
    import functools

    @functools.wraps(func)  # fix: preserve the wrapped function's metadata
    def wrapper(self, *args, **kwargs):
        lockname = "jp_%s_%s" % (self.domain, self.name)
        # Second argument True: block until the lock is acquired.
        with FileLock(lockname, True):
            return func(self, *args, **kwargs)
    return wrapper
class JPackageObject():
"""
Data representation of a JPackage, should contain all information contained in the jpackages.cfg
"""
    def __init__(self, domain, name, version,instance=0):
        """
        Initialization of the JPackage.

        @param domain: The domain that the JPackage belongs to, can be a string or the DomainObject4
        @param name: The name of the JPackage
        @param version: The version of the JPackage
        @param instance: NOTE(review): accepted but never stored -- the
            code below sets ``self.instance = None`` unconditionally;
            confirm whether this is intentional.
        """
        self.supportedPlatforms=[]
        self.domain=domain
        self.name=name
        self.version=version
        # NOTE(review): the ``instance`` parameter is ignored here.
        self.instance=None
        # NOTE(review): duplicate re-initialization of supportedPlatforms.
        self.supportedPlatforms=[]
        self.tags=[]
        self.description=""
        #checks on correctness of the parameters
        if not domain:
            raise ValueError('The domain parameter cannot be empty or None')
        if not name:
            raise ValueError('The name parameter cannot be empty or None')
        if not version:
            raise ValueError('The version parameter cannot be empty or None')
        # Default bookkeeping state before the on-disk metadata is read.
        self.buildNr=-1
        self.taskletsChecksum=""
        self.configchanged=False
        self.metadata=None
        self.dependencies=[] #key = domain_packagename
        self.dependenciesNames={}
        self.hrd=None
        self.hrd_instance=None
        self.actions=None
        # Order matters: _clean()/_init() prepare the metadata tree that
        # the HRD load below depends on.
        self._clean()
        self._init()
        ########
        #create defaults for new jpackages
        hrdpath=j.system.fs.joinPaths(self.getPathMetadata(),"hrd","main.hrd")
        if not j.system.fs.exists(hrdpath):
            self.init()
        ########
        #LOAD INFO FROM REPO
        self.hrd=j.core.hrd.getHRD(hrdpath)
        self._clear()
        self.buildNr = self.hrd.getInt("jp.buildNr")
        # Drop obsolete process keys from older HRD files.
        if self.hrd.exists("jp.process.tcpports"):
            self.hrd.delete("jp.process.tcpports")
        if self.hrd.exists("jp.process.startuptime"):
            self.hrd.delete("jp.process.startuptime")
        self.export = self.hrd.getBool("jp.export")
        self.autobuild = self.hrd.getBool("jp.autobuild")
        self.taskletsChecksum = self.hrd.getStr("jp.taskletschecksum")
        self.descrChecksum = self.hrd.getStr("jp.descrchecksum",default="")
        self.hrdChecksum = self.hrd.getStr("jp.hrdchecksum",default="")
        self.supportedPlatforms = self.hrd.getList("jp.supportedplatforms")
        # Side effect only: makes sure the domain object is registered.
        j.packages.getDomainObject(self.domain)
        self.blobstorRemote = None
        self.blobstorLocal = None
        self.actions = None
        self._getState()
        # Debug flag: state wins; fall back to the jp.debug HRD key.
        self.debug=self.state.debugMode
        if (self.debug==False or self.debug==0) and self.hrd.exists("jp.debug"):
            if int(self.hrd.getStr("jp.debug"))==1:
                self.debug=1
        #DO NOT SET 0, 0 means we don't count the stat from the hrd
        key="%s_%s_%s" % (self.domain,self.name,self.version)
        # Blob locations: active state dir under cfgDir, files dir in metadata.
        self._activeblobfolder = j.system.fs.joinPaths(j.dirs.cfgDir, "jpackages", "state", key)
        self._blobfolder = j.system.fs.joinPaths(self.getPathMetadata(),"files")
        # print "**JP:%s"%self
        self._loaded=False
def log(self,msg,category="",level=5):
if level<j.packages.loglevel+1 and j.packages.logenable:
j.packages.log("%s:%s"%(self,msg),category=category,level=level)
# @JPLock
    def _init(self):
        """Bootstrap the on-disk metadata tree for this jpackage.

        Copies the packaged templates into the metadata dir when the
        hrd dir is missing, loads main.hrd, reconciles domain/name/
        version/description between the HRD and this object, and
        creates the standard sub-directories (uploadhistory, files,
        one per supported platform).
        """
        #create defaults for new jpackages
        hrddir=j.system.fs.joinPaths(self.getPathMetadata(),"hrd")
        #define templates path
        # The templates live next to this module's source file.
        extpath=inspect.getfile(self.__init__)
        extpath=j.system.fs.getDirName(extpath)
        src=j.system.fs.joinPaths(extpath,"templates")
        #this is to check if metadata in jpackage dir (on repo) is complete
        if not j.system.fs.exists(hrddir):
            if self.hrd==None:
                # Seed an in-memory HRD with the identity keys so the
                # template copy below has something to work from.
                content="jp.domain=%s\n"%self.domain
                content+="jp.name=%s\n"%self.name
                content+="jp.version=%s\n"%self.version
                self.hrd=j.core.hrd.getHRD(content=content)
            j.system.fs.copyDirTree(src,self.getPathMetadata(), overwriteFiles=False) #do never put this on true
            j.system.fs.copyDirTree(src+"/actions/",self.getPathMetadata()+"/actions/", overwriteFiles=False)
            #for easy development, overwrite specific implementations
            #j.system.fs.copyFile(src+"/actions/process.depcheck.py",self.getPathMetadata()+"/actions/process.depcheck.py")
        self.hrd=j.core.hrd.getHRD(path=j.system.fs.joinPaths(hrddir,"main.hrd"))
        # Reconcile identity keys; a read-only HRD makes set() fail,
        # hence the warn-and-continue handling.
        if self.hrd.getStr("jp.domain")<>self.domain:
            try:
                self.hrd.set("jp.domain",self.domain)
            except:
                print "WARNING: domain in jpackage is not same as name of directory."
        if self.hrd.getStr("jp.name")<>self.name:
            try:
                self.hrd.set("jp.name",self.name)
            except:
                print "WARNING: name in jpackage is not same as name of directory."
        if self.hrd.getStr("jp.version")<>self.version:
            try:
                self.hrd.set("jp.version",self.version)
            except:
                print "WARNING: version in jpackage is not same as name of directory."
        # Description: prefer a non-empty HRD value, otherwise push ours.
        descr=self.hrd.getStr("jp.description")
        if descr<>False and descr<>"":
            self.description=descr
        if descr<>self.description:
            self.hrd.set("jp.description",self.description)
        self.supportedPlatforms=self.hrd.getList("jp.supportedplatforms")
        if self.supportedPlatforms==[]:
            self._raiseError("supported platforms cannot be empty")
        # Make sure the standard layout exists.
        j.system.fs.createDir(j.system.fs.joinPaths(self.getPathMetadata(),"uploadhistory"))
        j.system.fs.createDir(j.system.fs.joinPaths(self.getPathMetadata(),"files"))
        for platform in self.supportedPlatforms:
            j.system.fs.createDir(self.getPathFilesPlatform(platform))
def _clean(self):
for item in [".quarantine",".tmb"]:
# for item in [".quarantine",".tmb",'actions/code.getRecipe']:
path=j.system.fs.joinPaths(self.getPathMetadata(),item)
# print "remove:%s"%path
j.system.fs.removeDirTree(path)
for item in [".quarantine",".tmb"]:
path=j.system.fs.joinPaths(self.getPathFiles(),item)
j.system.fs.removeDirTree(path)
# print "remove:%s"%path
# j.system.fs.remove("%s/actions/install.download.py"%self.getPathMetadata())
# @JPLock
    def load(self,instance=None,force=False,hrddata={},findDefaultInstance=True):
        """
        Load this jpackage's runtime state (instance hrd, actions, blob stores).
        @param instance: instance name to load; when None and findDefaultInstance is
            True the single deployed instance (if exactly one exists) is picked up
        @param force: reload even when already loaded
        @param hrddata: extra hrd values; non-empty triggers active-hrd installation
            (NOTE(review): mutable default argument — callers must not mutate it)
        @return: self (or None when already loaded and not forced)
        """
        if self._loaded and force==False and (instance is None or instance == self.instance):
            return
        #TRY AND FIND INSTANCE
        if instance==None and findDefaultInstance:
            root=j.system.fs.joinPaths(j.dirs.packageDir, "instance", self.domain,self.name)
            if j.system.fs.exists(path=root):
                instanceNames=j.system.fs.listDirsInDir(root,False,True)
                if len(instanceNames)==1:
                    self.instance=instanceNames[0]
        else:
            if instance<>None:
                #an explicit instance was requested: it must exist on disk
                root=j.system.fs.joinPaths(j.dirs.packageDir, "instance", self.domain,self.name,instance)
                if not j.system.fs.exists(path=root):
                    j.events.inputerror_critical("Could not find instance '%s' for jpackage %s"%(instance,self),"jpackage.init")
                self.instance=instance
        if hrddata<>{}:
            self._installActiveHrd(hrddata=hrddata)
        hrdinstancepath = j.system.fs.joinPaths(self.getPathInstance(),"hrdinstance")
        if j.system.fs.exists(hrdinstancepath):
            # j.events.inputerror_critical("Cannot load jpackage:%s could not find an instance"%self)
            self.hrd_instance=j.core.hrd.getHRD(hrdinstancepath)
        self.actions = ActionManager(self)
        #WHY WOULD THIS BE NEEDED?
        #j.application.loadConfig()
        self.loadBlobStores()
        # print "loadactionsdone:%s"%self
        # ######CHECK IF JP ALREADY INSTALLED
        # if self.state.lastinstalledbuildnr>=0:
        #     #means jp is installed on system
        #     #because was already installed make sure we create active instance if we can't find the active path yet
        #     #get rid of past
        #     oldpath=j.system.fs.joinPaths(j.dirs.packageDir, "instance", self.domain,self.name,self.version)
        #     if j.system.fs.exists(path=oldpath):
        #         j.system.fs.removeDirTree(oldpath)
        #     root=j.system.fs.joinPaths(j.dirs.packageDir, "instance", self.domain,self.name)
        #     if not j.system.fs.exists(path=root) or len(j.system.fs.listDirsInDir(root,False,True))==0:
        #         #this is to allow system to keep on running when upgrading from old situation
        #         self.instance=0
        #         hrdinstancepath=j.system.fs.joinPaths(self.getPathInstance(),"hrdinstance")
        #         j.system.fs.createDir(hrdinstancepath)
        #         self.copyMetadataToActive()
        self._loaded=True
        return self
# @JPLock
def getCodeMgmtRecipe(self):
self._init()
self.load()
hrdpath=j.system.fs.joinPaths(self.getPathMetadata(),"hrd","code.hrd")
if not j.system.fs.exists(path=hrdpath):
self.init()
recipepath=j.system.fs.joinPaths(self.getPathMetadata(),"coderecipe.cfg")
if not j.system.fs.exists(path=recipepath):
self.init()
return CodeManagementRecipe(hrdpath,recipepath,jp=self)
    def _installActiveHrd(self,hrddata={}):
        """
        Match hrd templates with the active ones, adding entries where needed.
        Handles both the system-wide hrds (metadata/hrdactive -> j.dirs.hrdDir) and
        the per-instance hrds (metadata/hrdinstance -> <instance>/hrdinstance).
        Template files whose name starts with '_' are skipped.
        @param hrddata: values used to fill in missing keys during checkValidity
            (NOTE(review): mutable default argument — callers must not mutate it)
        """
        #THE ACTIVATE ONES
        hrdtemplatesPath=j.system.fs.joinPaths(self.getPathMetadata(),"hrdactive")
        for item in j.system.fs.listFilesInDir(hrdtemplatesPath):
            base=j.system.fs.getBaseName(item)
            if base[0]<>"_":
                templ=j.system.fs.fileGetContents(item)
                actbasepath=j.system.fs.joinPaths(j.dirs.hrdDir,base)
                if not j.system.fs.exists(actbasepath):
                    #means there is no hrd, put empty file
                    self.log("did not find active hrd for %s, will now put there"%actbasepath,category="init")
                    j.system.fs.writeFile(actbasepath,"")
                hrd=j.core.hrd.getHRD(actbasepath)
                hrd.checkValidity(templ,hrddata=hrddata)
        #########
        #now load the ones which are specific per instance
        hrdinstancepath=j.system.fs.joinPaths(self.getPathInstance(),"hrdinstance")
        j.system.fs.createDir(hrdinstancepath)
        hrdtemplatesPath=j.system.fs.joinPaths(self.getPathMetadata(),"hrdinstance")
        if j.system.fs.exists(path=hrdtemplatesPath):
            for item in j.system.fs.listFilesInDir(hrdtemplatesPath):
                base=j.system.fs.getBaseName(item)
                if base[0]<>"_":
                    templ=j.system.fs.fileGetContents(item)
                    actbasepath=j.system.fs.joinPaths(self.getPathInstance(),"hrdinstance",base)
                    if not j.system.fs.exists(actbasepath):
                        #means there is no hrd, put empty file
                        self.log("did not find instance hrd for %s, will now put there"%actbasepath,category="init")
                        j.system.fs.writeFile(actbasepath,"")
                    hrd=j.core.hrd.getHRD(actbasepath)
                    hrd.checkValidity(templ,hrddata=hrddata)
        j.application.loadConfig() #makes sure hrd gets reloaded to application.config object
# @JPLock
    def _copyMetadataToActive(self,hrddata={}):
        """
        Populate the active instance dir from metadata: copy the instance action
        scripts, install the hrds, then substitute hrd/dir variables in the copied
        files.
        @param hrddata: extra hrd values forwarded to _installActiveHrd
            (NOTE(review): mutable default argument — callers must not mutate it)
        """
        instancepathactions=j.system.fs.joinPaths(self.getPathInstance(),"actions")
        #start from a clean actions dir
        if j.system.fs.isDir(instancepathactions):
            j.system.fs.removeDirTree(instancepathactions)
        j.system.fs.createDir(instancepathactions)
        sourcepath=self.getPathMetadata()
        for actionname in j.packages.getActionNamesInstance():
            srcpath=j.system.fs.joinPaths(sourcepath,"actions","%s.py"%actionname)
            destpath=j.system.fs.joinPaths(instancepathactions,"%s.py"%actionname)
            j.system.fs.copyFile(srcpath,destpath)
        self._installActiveHrd(hrddata=hrddata)
        hrdinstancepath=j.system.fs.joinPaths(self.getPathInstance(),"hrdinstance")
        self.hrd_instance=j.core.hrd.getHRD(hrdinstancepath)
        dir2apply=self.getPathInstance()
        #apply jpackage instance hrd data on the active actions
        self.hrd_instance.applyOnDir(dir2apply)
        #make sure params are filled in in actions dir
        self.hrd.applyOnDir(dir2apply)
        #apply hrd config from system on actions active
        j.application.config.applyOnDir(dir2apply)
        additionalArgs={}
        additionalArgs["jp_instance"]=self.instance
        additionalArgs["jp_name"]=self.name
        additionalArgs["jp_domain"]=self.domain
        additionalArgs["jp_version"]=self.version
        j.dirs.replaceFilesDirVars(dir2apply,additionalArgs=additionalArgs)
def loadBlobStores(self):
self._init()
do = j.packages.getDomainObject(self.domain)
if do.blobstorremote.strip() <> "":
self.blobstorRemote = j.clients.blobstor.get(do.blobstorremote)
if do.blobstorlocal.strip() <> "":
self.blobstorLocal = j.clients.blobstor.get(do.blobstorlocal)
if self.blobstorRemote ==None or self.blobstorLocal==None:
self._raiseError("DEBUG NOW blobstorremote or blobstorlocal needs to be available")
def getAppPath(self):
path = self.hrd.getStr("jp.app.path")
if path is None:
j.events.inputerror_critical("Could not find 'jp.app.path' in main hrd of jpackage:%s"%jp)
path = j.dirs.replaceTxtDirVars(path)
if path is None:
j.events.inputerror_critical("Could not find data for 'jp.app.path' in main hrd of jpackage:%s"%jp)
return self.hrd_instance.applyOnContent(path)
def getDebugMode(self):
return self.state.debugMode
def getDebugModeInJpackage(self):
if self.hrd.exists("jp.debug"):
if int(self.hrd.getStr("jp.debug"))==1:
return True
return False
def setDebugMode(self,dependencies=False):
if dependencies:
deps = self.getDependencies()
for dep in deps:
dep.setDebugMode(dependencies=False)
self.state.setDebugMode()
recipe=self.getCodeMgmtRecipe()
recipe.addToProtectedDirs()
self.load(findDefaultInstance=False)
self.log("set debug mode",category="init")
def setDebugModeInJpackage(self,dependencies=False):
if dependencies:
deps = self.getDependencies()
for dep in deps:
dep.setDebugModeInJpackage(dependencies=False)
self.hrd.set("jp.debug",1)
self.load(findDefaultInstance=False)
self.log("set debug mode in jpackage",category="init")
def removeDebugModeInJpackage(self,dependencies=False):
if dependencies:
deps = self.getDependencies()
for dep in deps:
dep.removeDebugModeInJpackage(dependencies=False)
if self.hrd.exists("jp.debug"):
self.hrd.set("jp.debug",0)
self.load(findDefaultInstance=False)
self.log("remove debug mode in jpackage",category="init")
def removeDebugMode(self,dependencies=False):
if dependencies:
deps = self.getDependencies()
for dep in deps:
dep.removeDebugMode(dependencies=False)
recipe=self.getCodeMgmtRecipe()
recipe.removeFromProtectedDirs()
self.state.setDebugMode(mode=0)
self.log("remove debug mode",category="init")
###############################################################
############ MAIN OBJECT METHODS (DELETE, ...) ##############
###############################################################
    @JPLock
    def delete(self):
        """
        Delete all metadata & files of this jpackage, after an interactive
        confirmation when running in an interactive shell.
        """
        # self._init()
        #NOTE(review): loadActions() is not defined in this part of the file — confirm it exists on this class
        self.loadActions()
        if j.application.shellconfig.interactive:
            do = j.gui.dialog.askYesNo("Are you sure you want to remove %s_%s_%s, all metadata & files will be removed" % (self.domain, self.name, self.version))
        else:
            do = True
        if do:
            path = j.packages.getDataPath(self.domain, self.name, self.version)
            j.system.fs.removeDirTree(path)
            path = j.packages.getMetadataPath(self.domain, self.name,self.version)
            j.system.fs.removeDirTree(path)
            #NOTE(review): getPathActions is called with args while sibling path getters take none — verify its signature
            path = self.getPathActions(self.domain, self.name,self.instance)
            j.system.fs.removeDirTree(path)
        #@todo over ftp try to delete the targz file (less urgent), check with other quality levels to make sure we don't delete files we should not delete
@JPLock
def save(self, new=False):
"""
Creates a new config file and saves the most important jpackages params in it
@param new: True if we are saving a new Q-Package, used to ensure backwards compatibility
@type new: boolean
"""
self.log('saving jpackages data to ' + self.getPathMetadata(),category="save")
if self.buildNr == "":
self._raiseError("buildNr cannot be empty")
self.hrd.set("jp.buildNr",self.buildNr)
self.hrd.set("jp.export",self.export)
self.hrd.set("jp.autobuild",self.autobuild)
self.hrd.set("jp.taskletschecksum",self.taskletsChecksum)
self.hrd.set("jp.hrdchecksum",self.hrdChecksum)
self.hrd.set("jp.descrchecksum",self.descrChecksum)
self.hrd.set("jp.supportedplatforms",self.supportedPlatforms)
# for idx, dependency in enumerate(self.dependencies):
# self._addDependencyToHRD(idx, dependency.domain, dependency.name,minversion=dependency.minversion,maxversion=dependency.maxversion)
@JPLock
def _addDependencyToHRD(self, idx, domain, name, minversion, maxversion):
hrd = self.hrd
basekey = 'jp.dependency.%s.%%s' % idx
def setValue(name, value):
hrd.set(basekey % name, value)
setValue('domain', domain)
setValue('name', name)
setValue('minversion', minversion)
setValue('maxversion', maxversion)
##################################################################################################
################################### DEPENDENCY HANDLING #######################################
##################################################################################################
    def loadDependencies(self, errorIfNotFound=True):
        """
        Resolve the jp.dependency.* entries from the hrd into self.dependencies
        (ordered so that transitive dependencies come first) and
        self.dependenciesNames (keyed 'domain__name').
        No-op when dependencies were already loaded.
        @param errorIfNotFound: when False, unresolvable dependencies are skipped
        """
        if self.dependencies==[]:
            #collect the distinct integer indexes used in jp.dependency.<id>.<field>
            ids = set()
            for key in self.hrd.prefix('jp.dependency'):
                try:
                    ids.add(int(key.split('.')[2]))
                except Exception:
                    self._raiseError("Error in jpackage hrd:%s"%self)
            ids = list(ids)
            ids.sort(reverse=True)
            #walk over found id's
            for id in ids:
                key="jp.dependency.%s.%%s"%id
                #make sure the optional version bounds exist in the hrd
                if not self.hrd.exists(key % 'minversion'):
                    self.hrd.set(key % 'minversion',"")
                if not self.hrd.exists(key % 'maxversion'):
                    self.hrd.set(key % 'maxversion',"")
                name=self.hrd.getStr(key % 'name')
                domain=self.hrd.getStr(key % 'domain')
                minversion=self.hrd.getStr(key % 'minversion')
                maxversion=self.hrd.getStr(key % 'maxversion')
                deppack=j.packages.findNewest(domain,name,\
                    minversion=minversion,maxversion=maxversion,returnNoneIfNotFound=not(errorIfNotFound)) #,platform=j.system.platformtype.myplatformdeppack.loadDependencies()
                if errorIfNotFound == False and deppack == None:
                    continue
                deppackKey="%s__%s"%(deppack.domain,deppack.name)
                self.dependenciesNames[deppackKey]=deppack
                #now deps of deps
                deppack.loadDependencies()
                self.dependencies.append(deppack)
                #re-append transitive deps after their dependent; the final reverse()
                #then puts the deepest dependencies first
                for deppack2 in reversed(deppack.dependencies):
                    if deppack2 in self.dependencies:
                        self.dependencies.remove(deppack2)
                    self.dependencies.append(deppack2)
                    deppackKey2="%s__%s"%(deppack2.domain,deppack2.name)
                    self.dependenciesNames[deppackKey2]=deppack2
            self.dependencies.reverse()
def addDependency(self, domain, name, supportedplatforms, minversion, maxversion, dependencytype):
dep = DependencyDef4()
dep.name = name
dep.domain = domain
dep.minversion = minversion
dep.maxversion = maxversion
# dep.supportedPlatforms = supportedplatforms
# dep.dependencytype = j.enumerators.DependencyType4.getByName(dependencytype)
# self.dependencyDefs.append(dep)
self.save()
self.dependencies=[]
self.loadDependencies()
#############################################################################
#################################### GETS #################################
#############################################################################
def getIsPreparedForUpdatingFiles(self):
"""
Return true if package has been prepared
"""
prepared = self.state.prepared
if prepared == 1:
return True
return False
def getKey(self):
return "%s|%s|%s"%(self.domain,self.name,self.version)
def getDependingInstalledPackages(self, recursive=False, errorIfNotFound=True):
"""
Return the packages that are dependent on this packages and installed on this machine
This is a heavy operation and might take some time
"""
##self.assertAccessable()
if errorIfNotFound and self.getDependingPackages(recursive=recursive, errorIfNotFound=errorIfNotFound) == None:
self._raiseError("No depending packages present")
[p for p in self.getDependingPackages(recursive=recursive, errorIfNotFound=errorIfNotFound) if p.isInstalled()]
def getDependingPackages(self, recursive=False, errorIfNotFound=True):
"""
Return the packages that are dependent on this package
This is a heavy operation and might take some time
"""
return [p for p in j.packages.getJPackageObjects() if self in p.getDependencies(errorIfNotFound)]
def _getState(self):
##self.assertAccessable()
"""
from dir get [qbase]/cfg/jpackages/state/$jpackagesdomain_$jpackagesname_$jpackagesversion.state
is a inifile with following variables
* lastinstalledbuildnr
* lastaction
* lasttag
* lastactiontime epoch of last time an action was done
* currentaction ("" if no action current)
* currenttag ("" if no action current)
* lastexpandedbuildNr (means expanded from tgz into jpackages dir)
@return a JpackageStateObject
"""
self.state=JPackageStateObject(self)
def getVersionAsInt(self):
"""
Translate string version representation to a number
"""
##self.assertAccessable()
#@todo
version = self.version
return float(version)
def getPathInstance(self):
"""
Return absolute pathname of the package's metadatapath
"""
return j.packages.getJPActiveInstancePath(self.domain, self.name, self.instance)
def getPathMetadata(self):
"""
Return absolute pathname of the package's metadatapath active instance
"""
return j.packages.getMetadataPath(self.domain, self.name, self.version)
def getPathFiles(self):
"""
Return absolute pathname of the jpackages's filespath
"""
##self.assertAccessable()
return j.packages.getDataPath(self.domain, self.name, self.version)
def getPathFilesPlatform(self, platform=None):
"""
Return absolute pathname of the jpackages's filespath
if not given then will be: j.system.platformtype
"""
##self.assertAccessable()
if platform==None:
platform=j.system.platformtype.myplatform
platform=self._getPackageInteractive(platform)
path = j.system.fs.joinPaths(self.getPathFiles(), str(platform))
return path
def getPathFilesPlatformForSubDir(self, subdir):
"""
Return absolute pathnames of the jpackages's filespath for platform or parent of platform if it does not exist in lowest level
if platform not given then will be: j.system.platformtype
the subdir will be used to check upon if found in one of the dirs, if never found will raise error
all matching results are returned
"""
result=[]
for possibleplatform in j.system.platformtype.getMyRelevantPlatforms():
# print platform
path = j.system.fs.joinPaths(self.getPathFiles(), possibleplatform,subdir)
#print path
if j.system.fs.exists(path):
result.append(path)
if len(result)==0:
self._raiseError("Could not find subdir %s in files dirs for '%s'"%(subdir,self))
return result
def getPathSourceCode(self):
"""
Return absolute path to where this package's source can be extracted to
"""
raise NotImplementedError()
#return j.system.fs.joinPaths(j.dirs.varDir, 'src', self.name, self.version)
def getHighestInstalledBuildNr(self):
"""
Return the latetst installed buildnumber
"""
##self.assertAccessable()
return self.state.lastinstalledbuildnr
def buildNrIncrement(self):
buildNr=0
for ql in self.getQualityLevels():
path=self.getMetadataPathQualityLevel(ql)
if path != None:
path= j.system.fs.joinPaths(path,"hrd","main.hrd")
buildNr2=j.core.hrd.getHRD(path).getInt("jp.buildNr")
if buildNr2>buildNr:
buildNr=buildNr2
buildNr+=1
self.buildNr=buildNr
self.save()
return self.buildNr
def getMetadataPathQualityLevel(self,ql):
path=j.system.fs.joinPaths(j.dirs.packageDirMD, self.domain)
if not j.system.fs.isLink(path):
self._raiseError("%s needs to be link"%path)
jpackagesdir=j.system.fs.getParent(j.system.fs.readlink(path))
path= j.system.fs.joinPaths(jpackagesdir,ql,self.name,self.version)
if not j.system.fs.exists(path=path):
return None
return path
def getQualityLevels(self):
path=j.system.fs.joinPaths(j.dirs.packageDirMD, self.domain)
if not j.system.fs.isLink(path):
self._raiseError("%s needs to be link"%path)
jpackageconfig = j.config.getConfig('sources', 'jpackages')
ql = jpackageconfig[self.domain].get('qualitylevel', [])
return [ql] if not isinstance(ql, list) else ql
    def getBrokenDependencies(self, platform=None):
        """
        Return the dependency definitions that cannot be resolved to an existing
        package for *platform* (resolved interactively when None).
        """
        platform=self._getPackageInteractive(platform)
        broken = []
        for dep in self.dependencies: # go over my dependencies
            #findNewest raises when nothing matches; that failure marks the dep broken
            #TODO: pass the returnNoneIfNotFound flag instead of using exceptions for control flow
            try:
                j.packages.findNewest(domain=dep.domain, name=dep.name, minversion=dep.minversion, maxversion=dep.maxversion, platform=platform)
            except Exception, e:
                print str(e)
                broken.append(dep)
        return broken
def getDependencies(self, errorIfNotFound=True):
"""
Return the dependencies for the JPackage
"""
self.loadDependencies(errorIfNotFound)
return self.dependencies
def getInstanceNames(self):
root=j.system.fs.joinPaths(j.dirs.packageDir, "instance", self.domain,self.name)
if j.system.fs.exists(root):
return j.system.fs.listDirsInDir(root,False,True)
return list()
def _getPackageInteractive(self,platform):
if platform == None and len(self.supportedPlatforms) == 1:
platform = self.supportedPlatforms[0]
if platform==None and j.application.shellconfig.interactive:
platform = j.gui.dialog.askChoice("Select platform.",self.supportedPlatforms ,str(None))
if platform==None:
platform=None
return platform
def _copyBlobInfo(self):
j.system.fs.copyDirTree(self._blobfolder, self._activeblobfolder)
def getBlobInfo(self,platform,ttype,active=False):
"""
@return blobkey,[[md5,path],...]
"""
blobfolder = self._blobfolder if not active else self._activeblobfolder
path=j.system.fs.joinPaths(blobfolder,"%s___%s.info"%(platform,ttype))
if j.system.fs.exists(path):
content=j.system.fs.fileGetContents(path)
splitted=content.split("\n")
key=splitted[0].strip()
result=[]
splitted=splitted[1:]
for item in splitted:
item=item.strip()
if item=="":
continue
result.append([item.strip() for item in item.split("|")])
return key,result
else:
return None,[]
def getBlobItemPaths(self,platform,ttype,blobitempath):
"""
translates the item as shown in the blobinfo to the corresponding paths (jpackageFilesPath,destpathOnSystem)
"""
platform=platform.lower().strip()
ttype=ttype.lower().strip()
ptype = ttype
if ptype.find("cr_")==0:
ptype=ttype[3:]
filespath=j.system.fs.joinPaths(self.getPathFiles(),platform,ttype,blobitempath)
systemdest = j.packages.getTypePath(ptype, blobitempath,jp=self)
return (filespath,systemdest)
@JPLock
def getBlobPlatformTypes(self, active=False):
"""
@return [[platform,ttype],...]
"""
#@TODO this is plain wrong !!!!
result=[]
path = self._blobfolder if not active else self._activeblobfolder
if not j.system.fs.exists(path=path):
if not active:
self.init()
else:
return result
infofiles=[j.system.fs.getBaseName(item) for item in j.system.fs.listFilesInDir(path,False) if item.find("___")<>-1]
for item in infofiles:
platform,ttype=item.split("___")
ttype=ttype.replace(".info","")
if ttype<>"":
result.append([platform,ttype])
return result
    def getCodeLocationsFromRecipe(self):
        """
        Return the sorted source directories referenced by the code recipe, with
        sub-directories of an already-listed directory filtered out.
        """
        items=[]
        for item in self.getCodeMgmtRecipe().items:
            #NOTE(review): bare attribute access below has no effect unless
            #systemdest is a property with side effects — confirm it's a leftover
            item.systemdest
            path=item.getSource()
            #directories only: replace a file path by its parent dir
            if j.system.fs.isFile(path):
                path=j.system.fs.getDirName(path)
            items.append(path)
        items.sort()
        result=[]
        previtem="willnotfindthis"
        #after sorting, children directly follow their parent path; keep only the
        #entries that are not prefixed by the previously kept entry
        for x in range(len(items)):
            item=items[x]
            # print "previtem:%s now:%s"%(previtem,item)
            if not item.find(previtem)==0:
                previtem=item
                if item not in result:
                    # print "append"
                    result.append(item)
        return result
def _getPlatformDirsToCopy(self):
"""
Return a list of platform related directories to be copied in sandbox
"""
platformDirs = list()
platform = j.system.platformtype
_jpackagesDir = self.getPathFiles()
platformSpecificDir = j.system.fs.joinPaths(_jpackagesDir, str(platform), '')
if j.system.fs.isDir(platformSpecificDir):
platformDirs.append(platformSpecificDir)
genericDir = j.system.fs.joinPaths(_jpackagesDir, 'generic', '')
if j.system.fs.isDir(genericDir):
platformDirs.append(genericDir)
if platform.isUnix():
unixDir = j.system.fs.joinPaths(_jpackagesDir, 'unix', '')
if j.system.fs.isDir(unixDir):
platformDirs.append(unixDir)
if platform.isSolaris():
sourceDir = j.system.fs.joinPaths(_jpackagesDir, 'solaris', '')
elif platform.isLinux():
sourceDir = j.system.fs.joinPaths(_jpackagesDir, 'linux', '')
elif platform.isDarwin():
sourceDir = j.system.fs.joinPaths(_jpackagesDir, 'darwin', '')
elif platform.isWindows():
sourceDir = j.system.fs.joinPaths(_jpackagesDir, 'win', '')
if j.system.fs.isDir(sourceDir):
if not str(sourceDir) in platformDirs:
platformDirs.append(sourceDir)
return platformDirs
#############################################################################
################################ CHECKS ###################################
#############################################################################
def hasModifiedFiles(self):
"""
Check if files are modified in the JPackage files
"""
##self.assertAccessable()
if self.state.prepared == 1:
return True
return False
def hasModifiedMetaData(self):
"""
Check if files are modified in the JPackage metadata
"""
##self.assertAccessable()
return self in j.packages.getDomainObject(self.domain).getJPackageTuplesWithModifiedMetadata()
    def isInstalled(self, instance=None,checkAndDie=False,hrdcheck=True):
        """
        Check whether this JPackage is installed.
        @param instance: check that specific instance; when None, fall back to the
            loaded instance or (if none is loaded) the first deployed instance
        @param checkAndDie: raise a critical ops error when not installed
        @param hrdcheck: additionally require the hrdinstance path to exist
        """
        #a build was installed at least once
        installed = self.state.lastinstalledbuildnr != -1
        if hrdcheck:
            if instance<>None:
                hrdinstancepath = j.packages.getJPActiveInstancePath(self.domain, self.name, instance)
            elif self.instance is None:
                instances = self.getInstanceNames()
                if instances:
                    hrdinstancepath = j.packages.getJPActiveInstancePath(self.domain, self.name, instances[0])
                else:
                    #no instance deployed at all: skip the hrd path check
                    hrdinstancepath = None
            else:
                hrdinstancepath = self.getPathInstance()
            if hrdinstancepath is not None and not j.system.fs.exists(path=hrdinstancepath):
                installed=False
        if checkAndDie and installed==False:
            j.events.opserror_critical("Jpackage %s is not installed, cannot continue."%self)
        return installed
def supportsPlatform(self,platform=None):
"""
Check if a JPackage can be installed on a platform
"""
if platform==None:
relevant=j.system.platformtype.getMyRelevantPlatforms()
else:
relevant=j.system.platformtype.getParents(platform)
for supportedPlatform in self.supportedPlatforms:
if supportedPlatform in relevant:
return True
return False
    def _isHostPlatformSupported(self, platform):
        '''
        Checks if a given platform is supported; the check takes the supported
        platforms' parents into account.
        @param platform: platform to check
        @type platform: j.system.platformtype
        @return: flag that indicates if the given platform is supported
        @rtype: Boolean
        '''
        #@todo P1 no longer working use new j.system.platformtype
        #NOTE(review): the loop variable shadows the 'platform' parameter, so the
        #final membership test below uses the last walked parent (None after the
        #while loop), not the argument — part of why this is marked broken
        supportedPlatformPool = list()
        for platform in self.supportedPlatforms:
            while platform != None:
                supportedPlatformPool.append(platform)
                platform = platform.parent
        if platform in supportedPlatformPool:
            return True
        else:
            return False
#############################################################################
################################# ACTIONS ################################
#############################################################################
@JPLock
def start(self,dependencies=False):
"""
Start the JPackage, run the start tasklet(s)
"""
# self.isInstalled(checkAndDie=True)
if dependencies:
deps = self.getDependencies()
for dep in deps:
dep.start(False)
self.load()
self.actions.process_start()
self.log('start')
@JPLock
def stop(self,dependencies=False,walkinstances=False):
"""
Stop the JPackage, run the stop tasklet(s)
"""
if self.name=="redis" and self.instance=="system":
#this is required during bootstrap
return
if dependencies:
deps = self.getDependencies()
for dep in deps:
dep.stop(False)
if walkinstances:
for iname in self.getInstanceNames():
self.load(iname)
self.stop(walkinstances=False)
else:
if self.isInstalled():
self.load()
self.actions.process_stop()
self.log('stop')
@JPLock
def kill(self,dependencies=False):
"""
Stop the JPackage, run the stop tasklet(s)
"""
self.isInstalled(checkAndDie=True)
if dependencies:
deps = self.getDependencies()
for dep in deps:
dep.kill(False)
self.load()
self.actions.process_kill()
self.log('stop')
@JPLock
def monitor(self,dependencies=False,result=True):
"""
Stop the JPackage, run the stop tasklet(s)
"""
if dependencies:
deps = self.getDependencies()
for dep in deps:
result=result & dep.monitor(False,result)
self.load()
print "monitor for: %s"%self
result=result&self.actions.monitor_up_local()
return result
@JPLock
def monitor_net(self,ipaddr="localhost",dependencies=False,result=True):
"""
Stop the JPackage, run the stop tasklet(s)
"""
if dependencies:
deps = self.getDependencies()
for dep in deps:
result=result & dep.monitor(False,result)
self.load()
result=result&self.actions.monitor_up_net(ipaddr=ipaddr)
return result
    @JPLock
    def restart(self,dependencies=False):
        """
        Restart the JPackage: run the stop tasklet followed by the start tasklet.
        @param dependencies: when True the dependencies are stopped first
            (NOTE(review): deps are only stopped, not restarted — the commented-out
            variant below suggests a restart was once intended; confirm)
        """
        if dependencies:
            deps = self.getDependencies()
            for dep in deps:
                dep.stop(False)
        self.load()
        self.actions.process_stop()
        self.actions.process_start()
        #NOTE(review): logs 'stop' although this is a restart — confirm the log tag
        self.log('stop')
        # if dependencies:
        #     deps = self.getDependencies()
        #     for dep in deps:
        #         dep.restart(False)
        # self.loadActions()
        # self.stop()
        # self.start()
def isrunning(self,dependencies=False,ipaddr="localhost"):
"""
Check if application installed is running for jpackages
"""
self.monitor(dependencies=dependencies)
# self.monitor_up_net(dependencies=dependencies,ipaddr=ipaddr)
self.log('isrunning')
def reinstall(self, dependencies=False, download=True):
"""
Reinstall the JPackage by running its install tasklet, best not to use dependancies reinstall
"""
self.install(dependencies=dependencies, download=download, reinstall=True)
@JPLock
def prepare(self, dependencies=True, download=True):
if dependencies:
deps = self.getDependencies()
for dep in deps:
dep.install(False, download, reinstall=False)
self.load()
self.actions.install_prepare()
@JPLock
def copyfiles(self, dependencies=True, download=True):
if dependencies:
deps = self.getDependencies()
for dep in deps:
dep.copyfiles(dependencies=False, download=download)
self.load()
if self.debug ==True:
self._copyfiles(doCodeRecipe=False)
self.codeLink(dependencies=False, update=False, force=True)
else:
self.actions.install_copy()
def installDebs(self):
for platform in j.system.fs.listDirsInDir(self.getPathFiles(),dirNameOnly=True):
if platform not in j.system.platformtype.getMyRelevantPlatforms():
continue
pathplatform=j.system.fs.joinPaths(self.getPathFiles(),platform)
entries=j.system.fs.listDirsInDir(pathplatform,dirNameOnly=True)
for ttype in entries:
if ttype == 'debs':
fullpath = j.system.fs.joinPaths(pathplatform, ttype)
for file_ in sorted(j.system.fs.listFilesInDir(fullpath)):
j.system.platform.ubuntu.installDebFile(file_)
    def _copyfiles(self,doCodeRecipe=True):
        """
        Copy this jpackage's files for all relevant platforms onto the system:
        remove previously installed files, install debs first, then copy each type
        dir to its destination (with hrd substitution for etc/cfg), and finally
        record the blob info into the active state folder.
        @param doCodeRecipe: when False the cr_* (code recipe) dirs are skipped
        """
        self._cleanupfiles(doCodeRecipe)
        for platform in j.system.fs.listDirsInDir(self.getPathFiles(),dirNameOnly=True):
            if platform not in j.system.platformtype.getMyRelevantPlatforms():
                continue
            #first do the debs otherwise the other dirs cannot overwrite what debs do
            #NOTE(review): installDebs() scans all platforms itself, so it runs once
            #per relevant platform here — confirm that is intended
            self.installDebs()
            pathplatform=j.system.fs.joinPaths(self.getPathFiles(),platform)
            for ttype in j.system.fs.listDirsInDir(pathplatform,dirNameOnly=True):
                # print "type:%s,%s"%(ttype,ttype.find("cr_"))
                if doCodeRecipe==False and ttype.find("cr_")==0:
                    print "DO NOT COPY, because debug "
                    continue #skip the coderecipe folders
                else:
                    pathttype=j.system.fs.joinPaths(pathplatform,ttype)
                    j.system.fs.removeIrrelevantFiles(pathttype)
                    #only etc/cfg content gets hrd variable substitution
                    if ttype in ["etc","cfg"]:
                        applyhrd=True
                    else:
                        applyhrd=False
                    if ttype == 'debs':
                        continue
                    tmp,destination=self.getBlobItemPaths(platform,ttype,"")
                    self.log("copy files from:%s to:%s"%(pathttype,destination))
                    self.__copyFiles(pathttype,destination,applyhrd=applyhrd)
        self._copyBlobInfo()
def _cleanupfiles(self, doCodeRecipe):
for platform, ttype in self.getBlobPlatformTypes(True):
if not doCodeRecipe and ttype.startswith('cr_'):
continue
else:
blobkey, keys = self.getBlobInfo(platform, ttype, active=True)
for md5, relativefile in keys:
# print "1:'%s' '%s'"%(md5,relativefile)
blobpath, localpath = self.getBlobItemPaths(platform, ttype, relativefile)
# print "2:'%s' '%s'"%(blobpath,localpath)
if localpath<>"/tmp":
j.system.fs.remove(localpath)
def __copyFiles(self, path,destination,applyhrd=False):
"""
Copy the files from package dirs to their proper location in the sandbox.
@param destination: destination of the files
"""
if destination=="":
self._raiseError("A destination needs to be specified.") #done for safety, jpackage action scripts have to be adjusted
print "pathplatform:%s"%path
# self.log("Copy files from %s to %s"%(path,destination),category="copy")
j.system.fs.createDir(destination,skipProtectedDirs=True)
if applyhrd:
tmpdir=j.system.fs.getTmpDirPath()
j.system.fs.copyDirTree(path,tmpdir)
j.application.config.applyOnDir(tmpdir)
j.dirs.replaceFilesDirVars(tmpdir)
j.system.fs.copyDirTree(tmpdir, destination,skipProtectedDirs=True)
j.system.fs.removeDirTree(tmpdir)
else:
j.system.fs.copyDirTree(path, destination,keepsymlinks=True,skipProtectedDirs=True)
    @JPLock
    def install(self, dependencies=True, download=True, reinstall=False,reinstalldeps=False,update=False,instance=None,hrddata={}):
        """
        Install the JPackage
        @param dependencies: if True, all dependencies will be installed too
        @param download: if True, bundles of package will be downloaded too
        @param reinstall: if True, package will be reinstalled
           when dependencies the reinstall will not be asked for there
        @param reinstalldeps: passed as the reinstall flag for dependency installs
        @param update: when True, skip the configure step at the end
        @param instance: instance identifier; defaults to 0 when None
        @param hrddata: extra HRD key/values for the active metadata
            NOTE(review): mutable default — safe only if callees never mutate it; confirm.
        """
        if not self.supportsPlatform():
            self._raiseError("Only those platforms are supported by this package %s your system supports the following platforms: %s" % (str(self.supportedPlatforms), str(j.system.platformtype.getMyRelevantPlatforms())))
        # re-entrancy guard: a package being installed is registered globally
        key="%s_%s"%(self.domain,self.name)
        if key in j.packages.inInstall:
            print "are already in install of jpackage"
            return
        if dependencies:
            deps = self.getDependencies()
            for dep in deps:
                print "**%s asks for dependency:%s"%(self,dep)
                dep.install(False, download, reinstall=reinstalldeps,hrddata=hrddata)
        # If I am already installed assume my dependencies are also installed
        if self.buildNr != -1 and self.buildNr <= self.state.lastinstalledbuildnr and not reinstall and self.isInstalled(instance):
            self.log('already installed')
            # if str(instance) in self.getInstanceNames():
            # self.configure(dependencies=dependencies,instance=instance,hrddata=hrddata)
            return # Nothing to do
        j.packages.inInstall.append(key)
        if instance is None:
            instance = 0
        self.instance=instance
        self._copyMetadataToActive(hrddata=hrddata)
        self.load(force=True,instance=instance) #reload actions to make sure new hrdactive are applied
        self.stop()
        if download:
            self.download(dependencies=False)
        if reinstall or self.buildNr > self.state.lastinstalledbuildnr:
            #print 'really installing ' + str(self)
            self.log('installing')
            # NOTE(review): checkNoCurrentAction is read, not called — presumably a property; confirm
            if self.state.checkNoCurrentAction == False:
                self._raiseError("jpackages is in inconsistent state, ...")
            self.prepare(dependencies=False)
            self.copyfiles(dependencies=False)
            self.actions.install_post()
            if self.buildNr==-1:
                self.buildNr=0
        if self.debug:
            # debug mode installs by linking the code instead of copying files
            self.log('install for debug (link)')
            self.codeLink(dependencies=False, update=False, force=True)
        if not update:
            # if self.buildNr==-1 or self.configchanged or reinstall or self.buildNr > self.state.lastinstalledbuildnr:
            self.configure(dependencies=False)
        if self.buildNr<0:
            self.buildNr=0
        self.state.setLastInstalledBuildNr(self.buildNr)
        j.packages.inInstall.remove(key)
def isNew(self):
if self.buildNr==-1 or self.buildNr > self.state.lastinstalledbuildnr:
return True
return False
    @JPLock
    def uninstall(self, unInstallDependingFirst=False):
        """
        Remove the JPackage from the sandbox. In case dependent JPackages are installed, the JPackage is not removed.
        @param unInstallDependingFirst: remove first dependent JPackages
        """
        # Make sure there are no longer installed packages that depend on me
        ##self.assertAccessable()
        self.loadActions()
        if unInstallDependingFirst:
            for p in self.getDependingInstalledPackages(errorIfNotFound=False):
                p.uninstall(True)
        # even after the recursive pass above, refuse to proceed if dependents remain
        if self.getDependingInstalledPackages(recursive=True,errorIfNotFound=False):
            self._raiseError('Other package on the system dependend on this one, uninstall them first!')
        j.tools.startupmanager.remove4JPackage(self)
        # NOTE(review): tag/action locals appear unused below — presumably legacy; confirm
        tag = "install"
        action = "uninstall"
        state = self.state
        if state.checkNoCurrentAction == False:
            self._raiseError("jpackages is in inconsistent state, ...")
        self.log('uninstalling' + str(self))
        self.actions.uninstall()
        # -1 marks the package as not installed
        state.setLastInstalledBuildNr(-1)
    def prepareForUpdatingFiles(self, suppressErrors=False):
        """
        After this command the operator can change the files of the jpackages.
        Files do not aways come from code repo, they can also come from jpackages repo only

        @param suppressErrors: forwarded to download/expand so failures are tolerated
        """
        j.system.fs.createDir(self.getPathFiles())
        if self.state.prepared <> 1:  # only prepare once; state flag survives restarts
            if not self.isNew():
                self.download(suppressErrors=suppressErrors)
                self._expand(suppressErrors=suppressErrors)
            self.state.setPrepared(1)
    @JPLock
    def configure(self, dependencies=False,instance=None,hrddata={}):
        """
        Configure the JPackage after installation, via the configure tasklet(s)

        @param dependencies: when True, configure direct dependencies first
        @param instance: instance name; when None (and no current instance),
            every known instance is configured in turn
        @param hrddata: extra HRD key/values copied into the active metadata
        """
        self.log('configure')
        if dependencies:
            deps = self.getDependencies()
            for dep in deps:
                dep.configure(dependencies=False)
        if instance==None and self.instance is None:
            # no instance selected: fan out over all instances, then stop
            for instanceName in self.getInstanceNames():
                self.instance=instanceName
                self.configure(dependencies=False,instance=instanceName)
            return
        self._copyMetadataToActive(hrddata=hrddata)
        self.load(force=True)
        self.actions.install_configure()
        self.actions.process_configure()
        # self.state.setIsPendingReconfiguration(False)
        j.application.loadConfig() #makes sure hrd gets reloaded to application.config object
@JPLock
def codeExport(self, dependencies=False, update=None):
"""
Export code to right locations in sandbox or on system
code recipe is being used
only the sections in the recipe which are relevant to you will be used
"""
self.load()
self.log('CodeExport')
if dependencies == None:
j.gui.dialog.askYesNo(" Do you want to link the dependencies?", False)
if update == None:
j.gui.dialog.askYesNo(" Do you want to update your code before exporting?", True)
if update:
self.codeUpdate(dependencies)
if dependencies:
deps = self.getDependencies()
for dep in deps:
dep.codeExport(dependencies=False,update=update)
    @JPLock
    def codeUpdate(self, dependencies=False, force=False):
        """
        Update code from code repo (get newest code)

        @param dependencies: also update direct dependencies
        @param force: forwarded to dependency updates (discard local changes)
        """
        self.log('CodeUpdate')
        self.load(findDefaultInstance=False)
        # j.clients.mercurial.statusClearAll()
        if dependencies:
            deps = self.getDependencies()
            for dep in deps:
                dep.codeUpdate(dependencies=False,force=force)
        self.actions.code_update()
    @JPLock
    def codeCommit(self, dependencies=False, push=False):
        """
        update code from code repo (get newest code)

        @param dependencies: also commit direct dependencies
        @param push: after committing, push via codePush
        """
        self.load(findDefaultInstance=False)
        self.log('CodeCommit')
        j.clients.mercurial.statusClearAll()
        if dependencies:
            deps = self.getDependencies()
            for dep in deps:
                dep.codeCommit(dependencies=False,push=push)
        self.actions.code_commit()
        if push:
            self.codePush(dependencies)
# @JPLock
# def codePush(self, dependencies=False, merge=True):
# """
# Push code to repo (be careful this can brake code of other people)
# """
# self.load()
# j.log("CodePush")
# j.clients.mercurial.statusClearAll()
# if dependencies:
# deps = self.getDependencies()
# for dep in deps:
# dep.codePush(merge=merge)
# self.actions.code_push(merge=merge)
    @JPLock
    def codeLink(self, dependencies=False, update=False, force=True):
        """
        Link code from local repo to right locations in sandbox
        @param dependencies: link dependencies too; when None asks interactively
        @param update: update code first; when None asks interactively
        @param force: if True, do an update which removes the changes (when using as install method should be True)
        """
        self.load()
        # j.clients.mercurial.statusClearAll()
        self.log("CodeLink")
        if dependencies is None:
            if j.application.shellconfig.interactive:
                dependencies = j.gui.dialog.askYesNo("Do you want to link the dependencies?", False)
            else:
                self._raiseError("Need to specify arg 'depencies' (true or false) when non interactive")
        if update is None:
            if j.application.shellconfig.interactive:
                update = j.gui.dialog.askYesNo("Do you want to update your code before linking?", True)
            else:
                self._raiseError("Need to specify arg 'update' (true or false) when non interactive")
        if update:
            self.codeUpdate(dependencies, force=force)
        if dependencies:
            deps = self.getDependencies()
            for dep in deps:
                dep.codeLink(dependencies=False, update=update,force=force)
        # packages without a code repo configured have nothing to link
        hrdpath=j.system.fs.joinPaths(self.getPathMetadata(),"hrd","code.hrd")
        codehrd=j.core.hrd.getHRD(hrdpath)
        account=codehrd.getStr("jp.code.account")
        repo=codehrd.getStr("jp.code.repo")
        if account=="" or repo=="":
            return
        self.actions.code_link(force=force)
    @JPLock
    def package(self, dependencies=False,update=False):
        """
        copy files from code recipe's and also manually copied files in the files sections
        @param dependencies: whether or not to package the dependencies
        @type dependencies: boolean
        @param update: when True, run a code update before packaging
        """
        self.load(instance=None,findDefaultInstance=False)
        self.log('Package')
        # Disable action caching:
        # If a user packages for 2 different platforms in the same jshell
        # instance, the second call is just ignored, which is not desired
        # behaviour.
        # Also, when a user packages once, then sees a need to update his/her
        # code, and then packages again in the same jshell, again the second
        # time would be a non-op, which is again not desired. So we disable the
        # action caching for this action.
        if dependencies:
            deps = self.getDependencies()
            for dep in deps:
                dep.package()
        if update:
            self.actions.code_update()
        self.actions.code_package()
        # detect whether anything changed; any change bumps the build number once
        newbuildNr = False
        newblobinfo = self._calculateBlobInfo()
        actionsdir=j.system.fs.joinPaths(self.getPathMetadata(), "actions")
        j.system.fs.removeIrrelevantFiles(actionsdir)
        taskletsChecksum, descr2 = j.tools.hash.hashDir(actionsdir)
        hrddir=j.system.fs.joinPaths(self.getPathMetadata(), "hrdactive")
        hrdChecksum, descr2 = j.tools.hash.hashDir(hrddir)
        descrdir=j.system.fs.joinPaths(self.getPathMetadata(), "documentation")
        descrChecksum, descr2 = j.tools.hash.hashDir(descrdir)
        if descrChecksum <> self.descrChecksum:
            self.log("Descr change.",level=5,category="buildNr")
            #buildNr needs to go up
            newbuildNr = True
            self.descrChecksum = descrChecksum
        else:
            self.log("Descr did not change.",level=7,category="buildNr")
        if taskletsChecksum <> self.taskletsChecksum:
            self.log("Actions change.",level=5,category="buildNr")
            #buildNr needs to go up
            newbuildNr = True
            self.taskletsChecksum = taskletsChecksum
        else:
            self.log("Actions did not change.",level=7,category="buildNr")
        if hrdChecksum <> self.hrdChecksum:
            self.log("Active HRD change.",level=5,category="buildNr")
            #buildNr needs to go up
            newbuildNr = True
            self.hrdChecksum = hrdChecksum
        else:
            self.log("Active HRD did not change.",level=7,category="buildNr")
        if newbuildNr or newblobinfo:
            if newbuildNr:
                # _calculateBlobInfo already incremented for blob changes
                self.buildNrIncrement()
            self.log("new buildNr is:%s"%self.buildNr)
            self.save()
            self.load()
    def _calculateBlobInfo(self):
        """
        Hash every platform/type section of the package files and rewrite the
        corresponding .info metadata for sections whose content changed.

        Bumps the build number (once) on the first detected change and appends
        an entry to the upload history per changed section.
        @return: True when at least one section changed
        """
        result = False
        filesdir = j.system.fs.joinPaths(self.getPathMetadata(),"files")
        pathfiles = self.getPathFiles()
        if not j.system.fs.exists(pathfiles):
            return result
        for platform in j.system.fs.listDirsInDir(pathfiles,dirNameOnly=True):
            pathplatform=j.system.fs.joinPaths(self.getPathFiles(),platform)
            for ttype in j.system.fs.listDirsInDir(pathplatform,dirNameOnly=True):
                pathttype=j.system.fs.joinPaths(pathplatform,ttype)
                j.system.fs.removeIrrelevantFiles(pathttype,followSymlinks=False)
                md5,llist=j.tools.hash.hashDir(pathttype)
                if llist=="":
                    continue  # empty section: nothing to record
                out="%s\n"%md5
                out+=llist
                oldkey,olditems=self.getBlobInfo(platform,ttype)
                if oldkey<>md5:
                    if not result:
                        # bump the build number only once, on the first change
                        self.buildNrIncrement()
                    result = True
                    dest=j.system.fs.joinPaths(self.getPathMetadata(),"files","%s___%s.info"%(platform,ttype))
                    j.system.fs.createDir(j.system.fs.getDirName(dest))
                    j.system.fs.writeFile(dest,out)
                    # append an audit line: human time | epoch | buildNr | md5
                    dest=j.system.fs.joinPaths(self.getPathMetadata(),"uploadhistory","%s___%s.info"%(platform,ttype))
                    out="%s | %s | %s | %s\n"%(j.base.time.getLocalTimeHR(),j.base.time.getTimeEpoch(),self.buildNr,md5)
                    j.system.fs.writeFile(dest, out, append=True)
                    self.log("Uploaded changed for platform:%s type:%s"%(platform,ttype),level=5,category="upload" )
                else:
                    self.log("No file change for platform:%s type:%s"%(platform,ttype),level=5,category="upload" )
        return result
    @JPLock
    def compile(self,dependencies=False):
        """
        Run the package's compile action (dependencies first when requested).
        @param dependencies: compile direct dependencies before this package
        """
        self.load()
        # expose this package to the tasklet via the shared params object
        params = j.core.params.get()
        params.jpackages = self
        self.log('Compile')
        if dependencies:
            deps = self.getDependencies()
            for dep in deps:
                dep.compile()
        self.actions.compile()
    @JPLock
    def download(self, dependencies=False, destination=None, suppressErrors=False, allplatforms=False,force=False,expand=True,nocode=False,instance=None):
        """
        Download the jpackages & expand

        @param dependencies: also download dependencies; None asks interactively
        @param destination: alternative download location (default: package files dir)
        @param nocode: skip 'cr_' (code recipe) sections; forced on in debug mode
        @param expand: expand the downloaded blobs after fetching
        """
        self.load(instance=instance,findDefaultInstance=False)
        if self.debug:
            # in debug mode code comes from codeLink, never from blobs
            nocode=True
        if dependencies==None and j.application.shellconfig.interactive:
            dependencies = j.console.askYesNo("Do you want all depending packages to be downloaded too?")
        else:
            dependencies=dependencies
        if instance<>None:
            self.instance=instance
        if dependencies:
            deps = self.getDependencies()
            for dep in deps:
                dep.download(dependencies=False, destination=destination,allplatforms=allplatforms,expand=expand,nocode=nocode)
        self.actions.install_download(expand=expand,nocode=nocode)
    def _download(self,destination=None,force=False,expand=True,nocode=False):
        """
        Fetch and expand every blob section of this package from the blobstor.

        Sections already downloaded with a matching checksum are skipped unless
        @force is True. Missing blobs are first copied remote -> local.
        @param nocode: skip 'cr_' (code recipe) sections
        """
        j.packages.getDomainObject(self.domain)
        self.log('Downloading.')
        for platform,ttype in self.getBlobPlatformTypes():
            if ttype[0:3]=="cr_":
                if nocode:
                    print "no need to download (option nocode):%s %s"%(self,ttype)
                    continue
            if destination==None:
                downloaddestination=j.system.fs.joinPaths(self.getPathFiles(),platform,ttype)
            else:
                downloaddestination = destination
            checksum,files=self.getBlobInfo(platform,ttype)
            self.log("key found:%s for platform:%s type:%s"%(checksum,platform,ttype),category="download",level=6)
            key="%s_%s"%(platform,ttype)
            if not self.blobstorLocal.exists(checksum):
                # blob not cached locally: pull it from the remote store first
                print "try to find remote"
                self.blobstorRemote.copyToOtherBlobStor(checksum, self.blobstorLocal)
                force=True
            # skip sections whose checksum we already downloaded & expanded
            if force==False and self.state.downloadedBlobStorKeys.has_key(key) and self.state.downloadedBlobStorKeys[key] == checksum:
                self.log("No need to download/expand for platform_type:'%s', already there."%key,level=5)
                continue
            self.log("expand platform_type:%s"%key,category="download")
            j.system.fs.removeDirTree(downloaddestination)
            j.system.fs.createDir(downloaddestination)
            self.blobstorLocal.download(checksum, downloaddestination)
            self.state.downloadedBlobStorKeys[key] = checksum
            self.state.save()
        return True
@JPLock
def backup(self,url=None,dependencies=False):
"""
Make a backup for this package by running its backup tasklet.
"""
if url==None:
url = j.console.askString("Url to backup to?")
else:
self._raiseError("url needs to be specified")
self.load()
params = j.core.params.get()
params.jpackages = self
params.url=url
self.log('Backup')
if dependencies:
deps = self.getDependencies()
for dep in deps:
dep.backup(url=url)
self.actions.backup()
@JPLock
def restore(self,url=None,dependencies=False):
"""
Make a restore for this package by running its restore tasklet.
"""
if url==None:
url = j.console.askString("Url to restore to?")
else:
self._raiseError("url needs to be specified")
self.log('restore')
self.load()
params = j.core.params.get()
params.jpackages = self
params.url=url
if dependencies:
deps = self.getDependencies()
for dep in deps:
dep.restore(url=url)
self.actions.restore()
def upload(self, remote=True, local=True,dependencies=False,onlycode=False):
if dependencies==None and j.application.shellconfig.interactive:
dependencies = j.console.askYesNo("Do you want all depending packages to be downloaded too?")
else:
dependencies=dependencies
self.load(instance=None,findDefaultInstance=False)
if dependencies:
deps = self.getDependencies()
for dep in deps:
dep.upload(remote=remote, local=local,dependencies=False,onlycode=onlycode)
self.actions.upload(onlycode=onlycode)
def getBlobKeysActive(self):
keys=[]
for platform,ttype in self.getBlobPlatformTypes():
key0,blobitems=self.getBlobInfo(platform,ttype)
keys.append(key0)
return keys
    def uploadExistingBlobs(self,blobserver,dependencies=False):
        """
        Copy this package's active blobs onto @blobserver, sourcing each blob
        from the local blobstor first, then the remote one.

        @param blobserver: name of the destination blobstor client
        @param dependencies: also upload dependencies' blobs
        @return the non found keys
        """
        self.loadBlobStores()
        if dependencies:
            deps = self.getDependencies()
            for dep in deps:
                dep.uploadExistingBlobs(blobserver=blobserver)
        keys=self.getBlobKeysActive()
        bservernew=j.clients.blobstor.get(blobserver)
        notfound=[]
        for key in keys:
            print self,
            if not bservernew.exists(key):
                #does not exist on remote bserver yet
                print "blob %s not on dest."%(key),
                if self.blobstorLocal.exists(key):
                    print "upload from local."
                    self.blobstorLocal.copyToOtherBlobStor(key, bservernew)
                elif self.blobstorRemote.exists(key):
                    print "upload from remote."
                    self.blobstorRemote.copyToOtherBlobStor(key, bservernew)
                else:
                    # blob exists nowhere we can reach: report it to the caller
                    print "blob %s not on sources."%key
                    notfound.append(key)
        return notfound
    def uploadExistingBlobsFromHistory(self,blobserver="jpackages_remote"):
        """
        Walk the upload history (newest first) per section and make sure the
        most recent blob we can still find is present on @blobserver.

        @return the non found keys
        """
        self.loadBlobStores()
        bservernew=j.clients.blobstor.get(blobserver)
        hist=self.getBlobHistory()
        for ttype in hist.keys():
            epochs=hist[ttype].keys()#[int(item) for item in hist[ttype].keys()]
            # newest history entries first
            epochs.sort()
            epochs.reverse()
            for epoch in epochs:
                key=hist[ttype][epoch][1]
                print "%s %s %s %s"%(self.domain,self.name,ttype,hist[ttype][epoch][0])
                if bservernew.exists(key):
                    print "found"
                    break  # newest available blob already on destination
                else:
                    #does not exist on remote bserver yet
                    print "blob %s not on dest."%(key),
                    if self.blobstorLocal.exists(key):
                        print "upload from local."
                        self.blobstorLocal.copyToOtherBlobStor(key, bservernew)
                        break
                    elif self.blobstorRemote.exists(key):
                        print "upload from remote."
                        self.blobstorRemote.copyToOtherBlobStor(key, bservernew)
                        break
def checkExistingBlobs(self,blobserver,dependencies=False):
"""
@return the non found keys
"""
self.loadBlobStores()
if dependencies:
deps = self.getDependencies()
for dep in deps:
dep.uploadExistingBlobs(blobserver=blobserver)
keys=self.getBlobKeysActive()
bservernew=j.clients.blobstor.get(blobserver)
notfound=[]
for key in keys:
print self,
if not bservernew.exists(key):
notfound.append(key)
return notfound
    def getBlobHistory(self):
        """
        Parse the uploadhistory .info files.

        @return: dict keyed on "platform___ttype", each value a dict mapping
            epoch (string) -> (human readable time, md5)
        """
        blobtypes=self.getBlobPlatformTypes()
        result={}
        for btype in blobtypes:
            # btype is a (platform, ttype) tuple; files are named platform___ttype.info
            btype2="___".join(btype)
            result[btype2]={}
            path="%s/uploadhistory/%s.info"%(self.getPathMetadata(),btype2)
            if not j.system.fs.exists(path):
                print "ERROR: COULD NOT FIND %s"%path
            else:
                C=j.system.fs.fileGetContents(path)
                for line in C.split("\n"):
                    if line.strip()<>"":
                        # line format: humantime | epoch | buildNr | md5
                        hrtime,ttime,nr,md5= line.split("|")
                        md5=md5.strip()
                        result[btype2][ttime]=(hrtime,md5)
        return result
    @JPLock
    def _upload(self, remote=True, local=True,onlycode=False):
        """
        Upload jpackages to Blobstor, default remote and local

        @param remote: also push the blob to the remote blobstor
        @param local: put the blob on the local blobstor
        @param onlycode: only upload code-recipe ('cr_') sections
        """
        self.load(instance=None,findDefaultInstance=False)
        self._calculateBlobInfo()
        for platform,ttype in self.getBlobPlatformTypes():
            key0,blobitems=self.getBlobInfo(platform,ttype)
            pathttype=j.system.fs.joinPaths(self.getPathFiles(),platform,ttype)
            if ttype[0:3]<>"cr_" and onlycode:
                print "no need to upload (onlycode option):%s %s %s"%(self,platform,ttype)
                continue
            if not j.system.fs.exists(pathttype):
                self._raiseError("Could not find files section:%s, check the files directory in your jpackages metadata dir, maybe there is a .info file which is wrong & does not exist here."%pathttype)
            self.log("Upload platform:'%s', type:'%s' files:'%s'"%(platform,ttype,pathttype),category="upload")
            if local and remote and self.blobstorRemote <> None and self.blobstorLocal <> None:
                # store locally, then replicate the resulting blob to the remote store
                key, descr,uploadedAnything = self.blobstorLocal.put(pathttype)
                self.blobstorLocal.copyToOtherBlobStor(key,self.blobstorRemote)
                # key, descr,uploadedAnything = self.blobstorRemote.put(pathttype)
            elif local and self.blobstorLocal <> None:
                key, descr, uploadedAnything = self.blobstorLocal.put(pathttype, blobstors=[])
            elif remote and self.blobstorRemote <> None:
                key, descr, uploadedAnything = self.blobstorRemote.put(pathttype, blobstors=[])
            else:
                self._raiseError("need to upload to local or remote")
            # if uploadedAnything:
            # self.log("Uploaded blob for %s:%s:%s to blobstor."%(self,platform,ttype))
            # else:
            # self.log("Blob for %s:%s:%s was already on blobstor, no need to upload."%(self,platform,ttype))
            if key0<>key:
                # the checksum recorded in metadata must match what was stored
                self._raiseError("Corruption in upload for %s"%self)
    @JPLock
    def waitUp(self, timeout=60,dependencies=False):
        """
        Poll (every 0.5s) until this package — and optionally its dependencies —
        report up via their monitor_up_local actions.

        @param timeout: seconds before raising an ops error
        @raise via _raiseErrorOps on timeout
        """
        self.load()
        if dependencies:
            deps = self.getDependencies()
        else:
            deps=[]
        start=j.base.time.getTimeEpoch()
        now=start
        while now<start+timeout:
            result=True
            for dep in deps:
                # result=result & dep.actions.monitor_up_net()
                result &= dep.actions.monitor_up_local()
            # result=result & self.actions.monitor_up_net()
            result &= self.actions.monitor_up_local()
            if result:
                return True
            time.sleep(0.5)
            print "waitup:%s"%self
            now=j.base.time.getTimeEpoch()
        self._raiseErrorOps("Timeout on waitup for jp:%s"%self)
    @JPLock
    def waitDown(self, timeout=60,dependencies=False):
        """
        Wait until this package (and optionally dependencies) report down.
        Currently stubbed out: logs and returns True immediately.
        """
        self.log("waitdown: not implemented")
        return True
        # NOTE: everything below is unreachable — kept from the pre-stub
        # implementation (polls monitor_up_net until everything reports down)
        self.load()
        if dependencies:
            deps = self.getDependencies()
        else:
            deps=[]
        start=j.base.time.getTimeEpoch()
        now=start
        while now<start+timeout:
            result=True
            for dep in deps:
                result &= not(dep.actions.monitor_up_net())
            result &= not(self.actions.monitor_up_net())
            if result:
                return True
            time.sleep(0.5)
            print "waitdown:%s"%self
            now=j.base.time.getTimeEpoch()
        self._raiseErrorOps("Timeout on waitdown for jp:%s"%self)
    @JPLock
    def processDepCheck(self, timeout=60,dependencies=False):
        """
        Poll (every 0.5s) until the process_depcheck action of this package
        and, optionally, of its dependencies no longer returns False.

        @param timeout: seconds before raising an ops error
        """
        #check for dependencies for process to start
        self.load()
        if dependencies:
            deps = self.getDependencies()
        else:
            deps=[]
        start=j.base.time.getTimeEpoch()
        now=start
        while now<start+timeout:
            result=True
            for dep in deps:
                r=dep.actions.process_depcheck()
                if r == False:
                    result = False
            r=self.actions.process_depcheck()
            if r == False:
                result = False
            # anything other than an explicit False from every check counts as ready
            if result != False:
                return True
            time.sleep(0.5)
            print "processdepcheck:%s"%self
            now=j.base.time.getTimeEpoch()
        self._raiseErrorOps("Timeout on check process dependencies for jp:%s"%self)
########################################################################
######################### RECONFIGURE ################################
########################################################################
    def signalConfigurationNeeded(self):
        """
        Set in the corresponding jpackages's state file if reconfiguration is needed
        Also raises the global pending-configuration flag for all packages.
        """
        self.state.setIsPendingReconfiguration(True)
        j.packages._setHasPackagesPendingConfiguration(True)
def isPendingReconfiguration(self):
"""
Check if the JPackage needs reconfiguration
"""
if self.state.getIsPendingReconfiguration() == 1:
return True
return False
#########################################################################
####################### SHOW ############################################
    def showDependencies(self):
        """
        Print all dependencies of the JPackage to the console.
        See also: addDependency and removeDependency
        """
        self._printList(self.getDependencies())
    def showDependingInstalledPackages(self):
        """
        Show which jpackages have this jpackages as dependency.
        Do this only for the installed jpackages.
        """
        self._printList(self.getDependingInstalledPackages())
    def showDependingPackages(self):
        """
        Show which jpackages have this jpackages as dependency.
        """
        self._printList(self.getDependingPackages())
def _printList(self, arr):
for item in arr:
j.console.echo(item)
#########################################################################
####################### SUPPORTING FUNCTIONS ##########################
    def _getDomainObject(self):
        """
        Get the domain object for this Q-Package
        @return: domain object for this Q-Package
        @rtype: Domain.Domain
        """
        return j.packages.getDomainObject(self.domain)
    def _raiseError(self,message,category="jpackage"):
        """Raise a critical INPUT error, prefixed with this package's identity."""
        ##self.assertAccessable()
        message = "INPUTERROR: %s for jpackage %s_%s_%s (%s)" % (message, self.domain, self.name, self.version,self.instance)
        j.events.inputerror_critical(message,category)
    def _raiseErrorOps(self,message,category="jpackage"):
        """Raise a critical OPS error, prefixed with this package's identity."""
        ##self.assertAccessable()
        message = "OPSERROR: %s for jpackage %s_%s_%s (%s)" % (message, self.domain, self.name, self.version,self.instance)
        j.events.opserror_critical(message,category)
    def _clear(self):
        ##self.assertAccessable()
        """
        Clear all properties except domain, name, and version
        """
        self.tags = []
        self.supportedPlatforms=[]
        self.buildNr = 0
        self.dependencies = []
        self.dependenciesNames = {}
    def __cmp__(self,other):
        """
        Compare on name, domain, and numeric version.
        NOTE(review): returns a bool, but Python 2 __cmp__ is expected to return
        a negative/zero/positive int; True (1) for equal objects inverts the
        ordering semantics. Equality via == is handled by __eq__ below — confirm
        whether ordering/sorting of packages is ever relied on.
        """
        if other == None or other=="":
            return False
        return self.name == other.name and str(self.domain) == str(other.domain) and j.packages._getVersionAsInt(self.version) == j.packages._getVersionAsInt(other.version)
    def __repr__(self):
        # repr and str render identically for packages
        return self.__str__()
    def _resetPreparedForUpdatingFiles(self):
        # clear the prepared flag so prepareForUpdatingFiles runs again next time
        self.state.setPrepared(0)
    def __str__(self):
        # e.g. "JPackage mydomain mypkg 1.0 (0) "
        return "JPackage %s %s %s (%s) " % (self.domain, self.name, self.version,self.instance)
def __eq__(self, other):
return str(self) == str(other)
def reportNumbers(self):
return ' buildNr:' + str(self.buildNr)
| |
"""The HTTP api to control the cloud integration."""
import asyncio
from functools import wraps
import logging
import aiohttp
import async_timeout
import attr
from hass_nabucasa import Cloud, auth, thingtalk
from hass_nabucasa.const import STATE_DISCONNECTED
import voluptuous as vol
from homeassistant.components import websocket_api
from homeassistant.components.alexa import (
entities as alexa_entities,
errors as alexa_errors,
)
from homeassistant.components.google_assistant import helpers as google_helpers
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.http.data_validator import RequestDataValidator
from homeassistant.components.websocket_api import const as ws_const
from homeassistant.core import callback
from .const import (
DOMAIN,
PREF_ALEXA_REPORT_STATE,
PREF_ENABLE_ALEXA,
PREF_ENABLE_GOOGLE,
PREF_GOOGLE_REPORT_STATE,
PREF_GOOGLE_SECURE_DEVICES_PIN,
REQUEST_TIMEOUT,
InvalidTrustedNetworks,
InvalidTrustedProxies,
RequireRelink,
)
_LOGGER = logging.getLogger(__name__)

# Websocket command identifiers and their request schemas.
WS_TYPE_STATUS = "cloud/status"
SCHEMA_WS_STATUS = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
    {vol.Required("type"): WS_TYPE_STATUS}
)

WS_TYPE_SUBSCRIPTION = "cloud/subscription"
SCHEMA_WS_SUBSCRIPTION = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
    {vol.Required("type"): WS_TYPE_SUBSCRIPTION}
)

WS_TYPE_HOOK_CREATE = "cloud/cloudhook/create"
SCHEMA_WS_HOOK_CREATE = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
    {vol.Required("type"): WS_TYPE_HOOK_CREATE, vol.Required("webhook_id"): str}
)

WS_TYPE_HOOK_DELETE = "cloud/cloudhook/delete"
SCHEMA_WS_HOOK_DELETE = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
    {vol.Required("type"): WS_TYPE_HOOK_DELETE, vol.Required("webhook_id"): str}
)

# Maps exception classes to an (HTTP status code, user message) pair.
# Extended with auth/network error mappings inside async_setup.
_CLOUD_ERRORS = {
    InvalidTrustedNetworks: (
        500,
        "Remote UI not compatible with 127.0.0.1/::1 as a trusted network.",
    ),
    InvalidTrustedProxies: (
        500,
        "Remote UI not compatible with 127.0.0.1/::1 as trusted proxies.",
    ),
}
async def async_setup(hass):
    """Initialize the HTTP API.

    Registers all cloud websocket commands and HTTP views, and extends the
    error-mapping table with auth/network exceptions.
    """
    async_register_command = hass.components.websocket_api.async_register_command
    # legacy-style registrations pass an explicit type + schema ...
    async_register_command(WS_TYPE_STATUS, websocket_cloud_status, SCHEMA_WS_STATUS)
    async_register_command(
        WS_TYPE_SUBSCRIPTION, websocket_subscription, SCHEMA_WS_SUBSCRIPTION
    )
    # ... while decorated handlers carry their own type/schema
    async_register_command(websocket_update_prefs)
    async_register_command(
        WS_TYPE_HOOK_CREATE, websocket_hook_create, SCHEMA_WS_HOOK_CREATE
    )
    async_register_command(
        WS_TYPE_HOOK_DELETE, websocket_hook_delete, SCHEMA_WS_HOOK_DELETE
    )
    async_register_command(websocket_remote_connect)
    async_register_command(websocket_remote_disconnect)

    async_register_command(google_assistant_list)
    async_register_command(google_assistant_update)

    async_register_command(alexa_list)
    async_register_command(alexa_update)
    async_register_command(alexa_sync)

    async_register_command(thingtalk_convert)

    hass.http.register_view(GoogleActionsSyncView)
    hass.http.register_view(CloudLoginView)
    hass.http.register_view(CloudLogoutView)
    hass.http.register_view(CloudRegisterView)
    hass.http.register_view(CloudResendConfirmView)
    hass.http.register_view(CloudForgotPasswordView)

    # map cloud auth/network failures to HTTP status + message
    _CLOUD_ERRORS.update(
        {
            auth.UserNotFound: (400, "User does not exist."),
            auth.UserNotConfirmed: (400, "Email not confirmed."),
            auth.UserExists: (400, "An account with the given email already exists."),
            auth.Unauthenticated: (401, "Authentication failed."),
            auth.PasswordChangeRequired: (400, "Password change required."),
            asyncio.TimeoutError: (502, "Unable to reach the Home Assistant cloud."),
            aiohttp.ClientError: (500, "Error making internal request"),
        }
    )
def _handle_cloud_errors(handler):
    """Webview decorator to handle auth errors.

    Converts any exception raised by the wrapped view handler into a JSON
    error response using the _CLOUD_ERRORS mapping.
    """

    @wraps(handler)
    async def error_handler(view, request, *args, **kwargs):
        """Handle exceptions that raise from the wrapped request handler."""
        try:
            result = await handler(view, request, *args, **kwargs)
            return result
        except Exception as err:  # pylint: disable=broad-except
            status, msg = _process_cloud_exception(err, request.path)
            return view.json_message(
                msg, status_code=status, message_code=err.__class__.__name__.lower()
            )

    return error_handler
def _ws_handle_cloud_errors(handler):
    """Websocket decorator to handle auth errors.

    Converts any exception raised by the wrapped websocket handler into a
    websocket error message using the _CLOUD_ERRORS mapping.
    """

    @wraps(handler)
    async def error_handler(hass, connection, msg):
        """Handle exceptions that raise from the wrapped handler."""
        try:
            return await handler(hass, connection, msg)
        except Exception as err:  # pylint: disable=broad-except
            err_status, err_msg = _process_cloud_exception(err, msg["type"])
            connection.send_error(msg["id"], err_status, err_msg)

    return error_handler
def _process_cloud_exception(exc, where):
    """Map a raised exception to an (HTTP status, message) pair."""
    known = _CLOUD_ERRORS.get(type(exc))
    if known is not None:
        return known
    # Unknown exception: log the traceback and report a generic upstream error.
    _LOGGER.exception("Unexpected error processing request for %s", where)
    return (502, f"Unexpected error: {exc}")
class GoogleActionsSyncView(HomeAssistantView):
    """Trigger a Google Actions Smart Home Sync."""

    url = "/api/cloud/google_actions/sync"
    name = "api:cloud:google_actions/sync"

    @_handle_cloud_errors
    async def post(self, request):
        """Trigger a Google Actions sync.

        Returns an empty JSON body with the status code reported by the sync.
        """
        hass = request.app["hass"]
        cloud: Cloud = hass.data[DOMAIN]
        gconf = await cloud.client.get_google_config()
        status = await gconf.async_sync_entities(gconf.cloud_user)
        return self.json({}, status_code=status)
class CloudLoginView(HomeAssistantView):
    """Login to Home Assistant cloud."""

    url = "/api/cloud/login"
    name = "api:cloud:login"

    @_handle_cloud_errors
    @RequestDataValidator(
        vol.Schema({vol.Required("email"): str, vol.Required("password"): str})
    )
    async def post(self, request, data):
        """Handle login request.

        Auth failures are converted to error responses by _handle_cloud_errors.
        """
        hass = request.app["hass"]
        cloud = hass.data[DOMAIN]
        await cloud.login(data["email"], data["password"])
        return self.json({"success": True})
class CloudLogoutView(HomeAssistantView):
    """Log out of the Home Assistant cloud."""

    url = "/api/cloud/logout"
    name = "api:cloud:logout"

    @_handle_cloud_errors
    async def post(self, request):
        """Handle logout request, bounded by REQUEST_TIMEOUT."""
        hass = request.app["hass"]
        cloud = hass.data[DOMAIN]

        with async_timeout.timeout(REQUEST_TIMEOUT):
            await cloud.logout()

        return self.json_message("ok")
class CloudRegisterView(HomeAssistantView):
    """Register on the Home Assistant cloud."""

    url = "/api/cloud/register"
    name = "api:cloud:register"

    @_handle_cloud_errors
    @RequestDataValidator(
        vol.Schema(
            {
                vol.Required("email"): str,
                # cloud requires passwords of at least 6 characters
                vol.Required("password"): vol.All(str, vol.Length(min=6)),
            }
        )
    )
    async def post(self, request, data):
        """Handle registration request.

        The blocking auth call is run in the executor, bounded by REQUEST_TIMEOUT.
        """
        hass = request.app["hass"]
        cloud = hass.data[DOMAIN]

        with async_timeout.timeout(REQUEST_TIMEOUT):
            await hass.async_add_job(
                cloud.auth.register, data["email"], data["password"]
            )

        return self.json_message("ok")
class CloudResendConfirmView(HomeAssistantView):
    """Resend email confirmation code."""

    url = "/api/cloud/resend_confirm"
    name = "api:cloud:resend_confirm"

    @_handle_cloud_errors
    @RequestDataValidator(vol.Schema({vol.Required("email"): str}))
    async def post(self, request, data):
        """Handle resending confirm email code request.

        The blocking auth call is run in the executor, bounded by REQUEST_TIMEOUT.
        """
        hass = request.app["hass"]
        cloud = hass.data[DOMAIN]

        with async_timeout.timeout(REQUEST_TIMEOUT):
            await hass.async_add_job(cloud.auth.resend_email_confirm, data["email"])

        return self.json_message("ok")
class CloudForgotPasswordView(HomeAssistantView):
    """View to start Forgot Password flow."""

    url = "/api/cloud/forgot_password"
    name = "api:cloud:forgot_password"

    @_handle_cloud_errors
    @RequestDataValidator(vol.Schema({vol.Required("email"): str}))
    async def post(self, request, data):
        """Handle forgot password request.

        The blocking auth call is run in the executor, bounded by REQUEST_TIMEOUT.
        """
        hass = request.app["hass"]
        cloud = hass.data[DOMAIN]

        with async_timeout.timeout(REQUEST_TIMEOUT):
            await hass.async_add_job(cloud.auth.forgot_password, data["email"])

        return self.json_message("ok")
@callback
def websocket_cloud_status(hass, connection, msg):
    """Handle request for account info.

    Async friendly.
    """
    cloud = hass.data[DOMAIN]
    connection.send_message(
        websocket_api.result_message(msg["id"], _account_data(cloud))
    )
def _require_cloud_login(handler):
    """Websocket decorator that requires cloud to be logged in.

    Sends a 'not_logged_in' error and skips the handler otherwise.
    """

    @wraps(handler)
    def with_cloud_auth(hass, connection, msg):
        """Require to be logged into the cloud."""
        cloud = hass.data[DOMAIN]
        if not cloud.is_logged_in:
            connection.send_message(
                websocket_api.error_message(
                    msg["id"], "not_logged_in", "You need to be logged in to the cloud."
                )
            )
            return

        handler(hass, connection, msg)

    return with_cloud_auth
@_require_cloud_login
@websocket_api.async_response
async def websocket_subscription(hass, connection, msg):
    """Handle request for the account's subscription info."""
    cloud = hass.data[DOMAIN]

    with async_timeout.timeout(REQUEST_TIMEOUT):
        response = await cloud.fetch_subscription_info()

    if response.status != 200:
        connection.send_message(
            websocket_api.error_message(
                msg["id"], "request_failed", "Failed to request subscription"
            )
        )
        # Bug fix: previously execution fell through here, tried to parse the
        # failed response body and sent a result message after the error.
        return

    data = await response.json()

    # Check if a user is subscribed but local info is outdated
    # In that case, let's refresh and reconnect
    if data.get("provider") and not cloud.is_connected:
        _LOGGER.debug(
            "Found disconnected account with valid subscription, connecting"
        )
        await hass.async_add_executor_job(cloud.auth.renew_access_token)

        # Cancel reconnect in progress
        if cloud.iot.state != STATE_DISCONNECTED:
            await cloud.iot.disconnect()

        hass.async_create_task(cloud.iot.connect())

    connection.send_message(websocket_api.result_message(msg["id"], data))
@_require_cloud_login
@websocket_api.async_response
@websocket_api.websocket_command(
    {
        vol.Required("type"): "cloud/update_prefs",
        vol.Optional(PREF_ENABLE_GOOGLE): bool,
        vol.Optional(PREF_ENABLE_ALEXA): bool,
        vol.Optional(PREF_ALEXA_REPORT_STATE): bool,
        vol.Optional(PREF_GOOGLE_REPORT_STATE): bool,
        vol.Optional(PREF_GOOGLE_SECURE_DEVICES_PIN): vol.Any(None, str),
    }
)
async def websocket_update_prefs(hass, connection, msg):
    """Handle request to update cloud preferences.

    Everything in the message except the websocket envelope keys ("id",
    "type") is forwarded to prefs.async_update as keyword arguments.
    """
    cloud = hass.data[DOMAIN]

    changes = dict(msg)
    changes.pop("id")
    changes.pop("type")

    # If we turn alexa linking on, validate that we can fetch access token
    if changes.get(PREF_ALEXA_REPORT_STATE):
        try:
            with async_timeout.timeout(10):
                await cloud.client.alexa_config.async_get_access_token()
        except asyncio.TimeoutError:
            connection.send_error(
                msg["id"], "alexa_timeout", "Timeout validating Alexa access token."
            )
            return
        except (alexa_errors.NoTokenAvailable, RequireRelink):
            connection.send_error(
                msg["id"],
                "alexa_relink",
                "Please go to the Alexa app and re-link the Home Assistant "
                "skill and then try to enable state reporting.",
            )
            return

    await cloud.client.prefs.async_update(**changes)

    connection.send_message(websocket_api.result_message(msg["id"]))
@_require_cloud_login
@websocket_api.async_response
@_ws_handle_cloud_errors
async def websocket_hook_create(hass, connection, msg):
    """Create a cloudhook for the given webhook id."""
    created_hook = await hass.data[DOMAIN].cloudhooks.async_create(
        msg["webhook_id"], False
    )
    connection.send_message(websocket_api.result_message(msg["id"], created_hook))
@_require_cloud_login
@websocket_api.async_response
@_ws_handle_cloud_errors
async def websocket_hook_delete(hass, connection, msg):
    """Delete the cloudhook for the given webhook id."""
    await hass.data[DOMAIN].cloudhooks.async_delete(msg["webhook_id"])
    connection.send_message(websocket_api.result_message(msg["id"]))
def _account_data(cloud):
    """Generate the auth data JSON response."""
    if not cloud.is_logged_in:
        return {"logged_in": False, "cloud": STATE_DISCONNECTED}

    client = cloud.client
    remote = cloud.remote

    # Load remote certificate (None when no certificate has been issued)
    certificate = attr.asdict(remote.certificate) if remote.certificate else None

    return {
        "logged_in": True,
        "email": cloud.claims["email"],
        "cloud": cloud.iot.state,
        "prefs": client.prefs.as_dict(),
        "google_entities": client.google_user_config["filter"].config,
        "alexa_entities": client.alexa_user_config["filter"].config,
        "remote_domain": remote.instance_domain,
        "remote_connected": remote.is_connected,
        "remote_certificate": certificate,
    }
@websocket_api.require_admin
@_require_cloud_login
@websocket_api.async_response
@_ws_handle_cloud_errors
@websocket_api.websocket_command({"type": "cloud/remote/connect"})
async def websocket_remote_connect(hass, connection, msg):
    """Handle request for connect remote.

    Records the remote_enabled preference first, then establishes the
    remote connection, and replies with the refreshed account data.
    """
    cloud = hass.data[DOMAIN]
    await cloud.client.prefs.async_update(remote_enabled=True)
    await cloud.remote.connect()
    connection.send_result(msg["id"], _account_data(cloud))
@websocket_api.require_admin
@_require_cloud_login
@websocket_api.async_response
@_ws_handle_cloud_errors
@websocket_api.websocket_command({"type": "cloud/remote/disconnect"})
async def websocket_remote_disconnect(hass, connection, msg):
    """Handle request for disconnect remote.

    Records the remote_enabled preference first, then tears down the
    remote connection, and replies with the refreshed account data.
    """
    cloud = hass.data[DOMAIN]
    await cloud.client.prefs.async_update(remote_enabled=False)
    await cloud.remote.disconnect()
    connection.send_result(msg["id"], _account_data(cloud))
@websocket_api.require_admin
@_require_cloud_login
@websocket_api.async_response
@_ws_handle_cloud_errors
@websocket_api.websocket_command({"type": "cloud/google_assistant/entities"})
async def google_assistant_list(hass, connection, msg):
    """List all google assistant entities."""
    cloud = hass.data[DOMAIN]
    gconf = await cloud.client.get_google_config()
    entities = google_helpers.async_get_entities(hass, gconf)

    result = [
        {
            "entity_id": entity.entity_id,
            "traits": [trait.name for trait in entity.traits()],
            "might_2fa": entity.might_2fa(),
        }
        for entity in entities
    ]

    connection.send_result(msg["id"], result)
@websocket_api.require_admin
@_require_cloud_login
@websocket_api.async_response
@_ws_handle_cloud_errors
@websocket_api.websocket_command(
    {
        "type": "cloud/google_assistant/entities/update",
        "entity_id": str,
        vol.Optional("should_expose"): bool,
        vol.Optional("override_name"): str,
        vol.Optional("aliases"): [str],
        vol.Optional("disable_2fa"): bool,
    }
)
async def google_assistant_update(hass, connection, msg):
    """Update google assistant config."""
    cloud = hass.data[DOMAIN]

    # Strip the websocket envelope; everything else is a config change.
    changes = dict(msg)
    for meta_key in ("id", "type"):
        changes.pop(meta_key)

    await cloud.client.prefs.async_update_google_entity_config(**changes)

    updated = cloud.client.prefs.google_entity_configs.get(msg["entity_id"])
    connection.send_result(msg["id"], updated)
@websocket_api.require_admin
@_require_cloud_login
@websocket_api.async_response
@_ws_handle_cloud_errors
@websocket_api.websocket_command({"type": "cloud/alexa/entities"})
async def alexa_list(hass, connection, msg):
    """List all alexa entities."""
    cloud = hass.data[DOMAIN]
    entities = alexa_entities.async_get_entities(hass, cloud.client.alexa_config)

    result = [
        {
            "entity_id": entity.entity_id,
            "display_categories": entity.default_display_categories(),
            "interfaces": [ifc.name() for ifc in entity.interfaces()],
        }
        for entity in entities
    ]

    connection.send_result(msg["id"], result)
@websocket_api.require_admin
@_require_cloud_login
@websocket_api.async_response
@_ws_handle_cloud_errors
@websocket_api.websocket_command(
    {
        "type": "cloud/alexa/entities/update",
        "entity_id": str,
        vol.Optional("should_expose"): bool,
    }
)
async def alexa_update(hass, connection, msg):
    """Update alexa entity config."""
    cloud = hass.data[DOMAIN]

    # Strip the websocket envelope; everything else is a config change.
    changes = dict(msg)
    for meta_key in ("id", "type"):
        changes.pop(meta_key)

    await cloud.client.prefs.async_update_alexa_entity_config(**changes)

    updated = cloud.client.prefs.alexa_entity_configs.get(msg["entity_id"])
    connection.send_result(msg["id"], updated)
@websocket_api.require_admin
@_require_cloud_login
@websocket_api.async_response
@websocket_api.websocket_command({"type": "cloud/alexa/sync"})
async def alexa_sync(hass, connection, msg):
    """Sync with Alexa."""
    cloud = hass.data[DOMAIN]

    with async_timeout.timeout(10):
        try:
            success = await cloud.client.alexa_config.async_sync_entities()
        except alexa_errors.NoTokenAvailable:
            connection.send_error(
                msg["id"],
                "alexa_relink",
                "Please go to the Alexa app and re-link the Home Assistant skill.",
            )
            return

    if not success:
        connection.send_error(msg["id"], ws_const.ERR_UNKNOWN_ERROR, "Unknown error")
        return

    connection.send_result(msg["id"])
@websocket_api.async_response
@websocket_api.websocket_command({"type": "cloud/thingtalk/convert", "query": str})
async def thingtalk_convert(hass, connection, msg):
    """Convert a query."""
    cloud = hass.data[DOMAIN]

    with async_timeout.timeout(10):
        try:
            converted = await thingtalk.async_convert(cloud, msg["query"])
        except thingtalk.ThingTalkConversionError as err:
            connection.send_error(msg["id"], ws_const.ERR_UNKNOWN_ERROR, str(err))
        else:
            connection.send_result(msg["id"], converted)
| |
from builtins import str
from builtins import object
from astropy.io import fits
import numpy as np
import astropy.units as u
import pkg_resources
import six
# From https://heasarc.gsfc.nasa.gov/docs/software/fitsio/c/c_user/node20.html
# Codes for the data type of binary table columns and/or for the
# data type of variables when reading or writing keywords or data:
# DATATYPE TFORM CODE
# #define TBIT 1 /* 'X' */
# #define TBYTE 11 /* 8-bit unsigned byte, 'B' */
# #define TLOGICAL 14 /* logicals (int for keywords */
# /* and char for table cols 'L' */
# #define TSTRING 16 /* ASCII string, 'A' */
# #define TSHORT 21 /* signed short, 'I' */
# #define TLONG 41 /* signed long, */
# #define TLONGLONG 81 /* 64-bit long signed integer 'K' */
# #define TFLOAT 42 /* single precision float, 'E' */
# #define TDOUBLE 82 /* double precision float, 'D' */
# #define TCOMPLEX 83 /* complex (pair of floats) 'C' */
# #define TDBLCOMPLEX 163 /* double complex (2 doubles) 'M' */
# The following data type codes are also supported by CFITSIO:
# #define TINT 31 /* int */
# #define TSBYTE 12 /* 8-bit signed byte, 'S' */
# #define TUINT 30 /* unsigned int 'V' */
# #define TUSHORT 20 /* unsigned short 'U' */
# #define TULONG 40 /* unsigned long */
# The following data type code is only for use with fits_get_coltype
# #define TINT32BIT 41 /* signed 32-bit int, 'J' */
# Map from numpy scalar types to single-character FITS TFORM codes
# (see the CFITSIO table in the comment block above).
_NUMPY_TO_FITS_CODE = {
    # Integers
    np.int16: "I",
    np.int32: "J",
    np.int64: "K",
    # NOTE(review): unsigned integers are mapped onto the signed TFORM code
    # of the same width; values above the signed range would not round-trip
    # without TZERO/TSCAL handling — confirm this is intended.
    np.uint16: "I",
    np.uint32: "J",
    # Floating point
    np.float32: "E",
    np.float64: "D",
}
class FITSFile(object):
    """A FITS file consisting of a primary HDU plus optional extensions.

    We embed an ``HDUList`` instead of subclassing it because the HDUList
    class has some weird interaction with the __init__ and __new__ methods
    which makes it difficult to do so (we couldn't figure it out).
    """

    def __init__(self, primary_hdu=None, fits_extensions=None):

        if primary_hdu is None:
            primary_hdu = fits.PrimaryHDU()
        else:
            assert isinstance(primary_hdu, fits.PrimaryHDU)

        hdu_list = [primary_hdu]

        if fits_extensions is not None:
            hdu_list.extend([extension.hdu for extension in fits_extensions])

        self._hdu_list = fits.HDUList(hdus=hdu_list)

    def writeto(self, *args, **kwargs):
        self._hdu_list.writeto(*args, **kwargs)

    # Expose the same documentation as the wrapped method
    writeto.__doc__ = fits.HDUList.writeto.__doc__

    def __getitem__(self, item):
        return self._hdu_list[item]

    def info(self, output=None):
        self._hdu_list.info(output)

    info.__doc__ = fits.HDUList.info.__doc__

    def index_of(self, key):
        return self._hdu_list.index_of(key)

    index_of.__doc__ = fits.HDUList.index_of.__doc__
class FITSExtension(object):
    """A binary-table FITS extension built from column data and header cards.

    ``data_tuple`` is an iterable of (column_name, column_data) pairs and
    ``header_tuple`` an iterable of (keyword, value) pairs.  The TFORM code
    of each column is inferred from the first element of its data.
    """

    def __init__(self, data_tuple, header_tuple):

        # Generate the header from the (keyword, value) pairs
        header = fits.Header(header_tuple)

        # Loop over the columns and generate them
        fits_columns = []

        for column_name, column_data in data_tuple:

            # Get type of column
            # NOTE: we assume the type is the same for the entire column
            test_value = column_data[0]

            # By default a column does not have units, unless the content is
            # an astropy.Quantity
            units = None

            if isinstance(test_value, u.Quantity):

                # Probe the format
                try:
                    # Use the one already defined, if possible
                    fmt = _NUMPY_TO_FITS_CODE[column_data.dtype.type]
                except AttributeError:
                    # Try to infer it. Note that this could unwillingly
                    # upscale a float16 to a float32, for example
                    fmt = _NUMPY_TO_FITS_CODE[np.array(test_value.value).dtype.type]

                # check if this is a vector of quantities
                if test_value.shape:
                    fmt = "%i%s" % (test_value.shape[0], fmt)

                # Store the unit as text
                units = str(test_value.unit)

            elif isinstance(test_value, six.string_types):

                # Get maximum length, but use 1 as the minimum length so that
                # a column made up entirely of empty strings still works
                max_string_length = max(len(max(column_data, key=len)), 1)

                fmt = "%iA" % max_string_length

            elif np.isscalar(test_value):

                fmt = _NUMPY_TO_FITS_CODE[np.array(test_value).dtype.type]

            elif isinstance(test_value, (list, np.ndarray)):

                # Probably a column array
                # Check that we can convert it to a proper numpy type
                try:
                    # Get type of first number
                    col_type = np.array(test_value[0]).dtype.type
                except Exception:
                    # Bug fix: this was a bare except, which also swallowed
                    # KeyboardInterrupt/SystemExit
                    raise RuntimeError(
                        "Could not understand type of column %s" % column_name
                    )

                # Make sure we are not dealing with objects.
                # (np.object was a deprecated alias of the builtin and has
                # been removed from modern numpy; np.object_ is the real
                # numpy scalar type that np.array(...).dtype.type can be.)
                assert col_type != np.object_

                try:
                    _ = np.array(test_value, col_type)
                except Exception:
                    raise RuntimeError(
                        "Column %s contain data which cannot be coerced to %s"
                        % (column_name, col_type)
                    )
                else:
                    # see if it is a string array (np.bytes_ is the modern
                    # spelling of the removed np.string_ alias).  The dtype
                    # check only makes sense for ndarrays; a plain list of
                    # numbers has no .dtype (this used to crash).
                    if isinstance(test_value, np.ndarray) and \
                            test_value.dtype.type == np.bytes_:
                        max_string_length = max(column_data, key=len).dtype.itemsize
                        fmt = "%iA" % max_string_length
                    else:
                        # All good. Check the length
                        # NOTE: variable length arrays are not supported
                        line_length = len(test_value)
                        fmt = "%i%s" % (line_length, _NUMPY_TO_FITS_CODE[col_type])

            else:
                # Something we do not know
                raise RuntimeError(
                    "Column %s in dataframe contains objects which are not strings"
                    % column_name
                )

            this_column = fits.Column(
                name=column_name, format=fmt, unit=units, array=column_data
            )

            fits_columns.append(this_column)

        # Create the extension
        self._hdu = fits.BinTableHDU.from_columns(
            fits.ColDefs(fits_columns), header=header
        )

        # update the header to indicate that the file was created by 3ML
        self._hdu.header.set(
            "CREATOR",
            "3ML v.%s" % (pkg_resources.get_distribution("threeML").version),
            "(G.Vianello, giacomov@slac.stanford.edu)",
        )

    @property
    def hdu(self):
        """The underlying astropy BinTableHDU."""
        return self._hdu

    @classmethod
    def from_fits_file_extension(cls, fits_extension):
        """Build a FITSExtension from an existing astropy table HDU."""
        data = fits_extension.data

        data_tuple = [(name, data[name]) for name in data.columns.names]

        header_tuple = list(fits_extension.header.items())

        return cls(data_tuple, header_tuple)
| |
import sys
import os
import ctypes
if os.name == 'nt':
from cefpython1 import cefpython_py27 as cefpython
#else:
# import webkit
# import jswebkit
import gobject
import gtk
import pygtk
pygtk.require('2.0')
import gtk
import gobject
import re
import platform
import Widgets
def GetApplicationPath(file=None):
    """Resolve ``file`` against the application directory.

    With ``file=None`` the application directory itself is returned (no
    trailing separator).  Absolute paths and URL-like strings (anything
    matching ``scheme:``) are returned unchanged.
    """
    import re, os
    # If file is None return current directory without trailing slash.
    if file is None:
        file = ""
    # Only when relative path.
    if not file.startswith("/") and not file.startswith("\\") and (
            not re.search(r"^[\w-]+:", file)):
        # Frozen executables (py2exe/PyInstaller) have no usable __file__.
        if hasattr(sys, "frozen"):
            path = os.path.dirname(sys.executable)
        elif "__file__" in globals():
            path = os.path.dirname(os.path.realpath(__file__))
        else:
            path = os.getcwd()
        path = path + os.sep + file
        # Collapse runs of mixed separators to the platform separator and
        # strip any trailing separator (re.escape protects '\' on Windows).
        path = re.sub(r"[/\\]+", re.escape(os.sep), path)
        path = re.sub(r"[/\\]+$", "", path)
        return path
    return str(file)
def ExceptHook(type, value, traceObject):
    """sys.excepthook replacement: display the error and append it to
    outs/error.log with a timestamp."""
    import traceback, os, time
    # This hook does the following: in case of exception display it,
    # write to error.log, shutdown CEF and exit application.
    error = "\n".join(traceback.format_exception(type, value, traceObject))
    a = os.path.abspath("outs/error.log")
    with open(a, "a") as file:
        file.write("\n[%s] %s\n" % (time.strftime("%Y-%m-%d %H:%M:%S"), error))
    print("\n"+error+"\n")
    # NOTE(review): this early return makes the CEF shutdown and os._exit
    # below unreachable, so the application keeps running after an error —
    # confirm whether that is intentional or a leftover from debugging.
    return
    cefpython.QuitMessageLoop()
    cefpython.Shutdown()
    # So that "finally" does not execute.
    os._exit(1)
def init():
    """Initialize CEF with logging to outs/debug.log (Windows only).

    cefpython is only imported when os.name == 'nt' (see module imports),
    so all initialization is guarded by the platform check; on other
    platforms this is a no-op.
    """
    if os.name == 'nt':
        sys.excepthook = ExceptHook
        a = os.path.abspath("outs/debug.log")
        settings = {
            "log_severity": cefpython.LOGSEVERITY_INFO,
            "log_file": GetApplicationPath(a),
            "release_dcheck_enabled": True  # Enable only when debugging.
        }
        cefpython.Initialize(settings)
def close():
    """Terminate the process immediately (os._exit skips cleanup/atexit)."""
    os._exit(1)
class Window(gtk.Window):
    """Top-level window hosting the embedded browser widget.

    On Windows the CEF-based Browser is embedded; elsewhere the IFrame
    widget is used.  Closing the window only hides it (see esconder).
    """

    def __init__(self, url):
        super(Window, self).__init__(gtk.WINDOW_TOPLEVEL)
        self.set_size_request(400, 590)
        if os.name == 'nt':
            self.www = Browser(url, 550, 100)
        else:
            self.www = IFrame(url, 550, 100)
        hbox = gtk.VBox()
        self.add(hbox)
        hbox.pack_start(self.www)
        self.show_all()
        self.realize()
        self.set_url(url)
        # Hide instead of destroying when the user closes the window.
        self.connect('delete-event', self.esconder)

    def set_url(self, url):
        """Navigate the embedded browser to ``url`` and show the window."""
        self.www.open(url)
        self.show_all()

    def esconder(self, *args):
        # "esconder" = hide; returning True cancels the default destroy.
        self.hide_all()
        return True
class Browser(gtk.DrawingArea):  # Windows
    """GTK widget embedding a CEF browser (used on Windows only).

    Emits the custom 'mostrar' ("show") signal whenever the browser is
    (re)displayed, resized or navigated.
    """

    __gsignals__ = {'mostrar': (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE,
                                ())
                    }

    def __init__(self, url, x, y):
        gobject.threads_init()  # timer for messageloop
        super(Browser, self).__init__()
        self.exiting = False
        self.url = url
        # When True, execute_script() forwards JavaScript to the page.
        self.mapa = True
        # Pump the CEF message loop every 10 ms.
        gobject.timeout_add(10, self.OnTimer)
        self.set_property('can-focus', True)
        # The CEF browser can only be created once the native window exists.
        self.connect_after('realize', self.mostrar)
        self.set_size_request(x, y)

    def mostrar(self, *args):
        """Create the CEF browser as a child of this widget's native window."""
        print 'BROWSER MOSTRAR'
        windowID = self.get_window().handle
        windowInfo = cefpython.WindowInfo()
        windowInfo.SetAsChild(windowID)
        self.browser = cefpython.CreateBrowserSync(windowInfo,
                                                   browserSettings={},
                                                   navigateUrl=self.url)
        self.frame = self.browser.GetFocusedFrame()
        self.w = self.get_window()
        self.connect('size-allocate', self.OnSize)
        #cefpython.WindowUtils.OnSize(self.w.handle, 0, 0, 0)
        self.emit('mostrar')
        self.show_all()

    def OnTimer(self):
        # Returning False stops the gobject timer for good.
        if self.exiting:
            return False
        cefpython.MessageLoopWork()
        return True

    def OnFocusIn(self, widget, data):
        # This function is currently not called by any of code, but if you would like
        # for browser to have automatic focus add such line:
        # self.mainWindow.connect('focus-in-event', self.OnFocusIn)
        cefpython.WindowUtils.OnSetFocus(self.w.handle, 0, 0, 0)

    def OnSize(self, widget, sizeAlloc):
        # Keep the native CEF child window in sync with the GTK allocation.
        cefpython.WindowUtils.OnSize(self.w.handle, 0, 0, 0)
        self.emit('mostrar')

    def open(self, url):
        """Load ``url`` in the focused frame."""
        print url
        self.frame.LoadUrl(url)
        self.emit('mostrar')

    def execute_script(self, url):
        """Run a JavaScript snippet in the focused frame (when enabled)."""
        if self.mapa:
            print url
            self.frame.ExecuteJavascript(url)

    def switch(self, server):
        """Ask the user which site to show and navigate there."""
        lista = ((0, 'Mapa'), (1, 'Claro'), (2, 'Movistar'), (3, 'Ayuda'))
        enlaces = ((0, self.url), (1, 'http://www.internetclaro.com.pe'),
                   (2, 'http://www.movistar.com.pe/im'), (3, server + '/ayuda'))
        # NOTE(review): Alerta_Combo is used here without the Widgets prefix
        # (IFrame.switch uses Widgets.Alerta_Combo) — this looks like a
        # NameError waiting to happen; confirm which spelling is correct.
        dialogo = Alerta_Combo('Navegador', 'internet.png', 'Seleccione el enlace que desea ver:', lista)
        respuesta = dialogo.iniciar()
        self.frame.LoadUrl(enlaces[respuesta][1])
class IFrame(gtk.ScrolledWindow):  # Ubuntu
    """WebKit-based browser widget for non-Windows platforms.

    NOTE(review): the webkit/jswebkit imports at the top of this file are
    commented out, so webkit.WebView() raises NameError, which the bare
    except in __init__ silently turns into a printed 'error' — confirm
    whether WebKit support is meant to be enabled.
    """

    def __init__(self, url, x, y):
        super(IFrame, self).__init__()
        try:
            self.exiting = False
            self.url = url
            self.mapa = False
            self.connect_after('realize', self.mostrar)
            self.set_size_request(x, y)
            self.set_policy(gtk.POLICY_NEVER, gtk.POLICY_NEVER)
            self.browser = webkit.WebView()
            self.browser_frame = self.browser.get_main_frame()
            self.add(self.browser)
            self.browser.open(url)
        except:
            print 'error'

    def mostrar(self, *args):
        # Extracts the widget's memory address from its repr; the computed
        # windowID is currently unused (X11 embedding left disabled below).
        m = re.search("GtkScrolledWindow at 0x(\w+)", str(self))
        hexID = m.group(1)
        windowID = int(hexID, 16)
        #print 'windowID', windowID, hexID, self.get_window().xid
        #windowID = self.get_window().xid

    def OnTimer(self):
        if self.exiting:
            return False
        # NOTE(review): cefpython is only imported on Windows; this would
        # raise NameError if the timer were ever started on this platform.
        cefpython.MessageLoopWork()
        return True

    def OnFocusIn(self, widget, data):
        cefpython.WindowUtils.OnSetFocus(self.w.handle, 0, 0, 0)

    def open(self, url):
        # Navigation is currently disabled (only logs the URL).
        print 'browser', url
        #self.browser.open(url)

    def execute_script(self, url):
        """Evaluate JavaScript in the page via jswebkit (best effort)."""
        print url
        # self.browser.execute_script(url)
        try:
            ctx = jswebkit.JSContext(self.browser_frame.get_global_context())
            ctx.EvaluateScript(url)
        except:
            pass

    def switch(self, server):
        """Ask the user which site to show and navigate there."""
        lista = (('Mapa', 0), ('Claro', 1), ('Movistar', 2), ('Ayuda', 3))
        enlaces = (self.url, 'http://www.internetclaro.com.pe',
                   'http://www.movistar.com.pe/im', server + '/ayuda')
        dialogo = Widgets.Alerta_Combo('Navegador', 'internet.png', 'Seleccione el enlace que desea ver:', lista)
        respuesta = dialogo.iniciar()
        dialogo.cerrar()
        self.open(enlaces[respuesta])
        print enlaces[respuesta]
class Navegador(gtk.Window):
    """Window embedding the dispatch ("despacho") web UI."""

    def __init__(self, parent):
        super(Navegador, self).__init__(gtk.WINDOW_TOPLEVEL)
        self.http = parent.http
        # NOTE(review): the format string below has three %s placeholders but
        # only two arguments are supplied — this raises TypeError at runtime.
        url = 'http://%s/despacho/ingresar?sessionid=%s&next=pantalla%s' % (self.http.dominio,
                                                                           self.http.sessionid)
        # NOTE(review): 'Chrome' is not imported in this module; if this file
        # is Chrome.py itself these should probably be plain Browser/IFrame.
        if os.name == 'nt':
            self.www = Chrome.Browser(url, 550, 100)
        else:
            self.www = Chrome.IFrame(url, 550, 100)
        vbox = gtk.VBox(False, 0)
        self.add(vbox)
        hbox = gtk.HBox(False, 0)
        vbox.pack_start(hbox, False, False, 0)
        vbox.pack_start(self.www, True, True, 0)
        #but_exportar = Widgets.Button('excel.png', 'Exportar a Excel')
        #hbox.pack_start(but_exportar, True, True, 0)
        #but_exportar.connect('clicked', 'self.exportar')

    #def exportar(self, *args):
    #    url = self.frame.GetUrl()
if __name__ == '__main__':
    # Manual smoke test: open the tracking login page in its own window.
    init()
    url = 'http://tracking.tcontur.com/despacho/login-mapa?usuario=None&password=None'
    w = Window(url)
    gtk.main()
| |
#!/usr/bin/env python
from __future__ import print_function
import argparse
import errno
import itertools
import json
import multiprocessing
import os
import shutil
import subprocess
import sys
import traceback
from shutil import copyfile
# SDK-relative directories searched for bare (non-framework)
# .swiftinterface files.
BARE_INTERFACE_SEARCH_PATHS = [
    "usr/lib/swift",
    "System/iOSSupport/usr/lib/swift"
]

# Default SDK-relative framework directories scanned when none are given
# on the command line.
DEFAULT_FRAMEWORK_INTERFACE_SEARCH_PATHS = [
    "System/Library/Frameworks",
    "System/iOSSupport/System/Library/Frameworks"
]

# The standard library module gets special handling (-parse-stdlib).
STDLIB_NAME = 'Swift'

# Reported via -machine-parseable-monotonic-version so callers can compare
# versions of this tool.
MONOTONIC_VERSION = 1
def create_parser():
    """Build and return the argparse parser for this tool's command line."""
    parser = argparse.ArgumentParser(
        description="Builds an SDK's swiftinterfaces into swiftmodules. "
                    "Always searches usr/lib/swift in addition to whichever "
                    "framework directories are passed on the command line.",
        prog=os.path.basename(__file__),
        usage='%(prog)s -o output/ [INTERFACE_SEARCH_DIRS]',
        epilog='Environment variables: SDKROOT, SWIFT_EXEC, '
               'SWIFT_FORCE_MODULE_LOADING')
    parser.add_argument('interface_framework_dirs', nargs='*',
                        metavar='INTERFACE_SEARCH_DIRS',
                        help='Relative paths to search for frameworks with '
                             'interfaces (default: System/Library/Frameworks)')
    parser.add_argument('-o', dest='output_dir',
                        help='Directory to which the output will be emitted '
                             '(required)')
    parser.add_argument('-j', dest='jobs', type=int,
                        help='The number of parallel jobs to execute '
                             '(default: # of cores)')
    parser.add_argument('-v', dest='verbose', action='store_true',
                        help='Print command invocations and progress info')
    parser.add_argument('-n', dest='dry_run', action='store_true',
                        help='Dry run: don\'t actually run anything')
    parser.add_argument('-sdk', default=os.getenv('SDKROOT'),
                        help='SDK to find frameworks and interfaces in '
                             '(default: $SDKROOT)')
    parser.add_argument('-F', dest='framework_dirs', metavar='DIR',
                        action='append', default=[],
                        help='Add additional framework search paths')
    parser.add_argument('-Fsystem', '-iframework',
                        dest='system_framework_dirs', metavar='DIR',
                        action='append', default=[],
                        help='Add additional system framework search paths')
    parser.add_argument('-Fsystem-iosmac',
                        dest='iosmac_system_framework_dirs', metavar='DIR',
                        action='append', default=[],
                        help='Add system framework search paths '
                             'for iOSMac only')
    parser.add_argument('-I', dest='include_dirs', metavar='DIR',
                        action='append', default=[],
                        help='Add additional header/module search paths')
    parser.add_argument('-module-cache-path',
                        help='Temporary directory to store intermediate info')
    parser.add_argument('-log-path',
                        help='Directory to write stdout/stderr output to')
    parser.add_argument('-skip-stdlib', action='store_true',
                        help='Don\'t build the standard library interface')
    parser.add_argument('-disable-modules-validate-system-headers',
                        action='store_true',
                        help='Disable modules verification for system headers')
    parser.add_argument('-xfails', metavar='PATH',
                        help='JSON file containing an array of the modules '
                             'expected to fail')
    parser.add_argument('-check-only', action='store_true',
                        help='Assume the resulting modules will be thrown '
                             'away (may be faster)')
    parser.add_argument('-ignore-non-stdlib-failures', action='store_true',
                        help='Treat all modules but the stdlib as XFAILed')
    parser.add_argument('-debug-crash-compiler', action='store_true',
                        help='Have the compiler crash (for testing purposes)')
    parser.add_argument('-machine-parseable-monotonic-version',
                        action='store_true',
                        help='For comparing versions of this tool')
    return parser
def fatal(msg):
    """Print ``msg`` to stderr and abort the process with exit status 1."""
    sys.stderr.write("%s\n" % (msg,))
    sys.exit(1)
def run_command(args, dry_run):
    """Run ``args`` as a subprocess; return (exitcode, stdout, stderr).

    In dry-run mode nothing is executed and a successful empty result
    is returned.  On KeyboardInterrupt the child is terminated and the
    interrupt re-raised.
    """
    if dry_run:
        return (0, "", "")

    proc = subprocess.Popen(args, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    try:
        out, err = proc.communicate()
    except KeyboardInterrupt:
        proc.terminate()
        raise
    return (proc.returncode, out.decode('utf-8'), err.decode('utf-8'))
def make_dirs_if_needed(path):
    """Create ``path`` (including parents); an existing directory is fine.

    EAFP form rather than exist_ok= because this script still supports
    Python 2 (see the __future__ import at the top of the file).
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            return
        raise
class NegatedSet:
    """A membership test matching everything EXCEPT the given items."""

    def __init__(self, contents):
        self._contents = frozenset(contents)

    def __contains__(self, item):
        excluded = item in self._contents
        return not excluded
class ModuleFile:
    """A single .swiftinterface to compile."""

    def __init__(self, name, path, is_expected_to_fail):
        # Module name (e.g. "Foundation").
        self.name = name
        # Absolute or SDK-relative path to the .swiftinterface file.
        self.path = path
        # True when the module is listed in the xfails set.
        self.is_expected_to_fail = is_expected_to_fail
def collect_slices(xfails, swiftmodule_dir):
    """Yield a ModuleFile for each .swiftinterface slice in the directory."""
    if not os.path.isdir(swiftmodule_dir):
        return

    module_name, extension = \
        os.path.splitext(os.path.basename(swiftmodule_dir))
    assert extension == ".swiftmodule"
    is_xfail = module_name in xfails

    for entry in os.listdir(swiftmodule_dir):
        if os.path.splitext(entry)[1] == ".swiftinterface":
            yield ModuleFile(module_name,
                             os.path.join(swiftmodule_dir, entry),
                             is_xfail)
def collect_framework_modules(sdk, xfails, sdk_relative_framework_dirs):
    """Yield ModuleFiles for every framework .swiftinterface in the SDK."""
    for rel_dir in sdk_relative_framework_dirs:
        framework_dir = os.path.join(sdk, rel_dir)
        if not os.access(framework_dir, os.R_OK):
            continue
        for entry in os.listdir(framework_dir):
            stem, extension = os.path.splitext(entry)
            if extension != ".framework":
                continue
            module_name = os.path.basename(stem)
            swiftmodule = os.path.join(framework_dir, entry, "Modules",
                                       module_name + ".swiftmodule")
            if os.access(swiftmodule, os.R_OK):
                for module_file in collect_slices(xfails, swiftmodule):
                    yield module_file
def collect_non_framework_modules(sdk, xfails, sdk_relative_search_dirs):
    """Yield ModuleFiles for every bare .swiftinterface under the dirs."""
    for rel_dir in sdk_relative_search_dirs:
        search_dir = os.path.join(sdk, rel_dir)
        for dir_path, _, file_names in os.walk(search_dir, followlinks=True):
            if os.path.splitext(dir_path)[1] == ".swiftmodule":
                # A .swiftmodule directory: each file inside is a slice.
                for module_file in collect_slices(xfails, dir_path):
                    yield module_file
                continue
            # Loose interfaces living directly in the search path.
            for interface in file_names:
                module_name, extension = os.path.splitext(interface)
                if extension != ".swiftinterface":
                    continue
                yield ModuleFile(module_name,
                                 os.path.join(dir_path, interface),
                                 module_name in xfails)
def should_retry_compilation(stderr):
    """Return True if stderr shows a known transient Clang-modules failure
    that is worth retrying after clearing the module cache."""
    retry_markers = (
        "has been modified since the module file",
        "mismatched umbrella headers in submodule",
        "is out of date and needs to be rebuilt: signature mismatch",
        "current parser token 'include'",
        "current parser token 'import'",
    )
    return any(marker in stderr for marker in retry_markers)
def run_with_module_cache_retry(command_args, module_cache_path, dry_run):
    """Hack: runs a command several times, clearing the module cache if we get
    an error about header files being modified during the run.

    This shouldn't be necessary (the cached PCM files should automatically be
    regenerated) but there seems to still be a bug in Clang that we haven't
    tracked down yet.
    """
    RETRIES = 3
    attempts_stderr = ""
    for r in range(RETRIES):
        status, stdout, stderr = run_command(command_args, dry_run)
        if status == 0:
            break
        if not should_retry_compilation(stderr):
            break
        # Retryable modules bug: drop the cache and run the command again.
        if module_cache_path:
            shutil.rmtree(module_cache_path, ignore_errors=True)

        # If all retries fail, output information for each instance.
        attempts_stderr += (
            "\n*** Compilation attempt {}/{} failed with modules bugs. "
            "Error output:\n".format(r + 1, RETRIES))
        attempts_stderr += stderr
        # Report the accumulated output from every failed attempt so far.
        stderr = attempts_stderr
    return (status, stdout, stderr)
def log_output_to_file(content, module_name, interface_base, label, log_path):
    """Write ``content`` to <log_path>/<module>-<interface>-<label>.txt.

    No-op when logging is disabled (empty log_path) or there is nothing
    to write.
    """
    if not log_path or not content:
        return
    make_dirs_if_needed(log_path)
    log_name = "-".join((module_name, interface_base, label)) + ".txt"
    with open(os.path.join(log_path, log_name), "w") as output_file:
        output_file.write(content)
def looks_like_iosmac(interface_base):
    """Guess from the interface file name whether this is an iOSMac slice."""
    return interface_base.find('ios-macabi') != -1
def process_module(module_file):
    """Compile one .swiftinterface into the output dir (pool worker).

    Returns (module_file, exit_status, stdout, stderr).  Never raises:
    any exception is converted into a failing result so one bad module
    cannot take down the whole pool.
    """
    global args, shared_output_lock
    try:
        interface_base, _ = \
            os.path.splitext(os.path.basename(module_file.path))

        # Compiler to use: $SWIFT_EXEC, or 'swiftc' next to this script.
        swiftc = os.getenv('SWIFT_EXEC',
                           os.path.join(os.path.dirname(__file__), 'swiftc'))
        command_args = [
            swiftc, '-frontend',
            '-build-module-from-parseable-interface',
            '-sdk', args.sdk,
            # Already-built prebuilt modules can serve as dependencies.
            '-prebuilt-module-cache-path', args.output_dir,
        ]
        # Give each worker process its own module cache to avoid races.
        module_cache_path = ""
        if args.module_cache_path:
            module_cache_path = os.path.join(args.module_cache_path,
                                             str(os.getpid()))
            command_args += ('-module-cache-path', module_cache_path)
        if args.debug_crash_compiler:
            command_args += ('-debug-crash-immediately',)
        if not args.check_only:
            command_args += (
                '-serialize-parseable-module-interface-dependency-hashes',)
        if args.disable_modules_validate_system_headers:
            command_args += (
                '-disable-modules-validate-system-headers',)

        # FIXME: This shouldn't be necessary, but the module name is checked
        # before the frontend action is.
        if module_file.name == STDLIB_NAME:
            command_args += ('-parse-stdlib',)

        if looks_like_iosmac(interface_base):
            for system_framework_path in args.iosmac_system_framework_dirs:
                command_args += ('-Fsystem', system_framework_path)
            command_args += ('-Fsystem', os.path.join(args.sdk, "System",
                                                      "iOSSupport", "System",
                                                      "Library", "Frameworks"))
        for include_path in args.include_dirs:
            command_args += ('-I', include_path)
        for system_framework_path in args.system_framework_dirs:
            command_args += ('-Fsystem', system_framework_path)
        for framework_path in args.framework_dirs:
            command_args += ('-F', framework_path)

        command_args += ('-module-name', module_file.name, module_file.path)

        # Non-canonical interface names (e.g. per-architecture slices) are
        # emitted into a <module>.swiftmodule directory instead.
        output_path = os.path.join(args.output_dir,
                                   module_file.name + ".swiftmodule")
        if interface_base != module_file.name:
            make_dirs_if_needed(output_path)
            output_path = os.path.join(output_path,
                                       interface_base + ".swiftmodule")
        command_args += ('-o', output_path)

        if args.verbose:
            with shared_output_lock:
                print("# Starting " + module_file.path)
                print(' '.join(command_args))
                sys.stdout.flush()

        status, stdout, stderr = run_with_module_cache_retry(
            command_args, module_cache_path=module_cache_path,
            dry_run=args.dry_run)

        log_output_to_file(stdout, module_file.name, interface_base, "out",
                           log_path=args.log_path)
        log_output_to_file(stderr, module_file.name, interface_base, "err",
                           log_path=args.log_path)

        return (module_file, status, stdout, stderr)
    except BaseException:
        # We're catching everything here because we don't want to take down the
        # other jobs.
        return (module_file, 1, "",
                "".join(traceback.format_exception(*sys.exc_info())))
def set_up_child(parent_args, lock):
    """Pool initializer: share the parsed args and output lock with workers."""
    global args, shared_output_lock
    args = parent_args
    shared_output_lock = lock
def process_module_files(pool, module_files):
    """Compile all modules on the pool and report per-module results.

    Returns 0 when everything succeeded (XFAILed failures do not count);
    otherwise the exit status of the first unexpected failure.
    """
    results = pool.imap_unordered(process_module, module_files)

    overall_exit_status = 0
    for (module_file, exit_status, stdout, stderr) in results:
        # Serialize output so lines from different workers don't interleave.
        with shared_output_lock:
            if exit_status != 0:
                print("# ", end="")
                if module_file.is_expected_to_fail:
                    print("(XFAIL) ", end="")
                else:
                    print("(FAIL) ", end="")
                print(module_file.path)
                if (not module_file.is_expected_to_fail) or args.verbose:
                    print(stdout, end="")
                    print(stderr, end="", file=sys.stderr)
            elif module_file.is_expected_to_fail:
                # Unexpected pass of an XFAILed module.
                print("# (UPASS) " + module_file.path)
            elif args.verbose:
                print("# (PASS) " + module_file.path)
            sys.stdout.flush()

        # Record only the first unexpected failure.
        if overall_exit_status == 0 and \
                not module_file.is_expected_to_fail:
            overall_exit_status = exit_status
    return overall_exit_status
def getSDKVersion(sdkroot):
    """Return the 'Version' value from the SDK's SDKSettings.json.

    Aborts via fatal() when the file is missing, unparsable, or has no
    'Version' key.  (The original fatal() call sat after a `return`
    inside the `with` block and was unreachable.)
    """
    settingPath = os.path.join(sdkroot, 'SDKSettings.json')
    try:
        with open(settingPath) as json_file:
            data = json.load(json_file)
            return data['Version']
    except (IOError, OSError, ValueError, KeyError):
        fatal("Failed to get SDK version from: " + settingPath)
def copySystemVersionFile(sdkroot, output):
    """Best-effort copy of the SDK's SystemVersion.plist into *output*.

    Any error is reported to stdout instead of raised so the build can
    continue; the version file is only kept for record-keeping.
    """
    src = os.path.join(sdkroot,
                       'System/Library/CoreServices/SystemVersion.plist')
    dest = os.path.join(output, 'SystemVersion.plist')
    try:
        copyfile(src, dest)
    except BaseException as e:
        print("cannot copy from " + src + " to " + dest + ": " + str(e))
def main():
    """Entry point: prebuild Swift module interfaces for the active SDK.

    Parses command-line arguments, validates the environment, creates a
    worker pool, then compiles every discovered swiftinterface file —
    the standard library first, so later modules can load it.  Exits
    with the first failing status of a module expected to succeed.
    """
    global args, shared_output_lock
    parser = create_parser()
    args = parser.parse_args()
    # Version query mode: print the tool version and exit immediately.
    if args.machine_parseable_monotonic_version:
        print(MONOTONIC_VERSION)
        sys.exit(0)
    # Default the compiler's module-loading mode unless the caller
    # already chose one via the environment.
    if 'SWIFT_FORCE_MODULE_LOADING' not in os.environ:
        os.environ['SWIFT_FORCE_MODULE_LOADING'] = 'prefer-serialized'
    if not args.output_dir:
        fatal("argument -o is required")
    if not args.sdk:
        fatal("SDKROOT must be set in the environment")
    if not os.path.isdir(args.sdk):
        fatal("invalid SDK: " + args.sdk)
    # if the given output dir ends with 'prebuilt-modules', we should
    # append the SDK version number so all modules will be built into
    # the SDK-versioned sub-directory.
    if os.path.basename(args.output_dir) == 'prebuilt-modules':
        args.output_dir = os.path.join(args.output_dir, getSDKVersion(args.sdk))
    # Expected-failure set: either "everything outside the stdlib", or
    # the contents of the -xfails JSON file.
    xfails = ()
    if args.ignore_non_stdlib_failures:
        if args.xfails:
            print("warning: ignoring -xfails because "
                  "-ignore-non-stdlib-failures was provided", file=sys.stderr)
        xfails = NegatedSet((STDLIB_NAME,))
    elif args.xfails:
        with open(args.xfails) as xfails_file:
            xfails = json.load(xfails_file)
    make_dirs_if_needed(args.output_dir)
    # Copy a file containing SDK build version into the prebuilt module dir,
    # so we can keep track of the SDK version we built from.
    copySystemVersionFile(args.sdk, args.output_dir)
    # Build the worker pool; set_up_child installs args and the lock as
    # globals in each worker.
    if 'ANDROID_DATA' not in os.environ:
        shared_output_lock = multiprocessing.Lock()
        pool = multiprocessing.Pool(args.jobs, set_up_child,
                                    (args, shared_output_lock))
    else:
        # Android doesn't support Python's multiprocessing as it doesn't have
        # sem_open, so switch to a ThreadPool instead.
        import threading
        shared_output_lock = threading.Lock()
        from multiprocessing.pool import ThreadPool
        pool = ThreadPool(args.jobs, set_up_child,
                          (args, shared_output_lock))
    interface_framework_dirs = (args.interface_framework_dirs or
                                DEFAULT_FRAMEWORK_INTERFACE_SEARCH_PATHS)
    # Discover every interface to build, framework and non-framework.
    module_files = list(itertools.chain(
        collect_non_framework_modules(args.sdk, xfails,
                                      BARE_INTERFACE_SEARCH_PATHS),
        collect_framework_modules(args.sdk, xfails, interface_framework_dirs)))
    if not args.skip_stdlib:
        # Always do the stdlib first, so that we can use it in later steps
        stdlib_module_files = (
            x for x in module_files if x.name == STDLIB_NAME)
        status = process_module_files(pool, stdlib_module_files)
        if status != 0:
            sys.exit(status)
    non_stdlib_module_files = (
        x for x in module_files if x.name != STDLIB_NAME)
    status = process_module_files(pool, non_stdlib_module_files)
    if os.name == 'nt':
        # NOTE(review): ExitProcess bypasses normal interpreter shutdown;
        # presumably required on Windows — confirm before changing.
        import ctypes
        Kernel32 = ctypes.cdll.LoadLibrary("Kernel32.dll")
        Kernel32.ExitProcess(ctypes.c_ulong(status))
    sys.exit(status)
# Allow use both as a script and as an importable module.
if __name__ == '__main__':
    main()
| |
#!/usr/bin/env python
# Copyright (c) 2011-2018, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
"""
Vertical Profile of Reflectivity (VPR)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Precipitation is 3-dimensional in space. The vertical distribution of
precipitation (and thus reflectivity) is typically non-uniform. As the height
of the radar beam increases with the distance from the radar location
(beam elevation, earth curvature), one sweep samples from different heights.
The effects of the non-uniform VPR and the different sampling heights need to
be accounted for if we are interested in the precipitation near the ground or
in defined heights. This module is intended to provide a set of tools to
account for these effects.
The first step will normally be to reference the polar volume data in a
3-dimensional Cartesian coordinate system. The three dimensional Cartesian
coordinates of the original polar volume data can be computed using
:func:`wradlib.vpr.volcoords_from_polar`.
Then, we can create regular 3-D grids in order to analyse the vertical profile
of reflectivity or rainfall intensity. For some applications you might want
to create so-called `Constant Altitude Plan Position Indicators (CAPPI)
<https://en.wikipedia.org/wiki/Constant_altitude_plan_position_indicator>`_
in order to make radar observations at different distances from the radar more
comparable. Basically, a CAPPI is simply one slice out of a 3-D volume grid.
Analogously, we will refer to the elements in a three-dimensional Cartesian grid
as *voxels*. In wradlib, you can create
CAPPIS (:class:`~wradlib.vpr.CAPPI`) and Pseudo CAPPIs
(:class:`~wradlib.vpr.PseudoCAPPI`) for different altitudes at once.
Here's an example of how a set of CAPPIs can be created from synthetic polar
volume data::
import wradlib
import numpy as np
# define elevation and azimuth angles, ranges, radar site coordinates,
# projection
elevs = np.array([0.5,1.5,2.4,3.4,4.3,5.3,6.2,7.5,8.7,10,12,14,16.7,19.5])
azims = np.arange(0., 360., 1.)
ranges = np.arange(0., 120000., 1000.)
sitecoords = (14.924218,120.255547,500.)
proj = osr.SpatialReference()
proj.ImportFromEPSG(32651)
# create Cartesian coordinates corresponding the location of the
# polar volume bins
polxyz = wradlib.vpr.volcoords_from_polar(sitecoords, elevs,
azims, ranges, proj) # noqa
poldata = wradlib.vpr.synthetic_polar_volume(polxyz)
# this is the shape of our polar volume
polshape = (len(elevs),len(azims),len(ranges))
# now we define the coordinates for the 3-D grid (the CAPPI layers)
x = np.linspace(polxyz[:,0].min(), polxyz[:,0].max(), 120)
y = np.linspace(polxyz[:,1].min(), polxyz[:,1].max(), 120)
z = np.arange(500.,10500.,500.)
xyz = wradlib.util.gridaspoints(x, y, z)
gridshape = (len(x), len(y), len(z))
# create an instance of the CAPPI class and
# use it to create a series of CAPPIs
gridder = wradlib.vpr.CAPPI(polxyz, xyz, maxrange=ranges.max(),
gridshape=gridshape, ipclass=wradlib.ipol.Idw)
gridded = np.ma.masked_invalid( gridder(poldata) ).reshape(gridshape)
# plot results
levels = np.linspace(0,100,25)
wradlib.vis.plot_max_plan_and_vert(x, y, z, gridded, levels=levels,
cmap=pl.cm.viridis)
.. autosummary::
:nosignatures:
:toctree: generated/
volcoords_from_polar
make_3d_grid
CartesianVolume
CAPPI
PseudoCAPPI
"""
import numpy as np
from . import georef as georef
from . import ipol as ipol
from . import util as util
class CartesianVolume():
    """Map a polar radar volume onto a regular 3-D Cartesian grid.

    Interpolates polar volume data (multiple elevation angles) onto the
    voxels of a regular Cartesian grid, masking out "blind" voxels as
    defined by the subclass's ``_get_mask``.

    Parameters
    ----------
    polcoords : :class:`numpy:numpy.ndarray`
        of shape (num bins, 3)
    gridcoords : :class:`numpy:numpy.ndarray`
        of shape (num voxels, 3)
    gridshape : tuple
        shape of the Cartesian grid (num x, num y, num z)
    maxrange : float
        The maximum radar range (must be the same for each elevation angle)
    minelev : float
        The minimum elevation angle of the volume (degree)
    maxelev : float
        The maximum elevation angle of the volume (degree)
    ipclass : object
        an interpolation class from :mod:`wradlib.ipol`
    ipargs : `**kwargs`
        keyword arguments corresponding to ``ipclass``

    Returns
    -------
    output : :class:`numpy:numpy.ndarray`
        float 1-d ndarray of the same length as ``gridcoords`` (num voxels,)

    Examples
    --------
    See :ref:`/notebooks/workflow/recipe2.ipynb`.
    """
    def __init__(self, polcoords, gridcoords, gridshape=None,
                 maxrange=None, minelev=None, maxelev=None,
                 ipclass=ipol.Idw, **ipargs):
        # Radar site as a (1, 3) array: horizontal centroid of all bins
        # at the lowest bin altitude.
        # TODO: pass projected radar location as argument
        # (allows processing of incomplete polar volumes)
        self.radloc = np.array([[polcoords[:, 0].mean(),
                                 polcoords[:, 1].mean(),
                                 polcoords[:, 2].min()]])
        # Boolean mask flagging the blind voxels of the 3-D volume grid.
        self.mask = self._get_mask(gridcoords, polcoords, gridshape,
                                   maxrange, minelev, maxelev)
        # The interpolator targets only the visible (unmasked) voxels.
        self.trgix = np.where(np.logical_not(self.mask))
        self.ip = ipclass(src=polcoords, trg=gridcoords[self.trgix], **ipargs)

    def __call__(self, data):
        """Interpolate polar *data* onto the 3-D Cartesian grid.

        Parameters
        ----------
        data : :class:`numpy:numpy.ndarray`
            1-d array of length (num radar bins in volume,);
            must have the same length as polcoords

        Returns
        -------
        output : :class:`numpy:numpy.ndarray`
            1-d array of length (num voxels,); masked voxels are NaN
        """
        gridded = np.full(len(self.mask), np.nan)
        gridded[self.trgix] = self.ip(data)
        return gridded

    def _get_mask(self, gridcoords, polcoords=None, gridshape=None,
                  maxrange=None, minelev=None, maxelev=None):
        """Return the blind-voxel mask (base class masks nothing).

        Subclasses such as CAPPI or PseudoCAPPI override this method.

        Parameters
        ----------
        gridcoords : :class:`numpy:numpy.ndarray`
            of shape (num voxels, 3)
        polcoords : :class:`numpy:numpy.ndarray`
            of shape (num bins, 3)
        gridshape : tuple
            shape of the Cartesian grid (num x, num y, num z)
        maxrange : float
            The maximum radar range
            (must be the same for each elevation angle,
            and same unit as gridcoords)
        minelev : float
            The minimum elevation angle of the volume (degree)
        maxelev : float
            The maximum elevation angle of the volume (degree)

        Returns
        -------
        output : :class:`numpy:numpy.ndarray`
            Boolean array of length (num voxels,); all False here
        """
        return np.zeros(len(gridcoords), dtype=bool)
class CAPPI(CartesianVolume):
    """Create a Constant Altitude Plan Position Indicator (CAPPI)

    A CAPPI gives the value of a target variable (typically reflectivity
    in dBZ, but here also other variables such as e.g. rainfall intensity)
    at a defined altitude.

    To produce a CAPPI, first instantiate this class, then call the
    instance with the actual polar volume data: the call returns the
    CAPPI grid.

    Parameters
    ----------
    polcoords : :class:`numpy:numpy.ndarray`
        coordinate array of shape (num bins, 3)
        Represents the 3-D coordinates of the original radar bins
    gridcoords : :func:`numpy:numpy.array`
        coordinate array of shape (num voxels, 3)
        Represents the 3-D coordinates of the Cartesian grid
    gridshape : tuple
        shape of the original polar volume (num elevation angles,
        num azimuth angles, num range bins);
        size must correspond to the length of polcoords
    maxrange : float
        The maximum radar range (must be the same for each elevation angle)
    ipclass : object
        an interpolation class from :mod:`wradlib.ipol`
    ipargs : `**kwargs`
        keyword arguments corresponding to ``ipclass``

    Returns
    -------
    output : :class:`numpy:numpy.ndarray`
        float 1-d ndarray of the same length as ``gridcoords`` (num voxels,)

    See Also
    --------
    out_of_range
    blindspots

    Examples
    --------
    See :ref:`/notebooks/workflow/recipe2.ipynb`.
    """
    def _get_mask(self, gridcoords, polcoords, gridshape,
                  maxrange, minelev, maxelev):
        """Mask the "blind" voxels of the Cartesian 3-D volume.

        A CAPPI voxel is blind when it lies below the lowest sweep
        (`minelev`), above the highest sweep (`maxelev`), or beyond
        `maxrange`.
        """
        is_below, is_above, is_beyond = blindspots(self.radloc, gridcoords,
                                                   minelev, maxelev, maxrange)
        # Mask a voxel if any of the three conditions holds (De-Morgan
        # equivalent of the original triple-negation expression).
        return is_below | is_above | is_beyond
class PseudoCAPPI(CartesianVolume):
    """Create a Pseudo-CAPPI Constant Altitude Plan Position Indicator (CAPPI)

    Unlike a CAPPI (:class:`wradlib.vpr.CAPPI`), the blind areas *below*
    and *above* the radar are not masked but filled by interpolation, so
    "blind" regions below the radar are effectively filled from the
    lowest available elevation angle.  Only areas beyond the *range* of
    the radar are masked out.

    To produce a Pseudo CAPPI, first instantiate this class, then call
    the instance with the actual polar volume data: the call returns the
    Pseudo CAPPI grid.

    Parameters
    ----------
    polcoords : :class:`numpy:numpy.ndarray`
        coordinate array of shape (num bins, 3)
        Represents the 3-D coordinates of the original radar bins
    gridcoords : :class:`numpy:numpy.ndarray`
        coordinate array of shape (num voxels, 3)
        Represents the 3-D coordinates of the Cartesian grid
    gridshape : tuple
        shape of the original polar volume (num elevation angles,
        num azimuth angles, num range bins);
        size must correspond to the length of polcoords
    maxrange : float
        The maximum radar range (must be the same for each elevation angle)
    ipclass : object
        an interpolation class from :mod:`wradlib.ipol`
    ipargs : `**kwargs`
        keyword arguments corresponding to ``ipclass``

    Returns
    -------
    output : :class:`numpy:numpy.ndarray`
        float 1-d ndarray of the same length as ``gridcoords`` (num voxels,)

    See Also
    --------
    out_of_range

    Examples
    --------
    See :ref:`/notebooks/workflow/recipe2.ipynb`.
    """
    def _get_mask(self, gridcoords, polcoords, gridshape,
                  maxrange, minelev, maxelev):
        """Mask the "blind" voxels of the Cartesian 3-D volume grid.

        For the Pseudo CAPPI only voxels beyond `maxrange` are blind, so
        the range test itself is the mask (the original double
        `logical_not` was a no-op on the boolean array).
        """
        return out_of_range(self.radloc, gridcoords, maxrange)
def out_of_range(center, gridcoords, maxrange):
    """Flag all grid nodes lying beyond the radar's maximum range.

    Parameters
    ----------
    center : tuple
        radar location
    gridcoords : :class:`numpy:numpy.ndarray`
        array of 3-D coordinates with shape (num voxels, 3)
    maxrange : float
        maximum range (same unit as gridcoords)

    Returns
    -------
    output : :class:`numpy:numpy.ndarray`
        1-D Boolean array of length len(gridcoords); True where the node
        is outside the radar range
    """
    # Compare squared distances so no square root is needed.
    offsets = gridcoords - center
    squared_dist = (offsets ** 2).sum(axis=-1)
    return squared_dist > maxrange ** 2
def blindspots(center, gridcoords, minelev, maxelev, maxrange):
    """Mask blind regions of the radar, marked on a 3-D grid

    The radar is blind below its lowest sweep, above its highest sweep,
    and beyond its maximum range.  Three boolean arrays are returned,
    flagging grid nodes that are (1) below the radar, (2) above the
    radar, (3) beyond the maximum range.

    Parameters
    ----------
    center : tuple
        radar location
    gridcoords : :class:`numpy:numpy.ndarray`
        array of 3-D coordinates with shape (num voxels, 3)
    minelev : float
        The minimum elevation angle of the volume (degree)
    maxelev : float
        The maximum elevation angle of the volume (degree)
    maxrange : float
        maximum range (same unit as gridcoords)

    Returns
    -------
    output : tuple of three Boolean arrays, each of length (num grid points)
    """
    # Distance of every 3-D grid node from the radar site (center).
    node_range = np.sqrt(((gridcoords - center) ** 2).sum(axis=-1))
    node_alt = gridcoords[:, 2]
    site_alt = center[:, 2]
    # Beam altitude of the lowest sweep at each node's range.
    lowest_beam = georef.bin_altitude(node_range, minelev, 0,
                                      re=6371000) + site_alt
    # Beam altitude of the highest sweep at each node's range.
    highest_beam = georef.bin_altitude(node_range, maxelev, 0,
                                       re=6371000) + site_alt
    below = node_alt < lowest_beam
    above = node_alt > highest_beam
    beyond = node_range > maxrange
    return below, above, beyond
def volcoords_from_polar(sitecoords, elevs, azimuths, ranges, proj=None):
    """Create Cartesian coordinates for regular polar volumes

    Parameters
    ----------
    sitecoords : sequence of three floats indicating the radar position
        (longitude in decimal degrees, latitude in decimal degrees,
        height a.s.l. in meters)
    elevs : sequence of elevation angles
    azimuths : sequence of azimuth angles
    ranges : sequence of ranges
    proj : osr spatial reference object
        GDAL OSR Spatial Reference Object describing projection

    Returns
    -------
    output : :class:`numpy:numpy.ndarray`
        (num volume bins, 3)

    Examples
    --------
    See :ref:`/notebooks/workflow/recipe2.ipynb`.
    """
    # Accept a scalar as well as a sequence of elevation angles.
    elevs = np.array([elevs]).ravel()
    # Expand (elevation, azimuth, range) into one full polar grid.
    el, az, r = util.meshgrid_n(elevs, azimuths, ranges)
    # Project every bin, then flatten to one (num bins, 3) array.
    projected = georef.spherical_to_proj(r, az, el, sitecoords, proj=proj)
    return projected.reshape(-1, 3)
def volcoords_from_polar_irregular(sitecoords, elevs, azimuths,
                                   ranges, proj=None):
    """Create Cartesian coordinates for polar volumes with irregular \
sweep specifications

    Parameters
    ----------
    sitecoords : sequence of three floats indicating the radar position
        (longitude in decimal degrees, latitude in decimal degrees,
        height a.s.l. in meters)
    elevs : sequence of elevation angles
    azimuths : sequence of azimuth angles, or one such sequence
        per elevation angle
    ranges : sequence of ranges, or one such sequence per elevation angle
    proj : object
        GDAL OSR Spatial Reference Object describing projection

    Returns
    -------
    output : :class:`numpy:numpy.ndarray`
        (num volume bins, 3)
    """
    def _as_array(arg, name):
        # Coerce to an ndarray, reporting which argument was at fault.
        try:
            return np.array(arg)
        except Exception:
            print("Could not create an array from argument <%s>." % name)
            print("The following exception was raised:")
            raise

    def _shared_by_all_sweeps(arg, name):
        # True if one flat sequence is shared by every sweep; False if a
        # separate sequence is supplied per elevation angle.
        # Fix: the original copy-pasted this check and reported
        # "<azimuths>" even when the <ranges> argument was malformed.
        if len(arg) == len(elevs):
            # are the items of the argument sequences themselves?
            isseq = [util.issequence(elem) for elem in arg]
            assert not ((False in isseq) and (True in isseq)), \
                "Argument <%s> contains both iterable " \
                "and non-iterable items." % name
            if True in isseq:
                return False
        return True

    # check elevs array, first: must be one-dimensional
    elevs = _as_array(elevs, "elevs")
    assert (elevs.ndim == 1) and (elevs.dtype != np.dtype("object")), \
        "Argument <elevs> in wradlib.volcoords_from_polar must be a 1-D array."
    # Is there one azimuths/ranges sequence for all sweeps, or one each?
    azimuths = _as_array(azimuths, "azimuths")
    oneaz4all = _shared_by_all_sweeps(azimuths, "azimuths")
    ranges = _as_array(ranges, "ranges")
    onerange4all = _shared_by_all_sweeps(ranges, "ranges")
    if oneaz4all and onerange4all:
        # Regular volume: delegate to the simple constructor.
        return volcoords_from_polar(sitecoords, elevs, azimuths, ranges, proj)
    # Irregular volume: replicate any shared sequence so there is exactly
    # one azimuths and one ranges sequence per elevation angle ...
    if onerange4all:
        ranges = np.array([ranges for i in range(len(elevs))])
    if oneaz4all:
        azimuths = np.array([azimuths for i in range(len(elevs))])
    # ... and then build the polar grid sweep by sweep.
    el = np.array([])
    az = np.array([])
    r = np.array([])
    for i, elev in enumerate(elevs):
        az_tmp, r_tmp = np.meshgrid(azimuths[i], ranges[i])
        el = np.append(el, np.repeat(elev, len(azimuths[i]) * len(ranges[i])))
        az = np.append(az, az_tmp.ravel())
        r = np.append(r, r_tmp.ravel())
    # get projected coordinates
    coords = georef.spherical_to_proj(r, az, el, sitecoords, proj=proj)
    return coords.reshape(-1, 3)
def make_3d_grid(sitecoords, proj, maxrange, maxalt, horiz_res, vert_res):
    """Generate Cartesian coordinates for a regular 3-D grid from radar specs.

    Parameters
    ----------
    sitecoords : tuple
        Radar location coordinates in lon, lat
    proj : object
        GDAL OSR Spatial Reference Object describing projection
    maxrange : float
        maximum radar range (same unit as SRS defined by ``proj``,
        typically meters)
    maxalt : float
        maximum altitude to which the 3-d grid should extend (meters)
    horiz_res : float
        horizontal resolution of the 3-d grid (same unit as
        SRS defined by ``proj``, typically meters)
    vert_res : float
        vertical resolution of the 3-d grid (meters)

    Returns
    -------
    output : :class:`numpy:numpy.ndarray`, tuple
        float array of shape (num grid points, 3), plus a 3-tuple
        giving the grid shape (num z, num y, num x)
    """
    # Project the radar site into the target SRS; the horizontal grid is
    # a square of half-width maxrange centered on the site.
    center = georef.reproject(sitecoords[0], sitecoords[1],
                              projection_target=proj)
    llx = center[0] - maxrange
    lly = center[1] - maxrange
    x = np.arange(llx, llx + 2 * maxrange + horiz_res, horiz_res)
    y = np.arange(lly, lly + 2 * maxrange + horiz_res, horiz_res)
    # Vertical axis runs from ground level (0 m) up to maxalt.
    z = np.arange(0., maxalt + vert_res, vert_res)
    gridshape = (len(z), len(y), len(x))
    return util.gridaspoints(z, y, x), gridshape
def synthetic_polar_volume(coords):
    """Return a totally arbitrary synthetic polar volume - just for testing

    Parameters
    ----------
    coords : :class:`numpy:numpy.ndarray`
        (num volume bins, 3), as returned by volcoords_from_polar

    Returns
    -------
    output : :class:`numpy:numpy.ndarray`
        float 1-d array of shape (num volume bins,), rescaled so the
        maximum value is 100.
        (The original docstring wrongly claimed shape (num bins, 3).)
    """
    # Normalise x and y to roughly [0, 10]; convert z to kilometres.
    x = coords[:, 0] * 10 / np.max(coords[:, 0])
    y = coords[:, 1] * 10 / np.max(coords[:, 1])
    z = coords[:, 2] / 1000.
    # Oscillating horizontal pattern, decaying with height.
    out = np.abs(np.sin(x * y)) * np.exp(-z)
    # Rescale so the maximum is exactly 100.
    out = out * 100. / out.max()
    return out
def norm_vpr_stats(volume, reference_layer, stat=np.mean, **kwargs):
    """Compute a statistic over all normalised vertical profiles of a volume.

    Every vertical profile of the Cartesian 3-d ``volume`` is normalised
    by its value in ``reference_layer``; ``stat`` is then applied across
    all profiles for each vertical layer (e.g. an average vertical
    profile with the default ``stat``).

    Parameters
    ----------
    volume : :class:`numpy:numpy.ndarray` or
        :class:`numpy.ma.core.MaskedArray`
        Cartesian 3-d grid with shape (num vertical layers, num x intervals,
        num y intervals)
    reference_layer : integer
        index of the vertical layer of ``volume`` used to normalise all
        vertical profiles
    stat : function
        typically a numpy statistics function (defaults to numpy.mean)
    kwargs : further keyword arguments taken by ``stat``

    Returns
    -------
    output : :class:`numpy:numpy.ndarray` or :class:`numpy.ma.core.MaskedArray`
        of shape (num vertical layers,): the statistic of the normalised
        profiles per layer (e.g. the mean normalised vertical profile
        when numpy.mean is used)
    """
    # Normalise every profile by its value in the reference layer.
    normalised = volume / volume[reference_layer]
    # Collapse the two horizontal axes into one profile axis ...
    profiles_per_layer = np.prod(normalised.shape[-2:])
    flat = normalised.reshape((-1, profiles_per_layer))
    # ... and apply the statistic across profiles for each layer.
    return stat(flat, axis=1, **kwargs)
# Module smoke line: announce when executed directly rather than imported.
if __name__ == '__main__':
    print('wradlib: Calling module <vpr> as main...')
| |
from itertools import izip, chain, repeat
import requests
import re
from abc import abstractmethod
class BaseParser(object):
    """Shared plumbing for the ESPN bottom-line score parsers."""

    def group(self, n, iterable, padvalue=None):
        """Yield the items of *iterable* in tuples of *n*, padding the
        final tuple with *padvalue*.

        Uses the classic grouper idiom: n references to one shared
        iterator are zipped together, consuming n items per tuple.
        """
        shared = chain(iterable, repeat(padvalue, n - 1))
        return izip(*([shared] * n))

    # NOTE(review): without an ABCMeta metaclass @abstractmethod is not
    # enforced at instantiation; the explicit raise below is the guard.
    @abstractmethod
    def get_scores_json(self):
        """Fetch and parse the scoreboard; implemented by subclasses."""
        raise NotImplementedError
class NcaafParser(BaseParser):
    """Parser for ESPN's NCAA football bottom-line score feed."""

    def __init__(self):
        self.data_url = 'http://sports.espn.go.com/ncf/bottomline/scores'
        self.box_score_url = 'http://sports.espn.go.com/ncf/boxscore?gameId={game_id}'

    def get_scores_json(self):
        """Fetch the feed and return a list of parsed game dicts."""
        data = self._get_web_data(self.data_url)
        return self._parse_ncaaf_scores(data)

    def _get_web_data(self, fetch_url):
        """Download the raw feed and split it into (score, link) pairs."""
        # request raw data from ESPN url
        r = requests.get(fetch_url)
        # The feed is URL-encoded key=value pairs joined by '&'; undo the
        # encoding and keep only the score/link fields for this sport.
        encoded = [e.replace('%26', '&') for e in r.text.replace('%20', ' ').split('&') if
                   re.match(r'^ncf_s_(left|right|url)[0-9]+=.*', e)]
        # group the strings into games: [score string, link string]
        return [list(e) for e in self.group(2, encoded)]

    def _parse_ncaaf_scores(self, raw):
        """Dispatch each (score, link) pair to the matching parser."""
        games = []
        for game in raw:
            game_type = self._determine_game_type(game[0])
            if game_type in ['complete', 'inprogress']:
                games.append(self._parse_in_progres_or_finished_game(game[0], game[1]))
            else:
                games.append(self._parse_future_game(game[0], game[1]))
        return games

    def _determine_game_type(self, gamestring):
        """Classify a score string as 'complete', 'unplayed' or 'inprogress'."""
        # set up regex to determine type (raw strings fix the invalid
        # escape sequences of the original patterns)
        complete = re.compile(r'.*\(FINAL.*\) ?$')
        to_play = re.compile(r'.*\((SAT|SUN|MON|TUE|WED|THU|FRI).*\)$')
        # make type determination
        if to_play.match(gamestring):
            return 'unplayed'
        elif complete.match(gamestring):
            return 'complete'
        else:
            return 'inprogress'

    def _parse_in_progres_or_finished_game(self, gamestring, gamelink):
        """Parse an in-progress or finished game into a score dict.

        Capture groups: winner marker '^', optional '(rank)', team name,
        score — for away then home — plus the trailing '(status)'.
        """
        game_record = re.compile(
            r'(?:.*=)(\^)?(\(\d{1,2}\))?([^0-9]+)(\d{1,2})(?: {3})(\^)?(\(\d{1,2}\))?([^0-9]+)(\d{1,2}).*(\(.*\))')
        m = game_record.match(gamestring)
        g = m.groups()
        m2 = re.match(r'^(?:.*=)(.*)', gamelink)
        # Fix: the original built an unused `url` local by hand-concatenating
        # a duplicate of box_score_url; use the template consistently.
        game_id = m2.groups()[0]
        game = {
            'away': {
                'winner': True if g[0] else False,
                'ranked': int(g[1].replace('(', '').replace(')', '')) if g[1] else False,
                'team': g[2].strip(),
                'score': int(g[3].strip())
            },
            'home': {
                'winner': True if g[4] else False,
                'ranked': int(g[5].replace('(', '').replace(')', '')) if g[5] else False,
                'team': g[6].strip(),
                'score': int(g[7].strip())
            },
            'status': g[8].strip('(').strip(')'),
            '_links': {
                'score': {
                    'href': self.box_score_url.format(game_id=game_id)
                }
            }
        }
        return game

    def _parse_future_game(self, gamestring, gamelink):
        """Parse an unplayed game (``A at B (date)``) into a score dict."""
        game_record = re.compile(r'(?:.*=)(\(\d{1,2}\))?(.*)(?:\ at\ )(\(\d{1,2}\))?(.*)(\(.*\))')
        game_link = re.compile(r'^(?:.*=)(.*)')
        m = game_record.match(gamestring)
        g = m.groups()
        m2 = game_link.match(gamelink)
        game = {
            'home': {
                'team': g[3].strip(),
                'rank': int(g[2].replace('(', '').replace(')', '')) if g[2] else False
            },
            'away': {
                'team': g[1].strip(),
                'rank': int(g[0].replace('(', '').replace(')', '')) if g[0] else False
            },
            'status': '',
            '_links': {
                'score': {
                    'href': self.box_score_url.format(game_id=m2.groups()[0])
                }
            }
        }
        return game
class NcaabParser(BaseParser):
    """Parser for ESPN's NCAA basketball bottom-line score feed."""

    def __init__(self):
        self.data_url = 'http://sports.espn.go.com/ncb/bottomline/scores'
        self.box_score_url = 'http://sports.espn.go.com/ncb/boxscore?gameId={game_id}'

    def get_scores_json(self):
        """Fetch the feed and return a list of parsed game dicts."""
        data = self._get_web_data(self.data_url)
        return self._parse_ncaab_scores(data)

    def _get_web_data(self, fetch_url):
        """Download the raw feed and split it into (score, link) pairs."""
        # request raw data from ESPN url
        r = requests.get(fetch_url)
        # The feed is URL-encoded key=value pairs joined by '&'; undo the
        # encoding and keep only the score/link fields for this sport.
        encoded = [e.replace('%26', '&') for e in r.text.replace('%20', ' ').split('&') if
                   re.match(r'^ncb_s_(left|right|url)[0-9]+=.*', e)]
        # group the strings into games: [score string, link string]
        return [list(e) for e in self.group(2, encoded)]

    def _parse_ncaab_scores(self, raw):
        """Dispatch each (score, link) pair to the matching parser.

        Fix: leftover debug `print` statements (absent from the sibling
        NcaafParser) have been removed.
        """
        games = []
        for game in raw:
            game_type = self._determine_game_type(game[0])
            if game_type in ['complete', 'inprogress']:
                games.append(self._parse_in_progres_or_finished_game(game[0], game[1]))
            else:
                games.append(self._parse_future_game(game[0], game[1]))
        return games

    def _determine_game_type(self, gamestring):
        """Classify a score string as 'complete', 'unplayed' or 'inprogress'."""
        # set up regex to determine type (raw strings fix the invalid
        # escape sequences of the original patterns)
        complete = re.compile(r'.*\(FINAL.*\) ?$')
        # Unplayed games carry a tip-off time such as "(7:00 PM ET)".
        to_play = re.compile(r'.*\(\d{1,2}:\d{1,2} (AM|PM) [A-Z]{2}\)$')
        # make type determination
        if to_play.match(gamestring):
            return 'unplayed'
        elif complete.match(gamestring):
            return 'complete'
        else:
            return 'inprogress'

    def _parse_in_progres_or_finished_game(self, gamestring, gamelink):
        """Parse an in-progress or finished game into a score dict.

        Capture groups: winner marker '^', optional '(rank)', team name,
        score — for away then home — plus the trailing '(status)'.
        """
        game_record = re.compile(
            r'(?:.*=)(\^)?(\(\d{1,2}\))?([^0-9]+)(\d{1,2})(?: {3})(\^)?(\(\d{1,2}\))?([^0-9]+)(\d{1,2}).*(\(.*\))')
        m = game_record.match(gamestring)
        g = m.groups()
        m2 = re.match(r'^(?:.*=)(.*)', gamelink)
        # Extract the game id once instead of re-reading m2.groups().
        game_id = m2.groups()[0]
        game = {
            'away': {
                'winner': True if g[0] else False,
                'ranked': int(g[1].replace('(', '').replace(')', '')) if g[1] else None,
                'team': g[2].strip(),
                'score': int(g[3].strip())
            },
            'home': {
                'winner': True if g[4] else False,
                'ranked': int(g[5].replace('(', '').replace(')', '')) if g[5] else None,
                'team': g[6].strip(),
                'score': int(g[7].strip())
            },
            'status': g[8].strip('(').strip(')'),
            'gameId': game_id,
            '_links': {
                'score': {
                    'href': self.box_score_url.format(game_id=game_id)
                }
            }
        }
        return game

    def _parse_future_game(self, gamestring, gamelink):
        """Parse an unplayed game (``A at B (time)``) into a score dict."""
        game_record = re.compile(r'(?:.*=)(\((\d{1,2})\))?(.+)(?=at)at (\((\d{1,2})\))?(.+)(?=\()\((.*)\)')
        game_link = re.compile(r'^(?:.*=)(.*)')
        m = game_record.match(gamestring)
        g = m.groups()
        m2 = game_link.match(gamelink)
        game_id = m2.groups()[0]
        game = {
            'home': {
                'winner': False,
                'score': None,
                'team': g[2].strip(),
                'rank': int(g[1].replace('(', '').replace(')', '')) if g[1] else None
            },
            'away': {
                'winner': False,
                'score': None,
                'team': g[5].strip(),
                'rank': int(g[4].replace('(', '').replace(')', '')) if g[4] else None
            },
            'status': g[6],
            'gameId': game_id,
            '_links': {
                'score': {
                    'href': self.box_score_url.format(game_id=game_id)
                }
            }
        }
        return game
| |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add two nullable free-text columns to Result."""
        # Adding field 'Result.mo_pers'
        db.add_column(u'cmp_result', 'mo_pers',
                      self.gf('django.db.models.fields.TextField')(null=True, blank=True),
                      keep_default=False)
        # Adding field 'Result.establish_pers'
        db.add_column(u'cmp_result', 'establish_pers',
                      self.gf('django.db.models.fields.TextField')(null=True, blank=True),
                      keep_default=False)
def backwards(self, orm):
# Deleting field 'Result.mo_pers'
db.delete_column(u'cmp_result', 'mo_pers')
# Deleting field 'Result.establish_pers'
db.delete_column(u'cmp_result', 'establish_pers')
models = {
'build.building': {
'Meta': {'object_name': 'Building'},
'address': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'approve_status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'area': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'area_cmp': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'}),
'build_state': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'build_year': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'cad_num': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'db_index': 'True'}),
'cad_passport': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cad_sum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'clinic': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'complete_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'contract': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['build.Contract']", 'null': 'True', 'blank': 'True'}),
'developer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Developer']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'driveway_num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'driveways': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'electric_supply': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'entrance_door': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'finish_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2018, 12, 31, 0, 0)'}),
'flat_num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'floor': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'floors': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'gas_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'hallway': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Hallway']", 'null': 'True', 'blank': 'True'}),
'heating': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'hot_water_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_doors': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'is_balcony': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_clother_drying': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_dustbin_area': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_heat_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_intercom': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_loggia': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_parking': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_playground': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_routes': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_water_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'kindergarden': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'kitchen': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Kitchen']", 'null': 'True', 'blank': 'True'}),
'market': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']"}),
'mo_fond_doc_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'mo_fond_doc_num': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'ownership_doc_num': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'ownership_year': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'payment_perspective': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'planing_floor': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'public_transport': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'readiness': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'room': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Room']", 'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 1, 1, 0, 0)'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'water_removal': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'water_settlement': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wc': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.WC']", 'null': 'True', 'blank': 'True'}),
'window_constructions': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'build.contract': {
'Meta': {'object_name': 'Contract'},
'address': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'area': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'creation_form': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'developer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Developer']", 'null': 'True', 'blank': 'True'}),
'docs': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['build.ContractDocuments']", 'null': 'True', 'blank': 'True'}),
'finish_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2018, 12, 31, 0, 0)'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'has_trouble_docs': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True'}),
'num': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'period_of_payment': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True'}),
'start_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 1, 1, 0, 0)'}),
'summ_mo_money': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'summ_without_mo_money': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'summa': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'summa_fed': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'summa_reg': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'build.contractdocuments': {
'Meta': {'object_name': 'ContractDocuments'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mun_contracts': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'build.ground': {
'Meta': {'object_name': 'Ground'},
'address': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'approve_status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'area': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'area_cmp': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'}),
'build_state': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'build_year': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'cad_num': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2048', 'db_index': 'True'}),
'cad_passport': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cad_sum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'clinic': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'complete_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'contract': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['build.Contract']", 'null': 'True', 'blank': 'True'}),
'developer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Developer']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'driveways': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'electric_supply': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'entrance_door': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'finish_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'finish_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2018, 12, 31, 0, 0)'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'floor': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'floors': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'gas_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'hallway': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Hallway']", 'null': 'True', 'blank': 'True'}),
'heating': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'hot_water_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_doors': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'is_balcony': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_clother_drying': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_dustbin_area': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_heat_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_intercom': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_loggia': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_parking': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_playground': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_routes': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_water_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'kindergarden': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'kitchen': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Kitchen']", 'null': 'True', 'blank': 'True'}),
'market': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']"}),
'mo_fond_doc_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'mo_fond_doc_num': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'ownership_doc_num': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'ownership_year': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'payment_perspective': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'planing_floor': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'public_transport': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'readiness': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'room': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Room']", 'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'start_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 1, 1, 0, 0)'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'water_removal': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'water_settlement': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wc': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.WC']", 'null': 'True', 'blank': 'True'}),
'window_constructions': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'cmp.auction': {
'Meta': {'object_name': 'Auction'},
'area': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'area_cmp': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'}),
'contract': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['build.Contract']", 'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'electric_supply': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'entrance_door': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'}),
'finish_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2018, 12, 31, 0, 0)'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'gas_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'hallway': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.AuctionHallway']", 'null': 'True', 'blank': 'True'}),
'heating': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'hot_water_supply': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_doors': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'}),
'is_balcony': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_heat_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_loggia': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_water_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'kitchen': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.AuctionKitchen']", 'null': 'True', 'blank': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True'}),
'num': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'open_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'proposal_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'room': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.AuctionRoom']", 'null': 'True', 'blank': 'True'}),
'stage': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'start_price': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'start_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 1, 1, 0, 0)'}),
'water_removal': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'water_settlement': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'wc': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.AuctionWC']", 'null': 'True', 'blank': 'True'}),
'window_constructions': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'})
},
'cmp.comparedata': {
'Meta': {'object_name': 'CompareData'},
'area': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'area_cmp': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'}),
'clinic': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'cmp_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 5, 11, 0, 0)', 'blank': 'True'}),
'driveways': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'electric_supply': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'entrance_door': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'floor': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'floors': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'gas_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'heating': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'hot_water_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_doors': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'is_balcony': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_clother_drying': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_dustbin_area': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_heat_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_intercom': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_loggia': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_parking': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_playground': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_routes': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_water_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'kindergarden': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'market': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'planing_floor': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'public_transport': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'water_removal': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'water_settlement': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'window_constructions': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'cmp.copyauction': {
'Meta': {'object_name': 'CopyAuction'},
'area': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'area_cmp': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'}),
'contract': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['build.Contract']", 'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'electric_supply': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'entrance_door': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'}),
'finish_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2018, 12, 31, 0, 0)'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'gas_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'hallway': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.AuctionHallway']", 'null': 'True', 'blank': 'True'}),
'heating': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'hot_water_supply': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_doors': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'}),
'is_balcony': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_heat_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_loggia': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_water_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'kitchen': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.AuctionKitchen']", 'null': 'True', 'blank': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True'}),
'num': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'open_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'proposal_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'room': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.AuctionRoom']", 'null': 'True', 'blank': 'True'}),
'stage': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'start_price': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'start_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 1, 1, 0, 0)'}),
'water_removal': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'water_settlement': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'wc': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.AuctionWC']", 'null': 'True', 'blank': 'True'}),
'window_constructions': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'})
},
'cmp.person': {
'Meta': {'object_name': 'Person'},
'birth_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'contact_phone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'})
},
'cmp.result': {
'Meta': {'object_name': 'Result'},
'building': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['build.Building']", 'null': 'True', 'blank': 'True'}),
'check_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'cmp_data': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmp.CompareData']", 'null': 'True', 'blank': 'True'}),
'contract': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['build.Contract']", 'null': 'True', 'blank': 'True'}),
'doc_files': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'doc_list': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'establish_pers': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'finish_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2018, 12, 31, 0, 0)'}),
'ground': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['build.Ground']", 'null': 'True', 'blank': 'True'}),
'hallway': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.ResultHallway']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kitchen': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.ResultKitchen']", 'null': 'True', 'blank': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']", 'null': 'True'}),
'mo_pers': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'readiness': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'recommend': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'room': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.ResultRoom']", 'null': 'True', 'blank': 'True'}),
'start_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 1, 1, 0, 0)'}),
'wc': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.ResultWC']", 'null': 'True', 'blank': 'True'})
},
u'core.auctionhallway': {
'Meta': {'object_name': 'AuctionHallway', '_ormbases': ['core.BaseHallway']},
u'basehallway_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseHallway']", 'unique': 'True', 'primary_key': 'True'}),
'ceiling': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'}),
'floor': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'}),
'wall': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'})
},
u'core.auctionkitchen': {
'Meta': {'object_name': 'AuctionKitchen', '_ormbases': ['core.BaseKitchen']},
u'basekitchen_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseKitchen']", 'unique': 'True', 'primary_key': 'True'}),
'ceiling': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'}),
'floor': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'}),
'stove': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'wall': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'})
},
u'core.auctionroom': {
'Meta': {'object_name': 'AuctionRoom', '_ormbases': ['core.BaseRoom']},
u'baseroom_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseRoom']", 'unique': 'True', 'primary_key': 'True'}),
'ceiling': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'}),
'floor': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'}),
'wall': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'})
},
u'core.auctionwc': {
'Meta': {'object_name': 'AuctionWC', '_ormbases': ['core.BaseWC']},
u'basewc_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseWC']", 'unique': 'True', 'primary_key': 'True'}),
'ceiling': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'}),
'floor': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'}),
'separate': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'wall': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'}),
'wc_ceiling': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'wc_floor': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'wc_wall': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'})
},
'core.basehallway': {
'Meta': {'object_name': 'BaseHallway'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'core.basekitchen': {
'Meta': {'object_name': 'BaseKitchen'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sink_with_mixer': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'core.baseroom': {
'Meta': {'object_name': 'BaseRoom'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'core.basewc': {
'Meta': {'object_name': 'BaseWC'},
'bath_with_mixer': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_toilet': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_tower_dryer': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sink_with_mixer': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'core.developer': {
'Meta': {'object_name': 'Developer'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'boss_position': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'face_list': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'})
},
u'core.hallway': {
'Meta': {'object_name': 'Hallway', '_ormbases': ['core.BaseHallway']},
u'basehallway_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseHallway']", 'unique': 'True', 'primary_key': 'True'}),
'ceiling': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'floor': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wall': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
u'core.kitchen': {
'Meta': {'object_name': 'Kitchen', '_ormbases': ['core.BaseKitchen']},
u'basekitchen_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseKitchen']", 'unique': 'True', 'primary_key': 'True'}),
'ceiling': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'floor': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'stove': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wall': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
u'core.resulthallway': {
'Meta': {'object_name': 'ResultHallway', '_ormbases': [u'core.Hallway']},
'ceiling_hook': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'hallway_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.Hallway']", 'unique': 'True', 'primary_key': 'True'}),
'heaters': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'lamp': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'smoke_filter': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sockets': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'switches': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
u'core.resultkitchen': {
'Meta': {'object_name': 'ResultKitchen', '_ormbases': [u'core.Kitchen']},
'ceiling_hook': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'heaters': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'kitchen_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.Kitchen']", 'unique': 'True', 'primary_key': 'True'}),
'lamp': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'smoke_filter': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sockets': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'switches': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
u'core.resultroom': {
'Meta': {'object_name': 'ResultRoom', '_ormbases': [u'core.Room']},
'ceiling_hook': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'heaters': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'lamp': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'room_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.Room']", 'unique': 'True', 'primary_key': 'True'}),
'smoke_filter': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sockets': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'switches': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
u'core.resultwc': {
'Meta': {'object_name': 'ResultWC', '_ormbases': [u'core.WC']},
'ceiling_hook': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'heaters': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'lamp': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'smoke_filter': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sockets': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'switches': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'wc_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.WC']", 'unique': 'True', 'primary_key': 'True'})
},
u'core.room': {
'Meta': {'object_name': 'Room', '_ormbases': ['core.BaseRoom']},
u'baseroom_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseRoom']", 'unique': 'True', 'primary_key': 'True'}),
'ceiling': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'floor': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wall': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
u'core.wc': {
'Meta': {'object_name': 'WC', '_ormbases': ['core.BaseWC']},
u'basewc_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseWC']", 'unique': 'True', 'primary_key': 'True'}),
'ceiling': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'floor': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'separate': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wall': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wc_ceiling': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wc_floor': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wc_wall': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'mo.mo': {
'Meta': {'object_name': 'MO'},
'common_amount': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'common_economy': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'common_fed_amount': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'common_percentage': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'common_reg_amount': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'common_spent': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'creation_form': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '24', 'null': 'True', 'blank': 'True'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'has_trouble': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'home_fed_orphans': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'home_orphans': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'home_reg_orphans': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2048'}),
'planing_home_orphans': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['cmp']
| |
# rfc3339.py -- Implementation of the majority of RFC 3339 for python.
# Copyright (c) 2008, 2009, 2010 LShift Ltd. <query@lshift.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Implementation of the majority of http://www.ietf.org/rfc/rfc3339.txt.
Use datetime.datetime.isoformat() as an inverse of the various parsing
routines in this module.
Limitations, with respect to RFC 3339:
- Section 4.3, "Unknown Local Offset Convention", is not implemented.
- Section 5.6, "Internet Date/Time Format", is the ONLY supported format
implemented by the various parsers in this module. (Section 5.6 is
reproduced in its entirety below.)
- Section 5.7, "Restrictions", is left to the datetime.datetime constructor
to implement, with the exception of limits on timezone
minutes-east-of-UTC magnitude. In particular, leap seconds are not
addressed by this module. (And it appears that they are not supported
by datetime, either.)
Potential Improvements:
- Support for leap seconds. (There's a table of them in RFC 3339 itself,
and http://tf.nist.gov/pubs/bulletin/leapsecond.htm updates monthly.)
Extensions beyond the RFC:
- Accepts (but will not generate) dates formatted with a time-offset
missing a colon. (Implemented because Facebook are generating
broken RFC 3339 timestamps.)
Here's an excerpt from RFC 3339 itself:
5.6. Internet Date/Time Format
The following profile of ISO 8601 [ISO8601] dates SHOULD be used in
new protocols on the Internet. This is specified using the syntax
description notation defined in [ABNF].
date-fullyear = 4DIGIT
date-month = 2DIGIT ; 01-12
date-mday = 2DIGIT ; 01-28, 01-29, 01-30, 01-31 based on
; month/year
time-hour = 2DIGIT ; 00-23
time-minute = 2DIGIT ; 00-59
time-second = 2DIGIT ; 00-58, 00-59, 00-60 based on leap second
; rules
time-secfrac = "." 1*DIGIT
time-numoffset = ("+" / "-") time-hour ":" time-minute
time-offset = "Z" / time-numoffset
partial-time = time-hour ":" time-minute ":" time-second
[time-secfrac]
full-date = date-fullyear "-" date-month "-" date-mday
full-time = partial-time time-offset
date-time = full-date "T" full-time
NOTE: Per [ABNF] and ISO8601, the "T" and "Z" characters in this
syntax may alternatively be lower case "t" or "z" respectively.
This date/time format may be used in some environments or contexts
that distinguish between the upper- and lower-case letters 'A'-'Z'
and 'a'-'z' (e.g. XML). Specifications that use this format in
such environments MAY further limit the date/time syntax so that
the letters 'T' and 'Z' used in the date/time syntax must always
be upper case. Applications that generate this format SHOULD use
upper case letters.
NOTE: ISO 8601 defines date and time separated by "T".
Applications using this syntax may choose, for the sake of
readability, to specify a full-date and full-time separated by
(say) a space character.
"""
import datetime, time, calendar
import re
# Public API of this module; everything else is an implementation detail.
__all__ = ["tzinfo", "UTC_TZ", "parse_date", "parse_datetime", "now", "utcfromtimestamp", "utctotimestamp", "datetimetostr", "timestamptostr", "strtotimestamp"]
# Shared zero-length timedelta: fixed-offset timezones never observe DST.
ZERO = datetime.timedelta(0)
class tzinfo(datetime.tzinfo):
    """A fixed-offset timezone, expressed in minutes east of UTC."""
    def __init__(self, minutesEast = 0, name = 'Z'):
        """
        minutesEast -> number of minutes east of UTC represented by this tzinfo.
        name -> symbolic (but uninterpreted) label for this tzinfo.
        """
        self.minutesEast = minutesEast
        self.name = name
        self.offset = datetime.timedelta(minutes = minutesEast)
    def utcoffset(self, dt):
        """The fixed offset (a datetime.timedelta) given at construction time."""
        return self.offset
    def dst(self, dt):
        """No daylight-saving rule applies to a fixed offset: always zero."""
        return datetime.timedelta(0)
    def tzname(self, dt):
        """The symbolic name given at construction time."""
        return self.name
    def __repr__(self):
        """Render UTC specially as rfc3339.UTC_TZ; other offsets verbosely."""
        if self.minutesEast != 0:
            return "rfc3339.tzinfo(%s,%s)" % (self.minutesEast, repr(self.name))
        return "rfc3339.UTC_TZ"
UTC_TZ = tzinfo(0, 'Z')
date_re_str = r'(\d\d\d\d)-(\d\d)-(\d\d)'
time_re_str = r'(\d\d):(\d\d):(\d\d)(\.(\d+))?([zZ]|(([-+])(\d\d):?(\d\d)))'
def make_re(*parts):
    """Join regex fragments and anchor them, tolerating surrounding whitespace."""
    return re.compile(r'^\s*' + ''.join(parts) + r'\s*$')
date_re = make_re(date_re_str)
datetime_re = make_re(date_re_str, r'[ tT]', time_re_str)
def parse_date(s):
    """
    Parse an RFC 3339 'full-date' string into a datetime.date.

    Leading/trailing whitespace is tolerated.  Raises ValueError for any
    string that does not match the full-date production, or whose fields
    are out of range for datetime.date (e.g. month 13, day 32).

    >>> parse_date("2008-08-24")
    datetime.date(2008, 8, 24)
    >>> parse_date(" 2008-08-24 ")
    datetime.date(2008, 8, 24)
    >>> parse_date("2008-08-24").isoformat()
    '2008-08-24'
    """
    match = date_re.match(s)
    if not match:
        raise ValueError('Invalid RFC 3339 date string', s)
    year, month, day = match.groups()
    # Range validation (month 1..12, day valid for month) is delegated to
    # the datetime.date constructor, which raises ValueError itself.
    return datetime.date(int(year), int(month), int(day))
def _offset_to_tzname(offset):
"""
Converts an offset in minutes to an RFC 3339 "time-offset" string.
>>> _offset_to_tzname(0)
'+00:00'
>>> _offset_to_tzname(-1)
'-00:01'
>>> _offset_to_tzname(-60)
'-01:00'
>>> _offset_to_tzname(-779)
'-12:59'
>>> _offset_to_tzname(1)
'+00:01'
>>> _offset_to_tzname(60)
'+01:00'
>>> _offset_to_tzname(779)
'+12:59'
"""
offset = int(offset)
if offset < 0:
tzsign = '-'
else:
tzsign = '+'
offset = abs(offset)
tzhour = offset / 60
tzmin = offset % 60
return '%s%02d:%02d' % (tzsign, tzhour, tzmin)
def parse_datetime(s):
    """
    Parse an RFC 3339 'date-time' string into a timezone-aware
    datetime.datetime.  Any deviation from the allowed format raises
    ValueError.

    >>> parse_datetime("2008-08-24T00:00:00Z")
    datetime.datetime(2008, 8, 24, 0, 0, tzinfo=rfc3339.UTC_TZ)
    >>> parse_datetime("2008-08-24T00:00:00+01:00")
    datetime.datetime(2008, 8, 24, 0, 0, tzinfo=rfc3339.tzinfo(60,'+01:00'))
    >>> parse_datetime("2008-08-24T00:00:11.25Z")
    datetime.datetime(2008, 8, 24, 0, 0, 11, 250000, tzinfo=rfc3339.UTC_TZ)

    Extension beyond the RFC: a time-offset missing its colon (as emitted
    by Facebook, e.g. "+0100") is accepted but never generated:

    >>> parse_datetime("2008-08-24T00:00:00+0100").isoformat()
    '2008-08-24T00:00:00+01:00'
    """
    m = datetime_re.match(s)
    if not m:
        raise ValueError('Invalid RFC 3339 datetime string', s)
    # Renamed the old `min` local: it shadowed the builtin of the same name.
    (y, mon, d, hour, minute, sec, ignore1, frac_sec, wholetz,
     ignore2, tzsign, tzhour, tzmin) = m.groups()
    # Fractional seconds are optional; "25" -> 250000 microseconds,
    # rounded to the nearest microsecond.
    if frac_sec:
        frac_sec = float("0." + frac_sec)
    else:
        frac_sec = 0
    microsec = int((frac_sec * 1000000) + 0.5)
    if wholetz == 'z' or wholetz == 'Z':
        tz = UTC_TZ
    else:
        tzhour = int(tzhour)
        tzmin = int(tzmin)
        offset = tzhour * 60 + tzmin
        if offset == 0:
            # "+00:00" and "-00:00" both denote UTC.
            tz = UTC_TZ
        else:
            # time-minute is 00-59; the previous check (tzmin > 60) wrongly
            # accepted offsets such as "+00:60".  The 1439 cap is the
            # datetime.tzinfo documented limit (strictly less than 24h).
            if tzhour > 24 or tzmin > 59 or offset > 1439:
                raise ValueError('Invalid timezone offset', s, wholetz)
            if tzsign == '-':
                offset = -offset
            tz = tzinfo(offset, _offset_to_tzname(offset))
    # Range validation of the date/time fields themselves is delegated to
    # the datetime constructor (raises ValueError, e.g. for hour 24).
    return datetime.datetime(int(y), int(mon), int(d),
                             int(hour), int(minute), int(sec), microsec,
                             tz)
def now():
    """Current moment (time.time()) as a timezone-aware datetime.datetime
    in the rfc3339.UTC_TZ timezone; a tz-aware replacement for the naive
    datetime.datetime.now()."""
    seconds_since_epoch = time.time()
    return utcfromtimestamp(seconds_since_epoch)
def utcfromtimestamp(unix_epoch_timestamp):
    """Convert seconds-since-the-Unix-epoch into a datetime.datetime in the
    rfc3339.UTC_TZ timezone.  Sub-second precision is discarded, matching
    time.gmtime."""
    parts = time.gmtime(unix_epoch_timestamp)
    return datetime.datetime(parts.tm_year, parts.tm_mon, parts.tm_mday,
                             parts.tm_hour, parts.tm_min, parts.tm_sec,
                             0, UTC_TZ)
def utctotimestamp(dt):
    """Seconds elapsed between the Unix epoch and *dt*, computed from the
    datetime's UTC time tuple (naive datetimes are treated as UTC)."""
    as_utc_tuple = dt.utctimetuple()
    return calendar.timegm(as_utc_tuple)
def datetimetostr(dt):
    """RFC 3339 date-time string for *dt*: naive datetimes are rendered with
    a trailing 'Z', timezone-aware ones keep their own offset."""
    if dt.utcoffset() is None:
        return "%sZ" % dt.isoformat()
    return dt.isoformat()
def timestamptostr(ts):
    """RFC 3339 date-time string (UTC) for the Unix-epoch timestamp *ts*."""
    as_datetime = utcfromtimestamp(ts)
    return datetimetostr(as_datetime)
def strtotimestamp(s):
    """Unix-epoch timestamp for the RFC 3339 date-time string *s*."""
    parsed = parse_datetime(s)
    return utctotimestamp(parsed)
| |
#
# Copyright IBM Corp. 2016 All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import os.path
import re
import time
import copy
from behave import *
from datetime import datetime, timedelta
import base64
import uuid
import sys, requests, json
import bdd_test_util
import compose
# REST port exposed by each peer container; all step URLs target this port.
CORE_REST_PORT = 7050
# JSON-RPC protocol version stamped on every /chaincode request payload.
JSONRPC_VERSION = "2.0"
class ContainerData:
    """Static facts about one docker-compose container, captured at startup."""
    def __init__(self, containerName, ipAddress, envFromInspect, composeService):
        self.containerName = containerName
        self.ipAddress = ipAddress
        # Raw "KEY=value" strings as reported by `docker inspect`.
        self.envFromInspect = envFromInspect
        self.composeService = composeService
    def getEnv(self, key):
        """Return the remainder of the first inspect env entry that starts
        with *key* (callers pass e.g. "CORE_PEER_ID="); raise if none
        matches."""
        envValue = None
        for val in self.envFromInspect:
            if val.startswith(key):
                envValue = val[len(key):]
                break
        # `is None` instead of the original `== None`: identity is the
        # idiomatic (and PEP 8 mandated) way to test for None.
        if envValue is None:
            raise Exception("ENV key not found ({0}) for container ({1})".format(key, self.containerName))
        return envValue
def buildUrl(context, ipAddress, path):
    """Compose a peer REST URL; https is used whenever the running scenario
    is tagged TLS, plain http otherwise."""
    schema = "https" if 'TLS' in context.tags else "http"
    return "{0}://{1}:{2}{3}".format(schema, ipAddress, CORE_REST_PORT, path)
def currentTime():
    """Wall-clock time as 'HH:MM:SS', used to timestamp log lines."""
    stamp = time.strftime("%H:%M:%S")
    return stamp
def getDockerComposeFileArgsFromYamlFile(compose_yaml):
    """Turn a whitespace-separated list of compose files into docker-compose
    CLI arguments: "a.yml b.yml" -> ["-f", "a.yml", "-f", "b.yml"]."""
    args = []
    for part in compose_yaml.split():
        args.extend(["-f", part])
    return args
@given(u'we compose "{composeYamlFile}"')
def step_impl(context, composeYamlFile):
    """Start the docker composition described by *composeYamlFile* and stash
    it (plus its per-container metadata) on the behave context."""
    # time.sleep(10) # Should be replaced with a definitive interlock guaranteeing that all peers/membersrvc are ready
    composition = compose.Composition(composeYamlFile)
    context.compose_containers = composition.containerDataList
    context.composition = composition
@when(u'requesting "{path}" from "{containerName}"')
def step_impl(context, path, containerName):
    """GET *path* from the named peer and keep the response on the context
    for later assertion steps.  Fails the scenario unless the peer
    answers 200."""
    ipAddress = bdd_test_util.ipFromContainerNamePart(containerName, context.compose_containers)
    request_url = buildUrl(context, ipAddress, path)
    print("Requesting path = {0}".format(request_url))
    resp = requests.get(request_url, headers={'Accept': 'application/json'}, verify=False)
    assert resp.status_code == 200, "Failed to GET url %s: %s" % (request_url,resp.text)
    context.response = resp
    print("")
@then(u'I should get a JSON response containing "{attribute}" attribute')
def step_impl(context, attribute):
    """Assert the (possibly dotted) *attribute* path exists in the last JSON
    response."""
    getAttributeFromJSON(attribute, context.response.json(), "Attribute not found in response (%s)" %(attribute))
@then(u'I should get a JSON response containing no "{attribute}" attribute')
def step_impl(context, attribute):
    """Assert the (possibly dotted) *attribute* path is absent from the last
    JSON response.

    BUG FIX: the original raised its own failure via `assert None` inside
    the same try/except AssertionError it used to detect a missing
    attribute, so its AssertionError was swallowed and the step passed even
    when the attribute WAS present.  The lookup and the verdict are now
    separated so the step can actually fail.
    """
    found = True
    try:
        getAttributeFromJSON(attribute, context.response.json(), "")
    except AssertionError:
        found = False
        print("Attribute not found as was expected.")
    assert not found, "Attribute found in response (%s)" %(attribute)
def getAttributeFromJSON(attribute, jsonObject, msg):
    """Resolve a dotted *attribute* path (e.g. "result.message") inside
    *jsonObject*; fail with *msg* when any segment is missing."""
    return getHierarchyAttributesFromJSON(attribute.split("."), jsonObject, msg)
def getHierarchyAttributesFromJSON(attributes, jsonObject, msg):
    """Walk *jsonObject* down the list of keys in *attributes*, asserting
    each level exists, and return the value at the end of the path."""
    current = jsonObject
    for key in attributes:
        assert key in current, msg
        current = current[key]
    return current
def formatStringToCompare(value):
    """Stringify *value* with double quotes turned into single quotes,
    because feature-file step parameters cannot escape double quotes."""
    text = str(value)
    return text.replace('"', "'")
@then(u'I should get a JSON response with "{attribute}" = "{expectedValue}"')
def step_impl(context, attribute, expectedValue):
    """Assert the dotted *attribute* in the last JSON response equals
    *expectedValue*, compared as quote-normalized strings."""
    foundValue = getAttributeFromJSON(attribute, context.response.json(), "Attribute not found in response (%s)" %(attribute))
    assert (formatStringToCompare(foundValue) == expectedValue), "For attribute %s, expected (%s), instead found (%s)" % (attribute, expectedValue, foundValue)
@then(u'I should get a JSON response with array "{attribute}" contains "{expectedValue}" elements')
def step_impl(context, attribute, expectedValue):
    """Assert the dotted *attribute* in the last JSON response is an array
    of exactly *expectedValue* elements."""
    foundValue = getAttributeFromJSON(attribute, context.response.json(), "Attribute not found in response (%s)" %(attribute))
    assert (len(foundValue) == int(expectedValue)), "For attribute %s, expected array of size (%s), instead found (%s)" % (attribute, expectedValue, len(foundValue))
# The same sleep is registered under given/when/then so features can pause
# at any point; behave matches on the step keyword as well as its text.
@given(u'I wait "{seconds}" seconds')
def step_impl(context, seconds):
    time.sleep(float(seconds))
@when(u'I wait "{seconds}" seconds')
def step_impl(context, seconds):
    time.sleep(float(seconds))
@then(u'I wait "{seconds}" seconds')
def step_impl(context, seconds):
    time.sleep(float(seconds))
@when(u'I deploy lang chaincode "{chaincodePath}" of "{chainLang}" with ctor "{ctor}" to "{containerName}"')
def step_impl(context, chaincodePath, chainLang, ctor, containerName):
    """Deploy chaincode written in an explicit language (GOLANG/JAVA/NODE/
    CAR/...) to the named peer, using the step's table (if any) as
    constructor arguments."""
    print("Printing chaincode language " + chainLang)
    chaincode = {
        "path": chaincodePath,
        "language": chainLang,
        "constructor": ctor,
        "args": getArgsFromContext(context),
    }
    deployChainCodeToContainer(context, chaincode, containerName)
def getArgsFromContext(context):
    """Constructor arguments taken from the step's table, or [] when the
    step carries no table."""
    if 'table' in context:
        # The first table row holds the ctor arguments.
        return context.table[0].cells
    return []
@when(u'I deploy chaincode "{chaincodePath}" with ctor "{ctor}" to "{containerName}"')
def step_impl(context, chaincodePath, ctor, containerName):
    """Deploy GOLANG chaincode identified by its source path to the named peer."""
    chaincode = {
        "path": chaincodePath,
        "language": "GOLANG",
        "constructor": ctor,
        "args": getArgsFromContext(context),
    }
    deployChainCodeToContainer(context, chaincode, containerName)
@when(u'I deploy chaincode with name "{chaincodeName}" and with ctor "{ctor}" to "{containerName}"')
def step_impl(context, chaincodeName, ctor, containerName):
    """Deploy GOLANG chaincode identified by a pre-registered name (dev mode)."""
    chaincode = {
        "name": chaincodeName,
        "language": "GOLANG",
        "constructor": ctor,
        "args": getArgsFromContext(context),
    }
    deployChainCodeToContainer(context, chaincode, containerName)
    time.sleep(2.0) # After #2068 implemented change this to only apply after a successful ping
def deployChainCodeToContainer(context, chaincode, containerName):
    """POST a deploy transaction for *chaincode* to the named peer's
    /chaincode endpoint and record the spec (with the returned chaincode
    name filled in) on the context for later invoke/query steps."""
    ipAddress = bdd_test_util.ipFromContainerNamePart(containerName, context.compose_containers)
    request_url = buildUrl(context, ipAddress, "/chaincode")
    print("Requesting path = {0}".format(request_url))
    chaincodeSpec = createChaincodeSpec(context, chaincode)
    chaincodeOpPayload = createChaincodeOpPayload("deploy", chaincodeSpec)
    resp = requests.post(request_url, headers={'Content-type': 'application/json'}, data=json.dumps(chaincodeOpPayload), verify=False)
    assert resp.status_code == 200, "Failed to POST to %s: %s" %(request_url, resp.text)
    context.response = resp
    # The REST deploy response carries the generated chaincode name in
    # result.message; later steps address the chaincode by this name.
    chaincodeName = resp.json()['result']['message']
    chaincodeSpec['chaincodeID']['name'] = chaincodeName
    context.chaincodeSpec = chaincodeSpec
    print(json.dumps(chaincodeSpec, indent=4))
    print("")
def createChaincodeSpec(context, chaincode):
    """Build a REST ChaincodeSpec dict from the loose *chaincode* dict.

    Missing fields are first defaulted to ""; the constructor function name
    is prepended to the argument list; secureContext/metadata are attached
    only when the scenario has set them on the context.
    """
    chaincode = validateChaincodeDictionary(chaincode)
    # prepend() is defined elsewhere in this step suite; presumably it
    # yields [constructor] + args -- confirm against its definition.
    args = prepend(chaincode["constructor"], chaincode["args"])
    # Create a ChaincodeSpec structure
    chaincodeSpec = {
        "type": getChaincodeTypeValue(chaincode["language"]),
        "chaincodeID": {
            "path" : chaincode["path"],
            "name" : chaincode["name"]
        },
        "ctorMsg": {
            "args" : args
        },
    }
    if 'userName' in context:
        chaincodeSpec["secureContext"] = context.userName
    if 'metadata' in context:
        chaincodeSpec["metadata"] = context.metadata
    return chaincodeSpec
def validateChaincodeDictionary(chaincode):
    """Ensure every expected chaincode field is present, defaulting absent
    ones to "" (mutates *chaincode* in place and returns the same dict)."""
    for field in ("path", "name", "language", "constructor", "args"):
        chaincode.setdefault(field, "")
    return chaincode
def getChaincodeTypeValue(chainLang):
    """Map a chaincode language name onto its REST enum value; unknown
    languages fall back to GOLANG's value (1), matching the original
    if/elif fall-through."""
    lang_codes = {
        "GOLANG": 1,
        "JAVA": 4,
        "NODE": 2,
        "CAR": 3,
        "UNDEFINED": 0,
    }
    return lang_codes.get(chainLang, 1)
@when(u'I mock deploy chaincode with name "{chaincodeName}"')
def step_impl(context, chaincodeName):
    """Build a chaincode spec on the context without POSTing a deploy."""
    chaincode = {
        "name": chaincodeName,
        "language": "GOLANG"
    }
    context.chaincodeSpec = createChaincodeSpec(context, chaincode)
@then(u'I should have received a chaincode name')
def step_impl(context):
    """Assert the previous deploy produced a non-empty chaincode name and
    adopt it as the current transaction ID."""
    if 'chaincodeSpec' in context:
        assert context.chaincodeSpec['chaincodeID']['name'] != ""
        # Set the current transactionID to the name passed back
        context.transactionID = context.chaincodeSpec['chaincodeID']['name']
    elif 'grpcChaincodeSpec' in context:
        assert context.grpcChaincodeSpec.chaincodeID.name != ""
        # Set the current transactionID to the name passed back
        context.transactionID = context.grpcChaincodeSpec.chaincodeID.name
    else:
        # NOTE(review): `fail` is not defined in this module nor imported
        # explicitly -- presumably provided by another step file via the
        # star imports; confirm it exists at runtime.
        fail('chaincodeSpec not in context')
@when(u'I invoke chaincode "{chaincodeName}" function name "{functionName}" on "{containerName}" with "{idGenAlg}"')
def step_impl(context, chaincodeName, functionName, containerName, idGenAlg):
    """Invoke with an explicit transaction-ID generation algorithm (routes
    through the deprecated devops REST API)."""
    assert 'chaincodeSpec' in context, "chaincodeSpec not found in context"
    invokeChaincode(context, "invoke", functionName, containerName, idGenAlg)
@when(u'I invoke chaincode "{chaincodeName}" function name "{functionName}" on "{containerName}" "{times}" times')
def step_impl(context, chaincodeName, functionName, containerName, times):
    """Record the current chain height and transaction count, then invoke
    *times* times so later steps can verify block growth."""
    assert 'chaincodeSpec' in context, "chaincodeSpec not found in context"
    ipAddress = bdd_test_util.ipFromContainerNamePart(containerName, context.compose_containers)
    request_url = buildUrl(context, ipAddress, "/chain")
    resp = requests.get(request_url, headers={'Accept': 'application/json'}, verify=False)
    assert resp.status_code == 200, "Failed to get chain height %s: %s" % (request_url,resp.text)
    context.chainheight = getAttributeFromJSON("height", resp.json(), "Height not found in response.")
    context.txcount = times
    for i in range(int(times)):
        invokeChaincode(context, "invoke", functionName, containerName)
@when(u'I invoke chaincode "{chaincodeName}" function name "{functionName}" with attributes "{attrs}" on "{containerName}"')
def step_impl(context, chaincodeName, functionName, attrs, containerName):
    """Invoke, passing along a comma-separated list of user attributes."""
    assert 'chaincodeSpec' in context, "chaincodeSpec not found in context"
    assert attrs, "attrs were not specified"
    invokeChaincode(context, "invoke", functionName, containerName, None, attrs.split(","))
@when(u'I invoke chaincode "{chaincodeName}" function name "{functionName}" on "{containerName}"')
def step_impl(context, chaincodeName, functionName, containerName):
    """Plain single invoke of the currently deployed chaincode."""
    assert 'chaincodeSpec' in context, "chaincodeSpec not found in context"
    invokeChaincode(context, "invoke", functionName, containerName)
@when(u'I invoke master chaincode "{chaincodeName}" function name "{functionName}" on "{containerName}"')
def step_impl(context, chaincodeName, functionName, containerName):
    """Invoke a chaincode by explicit name (invokeMasterChaincode is defined
    later in this module)."""
    invokeMasterChaincode(context, "invoke", chaincodeName, functionName, containerName)
@then(u'I should have received a transactionID')
def step_impl(context):
    """Assert the last invoke recorded a non-empty transaction ID."""
    assert 'transactionID' in context, 'transactionID not found in context'
    assert context.transactionID != ""
    pass
@when(u'I unconditionally query chaincode "{chaincodeName}" function name "{functionName}" on "{containerName}"')
def step_impl(context, chaincodeName, functionName, containerName):
    """Query without requiring a prior deploy recorded on the context."""
    invokeChaincode(context, "query", functionName, containerName)
@when(u'I query chaincode "{chaincodeName}" function name "{functionName}" on "{containerName}"')
def step_impl(context, chaincodeName, functionName, containerName):
    """Query the currently deployed chaincode."""
    invokeChaincode(context, "query", functionName, containerName)
def createChaincodeOpPayload(method, chaincodeSpec):
    """Wrap *chaincodeSpec* in a JSON-RPC request envelope for the peer's
    /chaincode endpoint; *method* is "deploy", "invoke" or "query"."""
    return {
        "jsonrpc": JSONRPC_VERSION,
        "method" : method,
        "params" : chaincodeSpec,
        "id" : 1
    }
def invokeChaincode(context, devopsFunc, functionName, containerName, idGenAlg=None, attributes=None):
    """Invoke or query the chaincode in context.chaincodeSpec on a peer.

    Prepends `functionName` to any ctor args taken from context.table, records
    stripped `attributes` on the spec, and dispatches either to the deprecated
    devops API (only route that accepts `idGenAlg`) or the /chaincode API.
    """
    assert 'chaincodeSpec' in context, "chaincodeSpec not found in context"
    # Update the chaincodeSpec ctorMsg for invoke
    args = []
    if 'table' in context:
        # There is ctor arguments
        args = context.table[0].cells
    args = prepend(functionName, args)
    # Build a stripped copy rather than mutating the caller's list; this also
    # fixes the shared mutable-default-argument (`attributes=[]`) pitfall.
    attributes = [attr.strip() for attr in (attributes or [])]
    context.chaincodeSpec['attributes'] = attributes
    # If idGenAlg is passed we must use the deprecated devops API because the
    # parameter cannot be expressed in the new API.
    if idGenAlg is not None:
        context.chaincodeSpec['ctorMsg']['args'] = to_bytes(args)
        invokeUsingDevopsService(context, devopsFunc, functionName, containerName, idGenAlg)
    else:
        context.chaincodeSpec['ctorMsg']['args'] = args
        invokeUsingChaincodeService(context, devopsFunc, functionName, containerName)
def invokeUsingChaincodeService(context, devopsFunc, functionName, containerName):
    """POST a JSON-RPC payload to a peer's /chaincode endpoint.

    Stores the raw response on context.response; when the reply carries a
    result.message, that value (the transaction ID) goes to
    context.transactionID.
    """
    # Invoke the POST
    chaincodeOpPayload = createChaincodeOpPayload(devopsFunc, context.chaincodeSpec)
    ipAddress = bdd_test_util.ipFromContainerNamePart(containerName, context.compose_containers)
    request_url = buildUrl(context, ipAddress, "/chaincode")
    print("{0} POSTing path = {1}".format(currentTime(), request_url))
    print("Using attributes {0}".format(context.chaincodeSpec['attributes']))
    resp = requests.post(request_url, headers={'Content-type': 'application/json'}, data=json.dumps(chaincodeOpPayload), verify=False)
    assert resp.status_code == 200, "Failed to POST to %s: %s" %(request_url, resp.text)
    context.response = resp
    print("RESULT from {0} of chaincode from peer {1}".format(functionName, containerName))
    print(json.dumps(context.response.json(), indent = 4))
    if 'result' in resp.json():
        result = resp.json()['result']
        if 'message' in result:
            # The REST API reports the transaction ID in result.message.
            transactionID = result['message']
            context.transactionID = transactionID
def invokeUsingDevopsService(context, devopsFunc, functionName, containerName, idGenAlg):
    """POST a chaincodeInvocationSpec to the deprecated /devops/<func> endpoint.

    Unlike the /chaincode route, this API accepts an optional idGenerationAlg.
    Stores the response on context.response and the returned transaction ID
    (top-level 'message' field) on context.transactionID.
    """
    # Invoke the POST
    chaincodeInvocationSpec = {
        "chaincodeSpec" : context.chaincodeSpec
    }
    ipAddress = bdd_test_util.ipFromContainerNamePart(containerName, context.compose_containers)
    if idGenAlg is not None:
        chaincodeInvocationSpec['idGenerationAlg'] = idGenAlg
    request_url = buildUrl(context, ipAddress, "/devops/{0}".format(devopsFunc))
    print("{0} POSTing path = {1}".format(currentTime(), request_url))
    resp = requests.post(request_url, headers={'Content-type': 'application/json'}, data=json.dumps(chaincodeInvocationSpec), verify=False)
    assert resp.status_code == 200, "Failed to POST to %s: %s" %(request_url, resp.text)
    context.response = resp
    print("RESULT from {0} of chaincode from peer {1}".format(functionName, containerName))
    print(json.dumps(context.response.json(), indent = 4))
    if 'message' in resp.json():
        transactionID = context.response.json()['message']
        context.transactionID = transactionID
def invokeMasterChaincode(context, devopsFunc, chaincodeName, functionName, containerName):
    """Invoke a chaincode by name, building a fresh (golang-typed) spec.

    Does not use context.chaincodeSpec; adds secureContext when a user is
    registered. Response and transaction ID are recorded on the context as in
    invokeUsingChaincodeService.
    """
    args = []
    if 'table' in context:
        args = context.table[0].cells
    args = prepend(functionName, args)
    # 1 == golang chaincode type in the peer's protobuf enum.
    typeGolang = 1
    chaincodeSpec = {
        "type": typeGolang,
        "chaincodeID": {
            "name" : chaincodeName
        },
        "ctorMsg": {
            "args" : args
        }
    }
    if 'userName' in context:
        chaincodeSpec["secureContext"] = context.userName
    chaincodeOpPayload = createChaincodeOpPayload(devopsFunc, chaincodeSpec)
    ipAddress = bdd_test_util.ipFromContainerNamePart(containerName, context.compose_containers)
    request_url = buildUrl(context, ipAddress, "/chaincode")
    print("{0} POSTing path = {1}".format(currentTime(), request_url))
    resp = requests.post(request_url, headers={'Content-type': 'application/json'}, data=json.dumps(chaincodeOpPayload), verify=False)
    assert resp.status_code == 200, "Failed to POST to %s: %s" %(request_url, resp.text)
    context.response = resp
    print("RESULT from {0} of chaincode from peer {1}".format(functionName, containerName))
    print(json.dumps(context.response.json(), indent = 4))
    if 'result' in resp.json():
        result = resp.json()['result']
        if 'message' in result:
            transactionID = result['message']
            context.transactionID = transactionID
@then(u'I wait "{seconds}" seconds for chaincode to build')
def step_impl(context, seconds):
    """ This step takes into account the chaincodeImagesUpToDate tag, in which case the wait is reduce to some default seconds"""
    reducedWaitTime = 4
    if 'chaincodeImagesUpToDate' in context.tags:
        print("Assuming images are up to date, sleeping for {0} seconds instead of {1} in scenario {2}".format(reducedWaitTime, seconds, context.scenario.name))
        time.sleep(float(reducedWaitTime))
    else:
        time.sleep(float(seconds))
@then(u'I wait "{seconds}" seconds for transaction to be committed to block on "{containerName}"')
def step_impl(context, seconds, containerName):
    """Fetch the committed transaction by ID from a peer's REST API.

    NOTE(review): despite the step name this performs one immediate GET with
    no wait/retry and never uses `seconds` -- confirm intent.
    """
    assert 'transactionID' in context, "transactionID not found in context"
    ipAddress = bdd_test_util.ipFromContainerNamePart(containerName, context.compose_containers)
    request_url = buildUrl(context, ipAddress, "/transactions/{0}".format(context.transactionID))
    print("{0} GETing path = {1}".format(currentTime(), request_url))
    resp = requests.get(request_url, headers={'Accept': 'application/json'}, verify=False)
    # Fixed the assertion message: this request is a GET, not a POST.
    assert resp.status_code == 200, "Failed to GET from %s: %s" %(request_url, resp.text)
    context.response = resp
def multiRequest(context, seconds, containerDataList, pathBuilderFunc):
    """Perform a multi request against the system"""
    # NOTE(review): the inner while loop has no success `break`, so the
    # while/else always falls through to the raise once maxTime passes --
    # every call ends in an exception. This looks unfinished (or dead code);
    # confirm before relying on it.
    # Build map of "containerName" : response
    respMap = {container.containerName:None for container in containerDataList}
    # Set the max time before stopping attempts
    maxTime = datetime.now() + timedelta(seconds = int(seconds))
    for container in containerDataList:
        ipAddress = container.ipAddress
        request_url = buildUrl(context, ipAddress, pathBuilderFunc(context, container))
        # Loop unless failure or time exceeded
        while (datetime.now() < maxTime):
            print("{0} GETing path = {1}".format(currentTime(), request_url))
            resp = requests.get(request_url, headers={'Accept': 'application/json'}, verify=False)
            respMap[container.containerName] = resp
        else:
            raise Exception("Max time exceeded waiting for multiRequest with current response map = {0}".format(respMap))
@then(u'I wait up to "{seconds}" seconds for transaction to be committed to all peers')
def step_impl(context, seconds):
    """Poll every peer's /transactions/<id> endpoint until each returns 200.

    404 means not yet committed (sleep 1s and retry); any other status raises
    immediately; the while/else raises when the overall deadline expires.
    """
    assert 'transactionID' in context, "transactionID not found in context"
    assert 'compose_containers' in context, "compose_containers not found in context"
    # Build map of "containerName" : resp.statusCode
    respMap = {container.containerName:0 for container in context.compose_containers}
    # Set the max time before stopping attempts
    maxTime = datetime.now() + timedelta(seconds = int(seconds))
    for container in context.compose_containers:
        ipAddress = container.ipAddress
        request_url = buildUrl(context, ipAddress, "/transactions/{0}".format(context.transactionID))
        # Loop unless failure or time exceeded
        while (datetime.now() < maxTime):
            print("{0} GETing path = {1}".format(currentTime(), request_url))
            resp = requests.get(request_url, headers={'Accept': 'application/json'}, verify=False)
            if resp.status_code == 404:
                # Pause then try again
                respMap[container.containerName] = 404
                time.sleep(1)
                continue
            elif resp.status_code == 200:
                # Success, continue
                respMap[container.containerName] = 200
                break
            else:
                raise Exception("Error requesting {0}, returned result code = {1}".format(request_url, resp.status_code))
        else:
            raise Exception("Max time exceeded waiting for transactions with current response map = {0}".format(respMap))
    print("Result of request to all peers = {0}".format(respMap))
    print("")
@then(u'I check the transaction ID if it is "{tUUID}"')
def step_impl(context, tUUID):
    """Assert the recorded transaction ID equals the expected UUID string."""
    assert 'transactionID' in context, "transactionID not found in context"
    assert context.transactionID == tUUID, "transactionID is not tUUID"
@then(u'I wait up to "{seconds}" seconds for transaction to be committed to peers')
def step_impl(context, seconds):
    """Same as the all-peers wait step, but restricted to the peers named in
    the step's table (looked up via bdd_test_util)."""
    assert 'transactionID' in context, "transactionID not found in context"
    assert 'compose_containers' in context, "compose_containers not found in context"
    assert 'table' in context, "table (of peers) not found in context"
    aliases = context.table.headings
    containerDataList = bdd_test_util.getContainerDataValuesFromContext(context, aliases, lambda containerData: containerData)
    # Build map of "containerName" : resp.statusCode
    respMap = {container.containerName:0 for container in containerDataList}
    # Set the max time before stopping attempts
    maxTime = datetime.now() + timedelta(seconds = int(seconds))
    for container in containerDataList:
        ipAddress = container.ipAddress
        request_url = buildUrl(context, ipAddress, "/transactions/{0}".format(context.transactionID))
        # Loop unless failure or time exceeded
        while (datetime.now() < maxTime):
            print("{0} GETing path = {1}".format(currentTime(), request_url))
            resp = requests.get(request_url, headers={'Accept': 'application/json'}, verify=False)
            if resp.status_code == 404:
                # Pause then try again
                respMap[container.containerName] = 404
                time.sleep(1)
                continue
            elif resp.status_code == 200:
                # Success, continue
                respMap[container.containerName] = 200
                break
            else:
                raise Exception("Error requesting {0}, returned result code = {1}".format(request_url, resp.status_code))
        else:
            raise Exception("Max time exceeded waiting for transactions with current response map = {0}".format(respMap))
    print("Result of request to all peers = {0}".format(respMap))
    print("")
@then(u'I wait up to "{seconds}" seconds for transactions to be committed to peers')
def step_impl(context, seconds):
    """Poll each listed peer's /chain endpoint until its block height reaches
    the recorded starting height plus the number of submitted transactions.

    Raises on an unexpected HTTP status or when the deadline expires.
    """
    assert 'chainheight' in context, "chainheight not found in context"
    assert 'txcount' in context, "txcount not found in context"
    assert 'compose_containers' in context, "compose_containers not found in context"
    assert 'table' in context, "table (of peers) not found in context"
    aliases = context.table.headings
    containerDataList = bdd_test_util.getContainerDataValuesFromContext(context, aliases, lambda containerData: containerData)
    # Build map of "containerName" : resp.statusCode
    respMap = {container.containerName:0 for container in containerDataList}
    # Set the max time before stopping attempts
    maxTime = datetime.now() + timedelta(seconds = int(seconds))
    for container in containerDataList:
        ipAddress = container.ipAddress
        request_url = buildUrl(context, ipAddress, "/chain")
        # Loop unless failure or time exceeded
        while (datetime.now() < maxTime):
            print("{0} GETing path = {1}".format(currentTime(), request_url))
            resp = requests.get(request_url, headers={'Accept': 'application/json'}, verify=False)
            if resp.status_code == 404:
                # Pause then try again
                respMap[container.containerName] = 404
                time.sleep(1)
                continue
            elif resp.status_code == 200:
                height = getAttributeFromJSON("height", resp.json(), "Height not found in response.")
                if height >= int(context.chainheight) + int(context.txcount):
                    # Success, continue
                    respMap[container.containerName] = 200
                    break
                else:
                    # Height not reached yet: pause before polling again so we
                    # don't busy-spin against the peer (mirrors the 404 branch,
                    # which already slept between retries).
                    time.sleep(1)
                    continue
            else:
                raise Exception("Error requesting {0}, returned result code = {1}".format(request_url, resp.status_code))
        else:
            raise Exception("Max time exceeded waiting for transactions with current response map = {0}".format(respMap))
    print("Result of request to all peers = {0}".format(respMap))
    print("")
@then(u'I should get a rejection message in the listener after stopping it')
def step_impl(context):
    """Stop the background block-listener and check its output contains exactly
    one rejected-transaction line.

    NOTE(review): assumes the listener's stdout reads as text (Python 2 /
    text-mode pipe) -- under Python 3 bytes this `in` test would fail; confirm.
    """
    assert "eventlistener" in context, "no eventlistener is started"
    context.eventlistener.terminate()
    output = context.eventlistener.stdout.read()
    rejection = "Received rejected transaction"
    assert rejection in output, "no rejection message was found"
    assert output.count(rejection) == 1, "only one rejection message should be found"
@when(u'I query chaincode "{chaincodeName}" function name "{functionName}" on all peers')
def step_impl(context, chaincodeName, functionName):
    """POST the same query payload to every compose container; collect all
    responses (in container order) on context.responses."""
    assert 'chaincodeSpec' in context, "chaincodeSpec not found in context"
    assert 'compose_containers' in context, "compose_containers not found in context"
    # Update the chaincodeSpec ctorMsg for invoke
    args = []
    if 'table' in context:
        # There is ctor arguments
        args = context.table[0].cells
    args = prepend(functionName, args)
    context.chaincodeSpec['ctorMsg']['args'] = args #context.table[0].cells if ('table' in context) else []
    # Invoke the POST
    chaincodeOpPayload = createChaincodeOpPayload("query", context.chaincodeSpec)
    responses = []
    for container in context.compose_containers:
        request_url = buildUrl(context, container.ipAddress, "/chaincode")
        print("{0} POSTing path = {1}".format(currentTime(), request_url))
        resp = requests.post(request_url, headers={'Content-type': 'application/json'}, data=json.dumps(chaincodeOpPayload), verify=False)
        assert resp.status_code == 200, "Failed to POST to %s: %s" %(request_url, resp.text)
        responses.append(resp)
    context.responses = responses
@when(u'I unconditionally query chaincode "{chaincodeName}" function name "{functionName}" with value "{value}" on peers')
def step_impl(context, chaincodeName, functionName, value):
    """Query listed peers; failOnError=False so non-200 responses are kept."""
    query_common(context, chaincodeName, functionName, value, False)
@when(u'I query chaincode "{chaincodeName}" function name "{functionName}" with value "{value}" on peers')
def step_impl(context, chaincodeName, functionName, value):
    """Query listed peers; failOnError=True so any non-200 response asserts."""
    query_common(context, chaincodeName, functionName, value, True)
def query_common(context, chaincodeName, functionName, value, failOnError):
    """Query each peer in the step table with a per-peer secureContext.

    Uses context.peerToSecretMessage to stamp each request with that peer's
    enrollId; collects all responses on context.responses. When failOnError
    is true, any non-200 status fails the step immediately.
    """
    assert 'chaincodeSpec' in context, "chaincodeSpec not found in context"
    assert 'compose_containers' in context, "compose_containers not found in context"
    assert 'table' in context, "table (of peers) not found in context"
    assert 'peerToSecretMessage' in context, "peerToSecretMessage map not found in context"
    aliases = context.table.headings
    containerDataList = bdd_test_util.getContainerDataValuesFromContext(context, aliases, lambda containerData: containerData)
    # Update the chaincodeSpec ctorMsg for invoke
    context.chaincodeSpec['ctorMsg']['args'] = [functionName, value]
    # Invoke the POST
    # Make deep copy of chaincodeSpec as we will be changing the SecurityContext per call.
    chaincodeOpPayload = createChaincodeOpPayload("query", copy.deepcopy(context.chaincodeSpec))
    responses = []
    for container in containerDataList:
        # Change the SecurityContext per call
        chaincodeOpPayload['params']["secureContext"] = context.peerToSecretMessage[container.composeService]['enrollId']
        print("Container {0} enrollID = {1}".format(container.containerName, container.getEnv("CORE_SECURITY_ENROLLID")))
        request_url = buildUrl(context, container.ipAddress, "/chaincode")
        print("{0} POSTing path = {1}".format(currentTime(), request_url))
        resp = requests.post(request_url, headers={'Content-type': 'application/json'}, data=json.dumps(chaincodeOpPayload), timeout=30, verify=False)
        if failOnError:
            assert resp.status_code == 200, "Failed to POST to %s: %s" %(request_url, resp.text)
        print("RESULT from {0} of chaincode from peer {1}".format(functionName, container.containerName))
        print(json.dumps(resp.json(), indent = 4))
        responses.append(resp)
    context.responses = responses
@then(u'I should get a JSON response from all peers with "{attribute}" = "{expectedValue}"')
def step_impl(context, attribute, expectedValue):
    """Assert every collected response carries the expected attribute value."""
    assert 'responses' in context, "responses not found in context"
    for resp in context.responses:
        foundValue = getAttributeFromJSON(attribute, resp.json(), "Attribute not found in response (%s)" %(attribute))
        assert (formatStringToCompare(foundValue) == expectedValue), "For attribute %s, expected (%s), instead found (%s)" % (attribute, expectedValue, foundValue)
@then(u'I should get a JSON response from peers with "{attribute}" = "{expectedValue}"')
def step_impl(context, attribute, expectedValue):
    """Same check as the all-peers variant, with extra table preconditions."""
    assert 'responses' in context, "responses not found in context"
    assert 'compose_containers' in context, "compose_containers not found in context"
    assert 'table' in context, "table (of peers) not found in context"
    for resp in context.responses:
        foundValue = getAttributeFromJSON(attribute, resp.json(), "Attribute not found in response (%s)" %(attribute))
        assert (formatStringToCompare(foundValue) == expectedValue), "For attribute %s, expected (%s), instead found (%s)" % (attribute, expectedValue, foundValue)
@given(u'I register with CA supplying username "{userName}" and secret "{secret}" on peers')
def step_impl(context, userName, secret):
    """Register/login a user against each listed peer's /registrar endpoint.

    Records the user on the context and, when a chaincodeSpec already exists,
    updates its secureContext to the new user.
    """
    assert 'compose_containers' in context, "compose_containers not found in context"
    assert 'table' in context, "table (of peers) not found in context"
    # Get list of IPs to login to
    aliases = context.table.headings
    containerDataList = bdd_test_util.getContainerDataValuesFromContext(context, aliases, lambda containerData: containerData)
    secretMsg = {
        "enrollId": userName,
        "enrollSecret" : secret
    }
    # Login to each container specified
    for containerData in containerDataList:
        request_url = buildUrl(context, containerData.ipAddress, "/registrar")
        print("{0} POSTing path = {1}".format(currentTime(), request_url))
        resp = requests.post(request_url, headers={'Content-type': 'application/json'}, data=json.dumps(secretMsg), verify=False)
        assert resp.status_code == 200, "Failed to POST to %s: %s" %(request_url, resp.text)
        context.response = resp
        print("message = {0}".format(resp.json()))
        # Create new User entry
        bdd_test_util.registerUser(context, secretMsg, containerData.composeService)
    # Store the username in the context
    context.userName = userName
    # if we already have the chaincodeSpec, change secureContext
    if 'chaincodeSpec' in context:
        context.chaincodeSpec["secureContext"] = context.userName
@given(u'I use the following credentials for querying peers')
def step_impl(context):
    """Login each (peer, username, secret) row from the step table and build
    context.peerToSecretMessage, keyed by peer service name, for later
    per-peer secureContext stamping."""
    assert 'compose_containers' in context, "compose_containers not found in context"
    assert 'table' in context, "table (of peers, username, secret) not found in context"
    peerToSecretMessage = {}
    # Login to each container specified using username and secret
    for row in context.table.rows:
        peer, userName, secret = row['peer'], row['username'], row['secret']
        secretMsg = {
            "enrollId": userName,
            "enrollSecret" : secret
        }
        ipAddress = bdd_test_util.ipFromContainerNamePart(peer, context.compose_containers)
        request_url = buildUrl(context, ipAddress, "/registrar")
        print("POSTing to service = {0}, path = {1}".format(peer, request_url))
        resp = requests.post(request_url, headers={'Content-type': 'application/json'}, data=json.dumps(secretMsg), verify=False)
        assert resp.status_code == 200, "Failed to POST to %s: %s" %(request_url, resp.text)
        context.response = resp
        print("message = {0}".format(resp.json()))
        peerToSecretMessage[peer] = secretMsg
    context.peerToSecretMessage = peerToSecretMessage
@given(u'I stop peers')
def step_impl(context):
    """Stop the peers listed in the step table via docker-compose."""
    compose_op(context, "stop")
@given(u'I start a listener')
def step_impl(context):
    """Launch the pre-built block-listener binary (from $GOPATH) in the
    background, listening for rejected transactions."""
    gopath = os.environ.get('GOPATH')
    assert gopath is not None, "Please set GOPATH properly!"
    listener = os.path.join(gopath, "src/github.com/hyperledger/fabric/build/bin/block-listener")
    assert os.path.isfile(listener), "Please build the block-listener binary!"
    bdd_test_util.start_background_process(context, "eventlistener", [listener, "-listen-to-rejections"] )
@given(u'I start peers')
def step_impl(context):
    """Start the peers listed in the step table via docker-compose."""
    compose_op(context, "start")
@given(u'I pause peers')
def step_impl(context):
    """Pause the peers listed in the step table via docker-compose."""
    compose_op(context, "pause")
@given(u'I unpause peers')
def step_impl(context):
    """Unpause the peers listed in the step table via docker-compose."""
    compose_op(context, "unpause")
def compose_op(context, op):
    """Run a docker-compose operation (stop/start/pause/unpause) against the
    services named in the step table, then refresh the container list."""
    assert 'table' in context, "table (of peers) not found in context"
    assert 'composition' in context, "composition not found in context"
    services = context.table.headings
    context.composition.issueCommand([op] + services)
    context.compose_containers = context.composition.containerDataList
def to_bytes(strlist):
    """Base64-encode every ASCII string in *strlist*, returning byte strings."""
    def _encode(text):
        return base64.standard_b64encode(text.encode('ascii'))
    return [_encode(text) for text in strlist]
def prepend(elem, l):
    """Return a new list with *elem* in front of *l*.

    None or "" for *l* is treated as an empty list; a None *elem* is not
    prepended. Never mutates the input list.
    """
    tail = [] if (l is None or l == "") else l
    return tail if elem is None else [elem] + tail
@given(u'I do nothing')
def step_impl(context):
    """Explicit no-op step, useful as a scenario placeholder."""
    pass
| |
import sys
import threading
import httplib
import time
import json
import asyncore
import socket
import os
import logging
import random
import multiprocessing
import SocketServer
from OutgoingMessage import *
from Util import *
from multiprocessing import Queue
# Setup logging redirection
# All bridge diagnostics go to ./bridge.log via a dedicated 'bridge' logger.
logger = logging.getLogger('bridge')
hdlr = logging.FileHandler('./bridge.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)
# Global ref to the singleton bridge server running on this node
# (assigned by Bridge.__init__ and read by BridgeHandler).
bridgeServer = None
FIT = {} # map from interest name to (semaphore, content) tuples (this blocks the exit bridge)
PIT = {} # map from interest name to (semaphore, content) tuples (this blocks the entry bridge)
class BridgeHandler(SocketServer.BaseRequestHandler):
    """Per-connection handler for the bridge's TCP control channel.

    Dispatches on a one-character message type from a peer bridge:
      'k' - key exchange: read the peer's DH share, reply with ours, and
            store the derived key in the owning stage's keyMap.
      'i' - interest forwarding: read an interest name, hand it to the NDN
            output stage, then return the content plus an HMAC tag.
    Anything else raises RuntimeError.
    """
    def __init__(self, request, client_address, server):
        self.client_address = client_address
        self.server = server
        SocketServer.BaseRequestHandler.__init__(self, request, client_address, server)
    def setup(self):
        print >> sys.stderr, "Handler initialized for address: " + str(self.client_address)
        logger.info("Handler initialized")
        return SocketServer.BaseRequestHandler.setup(self)
    def handle(self):
        """Read the type byte, then a newline-terminated payload, and reply."""
        global bridgeServer
        lengths = []
        dtype = self.request.recv(1)
        if (dtype == 'k'):
            print >> sys.stderr, "received generating and returning key..."
            # Read the peer's key share one byte at a time up to the newline.
            fin = self.request.makefile()
            bytes = ""
            byte = fin.read(1)
            while (byte != "\n"):
                bytes = bytes + byte
                byte = fin.read(1)
            data = bytes
            mod = bridgeServer.mod
            gen = bridgeServer.gen
            bits = bridgeServer.bits
            rand = random.randint(0, mod)
            power = (rand % (2 ** bits))
            # iterModExp (from Util) presumably computes gen**power mod `mod`.
            ours = iterModExp(gen, power, mod)
            # Send our half back on down
            fout = self.request.makefile()
            returnData = str(ours) + "\n"
            fout.write(returnData)
            fout.flush()
            # Compute and save our key
            # NOTE(review): this derives ours**theirs, whereas textbook DH uses
            # theirs**power (our secret exponent). Bridge.generatePairwiseKey
            # makes the mirror-image computation, so the two sides may derive
            # different keys -- confirm against Util.iterModExp semantics.
            theirs = int(data) # it was written as a string
            key = iterModExp(ours, int(theirs), mod)
            bridgeServer.stage.keyMap[self.client_address[0]] = str(key)
            return
        elif (dtype == 'i'):
            print >> sys.stderr, "received, forwarding interest..."
            # Read the newline-terminated interest name.
            fin = self.request.makefile()
            bytes = ""
            byte = fin.read(1)
            while (byte != "\n"):
                bytes = bytes + byte
                byte = fin.read(1)
            interestName = bytes
            msg = OutgoingMessage(None, None, interestName, None, True)
            event = threading.Event()
            # Send the interest now and block
            # NOTE(review): put(msg, event) passes `event` where Queue.put
            # expects the `block` flag; unclear who sets this event -- confirm
            # against the NDN output stage implementation.
            bridgeServer.stage.ndnOutputStage.put(msg, event)
            event.clear()
            event.wait()
            # We've returned - fetch the content
            content = str(bridgeServer.stage.ndnOutputStage.bridgeFIT[msg.tag][1])
            # Sign the content using the key for the bridge
            if (bridgeServer.stage.keyMap[self.client_address[0]] != None):
                sig = generateHMACTag(bridgeServer.stage.keyMap[self.client_address[0]], content)
                # Send the content and the signature to the other bridge
                fout = self.request.makefile()
                fout.write(content + "\n")
                fout.write(sig + "\n")
                fout.flush()
            else:
                raise RuntimeError()
            return
    def finish(self):
        logger.info("BridgeHandler closing")
        return SocketServer.BaseRequestHandler.finish(self)
class BridgeServer(SocketServer.TCPServer, threading.Thread):
    """TCP server for bridge-to-bridge traffic that runs on its own thread.

    `stage` is the owning Bridge instance; mod/gen/bits are the DH group
    parameters handed to each BridgeHandler via the module-global.
    """
    def __init__(self, host, port, mod, gen, bits, stage, handler_class = BridgeHandler):
        # Old-style (Python 2) base classes: initialize each explicitly
        # rather than via cooperative super().
        threading.Thread.__init__(self)
        self.gen = gen
        self.mod = mod
        self.bits = bits
        self.stage = stage
        SocketServer.TCPServer.__init__(self, (host, port), handler_class)
    def server_activate(self):
        SocketServer.TCPServer.server_activate(self)
        return
    def run(self):
        """Thread body: serve one request at a time until running is cleared.

        NOTE: nothing in this file clears self.running, and handle_request
        blocks, so shutdown is effectively process exit.
        """
        self.running = True
        while (self.running):
            self.handle_request()
        return
    def handle_request(self):
        print >> sys.stderr, "BridgeServer handle_request"
        return SocketServer.TCPServer.handle_request(self)
    # The overrides below simply delegate to TCPServer; they exist as
    # instrumentation hooks.
    def verify_request(self, request, client_address):
        return SocketServer.TCPServer.verify_request(self, request, client_address)
    def process_request(self, request, client_address):
        return SocketServer.TCPServer.process_request(self, request, client_address)
    def server_close(self):
        return SocketServer.TCPServer.server_close(self)
    def finish_request(self, request, client_address):
        return SocketServer.TCPServer.finish_request(self, request, client_address)
    def close_request(self, request_address):
        return SocketServer.TCPServer.close_request(self, request_address)
class Bridge(threading.Thread):
    """Background thread that registers this node with a bridge directory
    server, keeps its gateway list fresh, and tunnels NDN interests to peer
    bridges over TCP with per-peer HMAC keys."""
    def __init__(self, paramMap, ndnOutputStage):
        threading.Thread.__init__(self)
        global bridgeServer
        self.paramMap = paramMap
        self.gateways = []
        self.prefixGatewayMap = {}
        # keyMap: peer IP address -> shared key (stored as a string).
        self.keyMap = {}
        self.connected = False
        self.ndnOutputStage = ndnOutputStage
        # Diffie-Hellman group parameters from the configuration map.
        self.mod = int(self.paramMap["KEYGEN_GROUP_MODULUS"])
        self.gen = int(self.paramMap["KEYGEN_GROUP_GENERATOR"])
        self.bits = int(self.paramMap["KEYGEN_KEY_BITS"])
        # Create the global server
        bridgeServer = BridgeServer(self.paramMap["PUBLIC_IP"], int(self.paramMap["BRIDGE_LOCAL_PORT"]), self.mod, self.gen, self.bits, self)
    def run(self):
        """Thread body: start the local server, then heartbeat/refresh the
        directory server forever at BRIDGE_SERVER_UPDATE_FREQ intervals."""
        global bridgeServer
        self.running = True
        bridgeServer.start()
        # Establish long-term connection
        print >> sys.stderr, "Establishing connection with directory: " + str(self.paramMap["BRIDGE_SERVER_ADDRESS"])
        self.conn = httplib.HTTPConnection(self.paramMap["BRIDGE_SERVER_ADDRESS"])
        # Loop until we're told to quit
        print >> sys.stderr, "Running bridge"
        while (self.running):
            # Try to connect first
            if (not self.connected):
                self.connectToServer()
            # If connected, send the server a heartbeat message and update our gateway list
            if (self.connected):
                self.sendHeartbeat()
                self.updateGateways()
            # Sleep it off man...
            time.sleep(int(self.paramMap["BRIDGE_SERVER_UPDATE_FREQ"]))
    def connectToServer(self):
        """POST /connect to the directory server; mark connected on HTTP 200."""
        params = {'tmp' : 'tmp'}
        headers = {"Content-type": "application/json","Accept": "text/plain"}
        resp = self.sendMsg("POST", "/connect", params, headers)
        if (int(resp.status) == 200):
            self.connected = True
    def sendHeartbeat(self):
        """POST a keep-alive to /heartbeat and return the response."""
        params = {'tmp' : 'tmp'}
        headers = {"Content-type": "application/json","Accept": "text/plain"}
        return self.sendMsg("POST", "/heartbeat", params, headers)
    def sendMsg(self, cmd, url, params, headers):
        """Issue an HTTP request on the long-lived directory connection.

        When params/headers are omitted the request carries no body.
        """
        if (params == None or headers == None):
            self.conn.request(cmd, url)
        else:
            self.conn.request(cmd, url, json.dumps(params), headers)
        return self.conn.getresponse()
    def updateGateways(self):
        """Replace self.gateways with the directory's /list-gateways result."""
        resp = self.sendMsg("GET", "/list-gateways", None, None)
        # `list` shadows the builtin here; kept as-is (behavior-neutral).
        list = resp.read()
        dic = json.loads(list)
        self.gateways = []
        for gateway in dic["gateways"]:
            self.gateways.append(str(gateway)) # gateway should be the address
    def getGateways(self):
        return self.gateways
    def lookupPrefix(self, prefix):
        """Return the gateway mapped to `prefix`, or None if unknown."""
        if (prefix in self.prefixGatewayMap):
            return self.prefixGatewayMap[prefix]
        else:
            return None
    # Generate our half of the DH share
    def generatePairwiseKey(self, sock):
        """Exchange DH shares over `sock` and return the derived key.

        NOTE(review): the derivation uses ours**theirs rather than
        theirs**power (our secret exponent); see the matching note in
        BridgeHandler.handle -- confirm both sides actually agree on a key.
        """
        rand = random.randint(0, self.mod)
        power = (rand % (2 ** self.bits))
        ours = iterModExp(self.gen, power, self.mod)
        # Send our half of the share to the other guy
        sharestr = str(ours)
        payload = "k" + sharestr + "\n"
        fout = sock.makefile()
        fout.write(payload)
        fout.flush()
        # Receive their share (newline-terminated, read byte by byte)
        fin = sock.makefile()
        bytes = ""
        byte = fin.read(1)
        while (byte != "\n"):
            bytes = bytes + byte
            byte = fin.read(1)
        theirs = int(bytes)
        # Compute and save the key
        key = iterModExp(ours, theirs, self.mod)
        return key
    # Messages are sent as follows: |name length|name|
    def sendInterest(self, interest, targetAddress):
        """Forward an interest to a peer bridge; return the verified content.

        Establishes a pairwise key first if none exists for the target.
        Returns None when MAC verification fails or when the target is this
        node itself.
        """
        global PIT
        interest = str(interest)
        if (targetAddress != self.paramMap["PUBLIC_IP"]): # don't forward to ourselves..
            sock = None
            # Retrieve socket
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.connect((targetAddress, int(self.paramMap["BRIDGE_LOCAL_PORT"])))
            # fout = sock.makefile()
            print >> sys.stderr, "Socket retrieved - sending data"
            logger.info("Socket retrieved - sending data")
            # Check to see if we need to establish a key
            if (not (targetAddress in self.keyMap)):
                keyStart = time.time()
                key = self.generatePairwiseKey(sock)
                self.keyMap[targetAddress] = str(key)
                keyEnd = time.time()
                diff = keyEnd - keyStart
                logger.info('BRIDGE-KEY-EST: ' + str(diff))
                print >> sys.stderr, "New key establsihed: " + str(key)
                logger.info("New key established: " + str(key))
                # Refresh the socket (the key exchange consumed the old one)
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.connect((targetAddress, int(self.paramMap["BRIDGE_LOCAL_PORT"])))
            # Send the interest now
            payload = "i" + interest + "\n"
            fout = sock.makefile()
            fout.write(payload)
            fout.flush()
            # Block and wait for the content and then its signature
            fin = sock.makefile()
            bytes = ""
            byte = fin.read(1)
            while (byte != "\n"):
                bytes = bytes + byte
                byte = fin.read(1)
            content = str(bytes)
            # Signature
            byte = fin.read(1)
            bytes = ""
            while (byte != "\n"):
                bytes = bytes + byte
                byte = fin.read(1)
            sig = str(bytes)
            # Verify the signature (tag)
            tag = generateHMACTag(self.keyMap[targetAddress], content)
            if (tag != sig):
                print >> sys.stderr, "MAC tag verification failed (exp, got): " + str(tag) + ", " + str(sig)
                return None
            else:
                return content
        else:
            return None
    def returnContent(self, content, sourceAddress):
        # Not implemented: content return path is unsupported.
        raise RuntimeError()
# Runnable unit for testing...
if __name__ == "__main__":
    # NOTE(review): BridgeServer(None, "192.168.1.10", 9000) does not match
    # BridgeServer.__init__(host, port, mod, gen, bits, stage, ...) -- this
    # manual-test path looks stale and would raise TypeError; confirm/update.
    if (sys.argv[1] == "s"):
        server = BridgeServer(None, "192.168.1.10", 9000)
        server.start()
    else:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        addrtuple = ("192.168.1.10", 9000)
        print(addrtuple)
        sock.connect(addrtuple) # address is a tuple, e.g., targetAddress = ("www.python.org", 80)
        sock.send("hello world")
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Violin Memory, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Violin Memory iSCSI Driver for Openstack Cinder (Folsom release)
Uses Violin REST API via XG Tools to manage a vSHARE licensed
memory gateway to provide network block-storage services.
---
Each allocated lun is configured as writable with a 512b blocksize.
*** Note that the fields for zeroing the lun or performing thin
provisioning are not supported by vSHARE at this time. ***
Each new volume/lun is exported to a new iSCSI target specifically
made for it. The idea is that this allows CHAP authentication to be
managed independently on a per-volume basis. The export is then
configured to use a specific initiator group (igroup) that has been
pre-configured for use by Nova/Cinder hosts.
When an initiator has been chosen to connect to one of the available
luns (eg via 'nova volume-attach'), it will be added to the configured
igroup allowing it to see the export. It will also be given any
target location and authentication information needed to connect to
the chosen lun.
"""
import time
import random
from cinder import exception
from cinder import flags
from cinder.openstack.common import log as logging
from cinder.openstack.common import cfg
from cinder.volume.driver import ISCSIDriver
# TODO: add version checking for xgtools libraries
#
from vxg.core.session import XGSession
from vxg.core.node import XGNode
import vxg
LOG = logging.getLogger(__name__)

# Driver configuration options, registered on the global FLAGS below.
violin_opts = [
    cfg.StrOpt('gateway_vip',
               default='',
               help='IP address or hostname of the v6000 master VIP'),
    cfg.StrOpt('gateway_mga',
               default='',
               help='IP address or hostname of mg-a'),
    cfg.StrOpt('gateway_mgb',
               default='',
               help='IP address or hostname of mg-b'),
    cfg.StrOpt('gateway_user',
               default='admin',
               help='User name for connecting to the Memory Gateway'),
    cfg.StrOpt('gateway_password',
               default='',
               # Fixed copy/paste error: this option is the password, not
               # the user name.
               help='Password for connecting to the Memory Gateway'),
    cfg.IntOpt('gateway_iscsi_port',
               default=3260,
               help='IP port to use for iSCSI targets'),
    cfg.StrOpt('gateway_iscsi_target_prefix',
               default='iqn.2004-02.com.vmem:',
               help='prefix for iscsi volumes'),
    cfg.StrOpt('gateway_iscsi_igroup_name',
               default='openstack',
               help='name of igroup for initiators'),
]

FLAGS = flags.FLAGS
FLAGS.register_opts(violin_opts)
class InvalidBackendConfig(exception.CinderException):
    """Raised when the backend array is missing a required prerequisite.

    ``reason`` (supplied as a keyword at raise time) is interpolated
    into the message below.
    """
    message = _("Volume backend config is invalid: %(reason)s")
class ViolinDriver(ISCSIDriver):
    """Executes commands relating to Violin Memory Arrays."""

    def __init__(self, *args, **kwargs):
        super(ViolinDriver, self).__init__(*args, **kwargs)
        # Timestamp of the last successful API login; see _login().
        self.session_start_time = 0
        # Seconds before the cached gateway API sessions are refreshed.
        self.session_timeout = 900
        # One dict per usable iSCSI IP across both gateways:
        # {'node': <gateway host>, 'addr': <ip>, 'conn': <XG session>}
        self.array_info = []
        self.vmem_vip = None
        self.vmem_mga = None
        self.vmem_mgb = None
        self.container = ""
        self.device_id = ""

    def do_setup(self, context):
        """Any initialization the driver does while starting."""
        if not FLAGS.gateway_vip:
            raise exception.InvalidInput(reason=_(
                'Gateway VIP is not set'))
        if not FLAGS.gateway_mga:
            raise exception.InvalidInput(reason=_(
                'Gateway IP for mg-a is not set'))
        if not FLAGS.gateway_mgb:
            raise exception.InvalidInput(reason=_(
                'Gateway IP for mg-b is not set'))
        self.vmem_vip = vxg.open(FLAGS.gateway_vip, FLAGS.gateway_user,
                                 FLAGS.gateway_password)
        self.vmem_mga = vxg.open(FLAGS.gateway_mga, FLAGS.gateway_user,
                                 FLAGS.gateway_password)
        self.vmem_mgb = vxg.open(FLAGS.gateway_mgb, FLAGS.gateway_user,
                                 FLAGS.gateway_password)
        # Record every active iSCSI-capable IP on each gateway so that
        # new targets can be distributed across the cluster.
        self.gateway_iscsi_ip_addresses_mga = \
            self._get_active_iscsi_ips(self.vmem_mga)
        for ip in self.gateway_iscsi_ip_addresses_mga:
            self.array_info.append({"node": FLAGS.gateway_mga,
                                    "addr": ip,
                                    "conn": self.vmem_mga})
        self.gateway_iscsi_ip_addresses_mgb = \
            self._get_active_iscsi_ips(self.vmem_mgb)
        for ip in self.gateway_iscsi_ip_addresses_mgb:
            self.array_info.append({"node": FLAGS.gateway_mgb,
                                    "addr": ip,
                                    "conn": self.vmem_mgb})
        vip = self.vmem_vip.basic
        # Cache the flash container name and device id; both are
        # required by nearly every subsequent lun operation.
        ret_dict = vip.get_node_values("/vshare/state/local/container/*")
        if ret_dict:
            self.container = ret_dict.items()[0][1]
        ret_dict = vip.get_node_values(
            "/media/state/array/%s/chassis/system/dev_id" % self.container)
        if ret_dict:
            self.device_id = ret_dict.items()[0][1]
        ret_dict = vip.get_node_values("/wsm/inactivity_timeout")
        if ret_dict:
            # NOTE(review): self.timeout is stored but never read back;
            # _login() uses the hard-coded session_timeout -- confirm intent.
            self.timeout = ret_dict.items()[0][1]

    def check_for_setup_error(self):
        """Returns an error if prerequisites aren't met."""
        vip = self.vmem_vip.basic
        if len(self.container) == 0:
            raise InvalidBackendConfig(reason=_('container is missing'))
        if len(self.device_id) == 0:
            raise InvalidBackendConfig(reason=_('device ID is missing'))
        bn = "/vshare/config/iscsi/enable"
        resp = vip.get_node_values(bn)
        if resp[bn] != True:
            raise InvalidBackendConfig(reason=_('iSCSI is not enabled'))
        bn = "/vshare/config/igroup/%s" % FLAGS.gateway_iscsi_igroup_name
        resp = vip.get_node_values(bn)
        if len(resp.keys()) == 0:
            raise InvalidBackendConfig(reason=_('igroup is missing'))
        if len(self.gateway_iscsi_ip_addresses_mga) == 0:
            raise InvalidBackendConfig(reason=_(
                'no available iSCSI IPs on mga'))
        if len(self.gateway_iscsi_ip_addresses_mgb) == 0:
            raise InvalidBackendConfig(reason=_(
                'no available iSCSI IPs on mgb'))

    def create_volume(self, volume):
        """Creates a volume."""
        self._login()
        self._create_lun(volume)

    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot (not supported)."""
        raise NotImplementedError()

    def delete_volume(self, volume):
        """Deletes a volume."""
        self._login()
        self._delete_lun(volume)

    def create_snapshot(self, snapshot):
        """Creates a snapshot from an existing volume (not supported)."""
        raise NotImplementedError()

    def delete_snapshot(self, snapshot):
        """Deletes a snapshot (not supported)."""
        raise NotImplementedError()

    def ensure_export(self, context, volume):
        """Synchronously checks and re-exports volumes at cinder start time."""
        # NYI
        pass

    def create_export(self, context, volume):
        """Exports the volume through a dedicated iSCSI target.

        Returns:
            dict with 'provider_location' describing the iSCSI portal,
            IQN and lun id for the export.
        """
        self._login()
        vol = self._get_short_name(volume['name'])
        tgt = self._create_iscsi_target(volume)
        lun = self._export_lun(volume)
        self.vmem_vip.basic.save_config()
        iqn = "%s%s:%s" % (FLAGS.gateway_iscsi_target_prefix, tgt['node'], vol)
        # BUGFIX: use the driver's own gateway_iscsi_port option. The
        # original referenced the generic FLAGS.iscsi_port, leaving
        # gateway_iscsi_port unused.
        provider_data = {'provider_location': self._iscsi_location(
            tgt['addr'], FLAGS.gateway_iscsi_port, iqn, lun)}
        return provider_data

    def remove_export(self, context, volume):
        """Removes an export for a logical volume."""
        self._login()
        self._unexport_lun(volume)
        self._delete_iscsi_target(volume)
        self.vmem_vip.basic.save_config()

    def check_for_export(self, context, volume_id):
        """Make sure volume is exported."""
        # NYI
        pass

    def initialize_connection(self, volume, connector):
        """Initializes the connection (target<-->initiator)."""
        self._login()
        # The initiator must be in the igroup before it can see the export.
        self._add_igroup_member(connector)
        self.vmem_vip.basic.save_config()
        return super(ViolinDriver, self).initialize_connection(volume,
                                                               connector)

    def terminate_connection(self, volume, connector):
        """Terminates the connection (target<-->initiator)."""
        super(ViolinDriver, self).terminate_connection(volume, connector)
        self._login()
        self._remove_igroup_member(connector)
        self.vmem_vip.basic.save_config()

    def _create_lun(self, volume):
        """Creates a new lun.

        The equivalent CLI command is "lun create container
        <container_name> name <lun_name> size <gb>".

        Arguments:
            volume -- volume object provided by the Manager
        """
        v = self.vmem_vip
        LOG.info(_("[VMEM] Creating lun %s (%d GB)"),
                 volume['name'], volume['size'])
        # using the defaults for other fields: (container, name, size,
        # quantity, nozero, thin, readonly, startnum, blksize)
        #
        # Retry a few times: the backend reports 'try again later' while
        # vshared is still busy with an earlier request.
        for i in range(3):
            self._wait_for_lockstate()
            resp = v.lun.create_lun(self.container, volume['name'],
                                    volume['size'], 1, "0", "0", "w", 1, 512)
            if resp['code'] == 0 and 'try again later' not in resp['message']:
                break
        if resp['code'] != 0 or 'try again later' in resp['message']:
            raise exception.Error(_('Failed to create LUN: %d, %s')
                                  % (resp['code'], resp['message']))

    def _delete_lun(self, volume):
        """Deletes a lun.

        The equivalent CLI command is "no lun create container
        <container_name> name <lun_name>".

        Arguments:
            volume -- volume object provided by the Manager
        """
        v = self.vmem_vip
        LOG.info(_("[VMEM] Deleting lun %s"), volume['name'])
        for i in range(3):
            self._wait_for_lockstate()
            resp = v.lun.bulk_delete_luns(self.container, volume['name'])
            if resp['code'] == 0 and 'try again later' not in resp['message']:
                break
        if resp['code'] != 0 or 'try again later' in resp['message']:
            raise exception.Error(_('Failed to delete LUN: %d, %s')
                                  % (resp['code'], resp['message']))

    def _create_iscsi_target(self, volume):
        """Creates a new target for use in exporting a lun.

        Openstack does not yet support multipathing. We still create
        HA targets but we pick a single random target for the
        Openstack infrastructure to use. This at least allows us to
        evenly distribute LUN connections across the storage cluster.

        The equivalent CLI commands are "iscsi target create
        <target_name>" and "iscsi target bind <target_name> to
        <ip_of_mg_eth_intf>".

        Arguments:
            volume -- volume object provided by the Manager
        Returns:
            reference to randomly selected target object
        """
        v = self.vmem_vip
        target_name = self._get_short_name(volume['name'])
        LOG.info(_("[VMEM] Creating iscsi target %s"), target_name)
        resp = v.iscsi.create_iscsi_target(target_name)
        if resp['code'] != 0:
            raise exception.Error(_('Failed to create iscsi target: %d, %s')
                                  % (resp['code'], resp['message']))
        # Targets must be bound on each gateway separately.
        resp = self.vmem_mga.iscsi.bind_ip_to_target(
            target_name, self.gateway_iscsi_ip_addresses_mga)
        if resp['code'] != 0:
            raise exception.Error(_("Failed to bind iSCSI targets: %d, %s")
                                  % (resp['code'], resp['message']))
        resp = self.vmem_mgb.iscsi.bind_ip_to_target(
            target_name, self.gateway_iscsi_ip_addresses_mgb)
        if resp['code'] != 0:
            raise exception.Error(_("Failed to bind iSCSI targets: %d, %s")
                                  % (resp['code'], resp['message']))
        return random.choice(self.array_info)

    def _delete_iscsi_target(self, volume):
        """Deletes the iscsi target for a lun.

        iSCSI targets must be deleted from each gateway separately.
        The CLI equivalent is "no iscsi target create <target_name>".

        Arguments:
            volume -- volume object provided by the Manager
        """
        v = self.vmem_vip
        target_name = self._get_short_name(volume['name'])
        LOG.info(_("[VMEM] Deleting iscsi target for %s"), target_name)
        resp = self.vmem_mga.iscsi.unbind_ip_from_target(
            target_name, self.gateway_iscsi_ip_addresses_mga)
        if resp['code'] != 0:
            raise exception.Error(_("Failed to unbind iSCSI targets: %d, %s")
                                  % (resp['code'], resp['message']))
        resp = self.vmem_mgb.iscsi.unbind_ip_from_target(
            target_name, self.gateway_iscsi_ip_addresses_mgb)
        if resp['code'] != 0:
            raise exception.Error(_("Failed to unbind iSCSI targets: %d, %s")
                                  % (resp['code'], resp['message']))
        resp = v.iscsi.delete_iscsi_target(target_name)
        if resp['code'] != 0:
            raise exception.Error(_('Failed to delete iSCSI target: %d, %s')
                                  % (resp['code'], resp['message']))

    def _export_lun(self, volume):
        """Generates the export configuration for the given volume.

        The equivalent CLI command is "lun export container
        <container_name> name <lun_name>".

        Arguments:
            volume -- volume object provided by the Manager
        Returns:
            lun_id -- the LUN ID assigned by the backend
        """
        v = self.vmem_vip
        target_name = self._get_short_name(volume['name'])
        LOG.info(_("[VMEM] Exporting lun %s"), volume['name'])
        resp = v.lun.export_lun(self.container, volume['name'], target_name,
                                FLAGS.gateway_iscsi_igroup_name, -1)
        if resp['code'] != 0:
            raise exception.Error(_('LUN export failed: %d, %s')
                                  % (resp['code'], resp['message']))
        # The export is processed asynchronously by vshared; wait for
        # the binding to appear before querying the lun id.
        self._wait_for_exportstate(volume['name'], True)
        lun_id = self._get_lun_id(self.container, volume['name'], target_name,
                                  FLAGS.gateway_iscsi_igroup_name)
        return lun_id

    def _unexport_lun(self, volume):
        """Removes the export configuration for the given volume.

        The equivalent CLI command is "no lun export container
        <container_name> name <lun_name>".

        Arguments:
            volume -- volume object provided by the Manager
        """
        v = self.vmem_vip
        LOG.info(_("[VMEM] Unexporting lun %s"), volume['name'])
        resp = v.lun.unexport_lun(self.container, volume['name'],
                                  "all", "all", -1)
        if resp['code'] != 0:
            raise exception.Error(_("LUN unexport failed: %d, %s")
                                  % (resp['code'], resp['message']))
        self._wait_for_exportstate(volume['name'], False)

    def _add_igroup_member(self, connector):
        """Add an initiator to the openstack igroup so it can see exports.

        The equivalent CLI command is "igroup addto name <igroup_name>
        initiators <initiator_name>".

        Arguments:
            connector -- connector object provided by the Manager
        """
        v = self.vmem_vip
        LOG.info(_("[VMEM] Adding initiator %s to igroup"),
                 connector['initiator'])
        resp = v.igroup.add_initiators(FLAGS.gateway_iscsi_igroup_name,
                                       connector['initiator'])
        if resp['code'] != 0:
            raise exception.Error(_('Failed to add igroup member: %d, %s')
                                  % (resp['code'], resp['message']))

    def _remove_igroup_member(self, connector):
        """Removes an initiator from the openstack igroup.

        The equivalent CLI command is "no igroup addto name
        <igroup_name> initiators <initiator_name>".

        Arguments:
            connector -- connector object passed from the manager
        """
        v = self.vmem_vip
        # do not remove the initiator from the igroup if it still has
        # any active sessions on the backend
        #
        # NOTE(review): assumes get_node_values maps each global-state
        # node path to a numeric session id -- confirm against the XG API.
        ids = v.basic.get_node_values('/vshare/state/global/*')
        for i in ids:
            bn = "/vshare/state/global/%d/target/iscsi/**" % ids[i]
            iscsi_targets = v.basic.get_node_values(bn)
            for t in iscsi_targets:
                if iscsi_targets[t] == connector['initiator']:
                    return
        LOG.info(_("[VMEM] Removing initiator %s from igroup"),
                 connector['initiator'])
        resp = v.igroup.delete_initiators(FLAGS.gateway_iscsi_igroup_name,
                                          connector['initiator'])
        if resp['code'] != 0 and resp['code'] != 14036:
            # -code 14036, message 'Igroup <igroup> doesn't include
            # initiator <initiator>'
            #
            raise exception.Error(_('Failed to remove igroup member: %s, %s')
                                  % (resp['code'], resp['message']))

    def _login(self, force=False):
        """Get new api creds from the backend, only if needed.

        Arguments:
            force -- re-login on all sessions regardless of last login time
        Returns:
            True if sessions were refreshed, false otherwise.
        """
        now = time.time()
        if force or abs(now - self.session_start_time) >= self.session_timeout:
            self.vmem_vip.basic.login()
            self.vmem_mga.basic.login()
            self.vmem_mgb.basic.login()
            self.session_start_time = now
            return True
        return False

    def _get_lun_id(self, container_name, volume_name, target_name,
                    igroup_name):
        """Queries the gateway to find the lun id for the exported volume.

        Arguments:
            container_name -- backend array flash container name
            volume_name    -- LUN to query
            target_name    -- iSCSI target associated with the LUN
            igroup_name    -- igroup associated with the LUN
        Returns:
            LUN ID for the exported lun as an integer.
        """
        vip = self.vmem_vip.basic
        bn = ("/vshare/config/export/container/%s/lun/%s/target/%s"
              "/initiator/%s/lun_id"
              % (container_name, volume_name, target_name, igroup_name))
        resp = vip.get_node_values(bn)
        return resp[bn]

    def _get_short_name(self, volume_name):
        """Creates a vSHARE-compatible iSCSI target name.

        The Folsom-style volume names are prefix(7) + uuid(36), which
        is too long for vSHARE for target names. To keep things
        simple we can just truncate the name to 32 chars.

        Arguments:
            volume_name -- name of volume/lun
        Returns:
            Shortened volume name as a string.
        """
        return volume_name[:32]

    def _iscsi_location(self, ip, port, iqn, lun=None):
        """Create a properly formatted provider_location string.

        Arguments:
            ip   -- iSCSI target IP address
            port -- iSCSI target service port
            iqn  -- iSCSI target IQN
            lun  -- ID of the exported LUN
        Returns:
            provider_location as a formatted string.
        """
        # the main driver.py _get_iscsi_properties() function has
        # broken field parsing for the location string made here. We
        # work around this by putting a blank space for the third
        # field
        #
        return "%s:%s,%s%s %s" % (ip, port, " ", iqn, lun)

    def _wait_for_exportstate(self, volume_name, state=False):
        """Polls volume's export configuration root.

        XG sets/queries following a request to create or delete a
        lun export may fail on the backend if vshared is still
        processing the export action. We can check whether it is
        done by polling the export binding for a lun to
        ensure it is created or deleted.

        Arguments:
            volume_name -- name of volume to be polled
            state -- True to poll for existence, False for lack of
        Returns:
            True if the export state was eventually found, false otherwise.
        """
        status = False
        vip = self.vmem_vip.basic
        bn = "/vshare/config/export/container/%s/lun/%s" \
            % (self.container, volume_name)
        # Poll for up to 30 seconds.
        for i in range(30):
            resp = vip.get_node_values(bn)
            if state and len(resp.keys()):
                status = True
                break
            elif (not state) and (not len(resp.keys())):
                break
            else:
                time.sleep(1)
        return status

    def _wait_for_lockstate(self):
        """Polls configured backend LVM lock.

        Lun deletion will fail on the backend if vshared is still busy
        deleting a lun from a previous request. We can check whether
        it is 'ready' by polling the LVM lockstate for each gateway.
        """
        vip = self.vmem_vip.basic
        opts1 = [XGNode('container', 'string', self.container),
                 XGNode('port', 'uint8', 1),
                 XGNode('dev_id', 'string', self.device_id)]
        opts2 = [XGNode('container', 'string', self.container),
                 XGNode('port', 'uint8', 2),
                 XGNode('dev_id', 'string', self.device_id)]
        for i in range(30):
            resp1 = vip.perform_action('/vshare/actions/vlock/lockstate',
                                       opts1)
            resp2 = vip.perform_action('/vshare/actions/vlock/lockstate',
                                       opts2)
            # BUGFIX: both gateways must report an unlocked ('0') state.
            # The original only truth-tested resp2's message, so any
            # non-empty message on port 2 counted as unlocked.
            if resp1['message'][0] == '0' and resp2['message'][0] == '0':
                break
            else:
                time.sleep(1)

    def _get_active_iscsi_ips(self, mg_conn):
        """Get a list of gateway IP addresses that can be used for iSCSI.

        Arguments:
            mg_conn -- active XG connection to one of the gateways
        Returns:
            active_gw_iscsi_ips -- list of IP addresses
        """
        active_gw_iscsi_ips = []
        # Management/loopback interfaces are never usable for iSCSI.
        interfaces_to_skip = ['lo', 'vlan10', 'eth1', 'eth2', 'eth3']
        bn = "/net/interface/config/*"
        intf_list = mg_conn.basic.get_node_values(bn)
        for i in intf_list:
            if intf_list[i] in interfaces_to_skip:
                continue
            bn1 = "/net/interface/state/%s/addr/ipv4/1/ip" % intf_list[i]
            bn2 = "/net/interface/state/%s/flags/link_up" % intf_list[i]
            resp = mg_conn.basic.get_node_values([bn1, bn2])
            # Only interfaces with both an IPv4 address and link up count.
            if len(resp.keys()) == 2 and resp[bn2] == True:
                active_gw_iscsi_ips.append(resp[bn1])
        return active_gw_iscsi_ips
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class ActionGroupList(msrest.serialization.Model):
    """A paged collection of action groups.

    :param value: The list of action groups.
    :type value: list[~$(python-base-namespace).v2017_04_01.models.ActionGroupResource]
    :param next_link: Provides the link to retrieve the next set of elements.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ActionGroupResource]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ActionGroupList, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class ActionGroupPatchBody(msrest.serialization.Model):
    """Request body for patch operations on an action group.

    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param enabled: Indicates whether this action group is enabled. If an action group is not
     enabled, then none of its actions will be activated.
    :type enabled: bool
    """

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'enabled': {'key': 'properties.enabled', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super(ActionGroupPatchBody, self).__init__(**kwargs)
        self.tags = kwargs.get('tags')
        # A patch that omits 'enabled' leaves the group enabled.
        self.enabled = kwargs.get('enabled', True)
class Resource(msrest.serialization.Model):
    """Common fields for any azure resource object.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Azure resource Id.
    :vartype id: str
    :ivar name: Azure resource name.
    :vartype name: str
    :ivar type: Azure resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(self, **kwargs):
        super(Resource, self).__init__(**kwargs)
        # id/name/type are populated by the service and never sent.
        self.id = None
        self.name = None
        self.type = None
        self.location = kwargs['location']
        self.tags = kwargs.get('tags')
class ActionGroupResource(Resource):
    """An action group resource with its receiver collections.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Azure resource Id.
    :vartype id: str
    :ivar name: Azure resource name.
    :vartype name: str
    :ivar type: Azure resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param group_short_name: The short name of the action group. This will be used in SMS messages.
    :type group_short_name: str
    :param enabled: Indicates whether this action group is enabled. If an action group is not
     enabled, then none of its receivers will receive communications.
    :type enabled: bool
    :param email_receivers: The list of email receivers that are part of this action group.
    :type email_receivers: list[~$(python-base-namespace).v2017_04_01.models.EmailReceiver]
    :param sms_receivers: The list of SMS receivers that are part of this action group.
    :type sms_receivers: list[~$(python-base-namespace).v2017_04_01.models.SmsReceiver]
    :param webhook_receivers: The list of webhook receivers that are part of this action group.
    :type webhook_receivers: list[~$(python-base-namespace).v2017_04_01.models.WebhookReceiver]
    :param itsm_receivers: The list of ITSM receivers that are part of this action group.
    :type itsm_receivers: list[~$(python-base-namespace).v2017_04_01.models.ItsmReceiver]
    :param azure_app_push_receivers: The list of AzureAppPush receivers that are part of this
     action group.
    :type azure_app_push_receivers:
     list[~$(python-base-namespace).v2017_04_01.models.AzureAppPushReceiver]
    :param automation_runbook_receivers: The list of AutomationRunbook receivers that are part of
     this action group.
    :type automation_runbook_receivers:
     list[~$(python-base-namespace).v2017_04_01.models.AutomationRunbookReceiver]
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'group_short_name': {'max_length': 12, 'min_length': 0},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'group_short_name': {'key': 'properties.groupShortName', 'type': 'str'},
        'enabled': {'key': 'properties.enabled', 'type': 'bool'},
        'email_receivers': {'key': 'properties.emailReceivers', 'type': '[EmailReceiver]'},
        'sms_receivers': {'key': 'properties.smsReceivers', 'type': '[SmsReceiver]'},
        'webhook_receivers': {'key': 'properties.webhookReceivers', 'type': '[WebhookReceiver]'},
        'itsm_receivers': {'key': 'properties.itsmReceivers', 'type': '[ItsmReceiver]'},
        'azure_app_push_receivers': {'key': 'properties.azureAppPushReceivers', 'type': '[AzureAppPushReceiver]'},
        'automation_runbook_receivers': {'key': 'properties.automationRunbookReceivers', 'type': '[AutomationRunbookReceiver]'},
    }

    def __init__(self, **kwargs):
        super(ActionGroupResource, self).__init__(**kwargs)
        self.group_short_name = kwargs.get('group_short_name')
        # New action groups default to enabled.
        self.enabled = kwargs.get('enabled', True)
        self.email_receivers = kwargs.get('email_receivers')
        self.sms_receivers = kwargs.get('sms_receivers')
        self.webhook_receivers = kwargs.get('webhook_receivers')
        self.itsm_receivers = kwargs.get('itsm_receivers')
        self.azure_app_push_receivers = kwargs.get('azure_app_push_receivers')
        self.automation_runbook_receivers = kwargs.get(
            'automation_runbook_receivers')
class ActivityLogAlertActionGroup(msrest.serialization.Model):
    """A reference to an Azure Action Group.

    All required parameters must be populated in order to send to Azure.

    :param action_group_id: Required. The resourceId of the action group. This cannot be null or
     empty.
    :type action_group_id: str
    :param webhook_properties: the dictionary of custom properties to include with the post
     operation. These data are appended to the webhook payload.
    :type webhook_properties: dict[str, str]
    """

    _validation = {
        'action_group_id': {'required': True},
    }

    _attribute_map = {
        'action_group_id': {'key': 'actionGroupId', 'type': 'str'},
        'webhook_properties': {'key': 'webhookProperties', 'type': '{str}'},
    }

    def __init__(self, **kwargs):
        super(ActivityLogAlertActionGroup, self).__init__(**kwargs)
        self.action_group_id = kwargs['action_group_id']
        self.webhook_properties = kwargs.get('webhook_properties')
class ActivityLogAlertActionList(msrest.serialization.Model):
    """The collection of actions attached to an activity log alert.

    :param action_groups: The list of activity log alerts.
    :type action_groups:
     list[~$(python-base-namespace).v2017_04_01.models.ActivityLogAlertActionGroup]
    """

    _attribute_map = {
        'action_groups': {'key': 'actionGroups', 'type': '[ActivityLogAlertActionGroup]'},
    }

    def __init__(self, **kwargs):
        super(ActivityLogAlertActionList, self).__init__(**kwargs)
        self.action_groups = kwargs.get('action_groups')
class ActivityLogAlertAllOfCondition(msrest.serialization.Model):
    """A compound condition satisfied only when every member condition is met.

    All required parameters must be populated in order to send to Azure.

    :param all_of: Required. The list of activity log alert conditions.
    :type all_of: list[~$(python-base-namespace).v2017_04_01.models.ActivityLogAlertLeafCondition]
    """

    _validation = {
        'all_of': {'required': True},
    }

    _attribute_map = {
        'all_of': {'key': 'allOf', 'type': '[ActivityLogAlertLeafCondition]'},
    }

    def __init__(self, **kwargs):
        super(ActivityLogAlertAllOfCondition, self).__init__(**kwargs)
        self.all_of = kwargs['all_of']
class ActivityLogAlertLeafCondition(msrest.serialization.Model):
    """A single condition comparing one activity log field against a value.

    All required parameters must be populated in order to send to Azure.

    :param field: Required. The name of the field that this condition will examine. The possible
     values for this field are (case-insensitive): 'resourceId', 'category', 'caller', 'level',
     'operationName', 'resourceGroup', 'resourceProvider', 'status', 'subStatus', 'resourceType', or
     anything beginning with 'properties.'.
    :type field: str
    :param equals: Required. The field value will be compared to this value (case-insensitive) to
     determine if the condition is met.
    :type equals: str
    """

    _validation = {
        'field': {'required': True},
        'equals': {'required': True},
    }

    _attribute_map = {
        'field': {'key': 'field', 'type': 'str'},
        'equals': {'key': 'equals', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ActivityLogAlertLeafCondition, self).__init__(**kwargs)
        self.field = kwargs['field']
        self.equals = kwargs['equals']
class ActivityLogAlertList(msrest.serialization.Model):
    """A paged collection of activity log alerts.

    :param value: The list of activity log alerts.
    :type value: list[~$(python-base-namespace).v2017_04_01.models.ActivityLogAlertResource]
    :param next_link: Provides the link to retrieve the next set of elements.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ActivityLogAlertResource]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ActivityLogAlertList, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class ActivityLogAlertPatchBody(msrest.serialization.Model):
    """Request body for patch operations on an activity log alert.

    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param enabled: Indicates whether this activity log alert is enabled. If an activity log alert
     is not enabled, then none of its actions will be activated.
    :type enabled: bool
    """

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'enabled': {'key': 'properties.enabled', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super(ActivityLogAlertPatchBody, self).__init__(**kwargs)
        self.tags = kwargs.get('tags')
        # A patch that omits 'enabled' leaves the alert enabled.
        self.enabled = kwargs.get('enabled', True)
class ActivityLogAlertResource(Resource):
    """An activity log alert resource with its scopes, condition and actions.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Azure resource Id.
    :vartype id: str
    :ivar name: Azure resource name.
    :vartype name: str
    :ivar type: Azure resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param scopes: A list of resourceIds that will be used as prefixes. The alert will only apply
     to activityLogs with resourceIds that fall under one of these prefixes. This list must include
     at least one item.
    :type scopes: list[str]
    :param enabled: Indicates whether this activity log alert is enabled. If an activity log alert
     is not enabled, then none of its actions will be activated.
    :type enabled: bool
    :param condition: The condition that will cause this alert to activate.
    :type condition: ~$(python-base-namespace).v2017_04_01.models.ActivityLogAlertAllOfCondition
    :param actions: The actions that will activate when the condition is met.
    :type actions: ~$(python-base-namespace).v2017_04_01.models.ActivityLogAlertActionList
    :param description: A description of this activity log alert.
    :type description: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'scopes': {'key': 'properties.scopes', 'type': '[str]'},
        'enabled': {'key': 'properties.enabled', 'type': 'bool'},
        'condition': {'key': 'properties.condition', 'type': 'ActivityLogAlertAllOfCondition'},
        'actions': {'key': 'properties.actions', 'type': 'ActivityLogAlertActionList'},
        'description': {'key': 'properties.description', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ActivityLogAlertResource, self).__init__(**kwargs)
        self.scopes = kwargs.get('scopes')
        # New activity log alerts default to enabled.
        self.enabled = kwargs.get('enabled', True)
        self.condition = kwargs.get('condition')
        self.actions = kwargs.get('actions')
        self.description = kwargs.get('description')
class AutomationRunbookReceiver(msrest.serialization.Model):
    """A notification receiver backed by an Azure Automation Runbook.

    All required parameters must be populated in order to send to Azure.

    :param automation_account_id: Required. The Azure automation account Id which holds this
     runbook and authenticate to Azure resource.
    :type automation_account_id: str
    :param runbook_name: Required. The name for this runbook.
    :type runbook_name: str
    :param webhook_resource_id: Required. The resource id for webhook linked to this runbook.
    :type webhook_resource_id: str
    :param is_global_runbook: Required. Indicates whether this instance is global runbook.
    :type is_global_runbook: bool
    :param name: Indicates name of the webhook.
    :type name: str
    :param service_uri: The URI where webhooks should be sent.
    :type service_uri: str
    """

    _validation = {
        'automation_account_id': {'required': True},
        'runbook_name': {'required': True},
        'webhook_resource_id': {'required': True},
        'is_global_runbook': {'required': True},
    }

    _attribute_map = {
        'automation_account_id': {'key': 'automationAccountId', 'type': 'str'},
        'runbook_name': {'key': 'runbookName', 'type': 'str'},
        'webhook_resource_id': {'key': 'webhookResourceId', 'type': 'str'},
        'is_global_runbook': {'key': 'isGlobalRunbook', 'type': 'bool'},
        'name': {'key': 'name', 'type': 'str'},
        'service_uri': {'key': 'serviceUri', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(AutomationRunbookReceiver, self).__init__(**kwargs)
        self.automation_account_id = kwargs['automation_account_id']
        self.runbook_name = kwargs['runbook_name']
        self.webhook_resource_id = kwargs['webhook_resource_id']
        self.is_global_runbook = kwargs['is_global_runbook']
        self.name = kwargs.get('name')
        self.service_uri = kwargs.get('service_uri')
class AzureAppPushReceiver(msrest.serialization.Model):
    """A push-notification receiver for the Azure mobile app.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The name of the Azure mobile app push receiver. Names must be unique
     across all receivers within an action group.
    :type name: str
    :param email_address: Required. The email address registered for the Azure mobile app.
    :type email_address: str
    """

    _validation = {
        'name': {'required': True},
        'email_address': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'email_address': {'key': 'emailAddress', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(AzureAppPushReceiver, self).__init__(**kwargs)
        self.name = kwargs['name']
        self.email_address = kwargs['email_address']
class EmailReceiver(msrest.serialization.Model):
    """An email receiver.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The name of the email receiver. Names must be unique across all
     receivers within an action group.
    :type name: str
    :param email_address: Required. The email address of this receiver.
    :type email_address: str
    :ivar status: The receiver status of the e-mail. Possible values include: "NotSpecified",
     "Enabled", "Disabled".
    :vartype status: str or ~$(python-base-namespace).v2017_04_01.models.ReceiverStatus
    """

    # 'status' is populated by the service and therefore read-only.
    _validation = {
        'name': {'required': True},
        'email_address': {'required': True},
        'status': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'email_address': {'key': 'emailAddress', 'type': 'str'},
        'status': {'key': 'status', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(EmailReceiver, self).__init__(**kwargs)
        # Required values: a missing keyword raises KeyError at construction.
        self.name = kwargs['name']
        self.email_address = kwargs['email_address']
        # Server-populated; always starts out unset on the client side.
        self.status = None
class EnableRequest(msrest.serialization.Model):
    """Describes a receiver that should be resubscribed.

    All required parameters must be populated in order to send to Azure.

    :param receiver_name: Required. The name of the receiver to resubscribe.
    :type receiver_name: str
    """

    _validation = {
        'receiver_name': {'required': True},
    }

    _attribute_map = {
        'receiver_name': {'key': 'receiverName', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(EnableRequest, self).__init__(**kwargs)
        # Required value: a missing keyword raises KeyError at construction.
        self.receiver_name = kwargs['receiver_name']
class ErrorResponse(msrest.serialization.Model):
    """Describes the format of Error response.

    :param code: Error code.
    :type code: str
    :param message: Error message indicating why the operation failed.
    :type message: str
    """

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ErrorResponse, self).__init__(**kwargs)
        # Both fields are optional and default to None when absent.
        self.code = kwargs.get('code')
        self.message = kwargs.get('message')
class ItsmReceiver(msrest.serialization.Model):
    """An Itsm receiver.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The name of the Itsm receiver. Names must be unique across all
     receivers within an action group.
    :type name: str
    :param workspace_id: Required. OMS LA instance identifier.
    :type workspace_id: str
    :param connection_id: Required. Unique identification of ITSM connection among multiple
     defined in above workspace.
    :type connection_id: str
    :param ticket_configuration: Required. JSON blob for the configurations of the ITSM action.
     CreateMultipleWorkItems option will be part of this blob as well.
    :type ticket_configuration: str
    :param region: Required. Region in which workspace resides. Supported
     values:'centralindia','japaneast','southeastasia','australiasoutheast','uksouth','westcentralus','canadacentral','eastus','westeurope'.
    :type region: str
    """

    # Every field is mandatory for this receiver type.
    _validation = {
        'name': {'required': True},
        'workspace_id': {'required': True},
        'connection_id': {'required': True},
        'ticket_configuration': {'required': True},
        'region': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'workspace_id': {'key': 'workspaceId', 'type': 'str'},
        'connection_id': {'key': 'connectionId', 'type': 'str'},
        'ticket_configuration': {'key': 'ticketConfiguration', 'type': 'str'},
        'region': {'key': 'region', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ItsmReceiver, self).__init__(**kwargs)
        # Required values: a missing keyword raises KeyError at construction.
        self.name = kwargs['name']
        self.workspace_id = kwargs['workspace_id']
        self.connection_id = kwargs['connection_id']
        self.ticket_configuration = kwargs['ticket_configuration']
        self.region = kwargs['region']
class SmsReceiver(msrest.serialization.Model):
    """An SMS receiver.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The name of the SMS receiver. Names must be unique across all
     receivers within an action group.
    :type name: str
    :param country_code: Required. The country code of the SMS receiver.
    :type country_code: str
    :param phone_number: Required. The phone number of the SMS receiver.
    :type phone_number: str
    :ivar status: The status of the receiver. Possible values include: "NotSpecified", "Enabled",
     "Disabled".
    :vartype status: str or ~$(python-base-namespace).v2017_04_01.models.ReceiverStatus
    """

    # 'status' is populated by the service and therefore read-only.
    _validation = {
        'name': {'required': True},
        'country_code': {'required': True},
        'phone_number': {'required': True},
        'status': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'country_code': {'key': 'countryCode', 'type': 'str'},
        'phone_number': {'key': 'phoneNumber', 'type': 'str'},
        'status': {'key': 'status', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(SmsReceiver, self).__init__(**kwargs)
        # Required values: a missing keyword raises KeyError at construction.
        self.name = kwargs['name']
        self.country_code = kwargs['country_code']
        self.phone_number = kwargs['phone_number']
        # Server-populated; always starts out unset on the client side.
        self.status = None
class WebhookReceiver(msrest.serialization.Model):
    """A webhook receiver.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The name of the webhook receiver. Names must be unique across all
     receivers within an action group.
    :type name: str
    :param service_uri: Required. The URI where webhooks should be sent.
    :type service_uri: str
    """

    # Both fields are mandatory for serialization.
    _validation = {
        'name': {'required': True},
        'service_uri': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'service_uri': {'key': 'serviceUri', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(WebhookReceiver, self).__init__(**kwargs)
        # Required values: a missing keyword raises KeyError at construction.
        self.name = kwargs['name']
        self.service_uri = kwargs['service_uri']
| |
"""
KBase narrative service and method API.
The main classes defined here are :class:`Service` and :class:`ServiceMethod`.
See :func:`example` for an example of usage.
"""
__author__ = ["Dan Gunter <dkgunter@lbl.gov>", "William Riehl <wjriehl@lbl.gov>"]
__version__ = "0.0.1"
## Imports
# Stdlib
from collections import deque
import json
import logging
import os
import re
import sys
import time
import traceback
# Third-party
import IPython.utils.traitlets as trt
from IPython.core.application import Application
# Local
from biokbase.narrative.common import kbtypes, kblogging
from biokbase.narrative.common.log_common import EVENT_MSG_SEP
from biokbase.narrative.common.url_config import URLS
from biokbase.narrative.common import util
from biokbase.narrative.common.kbjob_manager import KBjobManager
# Logging
_log = logging.getLogger(__name__)
# Init job manager
job_manager = KBjobManager()
## Exceptions
class ServiceError(Exception):
    """Base class for Service errors.

    Should not normally be instantiated directly.
    """

    def __init__(self, errmsg):
        Exception.__init__(self, errmsg)
        # Structured details reported back to the narrative front-end.
        self._info = dict(
            severity='FATAL',
            type=self.__class__.__name__,
            msg=str(errmsg),
        )

    def add_info(self, key, value):
        """Attach an extra key/value pair to the error report."""
        self._info[key] = value

    def as_json(self):
        """Serialize the accumulated error details as a JSON string."""
        return json.dumps(self._info)
class DuplicateServiceError(ServiceError):
    """Raised by register_service() when a service name is already registered."""
    pass
class ServiceMethodError(ServiceError):
    """Base class for all ServiceMethod errors."""

    #: Field names for one formatted traceback entry, in the order that
    #: traceback.extract_tb() produces them.
    TB_KEYS = 'filename', 'line', 'function', 'text'

    def __init__(self, method, errmsg, tb=None):
        """Create the error.

        :param method: The ServiceMethod instance the error occurred in
        :param str errmsg: Description of the problem
        :param tb: Optional traceback object to attach to the report
        """
        msg = "in function '{}': {}".format(method.name, errmsg)
        ServiceError.__init__(self, msg)
        self.add_info('method_name', method.name)
        if tb is not None:
            self.add_info('traceback',
                          self.traceback_dict(tb))

    def traceback_dict(self, tb):
        """Extract and reformat traceback as a list of dicts, for reporting
        in the narrative.

        :param tb: Traceback object (e.g. from sys.exc_info()[2]).
        :return: List where each stack entry is converted into a dict with
                 key/value pairs named by TB_KEYS.
        :rtype: list of dict
        """
        etb = traceback.extract_tb(tb)
        # zip() pairs each entry's fields with TB_KEYS. (The original used
        # Python-2-only xrange(), and its docstring wrongly claimed a dict
        # return; a list of per-frame dicts is what is actually produced.)
        return [dict(zip(self.TB_KEYS, entry)) for entry in etb]
class ServiceMethodParameterError(ServiceMethodError):
    """Bad parameter for ServiceMethod."""

    def __init__(self, method, errmsg):
        # The full message carries context; 'details' keeps the raw reason.
        ServiceMethodError.__init__(self, method, "bad parameter: " + errmsg)
        self.add_info('details', errmsg)
class ServiceRegistryFormatError(ServiceMethodError):
    """Bad format for Service Registry."""

    def __init__(self, method, errmsg):
        # The full message carries context; 'details' keeps the raw reason.
        ServiceMethodError.__init__(self, method, "bad registry format: " + errmsg)
        self.add_info('details', errmsg)
## Utility functions / classes
def is_sequence(arg):
    """Return True only if the input acts like a sequence (supports
    indexing or iteration) but does NOT act like a string.

    :param arg: Any object
    :return: True for list/tuple-like objects, False for strings and scalars
    :rtype: bool
    """
    # Strings are identified by their strip() method and always rejected.
    # (The original expression mixed 'and'/'or' without parentheses, so an
    # object with both strip() and __iter__ -- e.g. every str on Python 3
    # -- slipped through, contradicting the documented contract.)
    if hasattr(arg, "strip"):
        return False
    return hasattr(arg, "__getitem__") or hasattr(arg, "__iter__")
def get_func_desc(fn):
    """Get a function's short description from its docstring.

    Collects the stripped docstring lines up to (not including) the first
    blank line and joins them with single spaces.

    :param fn: Function whose __doc__ is read
    :return: One-line description string
    :rtype: str
    """
    collected = []
    for raw_line in fn.__doc__.split("\n"):
        stripped = raw_line.strip()
        if not stripped:
            # First blank line ends the summary section.
            break
        collected.append(stripped)
    return ' '.join(collected)
def get_func_info(fn):
    """Get parameter and return metadata from a function's docstring.

    Recognized docstring fields: :param, :type, :default, :ui_name,
    :return, :rtype, :input_widget, :output_widget, :embed.
    Types named in :type / :rtype are eval()'d, so they are expected to be
    expressions resolvable in this module (normally kbtypes traits).

    :param fn: Function whose docstring is parsed
    :return: (list of parameter trait instances, output trait or None,
              visualization-info dict)
    """
    doc = fn.__doc__
    params, return_ = {}, {}
    param_order = []  # remember the declaration order of :param fields
    for line in doc.split("\n"):
        line = line.strip()
        # :param <name>: <description>
        if line.startswith(":param"):
            _, name, desc = line.split(":", 2)
            name = name[6:].strip()  # skip 'param '
            params[name] = {'desc': desc.strip()}
            param_order.append(name)
        # :type (of parameter, should be in kbtypes)
        elif line.startswith(":type"):
            _, name, desc = line.split(":", 2)
            name = name[5:].strip()  # skip 'type '
            if not name in params:
                raise ValueError("'type' without 'param' for {}".format(name))
            # SECURITY NOTE: eval() of docstring text. Only acceptable
            # because docstrings come from trusted local service code, never
            # from user input.
            typeobj = eval(desc.strip())
            params[name]['type'] = typeobj
        # :default (value of parameter)
        elif line.startswith(":default"):
            _, name, value = line.split(":", 2)
            name = name[8:].strip()  # skip 'default '
            if not name in params:
                raise ValueError("'default' without 'param' for {}".format(name))
            params[name]['default'] = value.strip()  # XXX: should allow quoting
        # :ui_name (of parameter) - name to display in the user interface
        elif line.startswith(":ui_name"):
            _, name, ui_name = line.split(":", 2)
            name = name[8:].strip()  # skip 'ui_name '
            if not name in params:
                raise ValueError("'ui_name' without 'param' for {}".format(name))
            ui_name = ui_name.strip()
            params[name]['ui_name'] = ui_name
        # :return - name of thing to return
        elif line.startswith(":return"):
            _1, _2, desc = line.split(":", 2)
            return_['desc'] = desc.strip()
        # :rtype - type of thing to return
        elif line.startswith(":rtype"):
            _1, _2, desc = line.split(":", 2)
            # SECURITY NOTE: see eval() note above.
            typeobj = eval(desc.strip())
            return_['type'] = typeobj
        # :input_widget - the default visualization widget for this method.
        # Should be the name as it's invoked in Javascript.
        elif line.startswith(":input_widget"):
            _1, _2, widget = line.split(":", 2)
            return_['input_widget'] = widget.strip()
        # :output_widget - the visualization widget for this method.
        # Should be the name as it's invoked in Javascript.
        elif line.startswith(":output_widget"):
            _1, _2, widget = line.split(":", 2)
            return_['output_widget'] = widget.strip()
        # :embed - True if the widget should be automatically embedded.
        # so, probably always True, but not necessarily
        elif line.startswith(":embed"):
            _1, _2, embed = line.split(":", 2)
            embed = eval(embed.strip())
            return_['embed_widget'] = embed
    r_params = []
    # Defaults for visualization info; merged with return_ fields below.
    vis_info = {'input_widget': None,
                'output_widget': None,
                'embed_widget': True}
    for i, name in enumerate(param_order):
        type_ = params[name]['type']
        desc = params[name]['desc']
        ui_name = params[name].get('ui_name', name)  # use parameter name if no ui_name is given
        if 'default' in params[name]:
            # set default value
            dflt = params[name]['default']
            pvalue = type_(dflt, desc=desc, ui_name=ui_name)
        else:
            # no default value
            pvalue = type_(desc=desc, ui_name=ui_name)
        r_params.append(pvalue)
    if not return_:
        r_output = None
    else:
        r_output = return_['type'](desc=return_['desc'])
    # Python-2-only idiom: dict.items() returns lists, so '+' concatenates
    # them; return_ entries win on key collisions.
    vis_info = dict(vis_info.items() + return_.items())
    return r_params, r_output, vis_info
## Registry
#: Module-level registry mapping service name -> Service instance,
#: maintained by register_service() / unregister_service().
_services = {}
# def register_job(job_id):
# """Register a long-running job by id.
# This takes a job id from the User and Job Service, and registers it in
# the Narrative interface. This registration process does two things:
# 1. Shares that job with NarrativeJobViewer account on behalf of the current user.
# 2. Sends that job id forward to the Narrative front-end to be stored in the
# Narrative object and made visible to any querying front-end widget.
# :param str job_id: Unique identifier for the long-running job.
# """
# pass
# def poll_job(job_id):
# """Fetch a job from the User and Job Service.
# :param str job_id: Unique identifier for the job.
# """
# pass
def register_service(svc, name=None):
    """Register a service.

    This will fail if there is already a service registered by that name.
    If you want to replace a service, you must call :func:`unregister_service`
    and then this method.

    :param Service svc: Service object
    :param str name: Service name. If not present, use `svc.name`.
    :return: None
    :raise: DuplicateServiceError, if service already is registered
    """
    key = svc.name if name is None else name
    if key in _services:
        raise DuplicateServiceError(key)
    _services[key] = svc
def unregister_service(svc=None, name=None):
    """Unregister a service.

    :param Service svc: Service object. If not present, use `name`.
    :param str name: Service name. If not present, use `svc.name`.
    :raise: ValueError if bad arguments, KeyError if service not found
    """
    # Guard clauses: resolve the name, complaining early about bad input.
    if name is None and svc is None:
        raise ValueError("Service object or name required")
    if name is None:
        name = svc.name
    if name is None:
        raise ValueError("Service object has no name")
    del _services[name]
def get_service(name):
    """Look up a registered service by name.

    :param str name: Service name
    :return: The matching service, or None if none is registered
    :rtype: Service
    """
    return _services.get(name)
def get_all_services(as_json=False, as_json_schema=False):
    """Get all registered services, as objects (default), JSON, or JSON schema.

    :param bool as_json: If True, return JSON instead of objects. Supersedes as_json_schema.
    :param bool as_json_schema: If True, return JSON schema instead of objects.
    :return: dict of {service name : Service object or JSON}
    """
    # Default: hand back a shallow copy so callers can't mutate the registry.
    if not (as_json or as_json_schema):
        return _services.copy()
    if as_json:
        return json.dumps({name: inst.as_json() for name, inst in _services.iteritems()})
    return json.dumps({name: inst.as_json_schema() for name, inst in _services.iteritems()})
## Service classes
class Service(trt.HasTraits):
    """Base Service class.

    A Service is a named, versioned collection of :class:`ServiceMethod`
    objects. Instances constructed with a 'name' keyword register
    themselves in the class-level `__all__` mapping.
    """
    #: All named Service instances, keyed by service name.
    __all__ = dict()
    #: Name of the service; should be short identifier
    name = trt.Unicode()
    #: Description of the service
    desc = trt.Unicode()
    #: Version number of the service, see :class:`VersionNumber` for format
    version = kbtypes.VersionNumber()
    #: Flag for making all service methods invisible to UI
    invisible = trt.Bool(False)

    def __init__(self, **meta):
        """Initialize a Service instance.

        :param meta: Metadata keywords to set as attributes on the instance.
                     Special keywords are `name`, `desc`, and `version` (see
                     documentation for each).
        """
        trt.HasTraits.__init__(self)
        # Set traits from 'meta', if present; unknown keywords are ignored.
        for key, val in meta.iteritems():
            if hasattr(self, key):
                setattr(self, key, val)
        # list of all methods
        self.methods = []
        # Register the new instance, so long as the service was
        # properly declared with a name.
        if 'name' in meta:
            self.__class__.__all__[meta['name']] = self

    def add_method(self, method=None, **kw):
        """Add one :class:`ServiceMethod`.

        :param method: The method. If missing, create an instance from keywords.
        :type method: ServiceMethod or None
        :param kw: Keywords if creating a ServiceMethod
        :type kw: dict
        :return: The method (given or created)
        :rtype: ServiceMethod
        :raise: If method is None, anything raised by :class:`ServiceMethod` constructor
        """
        if not method:
            # If the service isn't visible, pass that down into the method.
            if self.invisible:
                kw['visible'] = False
            method = ServiceMethod(**kw)
        self.methods.append(method)
        return method

    def get_method(self, name):
        """Get a service method, by name.

        :param str name: Method name
        :return: Method or None
        :rtype: ServiceMethod
        """
        for m in self.methods:
            if m.name == name:
                return m
        # Log the miss instead of print()-ing it: stdout is parsed by the
        # narrative front-end (see LifecyclePrinter's '@@' protocol), so
        # stray prints there are noise at best.
        _log.warning("get_method: no method named '{}'".format(name))
        return None

    def quiet(self, value=True):
        """Make all methods quiet.
        See :meth:`ServiceMethod.quiet`.
        """
        for m in self.methods:
            m.quiet(value)

    def as_json(self):
        """Return a dict representation (name/desc/version/methods)."""
        return {
            'name': self.name,
            'desc': self.desc,
            'version': self.version,
            'methods': [m.as_json() for m in self.methods]
        }

    def as_json_schema(self):
        """Return a JSON-schema-style dict representation."""
        return {
            'name': self.name,
            'desc': self.desc,
            'version': self.version,
            'methods': [m.as_json_schema() for m in self.methods]
        }
class LifecycleSubject(object):
    """Contains the current status of a running process.

    The basic model is that a process is in a 'stage', which is
    an integer starting at 1 and less than or equal to the total
    number of stages. Stages and total numbers of stages can
    change as long as the invariants 0 <= stage <= num_stages
    and 1 <= num_stages hold. Note that 0 is a special stage number
    meaning 'not yet started'.

    Events are fanned out to every observer registered with
    :meth:`register` (see LifecycleObserver for the callback names).
    """
    def __init__(self, stages=1):
        """Create the subject.

        :param int stages: Total number of stages, must be >= 1
        :raise: ValueError if stages is not a positive int
        """
        if not isinstance(stages, int) or stages < 1:
            raise ValueError("Number of stages ({}) must be > 0".format(stages))
        self._stages = stages
        self.reset()
        self.obs = []  # registered observers

    def register(self, observer):
        """Add an observer that will receive lifecycle events."""
        self.obs.append(observer)

    def unregister(self, observer):
        """Remove a previously registered observer."""
        self.obs.remove(observer)

    def _event(self, name, *args):
        # Fan the event out by invoking the method called `name` on each
        # registered observer.
        for obs in self.obs:
            getattr(obs, name)(*args)

    ## Events

    def reset(self):
        """Return to the 'not yet started' state."""
        self._stage = 0
        self._done = False

    def advance(self, name):
        """Increments stage, giving it a name. No-op once done."""
        if not self._done:
            self._stage += 1
            self._event('stage', self._stage, self._stages, name)

    def started(self, params):
        """Start the process.
        Idempotent.
        """
        self._done = False
        self._event('started', params)

    def done(self):
        """Done with process.
        Idempotent.
        """
        if not self._done:
            self._done = True
            self._event('done')

    def error(self, code, err):
        """Done with process due to an error.
        Idempotent.
        """
        if not self._done:
            self._done = True
            self._event('error', code, err)

    def debug(self, msg):
        """Broadcast a debugging message to observers."""
        self._event('debug', msg)

    def register_job(self, job_id):
        """Register a new long-running job by id, creating the module-level
        job manager on demand.
        """
        global job_manager
        if job_id is not None:
            if job_manager is None:
                job_manager = KBjobManager()
            self._event('debug', job_id)
            # njs jobs start with either 'njs:' or 'method:' - either way
            # they're the only ones with a colon, and are not registered
            # with the job manager.
            if job_id.find(':') == -1:
                job_manager.register_job(job_id)
            self._event('register_job', job_id)

    def register_app(self, app_id):
        """Register a new long-running app process (same rules as jobs)."""
        global job_manager
        if app_id is not None:
            if job_manager is None:
                job_manager = KBjobManager()
            self._event('debug', app_id)
            if app_id.find(':') == -1:
                job_manager.register_job(app_id)
            self._event('register_app', app_id)

    # get/set 'stage' property
    @property
    def stage(self):
        return self._stage

    @stage.setter
    def stage(self, value):
        if not isinstance(value, int):
            # BUG FIX: the original message left its {} placeholder unfilled.
            raise ValueError("stage ({}) must be an int".format(value))
        elif value < 0:
            raise ValueError("stage ({}) must be >= 0".format(value))
        elif value > self._stages:
            raise ValueError("stage ({}) must be <= num. stages ({})"
                             .format(value, self._stages))
        self._stage = value
        self._event('stage', self._stage, self._stages, '')

    # get/set 'stages' (number of stages) property
    @property
    def stages(self):
        return self._stages

    @stages.setter
    def stages(self, value):
        if not isinstance(value, int):
            # BUG FIX: same unfilled-placeholder problem as the stage setter.
            raise ValueError("stages ({}) must be an int".format(value))
        elif value < 1:
            raise ValueError("stages ({}) must be >= 1".format(value))
        elif value < self._stage:
            raise ValueError("stages ({}) must be >= cur. stage ({})"
                             .format(value, self._stage))
        self._stages = value
class LifecycleObserver(object):
    """Interface that defines the lifecycle events of a service,
    in terms of callbacks. These callbacks will be used by the
    :class:`IpService` to communicate with the IPython kernel,
    but they can also be extended to perform service-specific actions.

    All methods are no-ops here; subclasses override only the events
    they care about.
    """
    def started(self, params):
        """Called before execution starts.

        :param params: Positional parameters the method was invoked with
        """
        pass

    def stage(self, num, total, name):
        """Called for stage changes.

        :param int num: Current stage number (0 means not started)
        :param int total: Total number of stages
        :param str name: Human-readable stage name (may be empty)
        """
        pass

    def done(self):
        """Called on successful completion"""
        pass

    def error(self, code, err):
        """Called on fatal error.

        :param int code: Numeric error code
        :param err: Error object (e.g. a ServiceError subclass)
        """
        pass

    def debug(self, msg):
        """Debugging message"""
        pass

    def register_job(self, job_id):
        """Register a long-running job"""
        pass

    def register_app(self, app_id):
        """Register an app job that's composed of several subjobs"""
        pass
class LifecycleHistory(LifecycleObserver):
    """Record duration between start/end in lifecycle events.
    """
    def __init__(self, method, max_save=1000):
        """Create the history tracker.

        :param method: ServiceMethod whose runs are being timed
        :param int max_save: Maximum number of past runs to remember
        """
        self._method = method
        self._t = [None, None]  # [start, end] timestamps of the current run
        self._p = None  # params of the current run
        self._hist = deque()  # each item: (t0, t1, dur, [params])
        self._maxlen = max_save
        self._cur_stage, self._nstages = 0, 0

    def get_durations(self):
        """Past durations of the method.

        :return: All the past durations, in seconds
        :rtype: iterable of double
        """
        return (x[2] for x in self._hist)

    def started(self, params):
        """Called when execution starts: record start time and params."""
        self._t[0] = time.time()
        self._p = params

    def stage(self, num, ttl, name):
        # Track progress counters; stage names are not recorded.
        self._cur_stage = num
        self._nstages = ttl

    def done(self):
        """Called on successful completion: store the run in history."""
        self._t[1] = time.time()
        dur = self._t[1] - self._t[0]
        self._hist.append(tuple(self._t + [dur, self._p]))
        if len(self._hist) > self._maxlen:
            self._hist.popleft()  # drop the oldest entry

    def error(self, code, err):
        """Called on fatal error; failed runs are not timed."""
        pass

    def estimated_runtime(self, params):
        """Based on history and params, estimate runtime for function.

        :param params: Planned parameter values (currently ignored)
        :return: Estimated runtime in seconds; -1 means "unknown"
        """
        # BUG FIX: get_durations() yields a generator and the original
        # called len() on it, raising TypeError on every invocation.
        # Materialize it before measuring.
        durations = list(self.get_durations())
        if not durations:
            return -1  # no history to base an estimate on
        # dumb: ignore params, take the mean of past durations
        return sum(durations) / len(durations)
class LifecyclePrinter(LifecycleObserver):
    """Observe lifecycle events and print status lines to stdout.

    The front-end learns the current process state by watching stdout for
    lines starting with the special prefix; the single letter immediately
    after the prefix encodes the event:

    * S - started
    * D - done
    * P - progress; rest of line is '<name>,<num>,<num>' meaning
      stage name, current, total
    * E - error; rest of line is a JSON object with key/vals about the
      error (see the :class:`ServiceError` subclasses for details)
    * G - debug message
    * J - registered job id
    * A - registered app id
    """
    #: Special prefix for output to stdout
    #: that indicates the status of the process.
    SPECIAL_PFX = '@@'

    def _write(self, s):
        sys.stdout.write(self.SPECIAL_PFX + s + "\n")
        sys.stdout.flush()  # the front-end needs each line immediately

    def started(self, params):
        self._write('S')

    def done(self):
        self._write('D')

    def stage(self, n, total, name):
        self._write('P{},{:d},{:d}'.format(name, n, total))

    def error(self, code, err):
        self._write('E' + err.as_json())

    def debug(self, msg):
        self._write('G' + msg)

    def register_job(self, job_id):
        self._write('J' + job_id)

    def register_app(self, app_id):
        self._write('A' + app_id)
class LifecycleLogger(LifecycleObserver):
    """Log lifecycle messages in a simple but structured format,
    to a file.
    """
    MAX_MSG_LEN = 240  # Truncate error messages to this length, in chars

    def __init__(self, name, debug=False):
        """Create a Python logging.Logger with the given name, under the existing
        IPython logging framework.

        :param name: Name of logger
        :type name: str
        :param debug: Whether to set debug as the log level
        :type debug: bool
        """
        self._name = name
        # Use the kblogging framework's logger (not the IPython app log).
        self._log = kblogging.get_logger(name)
        if debug:
            self._log.setLevel(logging.DEBUG)
        else:
            self._log.setLevel(logging.INFO)
        self._is_debug = debug
        self._start_time = None

    def _write(self, level, event, kvp):
        # Tag every record with its severity name for the structured log.
        kvp['severity'] = logging.getLevelName(level)
        kblogging.log_event(self._log, event, kvp)

    def started(self, params):
        """Log function start, with a sanitized rendering of its params."""
        # Normalize quoting so the log format can handle embedded spaces:
        # double quotes -> single quotes, and drop backslashes.
        # (A redundant replace("\'", "'") no-op was removed here.)
        pstr = str(params).replace('"', "'")
        pstr = pstr.replace('\\', '')
        # Extract just the step-parameter dicts from the rendered string.
        psets = ','.join(re.findall('{\s*\'stepId\'.*?\]', pstr))
        self._write(logging.INFO, "func.begin", {'params': psets})
        self._start_time = time.time()

    def done(self):
        """Log successful completion, with the elapsed duration."""
        t = time.time()
        if self._start_time is not None:
            dur = t - self._start_time
            self._start_time = None
        else:
            dur = -1  # done() without started(): duration unknown
        self._write(logging.INFO, "func.end", {'dur': dur})

    def stage(self, n, total, name):
        self._write(logging.INFO, "func.stage.{}".format(name),
                    {'num': n, 'total': total})

    def error(self, code, err):
        """Log a fatal error, truncating overly long messages."""
        if len(str(err)) > self.MAX_MSG_LEN:
            # BUG FIX: the original sliced the exception object itself
            # (err[:MAX_MSG_LEN]), which raises TypeError; slice the string.
            err = str(err)[:self.MAX_MSG_LEN] + '[..]'
        self._write(logging.ERROR, "func.error", {'errcode': code, 'errmsg': err})

    def debug(self, msg):
        if self._is_debug:
            self._write(logging.DEBUG, "func.debug", {'dbgmsg': msg})

    def register_job(self, job_id):
        self._write(logging.INFO, "start job", {'jobid': "id={}".format(job_id)})

    def register_app(self, app_id):
        self._write(logging.INFO, "start app", {'jobid': "id={}".format(app_id)})
class ServiceMethod(trt.HasTraits, LifecycleSubject):
"""A method of a service.
Defines some metadata and a function, using :meth:`set_func`,
to run the service. Call the class instance like a function
to execute the service in its wrapped mode.
Note that for services to be able to chain their results forward to
the next called service, a method _must_ return a value.
Example usage:
>>> svc = Service()
>>> def multiply(m, a,b): return a*b
>>> meth = ServiceMethod(svc, quiet=True)
>>> meth.set_func(multiply, (trt.CFloat(), trt.CFloat()), (trt.Float(),))
>>> c = meth(9, 8)[0]
>>> c
72
>>> # validation catches bad args, function isn't called
>>> c = meth("strawberry", "alarmclock")
>>> print(c)
None
"""
#: Name of the method; should be short identifier
name = trt.Unicode()
#: Description of the method
desc = trt.Unicode()
#: Parameters of method, a Tuple of traits
params = trt.Tuple()
#: Output of the method, a Tuple of traits
outputs = trt.Tuple()
def __init__(self, status_class=LifecycleHistory, quiet=False,
func=None, visible=True, **meta):
"""Constructor.
:param status_class: Subclass of LifecycleObserver to instantiate
and use by default for status queries.
Other observers can be used with :meth:`register`.
:type status_class: type
:param bool quiet: If True, don't add the printed output
:param func: Function to auto-wrap, if present
:param visible: Whether this service is 'visible' to the UI
:param meta: Other key/value pairs to set as traits of the method.
"""
LifecycleSubject.__init__(self)
self.name, self.full_name, self.run = "", "", None
self._visible = visible
self._history = status_class(self)
self.register(self._history)
self._observers = [] # keep our own list of 'optional' observers
# set traits from 'meta', if present
for key, val in meta.iteritems():
if hasattr(self, key):
setattr(self, key, val)
# Call set_func() with metadata from function
# docstring, if function is given
if func is not None:
self.desc = get_func_desc(func)
params, output, vis_info = get_func_info(func)
self.set_func(func, tuple(params), (output,), vis_info)
# Set logging level. Do this last so it can use func. name
self.quiet(quiet)
def quiet(self, value=True):
"""Control printing of status messages.
"""
if value:
# make it quiet
if self._observers: # for idempotence
map(self.unregister, self._observers)
self._observers = []
else:
# make some noise
if not self._observers: # for idempotence
debug = util.kbase_debug_mode()
self._observers = [LifecyclePrinter(),
LifecycleLogger(self.full_name, debug=debug)]
map(self.register, self._observers)
def set_func(self, fn, params, outputs, vis_info):
"""Set the main function to run, and its metadata.
Although params and outputs are normally traits or
subclasses of traits defined in kbtypes, the value
None is also allowed for return values.
:param fn: Function object to run
:param params: tuple of traits describing input parameters
:param outputs: tuple of traits, describing the output value(s)
:param vis_info: visualization information, with two keys:
* 'widget': Name of the default widget.
* 'embed_widget': Whether it should automatically be shown, default = True.
:type vis_info: dict
:raise: ServiceMethodParameterError, if function signature does not match
ValueError, if None is given for a param
"""
self.run = fn
if self.name is None:
self.name = fn.__name__
self.full_name = '.'.join([fn.__module__, self.name])
# Handle parameters
for i, p in enumerate(params):
if p is None:
raise ValueError("None is not allowed for a parameter type")
p.name = "param{:d}".format(i)
self.params = params
# Handle outputs
for i, o in enumerate(outputs):
o.name = "output{:d}".format(i)
# Set widget name
self.input_widget = None
if 'input_widget' in vis_info and vis_info['input_widget'] is not None:
self.input_widget = vis_info['input_widget']
self.output_widget = None
if 'output_widget' in vis_info and vis_info['output_widget'] is not None:
self.output_widget = vis_info['output_widget']
# Set embed_widget
self.embed_widget = True
if 'embed' in vis_info and vis_info['embed_widget'] is not None:
self.embed_widget = vis_info['embed_widget']
self.outputs = outputs
self._one_output_ok = len(outputs) == 1
def __call__(self, *params):
"""Run the method when the class instance is called like
a function.
:param params: List of parameters for the method
:return: From function given with :meth:`set_func`
:raise: ServiceMethodParameterError, if parameters don't validate
"""
result = None
self.reset()
try:
self._validate(params, self.params)
self.started(params)
tmpresult = self.run(self, *params)
if self._one_output_ok and not is_sequence(tmpresult):
tmpresult = (tmpresult,)
self._validate(tmpresult, self.outputs)
result = tmpresult
self.done()
except ServiceMethodError as err:
self.error(-2, err)
except Exception as err:
tb = traceback.sys.exc_traceback
self.error(-1, ServiceMethodError(self, err, tb=tb))
# output object contains:
# data
# default widget name
# whether it should automatically embed the result or not
output_obj = {'data': result,
'widget': self.output_widget,
'embed': self.embed_widget}
sys.stdout.write(json.dumps(output_obj))
return result
def _validate(self, values, specs):
if len(values) != len(specs):
raise ServiceMethodParameterError(self, "Wrong number of arguments. got={} wanted={}"
.format(len(values), len(specs)))
for val, spec in zip(values, specs):
if spec is None:
if val is not None:
err = "None expected, got {}".format(val)
raise ServiceMethodParameterError(self, "Argument type error: {}".format(err))
else:
try:
spec.validate(spec, val)
except trt.TraitError, err:
raise ServiceMethodParameterError(self, "Argument type error: {}".format(err))
def estimated_runtime(self, params=()):
"""Calculate estimated runtime, for the given parameters.
:param tuple params: List of parameter values
:return: Runtime, in seconds. Use -1 for "unknown"
:rtype: double
"""
return self._history.estimated_runtime(params)
## Utility functions
@property
def token(self):
"""Authorization token passed in from front-end.
"""
return os.environ['KB_AUTH_TOKEN']
@property
def workspace_id(self):
"""Workspace ID passed in from front-end.
"""
return os.environ['KB_WORKSPACE_ID']
def poll_job(self, job_id):
global job_manager
if job_manager is None:
job_manager = KBjobManager()
return job_manager.poll_job(job_id)
## JSON serialization
def as_json(self, formatted=False, **kw):
d = {
'name': self.name,
'desc': self.desc,
'input_widget': self.input_widget,
'output_widget': self.output_widget,
'params': [(p.name, p.get_metadata('ui_name'), str(p), p.get_metadata('desc')) for p in self.params],
'outputs': [(p.name, str(p), p.get_metadata('desc')) for p in self.outputs],
'visible': self._visible
}
if formatted:
return json.dumps(d, **kw)
return d
trt_2_jschema = {'a unicode string': 'string',
'an int': 'integer',
'a list or None': 'array',
'a set or None': 'array',
'a tuple or None': 'array',
'a dict or None': 'object',
'a float': 'number',
'a boolean': 'boolean'}
    def as_json_schema(self, formatted=False, **kw):
        """Serialize this method's metadata as a JSON-schema-style dict.

        Trait types are mapped to JSON-schema type names through
        trt_2_jschema, falling back to str(trait) for unknown ones.

        :param bool formatted: If True, return a JSON string instead
            (remaining keywords are passed through to json.dumps)
        :return: dict of schema metadata, or its JSON string when formatted=True
        """
        d = {
            'title': self.name,
            'type': 'object',
            'description': self.desc,
            'properties': {
                'parameters': {p.name: {'type': self.trt_2_jschema.get(p.info(), str(p)),
                                        'description': p.get_metadata('desc'),
                                        'ui_name': p.get_metadata('ui_name'),
                                        'default': p.get_default_value()} for p in self.params},
                'widgets': {'input': self.input_widget, 'output': self.output_widget},
            },
            'visible': self._visible,
            'returns': {p.name: {'type': self.trt_2_jschema.get(p.info(), str(p)),
                                 'description': p.get_metadata('desc')} for p in self.outputs}
        }
        if formatted:
            return json.dumps(d, **kw)
        return d
    def as_json_schema_dumps(self):
        """Return the JSON-schema metadata as a JSON-encoded string."""
        return json.dumps(self.as_json_schema())
## Simplified, decorator-based, workflow

# Module-global "current" service manipulated by init_service(),
# configure_service(), the @method decorator and finalize_service().
_curr_service = None
def init_service(**kw):
    """Call this first, to create & set service.

    Creates a new Service from the keyword arguments and stores it in the
    module-global current service slot for later configure/method/finalize.

    All arguments must be keywords. See :class:`Service` and
    :meth:`Service.__init__`.
    """
    global _curr_service
    _curr_service = Service(**kw)
def configure_service(**kw):
    """Set service attributes given in input keywords.

    :raise: AttributeError if there is no such attribute,
            ValueError if service is not initialized
    """
    if _curr_service is None:
        raise ValueError("Attempt to configure service before init_service()")
    # fix: items() instead of Py2-only iteritems(); works on Python 2 and 3
    for key, value in kw.items():
        setattr(_curr_service, key, value)
def method(name=None):
    """Decorator function for creating new services.

    Must be used after init_service(); adds the decorated function to the
    current service as a method and returns the wrapped callable.

    Example usage::

        @method(name="MyMethod")
        def my_service(method, arg1, arg2, etc.):
            pass  # method body goes here

    :param name: Method name; defaults to the decorated function's __name__
    :raise: ValueError if init_service() has not been called first
    """
    if _curr_service is None:
        raise ValueError("Attempt to call @method decorator before init_service()")

    def wrap(fn, name=name):
        if name is None:
            name = fn.__name__
        wrapped_fn = _curr_service.add_method(name=name, func=fn)
        # copy docstring from original fn to wrapped fn, so that
        # interactive help, autodoc, etc. will show the 'real' docs.
        wrapped_fn.__doc__ = fn.__doc__
        return wrapped_fn

    return wrap
def finalize_service():
    """Call this last, to finalize and register the service.

    Registers the module-global current service and clears it so a new
    init_service()/method()/finalize_service() cycle can begin.
    """
    global _curr_service
    register_service(_curr_service)
    _curr_service = None  # reset to un-initialized
#############################################################################
def example():
    """End-to-end demo: define, register, look up and invoke a sample
    'taxicab' service, exercising metadata serialization, parameter
    validation failures and successful runs.
    """
    # New data type for a Person
    class Person(trt.Unicode):
        default_value = "Joe Schmoe"
        info_text = 'the name of a person'

        def validate(self, obj, value):
            trt.Unicode.validate(self, obj, value)

    # Function that does the work of the "pickup" method
    def pick_up_people(method, num, where_from, where_to, who):
        method.stages = 3
        if num < 1:
            raise ValueError("Can't pick up less than one person ({})".format(num))
        if num == 99:
            # deliberately wrong arity, to demo output validation failure
            return 1, 2, 3
        print("{} called for {:d} people to be driven from {} to {}".format(who, num, where_from, where_to))
        time.sleep(0.5)
        method.advance("pickup: " + where_from)
        print("picking up {} and {:d} other bozos at {}".format(who, num - 1, where_from))
        time.sleep(0.5)
        method.advance('dropoff: ' + where_to)
        print("dropping off {} and {:d} other bozos at {}".format(who, num - 1, where_to))
        # for one return value, a list/tuple is optional
        if num < 5:
            return num
        else:
            return [num]

    # Service creation
    # =================
    # Create a new service
    service = Service(name="taxicab", desc="Yellow Cab taxi service", version="0.0.1-alpha")
    # Create and initialize a method in the service
    method = ServiceMethod(name="pickup", desc="Pick up people in a taxi")
    method.set_func(pick_up_people,
                    (trt.Int(1, desc="number of people"), trt.Unicode("", desc="Pick up location"),
                     trt.Unicode("", desc="main drop off location"),
                     Person("", desc="Person who called the taxi")),
                    (trt.Int([], desc="Number of people dropped off"),))
    service.add_method(method)
    # Register service
    register_service(service)

    hdr = lambda s: "\n### " + s + " ###\n"

    # Service usage
    # ==============
    # Registry
    # --------
    # (pretend this is the start of a new module)
    # a. Show all registered services
    print(hdr("All registered service schema"))
    print(get_all_services(as_json_schema=True))
    # b. get service/method from registry
    method = get_service("taxicab").get_method("pickup")

    # JSON metadata
    # -------------
    print(hdr("JSON metadata"))
    print(method.as_json())
    print(hdr("JSON Metadata"))
    print(method.as_json(formatted=True, indent=2))
    print(hdr("JSON Schema Metadata"))
    print(method.as_json_schema(formatted=True, indent=2))

    # Validation
    # ----------
    # Each failing call returns None rather than raising.
    print(hdr("Bad parameters"))
    r = method(1)
    assert(r is None)

    print(hdr("Function error"))
    r = method(0, "here", "there", "me")
    assert (r is None)

    # Failure, bad output
    print(hdr("Bad output type"))
    r = method(99, "here", "there", "me")
    assert (r is None)

    # Successful run
    # --------------
    print(hdr("Success 1"))
    r = method(3, "Berkeley", "San Francisco", "Willie Brown")
    assert(r is not None)

    print(hdr("Success 2"))
    r = method(9, "Dubuque", "Tallahassee", "Cthulhu")
    assert (r is not None)
# Run the demo when this module is executed directly.
if __name__ == '__main__':
    example()
| |
#! /usr/bin/env python
# encoding: utf-8
import argparse
import errno
import logging
import os
import platform
import re
import sys
import subprocess
import tempfile
try:
import winreg
except ImportError:
import _winreg as winreg
try:
import urllib.request as request
except ImportError:
import urllib as request
try:
import urllib.parse as parse
except ImportError:
import urlparse as parse
class EmptyLogger(object):
    '''
    A no-op logger: accepts the usual ``logging.Logger``-style calls
    and silently discards all of them.
    '''
    def _ignore(self, *args, **kwargs):
        pass

    # Every logging entry point shares the same do-nothing implementation.
    debug = _ignore
    info = _ignore
    warn = _ignore
    error = _ignore
    critical = _ignore
    setLevel = _ignore
# Default repository.txt index locations for the mingw-w64/mingw-builds
# projects on SourceForge; consumed by repository() below.
urls = (
    'http://downloads.sourceforge.net/project/mingw-w64/Toolchains%20'
    'targetting%20Win32/Personal%20Builds/mingw-builds/installer/'
    'repository.txt',
    'http://downloads.sourceforge.net/project/mingwbuilds/host-windows/'
    'repository.txt'
)
'''
A list of mingw-build repositories
'''
def repository(urls = urls, log = EmptyLogger()):
    '''
    Downloads and parses mingw-build repository files.

    :param urls: iterable of repository.txt urls to fetch
    :param log: logger-like object used for progress output
    :return: nested dict: version tuple -> arch -> threading -> exceptions
             -> revision number -> download url
    '''
    log.info('getting mingw-builds repository')
    versions = {}
    re_sourceforge = re.compile(r'http://sourceforge.net/projects/([^/]+)/files')
    re_sub = r'http://downloads.sourceforge.net/project/\1'
    for url in urls:
        log.debug(' - requesting: %s', url)
        socket = request.urlopen(url)
        try:
            repo = socket.read()
        finally:
            # fix: close the connection even if read() raises
            socket.close()
        if not isinstance(repo, str):
            repo = repo.decode()  # fix: dropped stray trailing semicolon
        # Each line is 'version|arch|threading|exceptions|revNN|url';
        # the trailing empty line after the final newline is skipped.
        for entry in repo.split('\n')[:-1]:
            value = entry.split('|')
            version = tuple(int(n) for n in value[0].strip().split('.'))
            version = versions.setdefault(version, {})
            arch = value[1].strip()
            if arch == 'x32':
                arch = 'i686'
            elif arch == 'x64':
                arch = 'x86_64'
            arch = version.setdefault(arch, {})
            threading = arch.setdefault(value[2].strip(), {})
            exceptions = threading.setdefault(value[3].strip(), {})
            # fix: setdefault result was bound to an unused local variable
            exceptions.setdefault(int(value[4].strip()[3:]),
                                  re_sourceforge.sub(re_sub, value[5].strip()))
    return versions
def find_in_path(file, path=None):
    '''
    Attempts to find an executable in the path.

    :param file: base name of the executable ('.exe' is appended on Windows)
    :param path: search path; a PATH-style separated string, a list of
        directories, or None to use the PATH environment variable
    :return: list of existing candidate paths (possibly empty)
    '''
    if platform.system() == 'Windows':
        file += '.exe'
    if path is None:
        path = os.environ.get('PATH', '')
    # fix: isinstance() instead of the 'type(path) is type('')' comparison
    if isinstance(path, str):
        path = path.split(os.pathsep)
    return list(filter(os.path.exists,
                       map(lambda dir, file=file: os.path.join(dir, file), path)))
def find_7zip(log = EmptyLogger()):
    '''
    Attempts to find 7zip for unpacking the mingw-build archives.

    Searches the PATH first, then falls back to the 7-Zip registry key
    (Windows only).

    :param log: logger-like object used for progress output
    :return: path to the 7z executable
    :raise: OSError/WindowsError if 7z is on neither the PATH nor in the registry
    '''
    log.info('finding 7zip')
    path = find_in_path('7z')
    if not path:
        key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\7-Zip')
        path, _ = winreg.QueryValueEx(key, 'Path')
        path = [os.path.join(path, '7z.exe')]
    log.debug('found \'%s\'', path[0])
    return path[0]
# fix: removed the stray module-level find_7zip() call that ran at import
# time (and crashed the import on machines without 7z or the registry key);
# unpack() already calls find_7zip() when it is actually needed.
def unpack(archive, location, log = EmptyLogger()):
    '''
    Unpacks a mingw-builds archive.

    :param archive: path of the archive file to extract
    :param location: directory to extract into
    :param log: logger-like object used for progress output
    :raise: subprocess.CalledProcessError if 7z exits non-zero
    '''
    sevenzip = find_7zip(log)
    log.info('unpacking %s', os.path.basename(archive))
    cmd = [sevenzip, 'x', archive, '-o' + location, '-y']
    log.debug(' - %r', cmd)
    # Discard 7z's stdout chatter; failures still surface via the exit code.
    with open(os.devnull, 'w') as devnull:
        subprocess.check_call(cmd, stdout = devnull)
def download(url, location, log = EmptyLogger()):
    '''
    Downloads and unpacks a mingw-builds archive.

    :param url: archive download url
    :param location: directory to download into and unpack under
    :param log: logger-like object used for progress output
    :return: path of the unpacked mingw64/mingw32 root directory
    :raise: ValueError if no mingw root directory is found after unpacking
    '''
    log.info('downloading MinGW')
    log.debug(' - url: %s', url)
    log.debug(' - location: %s', location)
    # Extract the real filename from the Content-Disposition header, if any.
    re_content = re.compile(r'attachment;[ \t]*filename=(")?([^"]*)(")?[\r\n]*')
    stream = request.urlopen(url)
    try:
        # Python 3 http.client response API
        content = stream.getheader('Content-Disposition') or ''
    except AttributeError:
        # Python 2 fallback
        content = stream.headers.getheader('Content-Disposition') or ''
    matches = re_content.match(content)
    if matches:
        filename = matches.group(2)
    else:
        # No header: fall back to the basename of the (possibly redirected) url.
        parsed = parse.urlparse(stream.geturl())
        filename = os.path.basename(parsed.path)
    try:
        os.makedirs(location)
    except OSError as e:
        # An already-existing directory is fine; re-raise anything else.
        if e.errno == errno.EEXIST and os.path.isdir(location):
            pass
        else:
            raise
    archive = os.path.join(location, filename)
    # Stream the archive to disk in 1 KiB chunks.
    with open(archive, 'wb') as out:
        while True:
            buf = stream.read(1024)
            if not buf:
                break
            out.write(buf)
    unpack(archive, location, log = log)
    os.remove(archive)

    # The archive unpacks to either 'mingw64' or 'mingw32'.
    possible = os.path.join(location, 'mingw64')
    if not os.path.exists(possible):
        possible = os.path.join(location, 'mingw32')
        if not os.path.exists(possible):
            raise ValueError('Failed to find unpacked MinGW: ' + possible)
    return possible
def root(location = None, arch = None, version = None, threading = None,
        exceptions = None, revision = None, log = EmptyLogger()):
    '''
    Returns the root folder of a specific version of the mingw-builds variant
    of gcc. Will download the compiler if needed.

    :param location: cache directory (defaults to <tempdir>/mingw-builds)
    :param arch: 'i686' or 'x86_64'; defaults from platform.machine()
    :param version: version tuple such as (4, 8, 1); defaults to the newest
    :param threading: 'posix' or 'win32'; defaults to the best available
    :param exceptions: 'seh', 'sjlj' or 'dwarf'; defaults to the best available
    :param revision: integer revision; defaults to the newest
    :param log: logger-like object used for progress output
    :return: root directory of the requested (possibly just downloaded) MinGW
    '''
    # The repository listing is always needed to resolve the download url
    # below. (fix: it was previously fetched only when some parameter was
    # missing, leaving 'versions' undefined when every parameter was given.)
    versions = repository(log = log)

    # Determine some defaults
    version = version or max(versions.keys())
    if not arch:
        arch = platform.machine().lower()
        if arch == 'x86':
            arch = 'i686'
        elif arch == 'amd64':
            arch = 'x86_64'
    if not threading:
        # fix: list() the view — dict.keys() is not indexable on Python 3
        keys = list(versions[version][arch].keys())
        if 'posix' in keys:
            threading = 'posix'
        elif 'win32' in keys:
            threading = 'win32'
        else:
            threading = keys[0]
    if not exceptions:
        # fix: list() the view — dict.keys() is not indexable on Python 3
        keys = list(versions[version][arch][threading].keys())
        if 'seh' in keys:
            exceptions = 'seh'
        elif 'sjlj' in keys:
            exceptions = 'sjlj'
        else:
            exceptions = keys[0]
    if revision is None:
        revision = max(versions[version][arch][threading][exceptions].keys())
    if not location:
        location = os.path.join(tempfile.gettempdir(), 'mingw-builds')

    # Get the download url
    url = versions[version][arch][threading][exceptions][revision]

    # Tell the user whatzzup
    log.info('finding MinGW %s', '.'.join(str(v) for v in version))
    log.debug(' - arch: %s', arch)
    log.debug(' - threading: %s', threading)
    log.debug(' - exceptions: %s', exceptions)
    log.debug(' - revision: %s', revision)
    log.debug(' - url: %s', url)

    # Store each specific revision differently
    slug = '{version}-{arch}-{threading}-{exceptions}-rev{revision}'
    slug = slug.format(
        version = '.'.join(str(v) for v in version),
        arch = arch,
        threading = threading,
        exceptions = exceptions,
        revision = revision
    )
    if arch == 'x86_64':
        root_dir = os.path.join(location, slug, 'mingw64')
    elif arch == 'i686':
        root_dir = os.path.join(location, slug, 'mingw32')
    else:
        raise ValueError('Unknown MinGW arch: ' + arch)

    # Download if needed
    if not os.path.exists(root_dir):
        downloaded = download(url, os.path.join(location, slug), log = log)
        if downloaded != root_dir:
            raise ValueError('The location of mingw did not match\n%s\n%s'
                             % (downloaded, root_dir))

    return root_dir
def str2ver(string):
    '''
    Converts a version string into a tuple.

    :param string: dotted version string such as '4.8.1'
    :return: three-element tuple of ints
    :raise: argparse.ArgumentTypeError if the string does not consist of
        exactly three dot-separated integers
    '''
    try:
        version = tuple(int(v) for v in string.split('.'))
        # fix: compare with != rather than 'is not' (identity test on an int)
        if len(version) != 3:
            raise ValueError()
    except ValueError:
        raise argparse.ArgumentTypeError(
            'please provide a three digit version string')
    return version
def main():
    '''
    Invoked when the script is run directly by the python interpreter.

    Parses the command line, configures a console logger, downloads the
    requested MinGW build if necessary, and prints its bin directory.
    '''
    parser = argparse.ArgumentParser(
        description = 'Downloads a specific version of MinGW',
        formatter_class = argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument('--location',
                        help = 'the location to download the compiler to',
                        default = os.path.join(tempfile.gettempdir(), 'mingw-builds'))
    parser.add_argument('--arch', required = True, choices = ['i686', 'x86_64'],
                        help = 'the target MinGW architecture string')
    parser.add_argument('--version', type = str2ver,
                        help = 'the version of GCC to download')
    parser.add_argument('--threading', choices = ['posix', 'win32'],
                        help = 'the threading type of the compiler')
    parser.add_argument('--exceptions', choices = ['sjlj', 'seh', 'dwarf'],
                        help = 'the method to throw exceptions')
    parser.add_argument('--revision', type=int,
                        help = 'the revision of the MinGW release')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-v', '--verbose', action='store_true',
                       help='increase the script output verbosity')
    group.add_argument('-q', '--quiet', action='store_true',
                       help='only print errors and warning')
    args = parser.parse_args()

    # Create the logger
    logger = logging.getLogger('mingw')
    handler = logging.StreamHandler()
    formatter = logging.Formatter('%(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    if args.quiet:
        logger.setLevel(logging.WARN)
    if args.verbose:
        logger.setLevel(logging.DEBUG)

    # Get MinGW
    root_dir = root(location = args.location, arch = args.arch,
                    version = args.version, threading = args.threading,
                    exceptions = args.exceptions, revision = args.revision,
                    log = logger)

    # Print the bin directory so callers can capture it (e.g. to extend PATH).
    sys.stdout.write('%s\n' % os.path.join(root_dir, 'bin'))
# Script entry point: translate expected failures (I/O, OS errors,
# keyboard interrupt) into an exit code of 1 with a short message.
if __name__ == '__main__':
    try:
        main()
    except IOError as e:
        sys.stderr.write('IO error: %s\n' % e)
        sys.exit(1)
    except OSError as e:
        sys.stderr.write('OS error: %s\n' % e)
        sys.exit(1)
    except KeyboardInterrupt as e:
        sys.stderr.write('Killed\n')
        sys.exit(1)
| |
from copy import copy
from django.utils.module_loading import import_string
# Cache of actual callables.
# Populated lazily by get_standard_processors() on first use.
_standard_context_processors = None

# We need the CSRF processor no matter what the user has in their settings,
# because otherwise it is a security vulnerability, and we can't afford to leave
# this to human error or failure to read migration instructions.
_builtin_context_processors = ('django.core.context_processors.csrf',)
class ContextPopException(Exception):
    # Raised by BaseContext.pop() when the stack would underflow.
    "pop() has been called more times than push()"
class ContextDict(dict):
    """A dict that registers itself on a context's stack and, used as a
    context manager, pops that stack again on exit."""
    def __init__(self, context, *args, **kwargs):
        super(ContextDict, self).__init__(*args, **kwargs)
        self.context = context
        context.dicts.append(self)

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        self.context.pop()
class BaseContext(object):
    """A stack of dicts with template-style variable resolution: lookups
    scan from the most recently pushed dict downward, the bottom dict
    always holds the template builtins."""

    def __init__(self, dict_=None):
        self._reset_dicts(dict_)

    def _reset_dicts(self, value=None):
        # Bottom of the stack: the always-available template builtins.
        self.dicts = [{'True': True, 'False': False, 'None': None}]
        if value is not None:
            self.dicts.append(value)

    def __copy__(self):
        duplicate = copy(super(BaseContext, self))
        duplicate.dicts = self.dicts[:]
        return duplicate

    def __repr__(self):
        return repr(self.dicts)

    def __iter__(self):
        # Most recently pushed dict first.
        return iter(reversed(self.dicts))

    def push(self, *args, **kwargs):
        return ContextDict(self, *args, **kwargs)

    def pop(self):
        if len(self.dicts) == 1:
            raise ContextPopException
        return self.dicts.pop()

    def __setitem__(self, key, value):
        "Set a variable in the current context"
        self.dicts[-1][key] = value

    def __getitem__(self, key):
        "Get a variable's value, starting at the current context and going upward"
        for layer in reversed(self.dicts):
            if key in layer:
                return layer[key]
        raise KeyError(key)

    def __delitem__(self, key):
        "Delete a variable from the current context"
        del self.dicts[-1][key]

    def has_key(self, key):
        return any(key in layer for layer in self.dicts)

    def __contains__(self, key):
        return self.has_key(key)

    def get(self, key, otherwise=None):
        for layer in reversed(self.dicts):
            if key in layer:
                return layer[key]
        return otherwise

    def new(self, values=None):
        """
        Returns a new context with the same properties, but with only the
        values given in 'values' stored.
        """
        fresh = copy(self)
        fresh._reset_dicts(values)
        return fresh

    def __eq__(self, other):
        """
        Compares two contexts by comparing theirs 'dicts' attributes.
        """
        if not isinstance(other, BaseContext):
            # if it's not comparable return false
            return False

        # Dicts may appear in different orders, so flatten both stacks
        # (later dicts win) the same way templates resolve them.
        def flatten(dicts):
            merged = {}
            for layer in dicts:
                merged.update(layer)
            return merged

        return flatten(self.dicts) == flatten(other.dicts)
class Context(BaseContext):
    "A stack container for variable context"

    def __init__(self, dict_=None, autoescape=True, current_app=None,
                 use_l10n=None, use_tz=None):
        # Per-render template state lives on its own stack, kept separate
        # from the variable stack managed by BaseContext.
        self.render_context = RenderContext()
        self.autoescape = autoescape
        self.current_app = current_app
        self.use_l10n = use_l10n
        self.use_tz = use_tz
        super(Context, self).__init__(dict_)

    def __copy__(self):
        clone = super(Context, self).__copy__()
        clone.render_context = copy(self.render_context)
        return clone

    def update(self, other_dict):
        "Pushes other_dict to the stack of dictionaries in the Context"
        if not hasattr(other_dict, '__getitem__'):
            raise TypeError('other_dict must be a mapping (dictionary-like) object.')
        self.dicts.append(other_dict)
        return other_dict
class RenderContext(BaseContext):
    """
    A stack container for storing Template state.

    RenderContext gives template Nodes a safe place to keep state between
    invocations of their `render` method, with scoping rules suited to
    'template local' variables: a fresh, empty scope is pushed before each
    template render, and name resolution only ever consults the topmost
    dict on the stack. Variables are therefore local to one template and
    never leak into the rendering of other templates, unlike values kept
    in the normal template context.
    """
    def __iter__(self):
        # Iterate only the topmost scope (its keys, like a plain dict).
        return iter(self.dicts[-1])

    def has_key(self, key):
        return key in self.dicts[-1]

    def get(self, key, otherwise=None):
        return self.dicts[-1].get(key, otherwise)

    def __getitem__(self, key):
        return self.dicts[-1][key]
# This is a function rather than module-level procedural code because we only
# want it to execute if somebody uses RequestContext.
def get_standard_processors():
from django.conf import settings
global _standard_context_processors
if _standard_context_processors is None:
processors = []
collect = []
collect.extend(_builtin_context_processors)
collect.extend(settings.TEMPLATE_CONTEXT_PROCESSORS)
for path in collect:
func = import_string(path)
processors.append(func)
_standard_context_processors = tuple(processors)
return _standard_context_processors
class RequestContext(Context):
    """
    This subclass of template.Context automatically populates itself using
    the processors defined in TEMPLATE_CONTEXT_PROCESSORS.
    Additional processors can be specified as a list of callables
    using the "processors" keyword argument.
    """
    def __init__(self, request, dict_=None, processors=None, current_app=None,
                 use_l10n=None, use_tz=None):
        Context.__init__(self, dict_, current_app=current_app,
                         use_l10n=use_l10n, use_tz=use_tz)
        extra = tuple(processors) if processors is not None else ()
        # Run the standard processors first, then any caller-supplied ones,
        # merging all of their dicts into a single pushed layer.
        updates = {}
        for processor in get_standard_processors() + extra:
            updates.update(processor(request))
        self.update(updates)
| |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add the nullable warranty_start and
        warranty_end datetime columns to the systems table."""
        # Adding field 'System.warranty_start'
        db.add_column(u'systems', 'warranty_start',
                      self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True),
                      keep_default=False)

        # Adding field 'System.warranty_end'
        db.add_column(u'systems', 'warranty_end',
                      self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True),
                      keep_default=False)
    def backwards(self, orm):
        """Revert the migration: drop the warranty_start and warranty_end
        columns from the systems table (their data is lost)."""
        # Deleting field 'System.warranty_start'
        db.delete_column(u'systems', 'warranty_start')

        # Deleting field 'System.warranty_end'
        db.delete_column(u'systems', 'warranty_end')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'dhcp.dhcp': {
'Meta': {'object_name': 'DHCP', 'db_table': "u'dhcp_scopes'"},
'allow_booting': ('django.db.models.fields.IntegerField', [], {'max_length': '32'}),
'allow_bootp': ('django.db.models.fields.IntegerField', [], {'max_length': '32'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option_domain_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'option_domain_name_servers': ('django.db.models.fields.CharField', [], {'max_length': '48', 'null': 'True', 'blank': 'True'}),
'option_ntp_servers': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'option_routers': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'option_subnet_mask': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'pool_deny_dynamic_bootp_agents': ('django.db.models.fields.IntegerField', [], {'max_length': '32'}),
'pool_range_end': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'pool_range_start': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'scope_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'scope_netmask': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'scope_notes': ('django.db.models.fields.TextField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'scope_start': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'})
},
'systems.advisorydata': {
'Meta': {'object_name': 'AdvisoryData', 'db_table': "u'advisory_data'"},
'advisory': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'references': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'severity': ('django.db.models.fields.FloatField', [], {'blank': 'True'}),
'title': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'systems.allocation': {
'Meta': {'ordering': "['name']", 'object_name': 'Allocation', 'db_table': "u'allocations'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'systems.contract': {
'Meta': {'object_name': 'Contract', 'db_table': "u'contracts'"},
'contract_link': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'contract_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'expiration': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'support_level': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.System']"}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'systems.keyvalue': {
'Meta': {'object_name': 'KeyValue', 'db_table': "u'key_value'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.System']"}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'systems.location': {
'Meta': {'ordering': "['name']", 'object_name': 'Location', 'db_table': "u'locations'"},
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'systems.mac': {
'Meta': {'object_name': 'Mac', 'db_table': "u'macs'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mac': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '17'}),
'system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.System']"})
},
'systems.networkadapter': {
'Meta': {'object_name': 'NetworkAdapter', 'db_table': "u'network_adapters'"},
'adapter_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'dhcp_scope': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dhcp.DHCP']", 'null': 'True', 'blank': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'mac_address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'option_domain_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'option_host_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'switch_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'switch_port': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'system_id': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'systems.operatingsystem': {
'Meta': {'ordering': "['name', 'version']", 'object_name': 'OperatingSystem', 'db_table': "u'operating_systems'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'systems.portdata': {
'Meta': {'object_name': 'PortData', 'db_table': "u'port_data'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'port': ('django.db.models.fields.IntegerField', [], {'blank': 'True'}),
'protocol': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
'service': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '13', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'})
},
'systems.scheduledtask': {
'Meta': {'ordering': "['task']", 'object_name': 'ScheduledTask', 'db_table': "u'scheduled_tasks'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'task': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'systems.servermodel': {
'Meta': {'ordering': "['vendor', 'model']", 'object_name': 'ServerModel', 'db_table': "u'server_models'"},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'part_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'vendor': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'systems.system': {
'Meta': {'object_name': 'System', 'db_table': "u'systems'"},
'allocation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.Allocation']", 'null': 'True', 'blank': 'True'}),
'asset_tag': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'change_password': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_dhcp_server': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_dns_server': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_nagios_server': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_puppet_server': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_switch': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'licenses': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'oob_ip': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'oob_switch_port': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'operating_system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.OperatingSystem']", 'null': 'True', 'blank': 'True'}),
'patch_panel_port': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'purchase_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'purchase_price': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'rack_order': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
'ram': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'serial': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'server_model': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.ServerModel']", 'null': 'True', 'blank': 'True'}),
'switch_ports': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'system_rack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.SystemRack']", 'null': 'True', 'blank': 'True'}),
'system_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.SystemStatus']", 'null': 'True', 'blank': 'True'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'warranty_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'warranty_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'systems.systemchangelog': {
'Meta': {'object_name': 'SystemChangeLog', 'db_table': "u'systems_change_log'"},
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {}),
'changed_text': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.System']"})
},
'systems.systemrack': {
'Meta': {'ordering': "['name']", 'object_name': 'SystemRack', 'db_table': "u'system_racks'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'systems.systemstatus': {
'Meta': {'ordering': "['status']", 'object_name': 'SystemStatus', 'db_table': "u'system_statuses'"},
'color': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'color_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'systems.systemtype': {
'Meta': {'object_name': 'SystemType', 'db_table': "u'system_types'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'systems.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "u'user_profiles'"},
'api_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'current_desktop_oncall': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'current_mysqldba_oncall': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'current_pgsqldba_oncall': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'current_services_oncall': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'current_sysadmin_oncall': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'epager_address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'irc_nick': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'is_desktop_oncall': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_mysqldba_oncall': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_pgsqldba_oncall': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_services_oncall': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_sysadmin_oncall': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'pager_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'pager_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['systems']
| |
# ===================================================================
#
# Copyright (c) 2015, Legrandin <helderijs@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
import unittest
from Cryptodome.SelfTest.st_common import list_test_cases
from Cryptodome.Util.py3compat import tobytes, b, unhexlify
from Cryptodome.Cipher import AES, DES3, DES
from Cryptodome.Hash import SHAKE128
def get_tag_random(tag, length):
    """Return *length* deterministic pseudo-random bytes derived from *tag*.

    Uses SHAKE128 as an XOF so the same tag always yields the same stream,
    which keeps the test vectors reproducible across runs.
    """
    xof = SHAKE128.new(data=tobytes(tag))
    return xof.read(length)
from Cryptodome.SelfTest.Cipher.test_CBC import BlockChainingTests
class OfbTests(BlockChainingTests):
    """OFB-specific variants of the generic block-chaining tests."""

    aes_mode = AES.MODE_OFB
    des3_mode = DES3.MODE_OFB

    # Redefine test_unaligned_data_128/64: OFB behaves like a stream mode,
    # so data that is not a multiple of the block size must be accepted.
    #
    # Bug fix: the previous bodies were copied from the CFB suite and built
    # MODE_CFB ciphers (with segment_size, a parameter OFB does not even
    # accept), so these "OFB" tests never exercised OFB at all.
    def test_unaligned_data_128(self):
        """Piecewise OFB encryption must equal one-shot encryption (AES)."""
        plaintexts = [ b("7777777") ] * 100
        cipher = AES.new(self.key_128, AES.MODE_OFB, self.iv_128)
        ciphertexts = [ cipher.encrypt(x) for x in plaintexts ]
        cipher = AES.new(self.key_128, AES.MODE_OFB, self.iv_128)
        self.assertEqual(b("").join(ciphertexts), cipher.encrypt(b("").join(plaintexts)))

    def test_unaligned_data_64(self):
        """Piecewise OFB encryption must equal one-shot encryption (3DES)."""
        plaintexts = [ b("7777777") ] * 100
        cipher = DES3.new(self.key_192, DES3.MODE_OFB, self.iv_64)
        ciphertexts = [ cipher.encrypt(x) for x in plaintexts ]
        cipher = DES3.new(self.key_192, DES3.MODE_OFB, self.iv_64)
        self.assertEqual(b("").join(ciphertexts), cipher.encrypt(b("").join(plaintexts)))
from Cryptodome.SelfTest.Cipher.test_CBC import NistBlockChainingVectors
class NistOfbVectors(NistBlockChainingVectors):
    """Driver for the NIST OFB vector files (KAT, MMT, MCT).

    The actual test methods are attached to this class dynamically by the
    module-level loops below, one per .rsp vector file.
    """
    # Cipher modes the shared NIST harness instantiates for each vector file.
    aes_mode = AES.MODE_OFB
    des_mode = DES.MODE_OFB
    des3_mode = DES3.MODE_OFB
# Create one test method per file
nist_aes_kat_mmt_files = (
    # KAT
    "OFBGFSbox128.rsp",
    "OFBGFSbox192.rsp",
    "OFBGFSbox256.rsp",
    "OFBKeySbox128.rsp",
    "OFBKeySbox192.rsp",
    "OFBKeySbox256.rsp",
    "OFBVarKey128.rsp",
    "OFBVarKey192.rsp",
    "OFBVarKey256.rsp",
    "OFBVarTxt128.rsp",
    "OFBVarTxt192.rsp",
    "OFBVarTxt256.rsp",
    # MMT
    "OFBMMT128.rsp",
    "OFBMMT192.rsp",
    "OFBMMT256.rsp",
    )
nist_aes_mct_files = (
    "OFBMCT128.rsp",
    "OFBMCT192.rsp",
    "OFBMCT256.rsp",
    )
# Attach one AES KAT/MMT test method per vector file.  The
# `file_name=file_name` default argument freezes the loop variable at
# definition time (the standard fix for Python's late-binding closures).
for file_name in nist_aes_kat_mmt_files:
    def new_func(self, file_name=file_name):
        self._do_kat_aes_test(file_name)
    setattr(NistOfbVectors, "test_AES_" + file_name, new_func)
# Same pattern for the AES Monte Carlo (MCT) vector files.
for file_name in nist_aes_mct_files:
    def new_func(self, file_name=file_name):
        self._do_mct_aes_test(file_name)
    setattr(NistOfbVectors, "test_AES_" + file_name, new_func)
# Remove the loop temporaries from the module namespace.
del file_name, new_func
nist_tdes_files = (
    "TOFBMMT2.rsp",    # 2TDES
    "TOFBMMT3.rsp",    # 3TDES
    "TOFBinvperm.rsp", # Single DES
    "TOFBpermop.rsp",
    "TOFBsubtab.rsp",
    "TOFBvarkey.rsp",
    "TOFBvartext.rsp",
    )
# Attach one TDES test method per vector file.
# NOTE(review): unlike above, `file_name`/`new_func` are not del'd after this
# loop, so they linger in the module namespace — harmless but inconsistent.
for file_name in nist_tdes_files:
    def new_func(self, file_name=file_name):
        self._do_tdes_test(file_name)
    setattr(NistOfbVectors, "test_TDES_" + file_name, new_func)
# END OF NIST OFB TEST VECTORS
class SP800TestVectors(unittest.TestCase):
    """Class exercising the OFB test vectors found in Section F.4
    of NIST SP 800-38A."""

    # All four vectors share the same plaintext and IV (NIST SP 800-38A, F.4).
    plaintext = '6bc1bee22e409f96e93d7e117393172a' +\
                'ae2d8a571e03ac9c9eb76fac45af8e51' +\
                '30c81c46a35ce411e5fbc1191a0a52ef' +\
                'f69f2445df4f9b17ad2b417be66c3710'
    iv = '000102030405060708090a0b0c0d0e0f'

    def _check_vectors(self, key, ciphertext):
        """Verify one SP 800-38A vector: encrypt, decrypt, and both again on
        a truncated (unaligned) input.  *key* and *ciphertext* are hex strings.

        A fresh cipher object is created before every call because OFB
        objects keep keystream state between calls.
        """
        key = unhexlify(key)
        iv = unhexlify(self.iv)
        plaintext = unhexlify(self.plaintext)
        ciphertext = unhexlify(ciphertext)
        cipher = AES.new(key, AES.MODE_OFB, iv)
        self.assertEqual(cipher.encrypt(plaintext), ciphertext)
        cipher = AES.new(key, AES.MODE_OFB, iv)
        self.assertEqual(cipher.decrypt(ciphertext), plaintext)
        # Truncated input exercises the unaligned-data path.
        cipher = AES.new(key, AES.MODE_OFB, iv)
        self.assertEqual(cipher.encrypt(plaintext[:-8]), ciphertext[:-8])
        cipher = AES.new(key, AES.MODE_OFB, iv)
        self.assertEqual(cipher.decrypt(ciphertext[:-8]), plaintext[:-8])

    def test_aes_128(self):
        """AES-128/OFB vector (SP 800-38A, F.4.1/F.4.2)."""
        ciphertext = '3b3fd92eb72dad20333449f8e83cfb4a' +\
                     '7789508d16918f03f53c52dac54ed825' +\
                     '9740051e9c5fecf64344f7a82260edcc' +\
                     '304c6528f659c77866a510d9c1d6ae5e'
        self._check_vectors('2b7e151628aed2a6abf7158809cf4f3c', ciphertext)

    def test_aes_192(self):
        """AES-192/OFB vector (SP 800-38A, F.4.3/F.4.4)."""
        ciphertext = 'cdc80d6fddf18cab34c25909c99a4174' +\
                     'fcc28b8d4c63837c09e81700c1100401' +\
                     '8d9a9aeac0f6596f559c6d4daf59a5f2' +\
                     '6d9f200857ca6c3e9cac524bd9acc92a'
        self._check_vectors('8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b',
                            ciphertext)

    def test_aes_256(self):
        """AES-256/OFB vector (SP 800-38A, F.4.5/F.4.6)."""
        ciphertext = 'dc7e84bfda79164b7ecd8486985d3860' +\
                     '4febdc6740d20b3ac88f6ad82a4fb08d' +\
                     '71ab47a086e86eedf39d1c5bba97c408' +\
                     '0126141d67f37be8538f5a8be740e484'
        self._check_vectors('603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4',
                            ciphertext)
def get_tests(config={}):
    """Return the flat list of TestCase instances exported by this module."""
    tests = []
    for case_class in (OfbTests, NistOfbVectors, SP800TestVectors):
        tests += list_test_cases(case_class)
    return tests
if __name__ == '__main__':
    # unittest.main looks up 'suite' by name in this module's namespace.
    def suite():
        return unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')
| |
from gooey.gui.widgets import widget_pack
__author__ = 'Chris'
import wx
from gooey.gui import styling
class BaseGuiComponent(object):
    """Pairs one argparse-derived option with its wx layout: a bold title,
    an optional grey help message, and the WidgetPack's core control,
    stacked vertically inside a single wx.Panel.
    """
    def __init__(self, data, widget_pack):
        # data: option dict; the layout code reads 'display_name', 'help'
        # and 'nargs' from it.
        self.data = data
        # parent
        self.panel = None
        # Widgets
        self.title = None
        self.help_msg = None
        # Internal WidgetPack
        self.widget_pack = widget_pack
        # used to throttle resizing (to avoid widget jiggle)
        # TODO: figure out anti-jiggle technology
        # self.event_stack = []
    def build(self, parent):
        """Public entry point; returns the fully laid-out wx.Panel."""
        return self.do_layout(parent)
    def do_layout(self, parent):
        """Create the panel and stack title / help text / core widget."""
        self.panel = wx.Panel(parent)
        self.title = self.createTitle(self.panel)
        self.help_msg = self.createHelpMsgWidget(self.panel)
        # Min width 0 lets the sizer shrink the label rather than the panel.
        self.help_msg.SetMinSize((0, -1))
        core_widget_set = self.widget_pack.build(self.panel, self.data)
        vertical_container = wx.BoxSizer(wx.VERTICAL)
        vertical_container.Add(self.title)
        vertical_container.AddSpacer(2)
        if self.help_msg.GetLabelText():
            vertical_container.Add(self.help_msg, 1, wx.EXPAND)
            vertical_container.AddSpacer(2)
        else:
            # No help text: a stretch spacer keeps this row the same height
            # as components that do have one.
            vertical_container.AddStretchSpacer(1)
        vertical_container.Add(core_widget_set, 0, wx.EXPAND)
        self.panel.SetSizer(vertical_container)
        self.panel.Bind(wx.EVT_SIZE, self.onResize)
        return self.panel
    def createHelpMsgWidget(self, parent):
        """Build the (possibly empty) grey help StaticText for this option."""
        label_text = (self.formatExtendedHelpMsg(self.data)
                      if self.data['nargs']
                      else self.data['help'])
        base_text = wx.StaticText(parent, label=label_text or '')
        styling.MakeDarkGrey(base_text)
        return base_text
    def createTitle(self, parent):
        """Build the bold title StaticText from the option's display name."""
        text = wx.StaticText(parent, label=self.data['display_name'].title())
        styling.MakeBold(text)
        return text
    def formatExtendedHelpMsg(self, data):
        """Append a note about the required argument count for nargs options."""
        base_text = data.get('help', '')
        nargs = data['nargs']
        if isinstance(nargs, int):
            return '{base}\n(Note: exactly {nargs} arguments are required)'.format(base=base_text, nargs=nargs)
        elif nargs == '+':
            return '{base}\n(Note: at least 1 or more arguments are required)'.format(base=base_text)
        return base_text
    def onResize(self, evt):
        """EVT_SIZE handler: rewrap own help text, then let children resize."""
        # handle internal widgets
        # Freeze/Thaw suppresses intermediate repaints while relayouting.
        self.panel.Freeze()
        self._onResize(evt)
        # propagate event to child widgets
        self.widget_pack.onResize(evt)
        evt.Skip()
        self.panel.Thaw()
    def _onResize(self, evt):
        """Re-wrap the help text to the panel's new width."""
        if not self.help_msg:
            return
        self.panel.Size = evt.GetSize()
        container_width, _ = self.panel.Size
        text_width, _ = self.help_msg.Size
        if text_width != container_width:
            # Un-wrap first (drop inserted newlines) so Wrap() measures
            # against a single line instead of the previous wrapping.
            self.help_msg.SetLabel(self.help_msg.GetLabelText().replace('\n', ' '))
            self.help_msg.Wrap(container_width)
        evt.Skip()
    def GetValue(self):
        """Return the command-line fragment produced by the WidgetPack."""
        return self.widget_pack.getValue()
    def _GetWidget(self):
        # used only for unittesting
        return self.widget_pack.widget
class CheckBox(BaseGuiComponent):
    """A titled checkbox with its help message laid out beside the box.

    Unlike most components, no WidgetPack is used: the wx.CheckBox itself
    is the core control, and GetValue() maps checked -> the option string.
    """
    def __init__(self, data, widget_pack=None):
        BaseGuiComponent.__init__(self, data, widget_pack)
        self.widget = None
        # data: first command alias (e.g. '--verbose') emitted when checked.
        self.option_strings = data['commands'][0]
    def build(self, parent):
        """Public entry point; returns the fully laid-out wx.Panel."""
        return self.do_layout(parent)
    def do_layout(self, parent):
        """Title on top; checkbox and help message side by side below it."""
        self.panel = wx.Panel(parent)
        self.widget = wx.CheckBox(self.panel)
        self.title = self.createTitle(self.panel)
        self.help_msg = self.createHelpMsgWidget(self.panel)
        self.help_msg.SetMinSize((0, -1))
        vertical_container = wx.BoxSizer(wx.VERTICAL)
        vertical_container.Add(self.title)
        horizontal_sizer = wx.BoxSizer(wx.HORIZONTAL)
        horizontal_sizer.Add(self.widget, 0, wx.EXPAND | wx.RIGHT, 10)
        horizontal_sizer.Add(self.help_msg, 1, wx.EXPAND)
        vertical_container.Add(horizontal_sizer, 0, wx.EXPAND)
        self.panel.SetSizer(vertical_container)
        self.panel.Bind(wx.EVT_SIZE, self.onResize)
        return self.panel
    def onSetter(self, evt):
        # Bug fix: this previously called self.getValue(), which does not
        # exist (the method is GetValue) and raised AttributeError whenever
        # the handler fired.
        self.GetValue()
    def onResize(self, evt):
        """EVT_SIZE handler: re-wrap the help text to the panel width."""
        msg = self.help_msg
        container_width, _ = self.panel.Size
        text_width, _ = msg.Size
        if text_width != container_width:
            # Un-wrap before Wrap() so the width is measured on one line.
            msg.SetLabel(msg.GetLabelText().replace('\n', ' '))
            msg.Wrap(container_width)
        evt.Skip()
    def GetValue(self):
        """Return the option string when checked, otherwise ''."""
        return self.option_strings if self.widget.GetValue() else ''
    def _GetWidget(self):
        # used only for unittesting
        return self.widget
class RadioGroup(object):
    """A boxed group of radio buttons, one per option dict in *data*.

    GetValue() returns the first command alias of the selected button,
    or '' when no button is selected.
    """
    def __init__(self, data):
        self.panel = None
        # data: list of option dicts ('display_name', 'help', 'commands').
        self.data = data
        self.radio_buttons = []
        # NOTE: historical typo 'option_stings' is kept — external code may
        # reference this attribute by name.
        self.option_stings = []
        self.help_msgs = []
        self.btn_names = []
    def build(self, parent):
        """Public entry point; returns the fully laid-out wx.Panel."""
        return self.do_layout(parent)
    def do_layout(self, parent):
        """One row per option: radio button + name, help text underneath."""
        self.panel = wx.Panel(parent)
        self.radio_buttons = [wx.RadioButton(self.panel, -1) for _ in self.data]
        self.btn_names = [wx.StaticText(self.panel, label=btn_data['display_name'].title()) for btn_data in self.data]
        self.help_msgs = [wx.StaticText(self.panel, label=btn_data['help'].title()) for btn_data in self.data]
        self.option_stings = [btn_data['commands'] for btn_data in self.data]
        # box = wx.StaticBox(self.panel, -1, label=self.data['group_name'])
        # NOTE(review): the group label is hard-coded rather than taken from
        # the data dict (see commented line above) — confirm this is intended.
        box = wx.StaticBox(self.panel, -1, label='Set Verbosity Level')
        vertical_container = wx.StaticBoxSizer(box, wx.VERTICAL)
        for button, name, help in zip(self.radio_buttons, self.btn_names, self.help_msgs):
            hbox = wx.BoxSizer(wx.HORIZONTAL)
            hbox.Add(button, 0, wx.ALIGN_TOP | wx.ALIGN_LEFT)
            hbox.Add(name, 0, wx.LEFT, 10)
            vertical_container.Add(hbox, 0, wx.EXPAND)
            vertical_container.Add(help, 1, wx.EXPAND | wx.LEFT, 25)
            vertical_container.AddSpacer(5)
            # self.panel.Bind(wx.EVT_RADIOBUTTON, self.onSetter, button)
        self.panel.SetSizer(vertical_container)
        self.panel.Bind(wx.EVT_SIZE, self.onResize)
        return self.panel
    def onSetter(self, evt):
        # Bug fix: previously called self.getValue(), which does not exist
        # (the method is GetValue) and raised AttributeError when fired.
        self.GetValue()
    def onResize(self, evt):
        """EVT_SIZE handler: re-wrap the first help text to the panel width."""
        msg = self.help_msgs[0]
        container_width, _ = self.panel.Size
        text_width, _ = msg.Size
        if text_width != container_width:
            msg.SetLabel(msg.GetLabelText().replace('\n', ' '))
            msg.Wrap(container_width)
        evt.Skip()
    def GetValue(self):
        """Return the selected option's first command string, or ''."""
        vals = [button.GetValue() for button in self.radio_buttons]
        try:
            # Bug fix: the selected option was computed but never returned,
            # so this method always yielded None on success.  The bare
            # except is narrowed to the exceptions index lookup can raise.
            return self.option_stings[vals.index(True)][0]
        except (ValueError, IndexError):
            # ValueError: no button selected (True not in vals).
            return ''
    def _GetWidget(self):
        # used only for unittesting
        return self.radio_buttons
# Factory functions mapping each widget type to a BaseGuiComponent wired
# with the matching WidgetPack payload.
def FileChooser(data):
    """Component for picking a single existing file."""
    return BaseGuiComponent(data=data, widget_pack=widget_pack.FileChooserPayload())

def MultiFileChooser(data):
    """Component for picking multiple files."""
    # NOTE(review): this wires a MultiFileSaverPayload — confirm intended.
    return BaseGuiComponent(data=data, widget_pack=widget_pack.MultiFileSaverPayload())

def DirChooser(data):
    """Component for picking a directory."""
    return BaseGuiComponent(data=data, widget_pack=widget_pack.DirChooserPayload())

def FileSaver(data):
    """Component for choosing a save-file destination."""
    return BaseGuiComponent(data=data, widget_pack=widget_pack.FileSaverPayload())

def DateChooser(data):
    """Component for picking a date."""
    return BaseGuiComponent(data=data, widget_pack=widget_pack.DateChooserPayload())

def TextField(data):
    """Component for free-form text input."""
    return BaseGuiComponent(data=data, widget_pack=widget_pack.TextInputPayload())

def Dropdown(data):
    """Component for choosing from a fixed list of values."""
    return BaseGuiComponent(data=data, widget_pack=widget_pack.DropdownPayload())

def Counter(data):
    """Component for a repeat-count (e.g. -vvv) option."""
    return BaseGuiComponent(data=data, widget_pack=widget_pack.CounterPayload())
| |
import os
from typing import Any, Dict, Sequence
from unittest import mock
from urllib.parse import urlsplit
import ujson
from django.conf import settings
from django.http import HttpResponse
from django.test import override_settings
from corporate.models import Customer
from zerver.lib.integrations import INTEGRATIONS
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import HostRequestMock
from zerver.lib.utils import split_by
from zerver.models import Realm, get_realm
from zerver.views.documentation import add_api_uri_context
class DocPageTest(ZulipTestCase):
    """Smoke-tests documentation and portico pages: every page must return
    HTTP 200 with its expected content, both on a realm subdomain and on
    the root domain, with and without ROOT_DOMAIN_LANDING_PAGE."""

    def get_doc(self, url: str, subdomain: str) -> HttpResponse:
        """Fetch *url*; doc-html fragments are only served to AJAX requests."""
        if url[0:23] == "/integrations/doc-html/":
            return self.client_get(url, subdomain=subdomain, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        return self.client_get(url, subdomain=subdomain)

    def print_msg_if_error(self, url: str, response: HttpResponse) -> None:  # nocoverage
        """Dump the server error message to make CI failures debuggable."""
        if response.status_code == 200:
            return
        print("Error processing URL:", url)
        if response.get('Content-Type') == 'application/json':
            content = ujson.loads(response.content)
            print()
            print("======================================================================")
            print("ERROR: {}".format(content.get('msg')))
            print()

    def _test(self, url: str, expected_content: str, extra_strings: Sequence[str]=[],
              landing_missing_strings: Sequence[str]=[], landing_page: bool=True,
              doc_html_str: bool=False) -> None:
        """Fetch *url* under several subdomain/landing-page combinations and
        assert *expected_content* (plus *extra_strings*) appears in each.

        doc_html_str=True marks bare HTML fragments that carry no <head>
        metadata, so the robots/description meta-tag checks are skipped.
        """
        # Test the URL on the "zephyr" subdomain
        result = self.get_doc(url, subdomain="zephyr")
        self.print_msg_if_error(url, result)
        self.assertEqual(result.status_code, 200)
        self.assertIn(expected_content, str(result.content))
        for s in extra_strings:
            self.assertIn(s, str(result.content))
        if not doc_html_str:
            self.assert_in_success_response(['<meta name="robots" content="noindex,nofollow">'], result)
        # Test the URL on the root subdomain
        result = self.get_doc(url, subdomain="")
        self.print_msg_if_error(url, result)
        self.assertEqual(result.status_code, 200)
        self.assertIn(expected_content, str(result.content))
        if not doc_html_str:
            self.assert_in_success_response(['<meta name="robots" content="noindex,nofollow">'], result)
        for s in extra_strings:
            self.assertIn(s, str(result.content))
        if not landing_page:
            return
        with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
            # Test the URL on the root subdomain with the landing page setting
            result = self.get_doc(url, subdomain="")
            self.print_msg_if_error(url, result)
            self.assertEqual(result.status_code, 200)
            self.assertIn(expected_content, str(result.content))
            for s in extra_strings:
                self.assertIn(s, str(result.content))
            for s in landing_missing_strings:
                self.assertNotIn(s, str(result.content))
            if not doc_html_str:
                # Landing pages are indexable and carry a marketing description.
                self.assert_in_success_response(['<meta name="description" content="Zulip combines'], result)
                self.assert_not_in_success_response(['<meta name="robots" content="noindex,nofollow">'], result)
            # Test the URL on the "zephyr" subdomain with the landing page setting
            result = self.get_doc(url, subdomain="zephyr")
            self.print_msg_if_error(url, result)
            self.assertEqual(result.status_code, 200)
            self.assertIn(expected_content, str(result.content))
            for s in extra_strings:
                self.assertIn(s, str(result.content))
            if not doc_html_str:
                self.assert_in_success_response(['<meta name="robots" content="noindex,nofollow">'], result)

    def test_api_doc_endpoints(self) -> None:
        """Every markdown file under templates/zerver/api must render."""
        current_dir = os.path.dirname(os.path.abspath(__file__))
        api_docs_dir = os.path.join(current_dir, '..', '..', 'templates/zerver/api/')
        files = os.listdir(api_docs_dir)

        def _filter_func(fp: str) -> bool:
            ignored_files = ['sidebar_index.md', 'index.md', 'missing.md']
            return fp.endswith('.md') and not fp.startswith(".") and fp not in ignored_files

        files = list(filter(_filter_func, files))
        for f in files:
            endpoint = f'/api/{os.path.splitext(f)[0]}'
            self._test(endpoint, '', doc_html_str=True)

    def test_doc_endpoints(self) -> None:
        """Spot-check the hand-curated API and portico pages."""
        self._test('/api/', 'The Zulip API')
        self._test('/api/api-keys', 'be careful with it')
        self._test('/api/installation-instructions', 'No download required!')
        self._test('/api/send-message', 'steal away your hearts')
        self._test('/api/render-message', '**foo**')
        self._test('/api/get-streams', 'include_public')
        self._test('/api/get-stream-id', 'The name of the stream to access.')
        self._test('/api/get-subscriptions', 'Get all streams that the user is subscribed to.')
        self._test('/api/get-users', 'client_gravatar')
        self._test('/api/register-queue', 'apply_markdown')
        self._test('/api/get-events', 'dont_block')
        self._test('/api/delete-queue', 'Delete a previously registered queue')
        self._test('/api/update-message', 'propagate_mode')
        self._test('/api/get-own-user', 'takes no parameters')
        self._test('/api/subscribe', 'authorization_errors_fatal')
        self._test('/api/create-user', 'zuliprc-admin')
        self._test('/api/unsubscribe', 'not_removed')
        self._test('/team/', 'industry veterans')
        self._test('/history/', 'Cambridge, Massachusetts')
        # Test the i18n version of one of these pages.
        self._test('/en/history/', 'Cambridge, Massachusetts')
        self._test('/apps/', 'Apps for every platform.')
        self._test('/features/', 'Beautiful messaging')
        self._test('/hello/', 'Chat for distributed teams', landing_missing_strings=["Login"])
        self._test('/why-zulip/', 'Why Zulip?')
        self._test('/for/open-source/', 'for open source projects')
        self._test('/for/research/', 'for researchers')
        self._test('/for/companies/', 'in a company')
        self._test('/for/working-groups-and-communities/', 'standards bodies')
        self._test('/security/', 'TLS encryption')
        self._test('/atlassian/', 'HipChat')
        self._test('/devlogin/', 'Normal users', landing_page=False)
        self._test('/devtools/', 'Useful development URLs')
        self._test('/errors/404/', 'Page not found')
        self._test('/errors/5xx/', 'Internal server error')
        self._test('/emails/', 'manually generate most of the emails by clicking')

        result = self.client_get('/integrations/doc-html/nonexistent_integration', follow=True,
                                 HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(result.status_code, 404)

        result = self.client_get('/new-user/')
        self.assertEqual(result.status_code, 301)
        self.assertIn('hello', result['Location'])

    def test_portico_pages_open_graph_metadata(self) -> None:
        """Open Graph tags on the marketing pages."""
        # Why Zulip
        url = '/why-zulip/'
        title = '<meta property="og:title" content="Team chat with first-class threading">'
        description = '<meta property="og:description" content="Most team chats are overwhelming'
        self._test(url, title, doc_html_str=True)
        self._test(url, description, doc_html_str=True)
        # Features
        url = '/features/'
        title = '<meta property="og:title" content="Zulip Features">'
        description = '<meta property="og:description" content="First class threading'
        self._test(url, title, doc_html_str=True)
        self._test(url, description, doc_html_str=True)

    def test_integration_doc_endpoints(self) -> None:
        """The integrations index and every integration's doc fragment."""
        self._test('/integrations/',
                   'native integrations.',
                   extra_strings=[
                       'And hundreds more through',
                       'Hubot',
                       'Zapier',
                       'IFTTT',
                   ])
        for integration in INTEGRATIONS.keys():
            url = f'/integrations/doc-html/{integration}'
            self._test(url, '', doc_html_str=True)

    def test_integration_pages_open_graph_metadata(self) -> None:
        """Open Graph tags on integration detail, category and index pages."""
        url = '/integrations/doc/github'
        title = '<meta property="og:title" content="Connect GitHub to Zulip">'
        description = '<meta property="og:description" content="Zulip comes with over'
        self._test(url, title, doc_html_str=True)
        self._test(url, description, doc_html_str=True)
        # Test category pages
        url = '/integrations/communication'
        title = '<meta property="og:title" content="Connect your Communication tools to Zulip">'
        description = '<meta property="og:description" content="Zulip comes with over'
        self._test(url, title, doc_html_str=True)
        self._test(url, description, doc_html_str=True)
        # Test integrations page
        url = '/integrations/'
        title = '<meta property="og:title" content="Connect the tools you use to Zulip">'
        description = '<meta property="og:description" content="Zulip comes with over'
        self._test(url, title, doc_html_str=True)
        self._test(url, description, doc_html_str=True)

    def test_doc_html_str_non_ajax_call(self) -> None:
        """Doc-html fragments must 404 for plain (non-AJAX) requests."""
        # We don't need to test all the pages for 404
        # Bug fix: this used to be `[5]` (no colon), which iterated over the
        # *characters* of the sixth integration's name instead of testing
        # the first five integrations.
        for integration in list(INTEGRATIONS.keys())[:5]:
            with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
                url = f'/en/integrations/doc-html/{integration}'
                result = self.client_get(url, subdomain="", follow=True)
                self.assertEqual(result.status_code, 404)
                result = self.client_get(url, subdomain="zephyr", follow=True)
                self.assertEqual(result.status_code, 404)

            url = f'/en/integrations/doc-html/{integration}'
            result = self.client_get(url, subdomain="", follow=True)
            self.assertEqual(result.status_code, 404)
            result = self.client_get(url, subdomain="zephyr", follow=True)
            self.assertEqual(result.status_code, 404)
        result = self.client_get('/integrations/doc-html/nonexistent_integration', follow=True)
        self.assertEqual(result.status_code, 404)

    def test_electron_detection(self) -> None:
        """data-platform reflects the user agent for the desktop app."""
        result = self.client_get("/accounts/password/reset/")
        # TODO: Ideally, this Mozilla would be the specific browser.
        self.assertTrue('data-platform="Mozilla"' in result.content.decode("utf-8"))

        result = self.client_get("/accounts/password/reset/",
                                 HTTP_USER_AGENT="ZulipElectron/1.0.0")
        self.assertTrue('data-platform="ZulipElectron"' in result.content.decode("utf-8"))
class HelpTest(ZulipTestCase):
    """Checks that /help/ articles render, and that links into the web app
    are rewritten to plain text on the landing-page root domain."""

    def test_help_settings_links(self) -> None:
        response = self.client_get('/help/change-the-time-format')
        self.assertEqual(response.status_code, 200)
        body = str(response.content)
        self.assertIn('Go to <a href="/#settings/display-settings">Display settings</a>', body)
        # Check that the sidebar was rendered properly.
        self.assertIn('Getting started with Zulip', body)

        with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
            response = self.client_get('/help/change-the-time-format', subdomain="")
        self.assertEqual(response.status_code, 200)
        body = str(response.content)
        self.assertIn('<strong>Display settings</strong>', body)
        self.assertNotIn('/#settings', body)

    def test_help_relative_links_for_gear(self) -> None:
        response = self.client_get('/help/analytics')
        self.assertEqual(response.status_code, 200)
        body = str(response.content)
        self.assertIn('<a href="/stats">Statistics</a>', body)

        with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
            response = self.client_get('/help/analytics', subdomain="")
        self.assertEqual(response.status_code, 200)
        body = str(response.content)
        self.assertIn('<strong>Statistics</strong>', body)
        self.assertNotIn('/stats', body)

    def test_help_relative_links_for_stream(self) -> None:
        response = self.client_get('/help/message-a-stream-by-email')
        self.assertEqual(response.status_code, 200)
        body = str(response.content)
        self.assertIn('<a href="/#streams/subscribed">Your streams</a>', body)

        with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
            response = self.client_get('/help/message-a-stream-by-email', subdomain="")
        self.assertEqual(response.status_code, 200)
        body = str(response.content)
        self.assertIn('<strong>Manage streams</strong>', body)
        self.assertNotIn('/#streams', body)
class IntegrationTest(ZulipTestCase):
    """Sanity checks for integration metadata and the API-URL context."""

    def test_check_if_every_integration_has_logo_that_exists(self) -> None:
        for integration in INTEGRATIONS.values():
            logo_path = urlsplit(integration.logo_url).path
            self.assertTrue(os.path.isfile(settings.DEPLOY_ROOT + logo_path), integration.name)

    def test_api_url_view_subdomains_base(self) -> None:
        context: Dict[str, Any] = {}
        add_api_uri_context(context, HostRequestMock())
        self.assertEqual(context["api_url_scheme_relative"], "testserver/api")
        self.assertEqual(context["api_url"], "http://testserver/api")
        self.assertTrue(context["html_settings_links"])

    @override_settings(ROOT_DOMAIN_LANDING_PAGE=True)
    def test_api_url_view_subdomains_homepage_base(self) -> None:
        context: Dict[str, Any] = {}
        add_api_uri_context(context, HostRequestMock())
        self.assertEqual(context["api_url_scheme_relative"], "yourZulipDomain.testserver/api")
        self.assertEqual(context["api_url"], "http://yourZulipDomain.testserver/api")
        self.assertFalse(context["html_settings_links"])

    def test_api_url_view_subdomains_full(self) -> None:
        context: Dict[str, Any] = {}
        subdomain_request = HostRequestMock(host="mysubdomain.testserver")
        add_api_uri_context(context, subdomain_request)
        self.assertEqual(context["api_url_scheme_relative"], "mysubdomain.testserver/api")
        self.assertEqual(context["api_url"], "http://mysubdomain.testserver/api")
        self.assertTrue(context["html_settings_links"])

    def test_html_settings_links(self) -> None:
        # Landing-page root domain: links are rendered as plain text.
        context: Dict[str, Any] = {}
        with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
            add_api_uri_context(context, HostRequestMock())
        self.assertEqual(
            context['settings_html'],
            'Zulip settings page')
        self.assertEqual(
            context['subscriptions_html'],
            'streams page')

        # Landing-page realm subdomain: links point into the web app.
        context = {}
        with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
            add_api_uri_context(context, HostRequestMock(host="mysubdomain.testserver"))
        self.assertEqual(
            context['settings_html'],
            '<a href="/#settings">Zulip settings page</a>')
        self.assertEqual(
            context['subscriptions_html'],
            '<a target="_blank" href="/#streams">streams page</a>')

        # Default configuration: links also point into the web app.
        context = {}
        add_api_uri_context(context, HostRequestMock())
        self.assertEqual(
            context['settings_html'],
            '<a href="/#settings">Zulip settings page</a>')
        self.assertEqual(
            context['subscriptions_html'],
            '<a target="_blank" href="/#streams">streams page</a>')
class AboutPageTest(ZulipTestCase):
    """Tests for the /team/ contributors page."""

    def test_endpoint(self) -> None:
        """The /team/ page renders contributor data, degrades gracefully
        when the data file is missing, and redirects when ZILENCER is off."""
        with self.settings(CONTRIBUTOR_DATA_FILE_PATH="zerver/tests/fixtures/authors.json"):
            result = self.client_get('/team/')
        self.assert_in_success_response(['Our amazing community'], result)
        self.assert_in_success_response(['2017-11-20'], result)
        self.assert_in_success_response(['timabbott', 'showell', 'gnprice', 'rishig'], result)

        with mock.patch("zerver.views.portico.open", side_effect=FileNotFoundError) as m:
            result = self.client_get('/team/')
            self.assertEqual(result.status_code, 200)
            self.assert_in_success_response(['Never ran'], result)
            # Bug fix: the original called m.called_once(), which merely
            # records an attribute call on the mock and asserts nothing.
            m.assert_called_once()

        with self.settings(ZILENCER_ENABLED=False):
            result = self.client_get('/team/')
            self.assertEqual(result.status_code, 301)
            self.assertEqual(result["Location"], "https://zulip.com/team/")

    def test_split_by(self) -> None:
        """Utility function primarily used in authors page"""
        flat_list = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        expected_result = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
        self.assertEqual(split_by(flat_list, 3, None), expected_result)
class SmtpConfigErrorTest(ZulipTestCase):
    """Test the error page shown for a broken outgoing-email setup."""

    def test_smtp_error(self) -> None:
        response = self.client_get("/config-error/smtp")
        self.assertEqual(response.status_code, 200)
        self.assert_in_success_response(["email configuration"], response)
class PlansPageTest(ZulipTestCase):
    """Tests for the /plans/ page and its per-plan call-to-action text."""

    def test_plans_auth(self) -> None:
        """Access control for /plans/ across domains and login states."""
        # Test root domain
        result = self.client_get("/plans/", subdomain="")
        self.assert_in_success_response(["Sign up now"], result)
        # Test non-existent domain
        result = self.client_get("/plans/", subdomain="moo")
        self.assertEqual(result.status_code, 404)
        self.assert_in_response("does not exist", result)
        # Test valid domain, no login
        realm = get_realm("zulip")
        realm.plan_type = Realm.STANDARD_FREE
        realm.save(update_fields=["plan_type"])
        result = self.client_get("/plans/", subdomain="zulip")
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result["Location"], "/accounts/login/?next=plans")
        # Test valid domain, with login
        self.login('hamlet')
        result = self.client_get("/plans/", subdomain="zulip")
        self.assert_in_success_response(["Current plan"], result)
        # Test root domain, with login on different domain
        result = self.client_get("/plans/", subdomain="")
        # TODO: works in manual testing, but I suspect something is funny in
        # the test environment
        # self.assert_in_success_response(["Sign up now"], result)

    def test_CTA_text_by_plan_type(self) -> None:
        """Each plan type shows exactly its expected call-to-action text."""
        sign_up_now = "Sign up now"
        buy_standard = "Buy Standard"
        current_plan = "Current plan"
        sponsorship_pending = "Sponsorship pending"

        # Root domain
        result = self.client_get("/plans/", subdomain="")
        self.assert_in_success_response([sign_up_now, buy_standard], result)
        self.assert_not_in_success_response([current_plan, sponsorship_pending], result)

        realm = get_realm("zulip")
        realm.plan_type = Realm.SELF_HOSTED
        realm.save(update_fields=["plan_type"])

        with self.settings(PRODUCTION=True):
            result = self.client_get("/plans/", subdomain="zulip")
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result["Location"], "https://zulip.com/plans")

            self.login('iago')

            # SELF_HOSTED should hide the local plans page, even if logged in
            result = self.client_get("/plans/", subdomain="zulip")
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result["Location"], "https://zulip.com/plans")

        # But in the development environment, it renders a page
        result = self.client_get("/plans/", subdomain="zulip")
        self.assert_in_success_response([sign_up_now, buy_standard], result)
        self.assert_not_in_success_response([current_plan, sponsorship_pending], result)

        realm.plan_type = Realm.LIMITED
        realm.save(update_fields=["plan_type"])
        result = self.client_get("/plans/", subdomain="zulip")
        self.assert_in_success_response([current_plan, buy_standard], result)
        self.assert_not_in_success_response([sign_up_now, sponsorship_pending], result)

        realm.plan_type = Realm.STANDARD_FREE
        realm.save(update_fields=["plan_type"])
        result = self.client_get("/plans/", subdomain="zulip")
        self.assert_in_success_response([current_plan], result)
        self.assert_not_in_success_response([sign_up_now, buy_standard, sponsorship_pending], result)

        realm.plan_type = Realm.STANDARD
        realm.save(update_fields=["plan_type"])
        result = self.client_get("/plans/", subdomain="zulip")
        self.assert_in_success_response([current_plan], result)
        self.assert_not_in_success_response([sign_up_now, buy_standard, sponsorship_pending], result)

        realm.plan_type = Realm.LIMITED
        # Consistency fix: pass update_fields like every other save above.
        realm.save(update_fields=["plan_type"])
        Customer.objects.create(realm=get_realm("zulip"), stripe_customer_id="cus_id", sponsorship_pending=True)
        result = self.client_get("/plans/", subdomain="zulip")
        # (A redundant weaker assertion that duplicated the next line was
        # removed here.)
        self.assert_in_success_response([current_plan, sponsorship_pending], result)
        self.assert_not_in_success_response([sign_up_now, buy_standard], result)
class AppsPageTest(ZulipTestCase):
    """Tests for the /apps/ page and its redirects."""

    def test_apps_view(self) -> None:
        """/apps redirects to /apps/, which redirects off-site without
        ZILENCER and renders locally with it."""
        result = self.client_get('/apps')
        self.assertEqual(result.status_code, 301)
        self.assertTrue(result['Location'].endswith('/apps/'))

        with self.settings(ZILENCER_ENABLED=False):
            result = self.client_get('/apps/')
        self.assertEqual(result.status_code, 301)
        # Idiom fix: assertEqual instead of assertTrue(a == b), which gave
        # an unhelpful failure message.
        self.assertEqual(result['Location'], 'https://zulip.com/apps/')

        with self.settings(ZILENCER_ENABLED=True):
            result = self.client_get('/apps/')
        self.assertEqual(result.status_code, 200)
        html = result.content.decode('utf-8')
        self.assertIn('Apps for every platform.', html)
class PrivacyTermsTest(ZulipTestCase):
    """Tests for the /terms/ and /privacy/ policy pages."""

    def test_custom_tos_template(self) -> None:
        response = self.client_get("/terms/")
        self.assert_in_success_response(
            ["Thanks for using our products and services (\"Services\"). ",
             "By using our Services, you are agreeing to these terms"],
            response)

    def test_custom_terms_of_service_template(self) -> None:
        not_configured_message = (
            'This installation of Zulip does not have a configured '
            'terms of service'
        )
        # Unconfigured installs get a placeholder page.
        with self.settings(TERMS_OF_SERVICE=None):
            response = self.client_get('/terms/')
        self.assert_in_success_response([not_configured_message], response)
        # A configured Markdown file is rendered to HTML.
        with self.settings(TERMS_OF_SERVICE='zerver/tests/markdown/test_markdown.md'):
            response = self.client_get('/terms/')
        self.assert_in_success_response(['This is some <em>bold text</em>.'], response)
        self.assert_not_in_success_response([not_configured_message], response)

    def test_custom_privacy_policy_template(self) -> None:
        not_configured_message = (
            'This installation of Zulip does not have a configured '
            'privacy policy'
        )
        with self.settings(PRIVACY_POLICY=None):
            response = self.client_get('/privacy/')
        self.assert_in_success_response([not_configured_message], response)
        with self.settings(PRIVACY_POLICY='zerver/tests/markdown/test_markdown.md'):
            response = self.client_get('/privacy/')
        self.assert_in_success_response(['This is some <em>bold text</em>.'], response)
        self.assert_not_in_success_response([not_configured_message], response)

    def test_custom_privacy_policy_template_with_absolute_url(self) -> None:
        # The policy path may also be absolute rather than repo-relative.
        current_dir = os.path.dirname(os.path.abspath(__file__))
        abs_path = os.path.join(current_dir, '..', '..',
                                'templates/zerver/tests/markdown/test_markdown.md')
        with self.settings(PRIVACY_POLICY=abs_path):
            response = self.client_get('/privacy/')
        self.assert_in_success_response(['This is some <em>bold text</em>.'], response)

    def test_no_nav(self) -> None:
        # Test that our ?nav=0 feature of /privacy and /terms,
        # designed to comply with the Apple App Store draconian
        # policies that ToS/Privacy pages linked from an iOS app have
        # no links to the rest of the site if there's pricing
        # information for anything elsewhere on the site.
        response = self.client_get("/terms/")
        self.assert_in_success_response(["Plans"], response)

        response = self.client_get("/terms/?nav=no")
        self.assert_not_in_success_response(["Plans"], response)

        response = self.client_get("/privacy/?nav=no")
        self.assert_not_in_success_response(["Plans"], response)
| |
# Copyright 2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gzip
import os
import sys
import shutil
import subprocess
import hashlib
import json
from glob import glob
from pathlib import Path
from mesonbuild.environment import detect_ninja
from mesonbuild.mesonlib import windows_proof_rmtree, MesonException
from mesonbuild.wrap import wrap
from mesonbuild import mlog, build
# Archive formats `meson dist` can emit, and the filename suffix that
# shutil.make_archive produces for each format.
archive_choices = ['gztar', 'xztar', 'zip']
archive_extension = {'gztar': '.tar.gz',
                     'xztar': '.tar.xz',
                     'zip': '.zip'}
def add_arguments(parser):
    """Register the command-line options of the `meson dist` subcommand."""
    flag_options = (
        ('--include-subprojects',
         'Include source code of subprojects that have been used for the build.'),
        ('--no-tests',
         'Do not build and test generated packages.'),
    )
    parser.add_argument('-C', default='.', dest='wd',
                        help='directory to cd into before running')
    parser.add_argument('--formats', default='xztar',
                        help='Comma separated list of archive types to create.')
    for flag, help_text in flag_options:
        parser.add_argument(flag, action='store_true', help=help_text)
def create_hash(fname):
    """Write a ``<fname>.sha256sum`` sidecar file and print the digest.

    The sidecar contains "<hexdigest> <basename>" followed by a newline.
    """
    hashname = fname + '.sha256sum'
    m = hashlib.sha256()
    # Bug fix: the original did m.update(open(fname, 'rb').read()), which
    # leaked the file handle and read the whole archive into memory at
    # once; hash in chunks under a context manager instead.
    with open(fname, 'rb') as f:
        for chunk in iter(lambda: f.read(65536), b''):
            m.update(chunk)
    with open(hashname, 'w') as f:
        f.write('{} {}\n'.format(m.hexdigest(), os.path.basename(fname)))
    print(os.path.relpath(fname), m.hexdigest())
def del_gitfiles(dirname):
    """Delete every .git* entry directly under *dirname*."""
    for gitfile in glob(os.path.join(dirname, '.git*')):
        is_real_dir = os.path.isdir(gitfile) and not os.path.islink(gitfile)
        if is_real_dir:
            # Directories (e.g. .git itself) need a recursive,
            # Windows-safe delete.
            windows_proof_rmtree(gitfile)
        else:
            # Plain files and symlinks (even to directories) are unlinked.
            os.unlink(gitfile)
def process_submodules(dirname):
    """Check out all git submodules under *dirname* and strip their .git*
    metadata. No-op when the tree has no ``.gitmodules``."""
    module_file = os.path.join(dirname, '.gitmodules')
    if not os.path.exists(module_file):
        return
    subprocess.check_call(['git', 'submodule', 'update', '--init', '--recursive'], cwd=dirname)
    # Parse .gitmodules by hand: only the 'path = ...' entries matter.
    # Bug fix: use a context manager so the file handle does not leak
    # (the original iterated a bare open()).
    with open(module_file) as f:
        for line in f:
            line = line.strip()
            if '=' not in line:
                continue
            k, v = line.split('=', 1)
            k = k.strip()
            v = v.strip()
            if k != 'path':
                continue
            del_gitfiles(os.path.join(dirname, v))
def run_dist_scripts(src_root, bld_root, dist_root, dist_scripts):
    """Run the project's custom dist scripts inside the staged dist tree.

    Exits the process if any script fails to start or returns non-zero.
    """
    assert(os.path.isabs(dist_root))
    # Scripts discover the relevant trees via MESON_* variables.
    script_env = dict(os.environ,
                      MESON_DIST_ROOT=dist_root,
                      MESON_SOURCE_ROOT=src_root,
                      MESON_BUILD_ROOT=bld_root)
    for entry in dist_scripts:
        cmd = entry['exe'] + entry['args']
        name = ' '.join(cmd)
        print('Running custom dist script {!r}'.format(name))
        try:
            if subprocess.call(cmd, env=script_env) != 0:
                sys.exit('Dist script errored out')
        except OSError:
            print('Failed to run dist script {!r}'.format(name))
            sys.exit(1)
def is_git(src_root):
    """Return True when *src_root* is a git checkout.

    ``.git`` may be a directory (normal clone) or a file (worktrees and
    submodules store a pointer file instead).
    """
    git_path = os.path.join(src_root, '.git')
    return any(check(git_path) for check in (os.path.isdir, os.path.isfile))
def git_have_dirty_index(src_root):
    '''Check whether there are uncommitted changes in git'''
    cmd = ['git', '-C', src_root, 'diff-index', '--quiet', 'HEAD']
    # diff-index --quiet exits with 1 when the tree differs from HEAD.
    return subprocess.call(cmd) == 1
def git_clone(src_root, distdir):
    """Stage a pristine export of the git tree at *src_root* in *distdir*.

    Uses a shared clone (no object-store copy), checks out submodules,
    then strips all .git* metadata from the result.
    """
    if git_have_dirty_index(src_root):
        mlog.warning('Repository has uncommitted changes that will not be included in the dist tarball')
    # Always start from an empty target directory.
    if os.path.exists(distdir):
        shutil.rmtree(distdir)
    os.makedirs(distdir)
    subprocess.check_call(['git', 'clone', '--shared', src_root, distdir])
    process_submodules(distdir)
    del_gitfiles(distdir)
def create_dist_git(dist_name, archives, src_root, bld_root, dist_sub, dist_scripts, subprojects):
    """Create dist archives from a git checkout.

    Stages the main tree (and any requested subprojects) under
    *dist_sub*, runs the dist scripts, and returns the list of archive
    paths produced.
    """
    distdir = os.path.join(dist_sub, dist_name)
    git_clone(src_root, distdir)
    for sub_path in subprojects:
        sub_src = os.path.join(src_root, sub_path)
        sub_dest = os.path.join(distdir, sub_path)
        # Already staged (e.g. checked out as a submodule of the clone).
        if os.path.exists(sub_dest):
            continue
        # Git subprojects get the same pristine-clone treatment; plain
        # directories are copied verbatim.
        if is_git(sub_src):
            git_clone(sub_src, sub_dest)
        else:
            shutil.copytree(sub_src, sub_dest)
    run_dist_scripts(src_root, bld_root, distdir, dist_scripts)
    names = []
    for fmt in archives:
        archive_path = distdir + archive_extension[fmt]
        shutil.make_archive(distdir, fmt, root_dir=dist_sub, base_dir=dist_name)
        names.append(archive_path)
    shutil.rmtree(distdir)
    return names
def is_hg(src_root):
    """Return True when *src_root* is a Mercurial checkout (has a .hg dir)."""
    hg_dir = os.path.join(src_root, '.hg')
    return os.path.isdir(hg_dir)
def hg_have_dirty_index(src_root):
    '''Check whether there are uncommitted changes in hg'''
    summary = subprocess.check_output(['hg', '-R', src_root, 'summary'])
    # 'commit: (clean)' only appears in the summary when nothing is pending.
    return b'commit: (clean)' not in summary
def create_dist_hg(dist_name, archives, src_root, bld_root, dist_sub, dist_scripts):
    """Create dist archives from a Mercurial checkout.

    Exports one uncompressed tarball via `hg archive` and compresses it
    into the requested formats; zip is exported separately. Returns the
    list of archive paths produced.
    """
    if hg_have_dirty_index(src_root):
        mlog.warning('Repository has uncommitted changes that will not be included in the dist tarball')
    if dist_scripts:
        mlog.warning('dist scripts are not supported in Mercurial projects')

    os.makedirs(dist_sub, exist_ok=True)
    tarname = os.path.join(dist_sub, dist_name + '.tar')
    xzname = tarname + '.xz'
    gzname = tarname + '.gz'
    zipname = os.path.join(dist_sub, dist_name + '.zip')
    # Note that -X interprets relative paths using the current working
    # directory, not the repository root, so this must be an absolute path:
    # https://bz.mercurial-scm.org/show_bug.cgi?id=6267
    #
    # .hg[a-z]* is used instead of .hg* to keep .hg_archival.txt, which may
    # be useful to link the tarball to the Mercurial revision for either
    # manual inspection or in case any code interprets it for a --version or
    # similar.
    subprocess.check_call(['hg', 'archive', '-R', src_root, '-S', '-t', 'tar',
                           '-X', src_root + '/.hg[a-z]*', tarname])
    output_names = []
    if 'xztar' in archives:
        import lzma
        with lzma.open(xzname, 'wb') as xf, open(tarname, 'rb') as tf:
            shutil.copyfileobj(tf, xf)
        output_names.append(xzname)
    if 'gztar' in archives:
        with gzip.open(gzname, 'wb') as zf, open(tarname, 'rb') as tf:
            shutil.copyfileobj(tf, zf)
        output_names.append(gzname)
    # The intermediate tar only exists to feed the compressors above.
    os.unlink(tarname)
    if 'zip' in archives:
        subprocess.check_call(['hg', 'archive', '-R', src_root, '-S', '-t', 'zip', zipname])
        output_names.append(zipname)
    return output_names
def run_dist_steps(meson_command, unpacked_src_dir, builddir, installdir, ninja_bin):
    """Configure, compile, test and install the unpacked dist package.

    Returns 0 on success, or 1 as soon as any step fails (after printing
    which step broke).
    """
    configure_cmd = meson_command + ['--backend=ninja', unpacked_src_dir, builddir]
    if subprocess.call(configure_cmd) != 0:
        print('Running Meson on distribution package failed')
        return 1
    # Build, then run the test suite, both inside the build directory.
    ninja_steps = (
        ([], 'Compiling the distribution package failed'),
        (['test'], 'Running unit tests on the distribution package failed'),
    )
    for extra_targets, failure_message in ninja_steps:
        if subprocess.call([ninja_bin] + extra_targets, cwd=builddir) != 0:
            print(failure_message)
            return 1
    # Install into a scratch DESTDIR so the host system is untouched.
    install_env = dict(os.environ, DESTDIR=installdir)
    if subprocess.call([ninja_bin, 'install'], cwd=builddir, env=install_env) != 0:
        print('Installing the distribution package failed')
        return 1
    return 0
def check_dist(packagename, meson_command, extra_meson_args, bld_root, privdir):
    """Unpack *packagename* and verify it configures, builds, tests and
    installs from scratch.

    Returns 0 on success. On failure the scratch build directory is kept
    around for inspection; on success all scratch directories are removed.
    """
    print('Testing distribution package {}'.format(packagename))
    unpackdir = os.path.join(privdir, 'dist-unpack')
    builddir = os.path.join(privdir, 'dist-build')
    installdir = os.path.join(privdir, 'dist-install')
    # Start each run from empty scratch directories.
    for p in (unpackdir, builddir, installdir):
        if os.path.exists(p):
            windows_proof_rmtree(p)
        os.mkdir(p)
    ninja_bin = detect_ninja()
    shutil.unpack_archive(packagename, unpackdir)
    # A dist archive contains exactly one top-level directory.
    unpacked_files = glob(os.path.join(unpackdir, '*'))
    assert(len(unpacked_files) == 1)
    unpacked_src_dir = unpacked_files[0]
    # Replay the original build's option values (except ones that should
    # not be forced onto a fresh tree) so the test build matches it.
    with open(os.path.join(bld_root, 'meson-info', 'intro-buildoptions.json')) as boptions:
        meson_command += ['-D{name}={value}'.format(**o) for o in json.load(boptions)
                          if o['name'] not in ['backend', 'install_umask', 'buildtype']]
    meson_command += extra_meson_args
    ret = run_dist_steps(meson_command, unpacked_src_dir, builddir, installdir, ninja_bin)
    if ret > 0:
        print('Dist check build directory was {}'.format(builddir))
    else:
        windows_proof_rmtree(unpackdir)
        windows_proof_rmtree(builddir)
        windows_proof_rmtree(installdir)
        print('Distribution package {} tested'.format(packagename))
    return ret
def determine_archives_to_generate(options):
    """Parse ``options.formats`` into a validated list of archive types.

    Exits the process with an error message for unknown format names or
    when no archive type was requested at all.
    """
    result = []
    for fmt in options.formats.split(','):
        if fmt not in archive_choices:
            sys.exit('Value "{}" not one of permitted values {}.'.format(fmt, archive_choices))
        result.append(fmt)
    # Bug fix: the original tested `len(i) == 0` inside the loop — i.e.
    # the length of a single (already validated, non-empty) format name —
    # so the "no archive types" error could never fire. The check belongs
    # here, on the accumulated result.
    if len(result) == 0:
        sys.exit('No archive types specified.')
    return result
def run(options):
    """Entry point for `meson dist`: create, test and checksum archives.

    Returns 0 on success, non-zero on any failure.
    """
    options.wd = os.path.abspath(options.wd)
    buildfile = Path(options.wd) / 'meson-private' / 'build.dat'
    if not buildfile.is_file():
        raise MesonException('Directory {!r} does not seem to be a Meson build directory.'.format(options.wd))
    b = build.load(options.wd)
    # This import must be load delayed, otherwise it will get the default
    # value of None.
    from mesonbuild.mesonlib import meson_command
    src_root = b.environment.source_dir
    bld_root = b.environment.build_dir
    priv_dir = os.path.join(bld_root, 'meson-private')
    dist_sub = os.path.join(bld_root, 'meson-dist')

    dist_name = b.project_name + '-' + b.project_version

    archives = determine_archives_to_generate(options)

    subprojects = []
    extra_meson_args = []
    if options.include_subprojects:
        subproject_dir = os.path.join(src_root, b.subproject_dir)
        for sub in b.subprojects:
            directory = wrap.get_directory(subproject_dir, sub)
            subprojects.append(os.path.join(b.subproject_dir, directory))
        # Subproject sources are bundled in the archive, so the dist-check
        # build must not try to download them again.
        extra_meson_args.append('-Dwrap_mode=nodownload')

    # Pick the staging strategy from the VCS holding the source tree.
    if is_git(src_root):
        names = create_dist_git(dist_name, archives, src_root, bld_root, dist_sub, b.dist_scripts, subprojects)
    elif is_hg(src_root):
        if subprojects:
            print('--include-subprojects option currently not supported with Mercurial')
            return 1
        names = create_dist_hg(dist_name, archives, src_root, bld_root, dist_sub, b.dist_scripts)
    else:
        print('Dist currently only works with Git or Mercurial repos')
        return 1
    if names is None:
        return 1
    rc = 0
    if not options.no_tests:
        # Check only one.
        rc = check_dist(names[0], meson_command, extra_meson_args, bld_root, priv_dir)
    if rc == 0:
        # Only checksum archives that passed (or skipped) the dist check.
        for name in names:
            create_hash(name)
    return rc
| |
"""Config flow for HomeKit integration."""
import random
import re
import string
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.camera import DOMAIN as CAMERA_DOMAIN
from homeassistant.components.media_player import DOMAIN as MEDIA_PLAYER_DOMAIN
from homeassistant.components.remote import DOMAIN as REMOTE_DOMAIN
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
ATTR_FRIENDLY_NAME,
CONF_DOMAINS,
CONF_ENTITIES,
CONF_ENTITY_ID,
CONF_NAME,
CONF_PORT,
)
from homeassistant.core import callback, split_entity_id
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entityfilter import (
CONF_EXCLUDE_DOMAINS,
CONF_EXCLUDE_ENTITIES,
CONF_INCLUDE_DOMAINS,
CONF_INCLUDE_ENTITIES,
)
from .const import (
CONF_AUTO_START,
CONF_ENTITY_CONFIG,
CONF_EXCLUDE_ACCESSORY_MODE,
CONF_FILTER,
CONF_HOMEKIT_MODE,
CONF_VIDEO_CODEC,
DEFAULT_AUTO_START,
DEFAULT_CONFIG_FLOW_PORT,
DEFAULT_HOMEKIT_MODE,
DOMAIN,
HOMEKIT_MODE_ACCESSORY,
HOMEKIT_MODE_BRIDGE,
HOMEKIT_MODES,
SHORT_BRIDGE_NAME,
VIDEO_CODEC_COPY,
)
from .util import async_find_next_available_port, state_needs_accessory_mode
# Option-flow only keys (not persisted in the entry data).
CONF_CAMERA_COPY = "camera_copy"
CONF_INCLUDE_EXCLUDE_MODE = "include_exclude_mode"

MODE_INCLUDE = "include"
MODE_EXCLUDE = "exclude"

INCLUDE_EXCLUDE_MODES = [MODE_EXCLUDE, MODE_INCLUDE]

# Entities in these domains are split into their own accessory-mode
# entries (see _async_get_entity_ids_for_accessory_mode below).
DOMAINS_NEED_ACCESSORY_MODE = [CAMERA_DOMAIN, MEDIA_PLAYER_DOMAIN, REMOTE_DOMAIN]
# Domains that are removed from a bridge's include list entirely.
NEVER_BRIDGED_DOMAINS = [CAMERA_DOMAIN]

CAMERA_ENTITY_PREFIX = f"{CAMERA_DOMAIN}."

# Domains offered in the config/options flow domain pickers.
SUPPORTED_DOMAINS = [
    "alarm_control_panel",
    "automation",
    "binary_sensor",
    CAMERA_DOMAIN,
    "climate",
    "cover",
    "demo",
    "device_tracker",
    "fan",
    "humidifier",
    "input_boolean",
    "light",
    "lock",
    MEDIA_PLAYER_DOMAIN,
    "person",
    REMOTE_DOMAIN,
    "scene",
    "script",
    "sensor",
    "switch",
    "vacuum",
    "water_heater",
]

# Domains preselected when setting up the first bridge.
DEFAULT_DOMAINS = [
    "alarm_control_panel",
    "climate",
    CAMERA_DOMAIN,
    "cover",
    "humidifier",
    "fan",
    "light",
    "lock",
    MEDIA_PLAYER_DOMAIN,
    REMOTE_DOMAIN,
    "switch",
    "vacuum",
    "water_heater",
]

# Template for a fresh entity filter. Users take a shallow .copy() and
# replace the list values by assignment rather than mutating them.
_EMPTY_ENTITY_FILTER = {
    CONF_INCLUDE_DOMAINS: [],
    CONF_EXCLUDE_DOMAINS: [],
    CONF_INCLUDE_ENTITIES: [],
    CONF_EXCLUDE_ENTITIES: [],
}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a config flow for HomeKit.

    The user flow configures a bridge; entities in domains that require
    accessory mode get their own automatically-created accessory entries
    (spawned via the "accessory" source, handled in async_step_accessory).
    """

    VERSION = 1

    def __init__(self) -> None:
        """Initialize config flow."""
        # Accumulates entry data across the steps of this flow.
        self.hk_data = {}

    async def async_step_user(self, user_input=None):
        """Choose specific domains in bridge mode."""
        if user_input is not None:
            # Shallow copy is safe: the list value is replaced, not mutated.
            entity_filter = _EMPTY_ENTITY_FILTER.copy()
            entity_filter[CONF_INCLUDE_DOMAINS] = user_input[CONF_INCLUDE_DOMAINS]
            self.hk_data[CONF_FILTER] = entity_filter
            return await self.async_step_pairing()

        self.hk_data[CONF_HOMEKIT_MODE] = HOMEKIT_MODE_BRIDGE
        # Preselect the default domains only for the first bridge.
        default_domains = [] if self._async_current_names() else DEFAULT_DOMAINS
        return self.async_show_form(
            step_id="user",
            data_schema=vol.Schema(
                {
                    vol.Required(
                        CONF_INCLUDE_DOMAINS, default=default_domains
                    ): cv.multi_select(SUPPORTED_DOMAINS),
                }
            ),
        )

    async def async_step_pairing(self, user_input=None):
        """Pairing instructions."""
        if user_input is not None:
            port = await async_find_next_available_port(
                self.hass, DEFAULT_CONFIG_FLOW_PORT
            )
            # Spawn separate accessory-mode flows for entities that need
            # them before creating the bridge entry itself.
            await self._async_add_entries_for_accessory_mode_entities(port)
            self.hk_data[CONF_PORT] = port
            include_domains_filter = self.hk_data[CONF_FILTER][CONF_INCLUDE_DOMAINS]
            # Never-bridged domains (cameras) are covered by the accessory
            # entries spawned above; drop them from the bridge's filter.
            for domain in NEVER_BRIDGED_DOMAINS:
                if domain in include_domains_filter:
                    include_domains_filter.remove(domain)
            return self.async_create_entry(
                title=f"{self.hk_data[CONF_NAME]}:{self.hk_data[CONF_PORT]}",
                data=self.hk_data,
            )

        self.hk_data[CONF_NAME] = self._async_available_name(SHORT_BRIDGE_NAME)
        self.hk_data[CONF_EXCLUDE_ACCESSORY_MODE] = True
        return self.async_show_form(
            step_id="pairing",
            description_placeholders={CONF_NAME: self.hk_data[CONF_NAME]},
        )

    async def _async_add_entries_for_accessory_mode_entities(self, last_assigned_port):
        """Generate new flows for entities that need their own instances."""
        accessory_mode_entity_ids = _async_get_entity_ids_for_accessory_mode(
            self.hass, self.hk_data[CONF_FILTER][CONF_INCLUDE_DOMAINS]
        )
        exiting_entity_ids_accessory_mode = _async_entity_ids_with_accessory_mode(
            self.hass
        )
        # Each accessory gets its own port, allocated sequentially after
        # the bridge's port.
        next_port_to_check = last_assigned_port + 1
        for entity_id in accessory_mode_entity_ids:
            # Skip entities that already have an accessory-mode entry.
            if entity_id in exiting_entity_ids_accessory_mode:
                continue
            port = await async_find_next_available_port(self.hass, next_port_to_check)
            next_port_to_check = port + 1
            self.hass.async_create_task(
                self.hass.config_entries.flow.async_init(
                    DOMAIN,
                    context={"source": "accessory"},
                    data={CONF_ENTITY_ID: entity_id, CONF_PORT: port},
                )
            )

    async def async_step_accessory(self, accessory_input):
        """Handle creation a single accessory in accessory mode."""
        entity_id = accessory_input[CONF_ENTITY_ID]
        port = accessory_input[CONF_PORT]

        state = self.hass.states.get(entity_id)
        name = state.attributes.get(ATTR_FRIENDLY_NAME) or state.entity_id
        # Accessory entries include exactly this one entity.
        entity_filter = _EMPTY_ENTITY_FILTER.copy()
        entity_filter[CONF_INCLUDE_ENTITIES] = [entity_id]

        entry_data = {
            CONF_PORT: port,
            CONF_NAME: self._async_available_name(name),
            CONF_HOMEKIT_MODE: HOMEKIT_MODE_ACCESSORY,
            CONF_FILTER: entity_filter,
        }
        if entity_id.startswith(CAMERA_ENTITY_PREFIX):
            # Cameras default to the "copy" video codec.
            entry_data[CONF_ENTITY_CONFIG] = {
                entity_id: {CONF_VIDEO_CODEC: VIDEO_CODEC_COPY}
            }

        return self.async_create_entry(
            title=f"{name}:{entry_data[CONF_PORT]}", data=entry_data
        )

    async def async_step_import(self, user_input=None):
        """Handle import from yaml."""
        if not self._async_is_unique_name_port(user_input):
            return self.async_abort(reason="port_name_in_use")
        return self.async_create_entry(
            title=f"{user_input[CONF_NAME]}:{user_input[CONF_PORT]}", data=user_input
        )

    @callback
    def _async_current_names(self):
        """Return a set of bridge names."""
        return {
            entry.data[CONF_NAME]
            for entry in self._async_current_entries(include_ignore=False)
            if CONF_NAME in entry.data
        }

    @callback
    def _async_available_name(self, requested_name):
        """Return an available name for the bridge."""
        current_names = self._async_current_names()
        # Restrict to mDNS-friendly characters by replacing everything
        # else with spaces.
        valid_mdns_name = re.sub("[^A-Za-z0-9 ]+", " ", requested_name)

        if valid_mdns_name not in current_names:
            return valid_mdns_name

        acceptable_mdns_chars = string.ascii_uppercase + string.digits
        suggested_name = None
        # Append random two-character suffixes until the name is unique.
        while not suggested_name or suggested_name in current_names:
            trailer = "".join(random.choices(acceptable_mdns_chars, k=2))
            suggested_name = f"{valid_mdns_name} {trailer}"

        return suggested_name

    @callback
    def _async_is_unique_name_port(self, user_input):
        """Determine is a name or port is already used."""
        name = user_input[CONF_NAME]
        port = user_input[CONF_PORT]
        return not any(
            entry.data[CONF_NAME] == name or entry.data[CONF_PORT] == port
            for entry in self._async_current_entries(include_ignore=False)
        )

    @staticmethod
    @callback
    def async_get_options_flow(config_entry):
        """Get the options flow for this handler."""
        return OptionsFlowHandler(config_entry)
class OptionsFlowHandler(config_entries.OptionsFlow):
    """Handle a option flow for homekit.

    Step order: init (mode + domains) -> include_exclude (entities) ->
    cameras (codec selection, only when cameras are included) ->
    advanced (auto start) -> create entry.
    """

    def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
        """Initialize options flow."""
        self.config_entry = config_entry
        # Working copy of the options being edited across the steps.
        self.hk_options = {}
        # Camera entity_ids covered by the filter; drives the cameras step.
        self.included_cameras = set()

    async def async_step_yaml(self, user_input=None):
        """No options for yaml managed entries."""
        if user_input is not None:
            # Apparently not possible to abort an options flow
            # at the moment
            return self.async_create_entry(title="", data=self.config_entry.options)
        return self.async_show_form(step_id="yaml")

    async def async_step_advanced(self, user_input=None):
        """Choose advanced options."""
        if not self.show_advanced_options or user_input is not None:
            if user_input:
                self.hk_options.update(user_input)

            self.hk_options[CONF_AUTO_START] = self.hk_options.get(
                CONF_AUTO_START, DEFAULT_AUTO_START
            )

            # CONF_DOMAINS/CONF_ENTITIES are only working state for the
            # earlier steps; they are not persisted as options.
            for key in (CONF_DOMAINS, CONF_ENTITIES):
                if key in self.hk_options:
                    del self.hk_options[key]

            return self.async_create_entry(title="", data=self.hk_options)

        return self.async_show_form(
            step_id="advanced",
            data_schema=vol.Schema(
                {
                    vol.Optional(
                        CONF_AUTO_START,
                        default=self.hk_options.get(
                            CONF_AUTO_START, DEFAULT_AUTO_START
                        ),
                    ): bool
                }
            ),
        )

    async def async_step_cameras(self, user_input=None):
        """Choose camera config."""
        if user_input is not None:
            entity_config = self.hk_options[CONF_ENTITY_CONFIG]
            for entity_id in self.included_cameras:
                if entity_id in user_input[CONF_CAMERA_COPY]:
                    entity_config.setdefault(entity_id, {})[
                        CONF_VIDEO_CODEC
                    ] = VIDEO_CODEC_COPY
                elif (
                    entity_id in entity_config
                    and CONF_VIDEO_CODEC in entity_config[entity_id]
                ):
                    # Copy was deselected; drop the per-entity override.
                    del entity_config[entity_id][CONF_VIDEO_CODEC]
            return await self.async_step_advanced()

        # Preselect the cameras currently configured with the copy codec.
        cameras_with_copy = []
        entity_config = self.hk_options.setdefault(CONF_ENTITY_CONFIG, {})
        for entity in self.included_cameras:
            hk_entity_config = entity_config.get(entity, {})
            if hk_entity_config.get(CONF_VIDEO_CODEC) == VIDEO_CODEC_COPY:
                cameras_with_copy.append(entity)

        data_schema = vol.Schema(
            {
                vol.Optional(
                    CONF_CAMERA_COPY,
                    default=cameras_with_copy,
                ): cv.multi_select(self.included_cameras),
            }
        )
        return self.async_show_form(step_id="cameras", data_schema=data_schema)

    async def async_step_include_exclude(self, user_input=None):
        """Choose entities to include or exclude from the domain."""
        if user_input is not None:
            entity_filter = _EMPTY_ENTITY_FILTER.copy()
            if isinstance(user_input[CONF_ENTITIES], list):
                entities = user_input[CONF_ENTITIES]
            else:
                # Accessory mode uses a single-select (vol.In below), so
                # wrap the lone value in a list.
                entities = [user_input[CONF_ENTITIES]]

            if (
                self.hk_options[CONF_HOMEKIT_MODE] == HOMEKIT_MODE_ACCESSORY
                or user_input[CONF_INCLUDE_EXCLUDE_MODE] == MODE_INCLUDE
            ):
                entity_filter[CONF_INCLUDE_ENTITIES] = entities
                # Include all of the domain if there are no entities
                # explicitly included as the user selected the domain
                domains_with_entities_selected = _domains_set_from_entities(entities)
                entity_filter[CONF_INCLUDE_DOMAINS] = [
                    domain
                    for domain in self.hk_options[CONF_DOMAINS]
                    if domain not in domains_with_entities_selected
                ]
                self.included_cameras = {
                    entity_id
                    for entity_id in entities
                    if entity_id.startswith(CAMERA_ENTITY_PREFIX)
                }
            else:
                entity_filter[CONF_INCLUDE_DOMAINS] = self.hk_options[CONF_DOMAINS]
                entity_filter[CONF_EXCLUDE_ENTITIES] = entities
                if CAMERA_DOMAIN in entity_filter[CONF_INCLUDE_DOMAINS]:
                    # Every camera in the domain except the excluded ones.
                    camera_entities = _async_get_matching_entities(
                        self.hass,
                        domains=[CAMERA_DOMAIN],
                    )
                    self.included_cameras = {
                        entity_id
                        for entity_id in camera_entities
                        if entity_id not in entities
                    }
                else:
                    self.included_cameras = set()

            self.hk_options[CONF_FILTER] = entity_filter
            # Only show the camera-codec step when cameras are included.
            if self.included_cameras:
                return await self.async_step_cameras()

            return await self.async_step_advanced()

        entity_filter = self.hk_options.get(CONF_FILTER, {})
        all_supported_entities = _async_get_matching_entities(
            self.hass,
            domains=self.hk_options[CONF_DOMAINS],
        )

        data_schema = {}
        entities = entity_filter.get(CONF_INCLUDE_ENTITIES, [])
        if self.hk_options[CONF_HOMEKIT_MODE] == HOMEKIT_MODE_ACCESSORY:
            # Accessory mode serves exactly one entity: single-select.
            entity_schema = vol.In
        else:
            # Preselect include mode when entities are already included,
            # otherwise default to exclude mode with the excluded list.
            if entities:
                include_exclude_mode = MODE_INCLUDE
            else:
                include_exclude_mode = MODE_EXCLUDE
                entities = entity_filter.get(CONF_EXCLUDE_ENTITIES, [])
            data_schema[
                vol.Required(CONF_INCLUDE_EXCLUDE_MODE, default=include_exclude_mode)
            ] = vol.In(INCLUDE_EXCLUDE_MODES)
            entity_schema = cv.multi_select

        data_schema[vol.Optional(CONF_ENTITIES, default=entities)] = entity_schema(
            all_supported_entities
        )

        return self.async_show_form(
            step_id="include_exclude", data_schema=vol.Schema(data_schema)
        )

    async def async_step_init(self, user_input=None):
        """Handle options flow."""
        # YAML-managed entries have no editable options.
        if self.config_entry.source == SOURCE_IMPORT:
            return await self.async_step_yaml(user_input)

        if user_input is not None:
            self.hk_options.update(user_input)
            return await self.async_step_include_exclude()

        self.hk_options = dict(self.config_entry.options)
        entity_filter = self.hk_options.get(CONF_FILTER, {})
        homekit_mode = self.hk_options.get(CONF_HOMEKIT_MODE, DEFAULT_HOMEKIT_MODE)
        domains = entity_filter.get(CONF_INCLUDE_DOMAINS, [])
        include_entities = entity_filter.get(CONF_INCLUDE_ENTITIES)
        if include_entities:
            # Make sure the domains of explicitly included entities show
            # up as selected in the picker.
            domains.extend(_domains_set_from_entities(include_entities))

        return self.async_show_form(
            step_id="init",
            data_schema=vol.Schema(
                {
                    vol.Required(CONF_HOMEKIT_MODE, default=homekit_mode): vol.In(
                        HOMEKIT_MODES
                    ),
                    vol.Required(
                        CONF_DOMAINS,
                        default=domains,
                    ): cv.multi_select(SUPPORTED_DOMAINS),
                }
            ),
        )
def _async_get_matching_entities(hass, domains=None):
    """Fetch all entities or entities in the given domains."""
    # Sort by entity_id so the pickers render in a stable order.
    states = sorted(
        hass.states.async_all(domains and set(domains)),
        key=lambda state: state.entity_id,
    )
    return {
        state.entity_id: f"{state.attributes.get(ATTR_FRIENDLY_NAME, state.entity_id)} ({state.entity_id})"
        for state in states
    }
def _domains_set_from_entities(entity_ids):
    """Build a set of domains for the given entity ids."""
    domains = set()
    for entity_id in entity_ids:
        domain, _object_id = split_entity_id(entity_id)
        domains.add(domain)
    return domains
@callback
def _async_get_entity_ids_for_accessory_mode(hass, include_domains):
    """Build a list of entities that should be paired in accessory mode."""
    accessory_mode_domains = set(include_domains) & set(DOMAINS_NEED_ACCESSORY_MODE)
    if not accessory_mode_domains:
        return []
    return [
        state.entity_id
        for state in hass.states.async_all(accessory_mode_domains)
        if state_needs_accessory_mode(state)
    ]
@callback
def _async_entity_ids_with_accessory_mode(hass):
    """Return a set of entity ids that have config entries in accessory mode."""
    entity_ids = set()
    for entry in hass.config_entries.async_entries(DOMAIN):
        # We have to handle the case where the data has not yet
        # been migrated to options because the data was just
        # imported and the entry was never started
        source = entry.options if CONF_HOMEKIT_MODE in entry.options else entry.data
        if source.get(CONF_HOMEKIT_MODE) == HOMEKIT_MODE_ACCESSORY:
            entity_ids.add(source[CONF_FILTER][CONF_INCLUDE_ENTITIES][0])
    return entity_ids
| |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for Pulsar Dataset"""
import sys
import numpy as np
import tensorflow as tf
import tensorflow_io as tfio
import pytest
default_pulsar_timeout = 5000
@pytest.mark.skipif(
    sys.platform in ("win32",),
    reason="TODO Pulsar not setup properly on Windows yet",
)
def test_pulsar_simple_messages():
    """Test consuming simple messages from a Pulsar topic with PulsarIODataset.

    NOTE: After the pulsar standalone is setup during the testing phase,
    6 messages (D0, D1, ..., D5) are sent to the `test` topic.
    """
    dataset = tfio.experimental.streaming.PulsarIODataset(
        service_url="pulsar://localhost:6650",
        topic="test",
        subscription="subscription-0",
        timeout=default_pulsar_timeout,
    )
    received = [value.numpy() for (value, _) in dataset]
    expected = [f"D{index}".encode() for index in range(6)]
    assert np.all(received == expected)
@pytest.mark.skipif(
    sys.platform in ("win32",),
    reason="TODO Pulsar not setup properly on Windows yet",
)
def test_pulsar_keyed_messages():
    """Test consuming keyed messages from a Pulsar topic with PulsarIODataset.

    NOTE: After the pulsar standalone is setup during the testing phase,
    6 messages are sent to the `test` topic:
    K0:D0, K1:D1, K0:D2, K1:D3, K0:D4, K1:D5.
    """
    dataset = tfio.experimental.streaming.PulsarIODataset(
        service_url="pulsar://localhost:6650",
        topic="key-test",
        subscription="subscription-0",
        timeout=default_pulsar_timeout,
    )
    # Group payload bytes by their decoded message key.
    messages_by_key = {}
    for message, key in dataset:
        messages_by_key.setdefault(key.numpy().decode(), []).append(message.numpy())
    assert messages_by_key["K0"] == [f"D{index}".encode() for index in range(0, 6, 2)]
    assert messages_by_key["K1"] == [f"D{index}".encode() for index in range(1, 6, 2)]
@pytest.mark.skipif(
    sys.platform in ("win32",),
    reason="TODO Pulsar not setup properly on Windows yet",
)
def test_pulsar_resubscribe():
    """Test resubscribing the same topic.

    If a topic is resubscribed with an existed subscription, the consumer will
    continue consuming from the last position.
    NOTE: This test must be run after `test_pulsar_simple_messages`.
    """
    topic = "test"
    writer = tfio.experimental.streaming.PulsarWriter(
        service_url="pulsar://localhost:6650", topic=topic
    )
    # 1. Append new messages to the topic.
    for index in range(6, 10):
        writer.write(f"D{index}")
    writer.flush()

    # 2. Reuse the subscription from `test_pulsar_simple_messages`: only the
    # newly appended messages should be delivered.
    dataset = tfio.experimental.streaming.PulsarIODataset(
        service_url="pulsar://localhost:6650",
        topic=topic,
        subscription="subscription-0",
        timeout=default_pulsar_timeout,
    )
    continued = [value.numpy() for (value, _) in dataset]
    assert np.all(continued == [f"D{index}".encode() for index in range(6, 10)])

    # 3. A fresh subscription consumes the topic from the beginning.
    dataset = tfio.experimental.streaming.PulsarIODataset(
        service_url="pulsar://localhost:6650",
        topic=topic,
        subscription="subscription-1",
        timeout=default_pulsar_timeout,
    )
    from_start = [value.numpy() for (value, _) in dataset]
    assert np.all(from_start == [f"D{index}".encode() for index in range(10)])
@pytest.mark.skipif(
    sys.platform in ("win32",),
    reason="TODO Pulsar not setup properly on Windows yet",
)
def test_pulsar_invalid_arguments():
    """Test the invalid arguments when a PulsarIODataset is created.

    The following cases are included:
    1. timeout is non-positive
    2. poll_timeout is non-positive
    3. poll_timeout is larger than timeout

    Each case must raise a ValueError. The previous try/except-assert form
    silently passed when NO exception was raised, so a regression that made
    the constructor accept invalid values would have gone undetected;
    pytest.raises fails the test in that case.
    """
    INVALID_TIMEOUT = -123
    with pytest.raises(ValueError) as exc_info:
        tfio.experimental.streaming.PulsarIODataset(
            service_url="pulsar://localhost:6650",
            topic="test",
            subscription="subscription-0",
            timeout=INVALID_TIMEOUT,
        )
    assert str(exc_info.value) == "Invalid timeout value: {}, must be > 0".format(
        INVALID_TIMEOUT
    )

    VALID_TIMEOUT = default_pulsar_timeout
    INVALID_POLL_TIMEOUT = -45
    with pytest.raises(ValueError) as exc_info:
        tfio.experimental.streaming.PulsarIODataset(
            service_url="pulsar://localhost:6650",
            topic="test",
            subscription="subscription-0",
            timeout=VALID_TIMEOUT,
            poll_timeout=INVALID_POLL_TIMEOUT,
        )
    assert str(exc_info.value) == "Invalid poll_timeout value: {}, must be > 0".format(
        INVALID_POLL_TIMEOUT
    )

    LARGE_POLL_TIMEOUT = VALID_TIMEOUT + 1
    with pytest.raises(ValueError) as exc_info:
        tfio.experimental.streaming.PulsarIODataset(
            service_url="pulsar://localhost:6650",
            topic="test",
            subscription="subscription-0",
            timeout=VALID_TIMEOUT,
            poll_timeout=LARGE_POLL_TIMEOUT,
        )
    assert str(
        exc_info.value
    ) == "Invalid poll_timeout value: {}, must be <= timeout({})".format(
        LARGE_POLL_TIMEOUT, VALID_TIMEOUT
    )
@pytest.mark.skipif(
    sys.platform in ("win32",),
    reason="TODO Pulsar not setup properly on Windows yet",
)
def test_pulsar_write_simple_messages():
    """Test writing simple messages to a Pulsar topic with PulsarWriter"""
    topic = "test-write-simple-messages"
    writer = tfio.experimental.streaming.PulsarWriter(
        service_url="pulsar://localhost:6650", topic=topic
    )
    # 1. Produce 10 messages.
    for index in range(10):
        writer.write(f"msg-{index}")
    writer.flush()

    # 2. Read them back and verify content and order.
    dataset = tfio.experimental.streaming.PulsarIODataset(
        service_url="pulsar://localhost:6650",
        topic=topic,
        subscription="subscription-0",
        timeout=default_pulsar_timeout,
    )
    received = [value.numpy() for (value, _) in dataset]
    assert np.all(received == [f"msg-{index}".encode() for index in range(10)])
@pytest.mark.skipif(
    sys.platform in ("win32",),
    reason="TODO Pulsar not setup properly on Windows yet",
)
def test_pulsar_write_keyed_messages():
    """Test writing keyed messages to a Pulsar topic with PulsarWriter"""
    topic = "test-write-keyed-messages"
    writer = tfio.experimental.streaming.PulsarWriter(
        service_url="pulsar://localhost:6650", topic=topic
    )
    # 1. Produce 10 keyed messages; keys cycle through 0, 1, 2.
    for index in range(10):
        writer.write(value=f"msg-{index}", key=str(index % 3))
    writer.flush()

    # 2. Read everything back and group the payloads by key.
    dataset = tfio.experimental.streaming.PulsarIODataset(
        service_url="pulsar://localhost:6650",
        topic=topic,
        subscription="subscription-0",
        timeout=default_pulsar_timeout,
    )
    grouped = {}
    for message, key in dataset:
        grouped.setdefault(key.numpy().decode(), []).append(message.numpy())
    # Every third message starting at `start` carries key str(start).
    for start in range(3):
        assert grouped[str(start)] == [
            f"msg-{index}".encode() for index in range(start, 10, 3)
        ]
if __name__ == "__main__":
    # The original called `test.main()`, but no `test` name is imported in
    # this module, so direct execution raised NameError. Since the tests are
    # plain pytest functions, run pytest on this file instead.
    sys.exit(pytest.main([__file__]))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.