#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Particl Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import os
import json
import configparser
from test_framework.test_particl import (
ParticlTestFramework,
isclose,
getIndexAtProperty,
)
from test_framework.test_framework import SkipTest
from test_framework.util import assert_raises_rpc_error
from test_framework.authproxy import JSONRPCException
class USBDeviceTest(ParticlTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [ ['-debug','-noacceptnonstdtxn','-reservebalance=10000000', '-txindex'] for i in range(self.num_nodes)]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self, split=False):
self.add_nodes(self.num_nodes, extra_args=self.extra_args)
self.start_nodes()
self.connect_nodes_bi(0, 1)
self.connect_nodes_bi(0, 2)
self.connect_nodes_bi(1, 2)
self.sync_all()
def run_test(self):
# Check that particl has been built with USB device enabled
config = configparser.ConfigParser()
if not self.options.configfile:
self.options.configfile = os.path.dirname(__file__) + "/../config.ini"
config.read_file(open(self.options.configfile))
if not config["components"].getboolean("ENABLE_USBDEVICE"):
raise SkipTest("particld has not been built with usb device enabled.")
nodes = self.nodes
self.import_genesis_coins_a(nodes[0])
ro = nodes[1].listdevices()
assert(len(ro) == 1)
assert(ro[0]['vendor'] == 'Debug')
assert(ro[0]['product'] == 'Device')
ro = nodes[1].getdeviceinfo()
assert(ro['device'] == 'debug')
ro = nodes[1].getdevicepublickey('0')
assert(ro['address'] == 'praish9BVxVdhykpqBYEs6L65AQ7iKd9z1')
assert(ro['path'] == "m/44'/1'/0'/0")
ro = nodes[1].getdevicepublickey('0/1')
assert(ro['address'] == 'peWvjy33QptC2Gz3ww7jTTLPjC2QJmifBR')
assert(ro['path'] == "m/44'/1'/0'/0/1")
ro = nodes[1].getdevicexpub("m/44'/1'/0'", "")
assert(ro == 'pparszKXPyRegWYwPacdPduNPNEryRbZDCAiSyo8oZYSsbTjc6FLP4TCPEX58kAeCB6YW9cSdR6fsbpeWDBTgjbkYjXCoD9CNoFVefbkg3exzpQE')
message = 'This is just a test message'
sig = nodes[1].devicesignmessage('0/1', message)
assert(True == nodes[1].verifymessage('peWvjy33QptC2Gz3ww7jTTLPjC2QJmifBR', sig, message))
ro = nodes[1].initaccountfromdevice('test_acc')
assert(ro['extkey'] == 'pparszKXPyRegWYwPacdPduNPNEryRbZDCAiSyo8oZYSsbTjc6FLP4TCPEX58kAeCB6YW9cSdR6fsbpeWDBTgjbkYjXCoD9CNoFVefbkg3exzpQE')
assert(ro['path'] == "m/44'/1'/0'")
ro = nodes[1].extkey('list', 'true')
assert(len(ro) == 1)
assert(ro[0]['path'] == "m/44h/1h/0h")
assert(ro[0]['epkey'] == 'pparszKXPyRegWYwPacdPduNPNEryRbZDCAiSyo8oZYSsbTjc6FLP4TCPEX58kAeCB6YW9cSdR6fsbpeWDBTgjbkYjXCoD9CNoFVefbkg3exzpQE')
assert(ro[0]['label'] == 'test_acc')
assert(ro[0]['hardware_device'] == '0xffff 0x0001')
ro = nodes[1].extkey('account')
n = getIndexAtProperty(ro['chains'], 'use_type', 'stealth_spend')
assert(n > -1)
assert(ro['chains'][n]['path'] == "m/0h/444445h")
addr1_0 = nodes[1].getnewaddress('lbl1_0')
ro = nodes[1].filteraddresses()
assert(len(ro) == 1)
assert(ro[0]['path'] == 'm/0/0')
assert(ro[0]['owned'] == 'true')
assert(ro[0]['label'] == 'lbl1_0')
va_addr1_0 = nodes[1].getaddressinfo(addr1_0)
assert(va_addr1_0['ismine'] == True)
assert(va_addr1_0['iswatchonly'] == False)
assert(va_addr1_0['isondevice'] == True)
assert(va_addr1_0['path'] == 'm/0/0')
try:
nodes[1].getnewstealthaddress()
raise AssertionError('Should have failed.')
except JSONRPCException as e:
pass
extaddr1_0 = nodes[1].getnewextaddress()
txnid0 = nodes[0].sendtoaddress(addr1_0, 6)
txnid1 = nodes[0].sendtoaddress(extaddr1_0, 6)
self.stakeBlocks(1)
block_txns = nodes[0].getblock(nodes[0].getblockhash(nodes[0].getblockcount()))['tx']
assert(txnid0 in block_txns)
assert(txnid1 in block_txns)
ro = nodes[1].getwalletinfo()
assert(isclose(ro['balance'], 12.0))
addr0_0 = nodes[0].getnewaddress()
hexRaw = nodes[1].createrawtransaction([], {addr0_0:10})
hexFunded = nodes[1].fundrawtransaction(hexRaw)['hex']
txDecoded = nodes[1].decoderawtransaction(hexFunded)
ro = nodes[1].devicesignrawtransactionwithwallet(hexFunded)
assert(ro['complete'] == True)
txnid1 = nodes[1].sendrawtransaction(ro['hex'])
self.sync_all()
self.stakeBlocks(1)
ro = nodes[1].devicesignrawtransactionwithwallet(hexFunded)
assert(ro['errors'][0]['error'] == 'Input not found or already spent')
prevtxns = []
for vin in txDecoded['vin']:
rtx = nodes[1].getrawtransaction(vin['txid'], True)
prev_out = rtx['vout'][vin['vout']]
prevtxns.append({'txid': vin['txid'], 'vout': vin['vout'], 'scriptPubKey': prev_out['scriptPubKey']['hex'], 'amount': prev_out['value']})
ro = nodes[1].devicesignrawtransaction(hexFunded, prevtxns, ['0/0', '2/0'])
assert(ro['complete'] == True)
ro = nodes[1].listunspent()
assert(ro[0]['ondevice'] == True)
txnid2 = nodes[1].sendtoaddress(addr0_0, 0.1)
self.sync_all()
nodes[0].syncwithvalidationinterfacequeue()
assert(nodes[0].filtertransactions()[0]['txid'] == txnid2)
hwsxaddr = nodes[1].devicegetnewstealthaddress()
assert(hwsxaddr == 'tps1qqpdwu7gqjqz9s9wfek843akvkzvw0xq3tkzs93sj4ceq60cp54mvzgpqf4tp6d7h0nza2xe362am697dax24hcr33yxqwvq58l5cf6j6q5hkqqqgykgrc')
hwsxaddr2 = nodes[1].devicegetnewstealthaddress('lbl2 4bits', '4', '0xaaaa', True)
assert(hwsxaddr2 == 'tps1qqpewyspjp93axk82zahx5xfjyprpvypfgnp95n9aynxxw3w0qs63acpq0s5z2rwk0raczg8jszl9qy5stncud76ahr5etn9hqmp30e3e86w2qqypgh9sgv0')
ro = nodes[1].getaddressinfo(hwsxaddr2)
assert(ro['prefix_num_bits'] == 4)
assert(ro['prefix_bitfield'] == '0x000a')
assert(ro['isondevice'] == True)
ro = nodes[1].liststealthaddresses()
assert(len(ro[0]['Stealth Addresses']) == 2)
ro = nodes[1].filteraddresses()
assert(len(ro) == 3)
txnid3 = nodes[0].sendtoaddress(hwsxaddr, 0.1, '', '', False, 'test msg')
self.stakeBlocks(1)
ro = nodes[1].listtransactions()
assert(len(ro) == 5)
assert('test msg' in self.dumpj(ro[4]))
ro = nodes[1].listunspent()
inputs = []
for output in ro:
if output['txid'] == txnid3:
inputs.append({'txid' : txnid3, 'vout' : output['vout']})
break
assert(len(inputs) > 0)
hexRaw = nodes[1].createrawtransaction(inputs, {addr0_0:0.09})
ro = nodes[1].devicesignrawtransactionwithwallet(hexRaw)
assert(ro['complete'] == True)
# import the root private extended key into node2
rootkey = nodes[2].extkeyaltversion('xparFdrwJK7K2nfYzrkEqAKr5EcJNdY4c6ZNoLFFx1pMXQSQpo5MAufjogrS17RkqsLAijZJaBDHhG3G7SuJjtsTmRRTEKZDzGMnVCeX59cQCiR')
ro = nodes[2].extkey('import', rootkey, 'master key', True)
ro = nodes[2].extkey('setmaster', ro['id'])
assert(ro['result'] == 'Success.')
ro = nodes[2].extkey('deriveaccount', 'test account')
ro = nodes[2].extkey('setdefaultaccount', ro['account'])
assert(ro['result'] == 'Success.')
ro = nodes[1].extkey('account')
n = getIndexAtProperty(ro['chains'], 'use_type', 'stealth_spend')
assert(n > -1)
assert(ro['chains'][n]['path'] == "m/0h/444445h")
addrtest = nodes[2].getnewaddress()
ro = nodes[1].getdevicepublickey('0/0')
assert(addrtest == ro['address'])
addrtest = nodes[2].getnewstealthaddress('', '0', '', True, True)
assert(addrtest == hwsxaddr)
addrtest2 = nodes[2].getnewstealthaddress('lbl2 4bits', '4', '0xaaaa', True, True)
assert(addrtest2 == hwsxaddr2)
extaddr2_0 = nodes[2].getnewextaddress()
assert(extaddr1_0 == extaddr2_0)
# Ensure account matches after node restarts
account1 = nodes[1].extkey('account')
self.restart_node(1, extra_args=self.extra_args[1] + ['-wallet=default_wallet',])
account1_r = nodes[1].extkey('account')
assert(json.dumps(account1) == json.dumps(account1_r))
# Test for coverage
assert(nodes[1].promptunlockdevice()['sent'] is True)
assert(nodes[1].unlockdevice('123')['unlocked'] is True)
assert_raises_rpc_error(-8, 'Neither a pin nor a passphraseword was provided.', nodes[1].unlockdevice)
assert('complete' in nodes[1].devicebackup())
assert('complete' in nodes[1].deviceloadmnemonic())
if __name__ == '__main__':
USBDeviceTest().main()
"""Support for Honeywell Lyric climate platform."""
from __future__ import annotations
import logging
from time import localtime, strftime, time
from aiolyric.objects.device import LyricDevice
from aiolyric.objects.location import LyricLocation
import voluptuous as vol
from homeassistant.components.climate import ClimateEntity, ClimateEntityDescription
from homeassistant.components.climate.const import (
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_COOL,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_TEMPERATURE
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import entity_platform
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from . import LyricDeviceEntity
from .const import (
DOMAIN,
LYRIC_EXCEPTIONS,
PRESET_HOLD_UNTIL,
PRESET_NO_HOLD,
PRESET_PERMANENT_HOLD,
PRESET_TEMPORARY_HOLD,
PRESET_VACATION_HOLD,
)
_LOGGER = logging.getLogger(__name__)
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
LYRIC_HVAC_ACTION_OFF = "EquipmentOff"
LYRIC_HVAC_ACTION_HEAT = "Heat"
LYRIC_HVAC_ACTION_COOL = "Cool"
LYRIC_HVAC_MODE_OFF = "Off"
LYRIC_HVAC_MODE_HEAT = "Heat"
LYRIC_HVAC_MODE_COOL = "Cool"
LYRIC_HVAC_MODE_HEAT_COOL = "Auto"
LYRIC_HVAC_MODES = {
HVAC_MODE_OFF: LYRIC_HVAC_MODE_OFF,
HVAC_MODE_HEAT: LYRIC_HVAC_MODE_HEAT,
HVAC_MODE_COOL: LYRIC_HVAC_MODE_COOL,
HVAC_MODE_HEAT_COOL: LYRIC_HVAC_MODE_HEAT_COOL,
}
HVAC_MODES = {
LYRIC_HVAC_MODE_OFF: HVAC_MODE_OFF,
LYRIC_HVAC_MODE_HEAT: HVAC_MODE_HEAT,
LYRIC_HVAC_MODE_COOL: HVAC_MODE_COOL,
LYRIC_HVAC_MODE_HEAT_COOL: HVAC_MODE_HEAT_COOL,
}
HVAC_ACTIONS = {
LYRIC_HVAC_ACTION_OFF: CURRENT_HVAC_OFF,
LYRIC_HVAC_ACTION_HEAT: CURRENT_HVAC_HEAT,
LYRIC_HVAC_ACTION_COOL: CURRENT_HVAC_COOL,
}
SERVICE_HOLD_TIME = "set_hold_time"
ATTR_TIME_PERIOD = "time_period"
SCHEMA_HOLD_TIME = {
vol.Required(ATTR_TIME_PERIOD, default="01:00:00"): vol.All(
cv.time_period,
cv.positive_timedelta,
lambda td: strftime("%H:%M:%S", localtime(time() + td.total_seconds())),
)
}
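# Illustration (not part of the original schema): calling the service with
# {"time_period": "02:30:00"} is validated by cv.time_period, then the lambda
# above converts the relative period into an absolute local wall-clock string,
# e.g. "16:30:00" when the service is invoked at 14:00 local time.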
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the Honeywell Lyric climate platform based on a config entry."""
coordinator: DataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
entities = []
for location in coordinator.data.locations:
for device in location.devices:
entities.append(
LyricClimate(
coordinator,
ClimateEntityDescription(
key=f"{device.macID}_thermostat",
name=device.name,
),
location,
device,
hass.config.units.temperature_unit,
)
)
async_add_entities(entities, True)
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_HOLD_TIME,
SCHEMA_HOLD_TIME,
"async_set_hold_time",
)
class LyricClimate(LyricDeviceEntity, ClimateEntity):
"""Defines a Honeywell Lyric climate entity."""
coordinator: DataUpdateCoordinator
entity_description: ClimateEntityDescription
def __init__(
self,
coordinator: DataUpdateCoordinator,
description: ClimateEntityDescription,
location: LyricLocation,
device: LyricDevice,
temperature_unit: str,
) -> None:
"""Initialize Honeywell Lyric climate entity."""
self._temperature_unit = temperature_unit
# Set up supported HVAC modes
self._hvac_modes = [HVAC_MODE_OFF]
# Add supported lyric thermostat features
if LYRIC_HVAC_MODE_HEAT in device.allowedModes:
self._hvac_modes.append(HVAC_MODE_HEAT)
if LYRIC_HVAC_MODE_COOL in device.allowedModes:
self._hvac_modes.append(HVAC_MODE_COOL)
if (
LYRIC_HVAC_MODE_HEAT in device.allowedModes
and LYRIC_HVAC_MODE_COOL in device.allowedModes
):
self._hvac_modes.append(HVAC_MODE_HEAT_COOL)
super().__init__(
coordinator,
location,
device,
f"{device.macID}_thermostat",
)
self.entity_description = description
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_FLAGS
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
return self._temperature_unit
@property
def current_temperature(self) -> float | None:
"""Return the current temperature."""
return self.device.indoorTemperature
@property
def hvac_action(self) -> str:
"""Return the current hvac action."""
action = HVAC_ACTIONS.get(self.device.operationStatus.mode, None)
if action == CURRENT_HVAC_OFF and self.hvac_mode != HVAC_MODE_OFF:
action = CURRENT_HVAC_IDLE
return action
@property
def hvac_mode(self) -> str:
"""Return the hvac mode."""
return HVAC_MODES[self.device.changeableValues.mode]
@property
def hvac_modes(self) -> list[str]:
"""List of available hvac modes."""
return self._hvac_modes
@property
def target_temperature(self) -> float | None:
"""Return the temperature we try to reach."""
device = self.device
if not device.hasDualSetpointStatus:
if self.hvac_mode == HVAC_MODE_COOL:
return device.changeableValues.coolSetpoint
return device.changeableValues.heatSetpoint
return None
@property
def target_temperature_low(self) -> float | None:
"""Return the upper bound temperature we try to reach."""
device = self.device
if device.hasDualSetpointStatus:
return device.changeableValues.coolSetpoint
return None
@property
def target_temperature_high(self) -> float | None:
"""Return the upper bound temperature we try to reach."""
device = self.device
if device.hasDualSetpointStatus:
return device.changeableValues.heatSetpoint
return None
@property
def preset_mode(self) -> str | None:
"""Return current preset mode."""
return self.device.changeableValues.thermostatSetpointStatus
@property
def preset_modes(self) -> list[str] | None:
"""Return preset modes."""
return [
PRESET_NO_HOLD,
PRESET_HOLD_UNTIL,
PRESET_PERMANENT_HOLD,
PRESET_TEMPORARY_HOLD,
PRESET_VACATION_HOLD,
]
@property
def min_temp(self) -> float:
"""Identify min_temp in Lyric API or defaults if not available."""
device = self.device
if LYRIC_HVAC_MODE_COOL in device.allowedModes:
return device.minCoolSetpoint
return device.minHeatSetpoint
@property
def max_temp(self) -> float:
"""Identify max_temp in Lyric API or defaults if not available."""
device = self.device
if LYRIC_HVAC_MODE_HEAT in device.allowedModes:
return device.maxHeatSetpoint
return device.maxCoolSetpoint
async def async_set_temperature(self, **kwargs) -> None:
"""Set new target temperature."""
target_temp_low = kwargs.get(ATTR_TARGET_TEMP_LOW)
target_temp_high = kwargs.get(ATTR_TARGET_TEMP_HIGH)
device = self.device
if device.hasDualSetpointStatus:
if target_temp_low is None or target_temp_high is None:
raise HomeAssistantError(
"Could not find target_temp_low and/or target_temp_high in arguments"
)
_LOGGER.debug("Set temperature: %s - %s", target_temp_low, target_temp_high)
try:
await self._update_thermostat(
self.location,
device,
coolSetpoint=target_temp_low,
heatSetpoint=target_temp_high,
)
except LYRIC_EXCEPTIONS as exception:
_LOGGER.error(exception)
else:
temp = kwargs.get(ATTR_TEMPERATURE)
_LOGGER.debug("Set temperature: %s", temp)
try:
if self.hvac_mode == HVAC_MODE_COOL:
await self._update_thermostat(
self.location, device, coolSetpoint=temp
)
else:
await self._update_thermostat(
self.location, device, heatSetpoint=temp
)
except LYRIC_EXCEPTIONS as exception:
_LOGGER.error(exception)
await self.coordinator.async_refresh()
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set hvac mode."""
_LOGGER.debug("Set hvac mode: %s", hvac_mode)
try:
await self._update_thermostat(
self.location, self.device, mode=LYRIC_HVAC_MODES[hvac_mode]
)
except LYRIC_EXCEPTIONS as exception:
_LOGGER.error(exception)
await self.coordinator.async_refresh()
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set preset (PermanentHold, HoldUntil, NoHold, VacationHold) mode."""
_LOGGER.debug("Set preset mode: %s", preset_mode)
try:
await self._update_thermostat(
self.location, self.device, thermostatSetpointStatus=preset_mode
)
except LYRIC_EXCEPTIONS as exception:
_LOGGER.error(exception)
await self.coordinator.async_refresh()
async def async_set_hold_time(self, time_period: str) -> None:
"""Set the time to hold until."""
_LOGGER.debug("set_hold_time: %s", time_period)
try:
await self._update_thermostat(
self.location,
self.device,
thermostatSetpointStatus=PRESET_HOLD_UNTIL,
nextPeriodTime=time_period,
)
except LYRIC_EXCEPTIONS as exception:
_LOGGER.error(exception)
await self.coordinator.async_refresh()
# Test of ping-pong behaviour, with more conditions added to make it more reliable.
import os
import signal
import sys
import socket, time
import cv
import json
from PIL import Image
from numpy import array
import ImageFilter
# Make sure the path Python searches when importing modules includes all of our code.
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
# Import Qt modules (platform independent)
import ardrone.util.qtcompat as qt
QtCore = qt.import_module('QtCore')
QtNetwork = qt.import_module('QtNetwork')
# Import other objects
from ardrone.core.controlloop import ControlLoop
from ardrone.platform import qt as platform
import ardrone.core.videopacket as videopacket
"""A global socket object which can be used to send commands to the GUI program."""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
"""A global sequence counter. The GUI uses this to determine if two commands
have been received in the wrong order: the command with the largest (latest)
sequence will always 'win'."""
seq_m = 0
def send_state(state):
"""Send the state dictionary to the drone GUI.
state is a dictionary with (at least) the keys roll, pitch, yaw, gas,
take_off, reset and hover. The first four are floating point values on the
interval [-1,1] which specify the setting of the corresponding attitude
angle/vertical speed. The last three are True or False to indicate if that
virtual 'button' is pressed.
"""
global seq_m, sock
seq_m += 1
HOST, PORT = ('127.0.0.1', 5560)
#print('state is', json.dumps({'seq': seq_m, 'state': state}))
sock.sendto(json.dumps({'seq': seq_m, 'state': state}), (HOST, PORT))
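# For illustration: the first call, e.g. send_state(normal_state), sends a UDP
# datagram to 127.0.0.1:5560 whose payload is the JSON text
# {"seq": 1, "state": {"roll": 0.0, "pitch": 0.0, "yaw": 0.0, "gas": 0.0,
#  "take_off": false, "reset": false, "hover": true}} (key order may vary).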
normal_state = {
'roll': 0.0,
'pitch': 0.0,
'yaw': 0.0,
'gas': 0.0,
'take_off': False,
'reset': False,
'hover': True,
}
turn_left_state = {
'roll': 0.0,
'pitch': 0.0,
'yaw': -0.6,
'gas': 0.0,
'take_off': False,
'reset': False,
'hover': False,
}
turn_right_state = {
'roll': 0.0,
'pitch': 0.0,
'yaw': 0.6,
'gas': 0.0,
'take_off': False,
'reset': False,
'hover': False,
}
move_forward_state = {
'roll':0.0,
'pitch': -0.06,
'yaw': 0.0,
'gas': 0.0,
'take_off': False,
'reset': False,
'hover': False,
}
#function to convert angles from gyro output to usable values
def convertAngle(angle):
if angle<0:
return (angle+360000.0)
else:
return angle
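# Worked example: the drone reports angles as degrees*1000, so a yaw reading of
# -90000 (-90 degrees) converts to 270000, while 120000 passes through unchanged.
# This keeps the comparisons below in the range [0, 360000).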
class navdataUpdate(object):
def __init__(self,_im_proc):
# Assign image processor pointer
self._im_proc = _im_proc
# Set up a UDP listening socket on port 5561 which calls readNavigation_Data upon socket activity
self.data_socket = QtNetwork.QUdpSocket()
if not self.data_socket.bind(QtNetwork.QHostAddress.Any, 5561):
raise RuntimeError('Error binding to port: %s' % (self.data_socket.errorString()))
self.data_socket.readyRead.connect(self.readNavigation_Data)
def readNavigation_Data(self):
"""Called when there is some interesting data to read on the video socket."""
while self.data_socket.hasPendingDatagrams():
sz = self.data_socket.pendingDatagramSize()
(data, host, port) = self.data_socket.readDatagram(sz)
# Some hack to account for PySide vs. PyQt differences
if qt.USES_PYSIDE:
data = data.data()
# Parse the packet
packet = json.loads(data.decode())
# Find the movement data we are looking for
if 'type' in packet:
if packet['type'] == 'demo':
#pass on yaw angle data to the image processor
#packet['psi'] is the yaw angle value
self._im_proc.yaw_angle = packet['psi']
class imageProcessor(object):
#variable that stores the yaw angle for a given point in time
yaw_angle=0.0
init_angle=0.0
#records whether we are turning one way or the other, to determine the
#right angle to check
direction=-1
#time a box was found
box_time=0
detected_time=0
def __init__(self):
self._navdata_update=navdataUpdate(self)
def detect_markers (self, frame):
#write the frame to disk so it can be reloaded in the formats we need
cv.SaveImage("frame.png", frame)
#load greyscale image
img = cv.LoadImageM("frame.png",cv.CV_LOAD_IMAGE_GRAYSCALE)
#load colour image for displaying
im = cv.LoadImageM("frame.png");
#canny edge detector
edges= cv.CreateImage(cv.GetSize(img), 8, 1)
cv.Canny(img,edges, 50, 400.0)
#low-pass filter the image
cv.Smooth(edges, edges, cv.CV_GAUSSIAN,25)
#create space to store the cvseq sequence seq containing the contours
storage = cv.CreateMemStorage(0)
#FindContours returns a sequence of contours, so we need to go through all of them
#to find rectangles. See http://opencv.willowgarage.com/wiki/PythonInterface
#for details.
#find all contours and draw inner ones in green, outer ones in blue
seq=cv.FindContours(edges, storage,cv.CV_RETR_LIST,cv.CV_CHAIN_APPROX_SIMPLE,(0, 0))
cv.DrawContours(im, seq, (255,0,0), (0,255,0), 20,1)
#find external contours
seq_ext=cv.FindContours(edges, storage,cv.CV_RETR_EXTERNAL,cv.CV_CHAIN_APPROX_SIMPLE,(0, 0))
found_box = False
box_in_distance = False
while seq:
#do not take into account external contours
if not(list(seq)==list(seq_ext)):
perim= cv.ArcLength(seq) #contour perimeter
area=cv.ContourArea(seq) #contour area
polygon=cv.ApproxPoly(list(seq), storage,cv.CV_POLY_APPROX_DP,perim*0.02,0)
sqr=cv.BoundingRect(polygon,0) #get square approximation for the contour
#check if there are any rectangles in the distance that have appropriate width/height ratio
#and area close enough to that of the approximated rectangle
#this is used to correct drone orientation when moving towards box
if (float(sqr[2]*sqr[3])/(edges.height*edges.width)>0.004)&(abs(sqr[2]-sqr[3])<((sqr[2]+sqr[3])/4))& (area/float(sqr[2]*sqr[3])>0.7):
box_in_distance = True
cv.PolyLine(im,[polygon], True, (0,255,255),2, cv.CV_AA, 0)
self.detect_time=time.clock()
#Only keep rectangles big enough to be of interest,
#that have an appropriate width/height ratio
#and whose area is close enough to that of the approximated rectangle
if (float(sqr[2]*sqr[3])/(edges.height*edges.width)>0.06)&(abs(sqr[2]-sqr[3])<((sqr[2]+sqr[3])/4))& (area/float(sqr[2]*sqr[3])>0.7):
#draw polygon and approximated rectangle
cv.PolyLine(im,[polygon], True, (0,0,255),2, cv.CV_AA, 0)
cv.Rectangle(im,(sqr[0],sqr[1]),(sqr[0]+sqr[2],sqr[1]+sqr[3]),(255,0,255),1,8,0)
#check whether the box is too close (takes up a large portion of the frame)
if ((sqr[2]>100) or (sqr[3]>80)):
print 'warning', sqr[2],sqr[3]
found_box = True
#record the time the box was found
self.box_time=time.clock()
else:
#move on to the next outer contour
seq_ext=seq_ext.h_next()
#h_next: points to sequences on the same level
seq=seq.h_next()
if found_box:
#find whether we are going 'forward' or back depending on whether the
#yaw angle is more or less than 180 degrees (values given by the drone
#are degrees*1000)
self.direction=cmp(convertAngle(self.yaw_angle), 180000.0)
#turn if box found
if self.direction==-1:
send_state(turn_right_state)
if self.direction==1:
send_state(turn_left_state)
self.init_angle=self.yaw_angle
print self.direction
#box_in_distance = False
# provided we have detected a box and the drone has not yet rotated more than 150 degrees:
elif ((not self.box_time==0) and abs(-convertAngle(self.init_angle)+convertAngle(self.yaw_angle))<150000.0 and self.direction==-1): #(time.clock()- self.box_time <2) :
send_state(turn_right_state)
print 'turn', abs(convertAngle(self.init_angle)-convertAngle(self.yaw_angle))
print 'right', self.init_angle
elif ((not self.box_time==0) and abs(convertAngle(self.init_angle)-convertAngle(self.yaw_angle))<150000.0 and self.direction==1): #(time.clock()- self.box_time <2) :
send_state(turn_left_state)
print 'turn', convertAngle(self.init_angle)-convertAngle(self.yaw_angle)
print 'left' ,self.init_angle
else:
#if we are not facing the box in the distance
##
## if box_in_distance and self.box_time<4:
## if sqr[0]< 0.2*edges.width:
## send_state(turn_right_state)
## box_in_distance=False
## print 'rrrrrrr', sqr[0],edges.width
## elif sqr[0]> 0.85*edges.width:
## send_state(turn_left_state)
## print 'lllllll',sqr[0],edges.width
## print self.box_time
## box_in_distance=False
##
## else:
#reset the timer
self.box_time=0
send_state(move_forward_state)
print ' forward', convertAngle(self.init_angle)-convertAngle(self.yaw_angle)
return im
class imageViewer(object):
win_title = "Drone Video Feed"
def __init__(self):
# Create a QtCoreApplication loop (NB remember to use QApplication instead if wanting GUI features)
self.app = QtCore.QCoreApplication(sys.argv)
# Wire up Ctrl-C to call QApplication.quit()
signal.signal(signal.SIGINT, lambda *args: self.app.quit())
# Initialise the drone control loop and attempt to open a connection.
connection = platform.Connection()
#self._control = ControlLoop(connection, video_cb=None, navdata_cb=None)
# Create a window in which to place frames
cv.NamedWindow(self.win_title, cv.CV_WINDOW_AUTOSIZE) #probably no need to autosize
# Set up a UDP listening socket on port 5562 which calls readData upon socket activity
self.socket = QtNetwork.QUdpSocket()
if not self.socket.bind(QtNetwork.QHostAddress.Any, 5562):
raise RuntimeError('Error binding to port: %s' % (self.socket.errorString()))
self.socket.readyRead.connect(self.readData)
# Create decoder object
self._vid_decoder = videopacket.Decoder(self.showImage)
# Create imageProcessor object
self._img_processor = imageProcessor()
# Start video on drone
#self._control.start_video()
def run(self):
self.app.exec_()
def readData(self):
"""Called when there is some interesting data to read on the video socket."""
while self.socket.hasPendingDatagrams():
sz = self.socket.pendingDatagramSize()
(data, host, port) = self.socket.readDatagram(sz)
# Some hack to account for PySide vs. PyQt differences
if qt.USES_PYSIDE:
data = data.data()
# Decode video data and pass result to showImage
self._vid_decoder.decode(data)
def showImage(self, data):
"""
Displays argument image in window using openCV.
data argument must be a string containing a 16 bit unsigned RGB image (RGB16 == RGB565).
"""
# Create OpenCV header and read in drone video data as RGB565
iplimage = cv.CreateImageHeader((320,240), cv.IPL_DEPTH_8U, 2)
cv.SetData(iplimage, data)
# Convert image to RGB888 which is more OpenCV friendly
RGBimage = cv.CreateImage((320,240), cv.IPL_DEPTH_8U, 3)
cv.CvtColor(iplimage, RGBimage, cv.CV_BGR5652BGR)
# Add labels for any markers present
RGBimage = self._img_processor.detect_markers(RGBimage)
# Show image
cv.ShowImage(self.win_title, RGBimage)
if (__name__ == '__main__'):
image_app = imageViewer()
image_app.run()
#if we haven't seen another box 3 seconds after we saw the last one then we most probably missed it
## #so look for it
## elif not box_in_distance and time.clock()- self.box_time >3 and time.clock()- self.box_time <6:
## send_state(turn_right_state)
## print ' can i see a box? ', box_in_distance,' how long since i dodged?', self.box_time
##
## elif not box_in_distance and time.clock()- self.box_time >6 and time.clock()- self.box_time <9:
## send_state(turn_left_state)
## print ' can i see a box? ', box_in_distance,' how long since i dodged?', self.box_time
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import datetime
import calendar
import numpy as np
import pytz
import zipline.finance.risk as risk
from zipline.utils import factory
from zipline.finance.trading import SimulationParameters
from .answer_key import AnswerKey
ANSWER_KEY = AnswerKey()
RETURNS = ANSWER_KEY.RETURNS
class TestRisk(unittest.TestCase):
def setUp(self):
start_date = datetime.datetime(
year=2006,
month=1,
day=1,
hour=0,
minute=0,
tzinfo=pytz.utc)
end_date = datetime.datetime(
year=2006, month=12, day=31, tzinfo=pytz.utc)
self.sim_params = SimulationParameters(
period_start=start_date,
period_end=end_date
)
self.algo_returns_06 = factory.create_returns_from_list(
RETURNS,
self.sim_params
)
self.metrics_06 = risk.RiskReport(
self.algo_returns_06,
self.sim_params
)
start_08 = datetime.datetime(
year=2008,
month=1,
day=1,
hour=0,
minute=0,
tzinfo=pytz.utc)
end_08 = datetime.datetime(
year=2008,
month=12,
day=31,
tzinfo=pytz.utc
)
self.sim_params08 = SimulationParameters(
period_start=start_08,
period_end=end_08
)
def tearDown(self):
return
def test_factory(self):
returns = [0.1] * 100
r_objects = factory.create_returns_from_list(returns, self.sim_params)
self.assertTrue(r_objects.index[-1] <=
datetime.datetime(
year=2006, month=12, day=31, tzinfo=pytz.utc))
def test_drawdown(self):
returns = factory.create_returns_from_list(
[1.0, -0.5, 0.8, .17, 1.0, -0.1, -0.45], self.sim_params)
# 200, 100, 180, 210.6, 421.2, 379.08, 208.494
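# The running peak is 421.2 and the subsequent trough is 208.494, so the expected
# max drawdown is 1 - 208.494 / 421.2 = 0.505, as asserted below.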
metrics = risk.RiskMetricsPeriod(returns.index[0],
returns.index[-1],
returns)
self.assertEqual(metrics.max_drawdown, 0.505)
def test_benchmark_returns_06(self):
returns = factory.create_returns_from_range(self.sim_params)
metrics = risk.RiskReport(returns, self.sim_params)
np.testing.assert_almost_equal(
[x.benchmark_period_returns
for x in metrics.month_periods],
ANSWER_KEY.BENCHMARK_PERIOD_RETURNS['Monthly'])
np.testing.assert_almost_equal(
[x.benchmark_period_returns
for x in metrics.three_month_periods],
ANSWER_KEY.BENCHMARK_PERIOD_RETURNS['3-Month'])
np.testing.assert_almost_equal(
[x.benchmark_period_returns
for x in metrics.six_month_periods],
ANSWER_KEY.BENCHMARK_PERIOD_RETURNS['6-month'])
np.testing.assert_almost_equal(
[x.benchmark_period_returns
for x in metrics.year_periods],
ANSWER_KEY.BENCHMARK_PERIOD_RETURNS['year'])
def test_trading_days_06(self):
returns = factory.create_returns_from_range(self.sim_params)
metrics = risk.RiskReport(returns, self.sim_params)
self.assertEqual([x.num_trading_days for x in metrics.year_periods],
[251])
self.assertEqual([x.num_trading_days for x in metrics.month_periods],
[20, 19, 23, 19, 22, 22, 20, 23, 20, 22, 21, 20])
def test_benchmark_volatility_06(self):
returns = factory.create_returns_from_range(self.sim_params)
metrics = risk.RiskReport(returns, self.sim_params)
np.testing.assert_almost_equal(
[x.benchmark_volatility
for x in metrics.month_periods],
ANSWER_KEY.BENCHMARK_PERIOD_VOLATILITY['Monthly'])
np.testing.assert_almost_equal(
[x.benchmark_volatility
for x in metrics.three_month_periods],
ANSWER_KEY.BENCHMARK_PERIOD_VOLATILITY['3-Month'])
np.testing.assert_almost_equal(
[x.benchmark_volatility
for x in metrics.six_month_periods],
ANSWER_KEY.BENCHMARK_PERIOD_VOLATILITY['6-month'])
np.testing.assert_almost_equal(
[x.benchmark_volatility
for x in metrics.year_periods],
ANSWER_KEY.BENCHMARK_PERIOD_VOLATILITY['year'])
def test_algorithm_returns_06(self):
np.testing.assert_almost_equal(
[x.algorithm_period_returns
for x in self.metrics_06.month_periods],
ANSWER_KEY.ALGORITHM_PERIOD_RETURNS['Monthly'])
np.testing.assert_almost_equal(
[x.algorithm_period_returns
for x in self.metrics_06.three_month_periods],
ANSWER_KEY.ALGORITHM_PERIOD_RETURNS['3-Month'])
np.testing.assert_almost_equal(
[x.algorithm_period_returns
for x in self.metrics_06.six_month_periods],
ANSWER_KEY.ALGORITHM_PERIOD_RETURNS['6-month'])
np.testing.assert_almost_equal(
[x.algorithm_period_returns
for x in self.metrics_06.year_periods],
ANSWER_KEY.ALGORITHM_PERIOD_RETURNS['year'])
def test_algorithm_volatility_06(self):
np.testing.assert_almost_equal(
[x.algorithm_volatility
for x in self.metrics_06.month_periods],
ANSWER_KEY.ALGORITHM_PERIOD_VOLATILITY['Monthly'])
np.testing.assert_almost_equal(
[x.algorithm_volatility
for x in self.metrics_06.three_month_periods],
ANSWER_KEY.ALGORITHM_PERIOD_VOLATILITY['3-Month'])
np.testing.assert_almost_equal(
[x.algorithm_volatility
for x in self.metrics_06.six_month_periods],
ANSWER_KEY.ALGORITHM_PERIOD_VOLATILITY['6-month'])
np.testing.assert_almost_equal(
[x.algorithm_volatility
for x in self.metrics_06.year_periods],
ANSWER_KEY.ALGORITHM_PERIOD_VOLATILITY['year'])
def test_algorithm_sharpe_06(self):
np.testing.assert_almost_equal(
[x.sharpe for x in self.metrics_06.month_periods],
ANSWER_KEY.ALGORITHM_PERIOD_SHARPE['Monthly'])
np.testing.assert_almost_equal(
[x.sharpe for x in self.metrics_06.three_month_periods],
ANSWER_KEY.ALGORITHM_PERIOD_SHARPE['3-Month'])
np.testing.assert_almost_equal(
[x.sharpe for x in self.metrics_06.six_month_periods],
ANSWER_KEY.ALGORITHM_PERIOD_SHARPE['6-month'])
np.testing.assert_almost_equal(
[x.sharpe for x in self.metrics_06.year_periods],
ANSWER_KEY.ALGORITHM_PERIOD_SHARPE['year'])
def test_algorithm_sortino_06(self):
self.assertEqual([round(x.sortino, 3)
for x in self.metrics_06.month_periods],
[4.491,
-2.842,
-2.052,
3.898,
7.023,
-8.532,
3.079,
-0.354,
-1.125,
3.009,
3.277,
-3.122])
self.assertEqual([round(x.sortino, 3)
for x in self.metrics_06.three_month_periods],
[-0.769,
-1.043,
6.677,
-2.77,
-3.209,
-6.769,
1.253,
1.085,
3.659,
1.674])
self.assertEqual([round(x.sortino, 3)
for x in self.metrics_06.six_month_periods],
[-2.728,
-3.258,
-1.84,
-1.366,
-1.845,
-3.415,
2.238])
self.assertEqual([round(x.sortino, 3)
for x in self.metrics_06.year_periods],
[-0.524])
def test_algorithm_information_06(self):
self.assertEqual([round(x.information, 3)
for x in self.metrics_06.month_periods],
[0.131,
-0.11,
-0.067,
0.136,
0.301,
-0.387,
0.107,
-0.032,
-0.058,
0.069,
0.095,
-0.123])
self.assertEqual([round(x.information, 3)
for x in self.metrics_06.three_month_periods],
[-0.013,
-0.009,
0.111,
-0.014,
-0.017,
-0.108,
0.011,
-0.004,
0.032,
0.011])
self.assertEqual([round(x.information, 3)
for x in self.metrics_06.six_month_periods],
[-0.013,
-0.014,
-0.003,
-0.002,
-0.011,
-0.041,
0.011])
self.assertEqual([round(x.information, 3)
for x in self.metrics_06.year_periods],
[-0.001])
def test_algorithm_beta_06(self):
np.testing.assert_almost_equal(
[x.beta for x in self.metrics_06.month_periods],
ANSWER_KEY.ALGORITHM_PERIOD_BETA['Monthly'])
np.testing.assert_almost_equal(
[x.beta for x in self.metrics_06.three_month_periods],
ANSWER_KEY.ALGORITHM_PERIOD_BETA['3-Month'])
np.testing.assert_almost_equal(
[x.beta for x in self.metrics_06.six_month_periods],
ANSWER_KEY.ALGORITHM_PERIOD_BETA['6-month'])
np.testing.assert_almost_equal(
[x.beta for x in self.metrics_06.year_periods],
ANSWER_KEY.ALGORITHM_PERIOD_BETA['year'])
def test_algorithm_alpha_06(self):
np.testing.assert_almost_equal(
[x.alpha for x in self.metrics_06.month_periods],
ANSWER_KEY.ALGORITHM_PERIOD_ALPHA['Monthly'])
np.testing.assert_almost_equal(
[x.alpha for x in self.metrics_06.three_month_periods],
ANSWER_KEY.ALGORITHM_PERIOD_ALPHA['3-Month'])
np.testing.assert_almost_equal(
[x.alpha for x in self.metrics_06.six_month_periods],
ANSWER_KEY.ALGORITHM_PERIOD_ALPHA['6-month'])
np.testing.assert_almost_equal(
[x.alpha for x in self.metrics_06.year_periods],
ANSWER_KEY.ALGORITHM_PERIOD_ALPHA['year'])
# FIXME: Covariance is not matching excel precisely enough to run the test.
# Month 4 seems to be the problem. Variance is disabled
# just to avoid distraction - it is much closer than covariance
# and can probably pass with 6 significant digits instead of 7.
# re-enable variance, alpha, and beta tests once this is resolved
def test_algorithm_covariance_06(self):
np.testing.assert_almost_equal(
[x.algorithm_covariance for x in self.metrics_06.month_periods],
ANSWER_KEY.ALGORITHM_PERIOD_COVARIANCE['Monthly'])
np.testing.assert_almost_equal(
[x.algorithm_covariance
for x in self.metrics_06.three_month_periods],
ANSWER_KEY.ALGORITHM_PERIOD_COVARIANCE['3-Month'])
np.testing.assert_almost_equal(
[x.algorithm_covariance
for x in self.metrics_06.six_month_periods],
ANSWER_KEY.ALGORITHM_PERIOD_COVARIANCE['6-month'])
np.testing.assert_almost_equal(
[x.algorithm_covariance
for x in self.metrics_06.year_periods],
ANSWER_KEY.ALGORITHM_PERIOD_COVARIANCE['year'])
def test_benchmark_variance_06(self):
np.testing.assert_almost_equal(
[x.benchmark_variance
for x in self.metrics_06.month_periods],
ANSWER_KEY.ALGORITHM_PERIOD_BENCHMARK_VARIANCE['Monthly'])
np.testing.assert_almost_equal(
[x.benchmark_variance
for x in self.metrics_06.three_month_periods],
ANSWER_KEY.ALGORITHM_PERIOD_BENCHMARK_VARIANCE['3-Month'])
np.testing.assert_almost_equal(
[x.benchmark_variance
for x in self.metrics_06.six_month_periods],
ANSWER_KEY.ALGORITHM_PERIOD_BENCHMARK_VARIANCE['6-month'])
np.testing.assert_almost_equal(
[x.benchmark_variance
for x in self.metrics_06.year_periods],
ANSWER_KEY.ALGORITHM_PERIOD_BENCHMARK_VARIANCE['year'])
def test_benchmark_returns_08(self):
returns = factory.create_returns_from_range(self.sim_params08)
metrics = risk.RiskReport(returns, self.sim_params08)
self.assertEqual([round(x.benchmark_period_returns, 3)
for x in metrics.month_periods],
[-0.061,
-0.035,
-0.006,
0.048,
0.011,
-0.086,
-0.01,
0.012,
-0.091,
-0.169,
-0.075,
0.008])
self.assertEqual([round(x.benchmark_period_returns, 3)
for x in metrics.three_month_periods],
[-0.099,
0.005,
0.052,
-0.032,
-0.085,
-0.084,
-0.089,
-0.236,
-0.301,
-0.226])
self.assertEqual([round(x.benchmark_period_returns, 3)
for x in metrics.six_month_periods],
[-0.128,
-0.081,
-0.036,
-0.118,
-0.301,
-0.36,
-0.294])
self.assertEqual([round(x.benchmark_period_returns, 3)
for x in metrics.year_periods],
[-0.385])
def test_trading_days_08(self):
returns = factory.create_returns_from_range(self.sim_params08)
metrics = risk.RiskReport(returns, self.sim_params08)
self.assertEqual([x.num_trading_days for x in metrics.year_periods],
[253])
self.assertEqual([x.num_trading_days for x in metrics.month_periods],
[21, 20, 20, 22, 21, 21, 22, 21, 21, 23, 19, 22])
def test_benchmark_volatility_08(self):
returns = factory.create_returns_from_range(self.sim_params08)
metrics = risk.RiskReport(returns, self.sim_params08)
self.assertEqual([round(x.benchmark_volatility, 3)
for x in metrics.month_periods],
[0.07,
0.058,
0.082,
0.054,
0.041,
0.057,
0.068,
0.06,
0.157,
0.244,
0.195,
0.145])
self.assertEqual([round(x.benchmark_volatility, 3)
for x in metrics.three_month_periods],
[0.12,
0.113,
0.105,
0.09,
0.098,
0.107,
0.179,
0.293,
0.344,
0.34])
self.assertEqual([round(x.benchmark_volatility, 3)
for x in metrics.six_month_periods],
[0.15,
0.149,
0.15,
0.2,
0.308,
0.36,
0.383])
# TODO: ugly, but I can't get the rounded float to match.
# maybe we need a different test that checks the
# difference between the numbers
self.assertEqual([round(x.benchmark_volatility, 3)
for x in metrics.year_periods],
[0.411])
def test_treasury_returns_06(self):
returns = factory.create_returns_from_range(self.sim_params)
metrics = risk.RiskReport(returns, self.sim_params)
self.assertEqual([round(x.treasury_period_return, 4)
for x in metrics.month_periods],
[0.0037,
0.0034,
0.0039,
0.0038,
0.0040,
0.0037,
0.0043,
0.0043,
0.0038,
0.0044,
0.0043,
0.004])
self.assertEqual([round(x.treasury_period_return, 4)
for x in metrics.three_month_periods],
[0.0114,
0.0116,
0.0122,
0.0125,
0.0129,
0.0127,
0.0123,
0.0128,
0.0125,
0.0127])
self.assertEqual([round(x.treasury_period_return, 4)
for x in metrics.six_month_periods],
[0.0260,
0.0257,
0.0258,
0.0252,
0.0259,
0.0256,
0.0257])
self.assertEqual([round(x.treasury_period_return, 4)
for x in metrics.year_periods],
[0.0500])
def test_benchmarkrange(self):
self.check_year_range(
datetime.datetime(
year=2008, month=1, day=1, tzinfo=pytz.utc),
2)
def test_partial_month(self):
start = datetime.datetime(
year=1991,
month=1,
day=1,
hour=0,
minute=0,
tzinfo=pytz.utc)
# 1992 and 1996 were leap years
total_days = 365 * 5 + 2
end = start + datetime.timedelta(days=total_days)
sim_params90s = SimulationParameters(
period_start=start,
period_end=end
)
returns = factory.create_returns_from_range(sim_params90s)
returns = returns[:-10] # truncate the returns series to end mid-month
metrics = risk.RiskReport(returns, sim_params90s)
total_months = 60
self.check_metrics(metrics, total_months, start)
def check_year_range(self, start_date, years):
sim_params = SimulationParameters(
period_start=start_date,
period_end=start_date.replace(year=(start_date.year + years))
)
returns = factory.create_returns_from_range(sim_params)
metrics = risk.RiskReport(returns, self.sim_params)
total_months = years * 12
self.check_metrics(metrics, total_months, start_date)
def check_metrics(self, metrics, total_months, start_date):
"""
Confirm that the right number of risk metrics were calculated for each
window length.
"""
self.assert_range_length(
metrics.month_periods,
total_months,
1,
start_date
)
self.assert_range_length(
metrics.three_month_periods,
total_months,
3,
start_date
)
self.assert_range_length(
metrics.six_month_periods,
total_months,
6,
start_date
)
self.assert_range_length(
metrics.year_periods,
total_months,
12,
start_date
)
def assert_last_day(self, period_end):
# 30 days has september, april, june and november
if period_end.month in [9, 4, 6, 11]:
self.assertEqual(period_end.day, 30)
# all the rest have 31, except for february
elif(period_end.month != 2):
self.assertEqual(period_end.day, 31)
else:
if calendar.isleap(period_end.year):
self.assertEqual(period_end.day, 29)
else:
self.assertEqual(period_end.day, 28)
def assert_month(self, start_month, actual_end_month):
if start_month == 1:
expected_end_month = 12
else:
expected_end_month = start_month - 1
self.assertEqual(expected_end_month, actual_end_month)
def assert_range_length(self, col, total_months,
period_length, start_date):
if(period_length > total_months):
self.assertEqual(len(col), 0)
else:
self.assertEqual(
len(col),
total_months - (period_length - 1),
"mismatch for total months - \
expected:{total_months}/actual:{actual}, \
period:{period_length}, start:{start_date}, \
calculated end:{end}".format(total_months=total_months,
period_length=period_length,
start_date=start_date,
end=col[-1].end_date,
actual=len(col))
)
self.assert_month(start_date.month, col[-1].end_date.month)
self.assert_last_day(col[-1].end_date)
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for model_search.generators.
search_candidate_generator.
"""
import collections
import os
from absl import flags
from model_search import hparam as hp
from model_search.architecture import architecture_utils
from model_search.generators import search_candidate_generator
from model_search.generators import trial_utils
from model_search.metadata import ml_metadata_db
from model_search.metadata import trial as trial_module
from model_search.proto import distillation_spec_pb2
from model_search.proto import ensembling_spec_pb2
from model_search.proto import phoenix_spec_pb2
from model_search.proto import transfer_learning_spec_pb2
import numpy as np
import tensorflow.compat.v2 as tf
def _create_trials(root_dir):
trials = [{
'model_dir': os.path.join(root_dir, str(1)),
'id': 1,
'status': 'COMPLETED',
'trial_infeasible': False,
'final_measurement': {
'objective_value': 0.97
},
}, {
'model_dir': os.path.join(root_dir, str(2)),
'id': 2,
'status': 'COMPLETED',
'trial_infeasible': False,
'final_measurement': {
'objective_value': 0.94
},
}]
return [trial_module.Trial(t) for t in trials]
_FIRST_GRAPH_NODE_SUBSET = [
u'zeros/shape_as_tensor',
u'zeros/Const',
u'zeros',
u'Phoenix/search_generator_0/1_CONVOLUTION_3X3_4/conv2d/kernel',
u'Phoenix/search_generator_0/1_CONVOLUTION_3X3_4/conv2d/bias',
u'Phoenix/search_generator_0/last_dense_4/dense/kernel',
u'Phoenix/search_generator_0/last_dense_4/dense/bias',
u'Phoenix/search_generator_0/last_dense_4/logits',
u'architectures/search_generator_0/Initializer/Const',
u'params/search_generator_0/dropout_rate',
u'params/search_generator_0/is_frozen',
u'number_of_towers/search_generator',
]
_SUGGESTIONS_GRAPH_NODE_SUBSET = [
u'zeros/shape_as_tensor',
u'zeros/Const',
u'zeros',
u'Phoenix/search_generator_0/1_DILATED_CONVOLUTION_4_20/conv2d/kernel',
u'Phoenix/search_generator_0/1_DILATED_CONVOLUTION_4_20/conv2d/bias',
u'Phoenix/search_generator_0/last_dense_20/dense/kernel',
u'Phoenix/search_generator_0/last_dense_20/dense/bias',
u'Phoenix/search_generator_0/last_dense_20/logits',
u'architectures/search_generator_0/Initializer/Const',
u'params/search_generator_0/dropout_rate',
u'params/search_generator_0/is_frozen',
u'number_of_towers/search_generator',
]
_DROPOUT_GRAPH_NODE = [
u'zeros/shape_as_tensor',
u'zeros/Const',
u'zeros',
u'Phoenix/search_generator_0/1_CONVOLUTION_3X3_4/conv2d/kernel',
u'Phoenix/search_generator_0/1_CONVOLUTION_3X3_4/conv2d/bias',
u'Phoenix/search_generator_0/last_dense_4/dense/kernel',
u'Phoenix/search_generator_0/last_dense_4/dense/bias',
u'Phoenix/search_generator_0/last_dense_4/logits',
u'architectures/search_generator_0/Initializer/Const',
u'params/search_generator_0/dropout_rate',
u'params/search_generator_0/is_frozen',
u'number_of_towers/search_generator',
]
_DISTILLATION_GRAPH_NODE_SUBSET = [
u'zeros/shape_as_tensor',
u'zeros/Const',
u'zeros',
u'Phoenix/search_generator_0/1_CONVOLUTION_3X3_4/conv2d/kernel/Initializer/random_uniform',
u'Phoenix/search_generator_0/1_CONVOLUTION_3X3_4/conv2d/kernel',
# Removing until we enable batch norm
# u'Phoenix/search_generator_0/1_CONVOLUTION_3X3_4/batch_normalization/Const',
# u'Phoenix/search_generator_0/1_CONVOLUTION_3X3_4/batch_normalization/beta',
u'Phoenix/search_generator_0/last_dense_4/dense/kernel/Initializer/random_uniform',
u'Phoenix/search_generator_0/last_dense_4/dense/kernel',
u'Phoenix/search_generator_0/last_dense_4/dense/bias',
u'number_of_towers/search_generator',
]
class SearchCandidateGeneratorTest(tf.test.TestCase):
def _create_checkpoint(self, towers, trial_id):
with self.test_session(graph=tf.Graph()) as sess:
architecture = np.array([4])
input_tensor = tf.zeros([100, 32, 32, 3])
phoenix_spec = phoenix_spec_pb2.PhoenixSpec(
problem_type=phoenix_spec_pb2.PhoenixSpec.CNN)
dirname = os.path.join(flags.FLAGS.test_tmpdir, str(trial_id))
if dirname and not tf.io.gfile.exists(dirname):
tf.io.gfile.makedirs(dirname)
for tower in towers:
_ = architecture_utils.construct_tower(
phoenix_spec=phoenix_spec,
input_tensor=input_tensor,
tower_name=str(tower) + '_0',
architecture=architecture,
is_training=True,
lengths=None,
logits_dimension=10,
hparams=hp.HParams(),
model_directory=dirname,
is_frozen=False,
dropout_rate=None)
architecture_utils.set_number_of_towers(tower, 1)
directory = flags.FLAGS.test_tmpdir
saver = tf.compat.v1.train.Saver()
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.compat.v1.local_variables_initializer())
saver.save(sess, os.path.join(directory, str(trial_id)) + '/ckpt')
def test_generator(self):
# Force graph mode
with tf.compat.v1.Graph().as_default():
spec = phoenix_spec_pb2.PhoenixSpec(
problem_type=phoenix_spec_pb2.PhoenixSpec.DNN)
spec.search_type = phoenix_spec_pb2.PhoenixSpec.NONADAPTIVE_RANDOM_SEARCH
spec.is_input_shared = True
generator = search_candidate_generator.SearchCandidateGenerator(
phoenix_spec=spec,
metadata=ml_metadata_db.MLMetaData(
phoenix_spec=spec, study_name='', study_owner=''))
input_tensor = tf.zeros([20, 32, 32, 3])
fake_config = collections.namedtuple('RunConfig',
['model_dir', 'is_chief'])
run_config = fake_config(
model_dir=flags.FLAGS.test_tmpdir + '/1', is_chief=True)
towers = generator.generate(
input_layer_fn=lambda: None,
trial_mode=trial_utils.TrialMode.NO_PRIOR,
logits_dimension=10,
hparams=hp.HParams(initial_architecture=['CONVOLUTION_3X3']),
run_config=run_config,
is_training=True,
trials=[])
for t in towers:
t(input_tensor, training=True)
all_nodes = [
node.name
for node in tf.compat.v1.get_default_graph().as_graph_def().node
]
self.assertAllInSet(_FIRST_GRAPH_NODE_SUBSET, all_nodes)
def test_generator_with_suggestions(self):
# Force graph mode
with tf.compat.v1.Graph().as_default():
spec = phoenix_spec_pb2.PhoenixSpec(
problem_type=phoenix_spec_pb2.PhoenixSpec.DNN)
spec.search_type = phoenix_spec_pb2.PhoenixSpec.NONADAPTIVE_RANDOM_SEARCH
spec.ensemble_spec.ensemble_search_type = (
ensembling_spec_pb2.EnsemblingSpec.NONADAPTIVE_ENSEMBLE_SEARCH)
spec.is_input_shared = True
generator = search_candidate_generator.SearchCandidateGenerator(
phoenix_spec=spec,
metadata=ml_metadata_db.MLMetaData(
phoenix_spec=spec, study_name='', study_owner=''))
input_tensor = tf.zeros([20, 32, 32, 3])
suggestion = spec.user_suggestions.add()
suggestion.architecture[:] = ['DILATED_CONVOLUTION_4']
fake_config = collections.namedtuple('RunConfig',
['model_dir', 'is_chief'])
run_config = fake_config(
model_dir=flags.FLAGS.test_tmpdir + '/1', is_chief=True)
towers = generator.generate(
input_layer_fn=lambda: None,
trial_mode=trial_utils.TrialMode.ENSEMBLE_SEARCH,
logits_dimension=10,
hparams=hp.HParams(initial_architecture=['CONVOLUTION_3X3']),
run_config=run_config,
is_training=True,
trials=[])
for t in towers:
t(input_tensor, training=True)
all_nodes = [
node.name
for node in tf.compat.v1.get_default_graph().as_graph_def().node
]
self.assertAllInSet(_SUGGESTIONS_GRAPH_NODE_SUBSET, all_nodes)
def test_generator_with_dropouts(self):
# Force graph mode
with tf.compat.v1.Graph().as_default():
spec = phoenix_spec_pb2.PhoenixSpec(
problem_type=phoenix_spec_pb2.PhoenixSpec.DNN)
spec.search_type = phoenix_spec_pb2.PhoenixSpec.NONADAPTIVE_RANDOM_SEARCH
spec.is_input_shared = True
generator = search_candidate_generator.SearchCandidateGenerator(
phoenix_spec=spec,
metadata=ml_metadata_db.MLMetaData(
phoenix_spec=spec, study_name='', study_owner=''))
input_tensor = tf.zeros([20, 32, 32, 3])
fake_config = collections.namedtuple('RunConfig',
['model_dir', 'is_chief'])
run_config = fake_config(
model_dir=flags.FLAGS.test_tmpdir + '/1', is_chief=True)
towers = generator.generate(
input_layer_fn=lambda: None,
trial_mode=trial_utils.TrialMode.NO_PRIOR,
logits_dimension=10,
hparams=hp.HParams(
initial_architecture=['CONVOLUTION_3X3'], dropout_rate=0.3),
run_config=run_config,
is_training=True,
trials=[])
for t in towers:
t(input_tensor, training=True)
all_nodes = [
node.name
for node in tf.compat.v1.get_default_graph().as_graph_def().node
]
tf.compat.v1.logging.info(all_nodes)
self.assertAllInSet(_DROPOUT_GRAPH_NODE, all_nodes)
def test_generator_with_snapshot(self):
# Force graph mode
with tf.compat.v1.Graph().as_default():
spec = phoenix_spec_pb2.PhoenixSpec(
problem_type=phoenix_spec_pb2.PhoenixSpec.CNN)
spec.search_type = phoenix_spec_pb2.PhoenixSpec.ADAPTIVE_COORDINATE_DESCENT
spec.transfer_learning_spec.transfer_learning_type = (
transfer_learning_spec_pb2.TransferLearningSpec
.SNAPSHOT_TRANSFER_LEARNING)
spec.is_input_shared = True
generator = search_candidate_generator.SearchCandidateGenerator(
phoenix_spec=spec,
metadata=ml_metadata_db.MLMetaData(
phoenix_spec=spec, study_name='', study_owner=''))
input_tensor = tf.zeros([20, 32, 32, 3])
fake_config = collections.namedtuple('RunConfig',
['model_dir', 'is_chief'])
tf.io.gfile.makedirs(flags.FLAGS.test_tmpdir + '/3')
run_config = fake_config(
model_dir=flags.FLAGS.test_tmpdir + '/3', is_chief=True)
self._create_checkpoint(['search_generator'], 2)
towers = generator.generate(
input_layer_fn=lambda: None,
trial_mode=trial_utils.TrialMode.ENSEMBLE_SEARCH,
logits_dimension=10,
hparams=hp.HParams(
initial_architecture=['CONVOLUTION_3X3'],
dropout_rate=0.3,
new_block_type='CONVOLUTION_3X3'),
run_config=run_config,
is_training=True,
trials=_create_trials(flags.FLAGS.test_tmpdir))
for t in towers:
t(input_tensor, training=True)
all_nodes = [
node.name
for node in tf.compat.v1.get_default_graph().as_graph_def().node
]
self.assertAllInSet(_DROPOUT_GRAPH_NODE, all_nodes)
def test_generator_with_distillation(self):
# Force graph mode
with tf.compat.v1.Graph().as_default():
spec = phoenix_spec_pb2.PhoenixSpec(
problem_type=phoenix_spec_pb2.PhoenixSpec.DNN)
spec.search_type = phoenix_spec_pb2.PhoenixSpec.NONADAPTIVE_RANDOM_SEARCH
spec.distillation_spec.distillation_type = (
distillation_spec_pb2.DistillationSpec.DistillationType.MSE_LOGITS)
spec.is_input_shared = True
generator = search_candidate_generator.SearchCandidateGenerator(
phoenix_spec=spec,
metadata=ml_metadata_db.MLMetaData(
phoenix_spec=spec, study_name='', study_owner=''))
input_tensor = tf.zeros([20, 32, 32, 3])
fake_config = collections.namedtuple('RunConfig',
['model_dir', 'is_chief'])
run_config = fake_config(
model_dir=flags.FLAGS.test_tmpdir + '/1', is_chief=True)
towers = generator.generate(
input_layer_fn=lambda: None,
trial_mode=trial_utils.TrialMode.DISTILLATION,
logits_dimension=10,
hparams=hp.HParams(initial_architecture=['CONVOLUTION_3X3']),
run_config=run_config,
is_training=True,
trials=[])
for t in towers:
t(input_tensor, training=True)
all_nodes = [
node.name
for node in tf.compat.v1.get_default_graph().as_graph_def().node
]
self.assertAllInSet(_DISTILLATION_GRAPH_NODE_SUBSET, all_nodes)
def test_generator_with_distillation_and_intermixed(self):
# Force graph mode
with tf.compat.v1.Graph().as_default():
spec = phoenix_spec_pb2.PhoenixSpec(
problem_type=phoenix_spec_pb2.PhoenixSpec.CNN)
spec.is_input_shared = True
spec.search_type = phoenix_spec_pb2.PhoenixSpec.NONADAPTIVE_RANDOM_SEARCH
spec.ensemble_spec.ensemble_search_type = (
ensembling_spec_pb2.EnsemblingSpec
.INTERMIXED_NONADAPTIVE_ENSEMBLE_SEARCH)
spec.ensemble_spec.intermixed_search.width = 2
spec.ensemble_spec.intermixed_search.try_ensembling_every = 4
spec.ensemble_spec.intermixed_search.num_trials_to_consider = 3
spec.distillation_spec.distillation_type = (
distillation_spec_pb2.DistillationSpec.DistillationType.MSE_LOGITS)
generator = search_candidate_generator.SearchCandidateGenerator(
phoenix_spec=spec,
metadata=ml_metadata_db.MLMetaData(
phoenix_spec=spec, study_name='', study_owner=''))
fake_config = collections.namedtuple('RunConfig',
['model_dir', 'is_chief'])
run_config = fake_config(
model_dir=flags.FLAGS.test_tmpdir + '/10000', is_chief=True)
tf.io.gfile.makedirs(run_config.model_dir)
self._create_checkpoint(['search_generator'], 2)
self._create_checkpoint(['search_generator'], 3)
self._create_checkpoint(['search_generator'], 5)
_ = generator.generate(
input_layer_fn=lambda: None,
trial_mode=trial_utils.TrialMode.DISTILLATION,
logits_dimension=10,
hparams=hp.HParams(initial_architecture=['CONVOLUTION_3X3']),
run_config=run_config,
is_training=True,
trials=trial_utils.create_test_trials_intermixed(
flags.FLAGS.test_tmpdir))
if __name__ == '__main__':
tf.enable_v2_behavior()
tf.test.main()
import re
try:
from django.utils.unittest import TestCase
except ImportError:
from django.test import TestCase
from .ansi import ANSIString
from evennia import utils
class ANSIStringTestCase(TestCase):
def checker(self, ansi, raw, clean):
"""
Verifies the raw and clean strings of an ANSIString match expected
output.
"""
self.assertEqual(unicode(ansi.clean()), clean)
self.assertEqual(unicode(ansi.raw()), raw)
def table_check(self, ansi, char, code):
"""
Verifies the indexes in an ANSIString match what they should.
"""
self.assertEqual(ansi._char_indexes, char)
self.assertEqual(ansi._code_indexes, code)
def test_instance(self):
"""
Make sure the ANSIString is always constructed correctly.
"""
clean = u'This isA{r testTest'
encoded = u'\x1b[1m\x1b[32mThis is\x1b[1m\x1b[31mA{r test\x1b[0mTest\x1b[0m'
target = ANSIString(r'{gThis is{rA{{r test{nTest{n')
char_table = [9, 10, 11, 12, 13, 14, 15, 25, 26, 27, 28, 29, 30, 31,
32, 37, 38, 39, 40]
code_table = [0, 1, 2, 3, 4, 5, 6, 7, 8, 16, 17, 18, 19, 20, 21, 22,
23, 24, 33, 34, 35, 36, 41, 42, 43, 44]
self.checker(target, encoded, clean)
self.table_check(target, char_table, code_table)
self.checker(ANSIString(target), encoded, clean)
self.table_check(ANSIString(target), char_table, code_table)
self.checker(ANSIString(encoded, decoded=True), encoded, clean)
self.table_check(ANSIString(encoded, decoded=True), char_table,
code_table)
self.checker(ANSIString('Test'), u'Test', u'Test')
self.table_check(ANSIString('Test'), [0, 1, 2, 3], [])
self.checker(ANSIString(''), u'', u'')
def test_slice(self):
"""
Verifies that slicing an ANSIString results in expected color code
distribution.
"""
target = ANSIString(r'{gTest{rTest{n')
result = target[:3]
self.checker(result, u'\x1b[1m\x1b[32mTes', u'Tes')
result = target[:4]
self.checker(result, u'\x1b[1m\x1b[32mTest\x1b[1m\x1b[31m', u'Test')
result = target[:]
self.checker(
result,
u'\x1b[1m\x1b[32mTest\x1b[1m\x1b[31mTest\x1b[0m',
u'TestTest')
result = target[:-1]
self.checker(
result,
u'\x1b[1m\x1b[32mTest\x1b[1m\x1b[31mTes',
u'TestTes')
result = target[0:0]
self.checker(
result,
u'',
u'')
def test_split(self):
"""
Verifies that re.split and .split behave similarly and that color
codes end up where they should.
"""
target = ANSIString("{gThis is {nA split string{g")
first = (u'\x1b[1m\x1b[32mThis is \x1b[0m', u'This is ')
second = (u'\x1b[1m\x1b[32m\x1b[0m split string\x1b[1m\x1b[32m',
u' split string')
re_split = re.split('A', target)
normal_split = target.split('A')
self.assertEqual(re_split, normal_split)
self.assertEqual(len(normal_split), 2)
self.checker(normal_split[0], *first)
self.checker(normal_split[1], *second)
def test_join(self):
"""
Verify that joining a set of ANSIStrings works.
"""
        # This isn't the desired behavior, but it is the expected one. Python
        # concatenates the in-memory representation with the built-in string's
        # join.
l = [ANSIString("{gTest{r") for s in range(0, 3)]
        # Plain join() works on the clean in-memory text, dropping the color codes.
result = "".join(l)
self.assertEqual(unicode(result), u'TestTestTest')
result = ANSIString("").join(l)
self.checker(result, u'\x1b[1m\x1b[32mTest\x1b[1m\x1b[31m\x1b[1m\x1b'
u'[32mTest\x1b[1m\x1b[31m\x1b[1m\x1b[32mTest'
u'\x1b[1m\x1b[31m', u'TestTestTest')
def test_len(self):
"""
Make sure that length reporting on ANSIStrings does not include
ANSI codes.
"""
self.assertEqual(len(ANSIString('{gTest{n')), 4)
def test_capitalize(self):
"""
Make sure that capitalization works. This is the simplest of the
_transform functions.
"""
target = ANSIString('{gtest{n')
result = u'\x1b[1m\x1b[32mTest\x1b[0m'
self.checker(target.capitalize(), result, u'Test')
def test_mxp_agnostic(self):
"""
Make sure MXP tags are not treated like ANSI codes, but normal text.
"""
mxp1 = "{lclook{ltat{le"
mxp2 = "Start to {lclook here{ltclick somewhere here{le first"
self.assertEqual(15, len(ANSIString(mxp1)))
self.assertEqual(53, len(ANSIString(mxp2)))
# These would indicate an issue with the tables.
self.assertEqual(len(ANSIString(mxp1)), len(ANSIString(mxp1).split("\n")[0]))
self.assertEqual(len(ANSIString(mxp2)), len(ANSIString(mxp2).split("\n")[0]))
self.assertEqual(mxp1, ANSIString(mxp1))
self.assertEqual(mxp2, unicode(ANSIString(mxp2)))
def test_add(self):
"""
        Verify that concatenation works correctly.
"""
a = ANSIString("{gTest")
b = ANSIString("{cString{n")
c = a + b
result = u'\x1b[1m\x1b[32mTest\x1b[1m\x1b[36mString\x1b[0m'
self.checker(c, result, u'TestString')
char_table = [9, 10, 11, 12, 22, 23, 24, 25, 26, 27]
code_table = [
0, 1, 2, 3, 4, 5, 6, 7, 8, 13, 14, 15, 16, 17, 18, 19, 20, 21, 28, 29, 30, 31
]
self.table_check(c, char_table, code_table)
class TestIsIter(TestCase):
def test_is_iter(self):
self.assertEqual(True, utils.is_iter([1,2,3,4]))
self.assertEqual(False, utils.is_iter("This is not an iterable"))
class TestCrop(TestCase):
def test_crop(self):
# No text, return no text
self.assertEqual("", utils.crop("", width=10, suffix="[...]"))
# Input length equal to max width, no crop
self.assertEqual("0123456789", utils.crop("0123456789", width=10, suffix="[...]"))
# Input length greater than max width, crop (suffix included in width)
self.assertEqual("0123[...]", utils.crop("0123456789", width=9, suffix="[...]"))
# Input length less than desired width, no crop
self.assertEqual("0123", utils.crop("0123", width=9, suffix="[...]"))
# Width too small or equal to width of suffix
self.assertEqual("012", utils.crop("0123", width=3, suffix="[...]"))
self.assertEqual("01234", utils.crop("0123456", width=5, suffix="[...]"))
class TestDedent(TestCase):
def test_dedent(self):
#print "Did TestDedent run?"
# Empty string, return empty string
self.assertEqual("", utils.dedent(""))
# No leading whitespace
self.assertEqual("TestDedent", utils.dedent("TestDedent"))
# Leading whitespace, single line
self.assertEqual("TestDedent", utils.dedent(" TestDedent"))
# Leading whitespace, multi line
input_string = " hello\n world"
expected_string = "hello\nworld"
self.assertEqual(expected_string, utils.dedent(input_string))
class TestListToString(TestCase):
"""
Default function header from utils.py:
list_to_string(inlist, endsep="and", addquote=False)
Examples:
no endsep:
[1,2,3] -> '1, 2, 3'
with endsep=='and':
[1,2,3] -> '1, 2 and 3'
with addquote and endsep
[1,2,3] -> '"1", "2" and "3"'
"""
def test_list_to_string(self):
self.assertEqual('1, 2, 3', utils.list_to_string([1,2,3], endsep=""))
self.assertEqual('"1", "2", "3"', utils.list_to_string([1,2,3], endsep="", addquote=True))
self.assertEqual('1, 2 and 3', utils.list_to_string([1,2,3]))
self.assertEqual('"1", "2" and "3"', utils.list_to_string([1,2,3], endsep="and", addquote=True))
class TestMLen(TestCase):
"""
Verifies that m_len behaves like len in all situations except those
where MXP may be involved.
"""
def test_non_mxp_string(self):
self.assertEqual(utils.m_len('Test_string'), 11)
def test_mxp_string(self):
self.assertEqual(utils.m_len('{lclook{ltat{le'), 2)
def test_mxp_ansi_string(self):
self.assertEqual(utils.m_len(ANSIString('{lcl{gook{ltat{le{n')), 2)
def test_non_mxp_ansi_string(self):
self.assertEqual(utils.m_len(ANSIString('{gHello{n')), 5)
def test_list(self):
self.assertEqual(utils.m_len([None, None]), 2)
def test_dict(self):
self.assertEqual(utils.m_len({'hello': True, 'Goodbye': False}), 2)
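    def test_tuple(self):
        # Illustrative sketch: by analogy with the list and dict cases above,
        # m_len is assumed to fall back to len() for plain containers.
        self.assertEqual(utils.m_len((None, None, None)), 3)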
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Project.is_draft'
db.delete_column(u'sfpirgapp_project', 'is_draft')
# Adding field 'Project.is_submitted'
db.add_column(u'sfpirgapp_project', 'is_submitted',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Adding field 'Project.is_draft'
db.add_column(u'sfpirgapp_project', 'is_draft',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
# Deleting field 'Project.is_submitted'
db.delete_column(u'sfpirgapp_project', 'is_submitted')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'generic.assignedkeyword': {
'Meta': {'ordering': "('_order',)", 'object_name': 'AssignedKeyword'},
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keyword': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': u"orm['generic.Keyword']"}),
'object_pk': ('django.db.models.fields.IntegerField', [], {})
},
u'generic.keyword': {
'Meta': {'object_name': 'Keyword'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'sfpirgapp.actiongroup': {
'Meta': {'ordering': "('_order',)", 'object_name': 'ActionGroup'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'announcements': ('mezzanine.core.fields.RichTextField', [], {'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'action_groups'", 'to': u"orm['sfpirgapp.Category']"}),
'contact_email': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'contact_phone': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'facebook_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'featured_image': ('sfpirgapp.fields.MyImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'google_plus_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'links': ('mezzanine.core.fields.RichTextField', [], {'null': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mailing_list_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meetings': ('mezzanine.core.fields.RichTextField', [], {'null': 'True', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'theme_color': ('django.db.models.fields.CharField', [], {'default': "'grey'", 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actiongroups'", 'to': u"orm['auth.User']"})
},
u'sfpirgapp.address': {
'Meta': {'object_name': 'Address'},
'city': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'street2': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'sfpirgapp.application': {
'Meta': {'object_name': 'Application'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sfpirgapp.Project']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow'})
},
u'sfpirgapp.category': {
'Meta': {'ordering': "('_order',)", 'object_name': 'Category'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'featured_image': ('sfpirgapp.fields.MyImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'theme_color': ('django.db.models.fields.CharField', [], {'default': "'grey'", 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'categorys'", 'to': u"orm['auth.User']"})
},
u'sfpirgapp.dummytable': {
'Meta': {'object_name': 'DummyTable'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'sfpirgapp.liaison': {
'Meta': {'object_name': 'Liaison'},
'alt_email': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'liaisons'", 'to': u"orm['sfpirgapp.Organization']"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'position': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'sfpirgapp.organization': {
'Meta': {'object_name': 'Organization'},
'communities': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'contact_alt_email': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'contact_email': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'contact_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'contact_phone': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'contact_position': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_registered': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mailing_city': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'mailing_postal_code': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'mailing_street': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'mailing_street2': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'mandate': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'sources_of_funding': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'sfpirgapp.profile': {
'Meta': {'object_name': 'Profile'},
'bio': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'on_mailing_list': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sfpirgapp.Organization']", 'null': 'True', 'blank': 'True'}),
'photo': ('sfpirgapp.fields.MyImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'sfpirgapp.project': {
'Meta': {'object_name': 'Project'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'arx_projects'", 'to': u"orm['sfpirgapp.Category']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'description_long': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_short': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_submitted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'larger_goal': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'length': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'liaison': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sfpirgapp.Liaison']", 'null': 'True', 'blank': 'True'}),
'logo': ('sfpirgapp.fields.MyImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'project_subject': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sfpirgapp.ProjectSubject']", 'symmetrical': 'False'}),
'project_subject_other': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'project_type': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sfpirgapp.ProjectType']", 'symmetrical': 'False'}),
'project_type_other': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'researcher_qualities': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'results_plan': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'size': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'support_method': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'time_per_week': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'sfpirgapp.projectsubject': {
'Meta': {'object_name': 'ProjectSubject'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'sfpirgapp.projecttype': {
'Meta': {'object_name': 'ProjectType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'sfpirgapp.testimonial': {
'Meta': {'ordering': "('_order',)", 'object_name': 'Testimonial'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'testimonials'", 'to': u"orm['sfpirgapp.Category']"}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'featured_image': ('sfpirgapp.fields.MyImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'theme_color': ('django.db.models.fields.CharField', [], {'default': "'grey'", 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'testimonials'", 'to': u"orm['auth.User']"})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['sfpirgapp']
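# A typical way to apply this migration under South (assuming the project's
# standard manage.py layout) is:
#
#   python manage.py migrate sfpirgapp
#
# Migrating back to the previous migration number would invoke backwards() above.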
|
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.parse import urlparse
import logging
from collections import MutableSet
import requests
from requests import RequestException
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
log = logging.getLogger('couchpotato_list')
class CouchPotatoBase(object):
@staticmethod
def movie_list_request(base_url, port, api_key):
parsedurl = urlparse(base_url)
log.debug('Received movie list request')
return '%s://%s:%s%s/api/%s/movie.list?status=active' % (
parsedurl.scheme, parsedurl.netloc, port, parsedurl.path, api_key)
@staticmethod
def profile_list_request(base_url, port, api_key):
parsedurl = urlparse(base_url)
log.debug('Received profile list request')
return '%s://%s:%s%s/api/%s/profile.list' % (
parsedurl.scheme, parsedurl.netloc, port, parsedurl.path, api_key)
@staticmethod
def movie_add_request(base_url, port, api_key):
parsedurl = urlparse(base_url)
log.debug('Received movie add request')
return '%s://%s:%s%s/api/%s/movie.add' % (
parsedurl.scheme, parsedurl.netloc, port, parsedurl.path, api_key)
@staticmethod
def movie_delete_request(base_url, port, api_key):
parsedurl = urlparse(base_url)
log.debug('Received movie delete request')
return '%s://%s:%s%s/api/%s/movie.delete?delete_from=wanted' % (
parsedurl.scheme, parsedurl.netloc, port, parsedurl.path, api_key)
@staticmethod
def build_url(base_url, request_type, port, api_key):
if request_type == 'active':
return CouchPotatoBase.movie_list_request(base_url, port, api_key)
elif request_type == 'profiles':
return CouchPotatoBase.profile_list_request(base_url, port, api_key)
elif request_type == 'add':
return CouchPotatoBase.movie_add_request(base_url, port, api_key)
elif request_type == 'delete':
return CouchPotatoBase.movie_delete_request(base_url, port, api_key)
else:
raise plugin.PluginError('Received unknown API request, aborting.')
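    # For example (hypothetical values), build_url('http://localhost', 'active', 5050, 'abc123')
    # returns 'http://localhost:5050/api/abc123/movie.list?status=active'.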
@staticmethod
def get_json(url):
try:
return requests.get(url).json()
except RequestException as e:
raise plugin.PluginError('Unable to connect to Couchpotato at %s. Error: %s' % (url, e))
@staticmethod
def quality_requirement_builder(quality_profile):
"""
        Convert a CouchPotato quality profile into a string that can be parsed as a FlexGet QualityRequirement.
"""
# TODO: Not all values have exact matches in flexget, need to update flexget qualities
sources = {'BR-Disk': 'remux', # Not a perfect match, but as close as currently possible
'brrip': 'bluray',
'dvdr': 'dvdrip', # Not a perfect match, but as close as currently possible
'dvdrip': 'dvdrip',
'scr': 'dvdscr',
'r5': 'r5',
'tc': 'tc',
'ts': 'ts',
'cam': 'cam'}
resolutions = {'1080p': '1080p',
'720p': '720p'}
# Separate strings are needed for each QualityComponent
        # TODO: the lists are converted to sets because a quality with a 3D type in CP gets duplicated during the conversion
        # TODO: when (and if) 3D is supported in FlexGet, this workaround will need to be removed
res_string = '|'.join(
set([resolutions[quality] for quality in quality_profile['qualities'] if quality in resolutions]))
source_string = '|'.join(
set([sources[quality] for quality in quality_profile['qualities'] if quality in sources]))
quality_requirement = (res_string + ' ' + source_string).rstrip()
log.debug('quality requirement is %s', quality_requirement)
return quality_requirement
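    # Illustrative sketch with a hypothetical CouchPotato profile: for
    # quality_profile = {'qualities': ['brrip', '720p', '1080p']} the builder above
    # yields a requirement such as '1080p|720p bluray' (the ordering inside each
    # '|' group is not guaranteed because of the set() conversion).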
@staticmethod
def list_entries(config, test_mode=None):
log.verbose('Connecting to CouchPotato to retrieve movie list.')
active_movies_url = CouchPotatoBase.build_url(config.get('base_url'), 'active', config.get('port'),
config.get('api_key'))
active_movies_json = CouchPotatoBase.get_json(active_movies_url)
        # Gets profile and quality lists if include_data is True
if config.get('include_data'):
log.verbose('Connecting to CouchPotato to retrieve profile data.')
profile_url = CouchPotatoBase.build_url(config.get('base_url'), 'profiles', config.get('port'),
config.get('api_key'))
profile_json = CouchPotatoBase.get_json(profile_url)
entries = []
for movie in active_movies_json['movies']:
# Related to #1444, corrupt data from CP
if not all([movie.get('status'), movie.get('title'), movie.get('info')]):
log.warning('corrupt movie data received, skipping')
continue
quality_req = ''
log.debug('movie data: %s', movie)
if movie['status'] == 'active':
if config.get('include_data') and profile_json:
for profile in profile_json['list']:
if profile['_id'] == movie['profile_id']: # Matches movie profile with profile JSON
quality_req = CouchPotatoBase.quality_requirement_builder(profile)
entry = Entry(title=movie["title"],
url='',
imdb_id=movie['info'].get('imdb'),
tmdb_id=movie['info'].get('tmdb_id'),
quality_req=quality_req,
couchpotato_id=movie.get('_id'))
if entry.isvalid():
log.debug('returning entry %s', entry)
entries.append(entry)
else:
log.error('Invalid entry created? %s', entry)
continue
# Test mode logging
if entry and test_mode:
log.info("Test mode. Entry includes:")
for key, value in entry.items():
log.info(' %s: %s', key.capitalize(), value)
return entries
@staticmethod
def add_movie(config, entry, test_mode=None):
if not entry.get('imdb_id'):
log.error('Cannot add movie to couchpotato without an imdb ID: %s', entry)
return
        log.verbose('Connecting to CouchPotato to add a movie to the list.')
add_movie_url = CouchPotatoBase.build_url(config.get('base_url'), 'add', config.get('port'),
config.get('api_key'))
title = entry.get('movie_name')
imdb_id = entry.get('imdb_id')
add_movie_url += '?title=%s&identifier=%s' % (title, imdb_id)
add_movie_json = CouchPotatoBase.get_json(add_movie_url)
return add_movie_json['movie']
@staticmethod
def remove_movie(config, movie_id, test_mode=None):
log.verbose('Deleting movie from Couchpotato')
delete_movie_url = CouchPotatoBase.build_url(config.get('base_url'), 'delete', config.get('port'),
config.get('api_key'))
delete_movie_url += '&id=%s' % movie_id
CouchPotatoBase.get_json(delete_movie_url)
class CouchPotatoSet(MutableSet):
supported_ids = ['couchpotato_id', 'imdb_id', 'tmdb_id']
schema = {
'type': 'object',
'properties': {
'base_url': {'type': 'string'},
'port': {'type': 'number', 'default': 80},
'api_key': {'type': 'string'},
'include_data': {'type': 'boolean', 'default': False}
},
'required': ['api_key', 'base_url'],
'additionalProperties': False
}
@property
def movies(self):
if not self._movies:
self._movies = CouchPotatoBase.list_entries(self.config)
return self._movies
def _find_entry(self, entry):
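        # Match on any supported id first; otherwise fall back to a
        # case-insensitive title comparison. Returns None when nothing matches.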
for cp_entry in self.movies:
for sup_id in self.supported_ids:
if entry.get(sup_id) is not None and entry[sup_id] == cp_entry[sup_id] or entry.get(
'title').lower() == cp_entry.get('title').lower():
return cp_entry
def __init__(self, config):
self.config = config
self._movies = None
def __iter__(self):
return (entry for entry in self.movies)
def __len__(self):
return len(self.movies)
def __contains__(self, entry):
return self._find_entry(entry) is not None
def add(self, entry):
if not self._find_entry(entry):
self._movies = None
movie = CouchPotatoBase.add_movie(self.config, entry)
log.verbose('Successfully added movie %s to CouchPotato', movie['movie']['info']['original_title'])
else:
log.debug('entry %s already exists in couchpotato list', entry)
def discard(self, entry):
for movie in self.movies:
title = entry.get('movie_name') or entry.get('title')
if movie.get('title').lower() == title.lower():
movie_id = movie.get('couchpotato_id')
log.verbose('Trying to remove movie %s from CouchPotato', title)
CouchPotatoBase.remove_movie(self.config, movie_id)
self._movies = None
@property
def immutable(self):
return False
@property
def online(self):
""" Set the online status of the plugin, online plugin should be treated differently in certain situations,
like test mode"""
return True
def get(self, entry):
return self._find_entry(entry)
class CouchPotatoList(object):
schema = CouchPotatoSet.schema
@staticmethod
def get_list(config):
return CouchPotatoSet(config)
def on_task_input(self, task, config):
return list(CouchPotatoSet(config))
@event('plugin.register')
def register_plugin():
plugin.register(CouchPotatoList, 'couchpotato_list', api_ver=2, groups=['list'])
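# A minimal usage sketch (values are placeholders, not taken from the source):
#
#   tasks:
#     cp_sync:
#       couchpotato_list:
#         base_url: http://localhost
#         port: 5050
#         api_key: <couchpotato_api_key>
#         include_data: yes
#
# Per the schema above, only `base_url` and `api_key` are required; `port`
# defaults to 80 and `include_data` to False.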
|
|
from django.contrib.contenttypes.models import ContentType, ContentTypeManager
from django.contrib.contenttypes.views import shortcut
from django.contrib.sites.shortcuts import get_current_site
from django.http import Http404, HttpRequest
from django.test import TestCase, override_settings
from .models import (
ConcreteModel, FooWithBrokenAbsoluteUrl, FooWithoutUrl, FooWithUrl,
ProxyModel,
)
class ContentTypesTests(TestCase):
def setUp(self):
ContentType.objects.clear_cache()
def tearDown(self):
ContentType.objects.clear_cache()
def test_lookup_cache(self):
"""
Make sure that the content type cache (see ContentTypeManager)
works correctly. Lookups for a particular content type -- by model, ID
or natural key -- should hit the database only on the first lookup.
"""
# At this point, a lookup for a ContentType should hit the DB
with self.assertNumQueries(1):
ContentType.objects.get_for_model(ContentType)
# A second hit, though, won't hit the DB, nor will a lookup by ID
# or natural key
with self.assertNumQueries(0):
ct = ContentType.objects.get_for_model(ContentType)
with self.assertNumQueries(0):
ContentType.objects.get_for_id(ct.id)
with self.assertNumQueries(0):
ContentType.objects.get_by_natural_key('contenttypes', 'contenttype')
# Once we clear the cache, another lookup will again hit the DB
ContentType.objects.clear_cache()
with self.assertNumQueries(1):
ContentType.objects.get_for_model(ContentType)
# The same should happen with a lookup by natural key
ContentType.objects.clear_cache()
with self.assertNumQueries(1):
ContentType.objects.get_by_natural_key('contenttypes', 'contenttype')
# And a second hit shouldn't hit the DB
with self.assertNumQueries(0):
ContentType.objects.get_by_natural_key('contenttypes', 'contenttype')
def test_get_for_models_creation(self):
ContentType.objects.all().delete()
with self.assertNumQueries(4):
cts = ContentType.objects.get_for_models(ContentType, FooWithUrl, ProxyModel, ConcreteModel)
self.assertEqual(cts, {
ContentType: ContentType.objects.get_for_model(ContentType),
FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
ProxyModel: ContentType.objects.get_for_model(ProxyModel),
ConcreteModel: ContentType.objects.get_for_model(ConcreteModel),
})
def test_get_for_models_empty_cache(self):
# Empty cache.
with self.assertNumQueries(1):
cts = ContentType.objects.get_for_models(ContentType, FooWithUrl, ProxyModel, ConcreteModel)
self.assertEqual(cts, {
ContentType: ContentType.objects.get_for_model(ContentType),
FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
ProxyModel: ContentType.objects.get_for_model(ProxyModel),
ConcreteModel: ContentType.objects.get_for_model(ConcreteModel),
})
def test_get_for_models_partial_cache(self):
# Partial cache
ContentType.objects.get_for_model(ContentType)
with self.assertNumQueries(1):
cts = ContentType.objects.get_for_models(ContentType, FooWithUrl)
self.assertEqual(cts, {
ContentType: ContentType.objects.get_for_model(ContentType),
FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
})
def test_get_for_models_full_cache(self):
# Full cache
ContentType.objects.get_for_model(ContentType)
ContentType.objects.get_for_model(FooWithUrl)
with self.assertNumQueries(0):
cts = ContentType.objects.get_for_models(ContentType, FooWithUrl)
self.assertEqual(cts, {
ContentType: ContentType.objects.get_for_model(ContentType),
FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
})
def test_get_for_concrete_model(self):
"""
Make sure the `for_concrete_model` kwarg correctly works
with concrete, proxy and deferred models
"""
concrete_model_ct = ContentType.objects.get_for_model(ConcreteModel)
self.assertEqual(concrete_model_ct, ContentType.objects.get_for_model(ProxyModel))
self.assertEqual(concrete_model_ct, ContentType.objects.get_for_model(ConcreteModel, for_concrete_model=False))
proxy_model_ct = ContentType.objects.get_for_model(ProxyModel, for_concrete_model=False)
self.assertNotEqual(concrete_model_ct, proxy_model_ct)
        # Make sure deferred models are correctly handled
ConcreteModel.objects.create(name="Concrete")
DeferredConcreteModel = ConcreteModel.objects.only('pk').get().__class__
DeferredProxyModel = ProxyModel.objects.only('pk').get().__class__
self.assertEqual(concrete_model_ct, ContentType.objects.get_for_model(DeferredConcreteModel))
self.assertEqual(
concrete_model_ct,
ContentType.objects.get_for_model(DeferredConcreteModel, for_concrete_model=False)
)
self.assertEqual(concrete_model_ct, ContentType.objects.get_for_model(DeferredProxyModel))
self.assertEqual(
proxy_model_ct,
ContentType.objects.get_for_model(DeferredProxyModel, for_concrete_model=False)
)
def test_get_for_concrete_models(self):
"""
Make sure the `for_concrete_models` kwarg correctly works
with concrete, proxy and deferred models.
"""
concrete_model_ct = ContentType.objects.get_for_model(ConcreteModel)
cts = ContentType.objects.get_for_models(ConcreteModel, ProxyModel)
self.assertEqual(cts, {
ConcreteModel: concrete_model_ct,
ProxyModel: concrete_model_ct,
})
proxy_model_ct = ContentType.objects.get_for_model(ProxyModel, for_concrete_model=False)
cts = ContentType.objects.get_for_models(ConcreteModel, ProxyModel, for_concrete_models=False)
self.assertEqual(cts, {
ConcreteModel: concrete_model_ct,
ProxyModel: proxy_model_ct,
})
        # Make sure deferred models are correctly handled
ConcreteModel.objects.create(name="Concrete")
DeferredConcreteModel = ConcreteModel.objects.only('pk').get().__class__
DeferredProxyModel = ProxyModel.objects.only('pk').get().__class__
cts = ContentType.objects.get_for_models(DeferredConcreteModel, DeferredProxyModel)
self.assertEqual(cts, {
DeferredConcreteModel: concrete_model_ct,
DeferredProxyModel: concrete_model_ct,
})
cts = ContentType.objects.get_for_models(
DeferredConcreteModel, DeferredProxyModel, for_concrete_models=False
)
self.assertEqual(cts, {
DeferredConcreteModel: concrete_model_ct,
DeferredProxyModel: proxy_model_ct,
})
def test_cache_not_shared_between_managers(self):
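        # Each ContentTypeManager keeps its own cache, so a fresh manager must
        # hit the DB even after the default manager's cache is warm.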
with self.assertNumQueries(1):
ContentType.objects.get_for_model(ContentType)
with self.assertNumQueries(0):
ContentType.objects.get_for_model(ContentType)
other_manager = ContentTypeManager()
other_manager.model = ContentType
with self.assertNumQueries(1):
other_manager.get_for_model(ContentType)
with self.assertNumQueries(0):
other_manager.get_for_model(ContentType)
@override_settings(ALLOWED_HOSTS=['example.com'])
def test_shortcut_view(self):
"""
The shortcut view (used for the admin "view on site" functionality)
returns a complete URL regardless of whether the sites framework is
installed.
"""
request = HttpRequest()
request.META = {
"SERVER_NAME": "Example.com",
"SERVER_PORT": "80",
}
user_ct = ContentType.objects.get_for_model(FooWithUrl)
obj = FooWithUrl.objects.create(name="john")
with self.modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'}):
response = shortcut(request, user_ct.id, obj.id)
self.assertEqual(
"http://%s/users/john/" % get_current_site(request).domain,
response._headers.get("location")[1]
)
with self.modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'}):
response = shortcut(request, user_ct.id, obj.id)
self.assertEqual("http://Example.com/users/john/", response._headers.get("location")[1])
def test_shortcut_view_without_get_absolute_url(self):
"""
The shortcut view (used for the admin "view on site" functionality)
returns 404 when get_absolute_url is not defined.
"""
request = HttpRequest()
request.META = {
"SERVER_NAME": "Example.com",
"SERVER_PORT": "80",
}
user_ct = ContentType.objects.get_for_model(FooWithoutUrl)
obj = FooWithoutUrl.objects.create(name="john")
with self.assertRaises(Http404):
shortcut(request, user_ct.id, obj.id)
def test_shortcut_view_with_broken_get_absolute_url(self):
"""
The shortcut view does not catch an AttributeError raised by
the model's get_absolute_url() method (#8997).
"""
request = HttpRequest()
request.META = {
"SERVER_NAME": "Example.com",
"SERVER_PORT": "80",
}
user_ct = ContentType.objects.get_for_model(FooWithBrokenAbsoluteUrl)
obj = FooWithBrokenAbsoluteUrl.objects.create(name="john")
with self.assertRaises(AttributeError):
shortcut(request, user_ct.id, obj.id)
def test_missing_model(self):
"""
Displaying content types in admin (or anywhere) doesn't break on
leftover content type records in the DB for which no model is defined
anymore.
"""
ct = ContentType.objects.create(
app_label='contenttypes',
model='OldModel',
)
self.assertEqual(str(ct), 'OldModel')
self.assertIsNone(ct.model_class())
# Make sure stale ContentTypes can be fetched like any other object.
# Before Django 1.6 this caused a NoneType error in the caching mechanism.
# Instead, just return the ContentType object and let the app detect stale states.
ct_fetched = ContentType.objects.get_for_id(ct.pk)
self.assertIsNone(ct_fetched.model_class())
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Monitors tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Counter
import shutil
import tempfile
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib import testing
from tensorflow.contrib.learn.python import learn
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
class _MyEveryN(learn.monitors.EveryN):
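  """EveryN monitor that records the steps at which begin, end and post_step fire."""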
def __init__(self, every_n_steps=100, first_n_steps=1):
super(_MyEveryN, self).__init__(
every_n_steps=every_n_steps, first_n_steps=first_n_steps)
self._steps_begun = []
self._steps_ended = []
self._post_steps = []
@property
def steps_begun(self):
return self._steps_begun
@property
def steps_ended(self):
return self._steps_ended
@property
def post_steps(self):
return self._post_steps
def every_n_step_begin(self, step):
super(_MyEveryN, self).every_n_step_begin(step)
self._steps_begun.append(step)
return []
def every_n_step_end(self, step, outputs):
super(_MyEveryN, self).every_n_step_end(step, outputs)
self._steps_ended.append(step)
return False
def every_n_post_step(self, step, session):
super(_MyEveryN, self).every_n_post_step(step, session)
self._post_steps.append(step)
return False
class MonitorsTest(tf.test.TestCase):
"""Monitors tests."""
def setUp(self):
# Mock out logging calls so we can verify whether correct tensors are being
# monitored.
self._actual_log = logging.info
def mockLog(*args, **kwargs): # pylint: disable=invalid-name
self.logged_message = args
self._actual_log(*args, **kwargs)
logging.info = mockLog
def tearDown(self):
logging.info = self._actual_log
def _run_monitor(self, monitor, num_epochs=3, num_steps_per_epoch=10,
pass_max_steps=True):
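    """Drives `monitor` through its begin/epoch/step/post_step/end callbacks.

    Runs num_epochs epochs of num_steps_per_epoch steps each against the
    default session; max_steps is passed to begin() unless pass_max_steps is
    False.
    """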
if pass_max_steps:
max_steps = num_epochs * num_steps_per_epoch - 1
else:
max_steps = None
monitor.begin(max_steps=max_steps)
for epoch in xrange(num_epochs):
monitor.epoch_begin(epoch)
should_stop = False
step = epoch * num_steps_per_epoch
next_epoch_step = step + num_steps_per_epoch
while (not should_stop) and (step < next_epoch_step):
tensors = monitor.step_begin(step)
output = tf.get_default_session().run(tensors) if tensors else {}
output = dict(zip(
[t.name if isinstance(t, tf.Tensor) else t for t in tensors],
output))
should_stop = monitor.step_end(step=step, output=output)
monitor.post_step(step=step, session=None)
step += 1
monitor.epoch_end(epoch)
monitor.end()
def test_base_monitor(self):
with tf.Graph().as_default() as g, self.test_session(g):
self._run_monitor(learn.monitors.BaseMonitor())
def test_every_0(self):
monitor = _MyEveryN(every_n_steps=0, first_n_steps=-1)
with tf.Graph().as_default() as g, self.test_session(g):
self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10)
expected_steps = list(range(30))
self.assertAllEqual(expected_steps, monitor.steps_begun)
self.assertAllEqual(expected_steps, monitor.steps_ended)
self.assertAllEqual(expected_steps, monitor.post_steps)
def test_every_1(self):
monitor = _MyEveryN(every_n_steps=1, first_n_steps=-1)
with tf.Graph().as_default() as g, self.test_session(g):
self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10)
expected_steps = list(range(1, 30))
self.assertEqual(expected_steps, monitor.steps_begun)
self.assertEqual(expected_steps, monitor.steps_ended)
self.assertEqual(expected_steps, monitor.post_steps)
def test_every_2(self):
monitor = _MyEveryN(every_n_steps=2, first_n_steps=-1)
with tf.Graph().as_default() as g, self.test_session(g):
self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10)
expected_steps = list(range(2, 29, 2)) + [29]
self.assertEqual(expected_steps, monitor.steps_begun)
self.assertEqual(expected_steps, monitor.steps_ended)
self.assertEqual(expected_steps, monitor.post_steps)
def test_every_8(self):
monitor = _MyEveryN(every_n_steps=8, first_n_steps=2)
with tf.Graph().as_default() as g, self.test_session(g):
self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10)
expected_steps = [0, 1, 2, 10, 18, 26, 29]
self.assertEqual(expected_steps, monitor.steps_begun)
self.assertEqual(expected_steps, monitor.steps_ended)
self.assertEqual(expected_steps, monitor.post_steps)
def test_every_8_no_max_steps(self):
monitor = _MyEveryN(every_n_steps=8, first_n_steps=2)
with tf.Graph().as_default() as g, self.test_session(g):
self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10,
pass_max_steps=False)
begin_end_steps = [0, 1, 2, 10, 18, 26]
post_steps = [0, 1, 2, 10, 18, 26, 29]
self.assertEqual(begin_end_steps, monitor.steps_begun)
self.assertEqual(begin_end_steps, monitor.steps_ended)
self.assertEqual(post_steps, monitor.post_steps)
def test_every_8_recovered_after_step_begin(self):
monitor = _MyEveryN(every_n_steps=8)
with tf.Graph().as_default() as g, self.test_session(g):
for step in [8, 16]:
monitor.step_begin(step)
monitor.step_begin(step)
monitor.step_end(step, output=None)
monitor.post_step(step, session=None)
      # It should call begin again since end was not called
self.assertEqual([8, 8, 16, 16], monitor.steps_begun)
self.assertEqual([8, 16], monitor.steps_ended)
self.assertEqual([8, 16], monitor.post_steps)
def test_every_8_recovered_after_step_end(self):
monitor = _MyEveryN(every_n_steps=8)
with tf.Graph().as_default() as g, self.test_session(g):
for step in [8, 16]:
monitor.step_begin(step)
monitor.step_end(step, output=None)
monitor.post_step(step, session=None)
monitor.step_begin(step)
monitor.step_end(step, output=None)
monitor.post_step(step, session=None)
# It should not call begin twice since end was called
self.assertEqual([8, 16], monitor.steps_begun)
self.assertEqual([8, 16], monitor.steps_ended)
self.assertEqual([8, 16], monitor.post_steps)
def test_every_8_call_post_step_at_the_end(self):
monitor = _MyEveryN(every_n_steps=8)
with tf.Graph().as_default() as g, self.test_session(g):
monitor.begin()
for step in [8, 16]:
monitor.step_begin(step)
monitor.step_end(step, output=None)
monitor.post_step(step, session=None)
monitor.step_begin(19)
monitor.step_end(19, output=None)
monitor.post_step(19, session=None)
monitor.end(session=None)
# It should not call begin twice since end was called
self.assertEqual([8, 16], monitor.steps_begun)
self.assertEqual([8, 16], monitor.steps_ended)
self.assertEqual([8, 16, 19], monitor.post_steps)
def test_every_8_call_post_step_should_not_be_called_twice(self):
monitor = _MyEveryN(every_n_steps=8)
with tf.Graph().as_default() as g, self.test_session(g):
monitor.begin()
for step in [8, 16]:
monitor.step_begin(step)
monitor.step_end(step, output=None)
monitor.post_step(step, session=None)
monitor.step_begin(16)
monitor.step_end(16, output=None)
monitor.post_step(16, session=None)
monitor.end(session=None)
# It should not call begin twice since end was called
self.assertEqual([8, 16], monitor.steps_begun)
self.assertEqual([8, 16], monitor.steps_ended)
self.assertEqual([8, 16], monitor.post_steps)
def test_print(self):
with tf.Graph().as_default() as g, self.test_session(g):
t = tf.constant(42.0, name='foo')
self._run_monitor(learn.monitors.PrintTensor(tensor_names=[t.name]))
self.assertRegexpMatches(str(self.logged_message), t.name)
def test_logging_trainable(self):
with tf.Graph().as_default() as g, self.test_session(g):
var = tf.Variable(tf.constant(42.0), name='foo')
var.initializer.run()
cof = tf.constant(1.0)
loss = tf.sub(tf.mul(var, cof), tf.constant(1.0))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
tf.get_default_session().run(train_step)
self._run_monitor(learn.monitors.LoggingTrainable('foo'))
self.assertRegexpMatches(str(self.logged_message), var.name)
def test_summary_saver(self):
with tf.Graph().as_default() as g, self.test_session(g):
log_dir = 'log/dir'
summary_writer = testing.FakeSummaryWriter(log_dir, g)
var = tf.Variable(0.0)
var.initializer.run()
tensor = tf.assign_add(var, 1.0)
summary_op = tf.summary.scalar('my_summary', tensor)
self._run_monitor(
learn.monitors.SummarySaver(
summary_op=summary_op, save_steps=8,
summary_writer=summary_writer),
num_epochs=3, num_steps_per_epoch=10)
summary_writer.assert_summaries(
test_case=self, expected_logdir=log_dir, expected_graph=g,
expected_summaries={
0: {'my_summary': 1.0},
1: {'my_summary': 2.0},
9: {'my_summary': 3.0},
17: {'my_summary': 4.0},
25: {'my_summary': 5.0},
29: {'my_summary': 6.0},
})
def _assert_validation_monitor(
self, monitor, expected_early_stopped=False, expected_best_step=None,
expected_best_value=None):
self.assertEqual(expected_early_stopped, monitor.early_stopped)
self.assertEqual(expected_best_step, monitor.best_step)
self.assertEqual(expected_best_value, monitor.best_value)
def test_validation_monitor_no_estimator(self):
monitor = learn.monitors.ValidationMonitor(
x=tf.constant(2.0), every_n_steps=0)
self._assert_validation_monitor(monitor)
with tf.Graph().as_default() as g, self.test_session(g):
with self.assertRaisesRegexp(ValueError, 'set_estimator'):
self._run_monitor(monitor)
@tf.test.mock.patch('tensorflow.contrib.learn.python.learn.estimators.Estimator', autospec=True) # pylint: disable=line-too-long
@tf.test.mock.patch.object(saver, 'latest_checkpoint')
def test_validation_monitor_no_ckpt(
self, mock_latest_checkpoint, mock_estimator_class):
estimator = mock_estimator_class()
model_dir = 'model/dir'
estimator.model_dir = model_dir
mock_latest_checkpoint.return_value = None
# Do nothing with no checkpoint.
monitor = learn.monitors.ValidationMonitor(
x=tf.constant(2.0), every_n_steps=0)
self._assert_validation_monitor(monitor)
monitor.set_estimator(estimator)
with tf.Graph().as_default() as g, self.test_session(g):
self._run_monitor(monitor)
self._assert_validation_monitor(monitor)
mock_latest_checkpoint.assert_called_with(model_dir)
@tf.test.mock.patch('tensorflow.contrib.learn.python.learn.estimators.Estimator', autospec=True) # pylint: disable=line-too-long
@tf.test.mock.patch.object(saver, 'latest_checkpoint')
def test_validation_monitor_no_early_stopping_rounds(
self, mock_latest_checkpoint, mock_estimator_class):
estimator = mock_estimator_class()
model_dir = 'model/dir'
estimator.model_dir = model_dir
estimator.evaluate.return_value = {}
mock_latest_checkpoint.return_value = '%s/ckpt' % model_dir
# Do nothing with early_stopping_rounds=None.
monitor = learn.monitors.ValidationMonitor(
x=tf.constant(2.0), every_n_steps=0)
self._assert_validation_monitor(monitor)
monitor.set_estimator(estimator)
with tf.Graph().as_default() as g, self.test_session(g):
self._run_monitor(monitor)
self._assert_validation_monitor(monitor)
@tf.test.mock.patch('tensorflow.contrib.learn.python.learn.estimators.Estimator', autospec=True) # pylint: disable=line-too-long
@tf.test.mock.patch.object(saver, 'latest_checkpoint')
def test_validation_monitor_invalid_metric(
self, mock_latest_checkpoint, mock_estimator_class):
estimator = mock_estimator_class()
model_dir = 'model/dir'
estimator.model_dir = model_dir
estimator.evaluate.return_value = {}
mock_latest_checkpoint.return_value = '%s/ckpt' % model_dir
# Fail for missing metric.
monitor = learn.monitors.ValidationMonitor(
x=tf.constant(2.0), every_n_steps=0, early_stopping_rounds=1)
self._assert_validation_monitor(monitor)
monitor.set_estimator(estimator)
with tf.Graph().as_default() as g, self.test_session(g):
with self.assertRaisesRegexp(ValueError, 'missing from outputs'):
self._run_monitor(monitor, num_epochs=1, num_steps_per_epoch=1)
@tf.test.mock.patch('tensorflow.contrib.learn.python.learn.estimators.Estimator', autospec=True) # pylint: disable=line-too-long
@tf.test.mock.patch.object(saver, 'latest_checkpoint')
def test_validation_monitor(
self, mock_latest_checkpoint, mock_estimator_class):
estimator = mock_estimator_class()
model_dir = 'model/dir'
estimator.model_dir = model_dir
validation_outputs = {'loss': None}
estimator.evaluate.return_value = validation_outputs
monitor = learn.monitors.ValidationMonitor(
x=tf.constant(2.0), every_n_steps=0, early_stopping_rounds=2)
self._assert_validation_monitor(monitor)
monitor.set_estimator(estimator)
with tf.Graph().as_default() as g, self.test_session(g):
monitor.begin(max_steps=100)
monitor.epoch_begin(epoch=0)
self.assertEqual(0, estimator.evaluate.call_count)
# Step 0, initial loss.
step = 0
mock_latest_checkpoint.return_value = '%s/ckpt.%s' % (model_dir, step)
validation_outputs['loss'] = 42.0
self.assertEqual(0, len(monitor.step_begin(step=step)))
self.assertFalse(monitor.step_end(step=step, output={}))
self.assertEqual(1, estimator.evaluate.call_count)
self._assert_validation_monitor(
monitor, expected_best_step=0, expected_best_value=42.0)
monitor.post_step(step=step, session=None)
# Step 1, same checkpoint, no eval.
step = 1
self.assertEqual(0, len(monitor.step_begin(step=step)))
self.assertFalse(monitor.step_end(step=step, output={}))
self.assertEqual(1, estimator.evaluate.call_count)
self._assert_validation_monitor(
monitor, expected_best_step=0, expected_best_value=42.0)
monitor.post_step(step=step, session=None)
# Step 2, lower loss.
step = 2
mock_latest_checkpoint.return_value = '%s/ckpt.%s' % (model_dir, step)
validation_outputs['loss'] = 40.0
self.assertEqual(0, len(monitor.step_begin(step=step)))
self.assertFalse(monitor.step_end(step=step, output={}))
self.assertEqual(2, estimator.evaluate.call_count)
self._assert_validation_monitor(
monitor, expected_best_step=2, expected_best_value=40.0)
monitor.post_step(step=step, session=None)
# Step 3, higher loss.
step = 3
mock_latest_checkpoint.return_value = '%s/ckpt.%s' % (model_dir, step)
validation_outputs['loss'] = 44.0
self.assertEqual(0, len(monitor.step_begin(step=step)))
self.assertFalse(monitor.step_end(step=step, output={}))
self.assertEqual(3, estimator.evaluate.call_count)
self._assert_validation_monitor(
monitor, expected_best_step=2, expected_best_value=40.0)
monitor.post_step(step=step, session=None)
# Step 4, higher loss for 2 steps, early stopping.
step = 4
mock_latest_checkpoint.return_value = '%s/ckpt.%s' % (model_dir, step)
validation_outputs['loss'] = 43.0
self.assertEqual(0, len(monitor.step_begin(step=step)))
self.assertTrue(monitor.step_end(step=step, output={}))
self.assertEqual(4, estimator.evaluate.call_count)
self._assert_validation_monitor(
monitor, expected_early_stopped=True, expected_best_step=2,
expected_best_value=40.0)
monitor.post_step(step=step, session=None)
monitor.epoch_end(epoch=0)
monitor.end()
def test_graph_dump(self):
monitor0 = learn.monitors.GraphDump()
monitor1 = learn.monitors.GraphDump()
with tf.Graph().as_default() as g, self.test_session(g):
const_var = tf.Variable(42.0, name='my_const')
counter_var = tf.Variable(0.0, name='my_counter')
assign_add = tf.assign_add(counter_var, 1.0, name='my_assign_add')
tf.global_variables_initializer().run()
self._run_monitor(monitor0, num_epochs=3, num_steps_per_epoch=10)
self.assertEqual({
step: {
const_var.name: 42.0,
counter_var.name: step + 1.0,
assign_add.name: step + 1.0,
} for step in xrange(30)
}, monitor0.data)
self._run_monitor(monitor1, num_epochs=3, num_steps_per_epoch=10)
self.assertEqual({
step: {
const_var.name: 42.0,
counter_var.name: step + 31.0,
assign_add.name: step + 31.0,
} for step in xrange(30)
}, monitor1.data)
for step in xrange(30):
matched, non_matched = monitor1.compare(monitor0, step=step)
self.assertEqual([const_var.name], matched)
self.assertEqual({
assign_add.name: (step + 31.0, step + 1.0),
counter_var.name: (step + 31.0, step + 1.0),
}, non_matched)
matched, non_matched = monitor0.compare(monitor1, step=step)
self.assertEqual([const_var.name], matched)
self.assertEqual({
assign_add.name: (step + 1.0, step + 31.0),
counter_var.name: (step + 1.0, step + 31.0),
}, non_matched)
def test_capture_variable(self):
monitor = learn.monitors.CaptureVariable(
var_name='my_assign_add:0', every_n=8, first_n=2)
with tf.Graph().as_default() as g, self.test_session(g):
var = tf.Variable(0.0, name='my_var')
var.initializer.run()
tf.assign_add(var, 1.0, name='my_assign_add')
self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10)
self.assertEqual({
0: 1.0,
1: 2.0,
2: 3.0,
10: 4.0,
18: 5.0,
26: 6.0,
29: 7.0,
}, monitor.values)
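# Note on test_capture_variable above: CaptureVariable is an EveryN monitor, so
# with every_n=8 and first_n=2 the captured tensor is evaluated at steps 0-2,
# then every 8 steps (10, 18, 26), and always at the final step (29 of the
# 30-step run). Because the captured op is the assign_add itself, the variable
# only increments on those seven evaluations, giving the values 1.0 to 7.0.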
class StopAtStepTest(tf.test.TestCase):
def test_raise_in_both_last_step_and_num_steps(self):
with self.assertRaises(ValueError):
learn.monitors.StopAtStep(num_steps=10, last_step=20)
def test_stop_based_on_last_step(self):
m = learn.monitors.StopAtStep(last_step=10)
m.step_begin(5)
self.assertFalse(m.step_end(5, None))
m.step_begin(9)
self.assertFalse(m.step_end(9, None))
m.step_begin(10)
self.assertTrue(m.step_end(10, None))
m.step_begin(11)
self.assertTrue(m.step_end(11, None))
def test_stop_based_on_num_step(self):
m = learn.monitors.StopAtStep(num_steps=10)
m.step_begin(5)
self.assertFalse(m.step_end(5, None))
m.step_begin(13)
self.assertFalse(m.step_end(13, None))
m.step_begin(14)
self.assertTrue(m.step_end(14, None))
m.step_begin(15)
self.assertTrue(m.step_end(15, None))
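# A minimal sketch (not one of the original tests) of the StopAtStep counting
# rule: with num_steps=N the monitor requests a stop once N steps have elapsed
# from the first step it observes, which is why a run starting at step 5 with
# num_steps=10 stops at step 14 above.
def _stop_at_step_sketch():
  m = learn.monitors.StopAtStep(num_steps=3)
  should_stop = False
  for step in (7, 8, 9):
    m.step_begin(step)
    should_stop = m.step_end(step, None)
  return should_stop  # True: three steps (7, 8, 9) have elapsed.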
class CheckpointSaverTest(tf.test.TestCase):
def setUp(self):
self.model_dir = tempfile.mkdtemp()
self.graph = tf.Graph()
with self.graph.as_default():
self.scaffold = monitored_session.Scaffold()
self.global_step = tf.contrib.framework.get_or_create_global_step()
self.train_op = tf.assign_add(self.global_step, 1)
def tearDown(self):
shutil.rmtree(self.model_dir, ignore_errors=True)
def _run(self, monitor, step, train_op, sess):
monitor.step_begin(step)
sess.run(train_op)
monitor.post_step(step, sess)
def test_raise_in_both_secs_and_steps(self):
with self.assertRaises(ValueError):
learn.monitors.CheckpointSaver(
self.model_dir, save_secs=10, save_steps=20)
def test_raise_in_none_secs_and_steps(self):
with self.assertRaises(ValueError):
learn.monitors.CheckpointSaver(self.model_dir)
def test_save_secs_saves_in_first_step(self):
with self.graph.as_default():
monitor = learn.monitors.CheckpointSaver(
self.model_dir, save_secs=2, scaffold=self.scaffold)
monitor.begin()
self.scaffold.finalize()
with tf.Session() as sess:
sess.run(self.scaffold.init_op)
self._run(monitor, 1, self.train_op, sess)
self.assertEqual(1, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
# TODO(gunan): Reenable this test after b/32446874 is fixed.
def disabled_test_save_secs_saves_periodically(self):
with self.graph.as_default():
monitor = learn.monitors.CheckpointSaver(
self.model_dir, save_secs=2, scaffold=self.scaffold)
monitor.begin()
self.scaffold.finalize()
with tf.Session() as sess:
sess.run(self.scaffold.init_op)
self._run(monitor, 1, self.train_op, sess)
self._run(monitor, 2, self.train_op, sess)
# Not saved
self.assertEqual(1, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
time.sleep(2.5)
self._run(monitor, 3, self.train_op, sess)
# saved
self.assertEqual(3, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
self._run(monitor, 4, self.train_op, sess)
self._run(monitor, 5, self.train_op, sess)
# Not saved
self.assertEqual(3, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
time.sleep(2.5)
self._run(monitor, 6, self.train_op, sess)
# saved
self.assertEqual(6, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
def test_save_steps_saves_in_first_step(self):
with self.graph.as_default():
monitor = learn.monitors.CheckpointSaver(
self.model_dir, save_steps=2, scaffold=self.scaffold)
monitor.begin()
self.scaffold.finalize()
with tf.Session() as sess:
sess.run(self.scaffold.init_op)
self._run(monitor, 1, self.train_op, sess)
self.assertEqual(1, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
def test_save_steps_saves_periodically(self):
with self.graph.as_default():
monitor = learn.monitors.CheckpointSaver(
self.model_dir, save_steps=2, scaffold=self.scaffold)
monitor.begin()
self.scaffold.finalize()
with tf.Session() as sess:
sess.run(self.scaffold.init_op)
self._run(monitor, 1, self.train_op, sess)
self._run(monitor, 2, self.train_op, sess)
# Not saved
self.assertEqual(1, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
self._run(monitor, 3, self.train_op, sess)
# saved
self.assertEqual(3, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
self._run(monitor, 4, self.train_op, sess)
# Not saved
self.assertEqual(3, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
self._run(monitor, 5, self.train_op, sess)
# saved
self.assertEqual(5, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
def test_save_saves_at_end(self):
with self.graph.as_default():
monitor = learn.monitors.CheckpointSaver(
self.model_dir, save_secs=2, scaffold=self.scaffold)
monitor.begin()
self.scaffold.finalize()
with tf.Session() as sess:
sess.run(self.scaffold.init_op)
self._run(monitor, 1, self.train_op, sess)
self._run(monitor, 2, self.train_op, sess)
monitor.end(sess)
self.assertEqual(2, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
class FakeMonitor(learn.monitors.BaseMonitor):
def __init__(self):
learn.monitors.BaseMonitor.__init__(self)
self.should_stop = False
self.requested_tensors = []
self.call_counter = Counter()
self.last_begin_step = None
self.last_end_step = None
self.last_post_step = None
def begin(self, max_steps):
self.call_counter['begin'] += 1
def end(self, session):
self.call_counter['end'] += 1
def step_begin(self, step):
self.call_counter['step_begin'] += 1
self.last_begin_step = step
return self.requested_tensors
def step_end(self, step, output):
self.call_counter['step_end'] += 1
self.last_end_step = step
self.output = output
return self.should_stop
def post_step(self, step, session):
self.call_counter['post_step'] += 1
self.last_post_step = step
self.session = session
class RunHookAdapterForMonitorsTest(tf.test.TestCase):
def test_calls_and_steps(self):
with tf.Graph().as_default(), tf.Session() as sess:
global_step_tensor = tf.contrib.framework.create_global_step()
inc_5 = tf.assign_add(global_step_tensor, 5)
mock_mon = FakeMonitor()
mock_mon2 = FakeMonitor()
hook = learn.monitors.RunHookAdapterForMonitors([mock_mon, mock_mon2])
hook.begin()
for mon in [mock_mon, mock_mon2]:
self.assertEqual(mon.call_counter['begin'], 1)
sess.run(tf.global_variables_initializer())
sess.run(global_step_tensor.assign(10))
mon_sess = monitored_session._HookedSession(sess=sess, hooks=[hook])
mon_sess.run(inc_5)
for mon in [mock_mon, mock_mon2]:
self.assertEqual(mon.output, {})
self.assertEqual(mon.last_begin_step, 11)
self.assertEqual(mon.last_end_step, 11)
self.assertEqual(mon.last_post_step, 11)
self.assertEqual(mon.call_counter['step_end'], 1)
self.assertEqual(mon.call_counter['step_begin'], 1)
self.assertEqual(mon.call_counter['post_step'], 1)
mon_sess.run(inc_5)
for mon in [mock_mon, mock_mon2]:
self.assertEqual(mon.output, {})
self.assertEqual(mon.last_begin_step, 16)
self.assertEqual(mon.last_end_step, 16)
self.assertEqual(mon.last_post_step, 16)
self.assertEqual(mon.call_counter['step_end'], 2)
self.assertEqual(mon.call_counter['step_begin'], 2)
self.assertEqual(mon.call_counter['post_step'], 2)
hook.end(sess)
for mon in [mock_mon, mock_mon2]:
self.assertEqual(mon.call_counter['end'], 1)
def test_requests(self):
with tf.Graph().as_default(), tf.Session() as sess:
tf.contrib.framework.create_global_step()
mock_mon = FakeMonitor()
mock_mon2 = FakeMonitor()
hook = learn.monitors.RunHookAdapterForMonitors([mock_mon, mock_mon2])
hook.begin()
mon_sess = monitored_session._HookedSession(sess=sess, hooks=[hook])
a_tensor = tf.constant([0], name='a_tensor')
tf.constant([5], name='another_tensor')
tf.constant([10], name='third_tensor')
mock_mon.requested_tensors = ['another_tensor']
mock_mon2.requested_tensors = ['third_tensor']
sess.run(tf.global_variables_initializer())
output = mon_sess.run(a_tensor)
self.assertEqual(output, [0])
self.assertEqual(mock_mon.output['another_tensor'], [5])
self.assertEqual(mock_mon2.output['third_tensor'], [10])
if __name__ == '__main__':
tf.test.main()
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v9.resources.types import (
bidding_seasonality_adjustment,
)
from google.ads.googleads.v9.services.types import (
bidding_seasonality_adjustment_service,
)
from .base import (
BiddingSeasonalityAdjustmentServiceTransport,
DEFAULT_CLIENT_INFO,
)
class BiddingSeasonalityAdjustmentServiceGrpcTransport(
BiddingSeasonalityAdjustmentServiceTransport
):
"""gRPC backend transport for BiddingSeasonalityAdjustmentService.
Service to manage bidding seasonality adjustments.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = google.auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs,
)
def close(self):
self.grpc_channel.close()
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_bidding_seasonality_adjustment(
self,
) -> Callable[
[
bidding_seasonality_adjustment_service.GetBiddingSeasonalityAdjustmentRequest
],
bidding_seasonality_adjustment.BiddingSeasonalityAdjustment,
]:
r"""Return a callable for the get bidding seasonality
adjustment method over gRPC.
Returns the requested seasonality adjustment in full
detail.
Returns:
Callable[[~.GetBiddingSeasonalityAdjustmentRequest],
~.BiddingSeasonalityAdjustment]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_bidding_seasonality_adjustment" not in self._stubs:
self._stubs[
"get_bidding_seasonality_adjustment"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v9.services.BiddingSeasonalityAdjustmentService/GetBiddingSeasonalityAdjustment",
request_serializer=bidding_seasonality_adjustment_service.GetBiddingSeasonalityAdjustmentRequest.serialize,
response_deserializer=bidding_seasonality_adjustment.BiddingSeasonalityAdjustment.deserialize,
)
return self._stubs["get_bidding_seasonality_adjustment"]
@property
def mutate_bidding_seasonality_adjustments(
self,
) -> Callable[
[
bidding_seasonality_adjustment_service.MutateBiddingSeasonalityAdjustmentsRequest
],
bidding_seasonality_adjustment_service.MutateBiddingSeasonalityAdjustmentsResponse,
]:
r"""Return a callable for the mutate bidding seasonality
adjustments method over gRPC.
Creates, updates, or removes seasonality adjustments.
Operation statuses are returned.
Returns:
Callable[[~.MutateBiddingSeasonalityAdjustmentsRequest],
~.MutateBiddingSeasonalityAdjustmentsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mutate_bidding_seasonality_adjustments" not in self._stubs:
self._stubs[
"mutate_bidding_seasonality_adjustments"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v9.services.BiddingSeasonalityAdjustmentService/MutateBiddingSeasonalityAdjustments",
request_serializer=bidding_seasonality_adjustment_service.MutateBiddingSeasonalityAdjustmentsRequest.serialize,
response_deserializer=bidding_seasonality_adjustment_service.MutateBiddingSeasonalityAdjustmentsResponse.deserialize,
)
return self._stubs["mutate_bidding_seasonality_adjustments"]
__all__ = ("BiddingSeasonalityAdjustmentServiceGrpcTransport",)
|
|
#!/usr/bin/env python
# Copyright (c) 2017-2021 F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from f5_cccl.resource.ltm.policy import Condition
from mock import Mock
import pytest
conditions = {
'http_host': {
'httpHost': True,
'host': True,
'equals': True,
'values': ["www.my-site.com", "www.your-site.com"],
},
'http_uri': {
'httpUri': True,
'host': True,
'equals': True,
'values': ["bar.com", "foo.com"],
},
'http_uri_path': {
'httpUri': True,
'path': True,
'not': True,
'equals': True,
'values': ["/", "/home.htm"]
},
'http_uri_path_segment': {
'httpUri': True,
'pathSegment': True,
'index': 2,
'startsWith': True,
'values': ["articles"],
},
'http_uri_extension': {
'httpUri': True,
'extension': True,
'startsWith': True,
'values': ["htm"]
},
'http_uri_unsupported': {
'httpUri': True,
'queryString': True,
'equals': True,
'values': ["expandSubcollections=true"]
},
'http_unsupported_operand_type': {
'httpMethod': True,
'equals': True,
'values': ["GET"]
},
'http_cookie': {
'httpCookie': True,
'tmName': "Cookie",
'contains': True,
'values': ["sessionToken=abc123"]
},
'http_header': {
'httpHeader': True,
'tmName': "Host",
'contains': True,
'values': ["www.acme.com"]
},
'tcp_address': {
'tcp': True,
'address': True,
'matches': True,
'values': ["10.10.10.10/32", "10.0.0.0/16"]
}
}
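# A minimal sketch (not one of the original tests) of how the entries above are
# consumed: construct a Condition with a name and one of the supported config
# dicts, then inspect the condition data it builds for the BIG-IP policy rule.
def _condition_data_sketch(key):
    condition = Condition("0", conditions[key])
    return condition.data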
@pytest.fixture
def bigip():
bigip = Mock()
return bigip
def test_create_http_host_match():
name="0"
condition = Condition(name, conditions['http_host'])
data = condition.data
assert condition.name == "0"
assert not condition.partition
assert data.get('httpHost')
assert data.get('host')
assert data.get('equals')
assert data.get('values') == ["www.my-site.com",
"www.your-site.com"]
assert not data.get('startsWith')
assert not data.get('endsWith')
assert not data.get('contains')
assert 'httpUri' not in data
assert 'httpCookie' not in data
assert 'httpHeader' not in data
assert not data.get('index')
assert not data.get('path')
assert not data.get('pathSegment')
assert not data.get('extension')
assert not data.get('httpCookie')
assert not data.get('httpHeader')
assert not data.get('tmName')
def test_create_http_uri_match():
name="0"
condition = Condition(name, conditions['http_uri'])
data = condition.data
assert condition.name == "0"
assert not condition.partition
assert data.get('httpUri')
assert data.get('host')
assert data.get('equals')
assert data.get('values') == ["bar.com", "foo.com"]
assert not data.get('startsWith')
assert not data.get('endsWith')
assert not data.get('contains')
assert 'httpHost' not in data
assert 'httpCookie' not in data
assert 'httpHeader' not in data
assert not data.get('index')
assert not data.get('path')
assert not data.get('pathSegment')
assert not data.get('extension')
assert not data.get('httpCookie')
assert not data.get('httpHeader')
assert not data.get('tmName')
def test_create_http_uri_path_match():
name="0"
condition = Condition(name, conditions['http_uri_path'])
data = condition.data
assert condition.name == "0"
assert not condition.partition
assert data.get('httpUri')
assert data.get('path')
assert data.get('values') == ["/", "/home.htm"]
assert 'httpHost' not in data
assert 'httpCookie' not in data
assert 'httpHeader' not in data
assert data.get('equals')
assert not data.get('startsWith')
assert not data.get('endsWith')
assert not data.get('contains')
assert not data.get('missing')
assert data.get('not')
assert not data.get('caseSensitive')
assert not data.get('index')
assert not data.get('pathSegment')
assert not data.get('extension')
assert not data.get('httpCookie')
assert not data.get('httpHeader')
assert not data.get('tmName')
def test_create_http_uri_unsupported_match():
name="0"
with pytest.raises(ValueError):
Condition(name, conditions['http_uri_unsupported'])
def test_create_http_unsupported_operand_type():
name="0"
with pytest.raises(ValueError):
Condition(name, conditions['http_unsupported_operand_type'])
def test_create_http_uri_path_segment_match():
name="0"
condition = Condition(name, conditions['http_uri_path_segment'])
data = condition.data
assert condition.name == "0"
assert not condition.partition
assert data.get('httpUri')
assert data.get('pathSegment')
assert data.get('values') == ["articles"]
assert data.get('index') == 2
assert 'httpHost' not in data
assert 'httpCookie' not in data
assert 'httpHeader' not in data
assert not data.get('equals')
assert data.get('startsWith')
assert not data.get('endsWith')
assert not data.get('contains')
assert not data.get('missing')
assert not data.get('not')
assert not data.get('caseSensitive')
assert not data.get('path')
assert not data.get('extension')
assert not data.get('httpCookie')
assert not data.get('httpHeader')
assert not data.get('tmName')
def test_create_http_uri_extension_match():
name="0"
condition = Condition(name, conditions['http_uri_extension'])
data = condition.data
assert condition.name == "0"
assert not condition.partition
assert data.get('httpUri')
assert data.get('extension')
assert data.get('values') == ["htm"]
assert 'httpHost' not in data
assert 'httpCookie' not in data
assert 'httpHeader' not in data
assert not data.get('equals')
assert data.get('startsWith')
assert not data.get('endsWith')
assert not data.get('contains')
assert not data.get('missing')
assert not data.get('not')
assert not data.get('caseSensitive')
assert not data.get('index')
assert not data.get('path')
assert not data.get('pathSegment')
assert not data.get('httpCookie')
assert not data.get('httpHeader')
assert not data.get('tmName')
def test_create_http_cookie_match():
name="0"
condition = Condition(name, conditions['http_cookie'])
data = condition.data
assert condition.name == "0"
assert not condition.partition
assert data.get('httpCookie')
assert data.get('tmName') == "Cookie"
assert data.get('values') == ["sessionToken=abc123"]
assert 'httpHost' not in data
assert 'httpUri' not in data
assert 'httpHeader' not in data
assert not data.get('equals')
assert not data.get('startsWith')
assert not data.get('endsWith')
assert data.get('contains')
assert not data.get('missing')
assert not data.get('not')
assert not data.get('caseSensitive')
assert not data.get('index')
assert not data.get('path')
assert not data.get('pathSegment')
def test_create_http_header_match():
name="0"
condition = Condition(name, conditions['http_header'])
data = condition.data
assert condition.name == "0"
assert not condition.partition
assert data.get('httpHeader')
assert data.get('tmName') == "Host"
assert data.get('values') == ["www.acme.com"]
assert 'httpHost' not in data
assert 'httpUri' not in data
assert 'httpCookie' not in data
assert not data.get('equals')
assert not data.get('startsWith')
assert not data.get('endsWith')
assert data.get('contains')
assert not data.get('missing')
assert not data.get('not')
assert not data.get('caseSensitive')
assert not data.get('index')
assert not data.get('path')
assert not data.get('pathSegment')
def test_equal_conditions():
name="0"
condition_1 = Condition(name, conditions['http_host'])
condition_2 = Condition(name, conditions['http_host'])
assert id(condition_1) != id(condition_2)
assert condition_1 == condition_2
condition_1.data['values'].pop()
assert not condition_1 == condition_2
assert condition_1 != condition_2
fake_condition = {
"httpHost": False,
"values": ["www.my-site.com"]
}
assert condition_1 != fake_condition
assert condition_1 != conditions['http_uri_path']
def test_str_condition():
name="0"
condition = Condition(name, conditions['http_host'])
assert str(condition)
def test_uri_path(bigip):
name="0"
condition = Condition(name, conditions['http_host'])
with pytest.raises(NotImplementedError):
condition._uri_path(bigip)
def test_create_tcp_address_match():
name="0"
condition = Condition(name, conditions['tcp_address'])
data = condition.data
assert condition.name == "0"
assert not condition.partition
assert data.get('tcp')
assert data.get('values') == ["10.0.0.0/16", "10.10.10.10/32"]
assert 'httpHost' not in data
assert 'httpUri' not in data
assert 'httpCookie' not in data
assert not data.get('equals')
assert not data.get('startsWith')
assert not data.get('endsWith')
assert data.get('matches')
assert not data.get('missing')
assert not data.get('not')
assert not data.get('caseSensitive')
assert not data.get('index')
assert not data.get('path')
assert not data.get('pathSegment')
|
|
'''
Copyright 2014 Tyler Palsulich
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# This bot is a result of a tutorial covered on http://shellium.org/wiki.
import socket
import sys
import argparse
import atexit
import shelve
from datetime import datetime
from datetime import timedelta
# imports for Twitter functionality
import ConfigParser
import json
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from tweepy import API
from nltk.chat import eliza
# imports for Facebook functionality
import facebook
# Time used to prevent sending messages while in quiet mode.
can_send_after = datetime.now()
# Beginning of Twitter variables
config = ConfigParser.ConfigParser()
config.read('.twitter')
consumer_key = config.get('apikey', 'key')
consumer_secret = config.get('apikey', 'secret')
access_token = config.get('token', 'token')
access_token_secret = config.get('token', 'secret')
stream_rule = config.get('app', 'rule')
account_screen_name = config.get('app', 'account_screen_name').lower()
account_user_id = config.get('app', 'account_user_id')
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
twitterApi = API(auth)
chatbot = eliza.Chat(eliza.pairs)
# End of Twitter variables
# Beginning of Facebook variables
config.read('.facebook')
facebook_token = config.get('token', 'token')
# End of Facebook variables
# Check whether the given string is a positive number.
# Based on http://stackoverflow.com/questions/354038/how-do-i-check-if-a-string-is-a-number-in-python.
def is_positive_number(s):
try:
s = float(s)
return s > 0
except ValueError:
return False
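# For example: is_positive_number("2.5") -> True, is_positive_number("-1") -> False,
# and is_positive_number("abc") -> False.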
# Respond to a PING from the server.
def pong(data):
ircsock.send("PONG " + data.split()[1] + "\n")
# Send a message to the connected server.
def send(message, user = None, force = False):
    if force or datetime.now() > can_send_after:
if user is None:
ircsock.send("PRIVMSG " + args.channel + " :" + message + "\n")
else:
ircsock.send("PRIVMSG " + args.channel + " :" + user + ": " + message + "\n")
# Join the given channel.
def join_channel(channel):
ircsock.send("JOIN " + channel + "\n")
# Respond to any keywords from the map `commands` in the string `message`.
def handle(message, commands):
for key in commands:
if key in message:
send((commands[key][0] + " ") * message.count(key, 0))
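# For example (illustrative): with the default commands defined under __main__,
# handle("ack ack boom", default_commands) sends "ack ack " once for the two
# "ack" occurrences and "kaboom!!! " once for "boom".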
# Store the given key and value in the user_commands map. But, do not
# allow the users to change default commands.
def learn(key, value, user):
if key not in default_commands:
if key in user_commands:
send("Relearned " + key)
else:
send("Learned " + key)
user_commands[key] = [" ".join(value), user]
else:
send("Go away, " + user + "!")
# Forget the user command with the given key.
def forget(key):
if key in default_commands:
send("No.")
elif key in user_commands:
user_commands.pop(key)
send("Dropped like a bad habit.")
else:
send("Maybe you're the one forgetting...")
# Monitor the user stream for the configured Twitter account and auto-reply
# with an Ack to any tweet directed at that account.
# https://dev.twitter.com/docs/streaming-apis/streams/user
class ReplyToTweet(StreamListener):
def on_data(self, data):
print data
tweet = json.loads(data.strip())
retweeted = tweet.get('retweeted')
from_self = tweet.get('user',{}).get('id_str','') == account_user_id
if retweeted is not None and not retweeted and not from_self:
tweetId = tweet.get('id_str')
screenName = tweet.get('user',{}).get('screen_name')
tweetText = tweet.get('text')
#chatResponse = chatbot.respond(tweetText)
replyText = '@' + screenName + ' ' + 'ACK ' + tweetText #This could be chatResponse but for now is just ACK
            # Check if the response is over 140 characters.
if len(replyText) > 140:
replyText = replyText[0:137] + '...'
print('Tweet ID: ' + tweetId)
print('From: ' + screenName)
print('Tweet Text: ' + tweetText)
print('Reply Text: ' + replyText)
# If rate limited, the status posts should be queued up and sent on an interval
twitterApi.update_status(replyText, tweetId)
def on_error(self, status):
print status
# Simply posts to a user's wall.
class PostToWall():
def on_data(self, data):
graph = facebook.GraphAPI(access_token=facebook_token, version='2.2')
attachment = {
'name': '!!AutoAck AckAttack BABY!!',
'link': 'https://github.com/tpalsulich/AutoAck',
'caption': '#gitpush #ACK #seen #bewm',
'description': 'A binary bitblob of AckAttack with some +1, #Aye #gitpush, #rebase and #bewm',
'picture': 'https://github.com/tpalsulich/AutoAck/blob/master/logos/auto-ack.logo.png'
}
graph.put_wall_post(message=data.strip(), attachment=attachment)
def on_error(self, status):
print status
def send_help():
send("Available commands:")
send(" " + args.nick + ": autotweet (monitor the defined twitter account and AutoAck Tweets)")
send(" " + args.nick + ": blame [key] (show user who created [key])")
send(" " + args.nick + ": forget [key] (forget user learned keyword [key])")
send(" " + args.nick + ": help (print this help message)")
send(" " + args.nick + ": learn [key] [value] (learn to say [value] after [key])")
send(" " + args.nick + ": list (print list of available keywords)")
send(" " + args.nick + ": quiet [seconds] (don't talk for optional number of [seconds])")
send(" " + args.nick + ": speak (override a previous quiet command)")
send(" " + args.nick + ": autotweet (send a tweet to the defined twitter account)")
send(" " + args.nick + ": autofbook (post a message to the wall of the defined facebook account)")
# Loop forever, waiting for messages to arrive.
def main_loop():
global can_send_after
while 1:
message = ircsock.recv(2048) # Receive data from the server.
message = message.strip('\n\r') # Remove any unnecessary linebreaks.
print message
if "PING :" in message: pong(message)
# Only respond to chat from the current chatroom (not private or administrative log in messages).
if splitter not in message: continue
# Get the content of the message.
user = message.split("!")[0][1:]
message = message.split(splitter)[1]
# Convert to lowercase and split the message based on whitespace.
split = message.lower().split()
if split[0] == args.nick.lower() + ":": # Command addressed to the bot (e.g. learn or forget).
if split[1] == "learn" and len(split) > 2:
learn(split[2], message.split()[3:], user)
elif split[1] == "forget" and len(split) == 3:
forget(split[2])
elif split[1] == "help":
send_help()
elif split[1] == "quiet" and len(split) == 2:
can_send_after = datetime.now() + timedelta(seconds=args.quiet)
send("Whatever you say.", user, True)
elif split[1] == "quiet" and len(split) == 3 and is_positive_number(split[2]):
can_send_after = datetime.now() + timedelta(seconds=int(split[2]))
send("Whatever you say.", user, True)
elif split[1] == "speak" and len(split) == 2:
can_send_after = datetime.now()
elif split[1] == "list" and len(split) == 2:
send("Builtin commands: [" + ", ".join(default_commands) + "]")
send("User commands: [" + ", ".join(user_commands) + "]")
elif split[1] == "blame" and len(split) == 3:
if split[2] in default_commands:
send(split[2] + " is a default command.", user)
elif split[2] in user_commands:
send(split[2] + " was created by " + user_commands[split[2]][1], user)
else:
send("That's not a valid keyword!", user)
elif split[1] == "autotweet" and len(split) > 2:
streamListener = ReplyToTweet()
twitterStream = Stream(auth, streamListener)
twitterStream.userstream(_with='user')
send("AutoTweeting...", user, True)
elif split[1] == "autofbook" and len(split) > 2:
PostToWall()
send("AutoFbooking...", user, True)
else:
send("How may I help you?", user)
else: # Only handle messages that aren't sent directly to the bot.
handle(message.lower(), default_commands)
handle(message.lower(), user_commands)
if __name__ == '__main__':
try:
parser = argparse.ArgumentParser(description='An IRC bot used to respond to keywords automatically.')
parser.add_argument('-n', '--nick', default='AutoAck', help='Username of the bot')
parser.add_argument('-s', '--server', default='chat.freenode.net', help='Server to connect to')
parser.add_argument('-q', '--quiet', default=30, type=int, help='Default number of seconds to stay quiet when told')
parser.add_argument('-p', '--port', default=6667, type=int, help='Port to use when connecting to the server.')
parser.add_argument('channel', help='Channel to connect to.')
args = parser.parse_args()
# If the channel name doesn't start with a '#', prepend one.
if args.channel[0] != "#": args.channel = "#" + args.channel
# Substring used to split the received message into the actual message content
splitter = "PRIVMSG " + args.channel + " :"
# Map from keywords to how the bot will respond in chat.
default_commands = {
"ack": ["ack", args.nick],
"git": ["#gitpush", args.nick],
"aye": ["aye, mate!", args.nick],
"+1": ["+1", args.nick],
"boom": ["kaboom!!!", args.nick],
"beum": ["kabeum!!!", args.nick],
"bewm": ["ba-bewm!!!", args.nick],
"seen": ["seen like an eaten jelly bean", args.nick]}
# Map where chatroom members can have the bot "learn" commands.
user_commands = shelve.open("autoack.shelf")
# Connect to the server.
ircsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("Attempting to connect to " + args.server + ":" + args.channel + " on port " + str(args.port) + " with username " + args.nick)
ircsock.connect((args.server, args.port)) # Connect to the server.
ircsock.send("USER " + args.nick + " " + args.nick + " " + args.nick + " :.\n") # Authenticate the bot.
ircsock.send("NICK " + args.nick + "\n") # Assign the nickname to the bot.
join_channel(args.channel)
atexit.register(user_commands.close)
main_loop()
except KeyboardInterrupt:
print >> sys.stderr, '\nExiting by user request.\n'
sys.exit(0)
|
|
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
import weakref, re, os, sys
from ConfigParser import SafeConfigParser as ConfigParser,\
NoSectionError, NoOptionError
from urlparse import urlparse
from ZSI import TC
from ZSI.client import _Binding
from ZSI.generate import commands,containers
from ZSI.schema import GED, GTD
import wstools
#url_to_mod = re.compile(r'<([^ \t\n\r\f\v:]+:)?include\s+location\s*=\s*"(\S+)"')
def _urn_to_module(urn): return '%s_types' %re.sub(_urn_to_module.regex, '_', urn)
_urn_to_module.regex = re.compile(r'[\W]')
class ServiceProxy:
"""A ServiceProxy provides a convenient way to call a remote web
service that is described with WSDL. The proxy exposes methods
that reflect the methods of the remote web service."""
def __init__(self, wsdl, url=None, service=None, port=None,
cachedir=os.path.join(os.path.expanduser('~'), '.zsi_service_proxy_dir'),
asdict=True, lazy=False, pyclass=False, force=False, **kw):
"""
Parameters:
wsdl -- URL of WSDL.
url -- override WSDL SOAP address location
service -- service name or index
port -- port name or index
cachedir -- where to store generated files
asdict -- use dicts, else use generated pyclass
lazy -- use lazy typecode evaluation
           pyclass -- use pyclass_type metaclass: adds properties and "new_",
              "set_", "get_" methods for schema element and attribute declarations.
force -- regenerate all WSDL code, write over cache.
NOTE: all other **kw will be passed to the underlying
ZSI.client._Binding constructor.
"""
self._asdict = asdict
# client._Binding
self._url = url
self._kw = kw
# WSDL
self._wsdl = wstools.WSDLTools.WSDLReader().loadFromURL(wsdl)
self._service = self._wsdl.services[service or 0]
self.__doc__ = self._service.documentation
self._port = self._service.ports[port or 0]
self._name = self._service.name
self._methods = {}
self._cachedir = cachedir
self._lazy = lazy
self._pyclass = pyclass
self._force = force
# Set up rpc methods for service/port
port = self._port
binding = port.getBinding()
portType = binding.getPortType()
for port in self._service.ports:
for item in port.getPortType().operations:
try:
callinfo = wstools.WSDLTools.callInfoFromWSDL(port, item.name)
except:
# ignore non soap-1.1 bindings
continue
method = MethodProxy(self, callinfo)
setattr(self, item.name, method)
self._methods.setdefault(item.name, []).append(method)
self._mod = self._load(wsdl)
def _load(self, location):
"""
location -- URL or file location
"""
cachedir = self._cachedir
# wsdl2py: deal with XML Schema
if not os.path.isdir(cachedir): os.mkdir(cachedir)
file = os.path.join(cachedir, '.cache')
section = 'TYPES'
cp = ConfigParser()
try:
cp.readfp(open(file, 'r'))
except IOError:
del cp; cp = None
option = location.replace(':', '-') # colons seem to screw up option
if (not self._force and cp is not None and cp.has_section(section) and
cp.has_option(section, option)):
types = cp.get(section, option)
else:
# dont do anything to anames
if not self._pyclass:
containers.ContainerBase.func_aname = lambda instnc,n: str(n)
args = ['-o', cachedir, location]
if self._lazy: args.insert(0, '-l')
if self._pyclass: args.insert(0, '-b')
files = commands.wsdl2py(args)
if cp is None: cp = ConfigParser()
if not cp.has_section(section): cp.add_section(section)
types = filter(lambda f: f.endswith('_types.py'), files)[0]
cp.set(section, option, types)
cp.write(open(file, 'w'))
if os.path.abspath(cachedir) not in sys.path:
sys.path.append(os.path.abspath(cachedir))
mod = os.path.split(types)[-1].rstrip('.py')
return __import__(mod)
def _load_schema(self, location, xml=None):
"""
location -- location of schema, also used as a key
xml -- optional string representation of schema
"""
cachedir = self._cachedir
# wsdl2py: deal with XML Schema
if not os.path.isdir(cachedir): os.mkdir(cachedir)
file = os.path.join(cachedir, '.cache')
section = 'TYPES'
cp = ConfigParser()
try:
cp.readfp(open(file, 'r'))
except IOError:
del cp; cp = None
option = location.replace(':', '-') # colons seem to screw up option
if (cp is not None and cp.has_section(section) and
cp.has_option(section, option)):
types = cp.get(section, option)
else:
# dont do anything to anames
if not self._pyclass:
containers.ContainerBase.func_aname = lambda instnc,n: str(n)
from ZSI.wstools import XMLSchema
reader = XMLSchema.SchemaReader(base_url=location)
if xml is not None and isinstance(xml, basestring):
schema = reader.loadFromString(xml)
elif xml is not None:
raise RuntimeError, 'Unsupported: XML must be string'
elif not os.path.isfile(location):
schema = reader.loadFromURL(location)
else:
schema = reader.reader.loadFromFile(location)
# TODO: change this to keyword list
class options:
output_dir = cachedir
schema = True
simple_naming = False
address = False
lazy = self._lazy
complexType = self._pyclass
schema.location = location
files = commands._wsdl2py(options, schema)
if cp is None: cp = ConfigParser()
if not cp.has_section(section): cp.add_section(section)
types = filter(lambda f: f.endswith('_types.py'), files)[0]
cp.set(section, option, types)
cp.write(open(file, 'w'))
if os.path.abspath(cachedir) not in sys.path:
sys.path.append(os.path.abspath(cachedir))
mod = os.path.split(types)[-1].rstrip('.py')
return __import__(mod)
def _call(self, name, soapheaders):
"""return the Call to the named remote web service method.
closure used to prevent multiple values for name and soapheaders
parameters
"""
def call_closure(*args, **kwargs):
"""Call the named remote web service method."""
if len(args) and len(kwargs):
raise TypeError, 'Use positional or keyword argument only.'
if len(args) > 0:
raise TypeError, 'Not supporting SOAPENC:Arrays or XSD:List'
if len(kwargs):
args = kwargs
callinfo = getattr(self, name).callinfo
# go through the list of defined methods, and look for the one with
# the same number of arguments as what was passed. this is a weak
# check that should probably be improved in the future to check the
# types of the arguments to allow for polymorphism
for method in self._methods[name]:
if len(method.callinfo.inparams) == len(kwargs):
callinfo = method.callinfo
binding = _Binding(url=self._url or callinfo.location,
soapaction=callinfo.soapAction,
**self._kw)
kw = dict(unique=True)
if callinfo.use == 'encoded':
kw['unique'] = False
if callinfo.style == 'rpc':
request = TC.Struct(None, ofwhat=[],
pname=(callinfo.namespace, name), **kw)
response = TC.Struct(None, ofwhat=[],
pname=(callinfo.namespace, name+"Response"), **kw)
if len(callinfo.getInParameters()) != len(args):
                    raise RuntimeError('expecting "%s" parts, got %s' % (
                        str(callinfo.getInParameters()), str(args)))
for msg,pms in ((request,callinfo.getInParameters()),
(response,callinfo.getOutParameters())):
msg.ofwhat = []
for part in pms:
klass = GTD(*part.type)
if klass is None:
if part.type:
klass = filter(lambda gt: part.type==gt.type,TC.TYPES)
if len(klass) == 0:
klass = filter(lambda gt: part.type[1]==gt.type[1],TC.TYPES)
if not len(klass):klass = [TC.Any]
if len(klass) > 1: #Enumerations, XMLString, etc
klass = filter(lambda i: i.__dict__.has_key('type'), klass)
klass = klass[0]
else:
klass = TC.Any
msg.ofwhat.append(klass(part.name))
msg.ofwhat = tuple(msg.ofwhat)
if not args: args = {}
else:
# Grab <part element> attribute
ipart,opart = callinfo.getInParameters(),callinfo.getOutParameters()
if ( len(ipart) != 1 or not ipart[0].element_type or
ipart[0].type is None ):
raise RuntimeError, 'Bad Input Message "%s"' %callinfo.name
                if opart and ( len(opart) != 1 or not opart[0].element_type or
                    opart[0].type is None ):
raise RuntimeError, 'Bad Output Message "%s"' %callinfo.name
# if ( len(args) > 1 ):
# raise RuntimeError, 'Message has only one part: %s' %str(args)
ipart = ipart[0]
request,response = GED(*ipart.type),None
if opart: response = GED(*opart[0].type)
msg = args
if self._asdict:
if not msg: msg = dict()
self._nullpyclass(request)
elif request.pyclass is not None:
if type(args) is dict:
msg = request.pyclass()
msg.__dict__.update(args)
elif type(args) is list and len(args) == 1:
msg = request.pyclass(args[0])
else:
msg = request.pyclass()
binding.Send(None, None, msg,
requesttypecode=request,
soapheaders=soapheaders,
encodingStyle=callinfo.encodingStyle)
if response is None:
return None
if self._asdict: self._nullpyclass(response)
return binding.Receive(replytype=response,
encodingStyle=callinfo.encodingStyle)
return call_closure
def _nullpyclass(cls, typecode):
typecode.pyclass = None
if not hasattr(typecode, 'ofwhat'): return
if type(typecode.ofwhat) not in (list,tuple): #Array
cls._nullpyclass(typecode.ofwhat)
else: #Struct/ComplexType
for i in typecode.ofwhat: cls._nullpyclass(i)
_nullpyclass = classmethod(_nullpyclass)
class MethodProxy:
""" """
def __init__(self, parent, callinfo):
self.__name__ = callinfo.methodName
self.__doc__ = callinfo.documentation
self.callinfo = callinfo
self.parent = weakref.ref(parent)
self.soapheaders = []
def __call__(self, *args, **kwargs):
return self.parent()._call(self.__name__, self.soapheaders)(*args, **kwargs)
def add_headers(self, **headers):
"""packing dicts into typecode pyclass, may fail if typecodes are
used in the body (when asdict=True)
"""
class _holder: pass
def _remap(pyobj, **d):
pyobj.__dict__ = d
for k,v in pyobj.__dict__.items():
if type(v) is not dict: continue
pyobj.__dict__[k] = p = _holder()
_remap(p, **v)
for k,v in headers.items():
h = filter(lambda i: k in i.type, self.callinfo.inheaders)[0]
if h.element_type != 1:
raise RuntimeError, 'not implemented'
typecode = GED(*h.type)
if typecode is None:
raise RuntimeError, 'no matching element for %s' %str(h.type)
pyclass = typecode.pyclass
if pyclass is None:
raise RuntimeError, 'no pyclass for typecode %s' %str(h.type)
if type(v) is not dict:
pyobj = pyclass(v)
else:
pyobj = pyclass()
_remap(pyobj, **v)
self.soapheaders.append(pyobj)
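# A minimal usage sketch (illustrative only, not part of the original module):
# the WSDL URL and the "GetQuote"/"symbol" names are hypothetical placeholders
# for whatever operations the loaded WSDL actually defines.
def _example_service_proxy(wsdl_url):
    """Build a ServiceProxy from a WSDL and invoke an operation by name."""
    proxy = ServiceProxy(wsdl_url, asdict=True)
    # Each operation described in the WSDL becomes an attribute of the proxy,
    # so a hypothetical "GetQuote" operation with a "symbol" part is called as:
    return proxy.GetQuote(symbol='XYZ')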
|
|
import networkx, os, cPickle
def main():
base_dir = "/home/emre/arastirma/data/ontology/umls/2013AA/META/"
desc_file = base_dir + "MRCONSO.RRF"
rel_file = base_dir + "MRREL.RRF"
#g = get_mesh_disease_ontology(desc_file, rel_file)
#get_basic_info(desc_file, rel_file)
#get_drug_info(desc_file, rel_file)
mesh_id_to_name, concept_id_to_mesh_id, mesh_id_to_name_with_synonyms = get_mesh_id_mapping(desc_file, rel_file)
print "mesh dict:", len(mesh_id_to_name), len(mesh_id_to_name_with_synonyms)
print(mesh_id_to_name["D003924"])
print(concept_id_to_mesh_id["C0011860"])
print(mesh_id_to_name_with_synonyms["D003924"])
for mesh_id in [ "D003924", "D001769", "D005947", "D004493", "D006943" ]:
print(mesh_id, mesh_id in mesh_id_to_name)
return
class UMLS(object):
def __init__(self, file_name_desc, file_name_rel, concept_types = None, concept_sources = None, only_preferred = False):
self.file_name_desc = file_name_desc
self.file_name_rel = file_name_rel
self.delim = "|"
self.ontology = None
self.concept_id_to_values = None
self.concept_to_concept_id = None
self.concept_id_to_relations = None
self._get_concept_info(concept_types, concept_sources, only_preferred)
return
def _get_concept_info(self, concept_types = None, concept_sources = None, only_preferred = False):
"""
        Parses the MRCONSO file to gather concept info; typically called without arguments, and the results are cached on the instance.
"""
if self.concept_id_to_values is None and self.concept_to_concept_id is None:
self.concept_id_to_values = {}
self.concept_to_concept_id = {}
f = open(self.file_name_desc)
header_names = ["CUI", "LAT", "TS", "LUI", "STT", "SUI", "ISPREF", "AUI", "SAUI", "SCUI", "SDUI", "SAB", "TTY", "CODE", "STR", "SRL", "SUPPRESS", "CVF", "dummy"]
# CUI / LAT (ENG) / TS (P) / STT (PF/VO) all / ISPREF (Y) / SCUI - source based id / SAB - source / TTY (PT/SY) pt-preferred sy-synonym / CODE similar to SCUI / STR
col_to_idx = dict((val.lower(), i) for i, val in enumerate(header_names))
for line in f:
words = line.strip("\n").split(self.delim)
concept_id = words[col_to_idx["cui"]]
#if concept_id == "C0360380":
# print len(words), words
# print words[col_to_idx["ts"]], words[col_to_idx["ispref"]], words[col_to_idx["tty"]]
if words[col_to_idx["lat"]] != "ENG": # words[col_to_idx["ts"]] != "P"
continue
if only_preferred and words[col_to_idx["ispref"]] != "Y":
continue
concept_type = words[col_to_idx["tty"]]
if concept_types is not None and concept_type not in concept_types:
continue
source = words[col_to_idx["sab"]]
if concept_sources is not None and source not in concept_sources:
continue
concept = words[col_to_idx["str"]]
source_id = words[col_to_idx["code"]]
d = self.concept_id_to_values.setdefault(concept_id, {})
d.setdefault(source, set()).add((concept, source_id, concept_type))
                if concept in self.concept_to_concept_id and self.concept_to_concept_id[concept] != concept_id:
print("Concept id conflict - overwriting: {}, {}, {}".format(concept, self.concept_to_concept_id[concept], concept_id))
self.concept_to_concept_id[concept] = concept_id
return self.concept_id_to_values, self.concept_to_concept_id
def get_concept_id(self, concept):
return self.concept_to_concept_id[concept]
def get_values_by_concept_id(self, concept_id):
return self.concept_id_to_values[concept_id]
def get_concepts(self, concept_id, concept_sources = None, concept_types = None):
concepts = []
values = self.get_values_by_concept_id(concept_id)
for source, vals in values.iteritems():
if concept_sources is not None and source not in concept_sources:
continue
for concept, source_id, concept_type in vals:
if concept_types is not None and concept_type not in concept_types:
continue
concepts.append((source, concept, concept_type))
#else:
# print concept_type
#if len(concepts) == 0:
# raise ValueError("Concept not found")
return concepts
def get_relations(self, relation_types = None, relation_a_types = None, source_types = None): # , "may_treat", "may_be_treated"
"""
        Parses the MRREL file to gather relation info; typically called with relation type parameters, and the results are cached on the instance.
"""
if self.concept_id_to_relations is None:
self.concept_id_to_relations = {}
f = open(self.file_name_rel)
header_names = ["CUI1", "AUI1", "STYPE1", "REL", "CUI2", "AUI2", "STYPE2", "RELA", "RUI", "SRUI", "SAB", "SL", "RG", "DIR", "SUPPRESS", "CVF", "dummy"]
col_to_idx = dict((val.lower(), i) for i, val in enumerate(header_names))
for line in f:
words = line.strip("\n").split(self.delim)
relation = words[col_to_idx["rel"]]
relation_a = words[col_to_idx["rela"]]
if relation_types is not None and relation not in relation_types:
continue
if relation_a_types is not None and relation_a not in relation_a_types:
continue
source_id = words[col_to_idx["cui1"]]
target_id = words[col_to_idx["cui2"]]
source = words[col_to_idx["sab"]]
if source_types is not None and source not in source_types:
continue
d = self.concept_id_to_relations.setdefault(target_id, {})
d.setdefault(source_id, []).append((relation, source))
if relation_a != "":
d[source_id].append((relation_a, source))
return self.concept_id_to_relations
def get_ontology(self, root_concept = None, relation_types = None, relation_a_types = None, source_types = None):
"""
        Builds the ontology graph (tree) from the MRREL file; typically called with relation type parameters, and the result is cached as a networkx DiGraph.
"""
if self.ontology is None:
self.ontology = networkx.DiGraph()
f = open(self.file_name_rel)
header_names = ["CUI1", "AUI1", "STYPE1", "REL", "CUI2", "AUI2", "STYPE2", "RELA", "RUI", "SRUI", "SAB", "SL", "RG", "DIR", "SUPPRESS", "CVF", "dummy"]
col_to_idx = dict((val.lower(), i) for i, val in enumerate(header_names))
i = 0
for line in f:
words = line.strip("\n").split(self.delim)
source_id = words[col_to_idx["cui1"]]
target_id = words[col_to_idx["cui2"]]
relation = words[col_to_idx["rel"]]
relation_a = words[col_to_idx["rela"]]
source = words[col_to_idx["sab"]]
if relation_types is not None and relation not in relation_types:
continue
if relation_a_types is not None and relation_a not in relation_a_types:
continue
if source_types is not None and source not in source_types:
continue
#if source_id == root or target_id == root:
# print self.get_concepts(source_id), relation, self.get_concepts(target_id), source
self.ontology.add_edge(target_id, source_id)
i += 1
#if i > 1000:
# break
self.ontology = self.ontology.reverse()
if root_concept is not None:
root = self.get_concept_id(root_concept)
g = get_tree_rooted_at(self.ontology, root)
else:
g = self.ontology
return g
def get_drug_disease_relations(self):
drug_to_diseases = {}
concept_types = set(["MH", "PF", "PT", "PN", "EN", "EP", "FN", "SY", "PM"])
for nodes in self.get_ontology(root_concept = "Pharmaceutical / biologic product", relation_a_types = set(["isa"]), source_types = set(["SNOMEDCT"])).edges():
for node in nodes:
try:
rels = self.get_relations(relation_a_types=set(["treats", "may_treat"]), source_types = None)[node]
except:
continue
for cid, values in rels.iteritems():
relation, source = values[0]  # each entry of values is a (relation, source) tuple
#if relation != "treats":
# continue
for source, concept, concept_type in self.get_concepts(node, concept_types = concept_types):
for source2, concept2, concept_type2 in self.get_concepts(cid, concept_types = concept_types):
drug_to_diseases.setdefault(concept, set()).add(concept2)
return drug_to_diseases
def get_tree_rooted_at(g, root):
neighbors = list(g.neighbors(root))  # materialize; newer NetworkX versions return an iterator
nodes_selected = set([root]) | set(neighbors)
while True:
neighbors_inner = set()
for node in neighbors:
neighbors_inner |= set(g.neighbors(node))
neighbors = set(list(neighbors_inner))
#if len(neighbors) == 0: # does not work probably due to circularity
# break
if len(neighbors - nodes_selected) == 0:
break
nodes_selected |= neighbors_inner
return g.subgraph(nodes_selected)
def get_mesh_id_mapping(desc_file, rel_file, only_diseases = True, dump_file = None):
if dump_file is not None and os.path.exists(dump_file):
values = cPickle.load(open(dump_file))
source_id_to_concept, concept_id_to_mesh_id, source_id_to_concepts = values
return source_id_to_concept, concept_id_to_mesh_id, source_id_to_concepts
umls = UMLS(desc_file, rel_file)
concept_ids_disease = None
if only_diseases: #! This excludes several synonyms (includes only synonyms of the concept id that is part of the MeSH diseases)
g = get_mesh_disease_ontology(desc_file, rel_file, umls=umls, dump_file=None if dump_file is None else dump_file + ".ontology")
concept_ids_disease = set(g.nodes())
source_id_to_concept = {} # only main headers
source_id_to_concepts = {} # all concepts including synonyms
concept_id_to_mesh_id = {}
for concept_id, values in umls.concept_id_to_values.iteritems():
if concept_ids_disease is not None and concept_id not in concept_ids_disease:
continue
for concept, source_id, concept_type in values["MSH"]:
if concept_type == "MH": # main heading
source_id_to_concept[source_id] = concept
source_id_to_concepts.setdefault(source_id, set()).add(concept)
#if concept_id in concept_id_to_mesh_id and concept_id_to_mesh_id[concept_id] != source_id:
# print "Inconsistency", concept_id, source_id
concept_id_to_mesh_id[concept_id] = source_id
if dump_file is not None:
values = (source_id_to_concept, concept_id_to_mesh_id, source_id_to_concepts)
cPickle.dump(values, open(dump_file, 'w'))
return source_id_to_concept, concept_id_to_mesh_id, source_id_to_concepts
def get_mesh_disease_ontology(desc_file, rel_file, umls = None, dump_file = None):
if dump_file is not None and os.path.exists(dump_file):
g = cPickle.load(open(dump_file))
return g
if umls is None:
umls = UMLS(desc_file, rel_file)
root = "Diseases (MeSH Category)" #! Consider adding Mental disorders as well
sources = set(["MSH"])
relations = set(["CHD"])
g = umls.get_ontology(root_concept = root, relation_types = relations, source_types = sources)
#print "Disease ontology:", len(g.nodes()), len(g.edges())
#for node in g.neighbors(umls.get_concept_id(root)):
# print node, umls.get_concepts(node, concept_sources = sources)
if dump_file is not None:  # only cache when a dump file was requested
cPickle.dump(g, open(dump_file, 'w'))
return g
def get_mesh_id_to_disease_category(desc_file, rel_file, dump_file = None):
g = get_mesh_disease_ontology(desc_file, rel_file, dump_file = dump_file)
root = "C0012674" # "Diseases (MeSH Category)"
concept_id_to_top_ids = {}
for parent in g.neighbors(root):
t = get_tree_rooted_at(g, parent)
for node in t.nodes():
concept_id_to_top_ids.setdefault(node, []).append(parent)
return concept_id_to_top_ids
def get_snomedct_drug_ontology(desc_file, rel_file, umls = None):
if umls is None:
umls = UMLS(desc_file, rel_file)
root = "Pharmaceutical / biologic product"
sources = set(["SNOMEDCT"])
relations = set(["isa"])
g = umls.get_ontology(root_concept = root, relation_types = relations, source_types = sources)
return g
def get_basic_info(desc_file, rel_file):
u = UMLS(desc_file, rel_file)
concept = "Diabetes Mellitus" #"Triazole antifungals"
sources = set(["MSH"]) # set(["SNOMEDCT"])
relations = set(["CHD"]) # set(["isa"])
concept_id = u.get_concept_id(concept)
print(concept, concept_id)
concepts = u.get_concepts(concept_id, concept_sources = sources)
print(concepts)
root = "Diseases (MeSH Category)" #"Pharmaceutical / biologic product"
g = u.get_ontology(root_concept = root, relation_types = relations, source_types = sources)
print(len(g.nodes()), len(g.edges()))
print(concept_id, g.edges([concept_id]))
for s, v in g.edges([concept_id]):
print(s, v, u.get_concepts(v, concept_sources = sources))
concept_id = "C0011849" #"C0360363" # Azole antifungal
rels = u.get_relations(relation_types = relations, source_types = sources)[concept_id]
for cid, values in rels.iteritems():
print(cid, values)
return
def get_drug_info(desc_file, rel_file):
u = UMLS(desc_file, rel_file)
drug_to_diseases = u.get_drug_disease_relations()
for drug, diseases in drug_to_diseases.iteritems():
print(drug, diseases)
return
def get_disease_specific_drugs(umls, selected_drugs, name_to_drug, synonym_to_drug, phenotypes):
drug_to_diseases = umls.get_drug_disease_relations()
disease_to_drugs = {}
for drug, diseases in drug_to_diseases.iteritems():
drug = drug.split()[0].lower()
if drug in name_to_drug:
drugbank_id = name_to_drug[drug]
elif drug in synonym_to_drug:
drugbank_id = synonym_to_drug[drug]
else:
continue
if drugbank_id not in selected_drugs:
continue
for description in diseases:
description = description.lower()
for phenotype in phenotypes:
disease_mod = phenotype.replace(" and ", ", ")
phrases = disease_mod.split(",")
values = []
for phrase in phrases:
inner_values = []
words = phrase.strip().split()
for i, token in enumerate(words):
if token.endswith("'s"):
token = token[:-2]
if i == len(words) - 1:
if token[-1] == "s":
token = token[:-1]
if token in ("disease", "disorder", "syndrome"):
continue
inner_values.append(token)
#if len(inner_values) > 0:
values.append(" ".join(inner_values))
if all([ description.find(word.strip()) != -1 for word in values ]): # phenotype.split(",")
disease_to_drugs.setdefault(phenotype, set()).add(drugbank_id)
return disease_to_drugs
def old_get_disease_specific_drugs(umls, drug_to_name, phenotypes):
import re
#drug_to_diseases = {"telmisartan": set(['Diabetic renal disease', 'congestive cardiac failure', 'congestive heart failure chf', 'left ventricular dysfunction', 'HBP', 'failure congestive heart'])
drug_to_diseases = umls.get_drug_disease_relations()
exps = [ re.compile(keyword.lower()) for keyword in phenotypes ]
drug_id_to_exp = {}
for drug_id, keyword in drug_to_name.iteritems():
try:
for l in "[{}]":
keyword = keyword.replace(l, "_")
exp = re.compile(keyword.lower())
except:
print(keyword)
continue
drug_id_to_exp[drug_id] = exp
disease_to_drugs = {}
for drug, diseases in drug_to_diseases.iteritems():
#drug = drug.lower().split()[0]
#print drug, diseases
drugbank_ids = [] #None
for drug_id, drug_name in drug_to_name.iteritems():
if drug_id not in drug_id_to_exp:
continue
exp_drug = drug_id_to_exp[drug_id]
if exp_drug.search(drug.lower()) is not None:
#if len(drugbank_ids) > 0: # is not None:
#raise ValueError("Duplicate match for drug " + drug_id)
#print "Duplicate match for drug ", drug, drug_id, drugbank_ids
drugbank_ids.append(drug_id)
if len(drugbank_ids) == 0:
continue
for disease, exp in zip(phenotypes, exps):
if any(map(lambda x: x is not None, [ exp.search(description.lower()) for description in diseases ])):
selected_drugbank_id = None
length = 0
for drugbank_id in drugbank_ids:
# choose drug with longer name
val = len(drug_to_name[drugbank_id])
if val > length:
selected_drugbank_id = drugbank_id
length = val
#if len(drugbank_ids) > 1:
# print selected_drugbank_id, drugbank_ids
disease_to_drugs.setdefault(disease, set()).add(selected_drugbank_id)
return disease_to_drugs
if __name__ == "__main__":
main()
|
|
# coding=utf-8
# Copyright 2017 The DLT2T Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data generators for translation data-sets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tarfile
# Dependency imports
from DLT2T.data_generators import generator_utils
from DLT2T.data_generators import problem
from DLT2T.data_generators import text_encoder
from DLT2T.utils import registry
import tensorflow as tf
# End-of-sentence marker.
EOS = text_encoder.EOS_ID
FLAGS = tf.flags.FLAGS
@registry.register_problem
class DuallearningEnde(problem.Text2TextProblem):
@property
def is_character_level(self):
return False
@property
def vocab_name(self):
return "vocab.endefr"
@property
def targeted_vocab_size(self):
return 2**15 # 32768
@property
def input_space_id(self):
return problem.SpaceID.EN_TOK
@property
def target_space_id(self):
return problem.SpaceID.DE_TOK
@property
def num_shards(self):
return 1
@property
def use_subword_tokenizer(self):
return True
def generate_data(self, data_dir, tmp_dir, train_mode, task_id=-1):
train_paths = self.training_filepaths(
data_dir, self.num_shards, shuffled=False)
dev_paths = self.dev_filepaths(
data_dir, self.num_dev_shards, shuffled=False)
if self.use_train_shards_for_dev:
all_paths = train_paths + dev_paths
generator_utils.generate_files(
self.generator(data_dir, tmp_dir, True, train_mode), all_paths)
generator_utils.shuffle_dataset(all_paths)
else:
generator_utils.generate_dataset_and_shuffle(
self.generator(data_dir, tmp_dir, True, train_mode), train_paths,
self.generator(data_dir, tmp_dir, False, train_mode), dev_paths)
def generator(self, data_dir, tmp_dir, train, train_mode):
symbolizer_vocab = generator_utils.get_or_generate_vocab(
data_dir, tmp_dir, self.vocab_file, self.targeted_vocab_size)
#symbolizer_vocab = text_encoder.SubwordTextEncoder(os.path.join(data_dir, self.vocab_file))
datasets = _DUAL_ENDE_TRAIN_DATASETS if train else _DUAL_ENDE_TEST_DATASETS
if train:
return token_generator(
train = train,
train_mode = train_mode,
A_path = os.path.join(data_dir,datasets[0]),
B_path = os.path.join(data_dir,datasets[1]),
A_m_path = os.path.join(data_dir,datasets[2]),
B_m_path = os.path.join(data_dir,datasets[3]),
A_hat_path = os.path.join(data_dir,datasets[4]),
B_hat_path = os.path.join(data_dir,datasets[5]),
A_score_path = os.path.join(data_dir,datasets[6]),
B_score_path = os.path.join(data_dir,datasets[7]),
token_vocab = symbolizer_vocab,
eos = EOS)
else:
return token_generator(
train = train,
train_mode = None,
A_path = os.path.join(data_dir,datasets[0]),
B_path = os.path.join(data_dir,datasets[1]),
token_vocab = symbolizer_vocab,
eos=EOS)
def preprocess_example(self, examples, mode, hparams):
del mode
max_seq_length = min(max(hparams.max_input_seq_length,0),max(hparams.max_target_seq_length,0))
'''
if hparams.max_input_seq_length > 0:
examples['A'] = examples['A'][:hparams.max_input_seq_length]
examples['B_m'] = examples['B_hat'][:hparams.max_input_seq_length]
examples['A_hat'] = examples['A_hat'][:hparams.max_input_seq_length]
if hparams.max_target_seq_length > 0:
examples['B'] = examples['B'][:hparams.max_target_seq_length]
examples['B_m'] = examples['B_m'][:hparams.max_target_seq_length]
examples['A_m'] = examples['A_m'][:hparams.max_input_seq_length]
'''
if max_seq_length > 0:
print("######################## It is invoked!!", max_seq_length)
examples['A'] = examples['A'][:max_seq_length]
examples['B'] = examples['B'][:max_seq_length]
examples['A_m'] = examples['A_m'][:max_seq_length]
examples['B_hat'] = examples['B_hat'][:max_seq_length]
examples['B_m'] = examples['B_m'][:max_seq_length]
examples['A_hat'] = examples['A_hat'][:max_seq_length]
else:
print("######################## It is NOT invoked!!", max_seq_length)
'''
if hparams.prepend_mode != "none":
examples["targets"] = tf.concat(
[examples["inputs"], [0], examples["targets"]], 0)'''
return examples
def example_reading_spec(self):
data_fields = {
'A': tf.VarLenFeature(tf.int64),
'B': tf.VarLenFeature(tf.int64),
'A_m': tf.VarLenFeature(tf.int64),
'B_hat': tf.VarLenFeature(tf.int64),
'B_m': tf.VarLenFeature(tf.int64),
'A_hat': tf.VarLenFeature(tf.int64),
'A_score': tf.VarLenFeature(tf.float32),
'B_score': tf.VarLenFeature(tf.float32),
}
data_items_to_decoders = None
return (data_fields, data_items_to_decoders)
def token_generator(
train,
train_mode,
A_path,
B_path,
A_m_path=None,
B_m_path=None,
A_hat_path=None,
B_hat_path=None,
A_score_path=None,
B_score_path=None,
token_vocab=None,
eos=None):
'''
Refer to token_generator in wmt.py.
Yields:
Dictionaries keyed by "A" and "B" (plus "A_m", "B_m", "A_hat", "B_hat",
"A_score" and "B_score" in dual training mode), where the values are
integer lists converted from the tokens in the corresponding file lines.
'''
tf.logging.info('Generating tokens...')
eos_list = [] if eos is None else [eos]
if not train:
with tf.gfile.GFile(A_path, mode="r") as A_file:
with tf.gfile.GFile(B_path, mode="r") as B_file:
A, B = A_file.readline(), B_file.readline()
while A and B:
A_ints = token_vocab.encode(A.strip()) + eos_list
B_ints = token_vocab.encode(B.strip()) + eos_list
yield {"A": A_ints, "B": B_ints}
A, B = A_file.readline(), B_file.readline()
elif train_mode.startswith("pretrain"):
with tf.gfile.GFile(A_path, mode="r") as A_file:
with tf.gfile.GFile(B_path, mode="r") as B_file:
A = A_file.readline()
B = B_file.readline()
while A and B:
A_ints = token_vocab.encode(A.strip()) + eos_list
B_ints = token_vocab.encode(B.strip()) + eos_list
yield {'A':A_ints, 'B':B_ints}
A = A_file.readline()
B = B_file.readline()
else:
with tf.gfile.GFile(A_path, mode="r") as A_file:
with tf.gfile.GFile(B_path, mode="r") as B_file:
with tf.gfile.GFile(A_m_path, mode="r") as A_m_file:
with tf.gfile.GFile(B_m_path, mode="r") as B_m_file:
with tf.gfile.GFile(A_hat_path, mode="r") as A_hat_file:
with tf.gfile.GFile(B_hat_path, mode="r") as B_hat_file:
with tf.gfile.GFile(A_score_path, mode="r") as A_score_file:
with tf.gfile.GFile(B_score_path, mode="r") as B_score_file:
A = A_file.readline()
B = B_file.readline()
A_m = A_m_file.readline()
B_m = B_m_file.readline()
A_hat = A_hat_file.readline()
B_hat = B_hat_file.readline()
A_score = A_score_file.readline()
B_score = B_score_file.readline()
while A and B and A_m and B_m and A_hat and B_hat and A_score and B_score:
A_ints = token_vocab.encode(A.strip()) + eos_list
B_ints = token_vocab.encode(B.strip()) + eos_list
A_m_ints = token_vocab.encode(A_m.strip()) + eos_list
B_m_ints = token_vocab.encode(B_m.strip()) + eos_list
A_hat_ints = token_vocab.encode(A_hat.strip()) + eos_list
B_hat_ints = token_vocab.encode(B_hat.strip()) + eos_list
A_score = [float(A_score.strip())]
B_score = [float(B_score.strip())]
yield {'A':A_ints, 'B':B_ints, 'A_m':A_m_ints, 'B_m':B_m_ints, 'A_hat':A_hat_ints, 'B_hat':B_hat_ints, 'A_score':A_score, 'B_score':B_score}
A = A_file.readline()
B = B_file.readline()
A_m = read_mono_sentence(A_m_file)
B_m = read_mono_sentence(B_m_file)
A_hat = read_mono_sentence(A_hat_file)
B_hat = read_mono_sentence(B_hat_file)
A_score = A_score_file.readline()
B_score = B_score_file.readline()
def read_mono_sentence(mono_file):
line = mono_file.readline()
if not line:
mono_file.seek(0)
line = mono_file.readline()
return line
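# Illustrative sketch (not part of the original file): consuming the generator in
# evaluation mode, assuming the dev files below exist in data_dir and
# "symbolizer_vocab" is a SubwordTextEncoder built beforehand.
#
#   for example in token_generator(train=False, train_mode=None,
#                                  A_path="dev_ende.en", B_path="dev_ende.de",
#                                  token_vocab=symbolizer_vocab, eos=EOS):
#       print(example["A"], example["B"])  # integer token ids per sentence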
_DUAL_ENDE_TRAIN_DATASETS = [
'parallel_ende.en',
'parallel_ende.de',
'mono_ende.en',
'mono_ende.de',
'infer_ende.en',
'infer_ende.de',
'parallel_ende_score.en',
'parallel_ende_score.de',
]
_DUAL_ENDE_TEST_DATASETS = [
'dev_ende.en',
'dev_ende.de',
]
|
|
""" Example limit setting script
This script provides an example of how to use the limit setting tools,
built into echidna, to set a 90% confidence limit on neutrinoless double
beta decay.
The numbers used in scaling the signal/backgrounds should set a
reasonable limit, but are not necessariy the optimum choice of
parameters.
Note that this script assumes the user has already made a fiducial volume
cut when creating the spectra and that the energy parameter is "energy_mc" for all
spectra.
Examples:
To use simply run the script::
$ python zero_nu_limit.py -s /path/to/signal.hdf5 -t /path/to/2n2b.hdf5
-b /path/to/B8_Solar.hdf5
.. note:: Use the -v option to print out progress and timing information
"""
import numpy
import echidna
import echidna.output.store as store
import echidna.limit.limit_config as limit_config
import echidna.limit.limit_setting as limit_setting
import echidna.limit.chi_squared as chi_squared
import echidna.output.plot_chi_squared_root as plot_chi_squared
from echidna.calc import decay
import argparse
import os
class ReadableDir(argparse.Action):
""" Custom argparse action
Adapted from http://stackoverflow.com/a/11415816
Checks that hdf5 files supplied via command line exist and can be read
"""
def __call__(self, parser, namespace, values, option_string=None):
prospective_dirs = []
if type(values) is str:
prospective_dirs.append(values)
elif type(values) is list:
prospective_dirs = values
else:
raise TypeError("Invalid type for arg.")
for prospective_dir in prospective_dirs:
if not os.path.isfile(prospective_dir):
raise argparse.ArgumentTypeError(
"ReadableDir:{0} not a valid path".format(prospective_dir))
if not os.access(prospective_dir, os.R_OK):
raise argparse.ArgumentTypeError(
"ReadableDir:{0} is not readable".format(prospective_dir))
setattr(namespace, self.dest, values) # keeps original format
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Example limit setting "
"script.")
parser.add_argument("-v", "--verbose", action="store_true",
help="Print progress and timing information")
parser.add_argument("-s", "--signal", action=ReadableDir,
help="Supply path for signal hdf5 file")
parser.add_argument("-t", "--two_nu", action=ReadableDir,
help="Supply paths for Te130_2n2b hdf5 files")
parser.add_argument("-b", "--b8_solar", action=ReadableDir,
help="Supply paths for B8_Solar hdf5 files")
args = parser.parse_args()
# REF: SNO+-doc-2593-v9 (as used by Andy)
roi = (2.46, 2.68)
# Create signal spectrum
Te130_0n2b = store.load(args.signal)
Te130_0n2b.scale(200.)
unshrunk = Te130_0n2b.sum()
Te130_0n2b = store.load(args.signal)
shrink_dict = {"energy_mc_low": roi[0], "energy_mc_high": roi[1]}
Te130_0n2b.shrink(**shrink_dict)
Te130_0n2b.scale(200.)
shrunk = Te130_0n2b.sum()
scaling = shrunk/unshrunk
# Set decay converter
# REF: SNO+-doc-1728-v2 (all three values)
atm_weight_iso = 129.9062244
atm_weight_nat = 127.603
abundance = 0.3408
# REF: J. Kotila & F. Iachello, Phys. Rev. C 85, 034316- (2012)
phase_space = 3.69e-14
# REF: J. Barea et al. Phys. Rev. C 87, 014315- (2013)
matrix_element = 4.03
converter = decay.DBIsotope(
"Te130", atm_weight_iso, atm_weight_nat, abundance, phase_space,
matrix_element, roi_efficiency=scaling)
Te130_0n2b = store.load(args.signal) # Reload spectrum
# Create background spectra
Te130_2n2b = store.load(args.two_nu)
Te130_2n2b.scale(1.e6)
unshrunk = Te130_2n2b.sum()
Te130_2n2b = store.load(args.two_nu)
Te130_2n2b.scale(1.e6)
shrink_dict = {"energy_mc_low": roi[0], "energy_mc_high": roi[1]}
Te130_2n2b.shrink(**shrink_dict)
shrunk = Te130_2n2b.sum()
scaling = shrunk / unshrunk
# Set decay converter
# REF: J. Kotila & F. Iachello, Phys. Rev. C 85, 034316- (2012)
phase_space = 1.529e-18
# REF: J. Barea et al. Phys. Rev. C 87, 014315- (2013)
matrix_element = 3.31
two_nu_converter = decay.DBIsotope(
"Te130_2n2b", atm_weight_iso, atm_weight_nat, abundance, phase_space,
matrix_element, roi_efficiency=scaling)
Te130_2n2b = store.load(args.two_nu) # Reload spectra
B8_Solar = store.load(args.b8_solar)
# 1/ Set limit with no penalty term
# Create dictionary of backgrounds and priors
# REF: R. Arnold et al. (NEMO-3 Collaboration), PRL 107, 062504 (2011)
two_nu_half_life = 7.0e20
Te130_2n2b_prior = two_nu_converter.half_life_to_counts(two_nu_half_life,
roi_cut=False)
# REF: SNO+-doc-507v27 - Valentina's Numbers
B8_Solar_prior = 1021. * 5. # 1021 events/year for 5 year livetime
fixed_backgrounds = {Te130_2n2b._name: [Te130_2n2b, Te130_2n2b_prior],
B8_Solar._name: [B8_Solar, B8_Solar_prior]}
# Create fixed spectrum. Pre-shrink here if pre-shrinking in LimitSetting
fixed = limit_setting.make_fixed_background(fixed_backgrounds,
roi=roi)
# Initialise limit setting class
set_limit = limit_setting.LimitSetting(Te130_0n2b, fixed_background=fixed,
roi=roi, pre_shrink=True,
verbose=args.verbose)
# Configure Te130_0n2b
Te130_0n2b_counts = numpy.arange(0.5, 500.0, 0.5, dtype=float)
Te130_0n2b_prior = 0. # Setting a 90% CL so no signal in observed
Te130_0n2b_config = limit_config.LimitConfig(Te130_0n2b_prior,
Te130_0n2b_counts)
set_limit.configure_signal(Te130_0n2b_config)
# Set chi squared calculator
calculator = chi_squared.ChiSquared()
set_limit.set_calculator(calculator)
# Calculate confidence limit
sig_num_decays = set_limit.get_limit_no_float()
half_life = converter.counts_to_half_life(sig_num_decays)
print "90% CL with no penalty at: " + str(sig_num_decays) + " ROI counts"
print "90% CL with no penalty at: " + str(half_life) + " y"
# 2/ Now try fixing B8_Solar and floating Te130_2n2b
Te130_0n2b = store.load(args.signal)
# Reload background spectra
Te130_2n2b = store.load(args.two_nu)
B8_Solar = store.load(args.b8_solar)
fixed_backgrounds = {B8_Solar._name: [B8_Solar, B8_Solar_prior]}
fixed = limit_setting.make_fixed_background(fixed_backgrounds,
roi=roi)
# List of backgrounds to float
floating = [Te130_2n2b]
# Reinitialise limit setting
set_limit = limit_setting.LimitSetting(Te130_0n2b, fixed_background=fixed,
floating_backgrounds=floating,
roi=roi, pre_shrink=True,
verbose=args.verbose)
# Configure Te130_0n2b
Te130_0n2b_penalty_config = limit_config.LimitConfig(Te130_0n2b_prior,
Te130_0n2b_counts)
set_limit.configure_signal(Te130_0n2b_penalty_config)
# Set config for Te130_2n2b
# Sigma of rate:
# REF: R. Arnold et al. (NEMO-3 Collaboration), Phys. Rev. Lett. 107,
# 062504 (2011), via SNO+-doc-3000-v1 (Andy's doc on systematics)
Te130_2n2b_sigma = 0.203 * Te130_2n2b_prior
# Floating range (+/- 1 sigma):
Te130_2n2b_counts = numpy.linspace(0.797*Te130_2n2b_prior,
1.203*Te130_2n2b_prior, 51)
Te130_2n2b_penalty_config = limit_config.LimitConfig(
Te130_2n2b_prior, Te130_2n2b_counts, Te130_2n2b_sigma)
set_limit.configure_background(Te130_2n2b._name,
Te130_2n2b_penalty_config,
plot_systematic=True)
# Set chi squared calculator
set_limit.set_calculator(calculator)
# Calculate confidence limit
sig_num_decays = set_limit.get_limit()
half_life = converter.counts_to_half_life(sig_num_decays)
print ("90% CL with Te130_2n2b floating at: " +
str(sig_num_decays) + " ROI counts")
print "90% CL with Te130_2n2b floating at: " + str(half_life) + " y"
fig1 = plot_chi_squared.chi_squared_vs_signal(Te130_0n2b_config)
fig1.Draw("AP")
raw_input("RETURN to continue")
for syst_analyser in set_limit._syst_analysers.values():
store.dump_ndarray(syst_analyser._name+"_2.hdf5", syst_analyser)
# 3/ Fix no backgrounds and float all
Te130_0n2b = store.load(args.signal)
# Reload background spectra
Te130_2n2b = store.load(args.two_nu)
B8_Solar = store.load(args.b8_solar)
# List of backgrounds to float
floating = [Te130_2n2b, B8_Solar]
# Reinitialise limit setting
set_limit = limit_setting.LimitSetting(Te130_0n2b,
floating_backgrounds=floating,
roi=roi, pre_shrink=True,
verbose=args.verbose)
# Configure Te130_0n2b
Te130_0n2b_penalty_config = limit_config.LimitConfig(Te130_0n2b_prior,
Te130_0n2b_counts)
set_limit.configure_signal(Te130_0n2b_penalty_config)
Te130_2n2b_penalty_config = limit_config.LimitConfig(
Te130_2n2b_prior, Te130_2n2b_counts, Te130_2n2b_sigma)
set_limit.configure_background(Te130_2n2b._name,
Te130_2n2b_penalty_config,
plot_systematic=True)
# Set config for B8_Solar
# Sigma of rate:
# REF: R. Arnold et al. (NEMO-3 Collaboration), Phys. Rev. Lett. 107,
# 062504 (2011), via SNO+-doc-3000-v1 (Andy's doc on systematics)
B8_Solar_sigma = 0.04 * B8_Solar_prior
# Floating range (+/- 1 sigma):
B8_Solar_counts = numpy.linspace(0.96*B8_Solar_prior,
1.04*B8_Solar_prior, 11)
# 11 bins to make sure midpoint (no variation from prior) is included
B8_Solar_penalty_config = limit_config.LimitConfig(B8_Solar_prior,
B8_Solar_counts,
B8_Solar_sigma)
set_limit.configure_background(B8_Solar._name, B8_Solar_penalty_config,
plot_systematic=True)
# Set chi squared calculator
set_limit.set_calculator(calculator)
# Calculate confidence limit
sig_num_decays = set_limit.get_limit()
half_life = converter.counts_to_half_life(sig_num_decays)
print ("90% CL, with all backgrounds floating, at: " +
str(sig_num_decays) + " ROI counts")
print "90% CL, with all backgrounds floating, at: " + str(half_life) + " y"
fig2 = plot_chi_squared.chi_squared_vs_signal(Te130_0n2b_config)
fig2.Draw("AP")
raw_input("RETURN to continue")
for syst_analyser in set_limit._syst_analysers.values():
store.dump_ndarray(syst_analyser._name+"_3.hdf5", syst_analyser)
store.dump_ndarray("Te130_0n2b_config.hdf5", Te130_0n2b_config)
store.dump_ndarray("Te130_0n2b_penalty_config.hdf5",
Te130_0n2b_penalty_config)
|
|
"""
Core components of Home Assistant.
Home Assistant is a Home Automation framework for observing the state
of entities and reacting to changes.
"""
import asyncio
import datetime
import enum
import functools
from ipaddress import ip_address
import logging
import os
import pathlib
import re
import threading
from time import monotonic
from types import MappingProxyType
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
Coroutine,
Dict,
Iterable,
List,
Mapping,
Optional,
Set,
TypeVar,
Union,
cast,
)
import attr
import voluptuous as vol
import yarl
from homeassistant import block_async_io, loader, util
from homeassistant.const import (
ATTR_DOMAIN,
ATTR_FRIENDLY_NAME,
ATTR_NOW,
ATTR_SECONDS,
ATTR_SERVICE,
ATTR_SERVICE_DATA,
CONF_UNIT_SYSTEM_IMPERIAL,
EVENT_CALL_SERVICE,
EVENT_CORE_CONFIG_UPDATE,
EVENT_HOMEASSISTANT_CLOSE,
EVENT_HOMEASSISTANT_FINAL_WRITE,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STARTED,
EVENT_HOMEASSISTANT_STOP,
EVENT_SERVICE_REGISTERED,
EVENT_SERVICE_REMOVED,
EVENT_STATE_CHANGED,
EVENT_TIME_CHANGED,
EVENT_TIMER_OUT_OF_SYNC,
LENGTH_METERS,
MATCH_ALL,
__version__,
)
from homeassistant.exceptions import (
HomeAssistantError,
InvalidEntityFormatError,
InvalidStateError,
ServiceNotFound,
Unauthorized,
)
from homeassistant.util import location, network
from homeassistant.util.async_ import fire_coroutine_threadsafe, run_callback_threadsafe
import homeassistant.util.dt as dt_util
from homeassistant.util.thread import fix_threading_exception_logging
from homeassistant.util.timeout import TimeoutManager
from homeassistant.util.unit_system import IMPERIAL_SYSTEM, METRIC_SYSTEM, UnitSystem
import homeassistant.util.uuid as uuid_util
# Typing imports that create a circular dependency
if TYPE_CHECKING:
from homeassistant.auth import AuthManager
from homeassistant.config_entries import ConfigEntries
from homeassistant.components.http import HomeAssistantHTTP
block_async_io.enable()
fix_threading_exception_logging()
T = TypeVar("T")
_UNDEF: dict = {}
# pylint: disable=invalid-name
CALLABLE_T = TypeVar("CALLABLE_T", bound=Callable)
CALLBACK_TYPE = Callable[[], None]
# pylint: enable=invalid-name
CORE_STORAGE_KEY = "core.config"
CORE_STORAGE_VERSION = 1
DOMAIN = "homeassistant"
# How long to wait to log tasks that are blocking
BLOCK_LOG_TIMEOUT = 60
# How long we wait for the result of a service call
SERVICE_CALL_LIMIT = 10 # seconds
# Source of core configuration
SOURCE_DISCOVERED = "discovered"
SOURCE_STORAGE = "storage"
SOURCE_YAML = "yaml"
# How long to wait until things that run on startup have to finish.
TIMEOUT_EVENT_START = 15
_LOGGER = logging.getLogger(__name__)
def split_entity_id(entity_id: str) -> List[str]:
"""Split a state entity_id into domain, object_id."""
return entity_id.split(".", 1)
VALID_ENTITY_ID = re.compile(r"^(?!.+__)(?!_)[\da-z_]+(?<!_)\.(?!_)[\da-z_]+(?<!_)$")
def valid_entity_id(entity_id: str) -> bool:
"""Test if an entity ID is a valid format.
Format: <domain>.<entity> where both are slugs.
"""
return VALID_ENTITY_ID.match(entity_id) is not None
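# Illustrative examples (not part of the original module) of the format enforced
# by VALID_ENTITY_ID above:
#
#   valid_entity_id("light.kitchen")   # True: <domain>.<object_id>, both slugs
#   valid_entity_id("light.Kitchen")   # False: upper-case characters not allowed
#   valid_entity_id("kitchen")         # False: missing the domain separator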
def valid_state(state: str) -> bool:
"""Test if a state is valid."""
return len(state) < 256
def callback(func: CALLABLE_T) -> CALLABLE_T:
"""Annotation to mark method as safe to call from within the event loop."""
setattr(func, "_hass_callback", True)
return func
def is_callback(func: Callable[..., Any]) -> bool:
"""Check if function is safe to be called in the event loop."""
return getattr(func, "_hass_callback", False) is True
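# Minimal usage sketch (assumption, not original code): mark a function as safe
# to run inside the event loop, then verify the marker.
#
#   @callback
#   def handle_event(event: "Event") -> None:
#       """React to an event without blocking the loop."""
#
#   assert is_callback(handle_event)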
class CoreState(enum.Enum):
"""Represent the current state of Home Assistant."""
not_running = "NOT_RUNNING"
starting = "STARTING"
running = "RUNNING"
stopping = "STOPPING"
final_write = "FINAL_WRITE"
stopped = "STOPPED"
def __str__(self) -> str:
"""Return the event."""
return self.value # type: ignore
class HomeAssistant:
"""Root object of the Home Assistant home automation."""
auth: "AuthManager"
http: "HomeAssistantHTTP" = None # type: ignore
config_entries: "ConfigEntries" = None # type: ignore
def __init__(self) -> None:
"""Initialize new Home Assistant object."""
self.loop = asyncio.get_running_loop()
self._pending_tasks: list = []
self._track_task = True
self.bus = EventBus(self)
self.services = ServiceRegistry(self)
self.states = StateMachine(self.bus, self.loop)
self.config = Config(self)
self.components = loader.Components(self)
self.helpers = loader.Helpers(self)
# This is a dictionary that any component can store any data on.
self.data: dict = {}
self.state: CoreState = CoreState.not_running
self.exit_code: int = 0
# If not None, use to signal end-of-loop
self._stopped: Optional[asyncio.Event] = None
# Timeout handler for Core/Helper namespace
self.timeout: TimeoutManager = TimeoutManager()
@property
def is_running(self) -> bool:
"""Return if Home Assistant is running."""
return self.state in (CoreState.starting, CoreState.running)
@property
def is_stopping(self) -> bool:
"""Return if Home Assistant is stopping."""
return self.state in (CoreState.stopping, CoreState.final_write)
def start(self) -> int:
"""Start Home Assistant.
Note: This function is only used for testing.
For regular use, use "await hass.run()".
"""
# Register the async start
fire_coroutine_threadsafe(self.async_start(), self.loop)
# Run forever
try:
# Block until stopped
_LOGGER.info("Starting Home Assistant core loop")
self.loop.run_forever()
finally:
self.loop.close()
return self.exit_code
async def async_run(self, *, attach_signals: bool = True) -> int:
"""Home Assistant main entry point.
Start Home Assistant and block until stopped.
This method is a coroutine.
"""
if self.state != CoreState.not_running:
raise RuntimeError("Home Assistant is already running")
# _async_stop will set this instead of stopping the loop
self._stopped = asyncio.Event()
await self.async_start()
if attach_signals:
# pylint: disable=import-outside-toplevel
from homeassistant.helpers.signal import async_register_signal_handling
async_register_signal_handling(self)
await self._stopped.wait()
return self.exit_code
async def async_start(self) -> None:
"""Finalize startup from inside the event loop.
This method is a coroutine.
"""
_LOGGER.info("Starting Home Assistant")
setattr(self.loop, "_thread_ident", threading.get_ident())
self.state = CoreState.starting
self.bus.async_fire(EVENT_CORE_CONFIG_UPDATE)
self.bus.async_fire(EVENT_HOMEASSISTANT_START)
try:
# Only block for EVENT_HOMEASSISTANT_START listener
self.async_stop_track_tasks()
async with self.timeout.async_timeout(TIMEOUT_EVENT_START):
await self.async_block_till_done()
except asyncio.TimeoutError:
_LOGGER.warning(
"Something is blocking Home Assistant from wrapping up the "
"start up phase. We're going to continue anyway. Please "
"report the following info at http://bit.ly/2ogP58T : %s",
", ".join(self.config.components),
)
# Allow automations to set up the start triggers before changing state
await asyncio.sleep(0)
if self.state != CoreState.starting:
_LOGGER.warning(
"Home Assistant startup has been interrupted. "
"Its state may be inconsistent"
)
return
self.state = CoreState.running
self.bus.async_fire(EVENT_CORE_CONFIG_UPDATE)
self.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
_async_create_timer(self)
def add_job(self, target: Callable[..., Any], *args: Any) -> None:
"""Add job to the executor pool.
target: target to call.
args: parameters for method to call.
"""
if target is None:
raise ValueError("Don't call add_job with None")
self.loop.call_soon_threadsafe(self.async_add_job, target, *args)
@callback
def async_add_job(
self, target: Callable[..., Any], *args: Any
) -> Optional[asyncio.Future]:
"""Add a job from within the event loop.
This method must be run in the event loop.
target: target to call.
args: parameters for method to call.
"""
if target is None:
raise ValueError("Don't call async_add_job with None")
task = None
# Check for partials to properly determine if coroutine function
check_target = target
while isinstance(check_target, functools.partial):
check_target = check_target.func
if asyncio.iscoroutine(check_target):
task = self.loop.create_task(target) # type: ignore
elif asyncio.iscoroutinefunction(check_target):
task = self.loop.create_task(target(*args))
elif is_callback(check_target):
self.loop.call_soon(target, *args)
else:
task = self.loop.run_in_executor( # type: ignore
None, target, *args
)
# If a task is scheduled
if self._track_task and task is not None:
self._pending_tasks.append(task)
return task
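# Usage sketch (assumption, not original code; "async_update_data", "handle_event"
# and "blocking_io" are hypothetical callables): from inside the event loop,
# async_add_job dispatches according to the checks above.
#
#   hass.async_add_job(async_update_data)         # coroutine function -> new task
#   hass.async_add_job(handle_event, event)       # @callback -> loop.call_soon
#   hass.async_add_job(blocking_io, "/tmp/file")  # plain callable -> executor pool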
@callback
def async_create_task(self, target: Coroutine) -> asyncio.tasks.Task:
"""Create a task from within the eventloop.
This method must be run in the event loop.
target: target to call.
"""
task: asyncio.tasks.Task = self.loop.create_task(target)
if self._track_task:
self._pending_tasks.append(task)
return task
@callback
def async_add_executor_job(
self, target: Callable[..., T], *args: Any
) -> Awaitable[T]:
"""Add an executor job from within the event loop."""
task = self.loop.run_in_executor(None, target, *args)
# If a task is scheduled
if self._track_task:
self._pending_tasks.append(task)
return task
@callback
def async_track_tasks(self) -> None:
"""Track tasks so you can wait for all tasks to be done."""
self._track_task = True
@callback
def async_stop_track_tasks(self) -> None:
"""Stop track tasks so you can't wait for all tasks to be done."""
self._track_task = False
@callback
def async_run_job(
self, target: Callable[..., Union[None, Awaitable]], *args: Any
) -> None:
"""Run a job from within the event loop.
This method must be run in the event loop.
target: target to call.
args: parameters for method to call.
"""
if (
not asyncio.iscoroutine(target)
and not asyncio.iscoroutinefunction(target)
and is_callback(target)
):
target(*args)
else:
self.async_add_job(target, *args)
def block_till_done(self) -> None:
"""Block until all pending work is done."""
asyncio.run_coroutine_threadsafe(
self.async_block_till_done(), self.loop
).result()
async def async_block_till_done(self) -> None:
"""Block until all pending work is done."""
# To flush out any call_soon_threadsafe
await asyncio.sleep(0)
start_time: Optional[float] = None
while self._pending_tasks:
pending = [task for task in self._pending_tasks if not task.done()]
self._pending_tasks.clear()
if pending:
await self._await_and_log_pending(pending)
if start_time is None:
# Avoid calling monotonic() until we know
# we may need to start logging blocked tasks.
start_time = 0
elif start_time == 0:
# If we have waited twice then we set the start
# time
start_time = monotonic()
elif monotonic() - start_time > BLOCK_LOG_TIMEOUT:
# We have waited at least three loops and new tasks
# continue to block. At this point we start
# logging all waiting tasks.
for task in pending:
_LOGGER.debug("Waiting for task: %s", task)
else:
await asyncio.sleep(0)
async def _await_and_log_pending(self, pending: Iterable[Awaitable[Any]]) -> None:
"""Await and log tasks that take a long time."""
wait_time = 0
while pending:
_, pending = await asyncio.wait(pending, timeout=BLOCK_LOG_TIMEOUT)
if not pending:
return
wait_time += BLOCK_LOG_TIMEOUT
for task in pending:
_LOGGER.debug("Waited %s seconds for task: %s", wait_time, task)
def stop(self) -> None:
"""Stop Home Assistant and shuts down all threads."""
if self.state == CoreState.not_running: # just ignore
return
fire_coroutine_threadsafe(self.async_stop(), self.loop)
async def async_stop(self, exit_code: int = 0, *, force: bool = False) -> None:
"""Stop Home Assistant and shuts down all threads.
The "force" flag commands async_stop to proceed regardless of
Home Assistan't current state. You should not set this flag
unless you're testing.
This method is a coroutine.
"""
if not force:
# Some tests require async_stop to run,
# regardless of the state of the loop.
if self.state == CoreState.not_running: # just ignore
return
if self.state in [CoreState.stopping, CoreState.final_write]:
_LOGGER.info("async_stop called twice: ignored")
return
if self.state == CoreState.starting:
# This may not work
_LOGGER.warning("async_stop called before startup is complete")
# stage 1
self.state = CoreState.stopping
self.async_track_tasks()
self.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
try:
async with self.timeout.async_timeout(120):
await self.async_block_till_done()
except asyncio.TimeoutError:
_LOGGER.warning(
"Timed out waiting for shutdown stage 1 to complete, the shutdown will continue"
)
# stage 2
self.state = CoreState.final_write
self.bus.async_fire(EVENT_HOMEASSISTANT_FINAL_WRITE)
try:
async with self.timeout.async_timeout(60):
await self.async_block_till_done()
except asyncio.TimeoutError:
_LOGGER.warning(
"Timed out waiting for shutdown stage 2 to complete, the shutdown will continue"
)
# stage 3
self.state = CoreState.not_running
self.bus.async_fire(EVENT_HOMEASSISTANT_CLOSE)
try:
async with self.timeout.async_timeout(30):
await self.async_block_till_done()
except asyncio.TimeoutError:
_LOGGER.warning(
"Timed out waiting for shutdown stage 3 to complete, the shutdown will continue"
)
# Python 3.9+ and backported in runner.py
await self.loop.shutdown_default_executor() # type: ignore
self.exit_code = exit_code
self.state = CoreState.stopped
if self._stopped is not None:
self._stopped.set()
else:
self.loop.stop()
@attr.s(slots=True, frozen=True)
class Context:
"""The context that triggered something."""
user_id: Optional[str] = attr.ib(default=None)
parent_id: Optional[str] = attr.ib(default=None)
id: str = attr.ib(factory=uuid_util.uuid_v1mc_hex)
def as_dict(self) -> dict:
"""Return a dictionary representation of the context."""
return {"id": self.id, "parent_id": self.parent_id, "user_id": self.user_id}
class EventOrigin(enum.Enum):
"""Represent the origin of an event."""
local = "LOCAL"
remote = "REMOTE"
def __str__(self) -> str:
"""Return the event."""
return self.value # type: ignore
class Event:
"""Representation of an event within the bus."""
__slots__ = ["event_type", "data", "origin", "time_fired", "context"]
def __init__(
self,
event_type: str,
data: Optional[Dict[str, Any]] = None,
origin: EventOrigin = EventOrigin.local,
time_fired: Optional[datetime.datetime] = None,
context: Optional[Context] = None,
) -> None:
"""Initialize a new event."""
self.event_type = event_type
self.data = data or {}
self.origin = origin
self.time_fired = time_fired or dt_util.utcnow()
self.context: Context = context or Context()
def as_dict(self) -> Dict:
"""Create a dict representation of this Event.
Async friendly.
"""
return {
"event_type": self.event_type,
"data": dict(self.data),
"origin": str(self.origin),
"time_fired": self.time_fired,
"context": self.context.as_dict(),
}
def __repr__(self) -> str:
"""Return the representation."""
# pylint: disable=maybe-no-member
if self.data:
return f"<Event {self.event_type}[{str(self.origin)[0]}]: {util.repr_helper(self.data)}>"
return f"<Event {self.event_type}[{str(self.origin)[0]}]>"
def __eq__(self, other: Any) -> bool:
"""Return the comparison."""
return ( # type: ignore
self.__class__ == other.__class__
and self.event_type == other.event_type
and self.data == other.data
and self.origin == other.origin
and self.time_fired == other.time_fired
and self.context == other.context
)
class EventBus:
"""Allow the firing of and listening for events."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize a new event bus."""
self._listeners: Dict[str, List[Callable]] = {}
self._hass = hass
@callback
def async_listeners(self) -> Dict[str, int]:
"""Return dictionary with events and the number of listeners.
This method must be run in the event loop.
"""
return {key: len(self._listeners[key]) for key in self._listeners}
@property
def listeners(self) -> Dict[str, int]:
"""Return dictionary with events and the number of listeners."""
return run_callback_threadsafe(self._hass.loop, self.async_listeners).result()
def fire(
self,
event_type: str,
event_data: Optional[Dict] = None,
origin: EventOrigin = EventOrigin.local,
context: Optional[Context] = None,
) -> None:
"""Fire an event."""
self._hass.loop.call_soon_threadsafe(
self.async_fire, event_type, event_data, origin, context
)
@callback
def async_fire(
self,
event_type: str,
event_data: Optional[Dict] = None,
origin: EventOrigin = EventOrigin.local,
context: Optional[Context] = None,
) -> None:
"""Fire an event.
This method must be run in the event loop.
"""
listeners = self._listeners.get(event_type, [])
# EVENT_HOMEASSISTANT_CLOSE should go only to its own listeners
match_all_listeners = self._listeners.get(MATCH_ALL)
if match_all_listeners is not None and event_type != EVENT_HOMEASSISTANT_CLOSE:
listeners = match_all_listeners + listeners
event = Event(event_type, event_data, origin, None, context)
if event_type != EVENT_TIME_CHANGED:
_LOGGER.debug("Bus:Handling %s", event)
if not listeners:
return
for func in listeners:
self._hass.async_add_job(func, event)
def listen(self, event_type: str, listener: Callable) -> CALLBACK_TYPE:
"""Listen for all events or events of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
"""
async_remove_listener = run_callback_threadsafe(
self._hass.loop, self.async_listen, event_type, listener
).result()
def remove_listener() -> None:
"""Remove the listener."""
run_callback_threadsafe(self._hass.loop, async_remove_listener).result()
return remove_listener
@callback
def async_listen(self, event_type: str, listener: Callable) -> CALLBACK_TYPE:
"""Listen for all events or events of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
This method must be run in the event loop.
"""
if event_type in self._listeners:
self._listeners[event_type].append(listener)
else:
self._listeners[event_type] = [listener]
def remove_listener() -> None:
"""Remove the listener."""
self._async_remove_listener(event_type, listener)
return remove_listener
def listen_once(self, event_type: str, listener: Callable) -> CALLBACK_TYPE:
"""Listen once for event of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
Returns function to unsubscribe the listener.
"""
async_remove_listener = run_callback_threadsafe(
self._hass.loop, self.async_listen_once, event_type, listener
).result()
def remove_listener() -> None:
"""Remove the listener."""
run_callback_threadsafe(self._hass.loop, async_remove_listener).result()
return remove_listener
@callback
def async_listen_once(self, event_type: str, listener: Callable) -> CALLBACK_TYPE:
"""Listen once for event of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
Returns registered listener that can be used with remove_listener.
This method must be run in the event loop.
"""
@callback
def onetime_listener(event: Event) -> None:
"""Remove listener from event bus and then fire listener."""
if hasattr(onetime_listener, "run"):
return
# Set variable so that we will never run twice.
# Because the event bus loop might have async_fire queued multiple
# times, it's possible this listener may already be lined up
# multiple times as well.
# This will make sure the second time it does nothing.
setattr(onetime_listener, "run", True)
self._async_remove_listener(event_type, onetime_listener)
self._hass.async_run_job(listener, event)
return self.async_listen(event_type, onetime_listener)
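# Usage sketch (assumption, not original code; "on_started" is a hypothetical
# listener taking the fired Event): subscribe once to the startup event; the
# onetime_listener wrapper above unsubscribes itself before running the listener.
#
#   hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, on_started)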
@callback
def _async_remove_listener(self, event_type: str, listener: Callable) -> None:
"""Remove a listener of a specific event_type.
This method must be run in the event loop.
"""
try:
self._listeners[event_type].remove(listener)
# delete event_type list if empty
if not self._listeners[event_type]:
self._listeners.pop(event_type)
except (KeyError, ValueError):
# KeyError if the event_type does not exist
# ValueError if the listener was not registered for this event_type
_LOGGER.warning("Unable to remove unknown listener %s", listener)
class State:
"""Object to represent a state within the state machine.
entity_id: the entity that is represented.
state: the state of the entity
attributes: extra information on entity and state
last_changed: last time the state was changed, not the attributes.
last_updated: last time this object was updated.
context: Context in which it was created
domain: Domain of this state.
"""
__slots__ = [
"entity_id",
"state",
"attributes",
"last_changed",
"last_updated",
"context",
"domain",
]
def __init__(
self,
entity_id: str,
state: str,
attributes: Optional[Mapping] = None,
last_changed: Optional[datetime.datetime] = None,
last_updated: Optional[datetime.datetime] = None,
context: Optional[Context] = None,
validate_entity_id: Optional[bool] = True,
) -> None:
"""Initialize a new state."""
state = str(state)
if validate_entity_id and not valid_entity_id(entity_id):
raise InvalidEntityFormatError(
f"Invalid entity id encountered: {entity_id}. "
"Format should be <domain>.<object_id>"
)
if not valid_state(state):
raise InvalidStateError(
f"Invalid state encountered for entity id: {entity_id}. "
"State max length is 255 characters."
)
self.entity_id = entity_id.lower()
self.state = state
self.attributes = MappingProxyType(attributes or {})
self.last_updated = last_updated or dt_util.utcnow()
self.last_changed = last_changed or self.last_updated
self.context = context or Context()
self.domain = split_entity_id(self.entity_id)[0]
@property
def object_id(self) -> str:
"""Object id of this state."""
return split_entity_id(self.entity_id)[1]
@property
def name(self) -> str:
"""Name of this state."""
return self.attributes.get(ATTR_FRIENDLY_NAME) or self.object_id.replace(
"_", " "
)
def as_dict(self) -> Dict:
"""Return a dict representation of the State.
Async friendly.
To be used for JSON serialization.
Ensures: state == State.from_dict(state.as_dict())
"""
return {
"entity_id": self.entity_id,
"state": self.state,
"attributes": dict(self.attributes),
"last_changed": self.last_changed,
"last_updated": self.last_updated,
"context": self.context.as_dict(),
}
@classmethod
def from_dict(cls, json_dict: Dict) -> Any:
"""Initialize a state from a dict.
Async friendly.
Ensures: state == State.from_json_dict(state.to_json_dict())
"""
if not (json_dict and "entity_id" in json_dict and "state" in json_dict):
return None
last_changed = json_dict.get("last_changed")
if isinstance(last_changed, str):
last_changed = dt_util.parse_datetime(last_changed)
last_updated = json_dict.get("last_updated")
if isinstance(last_updated, str):
last_updated = dt_util.parse_datetime(last_updated)
context = json_dict.get("context")
if context:
context = Context(id=context.get("id"), user_id=context.get("user_id"))
return cls(
json_dict["entity_id"],
json_dict["state"],
json_dict.get("attributes"),
last_changed,
last_updated,
context,
)
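# Round-trip sketch (assumption, not original code), relying on the guarantee
# documented in as_dict/from_dict above:
#
#   original = State("light.kitchen", "on", {"brightness": 180})
#   assert State.from_dict(original.as_dict()) == original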
def __eq__(self, other: Any) -> bool:
"""Return the comparison of the state."""
return ( # type: ignore
self.__class__ == other.__class__
and self.entity_id == other.entity_id
and self.state == other.state
and self.attributes == other.attributes
and self.context == other.context
)
def __repr__(self) -> str:
"""Return the representation of the states."""
attrs = f"; {util.repr_helper(self.attributes)}" if self.attributes else ""
return (
f"<state {self.entity_id}={self.state}{attrs}"
f" @ {dt_util.as_local(self.last_changed).isoformat()}>"
)
class StateMachine:
"""Helper class that tracks the state of different entities."""
def __init__(self, bus: EventBus, loop: asyncio.events.AbstractEventLoop) -> None:
"""Initialize state machine."""
self._states: Dict[str, State] = {}
self._bus = bus
self._loop = loop
def entity_ids(self, domain_filter: Optional[str] = None) -> List[str]:
"""List of entity ids that are being tracked."""
future = run_callback_threadsafe(
self._loop, self.async_entity_ids, domain_filter
)
return future.result()
@callback
def async_entity_ids(
self, domain_filter: Optional[Union[str, Iterable]] = None
) -> List[str]:
"""List of entity ids that are being tracked.
This method must be run in the event loop.
"""
if domain_filter is None:
return list(self._states.keys())
if isinstance(domain_filter, str):
domain_filter = (domain_filter.lower(),)
return [
state.entity_id
for state in self._states.values()
if state.domain in domain_filter
]
def all(self) -> List[State]:
"""Create a list of all states."""
return run_callback_threadsafe(self._loop, self.async_all).result()
@callback
def async_all(self) -> List[State]:
"""Create a list of all states.
This method must be run in the event loop.
"""
return list(self._states.values())
def get(self, entity_id: str) -> Optional[State]:
"""Retrieve state of entity_id or None if not found.
Async friendly.
"""
return self._states.get(entity_id.lower())
def is_state(self, entity_id: str, state: str) -> bool:
"""Test if entity exists and is in specified state.
Async friendly.
"""
state_obj = self.get(entity_id)
return state_obj is not None and state_obj.state == state
def remove(self, entity_id: str) -> bool:
"""Remove the state of an entity.
Returns boolean to indicate if an entity was removed.
"""
return run_callback_threadsafe(
self._loop, self.async_remove, entity_id
).result()
@callback
def async_remove(self, entity_id: str, context: Optional[Context] = None) -> bool:
"""Remove the state of an entity.
Returns boolean to indicate if an entity was removed.
This method must be run in the event loop.
"""
entity_id = entity_id.lower()
old_state = self._states.pop(entity_id, None)
if old_state is None:
return False
self._bus.async_fire(
EVENT_STATE_CHANGED,
{"entity_id": entity_id, "old_state": old_state, "new_state": None},
EventOrigin.local,
context=context,
)
return True
def set(
self,
entity_id: str,
new_state: str,
attributes: Optional[Dict] = None,
force_update: bool = False,
context: Optional[Context] = None,
) -> None:
"""Set the state of an entity, add entity if it does not exist.
Attributes is an optional dict to specify attributes of this state.
If you just update the attributes and not the state, last changed will
not be affected.
"""
run_callback_threadsafe(
self._loop,
self.async_set,
entity_id,
new_state,
attributes,
force_update,
context,
).result()
@callback
def async_set(
self,
entity_id: str,
new_state: str,
attributes: Optional[Dict] = None,
force_update: bool = False,
context: Optional[Context] = None,
) -> None:
"""Set the state of an entity, add entity if it does not exist.
Attributes is an optional dict to specify attributes of this state.
If you just update the attributes and not the state, last changed will
not be affected.
This method must be run in the event loop.
"""
entity_id = entity_id.lower()
new_state = str(new_state)
attributes = attributes or {}
old_state = self._states.get(entity_id)
if old_state is None:
same_state = False
same_attr = False
last_changed = None
else:
same_state = old_state.state == new_state and not force_update
same_attr = old_state.attributes == MappingProxyType(attributes)
last_changed = old_state.last_changed if same_state else None
if same_state and same_attr:
return
if context is None:
context = Context()
state = State(entity_id, new_state, attributes, last_changed, None, context)
self._states[entity_id] = state
self._bus.async_fire(
EVENT_STATE_CHANGED,
{"entity_id": entity_id, "old_state": old_state, "new_state": state},
EventOrigin.local,
context,
)
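# Usage sketch (assumption, not original code): set a state from inside the event
# loop and read it back through the getter.
#
#   hass.states.async_set("sensor.outside_temp", "21.5", {"unit_of_measurement": "C"})
#   state = hass.states.get("sensor.outside_temp")
#   assert state is not None and state.state == "21.5"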
class Service:
"""Representation of a callable service."""
__slots__ = ["func", "schema", "is_callback", "is_coroutinefunction"]
def __init__(
self,
func: Callable,
schema: Optional[vol.Schema],
context: Optional[Context] = None,
) -> None:
"""Initialize a service."""
self.func = func
self.schema = schema
# Properly detect wrapped functions
while isinstance(func, functools.partial):
func = func.func
self.is_callback = is_callback(func)
self.is_coroutinefunction = asyncio.iscoroutinefunction(func)
class ServiceCall:
"""Representation of a call to a service."""
__slots__ = ["domain", "service", "data", "context"]
def __init__(
self,
domain: str,
service: str,
data: Optional[Dict] = None,
context: Optional[Context] = None,
) -> None:
"""Initialize a service call."""
self.domain = domain.lower()
self.service = service.lower()
self.data = MappingProxyType(data or {})
self.context = context or Context()
def __repr__(self) -> str:
"""Return the representation of the service."""
if self.data:
return (
f"<ServiceCall {self.domain}.{self.service} "
f"(c:{self.context.id}): {util.repr_helper(self.data)}>"
)
return f"<ServiceCall {self.domain}.{self.service} (c:{self.context.id})>"
class ServiceRegistry:
"""Offer the services over the eventbus."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize a service registry."""
self._services: Dict[str, Dict[str, Service]] = {}
self._hass = hass
@property
def services(self) -> Dict[str, Dict[str, Service]]:
"""Return dictionary with per domain a list of available services."""
return run_callback_threadsafe(self._hass.loop, self.async_services).result()
@callback
def async_services(self) -> Dict[str, Dict[str, Service]]:
"""Return dictionary with per domain a list of available services.
This method must be run in the event loop.
"""
return {domain: self._services[domain].copy() for domain in self._services}
def has_service(self, domain: str, service: str) -> bool:
"""Test if specified service exists.
Async friendly.
"""
return service.lower() in self._services.get(domain.lower(), [])
def register(
self,
domain: str,
service: str,
service_func: Callable,
schema: Optional[vol.Schema] = None,
) -> None:
"""
Register a service.
Schema is called to coerce and validate the service data.
"""
run_callback_threadsafe(
self._hass.loop, self.async_register, domain, service, service_func, schema
).result()
@callback
def async_register(
self,
domain: str,
service: str,
service_func: Callable,
schema: Optional[vol.Schema] = None,
) -> None:
"""
Register a service.
Schema is called to coerce and validate the service data.
This method must be run in the event loop.
"""
domain = domain.lower()
service = service.lower()
service_obj = Service(service_func, schema)
if domain in self._services:
self._services[domain][service] = service_obj
else:
self._services[domain] = {service: service_obj}
self._hass.bus.async_fire(
EVENT_SERVICE_REGISTERED, {ATTR_DOMAIN: domain, ATTR_SERVICE: service}
)
def remove(self, domain: str, service: str) -> None:
"""Remove a registered service from service handler."""
run_callback_threadsafe(
self._hass.loop, self.async_remove, domain, service
).result()
@callback
def async_remove(self, domain: str, service: str) -> None:
"""Remove a registered service from service handler.
This method must be run in the event loop.
"""
domain = domain.lower()
service = service.lower()
if service not in self._services.get(domain, {}):
_LOGGER.warning("Unable to remove unknown service %s/%s", domain, service)
return
self._services[domain].pop(service)
if not self._services[domain]:
self._services.pop(domain)
self._hass.bus.async_fire(
EVENT_SERVICE_REMOVED, {ATTR_DOMAIN: domain, ATTR_SERVICE: service}
)
def call(
self,
domain: str,
service: str,
service_data: Optional[Dict] = None,
blocking: bool = False,
context: Optional[Context] = None,
limit: Optional[float] = SERVICE_CALL_LIMIT,
) -> Optional[bool]:
"""
Call a service.
See description of async_call for details.
"""
return asyncio.run_coroutine_threadsafe(
self.async_call(domain, service, service_data, blocking, context, limit),
self._hass.loop,
).result()
async def async_call(
self,
domain: str,
service: str,
service_data: Optional[Dict] = None,
blocking: bool = False,
context: Optional[Context] = None,
limit: Optional[float] = SERVICE_CALL_LIMIT,
) -> Optional[bool]:
"""
Call a service.
Specify blocking=True to wait until service is executed.
Waits a maximum of limit, which may be None for no timeout.
If blocking = True, will return boolean if service executed
successfully within limit.
This method will fire an event to indicate the service has been called.
Because the service is sent as an event you are not allowed to use
the keys ATTR_DOMAIN and ATTR_SERVICE in your service_data.
This method is a coroutine.
"""
domain = domain.lower()
service = service.lower()
context = context or Context()
service_data = service_data or {}
try:
handler = self._services[domain][service]
except KeyError:
raise ServiceNotFound(domain, service) from None
if handler.schema:
try:
processed_data = handler.schema(service_data)
except vol.Invalid:
_LOGGER.debug(
"Invalid data for service call %s.%s: %s",
domain,
service,
service_data,
)
raise
else:
processed_data = service_data
service_call = ServiceCall(domain, service, processed_data, context)
self._hass.bus.async_fire(
EVENT_CALL_SERVICE,
{
ATTR_DOMAIN: domain.lower(),
ATTR_SERVICE: service.lower(),
ATTR_SERVICE_DATA: service_data,
},
context=context,
)
coro = self._execute_service(handler, service_call)
if not blocking:
self._run_service_in_background(coro, service_call)
return None
task = self._hass.async_create_task(coro)
try:
await asyncio.wait({task}, timeout=limit)
except asyncio.CancelledError:
# Task calling us was cancelled, so cancel service call task, and wait for
# it to be cancelled, within reason, before leaving.
_LOGGER.debug("Service call was cancelled: %s", service_call)
task.cancel()
await asyncio.wait({task}, timeout=SERVICE_CALL_LIMIT)
raise
if task.cancelled():
# Service call task was cancelled some other way, such as during shutdown.
_LOGGER.debug("Service was cancelled: %s", service_call)
raise asyncio.CancelledError
if task.done():
# Propagate any exceptions that might have happened during service call.
task.result()
# Service call completed successfully!
return True
# Service call task did not complete before timeout expired.
# Let it keep running in background.
self._run_service_in_background(task, service_call)
_LOGGER.debug("Service did not complete before timeout: %s", service_call)
return False
def _run_service_in_background(
self, coro_or_task: Union[Coroutine, asyncio.Task], service_call: ServiceCall
) -> None:
"""Run service call in background, catching and logging any exceptions."""
async def catch_exceptions() -> None:
try:
await coro_or_task
except Unauthorized:
_LOGGER.warning(
"Unauthorized service called %s/%s",
service_call.domain,
service_call.service,
)
except asyncio.CancelledError:
_LOGGER.debug("Service was cancelled: %s", service_call)
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Error executing service: %s", service_call)
self._hass.async_create_task(catch_exceptions())
async def _execute_service(
self, handler: Service, service_call: ServiceCall
) -> None:
"""Execute a service."""
if handler.is_coroutinefunction:
await handler.func(service_call)
elif handler.is_callback:
handler.func(service_call)
else:
await self._hass.async_add_executor_job(handler.func, service_call)
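# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of this module): how the
# ServiceRegistry above is typically exercised from inside the event loop.
# The "demo" domain, the "set_value" service and the handler are made-up
# names; ``hass`` is assumed to be a running HomeAssistant instance.
#
# async def _demo_service_usage(hass: HomeAssistant) -> None:
#     async def handle_set_value(call: ServiceCall) -> None:
#         _LOGGER.info("set_value called with %s", call.data)
#
#     hass.services.async_register(
#         "demo", "set_value", handle_set_value,
#         schema=vol.Schema({vol.Required("value"): int}),
#     )
#     # blocking=True waits up to SERVICE_CALL_LIMIT seconds for completion.
#     await hass.services.async_call(
#         "demo", "set_value", {"value": 42}, blocking=True
#     )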
class Config:
"""Configuration settings for Home Assistant."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize a new config object."""
self.hass = hass
self.latitude: float = 0
self.longitude: float = 0
self.elevation: int = 0
self.location_name: str = "Home"
self.time_zone: datetime.tzinfo = dt_util.UTC
self.units: UnitSystem = METRIC_SYSTEM
self.internal_url: Optional[str] = None
self.external_url: Optional[str] = None
self.config_source: str = "default"
# If True, pip install is skipped for requirements on startup
self.skip_pip: bool = False
# List of loaded components
self.components: Set[str] = set()
# API (HTTP) server configuration, see components.http.ApiConfig
self.api: Optional[Any] = None
# Directory that holds the configuration
self.config_dir: Optional[str] = None
# List of allowed external dirs to access
self.allowlist_external_dirs: Set[str] = set()
# List of allowed external URLs that integrations may use
self.allowlist_external_urls: Set[str] = set()
# If Home Assistant is running in safe mode
self.safe_mode: bool = False
def distance(self, lat: float, lon: float) -> Optional[float]:
"""Calculate distance from Home Assistant.
Async friendly.
"""
return self.units.length(
location.distance(self.latitude, self.longitude, lat, lon), LENGTH_METERS
)
def path(self, *path: str) -> str:
"""Generate path to the file within the configuration directory.
Async friendly.
"""
if self.config_dir is None:
raise HomeAssistantError("config_dir is not set")
return os.path.join(self.config_dir, *path)
def is_allowed_external_url(self, url: str) -> bool:
"""Check if an external URL is allowed."""
parsed_url = f"{str(yarl.URL(url))}/"
return any(
allowed
for allowed in self.allowlist_external_urls
if parsed_url.startswith(allowed)
)
def is_allowed_path(self, path: str) -> bool:
"""Check if the path is valid for access from outside."""
assert path is not None
thepath = pathlib.Path(path)
try:
# The file path does not have to exist (its parent should)
if thepath.exists():
thepath = thepath.resolve()
else:
thepath = thepath.parent.resolve()
except (FileNotFoundError, RuntimeError, PermissionError):
return False
for allowed_path in self.allowlist_external_dirs:
try:
thepath.relative_to(allowed_path)
return True
except ValueError:
pass
return False
def as_dict(self) -> Dict:
"""Create a dictionary representation of the configuration.
Async friendly.
"""
time_zone = dt_util.UTC.zone
if self.time_zone and getattr(self.time_zone, "zone"):
time_zone = getattr(self.time_zone, "zone")
return {
"latitude": self.latitude,
"longitude": self.longitude,
"elevation": self.elevation,
"unit_system": self.units.as_dict(),
"location_name": self.location_name,
"time_zone": time_zone,
"components": self.components,
"config_dir": self.config_dir,
# legacy, backwards compat
"whitelist_external_dirs": self.allowlist_external_dirs,
"allowlist_external_dirs": self.allowlist_external_dirs,
"allowlist_external_urls": self.allowlist_external_urls,
"version": __version__,
"config_source": self.config_source,
"safe_mode": self.safe_mode,
"state": self.hass.state.value,
"external_url": self.external_url,
"internal_url": self.internal_url,
}
def set_time_zone(self, time_zone_str: str) -> None:
"""Help to set the time zone."""
time_zone = dt_util.get_time_zone(time_zone_str)
if time_zone:
self.time_zone = time_zone
dt_util.set_default_time_zone(time_zone)
else:
raise ValueError(f"Received invalid time zone {time_zone_str}")
@callback
def _update(
self,
*,
source: str,
latitude: Optional[float] = None,
longitude: Optional[float] = None,
elevation: Optional[int] = None,
unit_system: Optional[str] = None,
location_name: Optional[str] = None,
time_zone: Optional[str] = None,
external_url: Optional[Union[str, dict]] = _UNDEF,
internal_url: Optional[Union[str, dict]] = _UNDEF,
) -> None:
"""Update the configuration from a dictionary."""
self.config_source = source
if latitude is not None:
self.latitude = latitude
if longitude is not None:
self.longitude = longitude
if elevation is not None:
self.elevation = elevation
if unit_system is not None:
if unit_system == CONF_UNIT_SYSTEM_IMPERIAL:
self.units = IMPERIAL_SYSTEM
else:
self.units = METRIC_SYSTEM
if location_name is not None:
self.location_name = location_name
if time_zone is not None:
self.set_time_zone(time_zone)
if external_url is not _UNDEF:
self.external_url = cast(Optional[str], external_url)
if internal_url is not _UNDEF:
self.internal_url = cast(Optional[str], internal_url)
async def async_update(self, **kwargs: Any) -> None:
"""Update the configuration from a dictionary."""
self._update(source=SOURCE_STORAGE, **kwargs)
await self.async_store()
self.hass.bus.async_fire(EVENT_CORE_CONFIG_UPDATE, kwargs)
async def async_load(self) -> None:
"""Load [homeassistant] core config."""
store = self.hass.helpers.storage.Store(
CORE_STORAGE_VERSION, CORE_STORAGE_KEY, private=True
)
data = await store.async_load()
async def migrate_base_url(_: Event) -> None:
"""Migrate base_url to internal_url/external_url."""
if self.hass.config.api is None:
return
base_url = yarl.URL(self.hass.config.api.deprecated_base_url)
# Check if this is an internal URL
if str(base_url.host).endswith(".local") or (
network.is_ip_address(str(base_url.host))
and network.is_private(ip_address(base_url.host))
):
await self.async_update(
internal_url=network.normalize_url(str(base_url))
)
return
# External, ensure this is not a loopback address
if not (
network.is_ip_address(str(base_url.host))
and network.is_loopback(ip_address(base_url.host))
):
await self.async_update(
external_url=network.normalize_url(str(base_url))
)
if data:
# Try to migrate base_url to internal_url/external_url
if "external_url" not in data:
self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_START, migrate_base_url
)
self._update(
source=SOURCE_STORAGE,
latitude=data.get("latitude"),
longitude=data.get("longitude"),
elevation=data.get("elevation"),
unit_system=data.get("unit_system"),
location_name=data.get("location_name"),
time_zone=data.get("time_zone"),
external_url=data.get("external_url", _UNDEF),
internal_url=data.get("internal_url", _UNDEF),
)
async def async_store(self) -> None:
"""Store [homeassistant] core config."""
time_zone = dt_util.UTC.zone
if self.time_zone and getattr(self.time_zone, "zone"):
time_zone = getattr(self.time_zone, "zone")
data = {
"latitude": self.latitude,
"longitude": self.longitude,
"elevation": self.elevation,
"unit_system": self.units.name,
"location_name": self.location_name,
"time_zone": time_zone,
"external_url": self.external_url,
"internal_url": self.internal_url,
}
store = self.hass.helpers.storage.Store(
CORE_STORAGE_VERSION, CORE_STORAGE_KEY, private=True
)
await store.async_save(data)
def _async_create_timer(hass: HomeAssistant) -> None:
"""Create a timer that will start on HOMEASSISTANT_START."""
handle = None
timer_context = Context()
def schedule_tick(now: datetime.datetime) -> None:
"""Schedule a timer tick when the next second rolls around."""
nonlocal handle
slp_seconds = 1 - (now.microsecond / 10 ** 6)
target = monotonic() + slp_seconds
handle = hass.loop.call_later(slp_seconds, fire_time_event, target)
@callback
def fire_time_event(target: float) -> None:
"""Fire next time event."""
now = dt_util.utcnow()
hass.bus.async_fire(EVENT_TIME_CHANGED, {ATTR_NOW: now}, context=timer_context)
# If we are more than a second late, a tick was missed
late = monotonic() - target
if late > 1:
hass.bus.async_fire(
EVENT_TIMER_OUT_OF_SYNC, {ATTR_SECONDS: late}, context=timer_context
)
schedule_tick(now)
@callback
def stop_timer(_: Event) -> None:
"""Stop the timer."""
if handle is not None:
handle.cancel()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_timer)
_LOGGER.info("Timer:starting")
schedule_tick(dt_util.utcnow())
|
|
"""Test the Xiaomi Miio config flow."""
from unittest.mock import Mock, patch
from miio import DeviceException
from homeassistant import config_entries
from homeassistant.components import zeroconf
from homeassistant.components.xiaomi_miio import const
from homeassistant.components.xiaomi_miio.config_flow import (
DEFAULT_DEVICE_NAME,
DEFAULT_GATEWAY_NAME,
)
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_TOKEN
ZEROCONF_NAME = "name"
ZEROCONF_PROP = "properties"
ZEROCONF_MAC = "mac"
TEST_HOST = "1.2.3.4"
TEST_TOKEN = "12345678901234567890123456789012"
TEST_NAME = "Test_Gateway"
TEST_MODEL = const.MODELS_GATEWAY[0]
TEST_MAC = "ab:cd:ef:gh:ij:kl"
TEST_GATEWAY_ID = TEST_MAC
TEST_HARDWARE_VERSION = "AB123"
TEST_FIRMWARE_VERSION = "1.2.3_456"
TEST_ZEROCONF_NAME = "lumi-gateway-v3_miio12345678._miio._udp.local."
TEST_SUB_DEVICE_LIST = []
def get_mock_info(
model=TEST_MODEL,
mac_address=TEST_MAC,
hardware_version=TEST_HARDWARE_VERSION,
firmware_version=TEST_FIRMWARE_VERSION,
):
"""Return a mock gateway info instance."""
gateway_info = Mock()
gateway_info.model = model
gateway_info.mac_address = mac_address
gateway_info.hardware_version = hardware_version
gateway_info.firmware_version = firmware_version
return gateway_info
async def test_config_flow_step_gateway_connect_error(hass):
"""Test config flow, gateway connection error."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "device"
assert result["errors"] == {}
with patch(
"homeassistant.components.xiaomi_miio.device.Device.info",
side_effect=DeviceException({}),
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_HOST: TEST_HOST, CONF_TOKEN: TEST_TOKEN},
)
assert result["type"] == "form"
assert result["step_id"] == "device"
assert result["errors"] == {"base": "cannot_connect"}
async def test_config_flow_gateway_success(hass):
"""Test a successful config flow."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "device"
assert result["errors"] == {}
mock_info = get_mock_info()
with patch(
"homeassistant.components.xiaomi_miio.device.Device.info",
return_value=mock_info,
), patch(
"homeassistant.components.xiaomi_miio.async_setup_entry", return_value=True
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_HOST: TEST_HOST, CONF_TOKEN: TEST_TOKEN},
)
assert result["type"] == "create_entry"
assert result["title"] == DEFAULT_GATEWAY_NAME
assert result["data"] == {
const.CONF_FLOW_TYPE: const.CONF_GATEWAY,
CONF_HOST: TEST_HOST,
CONF_TOKEN: TEST_TOKEN,
const.CONF_MODEL: TEST_MODEL,
const.CONF_MAC: TEST_MAC,
}
async def test_zeroconf_gateway_success(hass):
"""Test a successful zeroconf discovery of a gateway."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data={
zeroconf.ATTR_HOST: TEST_HOST,
ZEROCONF_NAME: TEST_ZEROCONF_NAME,
ZEROCONF_PROP: {ZEROCONF_MAC: TEST_MAC},
},
)
assert result["type"] == "form"
assert result["step_id"] == "device"
assert result["errors"] == {}
mock_info = get_mock_info()
with patch(
"homeassistant.components.xiaomi_miio.device.Device.info",
return_value=mock_info,
), patch(
"homeassistant.components.xiaomi_miio.async_setup_entry", return_value=True
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_TOKEN: TEST_TOKEN},
)
assert result["type"] == "create_entry"
assert result["title"] == DEFAULT_GATEWAY_NAME
assert result["data"] == {
const.CONF_FLOW_TYPE: const.CONF_GATEWAY,
CONF_HOST: TEST_HOST,
CONF_TOKEN: TEST_TOKEN,
const.CONF_MODEL: TEST_MODEL,
const.CONF_MAC: TEST_MAC,
}
async def test_zeroconf_unknown_device(hass):
"""Test a failed zeroconf discovery because of a unknown device."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data={
zeroconf.ATTR_HOST: TEST_HOST,
ZEROCONF_NAME: "not-a-xiaomi-miio-device",
ZEROCONF_PROP: {ZEROCONF_MAC: TEST_MAC},
},
)
assert result["type"] == "abort"
assert result["reason"] == "not_xiaomi_miio"
async def test_zeroconf_no_data(hass):
"""Test a failed zeroconf discovery because of no data."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_ZEROCONF}, data={}
)
assert result["type"] == "abort"
assert result["reason"] == "not_xiaomi_miio"
async def test_zeroconf_missing_data(hass):
"""Test a failed zeroconf discovery because of missing data."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data={zeroconf.ATTR_HOST: TEST_HOST, ZEROCONF_NAME: TEST_ZEROCONF_NAME},
)
assert result["type"] == "abort"
assert result["reason"] == "not_xiaomi_miio"
async def test_config_flow_step_device_connect_error(hass):
"""Test config flow, device connection error."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "device"
assert result["errors"] == {}
with patch(
"homeassistant.components.xiaomi_miio.device.Device.info",
side_effect=DeviceException({}),
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_HOST: TEST_HOST, CONF_TOKEN: TEST_TOKEN},
)
assert result["type"] == "form"
assert result["step_id"] == "device"
assert result["errors"] == {"base": "cannot_connect"}
async def test_config_flow_step_unknown_device(hass):
"""Test config flow, unknown device error."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "device"
assert result["errors"] == {}
mock_info = get_mock_info(model="UNKNOWN")
with patch(
"homeassistant.components.xiaomi_miio.device.Device.info",
return_value=mock_info,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_HOST: TEST_HOST, CONF_TOKEN: TEST_TOKEN},
)
assert result["type"] == "form"
assert result["step_id"] == "device"
assert result["errors"] == {"base": "unknown_device"}
async def test_import_flow_success(hass):
"""Test a successful import form yaml for a device."""
mock_info = get_mock_info(model=const.MODELS_SWITCH[0])
with patch(
"homeassistant.components.xiaomi_miio.device.Device.info",
return_value=mock_info,
), patch(
"homeassistant.components.xiaomi_miio.async_setup_entry", return_value=True
):
result = await hass.config_entries.flow.async_init(
const.DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={CONF_NAME: TEST_NAME, CONF_HOST: TEST_HOST, CONF_TOKEN: TEST_TOKEN},
)
assert result["type"] == "create_entry"
assert result["title"] == TEST_NAME
assert result["data"] == {
const.CONF_FLOW_TYPE: const.CONF_DEVICE,
CONF_HOST: TEST_HOST,
CONF_TOKEN: TEST_TOKEN,
const.CONF_MODEL: const.MODELS_SWITCH[0],
const.CONF_MAC: TEST_MAC,
}
async def config_flow_device_success(hass, model_to_test):
"""Test a successful config flow for a device (base class)."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "device"
assert result["errors"] == {}
mock_info = get_mock_info(model=model_to_test)
with patch(
"homeassistant.components.xiaomi_miio.device.Device.info",
return_value=mock_info,
), patch(
"homeassistant.components.xiaomi_miio.async_setup_entry", return_value=True
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_HOST: TEST_HOST, CONF_TOKEN: TEST_TOKEN},
)
assert result["type"] == "create_entry"
assert result["title"] == DEFAULT_DEVICE_NAME
assert result["data"] == {
const.CONF_FLOW_TYPE: const.CONF_DEVICE,
CONF_HOST: TEST_HOST,
CONF_TOKEN: TEST_TOKEN,
const.CONF_MODEL: model_to_test,
const.CONF_MAC: TEST_MAC,
}
async def zeroconf_device_success(hass, zeroconf_name_to_test, model_to_test):
"""Test a successful zeroconf discovery of a device (base class)."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data={
zeroconf.ATTR_HOST: TEST_HOST,
ZEROCONF_NAME: zeroconf_name_to_test,
ZEROCONF_PROP: {ZEROCONF_MAC: TEST_MAC},
},
)
assert result["type"] == "form"
assert result["step_id"] == "device"
assert result["errors"] == {}
mock_info = get_mock_info(model=model_to_test)
with patch(
"homeassistant.components.xiaomi_miio.device.Device.info",
return_value=mock_info,
), patch(
"homeassistant.components.xiaomi_miio.async_setup_entry", return_value=True
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_TOKEN: TEST_TOKEN},
)
assert result["type"] == "create_entry"
assert result["title"] == DEFAULT_DEVICE_NAME
assert result["data"] == {
const.CONF_FLOW_TYPE: const.CONF_DEVICE,
CONF_HOST: TEST_HOST,
CONF_TOKEN: TEST_TOKEN,
const.CONF_MODEL: model_to_test,
const.CONF_MAC: TEST_MAC,
}
async def test_config_flow_plug_success(hass):
"""Test a successful config flow for a plug."""
test_plug_model = const.MODELS_SWITCH[0]
await config_flow_device_success(hass, test_plug_model)
async def test_zeroconf_plug_success(hass):
"""Test a successful zeroconf discovery of a plug."""
test_plug_model = const.MODELS_SWITCH[0]
test_zeroconf_name = const.MODELS_SWITCH[0].replace(".", "-")
await zeroconf_device_success(hass, test_zeroconf_name, test_plug_model)
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ....core import CPUPlace, EOFException
from .... import compiler
from ....framework import Variable
from .... import io
from .... import profiler
from .... import scope_guard
from ....data_feeder import DataFeeder
from ....log_helper import get_logger
from ....reader import DataLoaderBase
from ..graph import *
from .config import ConfigFactory
import numpy as np
from collections.abc import Iterable
import time
import os
import logging
import sys
import pickle
import functools
import traceback
__all__ = ['Context', 'Compressor']
_logger = get_logger(
__name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')
def cached_reader(reader, sampled_rate, cache_path, cached_id):
"""
Sample partial data from reader and cache them into local file system.
Args:
reader: Iterative data source.
sampled_rate(float): The sampled rate used to sample partial data for evaluation. None means using all data in eval_reader. default: None.
cache_path(str): The path to cache the sampled data.
cached_id(int): The id of dataset sampled. Evaluations with same cached_id use the same sampled dataset. default: 0.
"""
np.random.seed(cached_id)
cache_path = os.path.join(cache_path, str(cached_id))
_logger.debug('read data from: {}'.format(cache_path))
def s_reader():
if os.path.isdir(cache_path):
for file_name in open(os.path.join(cache_path, "list")):
yield np.load(
os.path.join(cache_path, file_name.strip()),
allow_pickle=True)
else:
os.makedirs(cache_path)
list_file = open(os.path.join(cache_path, "list"), 'w')
batch = 0
dtype = None
for data in reader():
if batch == 0 or (np.random.uniform() < sampled_rate):
np.save(
os.path.join(cache_path, 'batch' + str(batch)), data)
list_file.write('batch' + str(batch) + '.npy\n')
batch += 1
yield data
return s_reader
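# Hedged usage sketch (illustration only): wrap an evaluation reader so that
# roughly 20% of its batches are sampled and cached under ./eval_cache/0 on
# the first pass and replayed from disk afterwards. ``eval_reader`` is an
# assumed callable returning an iterable of batches.
#
# sampled_reader = cached_reader(eval_reader, 0.2, './eval_cache', 0)
# for batch in sampled_reader():
#     pass  # run evaluation on the sampled batch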
class Context(object):
"""
The context in the process of compression.
"""
def __init__(self,
place,
scope,
train_graph=None,
train_reader=None,
eval_graph=None,
eval_reader=None,
teacher_graphs=None,
train_optimizer=None,
distiller_optimizer=None,
search_space=None):
"""
Args:
place: The device place where the compression job is running.
scope: The scope used in compression job.
train_graph: The graph with loss as output node.
eval_graph: The graph used for evaluation.
eval_reader: The data reader used for evaluation.
teacher_graphs: The teacher graphs used in distillation strategies.
train_optimizer: The optimizer used to append backward ops and
optimization ops into train_graph.
distiller_optimizer: The optimizer used by distillation strategies.
"""
# The total number of epoches to be trained.
self.epoch = 0
# Current epoch
self.epoch_id = 0
# Current batch
self.batch_id = 0
self.k_v = {}
self.place = place
self.scope = scope
self.train_graph = train_graph
self.train_reader = train_reader
self.eval_graph = eval_graph
self.eval_reader = eval_reader
self.executor = None
self.teacher_graphs = teacher_graphs
self.train_optimizer = train_optimizer
self.distiller_optimizer = distiller_optimizer
self.optimize_graph = None
self.cache_path = './eval_cache'
self.eval_results = {}
self.skip_training = False
self.search_space = search_space
def to_file(self, file_name):
"""
Save the context into file.
"""
data = {}
data['epoch_id'] = self.epoch_id
data['eval_results'] = self.eval_results
with open(file_name, 'wb') as context_file:
pickle.dump(data, context_file)
def from_file(self, file_name):
"""
Load the context from file.
"""
with open(file_name, 'rb') as context_file:
if sys.version_info < (3, 0):
data = pickle.load(context_file)
else:
data = pickle.load(context_file, encoding='bytes')
self.epoch_id = data['epoch_id']
self.eval_results = data['eval_results']
def eval_converged(self, metric_name, delta=0.001):
"""
Check whether the training has been converged.
Args:
metric_name(str): The metric used to check convergence.
delta(float): 'abs(metric[k] - metric[k-1]) / metric[k-1] < delta'
means that the training has converged.
Returns:
bool: True means the training has been converged.
"""
# TODO(wanghaoshuang@baidu.com): enhance this method.
if (metric_name not in self.eval_results
) or len(self.eval_results[metric_name]) < 2:
return False
results = self.eval_results[metric_name][-2:]
_logger.info('Latest evaluations: {}'.format(results))
return abs(results[1] - results[0]) / results[0] < delta
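# A tiny worked example of the check above (the accuracy values are made up):
# with eval_results['acc'] == [0.7512, 0.7518],
# abs(0.7518 - 0.7512) / 0.7512 is roughly 0.0008 < 0.001, so the training
# counts as converged.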
def run_eval_graph(self, sampled_rate=None, cached_id=0):
"""
Evaluate the current model in context.
Args:
sampled_rate(float): The sampled rate used to sample partial data
for evaluation. None means using all data in eval_reader. default: None.
cached_id(int): The id of dataset sampled. Evaluations with same
cached_id use the same sampled dataset. default: 0.
"""
_logger.info('Running evaluation')
assert self.eval_graph is not None
assert self.eval_reader is not None
eval_graph = self.eval_graph.clone(for_test=True)
executor = SlimGraphExecutor(self.place)
results = []
batch_id = 0
s_time = time.time()
reader = self.eval_reader
if sampled_rate:
assert (not isinstance(reader, Variable))
assert (sampled_rate > 0)
assert (self.cache_path is not None)
_logger.info('sampled_rate: {}; cached_id: {}'.format(sampled_rate,
cached_id))
reader = cached_reader(reader, sampled_rate, self.cache_path,
cached_id)
if isinstance(reader, Variable) or (
isinstance(reader, DataLoaderBase) and (not reader.iterable)):
reader.start()
try:
while True:
result = executor.run(eval_graph, self.scope)
result = [np.mean(r) for r in result]
results.append(result)
if batch_id % 20 == 0:
_logger.info("batch-{}; {}={}".format(
batch_id, eval_graph.out_nodes.keys(), result))
batch_id += 1
except EOFException:
reader.reset()
else:
for data in reader():
result = executor.run(eval_graph, self.scope, data=data)
result = [np.mean(r) for r in result]
results.append(result)
if batch_id % 20 == 0:
_logger.info("batch-{}; {}={}".format(
batch_id, eval_graph.out_nodes.keys(), result))
batch_id += 1
result = np.mean(np.array(results), axis=0)
_logger.info("Final eval result: {}={}".format(
eval_graph.out_nodes.keys(), result))
if not isinstance(result, Iterable):
result = [result]
_logger.info('Finish evaluation')
return result, eval_graph.out_nodes.keys()
def put(self, key, value):
self.k_v[key] = value
def get(self, key):
return self.k_v.get(key)
class Compressor(object):
"""
The pass used to compress model.
"""
def __init__(self,
place,
scope,
train_program,
train_reader=None,
train_feed_list=None,
train_fetch_list=None,
eval_program=None,
eval_reader=None,
eval_feed_list=None,
eval_fetch_list=None,
eval_func=None,
save_eval_model=True,
prune_infer_model=None,
teacher_programs=[],
checkpoint_path=None,
train_optimizer=None,
distiller_optimizer=None,
search_space=None,
log_period=20):
"""
Args:
place(fluid.Place): The device place where the compression job is running.
scope(fluid.core.Scope): The scope used to run graph.
train_program(Program): The main program to be compressed. It must have loss op.
train_reader: The data reader used for training.
train_feed_list(dict): A dict to indicate the input variable of the training program.
The key is user-defined and human-readable name.
The value is the name of Variable.
train_fetch_list(dict): A dict to indicate the output variable of the training program.
The key is user-defined and human-readable name.
The value is the name of Variable.
eval_program(Program): The program used for evaluation.
eval_reader: The data reader used for evaluation. It can be None if eval_func is not None.
eval_feed_list(dict): A dict to indicate the input variable of the evaluation program.
The key is user-defined and human-readable name.
The value is the name of Variable.
It can be None if eval_func is not None.
eval_fetch_list(dict): A dict to indicate the output variable of the evaluation program.
The key is user-defined and human-readable name.
The value is the name of Variable.
eval_func(dict|function): Callback functions used to evaluate the compressed model.
The eval_func is a dict, the key is user-defined name and the value is
a callback function. And the score returned from callback functions
can be referenced in config file by the key of eval_func.
The args of callback function are compressed eval_program and scope which
store the compressed parameters.
Default: None.
save_eval_model(bool): Whether to save eval model when saving checkpoints. Default: True.
prune_infer_model(tuple|list): If prune_infer_model is not None, compressor will prune
eval program into inference program according to inputs and outputs
defined in prune_infer_model. prune_infer_model[0] is a list of input
variables' names and prune_infer_model[1] is a list of output variables'
names. If prune_infer_model is None, it will not save inference model.
Default: None.
teacher_programs: The teacher graphs used in distillation strategies.
train_optimizer: The optimizer used to append backward ops and
optimization ops into train_graph.
distiller_optimizer: The optimizer used by distillation strategies. In distillation strategy,
this optimizer is used to minimize the combined loss of student-net and
teacher-net while train_optimizer is used to minimize loss of
student-net in fine-tune stage.
search_space(slim.nas.SearchSpace): The instance that defines the search space. It must inherit
the slim.nas.SearchSpace class and override the abstract methods.
log_period(int): The period of print log of training.
"""
assert train_feed_list is None or isinstance(
train_feed_list, list
), "train_feed_list should be a list of tuple, such as [('image', image.name), ('label', gt.name)]"
assert eval_feed_list is None or isinstance(
eval_feed_list, list
), "eval_feed_list should be a list of tuple, such as [('image', image.name), ('label', gt.name)]"
self.strategies = []
self.epoch = 0
self.place = CPUPlace() if place is None else place
self.scope = scope
self.train_graph = GraphWrapper(
train_program, in_nodes=train_feed_list, out_nodes=train_fetch_list)
self.eval_graph = GraphWrapper(
eval_program, in_nodes=eval_feed_list, out_nodes=eval_fetch_list)
self.train_reader = train_reader
self.eval_reader = eval_reader
self.eval_func = eval_func
self.save_eval_model = save_eval_model
self.prune_infer_model = prune_infer_model
self.teacher_graphs = []
for teacher in teacher_programs:
self.teacher_graphs.append(GraphWrapper(teacher))
self.checkpoint = None
self.checkpoint_path = checkpoint_path
self.eval_epoch = 1
self.train_optimizer = train_optimizer
self.distiller_optimizer = distiller_optimizer
self.init_model = None
self.search_space = search_space
self.log_period = log_period
assert (log_period > 0)
def _add_strategy(self, strategy):
"""
Add a strategy to current compress pass.
Args:
strategy: The strategy to be added into current compress pass.
"""
self.strategies.append(strategy)
self.epoch = max(strategy.end_epoch, self.epoch)
def config(self, config_file):
"""
Configure the compress pass from file with yaml format.
Args:
config_file(str): The config file in local file system.
"""
factory = ConfigFactory(config_file)
self.epoch = factory.compressor['epoch']
for strategy in factory.compressor['strategies']:
self._add_strategy(strategy)
if 'checkpoint_path' in factory.compressor:
self.checkpoint_path = factory.compressor['checkpoint_path']
if 'init_model' in factory.compressor:
self.init_model = factory.compressor['init_model']
if 'eval_epoch' in factory.compressor:
self.eval_epoch = factory.compressor['eval_epoch']
assert (self.eval_epoch > 0)
def _init_model(self, context):
"""
Load model that has been compressed.
"""
if self.init_model and os.path.exists(self.init_model):
exe = SlimGraphExecutor(context.place)
with scope_guard(context.scope):
context.train_graph.load_persistables(self.init_model, exe)
flops = context.eval_graph.flops()
conv_flops = context.eval_graph.flops(only_conv=True)
context.eval_graph.update_param_shape(context.scope)
context.eval_graph.update_groups_of_conv()
_logger.info("conv flops: -{}".format(1 - float(
context.eval_graph.flops(only_conv=True)) / conv_flops))
_logger.info("total flops: -{}".format(1 - float(
context.eval_graph.flops()) / flops))
context.train_graph.update_param_shape(context.scope)
context.train_graph.update_groups_of_conv()
context.train_graph.infer_shape()
_logger.info("Init model from: {}".format(self.init_model))
def _load_checkpoint(self, context):
"""
Load checkpoints from file.
"""
_logger.debug('_load_checkpoint')
strategies = self.strategies
if self.checkpoint_path:
if not os.path.exists(self.checkpoint_path):
_logger.warning("Checkpints path doesn't exist: [{}]".format(
self.checkpoint_path))
return context, strategies
checkpoints = [
dir for dir in os.listdir(self.checkpoint_path)
if os.path.isdir(os.path.join(self.checkpoint_path, dir))
]
_logger.debug('self.checkpoint_path: {}'.format(
self.checkpoint_path))
_logger.info('checkpoints: {}'.format(checkpoints))
if len(checkpoints) > 0:
latest = max([int(ck) for ck in checkpoints])
latest_ck_path = os.path.join(self.checkpoint_path, str(latest))
model_path = os.path.join(latest_ck_path, 'model')
context_path = os.path.join(latest_ck_path, 'context')
strategy_path = os.path.join(latest_ck_path, 'strategies')
if os.path.exists(context_path):
context.from_file(context_path)
context.epoch_id += 1
if os.path.exists(strategy_path):
with open(strategy_path, 'rb') as strategy_file:
if sys.version_info < (3, 0):
strategies = pickle.load(strategy_file)
else:
strategies = pickle.load(
strategy_file, encoding='bytes')
assert (len(self.strategies) == len(strategies))
for s, s1 in zip(self.strategies, strategies):
s1.__dict__.update(s.__dict__)
for strategy in strategies:
strategy.restore_from_checkpoint(context)
if os.path.exists(model_path):
exe = SlimGraphExecutor(context.place)
with scope_guard(context.scope):
context.optimize_graph.load_persistables(model_path,
exe)
_logger.info("Loaded params from: {}".format(model_path))
return context, strategies
def _save_checkpoint(self, context):
"""
Save checkpoints to file.
"""
if context.epoch_id % 1 == 0 and self.checkpoint_path:
checkpoint_path = os.path.join(self.checkpoint_path,
str(context.epoch_id))
model_path = os.path.join(checkpoint_path, 'model')
eval_model_path = os.path.join(checkpoint_path, 'eval_model')
context_path = os.path.join(checkpoint_path, 'context')
strategy_path = os.path.join(checkpoint_path, 'strategies')
if not os.path.isdir(model_path):
os.makedirs(model_path)
exe = SlimGraphExecutor(context.place)
with scope_guard(context.scope):
context.optimize_graph.save_persistables(model_path, exe)
if self.save_eval_model:
context.eval_graph.save_model(eval_model_path, exe)
if self.prune_infer_model:
context.eval_graph.save_infer_model(
eval_model_path,
exe,
self.prune_infer_model,
program_only=self.save_eval_model)
context.to_file(context_path)
with open(strategy_path, 'wb') as strategy_file:
pickle.dump(self.strategies, strategy_file)
_logger.info('Saved checkpoint to: {}'.format(checkpoint_path))
def _train_one_epoch(self, context):
"""
Train one epoch.
"""
if context.skip_training:
return
executor = SlimGraphExecutor(self.place)
if context.optimize_graph.compiled_graph is None:
build_strategy = compiler.BuildStrategy()
build_strategy.fuse_all_reduce_ops = False
context.optimize_graph.compiled_graph = compiler.CompiledProgram(
context.optimize_graph.program).with_data_parallel(
loss_name=context.optimize_graph.out_nodes['loss'],
build_strategy=build_strategy)
if isinstance(context.train_reader, Variable) or (
isinstance(context.train_reader, DataLoaderBase) and
(not context.train_reader.iterable)):
context.train_reader.start()
try:
while True:
for strategy in self.strategies:
strategy.on_batch_begin(context)
results = executor.run(context.optimize_graph,
context.scope)
results = [float(np.mean(result)) for result in results]
if context.batch_id % self.log_period == 0:
_logger.info("epoch:{}; batch_id:{}; {} = {}".format(
context.epoch_id, context.batch_id,
context.optimize_graph.out_nodes.keys(
), [round(r, 6) for r in results]))
for strategy in self.strategies:
strategy.on_batch_end(context)
context.batch_id += 1
except EOFException:
context.train_reader.reset()
else:
for data in context.train_reader():
for strategy in self.strategies:
strategy.on_batch_begin(context)
results = executor.run(context.optimize_graph,
context.scope,
data=data)
results = [float(np.mean(result)) for result in results]
if context.batch_id % self.log_period == 0:
_logger.info("epoch:{}; batch_id:{}; {} = {}".format(
context.epoch_id, context.batch_id,
context.optimize_graph.out_nodes.keys(
), [round(r, 6) for r in results]))
for strategy in self.strategies:
strategy.on_batch_end(context)
context.batch_id += 1
context.batch_id = 0
def _eval(self, context):
"""
Run evaluation.
"""
if self.eval_func is not None:
for key in self.eval_func:
func = self.eval_func[key]
if key not in context.eval_results:
context.eval_results[key] = []
context.eval_results[key].append(
func(self.eval_graph.program, self.scope))
else:
results, names = context.run_eval_graph()
for name, result in zip(names, results):
if name not in context.eval_results:
context.eval_results[name] = []
context.eval_results[name].append(result)
def run(self):
"""
Execute the compression pass.
"""
context = Context(
place=self.place,
scope=self.scope,
train_graph=self.train_graph,
train_reader=self.train_reader,
eval_graph=self.eval_graph,
eval_reader=self.eval_reader,
teacher_graphs=self.teacher_graphs,
train_optimizer=self.train_optimizer,
distiller_optimizer=self.distiller_optimizer,
search_space=self.search_space)
self.context = context
if self.teacher_graphs:
context.put('teachers', self.teacher_graphs)
self._init_model(context)
if not context.optimize_graph:
if context.train_optimizer:
context.train_optimizer._name = 'train_opt'
context.optimize_graph = context.train_graph.get_optimize_graph(
context.train_optimizer, context.place, context.scope)
else:
context.optimize_graph = context.train_graph
context, self.strategies = self._load_checkpoint(context)
for strategy in self.strategies:
strategy.on_compression_begin(context)
if 'MKLDNNPostTrainingQuantStrategy' in [
i.__class__.__name__ for i in self.strategies
]:
return None
start = context.epoch_id
for epoch in range(start, self.epoch):
context.epoch_id = epoch
try:
for strategy in self.strategies:
strategy.on_epoch_begin(context)
self._train_one_epoch(context)
if self.eval_epoch and epoch % self.eval_epoch == 0:
self._eval(context)
self._save_checkpoint(context)
for strategy in self.strategies:
strategy.on_epoch_end(context)
except Exception:
_logger.error(traceback.format_exc())  # format_exc() returns the traceback string; print_exc() would log None
continue
for strategy in self.strategies:
strategy.on_compression_end(context)
return context.eval_graph
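# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only): the typical driver code for the
# Compressor above. Program, variable and reader construction are elided; the
# feed/fetch names and the YAML path are assumptions.
#
# import paddle.fluid as fluid
#
# place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace()
# compressor = Compressor(
#     place,
#     fluid.global_scope(),
#     train_program,                                  # Program with a loss op
#     train_reader=train_reader,
#     train_feed_list=[('image', image.name), ('label', label.name)],
#     train_fetch_list=[('loss', loss.name)],
#     eval_program=eval_program,
#     eval_reader=eval_reader,
#     eval_feed_list=[('image', image.name), ('label', label.name)],
#     eval_fetch_list=[('acc', acc.name)])
# compressor.config('./compress.yaml')    # epoch, strategies, checkpoint_path
# compressor.run()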
|
|
"""
.. _tut_artifacts_correct_ica:
Artifact Correction with ICA
============================
ICA finds directions in the feature space
corresponding to projections with high non-Gaussianity. We thus obtain
a decomposition into independent components, and the artifact's contribution
is localized in only a small number of components.
These components have to be correctly identified and removed.
If EOG or ECG recordings are available, they can be used in ICA to
automatically select the corresponding artifact components from the
decomposition. To do so, you first have to build an Epochs object around
blink or heartbeat events.
"""
import numpy as np
import mne
from mne.datasets import sample
from mne.preprocessing import ICA
from mne.preprocessing import create_eog_epochs, create_ecg_epochs
# getting some data ready
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True)
# 1Hz high pass is often helpful for fitting ICA
raw.filter(1., 40., n_jobs=2, fir_design='firwin')
picks_meg = mne.pick_types(raw.info, meg=True, eeg=False, eog=False,
stim=False, exclude='bads')
###############################################################################
# Before applying artifact correction please learn about your actual artifacts
# by reading :ref:`tut_artifacts_detect`.
###############################################################################
# Fit ICA
# -------
#
# ICA parameters:
n_components = 25 # if float, select n_components by explained variance of PCA
method = 'fastica' # for comparison with EEGLAB try "extended-infomax" here
decim = 3 # we need sufficient statistics, not all time points -> saves time
# we will also set state of the random number generator - ICA is a
# non-deterministic algorithm, but we want to have the same decomposition
# and the same order of components each time this tutorial is run
random_state = 23
###############################################################################
# Define the ICA object instance
ica = ICA(n_components=n_components, method=method, random_state=random_state)
print(ica)
###############################################################################
# we avoid fitting ICA on crazy environmental artifacts that would
# dominate the variance and decomposition
reject = dict(mag=5e-12, grad=4000e-13)
ica.fit(raw, picks=picks_meg, decim=decim, reject=reject)
print(ica)
###############################################################################
# Plot ICA components
ica.plot_components() # can you spot some potential bad guys?
###############################################################################
# Component properties
# --------------------
#
# Let's take a closer look at the properties of the first three independent components.
# first, component 0:
ica.plot_properties(raw, picks=0)
###############################################################################
# we can see that the data were filtered so the spectrum plot is not
# very informative, let's change that:
ica.plot_properties(raw, picks=0, psd_args={'fmax': 35.})
###############################################################################
# we can also take a look at multiple different components at once:
ica.plot_properties(raw, picks=[1, 2], psd_args={'fmax': 35.})
###############################################################################
# Instead of opening individual figures with component properties, we can
# also pass an instance of Raw or Epochs in the ``inst`` argument to
# ``ica.plot_components``. This would allow us to open component properties
# interactively by clicking on individual component topomaps. In the notebook
# this works only when running matplotlib in interactive mode (``%matplotlib``).
# uncomment the code below to test the interactive mode of plot_components:
# ica.plot_components(picks=range(10), inst=raw)
###############################################################################
# Advanced artifact detection
# ---------------------------
#
# Let's use a more efficient way to find artefacts
eog_average = create_eog_epochs(raw, reject=dict(mag=5e-12, grad=4000e-13),
picks=picks_meg).average()
eog_epochs = create_eog_epochs(raw, reject=reject) # get single EOG trials
eog_inds, scores = ica.find_bads_eog(eog_epochs) # find via correlation
ica.plot_scores(scores, exclude=eog_inds) # look at r scores of components
# we can see that only one component is highly correlated and that this
# component got detected by our correlation analysis (red).
ica.plot_sources(eog_average, exclude=eog_inds) # look at source time course
###############################################################################
# We can take a look at the properties of that component, now using the
# data epoched with respect to EOG events.
# We will also use a little bit of smoothing along the trials axis in the
# epochs image:
ica.plot_properties(eog_epochs, picks=eog_inds, psd_args={'fmax': 35.},
image_args={'sigma': 1.})
###############################################################################
# That component is showing a prototypical average vertical EOG time course.
#
# Pay attention to the labels, a customized read-out of the
# ``mne.preprocessing.ICA.labels_``:
print(ica.labels_)
###############################################################################
# These labels were used by the plotters and are added automatically
# by artifact detection functions. You can also manually edit them to annotate
# components.
#
# Now let's see how we would modify our signals if we removed this component
# from the data.
ica.plot_overlay(eog_average, exclude=eog_inds, show=False)
# red -> before, black -> after. Yes! We remove quite a lot!
# to definitely register this component as a bad one to be removed
# there is the ``ica.exclude`` attribute, a simple Python list
ica.exclude.extend(eog_inds)
# from now on the ICA will reject this component even if no exclude
# parameter is passed, and this information will be stored to disk
# on saving
# uncomment this for reading and writing
# ica.save('my-ica.fif')
# ica = read_ica('my-ica.fif')
###############################################################################
# Note that nothing is yet removed from the raw data. To remove the effects of
# the rejected components,
# :meth:`the apply method <mne.preprocessing.ICA.apply>` must be called.
# Here we apply it on the copy of the first ten seconds, so that the rest of
# this tutorial still works as intended.
raw_copy = raw.copy().crop(0, 10)
ica.apply(raw_copy)
raw_copy.plot() # check the result
###############################################################################
# Exercise: find and remove ECG artifacts using ICA!
ecg_epochs = create_ecg_epochs(raw, tmin=-.5, tmax=.5)
ecg_inds, scores = ica.find_bads_ecg(ecg_epochs, method='ctps')
ica.plot_properties(ecg_epochs, picks=ecg_inds, psd_args={'fmax': 35.})
###############################################################################
# What if we don't have an EOG channel?
# -------------------------------------
#
# We could either:
#
# 1. make a bipolar reference from frontal EEG sensors and use it as a virtual
# EOG channel (a commented sketch follows after this list). This can be tricky
# though, as you can only hope that the frontal EEG channels only reflect EOG
# and not brain dynamics in the prefrontal cortex.
# 2. go for a semi-automated approach, using template matching.
#
# In MNE-Python option 2 is easily achievable and it might give better results,
# so let's have a look at it.
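###############################################################################
# First, a minimal commented sketch of option 1 (illustration only, not part
# of the original tutorial): build a virtual EOG channel from two frontal EEG
# electrodes and hand it to ``find_bads_eog``. The electrode names
# ('EEG 001', 'EEG 002') and the virtual channel name are assumptions and must
# be adapted to your montage; uncomment to try it on data that still contains
# EEG channels.
# raw_with_eeg = mne.io.read_raw_fif(raw_fname, preload=True)
# raw_with_eeg = mne.set_bipolar_reference(raw_with_eeg, anode='EEG 001',
#                                          cathode='EEG 002',
#                                          ch_name='EOG-virtual')
# virtual_inds, virtual_scores = ica.find_bads_eog(raw_with_eeg,
#                                                  ch_name='EOG-virtual')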
from mne.preprocessing.ica import corrmap # noqa
###############################################################################
# The idea behind corrmap is that artefact patterns are similar across subjects
# and can thus be identified by correlating the different patterns resulting
# from each solution with a template. The procedure is therefore
# semi-automatic. :func:`mne.preprocessing.corrmap` hence takes a list of
# ICA solutions and a template, that can be an index or an array.
#
# As we don't have different subjects or runs available today, here we will
# simulate ICA solutions from different subjects by fitting ICA models to
# different parts of the same recording. Then we will use one of the components
# from our original ICA as a template in order to detect sufficiently similar
# components in the simulated ICAs.
#
# The following block of code simulates having ICA solutions from different
# runs/subjects so it should not be used in real analysis - use independent
# data sets instead.
# We'll start by simulating a group of subjects or runs from a subject
start, stop = [0, raw.times[-1]]
intervals = np.linspace(start, stop, 4, dtype=float)  # the np.float alias was removed in recent NumPy
icas_from_other_data = list()
raw.pick_types(meg=True, eeg=False) # take only MEG channels
for ii, start in enumerate(intervals):
if ii + 1 < len(intervals):
stop = intervals[ii + 1]
print('fitting ICA from {0} to {1} seconds'.format(start, stop))
this_ica = ICA(n_components=n_components, method=method).fit(
raw, start=start, stop=stop, reject=reject)
icas_from_other_data.append(this_ica)
###############################################################################
# Remember, don't do this at home! Start by reading in a collection of ICA
# solutions instead. Something like:
#
# ``icas = [mne.preprocessing.read_ica(fname) for fname in ica_fnames]``
print(icas_from_other_data)
###############################################################################
# We use our original ICA as reference.
reference_ica = ica
###############################################################################
# Investigate our reference ICA:
reference_ica.plot_components()
###############################################################################
# Which one is the bad EOG component?
# Here we rely on our previous detection algorithm. You would need to decide
# yourself if no automatic detection was available.
reference_ica.plot_sources(eog_average, exclude=eog_inds)
###############################################################################
# Indeed it looks like an EOG, also in the average time course.
#
# We construct a list where our reference run is the first element. Then we
# can detect similar components from the other runs (the other ICA objects)
# using :func:`mne.preprocessing.corrmap`. So our template must be a tuple like
# (reference_run_index, component_index):
icas = [reference_ica] + icas_from_other_data
template = (0, eog_inds[0])
###############################################################################
# Now we can run the CORRMAP algorithm.
fig_template, fig_detected = corrmap(icas, template=template, label="blinks",
show=True, threshold=.8, ch_type='mag')
###############################################################################
# Nice, we have found similar ICs from the other (simulated) runs!
# In this way, you can detect a type of artifact semi-automatically for example
# for all subjects in a study.
# The detected template can also be retrieved as an array and stored; this
# array can be used as an alternative template to
# :func:`mne.preprocessing.corrmap`.
eog_component = reference_ica.get_components()[:, eog_inds[0]]
###############################################################################
# If you calculate a new ICA solution, you can provide this array instead of
# specifying the template in reference to the list of ICA objects you want
# to run CORRMAP on. (Of course, the retrieved component map arrays can
# also be used for other purposes than artifact correction.)
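# A hedged sketch of that array-template variant (illustration only; the
# label is a made-up name, ``eog_component`` is the array retrieved above):
#
# corrmap(icas, template=eog_component, label="blinks_from_array",
#         threshold=.8, ch_type='mag', show=False)
#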
#
# You can also use SSP to correct for artifacts. It is a bit simpler and
# faster but also less precise than ICA and requires that you know the event
# timing of your artifact.
# See :ref:`tut_artifacts_correct_ssp`.
|
|
import os
from git.objects import Object, Commit
from git.util import (
join_path,
join_path_native,
to_native_path_linux,
assure_directory_exists
)
from gitdb.exc import (
BadObject,
BadName
)
from gitdb.util import (
join,
dirname,
isdir,
exists,
isfile,
rename,
hex_to_bin,
LockedFD
)
from git.compat import (
string_types,
defenc
)
from .log import RefLog
__all__ = ["SymbolicReference"]
class SymbolicReference(object):
"""Represents a special case of a reference such that this reference is symbolic.
It does not point to a specific commit, but to another Head, which itself
specifies a commit.
A typical example for a symbolic reference is HEAD."""
__slots__ = ("repo", "path")
_resolve_ref_on_create = False
_points_to_commits_only = True
_common_path_default = ""
_remote_common_path_default = "refs/remotes"
_id_attribute_ = "name"
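# Hedged usage sketch (comments only, illustration): SymbolicReference is
# usually reached through a Repo object; the repository path below is an
# assumption.
#
#   import git
#   repo = git.Repo('/path/to/repo')
#   head = repo.head                  # HEAD, a SymbolicReference subclass
#   head.path                         # 'HEAD'
#   head.reference.path               # e.g. 'refs/heads/master'
#   head.commit.hexsha                # recursively dereferenced commit sha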
def __init__(self, repo, path):
self.repo = repo
self.path = path
def __str__(self):
return self.path
def __repr__(self):
return '<git.%s "%s">' % (self.__class__.__name__, self.path)
def __eq__(self, other):
if hasattr(other, 'path'):
return self.path == other.path
return False
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(self.path)
@property
def name(self):
"""
:return:
In case of symbolic references, the shortest assumable name
is the path itself."""
return self.path
@property
def abspath(self):
return join_path_native(self.repo.git_dir, self.path)
@classmethod
def _get_packed_refs_path(cls, repo):
return join(repo.git_dir, 'packed-refs')
@classmethod
def _iter_packed_refs(cls, repo):
"""Returns an iterator yielding pairs of sha1/path pairs (as bytes) for the corresponding refs.
:note: The packed refs file will be kept open as long as we iterate"""
try:
with open(cls._get_packed_refs_path(repo), 'rt') as fp:
for line in fp:
line = line.strip()
if not line:
continue
if line.startswith('#'):
if line.startswith('# pack-refs with:') and not line.endswith('peeled'):
raise TypeError("PackingType of packed-Refs not understood: %r" % line)
# END abort if we do not understand the packing scheme
continue
# END parse comment
# skip dereferenced tag object entries - previous line was actual
# tag reference for it
if line[0] == '^':
continue
yield tuple(line.split(' ', 1))
# END for each line
except (OSError, IOError):
return  # PEP 479: raising StopIteration inside a generator becomes a RuntimeError on Python 3.7+
# END no packed-refs file handling
# NOTE: Had try-finally block around here to close the fp,
# but some python version wouldn't allow yields within that.
# I believe files are closing themselves on destruction, so it is
# alright.
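# Illustrative shape of the packed-refs file parsed above (the hexshas are
# made up):
#
#   # pack-refs with: peeled fully-peeled
#   95dcfa3633004da0049d3d0fa03f80589cbcaf31 refs/heads/master
#   d9d1d04ad9d5a071c7a48a6d1b3bb1e0e5086ee7 refs/tags/v1.0
#   ^f4c4d6331c4d9d0a08c3a0e471c3b2a0b6f6d3e2
#
# Lines starting with '^' give the peeled target of the preceding tag and are
# skipped by the loop above.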
@classmethod
def dereference_recursive(cls, repo, ref_path):
"""
:return: hexsha stored in the reference at the given ref_path, recursively dereferencing all
intermediate references as required
:param repo: the repository containing the reference at ref_path"""
while True:
hexsha, ref_path = cls._get_ref_info(repo, ref_path)
if hexsha is not None:
return hexsha
# END recursive dereferencing
@classmethod
def _get_ref_info(cls, repo, ref_path):
"""Return: (str(sha), str(target_ref_path)) if available, the sha the file at
ref_path points to, or None. target_ref_path is the reference we
point to, or None"""
tokens = None
try:
fp = open(join(repo.git_dir, ref_path), 'rt')
value = fp.read().rstrip()
fp.close()
# Don't only split on spaces, but on whitespace, which allows parsing lines like
# 60b64ef992065e2600bfef6187a97f92398a9144 branch 'master' of git-server:/path/to/repo
tokens = value.split()
assert(len(tokens) != 0)
except (OSError, IOError):
# Probably we are just packed, find our entry in the packed refs file
# NOTE: We are not a symbolic ref if we are in a packed file, as these
# are excluded explicitly
for sha, path in cls._iter_packed_refs(repo):
if path != ref_path:
continue
# sha will be used
tokens = sha, path
break
# END for each packed ref
# END handle packed refs
if tokens is None:
raise ValueError("Reference at %r does not exist" % ref_path)
# is it a reference ?
if tokens[0] == 'ref:':
return (None, tokens[1])
# it's a commit
if repo.re_hexsha_only.match(tokens[0]):
return (tokens[0], None)
raise ValueError("Failed to parse reference information from %r" % ref_path)
def _get_object(self):
"""
:return:
The object our ref currently refers to. Refs can be cached, they will
always point to the actual object as it gets re-created on each query"""
# have to be dynamic here as we may be a tag which can point to anything
# Our path will be resolved to the hexsha which will be used accordingly
return Object.new_from_sha(self.repo, hex_to_bin(self.dereference_recursive(self.repo, self.path)))
def _get_commit(self):
"""
:return:
Commit object we point to, works for detached and non-detached
SymbolicReferences. The symbolic reference will be dereferenced recursively."""
obj = self._get_object()
if obj.type == 'tag':
obj = obj.object
# END dereference tag
if obj.type != Commit.type:
raise TypeError("Symbolic Reference pointed to object %r, commit was required" % obj)
# END handle type
return obj
def set_commit(self, commit, logmsg=None):
"""As set_object, but restricts the type of object to be a Commit
:raise ValueError: If commit is not a Commit object or doesn't point to
a commit
:return: self"""
# check the type - assume the best if it is a base-string
invalid_type = False
if isinstance(commit, Object):
invalid_type = commit.type != Commit.type
elif isinstance(commit, SymbolicReference):
invalid_type = commit.object.type != Commit.type
else:
try:
invalid_type = self.repo.rev_parse(commit).type != Commit.type
except (BadObject, BadName):
raise ValueError("Invalid object: %s" % commit)
# END handle exception
# END verify type
if invalid_type:
raise ValueError("Need commit, got %r" % commit)
# END handle raise
# we leave strings to the rev-parse method below
self.set_object(commit, logmsg)
return self
def set_object(self, object, logmsg=None):
"""Set the object we point to, possibly dereference our symbolic reference first.
If the reference does not exist, it will be created
:param object: a refspec, a SymbolicReference or an Object instance. SymbolicReferences
will be dereferenced beforehand to obtain the object they point to
:param logmsg: If not None, the message will be used in the reflog entry to be
written. Otherwise the reflog is not altered
:note: plain SymbolicReferences may not actually point to objects by convention
:return: self"""
if isinstance(object, SymbolicReference):
object = object.object
# END resolve references
is_detached = True
try:
is_detached = self.is_detached
except ValueError:
pass
# END handle non-existing ones
if is_detached:
return self.set_reference(object, logmsg)
# set the commit on our reference
return self._get_reference().set_object(object, logmsg)
commit = property(_get_commit, set_commit, doc="Query or set commits directly")
object = property(_get_object, set_object, doc="Return the object our ref currently refers to")
def _get_reference(self):
""":return: Reference Object we point to
:raise TypeError: If this symbolic reference is detached, hence it doesn't point
to a reference, but to a commit"""
sha, target_ref_path = self._get_ref_info(self.repo, self.path)
if target_ref_path is None:
raise TypeError("%s is a detached symbolic reference as it points to %r" % (self, sha))
return self.from_path(self.repo, target_ref_path)
def set_reference(self, ref, logmsg=None):
"""Set ourselves to the given ref. It will stay a symbol if the ref is a Reference.
Otherwise an Object, given as Object instance or refspec, is assumed and if valid,
will be set, which effectively detaches the reference if it was a purely
symbolic one.
:param ref: SymbolicReference instance, Object instance or refspec string
Only if the ref is a SymbolicRef instance will we point to it. Everything
else is dereferenced to obtain the actual object.
:param logmsg: If set to a string, the message will be used in the reflog.
Otherwise, a reflog entry is not written for the changed reference.
The previous commit of the entry will be the commit we point to now.
See also: log_append()
:return: self
:note: This symbolic reference will not be dereferenced. For that, see
``set_object(...)``"""
write_value = None
obj = None
if isinstance(ref, SymbolicReference):
write_value = "ref: %s" % ref.path
elif isinstance(ref, Object):
obj = ref
write_value = ref.hexsha
elif isinstance(ref, string_types):
try:
obj = self.repo.rev_parse(ref + "^{}") # optionally deref tags
write_value = obj.hexsha
except (BadObject, BadName):
raise ValueError("Could not extract object from %s" % ref)
# END end try string
else:
raise ValueError("Unrecognized Value: %r" % ref)
# END try commit attribute
# typecheck
if obj is not None and self._points_to_commits_only and obj.type != Commit.type:
raise TypeError("Require commit, got %r" % obj)
# END verify type
oldbinsha = None
if logmsg is not None:
try:
oldbinsha = self.commit.binsha
except ValueError:
oldbinsha = Commit.NULL_BIN_SHA
# END handle non-existing
# END retrieve old hexsha
fpath = self.abspath
assure_directory_exists(fpath, is_file=True)
lfd = LockedFD(fpath)
fd = lfd.open(write=True, stream=True)
fd.write(write_value.encode('ascii'))
lfd.commit()
# Adjust the reflog
if logmsg is not None:
self.log_append(oldbinsha, logmsg)
# END handle reflog
return self
# aliased reference
reference = property(_get_reference, set_reference, doc="Returns the Reference we point to")
ref = reference
def is_valid(self):
"""
:return:
True if the reference is valid, hence it can be read and points to
a valid object or reference."""
try:
self.object
except (OSError, ValueError):
return False
else:
return True
@property
def is_detached(self):
"""
:return:
True if we are a detached reference, hence we point to a specific commit
instead of to another reference"""
try:
self.ref
return False
except TypeError:
return True
def log(self):
"""
:return: RefLog for this reference. Its last entry reflects the latest change
applied to this reference
.. note:: As the log is parsed every time, it's recommended to cache it for use
instead of calling this method repeatedly. It should be considered read-only."""
return RefLog.from_file(RefLog.path(self))
def log_append(self, oldbinsha, message, newbinsha=None):
"""Append a logentry to the logfile of this ref
:param oldbinsha: binary sha this ref used to point to
:param message: A message describing the change
:param newbinsha: The sha the ref points to now. If None, our current commit sha
will be used
:return: added RefLogEntry instance"""
# NOTE: we use the committer of the currently active commit - this should be
# correct to allow overriding the committer on a per-commit level.
# See https://github.com/gitpython-developers/GitPython/pull/146
try:
committer_or_reader = self.commit.committer
except ValueError:
committer_or_reader = self.repo.config_reader()
# end handle newly cloned repositories
return RefLog.append_entry(committer_or_reader, RefLog.path(self), oldbinsha,
(newbinsha is None and self.commit.binsha) or newbinsha,
message)
def log_entry(self, index):
""":return: RefLogEntry at the given index
:param index: python list compatible positive or negative index
.. note:: This method must read part of the reflog during execution, hence
it should be used sparingly, or only if you need just one index.
In that case, it will be faster than the ``log()`` method"""
return RefLog.entry_at(RefLog.path(self), index)
@classmethod
def to_full_path(cls, path):
"""
:return: string with a full repository-relative path which can be used to initialize
a Reference instance, for instance by using ``Reference.from_path``"""
if isinstance(path, SymbolicReference):
path = path.path
full_ref_path = path
if not cls._common_path_default:
return full_ref_path
if not path.startswith(cls._common_path_default + "/"):
full_ref_path = '%s/%s' % (cls._common_path_default, path)
return full_ref_path
@classmethod
def delete(cls, repo, path):
"""Delete the reference at the given path
:param repo:
Repository to delete the reference from
:param path:
Short or full path pointing to the reference, i.e. refs/myreference
or just "myreference", hence 'refs/' is implied.
Alternatively the symbolic reference to be deleted"""
full_ref_path = cls.to_full_path(path)
abs_path = join(repo.git_dir, full_ref_path)
if exists(abs_path):
os.remove(abs_path)
else:
# check packed refs
pack_file_path = cls._get_packed_refs_path(repo)
try:
reader = open(pack_file_path, 'rb')
except (OSError, IOError):
pass  # it didn't exist at all
else:
new_lines = list()
made_change = False
dropped_last_line = False
for line in reader:
# keep line if it is a comment or if the ref to delete is not
# in the line
# If we deleted the last line and this one is a tag-reference object,
# we drop it as well
line = line.decode(defenc)
if (line.startswith('#') or full_ref_path not in line) and \
(not dropped_last_line or dropped_last_line and not line.startswith('^')):
new_lines.append(line)
dropped_last_line = False
continue
# END skip comments and lines without our path
# drop this line
made_change = True
dropped_last_line = True
# END for each line in packed refs
reader.close()
# write the new lines
if made_change:
# write-binary is required, otherwise windows will
# open the file in text mode and change LF to CRLF !
open(pack_file_path, 'wb').writelines(l.encode(defenc) for l in new_lines)
# END write out file
# END open exception handling
# END handle deletion
# delete the reflog
reflog_path = RefLog.path(cls(repo, full_ref_path))
if os.path.isfile(reflog_path):
os.remove(reflog_path)
# END remove reflog
@classmethod
def _create(cls, repo, path, resolve, reference, force, logmsg=None):
"""internal method used to create a new symbolic reference.
If resolve is False, the reference will be taken as is, creating
a proper symbolic reference. Otherwise it will be resolved to the
corresponding object and a detached symbolic reference will be created
instead"""
full_ref_path = cls.to_full_path(path)
abs_ref_path = join(repo.git_dir, full_ref_path)
# figure out target data
target = reference
if resolve:
target = repo.rev_parse(str(reference))
if not force and isfile(abs_ref_path):
target_data = str(target)
if isinstance(target, SymbolicReference):
target_data = target.path
if not resolve:
target_data = "ref: " + target_data
existing_data = open(abs_ref_path, 'rb').read().decode(defenc).strip()
if existing_data != target_data:
raise OSError("Reference at %r does already exist, pointing to %r, requested was %r" %
(full_ref_path, existing_data, target_data))
# END no force handling
ref = cls(repo, full_ref_path)
ref.set_reference(target, logmsg)
return ref
@classmethod
def create(cls, repo, path, reference='HEAD', force=False, logmsg=None):
"""Create a new symbolic reference, hence a reference pointing to another reference.
:param repo:
Repository to create the reference in
:param path:
full path at which the new symbolic reference is supposed to be
created at, i.e. "NEW_HEAD" or "symrefs/my_new_symref"
:param reference:
The reference to which the new symbolic reference should point to.
If it is a commit-ish, the symbolic ref will be detached.
:param force:
if True, force creation even if a symbolic reference with that name already exists.
Raise OSError otherwise
:param logmsg:
If not None, the message to append to the reflog. Otherwise no reflog
entry is written.
:return: Newly created symbolic Reference
:raise OSError:
If a (Symbolic)Reference with the same name but different contents
already exists.
:note: This does not alter the current HEAD, index or Working Tree"""
return cls._create(repo, path, cls._resolve_ref_on_create, reference, force, logmsg)
def rename(self, new_path, force=False):
"""Rename self to a new path
:param new_path:
Either a simple name or a full path, i.e. new_name or features/new_name.
The prefix refs/ is implied for references and will be set as needed.
In case this is a symbolic ref, there is no implied prefix
:param force:
If True, the rename will succeed even if a head with the target name
already exists. It will be overwritten in that case
:return: self
:raise OSError: In case a file at path with different contents already exists"""
new_path = self.to_full_path(new_path)
if self.path == new_path:
return self
new_abs_path = join(self.repo.git_dir, new_path)
cur_abs_path = join(self.repo.git_dir, self.path)
if isfile(new_abs_path):
if not force:
# if they point to the same file, it's not an error
if open(new_abs_path, 'rb').read().strip() != open(cur_abs_path, 'rb').read().strip():
raise OSError("File at path %r already exists" % new_abs_path)
# else: we could remove ourselves and use the other one, but
# for clarity we just continue as usual
# END not force handling
os.remove(new_abs_path)
# END handle existing target file
dname = dirname(new_abs_path)
if not isdir(dname):
os.makedirs(dname)
# END create directory
rename(cur_abs_path, new_abs_path)
self.path = new_path
return self
@classmethod
def _iter_items(cls, repo, common_path=None):
if common_path is None:
common_path = cls._common_path_default
rela_paths = set()
# walk loose refs
# Currently we do not follow links
for root, dirs, files in os.walk(join_path_native(repo.git_dir, common_path)):
if 'refs/' not in root: # skip non-refs subfolders
refs_id = [d for d in dirs if d == 'refs']
if refs_id:
dirs[0:] = ['refs']
# END prune non-refs folders
for f in files:
if f == 'packed-refs':
continue
abs_path = to_native_path_linux(join_path(root, f))
rela_paths.add(abs_path.replace(to_native_path_linux(repo.git_dir) + '/', ""))
# END for each file in root directory
# END for each directory to walk
# read packed refs
for sha, rela_path in cls._iter_packed_refs(repo):
if rela_path.startswith(common_path):
rela_paths.add(rela_path)
# END relative path matches common path
# END packed refs reading
# return paths in sorted order
for path in sorted(rela_paths):
try:
yield cls.from_path(repo, path)
except ValueError:
continue
# END for each sorted relative refpath
@classmethod
def iter_items(cls, repo, common_path=None):
"""Find all refs in the repository
:param repo: is the Repo
:param common_path:
Optional keyword argument to the path which is to be shared by all
returned Ref objects.
Defaults to the class-specific portion if None, assuring that only
refs suitable for the actual class are returned.
:return:
git.SymbolicReference[], each of them is guaranteed to be a symbolic
ref which is not detached and pointing to a valid ref
The list is lexicographically sorted
The returned objects represent actual subclasses, such as Head or TagReference"""
return (r for r in cls._iter_items(repo, common_path) if r.__class__ == SymbolicReference or not r.is_detached)
@classmethod
def from_path(cls, repo, path):
"""
:param path: full .git-directory-relative path name to the Reference to instantiate
:note: use to_full_path() if you only have a partial path of a known Reference Type
:return:
Instance of type Reference, Head, or Tag
depending on the given path"""
if not path:
raise ValueError("Cannot create Reference from %r" % path)
# Names like HEAD are inserted after the refs module is imported - we have an import dependency
# cycle and don't want to import these names in-function
from . import HEAD, Head, RemoteReference, TagReference, Reference
for ref_type in (HEAD, Head, RemoteReference, TagReference, Reference, SymbolicReference):
try:
instance = ref_type(repo, path)
if instance.__class__ == SymbolicReference and instance.is_detached:
raise ValueError("SymbolRef was detached, we drop it")
return instance
except ValueError:
pass
# END exception handling
# END for each type to try
raise ValueError("Could not find reference type suitable to handle path %r" % path)
def is_remote(self):
""":return: True if this symbolic reference points to a remote branch"""
return self.path.startswith(self._remote_common_path_default + "/")
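# A minimal usage sketch of the reference API above (assumes GitPython is installed and
# the current directory is a git repository; printed values are illustrative only).
if __name__ == "__main__":
    from git import Repo

    repo = Repo(".")
    head = repo.head                   # HEAD is a SymbolicReference subclass
    print(head.is_valid())             # True if it points to a valid object or ref
    if not head.is_detached:
        print(head.reference.path)     # e.g. "refs/heads/master"
    print(head.commit.hexsha)          # recursively dereferenced commit sha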
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for GBDT estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy as np
from tensorflow.contrib.boosted_trees.estimator_batch import estimator
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.layers.python.layers import feature_column as contrib_feature_column
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.feature_column import feature_column_lib as core_feature_column
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
from tensorflow.python.training import checkpoint_utils
def _train_input_fn():
features = {"x": constant_op.constant([[2.], [1.], [1.]])}
label = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
return features, label
def _multiclass_train_input_fn():
features = {
"x": constant_op.constant([[2.], [1.], [1.], [5.], [3.5], [4.6], [3.5]])
}
label = constant_op.constant([[1], [0], [0], [2], [2], [0], [1]],
dtype=dtypes.int32)
return features, label
def _ranking_train_input_fn():
features = {
"a.f1": constant_op.constant([[3.], [0.3], [1.]]),
"a.f2": constant_op.constant([[0.1], [3.], [1.]]),
"b.f1": constant_op.constant([[13.], [0.4], [5.]]),
"b.f2": constant_op.constant([[1.], [3.], [0.01]]),
}
label = constant_op.constant([[0], [0], [1]], dtype=dtypes.int32)
return features, label
def _eval_input_fn():
features = {"x": constant_op.constant([[1.], [2.], [2.]])}
label = constant_op.constant([[0], [1], [1]], dtype=dtypes.int32)
return features, label
def _infer_ranking_train_input_fn():
features = {
"f1": constant_op.constant([[3.], [2], [1.]]),
"f2": constant_op.constant([[0.1], [3.], [1.]])
}
return features, None
_QUANTILE_REGRESSION_SIZE = 1000
def _quantile_regression_input_fns(two_dimension=False):
# The data generation is taken from
# http://scikit-learn.org/stable/auto_examples/ensemble/plot_gradient_boosting_quantile.html
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
def g(x):
"""The function to predict."""
return x * np.cos(x)
# Training data.
x = np.atleast_2d(np.random.uniform(0, 10.0,
size=_QUANTILE_REGRESSION_SIZE)).T
x = x.astype(np.float32)
# Labels.
if not two_dimension:
y = f(x).ravel()
else:
y = np.column_stack((f(x).ravel(), g(x).ravel()))
# Add random noise.
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y_original = y.astype(np.float32)
if not two_dimension:
y = y.reshape(_QUANTILE_REGRESSION_SIZE, 1)
train_input_fn = numpy_io.numpy_input_fn(
x=x,
y=y,
batch_size=_QUANTILE_REGRESSION_SIZE,
num_epochs=None,
shuffle=True)
# Test on the training data to make sure the predictions are calibrated.
test_input_fn = numpy_io.numpy_input_fn(
x=x,
y=y,
batch_size=_QUANTILE_REGRESSION_SIZE,
num_epochs=1,
shuffle=False)
return train_input_fn, test_input_fn, y_original
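# Note on the calibration check used by the quantile-regression tests below: for a model
# trained on the 0.95 quantile, roughly 95% of the training labels should fall below the
# predictions, i.e.
#   frac_below_upper = np.count_nonzero(upper > y) / float(len(y)) ~= 0.95
# which is why the assertions accept a +/- 3% band (0.92 to 0.98).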
class BoostedTreeEstimatorTest(test_util.TensorFlowTestCase):
def setUp(self):
self._export_dir_base = tempfile.mkdtemp() + "export/"
gfile.MkDir(self._export_dir_base)
def _assert_checkpoint(self, model_dir, global_step):
reader = checkpoint_utils.load_checkpoint(model_dir)
self.assertEqual(global_step, reader.get_tensor(ops.GraphKeys.GLOBAL_STEP))
def testFitAndEvaluateDontThrowException(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.GradientBoostedDecisionTreeClassifier(
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=[contrib_feature_column.real_valued_column("x")])
classifier.fit(input_fn=_train_input_fn, steps=15)
classifier.evaluate(input_fn=_eval_input_fn, steps=1)
classifier.export(self._export_dir_base)
def testThatLeafIndexIsInPredictions(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.GradientBoostedDecisionTreeClassifier(
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=[contrib_feature_column.real_valued_column("x")],
output_leaf_index=True)
classifier.fit(input_fn=_train_input_fn, steps=15)
result_iter = classifier.predict(input_fn=_eval_input_fn)
for prediction_dict in result_iter:
self.assertTrue("leaf_index" in prediction_dict)
self.assertTrue("logits" in prediction_dict)
def testFitAndEvaluateDontThrowExceptionWithCoreForEstimator(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
# Use core head
head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
model = estimator.GradientBoostedDecisionTreeEstimator(
head=head_fn,
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=[core_feature_column.numeric_column("x")],
use_core_libs=True)
model.fit(input_fn=_train_input_fn, steps=15)
model.evaluate(input_fn=_eval_input_fn, steps=1)
model.export(self._export_dir_base)
def testFitAndEvaluateDontThrowExceptionWithCoreForClassifier(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.GradientBoostedDecisionTreeClassifier(
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=[core_feature_column.numeric_column("x")],
use_core_libs=True)
classifier.fit(input_fn=_train_input_fn, steps=15)
classifier.evaluate(input_fn=_eval_input_fn, steps=1)
classifier.export(self._export_dir_base)
def testFitAndEvaluateDontThrowExceptionWithCoreForRegressor(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
regressor = estimator.GradientBoostedDecisionTreeRegressor(
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=[core_feature_column.numeric_column("x")],
use_core_libs=True)
regressor.fit(input_fn=_train_input_fn, steps=15)
regressor.evaluate(input_fn=_eval_input_fn, steps=1)
regressor.export(self._export_dir_base)
def testRankingDontThrowExceptionForEstimator(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
model = estimator.GradientBoostedDecisionTreeRanker(
head=head_fn,
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
use_core_libs=True,
feature_columns=[
core_feature_column.numeric_column("f1"),
core_feature_column.numeric_column("f2")
],
ranking_model_pair_keys=("a", "b"))
model.fit(input_fn=_ranking_train_input_fn, steps=1000)
model.evaluate(input_fn=_ranking_train_input_fn, steps=1)
model.predict(input_fn=_infer_ranking_train_input_fn)
def testDoesNotOverrideGlobalSteps(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 2
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.GradientBoostedDecisionTreeClassifier(
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=[contrib_feature_column.real_valued_column("x")],
output_leaf_index=False)
classifier.fit(input_fn=_train_input_fn, steps=15)
# With no override of global steps, 5 steps were used.
self._assert_checkpoint(classifier.model_dir, global_step=5)
def testOverridesGlobalSteps(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 2
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.GradientBoostedDecisionTreeClassifier(
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=[contrib_feature_column.real_valued_column("x")],
output_leaf_index=False,
override_global_step_value=10000000)
classifier.fit(input_fn=_train_input_fn, steps=15)
self._assert_checkpoint(classifier.model_dir, global_step=10000000)
def testFitAndEvaluateMultiClassTreePerClassDontThrowException(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 3
learner_config.constraints.max_tree_depth = 1
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.TREE_PER_CLASS)
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.GradientBoostedDecisionTreeClassifier(
learner_config=learner_config,
n_classes=learner_config.num_classes,
num_trees=1,
examples_per_layer=7,
model_dir=model_dir,
config=config,
feature_columns=[contrib_feature_column.real_valued_column("x")])
classifier.fit(input_fn=_multiclass_train_input_fn, steps=100)
classifier.evaluate(input_fn=_eval_input_fn, steps=1)
classifier.export(self._export_dir_base)
result_iter = classifier.predict(input_fn=_eval_input_fn)
for prediction_dict in result_iter:
self.assertTrue("classes" in prediction_dict)
def testFitAndEvaluateMultiClassDiagonalDontThrowException(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 3
learner_config.constraints.max_tree_depth = 1
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.GradientBoostedDecisionTreeClassifier(
learner_config=learner_config,
n_classes=learner_config.num_classes,
num_trees=1,
examples_per_layer=7,
model_dir=model_dir,
config=config,
center_bias=False,
feature_columns=[contrib_feature_column.real_valued_column("x")])
classifier.fit(input_fn=_multiclass_train_input_fn, steps=100)
classifier.evaluate(input_fn=_eval_input_fn, steps=1)
classifier.export(self._export_dir_base)
result_iter = classifier.predict(input_fn=_eval_input_fn)
for prediction_dict in result_iter:
self.assertTrue("classes" in prediction_dict)
def testFitAndEvaluateMultiClassFullDontThrowException(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 3
learner_config.constraints.max_tree_depth = 1
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.FULL_HESSIAN)
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.GradientBoostedDecisionTreeClassifier(
learner_config=learner_config,
n_classes=learner_config.num_classes,
num_trees=1,
examples_per_layer=7,
model_dir=model_dir,
config=config,
center_bias=False,
feature_columns=[contrib_feature_column.real_valued_column("x")])
classifier.fit(input_fn=_multiclass_train_input_fn, steps=100)
classifier.evaluate(input_fn=_eval_input_fn, steps=1)
classifier.export(self._export_dir_base)
result_iter = classifier.predict(input_fn=_eval_input_fn)
for prediction_dict in result_iter:
self.assertTrue("classes" in prediction_dict)
# One dimensional quantile regression.
def testQuantileRegression(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 6
learner_config.growing_mode = learner_pb2.LearnerConfig.LAYER_BY_LAYER
learner_config.constraints.min_node_weight = 1 / _QUANTILE_REGRESSION_SIZE
learner_config.regularization.l2 = 1.0 / _QUANTILE_REGRESSION_SIZE
learner_config.regularization.l1 = 1.0 / _QUANTILE_REGRESSION_SIZE
learner_config.regularization.tree_complexity = (
1.0 / _QUANTILE_REGRESSION_SIZE)
train_input_fn, test_input_fn, y = _quantile_regression_input_fns()
# 95th percentile.
model_upper = estimator.GradientBoostedDecisionTreeQuantileRegressor(
quantiles=[0.95],
learner_config=learner_config,
num_trees=12,
examples_per_layer=_QUANTILE_REGRESSION_SIZE,
center_bias=False)
model_upper.fit(input_fn=train_input_fn, steps=1000)
result_iter = model_upper.predict(input_fn=test_input_fn)
upper = []
for prediction_dict in result_iter:
upper.append(prediction_dict["scores"])
frac_below_upper = round(1. * np.count_nonzero(upper > y) / len(y), 3)
# +/- 3%
self.assertTrue(frac_below_upper >= 0.92)
self.assertTrue(frac_below_upper <= 0.98)
# Multi-dimensional quantile regression.
def testQuantileRegressionMultiDimLabel(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 6
learner_config.growing_mode = learner_pb2.LearnerConfig.LAYER_BY_LAYER
learner_config.constraints.min_node_weight = 1 / _QUANTILE_REGRESSION_SIZE
learner_config.regularization.l2 = 1.0 / _QUANTILE_REGRESSION_SIZE
learner_config.regularization.l1 = 1.0 / _QUANTILE_REGRESSION_SIZE
learner_config.regularization.tree_complexity = (
1.0 / _QUANTILE_REGRESSION_SIZE)
train_input_fn, test_input_fn, y = _quantile_regression_input_fns(
two_dimension=True)
# 95th percentile.
model_upper = estimator.GradientBoostedDecisionTreeQuantileRegressor(
quantiles=[0.95],
learner_config=learner_config,
label_dimension=2,
num_trees=18,
examples_per_layer=_QUANTILE_REGRESSION_SIZE,
center_bias=False)
model_upper.fit(input_fn=train_input_fn, steps=1000)
result_iter = model_upper.predict(input_fn=test_input_fn)
upper = []
for prediction_dict in result_iter:
upper.append(prediction_dict["scores"])
count_below_upper = np.count_nonzero(upper > y, axis=0)
count_both_below_upper = np.count_nonzero(np.prod(upper > y, axis=1))
frac_below_upper_0 = round(1. * count_below_upper[0] / len(y), 3)
frac_below_upper_1 = round(1. * count_below_upper[1] / len(y), 3)
frac_both_below_upper = round(1. * count_both_below_upper / len(y), 3)
# +/- 3%
self.assertTrue(frac_below_upper_0 >= 0.92)
self.assertTrue(frac_below_upper_0 <= 0.98)
self.assertTrue(frac_below_upper_1 >= 0.92)
self.assertTrue(frac_below_upper_1 <= 0.98)
self.assertTrue(frac_both_below_upper >= 0.91)
self.assertTrue(frac_both_below_upper <= 0.99)
class CoreGradientBoostedDecisionTreeEstimators(test_util.TensorFlowTestCase):
def testTrainEvaluateInferDoesNotThrowError(self):
head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
est = estimator.CoreGradientBoostedDecisionTreeEstimator(
head=head_fn,
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=[core_feature_column.numeric_column("x")])
# Train for a few steps.
est.train(input_fn=_train_input_fn, steps=1000)
est.evaluate(input_fn=_eval_input_fn, steps=1)
est.predict(input_fn=_eval_input_fn)
def testRankingDontThrowExceptionForEstimator(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
est = estimator.CoreGradientBoostedDecisionTreeRanker(
head=head_fn,
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=[
core_feature_column.numeric_column("f1"),
core_feature_column.numeric_column("f2")
],
ranking_model_pair_keys=("a", "b"))
# Train for a few steps.
est.train(input_fn=_ranking_train_input_fn, steps=1000)
est.evaluate(input_fn=_ranking_train_input_fn, steps=1)
est.predict(input_fn=_infer_ranking_train_input_fn)
def testFitAndEvaluateMultiClassTreePerClassDontThrowException(self):
n_classes = 3
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = n_classes
learner_config.constraints.max_tree_depth = 1
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.TREE_PER_CLASS)
head_fn = estimator.core_multiclass_head(n_classes=n_classes)
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.CoreGradientBoostedDecisionTreeEstimator(
learner_config=learner_config,
head=head_fn,
num_trees=1,
center_bias=False,
examples_per_layer=7,
model_dir=model_dir,
config=config,
feature_columns=[core_feature_column.numeric_column("x")])
classifier.train(input_fn=_multiclass_train_input_fn, steps=100)
classifier.evaluate(input_fn=_multiclass_train_input_fn, steps=1)
classifier.predict(input_fn=_eval_input_fn)
def testFitAndEvaluateMultiClassDiagonalDontThrowException(self):
n_classes = 3
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = n_classes
learner_config.constraints.max_tree_depth = 1
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
head_fn = estimator.core_multiclass_head(n_classes=n_classes)
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.CoreGradientBoostedDecisionTreeEstimator(
learner_config=learner_config,
head=head_fn,
num_trees=1,
center_bias=False,
examples_per_layer=7,
model_dir=model_dir,
config=config,
feature_columns=[core_feature_column.numeric_column("x")])
classifier.train(input_fn=_multiclass_train_input_fn, steps=100)
classifier.evaluate(input_fn=_multiclass_train_input_fn, steps=1)
classifier.predict(input_fn=_eval_input_fn)
def testFitAndEvaluateMultiClassFullDontThrowException(self):
n_classes = 3
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = n_classes
learner_config.constraints.max_tree_depth = 1
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.FULL_HESSIAN)
head_fn = estimator.core_multiclass_head(n_classes=n_classes)
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.CoreGradientBoostedDecisionTreeEstimator(
learner_config=learner_config,
head=head_fn,
num_trees=1,
center_bias=False,
examples_per_layer=7,
model_dir=model_dir,
config=config,
feature_columns=[core_feature_column.numeric_column("x")])
classifier.train(input_fn=_multiclass_train_input_fn, steps=100)
classifier.evaluate(input_fn=_multiclass_train_input_fn, steps=1)
classifier.predict(input_fn=_eval_input_fn)
def testWeightedCategoricalColumn(self):
head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
feature_columns = [
core_feature_column.weighted_categorical_column(
categorical_column=core_feature_column
.categorical_column_with_vocabulary_list(
key="word", vocabulary_list=["the", "cat", "dog"]),
weight_feature_key="weight")
]
labels = np.array([[1], [1], [0], [0.]], dtype=np.float32)
def _make_input_fn():
def _input_fn():
features_dict = {}
# Sparse tensor representing
# example 0: "cat","the"
# example 1: "dog"
# example 2: -
# example 3: "the"
# Weights for the words: cat - 5, dog - 6, the - 1.
features_dict["word"] = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1], [1, 0], [3, 0]],
values=constant_op.constant(["the", "cat", "dog", "the"],
dtype=dtypes.string),
dense_shape=[4, 3])
features_dict["weight"] = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1], [1, 0], [3, 0]],
values=[1., 5., 6., 1.],
dense_shape=[4, 3])
return features_dict, labels
return _input_fn
est = estimator.CoreGradientBoostedDecisionTreeEstimator(
head=head_fn,
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=feature_columns)
input_fn = _make_input_fn()
est.train(input_fn=input_fn, steps=100)
est.evaluate(input_fn=input_fn, steps=1)
est.predict(input_fn=input_fn)
# Quantile regression in core is the same as in the non-core estimator, so we
# just check that it does not fail.
def testQuantileRegressionDoesNotThrowException(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
learner_config.growing_mode = learner_pb2.LearnerConfig.WHOLE_TREE
learner_config.constraints.min_node_weight = 1 / _QUANTILE_REGRESSION_SIZE
learner_config.regularization.l2 = 1.0 / _QUANTILE_REGRESSION_SIZE
learner_config.regularization.l1 = 1.0 / _QUANTILE_REGRESSION_SIZE
learner_config.regularization.tree_complexity = (
1.0 / _QUANTILE_REGRESSION_SIZE)
train_input_fn, test_input_fn, y = _quantile_regression_input_fns()
y = y.reshape(_QUANTILE_REGRESSION_SIZE, 1)
# 95th percentile.
model_upper = estimator.CoreGradientBoostedDecisionTreeQuantileRegressor(
quantiles=[0.95],
learner_config=learner_config,
num_trees=1,
examples_per_layer=_QUANTILE_REGRESSION_SIZE,
center_bias=False)
model_upper.train(input_fn=train_input_fn, steps=1000)
result_iter = model_upper.predict(input_fn=test_input_fn)
if __name__ == "__main__":
googletest.main()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generic training script that trains a model using a given dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from datasets import dataset_factory
from deployment import model_deploy
from nets import nets_factory
from preprocessing import preprocessing_factory
from utils.det_utils import encode_annos, losses, interpre_prediction
from configs.kitti_config import config
import tensorflow.contrib.slim as slim
# slim = tf.contrib.slim
tf.app.flags.DEFINE_string(
'master', '', 'The address of the TensorFlow master to use.')
tf.app.flags.DEFINE_string(
'train_dir', '/tmp/tfmodel/',
'Directory where checkpoints and event logs are written to.')
tf.app.flags.DEFINE_integer('num_clones', 1,
'Number of model clones to deploy.')
tf.app.flags.DEFINE_boolean('clone_on_cpu', False,
'Use CPUs to deploy clones.')
tf.app.flags.DEFINE_integer('worker_replicas', 1, 'Number of worker replicas.')
tf.app.flags.DEFINE_integer(
'num_ps_tasks', 0,
'The number of parameter servers. If the value is 0, then the parameters '
'are handled locally by the worker.')
tf.app.flags.DEFINE_integer(
'num_readers', 4,
'The number of parallel readers that read data from the dataset.')
tf.app.flags.DEFINE_integer(
'num_preprocessing_threads', 4,
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_integer(
'log_every_n_steps', 10,
'The frequency with which logs are printed.')
tf.app.flags.DEFINE_integer(
'save_summaries_secs', 600,
'The frequency with which summaries are saved, in seconds.')
tf.app.flags.DEFINE_integer(
'save_interval_secs', 600,
'The frequency with which the model is saved, in seconds.')
tf.app.flags.DEFINE_integer(
'task', 0, 'Task id of the replica running the training.')
######################
# Optimization Flags #
######################
tf.app.flags.DEFINE_float(
'weight_decay', 0.00004, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_string(
'optimizer', 'rmsprop',
'The name of the optimizer, one of "adadelta", "adagrad", "adam",'
'"ftrl", "momentum", "sgd" or "rmsprop".')
tf.app.flags.DEFINE_float(
'adadelta_rho', 0.95,
'The decay rate for adadelta.')
tf.app.flags.DEFINE_float(
'adagrad_initial_accumulator_value', 0.1,
'Starting value for the AdaGrad accumulators.')
tf.app.flags.DEFINE_float(
'adam_beta1', 0.9,
'The exponential decay rate for the 1st moment estimates.')
tf.app.flags.DEFINE_float(
'adam_beta2', 0.999,
'The exponential decay rate for the 2nd moment estimates.')
tf.app.flags.DEFINE_float('opt_epsilon', 1.0, 'Epsilon term for the optimizer.')
tf.app.flags.DEFINE_float('ftrl_learning_rate_power', -0.5,
'The learning rate power.')
tf.app.flags.DEFINE_float(
'ftrl_initial_accumulator_value', 0.1,
'Starting value for the FTRL accumulators.')
tf.app.flags.DEFINE_float(
'ftrl_l1', 0.0, 'The FTRL l1 regularization strength.')
tf.app.flags.DEFINE_float(
'ftrl_l2', 0.0, 'The FTRL l2 regularization strength.')
tf.app.flags.DEFINE_float(
'momentum', 0.9,
'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
tf.app.flags.DEFINE_float('rmsprop_momentum', 0.9, 'Momentum.')
tf.app.flags.DEFINE_float('rmsprop_decay', 0.9, 'Decay term for RMSProp.')
#######################
# Learning Rate Flags #
#######################
tf.app.flags.DEFINE_string(
'learning_rate_decay_type',
'exponential',
'Specifies how the learning rate is decayed. One of "fixed", "exponential",'
' or "polynomial"')
tf.app.flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
tf.app.flags.DEFINE_float(
'end_learning_rate', 0.0001,
'The minimal end learning rate used by a polynomial decay learning rate.')
tf.app.flags.DEFINE_float(
'label_smoothing', 0.0, 'The amount of label smoothing.')
tf.app.flags.DEFINE_float(
'learning_rate_decay_factor', 0.94, 'Learning rate decay factor.')
tf.app.flags.DEFINE_float(
'num_epochs_per_decay', 2.0,
'Number of epochs after which learning rate decays.')
tf.app.flags.DEFINE_bool(
'sync_replicas', False,
'Whether or not to synchronize the replicas during training.')
tf.app.flags.DEFINE_integer(
'replicas_to_aggregate', 1,
'The number of gradients to collect before updating params.')
tf.app.flags.DEFINE_float(
'moving_average_decay', None,
'The decay to use for the moving average.'
'If left as None, then moving averages are not used.')
#######################
# Dataset Flags #
#######################
tf.app.flags.DEFINE_string(
'dataset_name', 'imagenet', 'The name of the dataset to load.')
tf.app.flags.DEFINE_string(
'dataset_split_name', 'train', 'The name of the train/test split.')
tf.app.flags.DEFINE_string(
'dataset_dir', None, 'The directory where the dataset files are stored.')
tf.app.flags.DEFINE_integer(
'labels_offset', 0,
'An offset for the labels in the dataset. This flag is primarily used to '
'evaluate the VGG and ResNet architectures which do not use a background '
'class for the ImageNet dataset.')
tf.app.flags.DEFINE_string(
'model_name', 'inception_v3', 'The name of the architecture to train.')
tf.app.flags.DEFINE_string(
'preprocessing_name', None, 'The name of the preprocessing to use. If left '
'as `None`, then the model_name flag is used.')
tf.app.flags.DEFINE_integer(
'batch_size', 32, 'The number of samples in each batch.')
tf.app.flags.DEFINE_integer(
'train_image_size', None, 'Train image size')
tf.app.flags.DEFINE_integer('max_number_of_steps', None,
'The maximum number of training steps.')
tf.app.flags.DEFINE_float('width_multiplier', 1.0,
'Width Multiplier, for MobileNet only.')
#####################
# Fine-Tuning Flags #
#####################
tf.app.flags.DEFINE_string(
'checkpoint_path', None,
'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_string(
'checkpoint_exclude_scopes', None,
'Comma-separated list of scopes of variables to exclude when restoring '
'from a checkpoint.')
tf.app.flags.DEFINE_string(
'trainable_scopes', None,
'Comma-separated list of scopes to filter the set of variables to train. '
'By default (None), all the variables are trained.')
tf.app.flags.DEFINE_boolean(
'ignore_missing_vars', False,
'Whether to ignore missing variables when restoring a checkpoint.')
FLAGS = tf.app.flags.FLAGS
def _configure_learning_rate(num_samples_per_epoch, global_step):
"""Configures the learning rate.
Args:
num_samples_per_epoch: The number of samples in each epoch of training.
global_step: The global_step tensor.
Returns:
A `Tensor` representing the learning rate.
Raises:
ValueError: if FLAGS.learning_rate_decay_type is not recognized.
"""
decay_steps = int(num_samples_per_epoch / FLAGS.batch_size *
FLAGS.num_epochs_per_decay)
if FLAGS.sync_replicas:
decay_steps /= FLAGS.replicas_to_aggregate
if FLAGS.learning_rate_decay_type == 'exponential':
return tf.train.exponential_decay(FLAGS.learning_rate,
global_step,
decay_steps,
FLAGS.learning_rate_decay_factor,
staircase=True,
name='exponential_decay_learning_rate')
elif FLAGS.learning_rate_decay_type == 'fixed':
return tf.constant(FLAGS.learning_rate, name='fixed_learning_rate')
elif FLAGS.learning_rate_decay_type == 'polynomial':
return tf.train.polynomial_decay(FLAGS.learning_rate,
global_step,
decay_steps,
FLAGS.end_learning_rate,
power=1.0,
cycle=False,
name='polynomial_decay_learning_rate')
else:
raise ValueError('learning_rate_decay_type [%s] was not recognized' %
FLAGS.learning_rate_decay_type)
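# Worked example (hypothetical flag values): with num_samples_per_epoch = 50000,
# FLAGS.batch_size = 32 and FLAGS.num_epochs_per_decay = 2.0,
#   decay_steps = int(50000 / 32 * 2.0) = 3125
# so the default staircased exponential schedule evaluates to
#   FLAGS.learning_rate * FLAGS.learning_rate_decay_factor ** (global_step // 3125),
# e.g. 0.01 * 0.94 ** (global_step // 3125) with the default flags.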
def _configure_optimizer(learning_rate):
"""Configures the optimizer used for training.
Args:
learning_rate: A scalar or `Tensor` learning rate.
Returns:
An instance of an optimizer.
Raises:
ValueError: if FLAGS.optimizer is not recognized.
"""
if FLAGS.optimizer == 'adadelta':
optimizer = tf.train.AdadeltaOptimizer(
learning_rate,
rho=FLAGS.adadelta_rho,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'adagrad':
optimizer = tf.train.AdagradOptimizer(
learning_rate,
initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)
elif FLAGS.optimizer == 'adam':
optimizer = tf.train.AdamOptimizer(
learning_rate,
beta1=FLAGS.adam_beta1,
beta2=FLAGS.adam_beta2,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'ftrl':
optimizer = tf.train.FtrlOptimizer(
learning_rate,
learning_rate_power=FLAGS.ftrl_learning_rate_power,
initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,
l1_regularization_strength=FLAGS.ftrl_l1,
l2_regularization_strength=FLAGS.ftrl_l2)
elif FLAGS.optimizer == 'momentum':
optimizer = tf.train.MomentumOptimizer(
learning_rate,
momentum=FLAGS.momentum,
name='Momentum')
elif FLAGS.optimizer == 'rmsprop':
optimizer = tf.train.RMSPropOptimizer(
learning_rate,
decay=FLAGS.rmsprop_decay,
momentum=FLAGS.rmsprop_momentum,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'sgd':
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
else:
raise ValueError('Optimizer [%s] was not recognized' % FLAGS.optimizer)
return optimizer
def _add_variables_summaries(learning_rate):
summaries = []
for variable in slim.get_model_variables():
summaries.append(tf.summary.histogram(variable.op.name, variable))
summaries.append(tf.summary.scalar('training/Learning Rate', learning_rate))
return summaries
def _get_init_fn():
"""Returns a function run by the chief worker to warm-start the training.
Note that the init_fn is only run when initializing the model during the very
first global step.
Returns:
An init function run by the supervisor.
"""
if FLAGS.checkpoint_path is None:
return None
# Warn the user if a checkpoint already exists in the train_dir; in that case
# --checkpoint_path is ignored.
if tf.train.latest_checkpoint(FLAGS.train_dir):
tf.logging.info(
'Ignoring --checkpoint_path because a checkpoint already exists in %s'
% FLAGS.train_dir)
return None
exclusions = []
if FLAGS.checkpoint_exclude_scopes:
exclusions = [scope.strip()
for scope in FLAGS.checkpoint_exclude_scopes.split(',')]
# TODO(sguada) variables.filter_variables()
variables_to_restore = []
for var in slim.get_model_variables():
excluded = False
for exclusion in exclusions:
if var.op.name.startswith(exclusion):
excluded = True
break
if not excluded:
variables_to_restore.append(var)
if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
else:
checkpoint_path = FLAGS.checkpoint_path
tf.logging.info('Fine-tuning from %s' % checkpoint_path)
return slim.assign_from_checkpoint_fn(
checkpoint_path,
variables_to_restore,
ignore_missing_vars=FLAGS.ignore_missing_vars)
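# Example invocation sketch (hypothetical script name and paths), fine-tuning from an
# existing checkpoint while excluding the prediction scope from the restore and training
# only that scope:
#
#   python train.py \
#       --dataset_dir=/data/kitti \
#       --checkpoint_path=/checkpoints/mobilenet_v1.ckpt \
#       --checkpoint_exclude_scopes=MobileNet/conv_predict \
#       --trainable_scopes=MobileNet/conv_predict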
def _get_variables_to_train():
"""Returns a list of variables to train.
Returns:
A list of variables to train by the optimizer.
"""
if FLAGS.trainable_scopes is None:
return tf.trainable_variables()
else:
scopes = [scope.strip() for scope in FLAGS.trainable_scopes.split(',')]
variables_to_train = []
for scope in scopes:
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
variables_to_train.extend(variables)
return variables_to_train
def main(_):
if not FLAGS.dataset_dir:
raise ValueError('You must supply the dataset directory with --dataset_dir')
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Graph().as_default():
#######################
# Config model_deploy #
#######################
deploy_config = model_deploy.DeploymentConfig(
num_clones=FLAGS.num_clones,
clone_on_cpu=FLAGS.clone_on_cpu,
replica_id=FLAGS.task,
num_replicas=FLAGS.worker_replicas,
num_ps_tasks=FLAGS.num_ps_tasks)
# Create global_step
with tf.device(deploy_config.variables_device()):
global_step = slim.create_global_step()
######################
# Select the dataset #
######################
dataset = dataset_factory.get_dataset(
FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
######################
# Select the network #
######################
network_fn = nets_factory.get_network_fn(
FLAGS.model_name,
num_classes=(dataset.num_classes - FLAGS.labels_offset),
weight_decay=FLAGS.weight_decay,
is_training=True,
width_multiplier=FLAGS.width_multiplier)
#####################################
# Select the preprocessing function #
#####################################
preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name
image_preprocessing_fn = preprocessing_factory.get_preprocessing(
preprocessing_name,
is_training=True)
##############################################################
# Create a dataset provider that loads data from the dataset #
##############################################################
with tf.device(deploy_config.inputs_device()):
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
num_readers=FLAGS.num_readers,
common_queue_capacity=20 * FLAGS.batch_size,
common_queue_min=10 * FLAGS.batch_size)
# gt_bboxes format [ymin, xmin, ymax, xmax]
[image, img_shape, gt_labels, gt_bboxes] = provider.get(['image', 'shape',
'object/label',
'object/bbox'])
# Preprocessing
# gt_bboxes = scale_bboxes(gt_bboxes, img_shape) # bboxes format [0,1) for tf draw
image, gt_labels, gt_bboxes = image_preprocessing_fn(image,
config.IMG_HEIGHT,
config.IMG_WIDTH,
labels=gt_labels,
bboxes=gt_bboxes,
)
#############################################
# Encode annotations for losses computation #
#############################################
# anchors format [cx, cy, w, h]
anchors = tf.convert_to_tensor(config.ANCHOR_SHAPE, dtype=tf.float32)
# encode annos, box_input format [cx, cy, w, h]
input_mask, labels_input, box_delta_input, box_input = encode_annos(gt_labels,
gt_bboxes,
anchors,
config.NUM_CLASSES)
images, b_input_mask, b_labels_input, b_box_delta_input, b_box_input = tf.train.batch(
[image, input_mask, labels_input, box_delta_input, box_input],
batch_size=FLAGS.batch_size,
num_threads=FLAGS.num_preprocessing_threads,
capacity=5 * FLAGS.batch_size)
batch_queue = slim.prefetch_queue.prefetch_queue(
[images, b_input_mask, b_labels_input, b_box_delta_input, b_box_input], capacity=2 * deploy_config.num_clones)
####################
# Define the model #
####################
def clone_fn(batch_queue):
"""Allows data parallelism by creating multiple clones of network_fn."""
images, b_input_mask, b_labels_input, b_box_delta_input, b_box_input = batch_queue.dequeue()
anchors = tf.convert_to_tensor(config.ANCHOR_SHAPE, dtype=tf.float32)
end_points = network_fn(images)
end_points["viz_images"] = images
conv_ds_14 = end_points['MobileNet/conv_ds_14/depthwise_conv']
dropout = slim.dropout(conv_ds_14, keep_prob=0.5, is_training=True)
num_output = config.NUM_ANCHORS * (config.NUM_CLASSES + 1 + 4)
predict = slim.conv2d(dropout, num_output, kernel_size=(3, 3), stride=1, padding='SAME',
activation_fn=None,
weights_initializer=tf.truncated_normal_initializer(stddev=0.0001),
scope="MobileNet/conv_predict")
with tf.name_scope("Interpre_prediction") as scope:
pred_box_delta, pred_class_probs, pred_conf, ious, det_probs, det_boxes, det_class = \
interpre_prediction(predict, b_input_mask, anchors, b_box_input)
end_points["viz_det_probs"] = det_probs
end_points["viz_det_boxes"] = det_boxes
end_points["viz_det_class"] = det_class
with tf.name_scope("Losses") as scope:
losses(b_input_mask, b_labels_input, ious, b_box_delta_input, pred_class_probs, pred_conf, pred_box_delta)
return end_points
# Gather initial summaries.
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
clones = model_deploy.create_clones(deploy_config, clone_fn, [batch_queue])
first_clone_scope = deploy_config.clone_scope(0)
# Gather update_ops from the first clone. These contain, for example,
# the updates for the batch_norm variables created by network_fn.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope)
# Add summaries for end_points.
end_points = clones[0].outputs
for end_point in end_points:
if end_point not in ["viz_images", "viz_det_probs", "viz_det_boxes", "viz_det_class"]:
x = end_points[end_point]
summaries.add(tf.summary.histogram('activations/' + end_point, x))
summaries.add(tf.summary.scalar('sparsity/' + end_point,
tf.nn.zero_fraction(x)))
# Add summaries for detection results. TODO(shizehao): visualize predictions
# Add summaries for losses.
for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):
summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss))
# Add summaries for variables.
for variable in slim.get_model_variables():
summaries.add(tf.summary.histogram(variable.op.name, variable))
#################################
# Configure the moving averages #
#################################
if FLAGS.moving_average_decay:
moving_average_variables = slim.get_model_variables()
variable_averages = tf.train.ExponentialMovingAverage(
FLAGS.moving_average_decay, global_step)
else:
moving_average_variables, variable_averages = None, None
#########################################
# Configure the optimization procedure. #
#########################################
with tf.device(deploy_config.optimizer_device()):
learning_rate = _configure_learning_rate(dataset.num_samples, global_step)
optimizer = _configure_optimizer(learning_rate)
summaries.add(tf.summary.scalar('learning_rate', learning_rate))
if FLAGS.sync_replicas:
# If sync_replicas is enabled, the averaging will be done in the chief
# queue runner.
optimizer = tf.train.SyncReplicasOptimizer(
opt=optimizer,
replicas_to_aggregate=FLAGS.replicas_to_aggregate,
variable_averages=variable_averages,
variables_to_average=moving_average_variables,
replica_id=tf.constant(FLAGS.task, tf.int32, shape=()),
total_num_replicas=FLAGS.worker_replicas)
elif FLAGS.moving_average_decay:
# Update ops executed locally by trainer.
update_ops.append(variable_averages.apply(moving_average_variables))
# Variables to train.
variables_to_train = _get_variables_to_train()
# Compute the total loss and the gradients for all clones.
total_loss, clones_gradients = model_deploy.optimize_clones(
clones,
optimizer,
var_list=variables_to_train)
# Add total_loss to summary.
summaries.add(tf.summary.scalar('total_loss', total_loss))
# Create gradient updates.
grad_updates = optimizer.apply_gradients(clones_gradients,
global_step=global_step)
update_ops.append(grad_updates)
update_op = tf.group(*update_ops)
train_tensor = control_flow_ops.with_dependencies([update_op], total_loss,
name='train_op')
# Add the summaries from the first clone. These contain the summaries
# created by model_fn and either optimize_clones() or _gather_clone_loss().
summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES,
first_clone_scope))
# Merge all summaries together.
summary_op = tf.summary.merge(list(summaries), name='summary_op')
###########################
# Kicks off the training. #
###########################
slim.learning.train(
train_tensor,
logdir=FLAGS.train_dir,
master=FLAGS.master,
is_chief=(FLAGS.task == 0),
init_fn=_get_init_fn(),
summary_op=summary_op,
number_of_steps=FLAGS.max_number_of_steps,
log_every_n_steps=FLAGS.log_every_n_steps,
save_summaries_secs=FLAGS.save_summaries_secs,
save_interval_secs=FLAGS.save_interval_secs,
sync_optimizer=optimizer if FLAGS.sync_replicas else None)
if __name__ == '__main__':
tf.app.run()
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Sub programs for doing the measurements
author : Eoin O'Farrell
email : phyoec@nus.edu.sg
last edited : July 2013
Explanation:
There are 3 variables in our instrument:
1 Temperature
2 Field
3 Device parameter; e.g. Backgate V, Topgate V, Current, Angle (one day)
Typically a measurement will fix two of these and vary the other.
Temperature and field are controlled by external services that
can be called by the measurement. The measurement connects to a
localhost socket for each of these services and can then
access certain methods.
The generic ports for these are
Magnet: 18861
Temperature: 18871
Data from these processes can also be accessed through named pipes
Device parameters are so far controlled in situ in the measurement
loop. This should probably also be changed to be consistent
"""
import rpyc
import visa as visa
import string as string
import re as re
import time
import multiprocessing
import numpy as np
import pyqtgraph as pg
import pyqtgraph.multiprocess as mp
from pyqtgraph.Qt import QtCore, QtGui
from datetime import datetime
import os
import csv
import subprocess
import shutil
import asyncore
import h5py
import VisaSubs as VisaSubs
import SrsLia as LIA
import Keithleys as keithley
import SocketUtils as SocketUtils
from itertools import cycle
def TempSocketRead(Client,OldTemp,Status):
asyncore.loop(count=1,timeout=0.001)
TString = Client.received_data
Temp = OldTemp
if TString:
TString = TString.split(",")[-1]
TString = TString.split(" ")
if len(TString)==2:
NewTemp = TString[0]
Status = TString[1]
try:
Temp = float(NewTemp)
except:
pass
return Temp, Status
def MagSocketRead(Client,OldField,Status):
asyncore.loop(count=1,timeout=0.001)
MString = Client.received_data
Field = OldField
if MString:
MString = MString.split(",")[-1]
MString = MString.split(" ")
if len(MString)==2:
NewField = MString[0]
Status = MString[1]
try:
Field = float(NewField)
except:
pass
return Field, Status
def SocketWrite(Client,Msg):
Client.to_send = Msg
asyncore.loop(count=1,timeout=0.001)
time.sleep(2)
Client.to_send = "-"
asyncore.loop(count=1,timeout=0.001)
def OpenCSVFile(FileName,StartTime,Lockins,Kths,comment = "No comment!\n"):
# Try to make a directory called Data in the CWD
CurrentDir = os.getcwd()
DataDir = "".join((CurrentDir,"\\Data"))
try:
os.mkdir(DataDir)
except OSError:
pass
    # Try to make a directory with the current directory name on the
    # network drive
NetworkDir = "Z:\\DATA"
DirName = os.path.basename(CurrentDir)
NetDir = "".join((NetworkDir,"\\",DirName))
if not os.path.exists(NetDir):
try:
os.mkdir(NetDir)
except OSError:
pass
# Try to make a file called ...-0.dat in data else ...-1.dat etc.
i = 0
while True:
File = "".join((DataDir,"\\",FileName,"-","%d" % i,".dat"))
try:
os.stat(File)
i = i+1
pass
except OSError:
csvfile = open(File,"w")
FileWriter = csv.writer(csvfile,delimiter = ',')
break
# Write the starttime and a description of each of the instruments
FileWriter.writerow([StartTime])
for k in Kths:
csvfile.write(k.Description())
ColumnString = "".join((k.Source,", ",k.Sense))
ColumnString = "".join((ColumnString,", B (T), T (mK)"))
for k in Lockins:
csvfile.write(k.Description())
ColumnString = "".join((ColumnString,", X, Y, R, Theta"))
ColumnString = "".join((ColumnString,"\n"))
csvfile.write(comment)
csvfile.write("\n")
csvfile.write(ColumnString)
print "Writing to data file %s\n" % File
return FileWriter, File, NetDir
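# Illustrative usage sketch (assumption, mirroring the sweep routines below;
# "Lias" and "Kthly" stand for already-initialised lock-in and Keithley
# driver objects):
#
#   StartTime = datetime.now()
#   Writer, FilePath, NetDir = OpenCSVFile("VgSweep", StartTime,
#                                          Lias, [Kthly],
#                                          comment="4-probe, 100 nA\n")
#   Writer.writerow([0.0, 1.2e-9, 0.0, 295.0])   # one row of readings
#   shutil.copy(FilePath, NetDir)                # mirror to the network share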
#################################################
############# Vg SWEEP
#################################################
def DoVgSweep(GraphProc,rpg,DataFile, Lias, Kthly,
SetField=0 ,
Start = 0, Stop = 0, Step = 1, Finish = 0.0,
Delay = 0, Samples = 1,
Timeout = -1, SetTemp = -1, ReturnData = False,
comment = "No comment!",Persist=True,
Wait = 0.0, IgnoreMagnet = False,
ReadKeithley = False, **kwargs):
# Bind to the Temperature socket
TClient = SocketUtils.SockClient('localhost', 18871)
TCurrent = "0"
TStatus = "-1" # Unset
# Bind to the Magnet socket
MClient = SocketUtils.SockClient('localhost', 18861)
Field = "0"
MStatus = "1" # Busy
# Wait for the connection
TCurrent, TStatus = TempSocketRead(TClient, TCurrent, TStatus)
Field, MStatus = MagSocketRead(MClient, Field, MStatus)
time.sleep(5)
# Set the source voltages
if "mid" in kwargs.keys():
Source = Kthly.RunSweep(Start,Stop,Step,Delay,mid=kwargs["mid"])
else:
Source = Kthly.RunSweep(Start,Stop,Step,Delay)
SetTime = datetime.now()
# Go to the set temperature and magnetic field and finish in persistent mode
if SetTemp > 0:
SocketWrite(TClient," ".join(("SET","%.2f" % SetTemp)))
print "Wrote message to temperature socket \"SET %.2f\"" % SetTemp
if not IgnoreMagnet:
SocketWrite(MClient," ".join(("SET","%.3f" % SetField,"%d" % int(not Persist))))
print "Wrote message to Magnet socket \"SET %.3f %d\"" % (SetField, int(not Persist))
time.sleep(5)
# give precedence to the magnet and wait for the timeout
TCurrent, TStatus = TempSocketRead(TClient, TCurrent, TStatus)
Field, MStatus = MagSocketRead(MClient, Field, MStatus)
while MStatus != "0":
print "Waiting for magnet!"
time.sleep(15)
TCurrent, TStatus = TempSocketRead(TClient, TCurrent, TStatus)
Field, MStatus = MagSocketRead(MClient, Field, MStatus)
NowTime = datetime.now()
Remaining = Timeout*60.0 - float((NowTime-SetTime).seconds)
while (TStatus != "1") and (Remaining > 0):
NowTime = datetime.now()
Remaining = Timeout*60.0 - float((NowTime-SetTime).seconds)
print "Waiting for temperature ... time remaining = %.2f minutes" % (Remaining/60.0)
TCurrent, TStatus = TempSocketRead(TClient, TCurrent, TStatus)
Field, MStatus = MagSocketRead(MClient, Field, MStatus)
time.sleep(15)
time.sleep(Wait*60.0)
    # Set up the plot windows (one per lock-in plus one for the Keithley)
NLias = len(Lias)
GraphWin = rpg.GraphicsWindow(title="Vg Sweep...")
PlotData = GraphProc.transfer([])
GraphWin.resize(500,150+150*NLias)
Plot = []
Curve = []
for i in range(NLias+1):
Plot.append(GraphWin.addPlot())
Curve.append(Plot[i].plot(pen='y'))
GraphWin.nextRow()
StartTime = datetime.now()
Writer, FilePath, NetDir = OpenCSVFile(DataFile,StartTime,Lias,[Kthly],comment = comment)
print "Starting keithley!"
if Start != 0:
Kthly.Ramp(Start)
else:
Kthly.SetSource(0)
if not Kthly.Output:
Kthly.SwitchOutput()
Kthly.ReadData()
print "Waiting 1 minute!"
time.sleep(60)
print "Starting measurement!"
# This is the main measurement loop
for i in xrange(len(Source)):
DataList = np.zeros((Samples,4+NLias*4))
# Set the Keithley
Kthly.SetSource(Source[i])
for j in range(Samples):
# Read the Keithley
if ReadKeithley:
Kthly.ReadData()
DataList[j,0:2] = Kthly.Data
else:
DataList[j,0] = Source[i]
DataList[j,1] = Kthly.Data[1]
# Read the magnet
if not IgnoreMagnet:
Field, MStatus = MagSocketRead(MClient, Field, MStatus)
else:
Field = 0.0
DataList[j,2] = Field
# Read the temperature
TCurrent, TStatus = TempSocketRead(TClient, TCurrent, TStatus)
DataList[j,3] = TCurrent
# Read the Lockins
for k,inst in enumerate(Lias):
inst.ReadData()
DataList[j,((k+1)*4):((k+2)*4)] = inst.Data
# Sleep
time.sleep(Delay)
#DataList = np.reshape(DataList,[Samples,len(DataList)/Samples])
# Save the data
for j in xrange(Samples):
Writer.writerow(DataList[j,:])
# Package the data and send it for plotting
XData = DataList[:,0]
YData = np.empty([Samples,NLias+1])
YData[:,1:] = DataList[:,4:NLias*4+2:4]
YData[:,0] = DataList[:,1]
# Pass data to the plots
PlotData.extend(np.hstack([np.mean(XData),np.mean(YData,0)]),_callSync = "off")
for i in range(NLias+1):
Curve[i].setData(x=PlotData[0::NLias+2],y=PlotData[i+1::NLias+2],_callSync = "off")
# We are finished, now ramp the Keithley to the finish voltage
GraphWin.close()
MClient.close()
TClient.close()
if Stop != Finish:
Kthly.Ramp(Finish)
# if the finish is zero switch it off
if Finish == 0.0:
Kthly.SwitchOutput()
if ReturnData:
DataList = [None]*(NLias+1)
DataList[0] = PlotData[0::NLias+2]
for i in range(1,NLias+1):
DataList[i]=PlotData[i+1::NLias+2]
# Copy the file to the network
time.sleep(5)
try:
shutil.copy(FilePath,NetDir)
except OSError:
pass
if ReturnData:
return FilePath, DataList
else:
return FilePath
#################################################
############# T SWEEP
#################################################
def DoTempSweep(GraphProc,rpg,DataFile,
Magnet, Lias, Kthly,
SetField=0 ,
TempStart = 0, TempFinish = 0, TempRate = 1, TempFinal =0.0,
Delay = 1, VgMeas = 0.0, FinishGate = 0.0,
Timeout = -1,
comment = "No comment!",
Persist = True, IgnoreMagnet = False,
ReadKeithley=False,**kwargs):
# Bind to the Temperature socket
TClient = SocketUtils.SockClient('localhost', 18871)
TCurrent = "0"
TStatus = "-1" # Unset
# Bind to the Magnet socket
MClient = SocketUtils.SockClient('localhost', 18861)
Field = "0"
MStatus = "1" # Busy
# Wait for the connection
TCurrent, TStatus = TempSocketRead(TClient, TCurrent, TStatus)
Field, MStatus = MagSocketRead(MClient, Field, MStatus)
time.sleep(5)
SetTime = datetime.now()
# Go to the specified field and finish in persistent mode
SocketWrite(TClient," ".join(("SET","%.2f" % TempStart)))
print "Wrote message to temperature socket \"SET %.2f\"" % TempStart
if not IgnoreMagnet:
SocketWrite(MClient," ".join(("SET","%.3f" % SetField,"%d" % int(not Persist))))
print "Wrote message to Magnet socket \"SET %.3f %d\"" % (SetField, int(not Persist))
time.sleep(5)
# give precedence to the magnet and wait for the timeout
TCurrent, TStatus = TempSocketRead(TClient, TCurrent, TStatus)
Field, MStatus = MagSocketRead(MClient, Field, MStatus)
while MStatus != "0":
print "Waiting for magnet!"
time.sleep(15)
TCurrent, TStatus = TempSocketRead(TClient, TCurrent, TStatus)
Field, MStatus = MagSocketRead(MClient, Field, MStatus)
NowTime = datetime.now()
Remaining = Timeout*60.0 - float((NowTime-SetTime).seconds)
while (TStatus != "1") and (Remaining > 0):
NowTime = datetime.now()
Remaining = Timeout*60.0 - float((NowTime-SetTime).seconds)
print "Waiting for temperature ... time remaining = %.2f minutes" % (Remaining/60.0)
TCurrent, TStatus = TempSocketRead(TClient, TCurrent, TStatus)
Field, MStatus = MagSocketRead(MClient, Field, MStatus)
time.sleep(15)
    # Set up the plot windows (one per lock-in plus one for the Keithley)
NLias = len(Lias)
NGraph = NLias
GraphWin = rpg.GraphicsWindow(title="Temperature sweep...")
PlotData = GraphProc.transfer([])
GraphWin.resize(500,200*NLias)
Plot = []
Curve = []
for i in range(NLias+1):
Plot.append(GraphWin.addPlot())
Curve.append(Plot[i].plot(pen='y'))
GraphWin.nextRow()
StartTime = datetime.now()
Writer, FilePath, NetDir = OpenCSVFile(DataFile,StartTime,Lias,[Kthly],comment = comment)
if VgMeas != 0:
Kthly.Ramp(VgMeas)
else:
Kthly.SetSource(0)
if not Kthly.Output:
Kthly.SwitchOutput()
Kthly.ReadData()
time.sleep(60)
print "Starting measurement!"
    SocketWrite(TClient," ".join(("SWP","%.2f" % TempStart,"%.2f" % TempFinish,"%.4f" % (TempRate/60.0))))
TStatus = "2"
time.sleep(2)
# This is the main measurement loop
while TStatus == "2":
DataList = np.zeros((4+NLias*4,))
# Read the Keithley
if ReadKeithley:
Kthly.ReadData()
DataList[0:2] = Kthly.Data
# Read the magnet
if not IgnoreMagnet:
Field, MStatus = MagSocketRead(MClient, Field, MStatus)
else:
Field = 0.0
        DataList[2] = Field
# Read the temperature
TCurrent, TStatus = TempSocketRead(TClient, TCurrent, TStatus)
DataList[3] = TCurrent
# Read the Lockins
for k,inst in enumerate(Lias):
inst.ReadData()
DataList[((k+1)*4):((k+2)*4)] = inst.Data
# Save the data
Writer.writerow(DataList)
# Package the data and send it for plotting
XData = DataList[3]
YData = np.empty([NGraph+1])
YData[1:NLias+1] = DataList[4:NLias*4+2:4]
YData[0] = DataList[1]
# Pass data to the plots
PlotData.extend(np.hstack([XData,YData]),_callSync = "off")
for i in range(NGraph+1):
Curve[i].setData(x=PlotData[0::NGraph+2],y=PlotData[i+1::NGraph+2],_callSync = "off")
# Sleep and cycle the gate if necessary
time.sleep(Delay)
Kthly.Ramp(FinishGate)
if Kthly.Output and FinishGate == 0.0:
Kthly.SwitchOutput()
    SocketWrite(TClient," ".join(("SET","%.2f" % TempFinal)))
    # Pause briefly before tidying up
    time.sleep(5)
    # We are finished: close the plot window and the sockets,
    # then copy the data file to the network
GraphWin.close()
MClient.close()
TClient.close()
try:
shutil.copy(FilePath,NetDir)
except OSError:
pass
return FilePath
#################################################
############# B SWEEP
#################################################
def DoBSweep(GraphProc,rpg,DataFile,
Lias, Kthly, Vg = 0,
Start = 0, Stop = 0, FinishHeater = 0, Rate = 1.6,
Delay = 1.0, Timeout = -1, SetTemp = -1, VPreRamp = [],
HeaterConst = [], CycleGate = 0, CycleDelay = 0.05,
GateStep = 0.1,
FinishGate = 0.0, ReadKeithley = False,
comment = "No comment!"):
# Bind to the Temperature socket
TClient = SocketUtils.SockClient('localhost', 18871)
TCurrent = "0"
TStatus = "-1" # Unset
# Bind to the Magnet socket
MClient = SocketUtils.SockClient('localhost', 18861)
Field = "0"
MStatus = "1" # Busy
# Wait for the connection
TCurrent, TStatus = TempSocketRead(TClient, TCurrent, TStatus)
Field, MStatus = MagSocketRead(MClient, Field, MStatus)
time.sleep(5)
    # Tell the magnet daemon to go to the initial field and set the temperature
if SetTemp > 0:
SocketWrite(TClient," ".join(("SET","%.2f" % SetTemp)))
print "Wrote message to temperature socket \"SET %.2f\"" % SetTemp
if HeaterConst:
        SocketWrite(TClient," ".join(("CST","%.2f" % HeaterConst)))
SocketWrite(MClient," ".join(("SET","%.3f" % Start,"1")))
print "Wrote message to Magnet socket \"SET %.3f 1\"" % Start
time.sleep(5)
SetTime = datetime.now()
# Wait for the temperature timeout
NowTime = datetime.now()
Remaining = Timeout*60.0 - (NowTime-SetTime).seconds
TCurrent, TStatus = TempSocketRead(TClient, TCurrent, TStatus)
Field, MStatus = MagSocketRead(MClient, Field, MStatus)
while TStatus == "0" and Remaining > 0:
NowTime = datetime.now()
Remaining = Timeout*60.0 - (NowTime-SetTime).seconds*1.0
print "Time remaining = %.2f minutes" % (Remaining/60.0)
TCurrent, TStatus = TempSocketRead(TClient, TCurrent, TStatus)
Field, MStatus = MagSocketRead(MClient, Field, MStatus)
time.sleep(15)
# Wait more for the magnet if necessary
while MStatus != "0":
print "Waiting for magnet!"
time.sleep(15)
TCurrent, TStatus = TempSocketRead(TClient, TCurrent, TStatus)
Field, MStatus = MagSocketRead(MClient, Field, MStatus)
# Turn on the Keithley and then wait for a bit
#Kthly.SetSource(0)
if VPreRamp:
for i in VPreRamp:
Kthly.Ramp(i)
Kthly.Ramp(Vg-CycleGate)
Kthly.ReadData()
if CycleGate:
GateRange = np.hstack((np.arange(Vg-CycleGate,Vg+CycleGate,GateStep),np.arange(Vg+CycleGate,Vg-CycleGate,-1*GateStep)))
GateCycle = cycle(GateRange)
    # Set up the plot windows, one for each LIA plus one for the Keithley
NLias = len(Lias)
GraphWin = rpg.GraphicsWindow(title="B Sweep...")
PlotData = GraphProc.transfer([])
GraphWin.resize(500,150+150*NLias)
Plot = []
Curve = []
for i in range(NLias+1):
Plot.append(GraphWin.addPlot())
Curve.append(Plot[i].plot(pen='y'))
GraphWin.nextRow()
time.sleep(120)
StartTime = datetime.now()
# Open the data file
Writer, FilePath, NetDir = OpenCSVFile(DataFile,StartTime,Lias,[Kthly],comment = comment)
# Start the sweep
SocketWrite(MClient," ".join(("SWP","%.3f" % Start,"%.3f" % Stop,"%d" % FinishHeater)))
while MStatus != "2":
time.sleep(1)
TCurrent, TStatus = TempSocketRead(TClient, TCurrent, TStatus)
Field, MStatus = MagSocketRead(MClient, Field, MStatus)
#print Field
while MStatus == "2":
DataList = np.zeros((4+NLias*4,))
# Read the Keithley
if ReadKeithley:
Kthly.ReadData()
DataList[0:2] = Kthly.Data
# Read the magnet
Field, MStatus = MagSocketRead(MClient, Field, MStatus)
DataList[2] = Field
# Read the temperature
TCurrent, TStatus = TempSocketRead(TClient, TCurrent, TStatus)
DataList[3] = TCurrent
# Read the Lockins
for k,inst in enumerate(Lias):
inst.ReadData()
DataList[((k+1)*4):((k+2)*4)] = inst.Data
# Save the data
Writer.writerow(DataList)
# Package the data and send it for plotting
XData = DataList[2]
YData = np.empty([NLias+1])
YData[1:NLias+1] = DataList[4:NLias*4+2:4]
YData[0] = DataList[1]
# YData[-1] = DataList[-2]
# Pass data to the plots
PlotData.extend(np.hstack([XData,YData]),_callSync = "off")
for i in range(NLias+1):
Curve[i].setData(x=PlotData[0::NLias+2],y=PlotData[i+1::NLias+2],_callSync = "off")
# Sleep and cycle the gate if necessary
if CycleGate:
LoopTime = time.time() + Delay
while True:
if time.time() > LoopTime:
break
Kthly.SetSource(GateCycle.next())
time.sleep(CycleDelay)
else:
time.sleep(Delay)
Kthly.Ramp(FinishGate)
if Kthly.Output and FinishGate == 0.0:
Kthly.SwitchOutput()
# We are finished
GraphWin.close()
MClient.close()
TClient.close()
# Copy the file to the network
time.sleep(5)
try:
shutil.copy(FilePath,NetDir)
except OSError:
pass
return FilePath
###########################################
###########################################
def Vg2D(GraphProc, rpg, DataFile,
Lias, Kthly,
SetTemp = -1,
VgStart = -10, VgStop = 10, VgStep = 1,
VgSamples = 1, VgFinish=0.0,
Delay = 0,
BStart = -1, BStop = 1, BStep = 0.25,
Timeout = -1, comment = "No comment!",
Persist=True, **kwargs):
if VgStop < VgStart:
VgSet = [VgStop, VgStart]
else:
VgSet = [VgStart, VgStop]
if "BCustom" in kwargs.keys():
BVec = kwargs["BCustom"]
else:
BVec = np.hstack((np.arange(BStart,BStop,BStep),BStop))
NLIAS = len(Lias)
Plt2DWin = [None]*NLIAS
VwBox = [None]*NLIAS
Imv = [None]*NLIAS
Z= [[] for _ in range(NLIAS)]
ZArray = [None] * NLIAS
#Exporter = [None] * NLIAS
#ImageTitle = [None] * NLIAS
for i in range(NLIAS):
Plt2DWin[i] = rpg.QtGui.QMainWindow()
Plt2DWin[i].resize(500,500)
VwBox[i] = rpg.ViewBox(invertY = True)
Imv[i] = rpg.ImageView(view=rpg.PlotItem(viewBox=VwBox[i]))
Plt2DWin[i].setCentralWidget(Imv[i])
#ImageTitle[i] = "LIA %d" % i
Plt2DWin[i].setWindowTitle("2D Plot")
Plt2DWin[i].show()
#Exporter[i] = rpg.exporters.ImageExporter.ImageExporter(Imv[i].imageItem)
X = BVec
LenB = len(X)
for i in range(len(X)):
FileName, DataList = DoVgSweep(GraphProc,
rpg,DataFile,
Lias,Kthly,
Start=VgSet[0],Stop=VgSet[1],Step=VgStep,
Samples=VgSamples,Finish = VgFinish,
Timeout=Timeout,Delay=Delay,
SetTemp=SetTemp,
SetField = X[i],
Persist = Persist,
ReturnData=True, comment = comment)
if i == 0:
Y = DataList[0]
for j in range(NLIAS):
Z[j].append(DataList[j+1])
if i >= 1:
YScale = abs(Y[-1]-Y[0])/float(len(Y))
XScale = abs(X[i]-X[0])/float(i)
#XScale = abs(i-0)/float(i)
for j in range(NLIAS):
ZArray[j] = np.array(Z[j])
# print np.shape(ZArray[i])
ZArray[j] = np.reshape(ZArray[j],(i+1,-1))
Imv[j].setImage(ZArray[j],scale=(XScale,YScale),pos=(X[0],Y[0]))
VwBox[j].autoRange()
if i == LenB-1:
# export to hdf5
outFile = h5py.File("".join((DataFile,"-%d" % j,".hdf5")),"w")
ZSet = outFile.create_dataset("Z",data=ZArray[j])
YSet = outFile.create_dataset("V",data=Y)
XSet = outFile.create_dataset("B",data=X)
outFile.close()
#Exporter[j].export("".join((ImageTitle[i],".png")))
# Finished, ramp the keithley to zero and switch it off if not done
    if VgFinish != 0.0:
Kthly.Ramp(0)
# if the finish is zero switch it off
Kthly.SwitchOutput()
for i in range(NLIAS):
Imv[i].close()
VwBox[i].close()
Plt2DWin[i].close()
return
################################################
################################################
def DoBSeq(GraphProc,rpg,DataFile,
Lias, Kthly,
VgStart = 0, VgStop = 0, VgStep = 0,
Start = 0, Stop = 0, Rate = 1.6,
Delay = 1, Timeout = -1,
SetTemp = -1, comment = "No comment!",VPreRamp=[],
CycleGate = 0.0, GateStep = 0.1, **kwargs):
if "VCustom" in kwargs.keys():
Source = kwargs["VCustom"]
elif "mid" in kwargs.keys():
Source = Kthly.RunSweep(VgStart,VgStop,
VgStep,Delay,mid=kwargs["mid"])
else:
Source = Kthly.RunSweep(VgStart,VgStop,VgStep,Delay)
Source = np.hstack((Source,[0.0]))
# No need to swap these
#if Start > Stop:
# BLim = [Stop,Start]
#else:
BLim = [Start,Stop]
for i,VGate in enumerate(Source[:-1]):
DoBSweep(GraphProc,rpg,DataFile,
Lias, Kthly, Vg = VGate,
Start = BLim[0], Stop = BLim[1],
FinishHeater = 1, Rate = Rate,
Delay = Delay, Timeout = Timeout,
SetTemp = SetTemp, VPreRamp = VPreRamp,
CycleGate = CycleGate,
FinishGate = Source[i+1], comment = comment)
BLim = BLim[::-1]
MClient = SocketUtils.SockClient('localhost', 18861)
time.sleep(5)
SocketWrite(MClient,"SET 0.0 0")
time.sleep(5)
MClient.close()
return
################################################
################################################
def DoVISweep(GraphProc, rpg, DataFile, Magnet, KthMeas, KthGate,
Field = 0, Persist = True, VStart = 0, VStop = 0, VStep = 1e-4,
VGate=0, SetTemp=-1, ReturnData = False, Delay = 0,
Samples = 1, Timeout = -1, comment = "No comment!", **kwargs):
# Bind to the Temperature socket
TClient = SocketUtils.SockClient('localhost', 18871)
TCurrent = "0.0"
TStatus = "-1"
# Wait for the connection
time.sleep(5)
# Set the source voltages
if "VCustom" in kwargs.keys():
Source = kwargs["VCustom"]
elif "mid" in kwargs.keys():
Source = KthMeas.RunSweep(VStart,VStop,VStep,Delay,mid=kwargs["mid"])
else:
Source = KthMeas.RunSweep(VStart,VStop,VStep,Delay)
SetTime = datetime.now()
# Go to the specified field and finish in persistent mode
if SetTemp > 0:
        SocketWrite(TClient," ".join(("SET","%.2f" % SetTemp)))
Magnet.root.MagnetGoToSet(Field, int(not Persist), rate = 2.2)
# Wait for the timeout
NowTime = datetime.now()
Remaining = Timeout*60.0 - (NowTime-SetTime).seconds
TCurrent, TStatus = TempSocketRead(TClient, TCurrent, TStatus)
while TStatus == "0" and Remaining > 0:
NowTime = datetime.now()
Remaining = Timeout*60.0 - float((NowTime-SetTime).seconds)
print "Time remaining = %.2f minutes" % (Remaining/60.0)
TCurrent, TStatus = TempSocketRead(TClient, TCurrent, TStatus)
time.sleep(10)
# Setup plot windows
    GraphWin = rpg.GraphicsWindow(title="VI Sweep")
PlotData = GraphProc.transfer([])
GraphWin.resize(500,500)
Plot = []
Curve = []
for i in range(2):
Plot.append(GraphWin.addPlot())
Curve.append(Plot[i].plot(pen='y'))
GraphWin.nextRow()
StartTime = datetime.now()
Writer, FilePath, NetDir = OpenCSVFile(DataFile,
StartTime,[],[KthMeas],comment = comment)
if VGate != 0:
KthGate.Ramp(VGate)
else:
KthGate.SetSource(0)
if not KthGate.Output:
KthGate.SwitchOutput()
KthMeas.Ramp(Source[0])
if not KthMeas.Output:
KthMeas.SwitchOutput()
time.sleep(10)
print "Starting measurement!"
for i in xrange(len(Source)):
DataList = []
# Set the Keithley
KthMeas.SetSource(Source[i])
for j in xrange(Samples):
# Read the Keithley
KthGate.ReadData()
DataList = np.hstack([DataList,KthGate.Data])
# Read the magnet
Field = Magnet.root.MagnetReadField()
DataList = np.hstack([DataList,Field])
# Read the temperature
TCurrent, TStatus = TempSocketRead(TClient, TCurrent, TStatus)
DataList = np.hstack([DataList,TCurrent])
# Read the Keithley
KthMeas.ReadData()
DataList = np.hstack([DataList,KthMeas.Data])
# Sleep
time.sleep(Delay)
DataList = np.reshape(DataList,[Samples,len(DataList)/Samples])
# Save the data
for j in xrange(Samples):
Writer.writerow(DataList[j,:])
# Package the data and send it for plotting
Data = DataList[:,[0,1,4,5]]
# Pass data to the plots
PlotData.extend(np.mean(Data,0),_callSync = "off")
for i in range(2):
Curve[i].setData(x=PlotData[0+2*i::4],y=PlotData[1+2*i::4],_callSync = "off")
# We are finished, now switch off the Keithley
if VStop != 0:
KthMeas.Ramp(0)
else:
KthMeas.SetSource(0)
KthMeas.SwitchOutput()
KthGate.Ramp(0)
KthGate.SwitchOutput()
# Copy the file to the network
time.sleep(5)
try:
shutil.copy(FilePath,NetDir)
except OSError:
pass
GraphWin.close()
if ReturnData:
return FilePath, DataList
else:
return FilePath
|
|
import warnings
import numpy as np
from scipy import sparse
from sklearn import datasets, svm, linear_model, base
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from nose.tools import assert_raises, assert_true, assert_false
from nose.tools import assert_equal as nose_assert_equal
from sklearn.datasets import make_classification, load_digits
from sklearn.svm.tests import test_svm
from sklearn.utils import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
clf = svm.SVC(kernel='linear', probability=True).fit(X, Y)
sp_clf = svm.SVC(kernel='linear', probability=True).fit(X_sp, Y)
assert_array_equal(sp_clf.predict(T), true_result)
assert_true(sparse.issparse(sp_clf.support_vectors_))
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.todense())
assert_true(sparse.issparse(sp_clf.dual_coef_))
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.todense())
assert_true(sparse.issparse(sp_clf.coef_))
assert_array_almost_equal(clf.coef_, sp_clf.coef_.todense())
assert_array_almost_equal(clf.support_, sp_clf.support_)
assert_array_almost_equal(clf.predict(T), sp_clf.predict(T))
# refit with a different dataset
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.todense())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.todense())
assert_array_almost_equal(clf.coef_, sp_clf.coef_.todense())
assert_array_almost_equal(clf.support_, sp_clf.support_)
assert_array_almost_equal(clf.predict(T2), sp_clf.predict(T2))
assert_array_almost_equal(clf.predict_proba(T2),
sp_clf.predict_proba(T2), 4)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
    # we use a subset of digits, as iris, blobs or make_classification didn't
# show the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear',
probability=True).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
kfunc = lambda x, y: safe_sparse_dot(x, y.T)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
"""Test the sparse SVC with the iris dataset"""
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.todense(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.todense())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.todense())
assert_array_almost_equal(
clf.predict(iris.data.todense()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.todense())
def test_error():
"""
Test that it gives proper exception on deficient input
"""
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
"""
Similar to test_SVC
"""
clf = svm.LinearSVC().fit(X, Y)
sp_clf = svm.LinearSVC().fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.raw_coef_, sp_clf.raw_coef_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.raw_coef_, sp_clf.raw_coef_, decimal=4)
def test_linearsvc_iris():
"""Test the sparse LinearSVC with the iris dataset"""
sp_clf = svm.LinearSVC().fit(iris.data, iris.target)
clf = svm.LinearSVC().fit(iris.data.todense(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.raw_coef_, sp_clf.raw_coef_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.todense()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.todense()))
def test_weight():
"""
Test class weights
"""
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
"""
Test weights on individual samples
"""
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(X[2]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X[2]), [2.])
def test_sparse_liblinear_intercept_handling():
"""
Test that sparse liblinear honours intercept_scaling param
"""
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_realdata():
"""
Test on a subset from the 20newsgroups dataset.
    This catches some bugs if input is not correctly converted into
sparse format or weights are not correctly initialized.
"""
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.todense(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.todense())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.todense())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
max_iter=1)
with warnings.catch_warnings(record=True) as foo:
sp.fit(X_sp, Y)
nose_assert_equal(len(foo), 1, msg=foo)
nose_assert_equal(foo[0].category, ConvergenceWarning,
msg=foo[0].category)
if __name__ == '__main__':
import nose
nose.runmodule()
|
|
# Copyright 2012-2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, stat, traceback, pickle, argparse
import time, datetime
import os.path
from . import environment, interpreter, mesonlib
from . import build
import platform
from . import mlog, coredata
from .mesonlib import MesonException
parser = argparse.ArgumentParser()
default_warning = '1'
def add_builtin_argument(name, **kwargs):
k = kwargs.get('dest', name.replace('-', '_'))
c = coredata.get_builtin_option_choices(k)
b = True if kwargs.get('action', None) in [ 'store_true', 'store_false' ] else False
h = coredata.get_builtin_option_description(k)
if not b:
h = h.rstrip('.') + ' (default: %s).' % coredata.get_builtin_option_default(k)
if c and not b:
kwargs['choices'] = c
parser.add_argument('--' + name, default=coredata.get_builtin_option_default(k), help=h, **kwargs)
add_builtin_argument('prefix')
add_builtin_argument('libdir')
add_builtin_argument('libexecdir')
add_builtin_argument('bindir')
add_builtin_argument('includedir')
add_builtin_argument('datadir')
add_builtin_argument('mandir')
add_builtin_argument('localedir')
add_builtin_argument('sysconfdir')
add_builtin_argument('backend')
add_builtin_argument('buildtype')
add_builtin_argument('strip', action='store_true')
add_builtin_argument('unity', action='store_true')
add_builtin_argument('werror', action='store_true')
add_builtin_argument('layout')
add_builtin_argument('default-library')
add_builtin_argument('warnlevel', dest='warning_level')
add_builtin_argument('stdsplit', action='store_false')
add_builtin_argument('errorlogs', action='store_false')
parser.add_argument('--cross-file', default=None,
help='File describing cross compilation environment.')
parser.add_argument('-D', action='append', dest='projectoptions', default=[],
help='Set project options.')
parser.add_argument('-v', '--version', action='version',
version=coredata.version)
parser.add_argument('directories', nargs='*')
class MesonApp():
def __init__(self, dir1, dir2, script_launcher, handshake, options, original_cmd_line_args):
(self.source_dir, self.build_dir) = self.validate_dirs(dir1, dir2, handshake)
if not os.path.isabs(options.prefix):
raise RuntimeError('--prefix value must be an absolute path: {!r}'.format(options.prefix))
if options.prefix.endswith('/') or options.prefix.endswith('\\'):
# On Windows we need to preserve the trailing slash if the
# string is of type 'C:\' because 'C:' is not an absolute path.
if len(options.prefix) == 3 and options.prefix[1] == ':':
pass
else:
options.prefix = options.prefix[:-1]
self.meson_script_launcher = script_launcher
self.options = options
self.original_cmd_line_args = original_cmd_line_args
def has_build_file(self, dirname):
fname = os.path.join(dirname, environment.build_filename)
return os.path.exists(fname)
def validate_core_dirs(self, dir1, dir2):
ndir1 = os.path.abspath(dir1)
ndir2 = os.path.abspath(dir2)
if not os.path.exists(ndir1):
os.makedirs(ndir1)
if not os.path.exists(ndir2):
os.makedirs(ndir2)
if not stat.S_ISDIR(os.stat(ndir1).st_mode):
raise RuntimeError('%s is not a directory' % dir1)
if not stat.S_ISDIR(os.stat(ndir2).st_mode):
raise RuntimeError('%s is not a directory' % dir2)
if os.path.samefile(dir1, dir2):
raise RuntimeError('Source and build directories must not be the same. Create a pristine build directory.')
if self.has_build_file(ndir1):
if self.has_build_file(ndir2):
raise RuntimeError('Both directories contain a build file %s.' % environment.build_filename)
return (ndir1, ndir2)
if self.has_build_file(ndir2):
return (ndir2, ndir1)
raise RuntimeError('Neither directory contains a build file %s.' % environment.build_filename)
def validate_dirs(self, dir1, dir2, handshake):
(src_dir, build_dir) = self.validate_core_dirs(dir1, dir2)
priv_dir = os.path.join(build_dir, 'meson-private/coredata.dat')
if os.path.exists(priv_dir):
if not handshake:
msg = '''Trying to run Meson on a build directory that has already been configured.
If you want to build it, just run your build command (e.g. ninja) inside the
build directory. Meson will autodetect any changes in your setup and regenerate
itself as required.'''
raise RuntimeError(msg)
else:
if handshake:
raise RuntimeError('Something went terribly wrong. Please file a bug.')
return (src_dir, build_dir)
def check_pkgconfig_envvar(self, env):
curvar = os.environ.get('PKG_CONFIG_PATH', '')
if curvar != env.coredata.pkgconf_envvar:
mlog.warning('PKG_CONFIG_PATH has changed between invocations from "%s" to "%s".' %
(env.coredata.pkgconf_envvar, curvar))
env.coredata.pkgconf_envvar = curvar
def generate(self):
env = environment.Environment(self.source_dir, self.build_dir, self.meson_script_launcher, self.options, self.original_cmd_line_args)
mlog.initialize(env.get_log_dir())
mlog.debug('Build started at', datetime.datetime.now().isoformat())
mlog.debug('Python binary:', sys.executable)
mlog.debug('Python system:', platform.system())
mlog.log(mlog.bold('The Meson build system'))
self.check_pkgconfig_envvar(env)
mlog.log('Version:', coredata.version)
mlog.log('Source dir:', mlog.bold(self.source_dir))
mlog.log('Build dir:', mlog.bold(self.build_dir))
if env.is_cross_build():
mlog.log('Build type:', mlog.bold('cross build'))
else:
mlog.log('Build type:', mlog.bold('native build'))
b = build.Build(env)
if self.options.backend == 'ninja':
from .backend import ninjabackend
g = ninjabackend.NinjaBackend(b)
elif self.options.backend == 'vs2010':
from .backend import vs2010backend
g = vs2010backend.Vs2010Backend(b)
elif self.options.backend == 'vs2015':
from .backend import vs2015backend
g = vs2015backend.Vs2015Backend(b)
elif self.options.backend == 'xcode':
from .backend import xcodebackend
g = xcodebackend.XCodeBackend(b)
else:
raise RuntimeError('Unknown backend "%s".' % self.options.backend)
intr = interpreter.Interpreter(b, g)
if env.is_cross_build():
mlog.log('Host machine cpu family:', mlog.bold(intr.builtin['host_machine'].cpu_family_method([], {})))
mlog.log('Host machine cpu:', mlog.bold(intr.builtin['host_machine'].cpu_method([], {})))
mlog.log('Target machine cpu family:', mlog.bold(intr.builtin['target_machine'].cpu_family_method([], {})))
mlog.log('Target machine cpu:', mlog.bold(intr.builtin['target_machine'].cpu_method([], {})))
mlog.log('Build machine cpu family:', mlog.bold(intr.builtin['build_machine'].cpu_family_method([], {})))
mlog.log('Build machine cpu:', mlog.bold(intr.builtin['build_machine'].cpu_method([], {})))
intr.run()
coredata_mtime = time.time()
g.generate(intr)
g.run_postconf_scripts()
dumpfile = os.path.join(env.get_scratch_dir(), 'build.dat')
with open(dumpfile, 'wb') as f:
pickle.dump(b, f)
# Write this last since we use the existence of this file to check if
# we generated the build file successfully, so we don't want an error
# that pops up during generation, post-conf scripts, etc to cause us to
# incorrectly signal a successful meson run which will cause an error
# about an already-configured build directory when the user tries again.
#
# However, we set the mtime to an earlier value to ensure that doing an
# mtime comparison between the coredata dump and other build files
# shows the build files to be newer, not older.
env.dump_coredata(coredata_mtime)
def run_script_command(args):
cmdname = args[0]
cmdargs = args[1:]
if cmdname == 'exe':
import mesonbuild.scripts.meson_exe as abc
cmdfunc = abc.run
elif cmdname == 'test':
import mesonbuild.scripts.meson_test as abc
cmdfunc = abc.run
elif cmdname == 'benchmark':
import mesonbuild.scripts.meson_benchmark as abc
cmdfunc = abc.run
elif cmdname == 'install':
import mesonbuild.scripts.meson_install as abc
cmdfunc = abc.run
elif cmdname == 'commandrunner':
import mesonbuild.scripts.commandrunner as abc
cmdfunc = abc.run
elif cmdname == 'delsuffix':
import mesonbuild.scripts.delwithsuffix as abc
cmdfunc = abc.run
elif cmdname == 'depfixer':
import mesonbuild.scripts.depfixer as abc
cmdfunc = abc.run
elif cmdname == 'dirchanger':
import mesonbuild.scripts.dirchanger as abc
cmdfunc = abc.run
elif cmdname == 'gtkdoc':
import mesonbuild.scripts.gtkdochelper as abc
cmdfunc = abc.run
elif cmdname == 'regencheck':
import mesonbuild.scripts.regen_checker as abc
cmdfunc = abc.run
elif cmdname == 'symbolextractor':
import mesonbuild.scripts.symbolextractor as abc
cmdfunc = abc.run
elif cmdname == 'scanbuild':
import mesonbuild.scripts.scanbuild as abc
cmdfunc = abc.run
elif cmdname == 'vcstagger':
import mesonbuild.scripts.vcstagger as abc
cmdfunc = abc.run
elif cmdname == 'gettext':
import mesonbuild.scripts.gettext as abc
cmdfunc = abc.run
elif cmdname == 'yelphelper':
import mesonbuild.scripts.yelphelper as abc
cmdfunc = abc.run
else:
raise MesonException('Unknown internal command {}.'.format(cmdname))
return cmdfunc(cmdargs)
def run(mainfile, args):
if sys.version_info < (3, 3):
print('Meson works correctly only with python 3.3+.')
print('You have python %s.' % sys.version)
print('Please update your environment')
return 1
if len(args) >= 2 and args[0] == '--internal':
if args[1] != 'regenerate':
sys.exit(run_script_command(args[1:]))
args = args[2:]
handshake = True
else:
handshake = False
args = mesonlib.expand_arguments(args)
options = parser.parse_args(args)
args = options.directories
if len(args) == 0 or len(args) > 2:
# if there's a meson.build in the dir above, and not in the current
# directory, assume we're in the build directory
if len(args) == 0 and not os.path.exists('meson.build') and os.path.exists('../meson.build'):
dir1 = '..'
dir2 = '.'
else:
print('{} <source directory> <build directory>'.format(sys.argv[0]))
print('If you omit either directory, the current directory is substituted.')
print('Run {} --help for more information.'.format(sys.argv[0]))
return 1
else:
dir1 = args[0]
if len(args) > 1:
dir2 = args[1]
else:
dir2 = '.'
try:
app = MesonApp(dir1, dir2, mainfile, handshake, options, sys.argv)
except Exception as e:
# Log directory does not exist, so just print
# to stdout.
print('Error during basic setup:\n')
print(e)
return 1
try:
app.generate()
except Exception as e:
if isinstance(e, MesonException):
if hasattr(e, 'file') and hasattr(e, 'lineno') and hasattr(e, 'colno'):
mlog.log(mlog.red('\nMeson encountered an error in file %s, line %d, column %d:' % (e.file, e.lineno, e.colno)))
else:
mlog.log(mlog.red('\nMeson encountered an error:'))
mlog.log(e)
else:
traceback.print_exc()
return 1
return 0
|
|
# Copyright (c) 2014 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_log import log as logging
from oslo_utils import netutils
import pymongo
from trove.common import cfg
from trove.common.db.mongodb import models
from trove.common import exception
from trove.common.i18n import _
from trove.common import instance as ds_instance
from trove.common.stream_codecs import JsonCodec, SafeYamlCodec
from trove.common import utils as utils
from trove.guestagent.common.configuration import ConfigurationManager
from trove.guestagent.common.configuration import OneFileOverrideStrategy
from trove.guestagent.common import guestagent_utils
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.mongodb import system
from trove.guestagent.datastore import service
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONFIG_FILE = operating_system.file_discovery(system.CONFIG_CANDIDATES)
MANAGER = CONF.datastore_manager if CONF.datastore_manager else 'mongodb'
# Configuration group for clustering-related settings.
CNF_CLUSTER = 'clustering'
MONGODB_PORT = CONF.mongodb.mongodb_port
CONFIGSVR_PORT = CONF.mongodb.configsvr_port
class MongoDBApp(object):
"""Prepares DBaaS on a Guest container."""
def __init__(self):
self.state_change_wait_time = CONF.state_change_wait_time
revision_dir = guestagent_utils.build_file_path(
os.path.dirname(CONFIG_FILE),
ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
self.configuration_manager = ConfigurationManager(
CONFIG_FILE, system.MONGO_USER, system.MONGO_USER,
SafeYamlCodec(default_flow_style=False),
requires_root=True,
override_strategy=OneFileOverrideStrategy(revision_dir))
self.is_query_router = False
self.is_cluster_member = False
self.status = MongoDBAppStatus()
def install_if_needed(self, packages):
"""Prepare the guest machine with a MongoDB installation."""
LOG.info(_("Preparing Guest as MongoDB."))
if not system.PACKAGER.pkg_is_installed(packages):
LOG.debug("Installing packages: %s." % str(packages))
system.PACKAGER.pkg_install(packages, {}, system.TIME_OUT)
LOG.info(_("Finished installing MongoDB server."))
def _get_service_candidates(self):
if self.is_query_router:
return system.MONGOS_SERVICE_CANDIDATES
return system.MONGOD_SERVICE_CANDIDATES
def stop_db(self, update_db=False, do_not_start_on_reboot=False):
self.status.stop_db_service(
self._get_service_candidates(), self.state_change_wait_time,
disable_on_boot=do_not_start_on_reboot, update_db=update_db)
def restart(self):
self.status.restart_db_service(
self._get_service_candidates(), self.state_change_wait_time)
def start_db(self, update_db=False):
self.status.start_db_service(
self._get_service_candidates(), self.state_change_wait_time,
enable_on_boot=True, update_db=update_db)
def update_overrides(self, context, overrides, remove=False):
if overrides:
self.configuration_manager.apply_user_override(overrides)
def remove_overrides(self):
self.configuration_manager.remove_user_override()
def start_db_with_conf_changes(self, config_contents):
LOG.info(_('Starting MongoDB with configuration changes.'))
if self.status.is_running:
format = 'Cannot start_db_with_conf_changes because status is %s.'
LOG.debug(format, self.status)
raise RuntimeError(format % self.status)
LOG.info(_("Initiating config."))
self.configuration_manager.save_configuration(config_contents)
# The configuration template has to be updated with
# guestagent-controlled settings.
self.apply_initial_guestagent_configuration(
None, mount_point=system.MONGODB_MOUNT_POINT)
self.start_db(True)
def apply_initial_guestagent_configuration(
self, cluster_config, mount_point=None):
LOG.debug("Applying initial configuration.")
# Mongodb init scripts assume the PID-file path is writable by the
# database service.
# See: https://jira.mongodb.org/browse/SERVER-20075
self._initialize_writable_run_dir()
self.configuration_manager.apply_system_override(
{'processManagement.fork': False,
'processManagement.pidFilePath': system.MONGO_PID_FILE,
'systemLog.destination': 'file',
'systemLog.path': system.MONGO_LOG_FILE,
'systemLog.logAppend': True
})
if mount_point:
self.configuration_manager.apply_system_override(
{'storage.dbPath': mount_point})
if cluster_config is not None:
self._configure_as_cluster_instance(cluster_config)
else:
self._configure_network(MONGODB_PORT)
def _initialize_writable_run_dir(self):
"""Create a writable directory for Mongodb's runtime data
(e.g. PID-file).
"""
mongodb_run_dir = os.path.dirname(system.MONGO_PID_FILE)
LOG.debug("Initializing a runtime directory: %s" % mongodb_run_dir)
operating_system.create_directory(
mongodb_run_dir, user=system.MONGO_USER, group=system.MONGO_USER,
force=True, as_root=True)
def _configure_as_cluster_instance(self, cluster_config):
"""Configure this guest as a cluster instance and return its
new status.
"""
if cluster_config['instance_type'] == "query_router":
self._configure_as_query_router()
elif cluster_config["instance_type"] == "config_server":
self._configure_as_config_server()
elif cluster_config["instance_type"] == "member":
self._configure_as_cluster_member(
cluster_config['replica_set_name'])
else:
LOG.error(_("Bad cluster configuration; instance type "
"given as %s.") % cluster_config['instance_type'])
return ds_instance.ServiceStatuses.FAILED
if 'key' in cluster_config:
self._configure_cluster_security(cluster_config['key'])
def _configure_as_query_router(self):
LOG.info(_("Configuring instance as a cluster query router."))
self.is_query_router = True
# FIXME(pmalik): We should really have a separate configuration
# template for the 'mongos' process.
# Remove all storage configurations from the template.
# They apply only to 'mongod' processes.
# Already applied overrides will be integrated into the base file and
# their current groups removed.
config = guestagent_utils.expand_dict(
self.configuration_manager.parse_configuration())
if 'storage' in config:
LOG.debug("Removing 'storage' directives from the configuration "
"template.")
del config['storage']
self.configuration_manager.save_configuration(
guestagent_utils.flatten_dict(config))
# Apply 'mongos' configuration.
self._configure_network(MONGODB_PORT)
self.configuration_manager.apply_system_override(
{'sharding.configDB': ''}, CNF_CLUSTER)
def _configure_as_config_server(self):
LOG.info(_("Configuring instance as a cluster config server."))
self._configure_network(CONFIGSVR_PORT)
self.configuration_manager.apply_system_override(
{'sharding.clusterRole': 'configsvr'}, CNF_CLUSTER)
def _configure_as_cluster_member(self, replica_set_name):
LOG.info(_("Configuring instance as a cluster member."))
self.is_cluster_member = True
self._configure_network(MONGODB_PORT)
# we don't want these thinking they are in a replica set yet
# as that would prevent us from creating the admin user,
# so start mongo before updating the config.
# mongo will be started by the cluster taskmanager
self.start_db()
self.configuration_manager.apply_system_override(
{'replication.replSetName': replica_set_name}, CNF_CLUSTER)
def _configure_cluster_security(self, key_value):
"""Force cluster key-file-based authentication.
        This will enable RBAC.
"""
# Store the cluster member authentication key.
self.store_key(key_value)
self.configuration_manager.apply_system_override(
{'security.clusterAuthMode': 'keyFile',
'security.keyFile': self.get_key_file()}, CNF_CLUSTER)
def _configure_network(self, port=None):
"""Make the service accessible at a given (or default if not) port.
"""
instance_ip = netutils.get_my_ipv4()
bind_interfaces_string = ','.join([instance_ip, '127.0.0.1'])
options = {'net.bindIp': bind_interfaces_string}
if port is not None:
guestagent_utils.update_dict({'net.port': port}, options)
self.configuration_manager.apply_system_override(options)
self.status.set_host(instance_ip, port=port)
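        # Illustrative example (not from the original source): with
        # instance_ip '10.0.0.5' and port 27017 the override applied above is
        # {'net.bindIp': '10.0.0.5,127.0.0.1', 'net.port': 27017}.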
def clear_storage(self):
mount_point = "/var/lib/mongodb/*"
LOG.debug("Clearing storage at %s." % mount_point)
try:
operating_system.remove(mount_point, force=True, as_root=True)
except exception.ProcessExecutionError:
LOG.exception(_("Error clearing storage."))
def _has_config_db(self):
value_string = self.configuration_manager.get_value(
'sharding', {}).get('configDB')
return value_string is not None
# FIXME(pmalik): This method should really be called 'set_config_servers'.
# The current name suggests it adds more config servers, but it
# rather replaces the existing ones.
def add_config_servers(self, config_server_hosts):
"""Set config servers on a query router (mongos) instance.
"""
config_servers_string = ','.join(['%s:%s' % (host, CONFIGSVR_PORT)
for host in config_server_hosts])
LOG.info(_("Setting config servers: %s") % config_servers_string)
self.configuration_manager.apply_system_override(
{'sharding.configDB': config_servers_string}, CNF_CLUSTER)
self.start_db(True)
def add_shard(self, replica_set_name, replica_set_member):
"""
This method is used by query router (mongos) instances.
"""
url = "%(rs)s/%(host)s:%(port)s"\
% {'rs': replica_set_name,
'host': replica_set_member,
'port': MONGODB_PORT}
MongoDBAdmin().add_shard(url)
def add_members(self, members):
"""
This method is used by a replica-set member instance.
"""
def check_initiate_status():
"""
This method is used to verify replica-set status.
"""
status = MongoDBAdmin().get_repl_status()
if((status["ok"] == 1) and
(status["members"][0]["stateStr"] == "PRIMARY") and
(status["myState"] == 1)):
return True
else:
return False
def check_rs_status():
"""
This method is used to verify replica-set status.
"""
status = MongoDBAdmin().get_repl_status()
primary_count = 0
if status["ok"] != 1:
return False
if len(status["members"]) != (len(members) + 1):
return False
for rs_member in status["members"]:
if rs_member["state"] not in [1, 2, 7]:
return False
if rs_member["health"] != 1:
return False
if rs_member["state"] == 1:
primary_count += 1
return primary_count == 1
MongoDBAdmin().rs_initiate()
# TODO(ramashri) see if hardcoded values can be removed
utils.poll_until(check_initiate_status, sleep_time=30,
time_out=CONF.mongodb.add_members_timeout)
# add replica-set members
MongoDBAdmin().rs_add_members(members)
# TODO(ramashri) see if hardcoded values can be removed
utils.poll_until(check_rs_status, sleep_time=10,
time_out=CONF.mongodb.add_members_timeout)
def _set_localhost_auth_bypass(self, enabled):
"""When active, the localhost exception allows connections from the
localhost interface to create the first user on the admin database.
The exception applies only when there are no users created in the
MongoDB instance.
"""
self.configuration_manager.apply_system_override(
{'setParameter': {'enableLocalhostAuthBypass': enabled}})
def list_all_dbs(self):
return MongoDBAdmin().list_database_names()
def db_data_size(self, db_name):
schema = models.MongoDBSchema(db_name)
return MongoDBAdmin().db_stats(schema.serialize())['dataSize']
def admin_cmd_auth_params(self):
return MongoDBAdmin().cmd_admin_auth_params
def get_key_file(self):
return system.MONGO_KEY_FILE
def get_key(self):
return operating_system.read_file(
system.MONGO_KEY_FILE, as_root=True).rstrip()
def store_key(self, key):
"""Store the cluster key."""
LOG.debug('Storing key for MongoDB cluster.')
operating_system.write_file(system.MONGO_KEY_FILE, key, as_root=True)
operating_system.chmod(system.MONGO_KEY_FILE,
operating_system.FileMode.SET_USR_RO,
as_root=True)
operating_system.chown(system.MONGO_KEY_FILE,
system.MONGO_USER, system.MONGO_USER,
as_root=True)
def store_admin_password(self, password):
LOG.debug('Storing admin password.')
creds = MongoDBCredentials(username=system.MONGO_ADMIN_NAME,
password=password)
creds.write(system.MONGO_ADMIN_CREDS_FILE)
return creds
def create_admin_user(self, password):
"""Create the admin user while the localhost exception is active."""
LOG.debug('Creating the admin user.')
creds = self.store_admin_password(password)
user = models.MongoDBUser(name='admin.%s' % creds.username,
password=creds.password)
user.roles = system.MONGO_ADMIN_ROLES
        # the driver engine is already cached, but we need to change it
with MongoDBClient(None, host='localhost',
port=MONGODB_PORT) as client:
MongoDBAdmin().create_validated_user(user, client=client)
# now revert to the normal engine
self.status.set_host(host=netutils.get_my_ipv4(),
port=MONGODB_PORT)
LOG.debug('Created admin user.')
def secure(self):
"""Create the Trove admin user.
The service should not be running at this point.
This will enable role-based access control (RBAC) by default.
"""
if self.status.is_running:
raise RuntimeError(_("Cannot secure the instance. "
"The service is still running."))
try:
self.configuration_manager.apply_system_override(
{'security.authorization': 'enabled'})
self._set_localhost_auth_bypass(True)
self.start_db(update_db=False)
password = utils.generate_random_password()
self.create_admin_user(password)
LOG.debug("MongoDB secure complete.")
finally:
self._set_localhost_auth_bypass(False)
self.stop_db()
def get_configuration_property(self, name, default=None):
"""Return the value of a MongoDB configuration property.
"""
return self.configuration_manager.get_value(name, default)
def prep_primary(self):
# Prepare the primary member of a replica set.
password = utils.generate_random_password()
self.create_admin_user(password)
self.restart()
@property
def replica_set_name(self):
return MongoDBAdmin().get_repl_status()['set']
@property
def admin_password(self):
creds = MongoDBCredentials()
creds.read(system.MONGO_ADMIN_CREDS_FILE)
return creds.password
def is_shard_active(self, replica_set_name):
shards = MongoDBAdmin().list_active_shards()
if replica_set_name in [shard['_id'] for shard in shards]:
LOG.debug('Replica set %s is active.' % replica_set_name)
return True
else:
LOG.debug('Replica set %s is not active.' % replica_set_name)
return False
class MongoDBAppStatus(service.BaseDbStatus):
def __init__(self, host='localhost', port=None):
super(MongoDBAppStatus, self).__init__()
self.set_host(host, port=port)
def set_host(self, host, port=None):
# This forces refresh of the 'pymongo' engine cached in the
# MongoDBClient class.
# Authentication is not required to check the server status.
MongoDBClient(None, host=host, port=port)
def _get_actual_db_status(self):
try:
with MongoDBClient(None) as client:
client.server_info()
return ds_instance.ServiceStatuses.RUNNING
except (pymongo.errors.ServerSelectionTimeoutError,
pymongo.errors.AutoReconnect):
return ds_instance.ServiceStatuses.SHUTDOWN
except Exception:
LOG.exception(_("Error getting MongoDB status."))
return ds_instance.ServiceStatuses.SHUTDOWN
def cleanup_stalled_db_services(self):
pid, err = utils.execute_with_timeout(system.FIND_PID, shell=True)
utils.execute_with_timeout(system.MONGODB_KILL % pid, shell=True)
class MongoDBAdmin(object):
"""Handles administrative tasks on MongoDB."""
# user is cached by making it a class attribute
admin_user = None
def _admin_user(self):
if not type(self).admin_user:
creds = MongoDBCredentials()
creds.read(system.MONGO_ADMIN_CREDS_FILE)
user = models.MongoDBUser(
'admin.%s' % creds.username,
creds.password
)
type(self).admin_user = user
return type(self).admin_user
@property
def cmd_admin_auth_params(self):
"""Returns a list of strings that constitute MongoDB command line
authentication parameters.
"""
user = self._admin_user()
return ['--username', user.username,
'--password', user.password,
'--authenticationDatabase', user.database.name]
def _create_user_with_client(self, user, client):
"""Run the add user command."""
client[user.database.name].add_user(
user.username, password=user.password, roles=user.roles
)
def create_validated_user(self, user, client=None):
"""Creates a user on their database. The caller should ensure that
this action is valid.
:param user: a MongoDBUser object
"""
LOG.debug('Creating user %s on database %s with roles %s.'
% (user.username, user.database.name, str(user.roles)))
if client:
self._create_user_with_client(user, client)
else:
with MongoDBClient(self._admin_user()) as admin_client:
self._create_user_with_client(user, admin_client)
def create_users(self, users):
"""Create the given user(s).
:param users: list of serialized user objects
"""
with MongoDBClient(self._admin_user()) as client:
for item in users:
user = models.MongoDBUser.deserialize(item)
# this could be called to create multiple users at once;
# catch exceptions, log the message, and continue
try:
user.check_create()
if self._get_user_record(user.name, client=client):
raise ValueError(_('User with name %(user)s already '
'exists.') % {'user': user.name})
self.create_validated_user(user, client=client)
except (ValueError, pymongo.errors.PyMongoError) as e:
LOG.error(e)
LOG.warning(_('Skipping creation of user with name '
'%(user)s') % {'user': user.name})
def delete_validated_user(self, user):
"""Deletes a user from their database. The caller should ensure that
this action is valid.
:param user: a MongoDBUser object
"""
LOG.debug('Deleting user %s from database %s.'
% (user.username, user.database.name))
with MongoDBClient(self._admin_user()) as admin_client:
admin_client[user.database.name].remove_user(user.username)
def delete_user(self, user):
"""Delete the given user.
:param user: a serialized user object
"""
user = models.MongoDBUser.deserialize(user)
user.check_delete()
self.delete_validated_user(user)
def _get_user_record(self, name, client=None):
"""Get the user's record."""
user = models.MongoDBUser(name)
if user.is_ignored:
LOG.warning(_('Skipping retrieval of user with reserved '
'name %(user)s') % {'user': user.name})
return None
if client:
user_info = client.admin.system.users.find_one(
{'user': user.username, 'db': user.database.name})
else:
with MongoDBClient(self._admin_user()) as admin_client:
user_info = admin_client.admin.system.users.find_one(
{'user': user.username, 'db': user.database.name})
if not user_info:
return None
user.roles = user_info['roles']
return user
def get_existing_user(self, name):
"""Check that a user exists."""
user = self._get_user_record(name)
if not user:
            raise ValueError(_('User with name %(user)s does not '
                               'exist.') % {'user': name})
return user
def get_user(self, name):
"""Get information for the given user."""
LOG.debug('Getting user %s.' % name)
user = self._get_user_record(name)
if not user:
return None
return user.serialize()
def list_users(self, limit=None, marker=None, include_marker=False):
"""Get a list of all users."""
users = []
with MongoDBClient(self._admin_user()) as admin_client:
for user_info in admin_client.admin.system.users.find():
user = models.MongoDBUser(name=user_info['_id'])
user.roles = user_info['roles']
if not user.is_ignored:
users.append(user)
LOG.debug('users = ' + str(users))
return guestagent_utils.serialize_list(
users,
limit=limit, marker=marker, include_marker=include_marker)
def change_passwords(self, users):
with MongoDBClient(self._admin_user()) as admin_client:
for item in users:
user = models.MongoDBUser.deserialize(item)
                # this could be called to change multiple passwords at once;
                # catch exceptions, log the message, and continue
                try:
                    user.check_create()
                    self.get_existing_user(user.name)
LOG.debug('Changing password for user %(user)s'
% {'user': user.name})
self._create_user_with_client(user, admin_client)
except (ValueError, pymongo.errors.PyMongoError) as e:
LOG.error(e)
LOG.warning(_('Skipping password change for user with '
'name %(user)s') % {'user': user.name})
def update_attributes(self, name, user_attrs):
"""Update user attributes."""
user = self.get_existing_user(name)
password = user_attrs.get('password')
if password:
user.password = password
self.change_passwords([user.serialize()])
if user_attrs.get('name'):
LOG.warning(_('Changing user name is not supported.'))
if user_attrs.get('host'):
LOG.warning(_('Changing user host is not supported.'))
def enable_root(self, password=None):
"""Create a user 'root' with role 'root'."""
if not password:
LOG.debug('Generating root user password.')
password = utils.generate_random_password()
root_user = models.MongoDBUser.root(password=password)
root_user.roles = {'db': 'admin', 'role': 'root'}
root_user.check_create()
self.create_validated_user(root_user)
return root_user.serialize()
def is_root_enabled(self):
"""Check if user 'admin.root' exists."""
with MongoDBClient(self._admin_user()) as admin_client:
return bool(admin_client.admin.system.users.find_one(
{'roles.role': 'root'}
))
def _update_user_roles(self, user):
with MongoDBClient(self._admin_user()) as admin_client:
admin_client[user.database.name].add_user(
user.username, roles=user.roles
)
def grant_access(self, username, databases):
"""Adds the RW role to the user for each specified database."""
user = self.get_existing_user(username)
for db_name in databases:
# verify the database name
models.MongoDBSchema(db_name)
role = {'db': db_name, 'role': 'readWrite'}
if role not in user.roles:
LOG.debug('Adding role %s to user %s.'
% (str(role), username))
user.roles = role
else:
LOG.debug('User %s already has role %s.'
% (username, str(role)))
LOG.debug('Updating user %s.' % username)
self._update_user_roles(user)
def revoke_access(self, username, database):
"""Removes the RW role from the user for the specified database."""
user = self.get_existing_user(username)
# verify the database name
models.MongoDBSchema(database)
role = {'db': database, 'role': 'readWrite'}
LOG.debug('Removing role %s from user %s.'
% (str(role), username))
user.revoke_role(role)
LOG.debug('Updating user %s.' % username)
self._update_user_roles(user)
def list_access(self, username):
"""Returns a list of all databases for which the user has the RW role.
"""
user = self.get_existing_user(username)
return user.databases
def create_database(self, databases):
"""Forces creation of databases.
For each new database creates a dummy document in a dummy collection,
then drops the collection.
"""
tmp = 'dummy'
with MongoDBClient(self._admin_user()) as admin_client:
for item in databases:
schema = models.MongoDBSchema.deserialize(item)
schema.check_create()
LOG.debug('Creating MongoDB database %s' % schema.name)
db = admin_client[schema.name]
db[tmp].insert({'dummy': True})
db.drop_collection(tmp)
def delete_database(self, database):
"""Deletes the database."""
with MongoDBClient(self._admin_user()) as admin_client:
schema = models.MongoDBSchema.deserialize(database)
schema.check_delete()
admin_client.drop_database(schema.name)
def list_database_names(self):
"""Get the list of database names."""
with MongoDBClient(self._admin_user()) as admin_client:
return admin_client.database_names()
def list_databases(self, limit=None, marker=None, include_marker=False):
"""Lists the databases."""
databases = []
for db_name in self.list_database_names():
schema = models.MongoDBSchema(name=db_name)
if not schema.is_ignored():
databases.append(schema)
LOG.debug('databases = ' + str(databases))
return guestagent_utils.serialize_list(
databases,
limit=limit, marker=marker, include_marker=include_marker)
def add_shard(self, url):
"""Runs the addShard command."""
with MongoDBClient(self._admin_user()) as admin_client:
admin_client.admin.command({'addShard': url})
def get_repl_status(self):
"""Runs the replSetGetStatus command."""
with MongoDBClient(self._admin_user()) as admin_client:
status = admin_client.admin.command('replSetGetStatus')
LOG.debug('Replica set status: %s' % status)
return status
def rs_initiate(self):
"""Runs the replSetInitiate command."""
with MongoDBClient(self._admin_user()) as admin_client:
return admin_client.admin.command('replSetInitiate')
def rs_add_members(self, members):
"""Adds the given members to the replication set."""
with MongoDBClient(self._admin_user()) as admin_client:
# get the current config, add the new members, then save it
config = admin_client.admin.command('replSetGetConfig')['config']
config['version'] += 1
next_id = max([m['_id'] for m in config['members']]) + 1
for member in members:
config['members'].append({'_id': next_id, 'host': member})
next_id += 1
admin_client.admin.command('replSetReconfig', config)
def db_stats(self, database, scale=1):
"""Gets the stats for the given database."""
with MongoDBClient(self._admin_user()) as admin_client:
db_name = models.MongoDBSchema.deserialize(database).name
return admin_client[db_name].command('dbStats', scale=scale)
def list_active_shards(self):
"""Get a list of shards active in this cluster."""
with MongoDBClient(self._admin_user()) as admin_client:
return [shard for shard in admin_client.config.shards.find()]
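# Illustrative sketch only (not called by the manager): the bare pymongo
# pattern that rs_add_members() above relies on. `client` is assumed to be an
# authenticated pymongo connection and `new_member_host` a "host:port" string;
# the commands themselves are standard MongoDB admin commands.
def _example_rs_reconfig(client, new_member_host):
    """Append a single member to the replica set config and push it back."""
    config = client.admin.command('replSetGetConfig')['config']
    config['version'] += 1
    next_id = max(m['_id'] for m in config['members']) + 1
    config['members'].append({'_id': next_id, 'host': new_member_host})
    return client.admin.command('replSetReconfig', config)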
class MongoDBClient(object):
"""A wrapper to manage a MongoDB connection."""
# engine information is cached by making it a class attribute
engine = {}
def __init__(self, user, host=None, port=None):
"""Get the client. Specifying host and/or port updates cached values.
:param user: MongoDBUser instance used to authenticate
:param host: server address, defaults to localhost
:param port: server port, defaults to 27017
:return:
"""
new_client = False
self._logged_in = False
if not type(self).engine:
# no engine cached
type(self).engine['host'] = (host if host else 'localhost')
type(self).engine['port'] = (port if port else MONGODB_PORT)
new_client = True
elif host or port:
LOG.debug("Updating MongoDB client.")
if host:
type(self).engine['host'] = host
if port:
type(self).engine['port'] = port
new_client = True
if new_client:
host = type(self).engine['host']
port = type(self).engine['port']
LOG.debug("Creating MongoDB client to %(host)s:%(port)s."
% {'host': host, 'port': port})
type(self).engine['client'] = pymongo.MongoClient(host=host,
port=port,
connect=False)
self.session = type(self).engine['client']
if user:
db_name = user.database.name
LOG.debug("Authenticating MongoDB client on %s." % db_name)
self._db = self.session[db_name]
self._db.authenticate(user.username, password=user.password)
self._logged_in = True
def __enter__(self):
return self.session
def __exit__(self, exc_type, exc_value, traceback):
LOG.debug("Disconnecting from MongoDB.")
if self._logged_in:
self._db.logout()
self.session.close()
class MongoDBCredentials(object):
"""Handles storing/retrieving credentials. Stored as json in files."""
def __init__(self, username=None, password=None):
self.username = username
self.password = password
def read(self, filename):
credentials = operating_system.read_file(filename, codec=JsonCodec())
self.username = credentials['username']
self.password = credentials['password']
def write(self, filename):
credentials = {'username': self.username,
'password': self.password}
operating_system.write_file(filename, credentials, codec=JsonCodec())
operating_system.chmod(filename, operating_system.FileMode.SET_USR_RW)
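# Hedged end-to-end sketch (not used by Trove itself) tying the helpers above
# together: read the stored admin credentials, rebuild the admin user object
# and open an authenticated session. Everything referenced here is defined or
# imported in this module; nothing new is introduced.
def _example_admin_session():
    creds = MongoDBCredentials()
    creds.read(system.MONGO_ADMIN_CREDS_FILE)
    admin = models.MongoDBUser('admin.%s' % creds.username, creds.password)
    # MongoDBClient caches host/port on first use and authenticates against
    # the user's database in __init__; __enter__ returns the pymongo client.
    with MongoDBClient(admin) as session:
        return session.server_info()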
|
|
import json
from dateutil.parser import parse as parse_datetime
from dateutil import tz
from datetime import datetime
import itertools
import logging
import urllib
import urlparse
from flask import render_template, redirect, session, make_response, url_for, g, request, abort, \
Response
from google.appengine.api import taskqueue, urlfetch, users
import feedparser
from sparkprs import app, cache
from sparkprs.models import Issue, JIRAIssue, KVS, User
from sparkprs.github_api import raw_github_request, github_request, ISSUES_BASE, BASE_AUTH_URL
from link_header import parse as parse_link_header
# --------- Authentication and admin panel functionality -----------------------------------------#
@app.before_request
def before_request():
g.user = None
if 'github_login' in session:
g.user = User.query(User.github_login == session['github_login']).get()
@app.route('/github-callback')
def github_authorized_callback():
# This is based loosely on https://github.com/cenkalti/github-flask
# and http://stackoverflow.com/a/22275563
if 'code' not in request.args:
raise Exception("Got error from GitHub")
next_url = request.args.get('next') or url_for('main')
payload = {
'code': request.args.get('code'),
'client_id': app.config['GITHUB_CLIENT_ID'],
'client_secret': app.config['GITHUB_CLIENT_SECRET'],
}
auth_url = BASE_AUTH_URL + 'access_token'
logging.info("Auth url is %s" % auth_url)
response = urlfetch.fetch(auth_url, method=urlfetch.POST, payload=urllib.urlencode(payload),
validate_certificate=True)
if response.status_code != 200:
raise Exception("Got %i response from GitHub:\n%s" %
(response.status_code, response.content))
data = urlparse.parse_qs(response.content)
access_token = data.get('access_token', None)
if access_token is None:
return redirect(next_url)
access_token = access_token[0].decode('ascii')
user_json = json.loads(github_request("user", oauth_token=access_token).content)
user = User.query(User.github_login == user_json['login']).get()
if user is None:
user = User(github_login=user_json['login'])
user.github_user_json = user_json
user.github_access_token = access_token
user.put()
session['github_login'] = user.github_login
return redirect(url_for('main'))
@app.route('/login')
def login():
query = {
'client_id': app.config['GITHUB_CLIENT_ID'],
'redirect_uri': app.config['GITHUB_CALLBACK_URL'],
}
auth_url = BASE_AUTH_URL + 'authorize?' + urllib.urlencode(query)
return redirect(auth_url)
@app.route('/logout')
def logout():
session.pop('github_login', None)
return redirect(url_for('main'))
@app.route('/appengine-admin-login')
def appengine_admin_login():
return redirect(users.create_login_url("/"))
@app.route('/appengine-admin-logout')
def appengine_admin_logout():
return redirect(users.create_logout_url("/"))
@app.route('/user-info')
def user_info():
"""
Returns JSON describing the currently-signed-in user.
"""
if g.user:
user_dict = {
'github_login': g.user.github_login,
'roles': g.user.roles,
}
else:
user_dict = None
return Response(json.dumps(user_dict, indent=2, separators=(',', ': ')),
mimetype='application/json')
# --------- Task queue and cron jobs -------------------------------------------------------------#
@app.route("/tasks/update-github-prs")
def update_github_prs():
def fetch_and_process(url):
logging.debug("Following url %s" % url)
response = raw_github_request(url, oauth_token=app.config['GITHUB_OAUTH_KEY'])
link_header = parse_link_header(response.headers.get('Link', ''))
prs = json.loads(response.content)
now = datetime.utcnow()
for pr in prs:
updated_at = \
parse_datetime(pr['updated_at']).astimezone(tz.tzutc()).replace(tzinfo=None)
is_fresh = (now - updated_at).total_seconds() < app.config['FRESHNESS_THRESHOLD']
queue_name = ("fresh-prs" if is_fresh else "old-prs")
taskqueue.add(url="/tasks/update-github-pr/%i" % pr['number'], queue_name=queue_name)
for link in link_header.links:
if link.rel == 'next':
fetch_and_process(link.href)
last_update_time = KVS.get("issues_since")
url = ISSUES_BASE + "?sort=updated&state=all&per_page=100"
if last_update_time:
url += "&since=%s" % last_update_time
fetch_and_process(url)
    KVS.put('issues_since', datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ"))
return "Done fetching updated GitHub issues"
@app.route("/tasks/update-github-pr/<int:number>", methods=['GET', 'POST'])
def update_pr(number):
Issue.get_or_create(number).update(app.config['GITHUB_OAUTH_KEY'])
return "Done updating pull request %i" % number
@app.route("/tasks/update-jira-issues")
def update_jira_issues():
feed_url = "%s/activity?maxResults=20&streams=key+IS+%s&providers=issues" % \
(app.config['JIRA_API_BASE'], app.config['JIRA_PROJECT'])
feed = feedparser.parse(feed_url)
# To avoid double-processing of RSS feed entries, only process entries that are newer than
# the watermark set during the last refresh:
last_watermark = KVS.get("jira_sync_watermark")
if last_watermark is not None:
new_entries = [i for i in feed.entries if i.published_parsed > last_watermark]
else:
new_entries = feed.entries
if not new_entries:
return "No new entries to update since last watermark " + str(last_watermark)
issue_ids = set(i.link.split('/')[-1] for i in new_entries)
for issue in issue_ids:
taskqueue.add(url="/tasks/update-jira-issue/" + issue, queue_name='jira-issues')
KVS.put('jira_sync_watermark', new_entries[0].published_parsed)
return "Queued JIRA issues for update: " + str(issue_ids)
@app.route("/tasks/update-jira-issues-for-all-open-prs")
def update_all_jiras_for_open_prs():
"""
Used to bulk-load information from JIRAs for all open PRs. Useful when upgrading
from an earlier version of spark-prs.
"""
prs = Issue.query(Issue.state == "open").order(-Issue.updated_at).fetch()
jira_issues = set(itertools.chain.from_iterable(pr.parsed_title['jiras'] for pr in prs))
for issue in jira_issues:
taskqueue.add(url="/tasks/update-jira-issue/SPARK-%i" % issue, queue_name='jira-issues')
return "Queued JIRA issues for update: " + str(jira_issues)
@app.route("/tasks/update-jira-issue/<string:issue_id>", methods=['GET', 'POST'])
def update_jira_issue(issue_id):
JIRAIssue.get_or_create(issue_id).update()
return "Done updating JIRA issue %s" % issue_id
# --------- User-facing pages --------------------------------------------------------------------#
def build_response(template, max_age=60):
rendered = render_template(template)
response = make_response(rendered)
response.cache_control.max_age = max_age
return response
@app.route('/search-open-prs')
@cache.cached(timeout=60)
def search_open_prs():
prs = Issue.query(Issue.state == "open").order(-Issue.updated_at).fetch()
json_dicts = []
for pr in prs:
last_jenkins_comment_dict = None
if pr.last_jenkins_comment:
last_jenkins_comment_dict = {
'body': pr.last_jenkins_comment['body'],
'user': {'login': pr.last_jenkins_comment['user']['login']},
'html_url': pr.last_jenkins_comment['html_url'],
}
d = {
'parsed_title': pr.parsed_title,
'number': pr.number,
'updated_at': str(pr.updated_at),
'user': pr.user,
'state': pr.state,
'components': pr.components,
'lines_added': pr.lines_added,
'lines_deleted': pr.lines_deleted,
'lines_changed': pr.lines_changed,
'is_mergeable': pr.is_mergeable,
'commenters': [{'username': u, 'data': d} for (u, d) in pr.commenters],
'last_jenkins_outcome': pr.last_jenkins_outcome,
'last_jenkins_comment': last_jenkins_comment_dict,
}
# Use the first JIRA's information to populate the "Priority" and "Issue Type" columns:
jiras = pr.parsed_title["jiras"]
if jiras:
first_jira = JIRAIssue.get_by_id("SPARK-%i" % jiras[0])
if first_jira:
d['jira_priority_name'] = first_jira.priority_name
d['jira_priority_icon_url'] = first_jira.priority_icon_url
d['jira_issuetype_name'] = first_jira.issuetype_name
d['jira_issuetype_icon_url'] = first_jira.issuetype_icon_url
json_dicts.append(d)
response = Response(json.dumps(json_dicts), mimetype='application/json')
return response
@app.route("/trigger-jenkins/<int:number>", methods=['GET', 'POST'])
def test_pr(number):
"""
Triggers a parametrized Jenkins build for testing Spark pull requests.
"""
if not (g.user and g.user.has_capability("jenkins")):
return abort(403)
pr = Issue.get_or_create(number)
commit = pr.pr_json["head"]["sha"]
target_branch = pr.pr_json["base"]["ref"]
# The parameter names here were chosen to match the ones used by Jenkins' GitHub pull request
# builder plugin: https://wiki.jenkins-ci.org/display/JENKINS/Github+pull+request+builder+plugin
# In the Spark repo, the https://github.com/apache/spark/blob/master/dev/run-tests-jenkins
# script reads these variables when posting pull request feedback.
query = {
'token': app.config['JENKINS_PRB_TOKEN'],
'ghprbPullId': number,
'ghprbActualCommit': commit,
'ghprbTargetBranch': target_branch,
# This matches the Jenkins plugin's logic; see
# https://github.com/jenkinsci/ghprb-plugin/blob/master/src/main/java/org/jenkinsci/plugins/ghprb/GhprbTrigger.java#L146
#
# It looks like origin/pr/*/merge ref points to the last successful test merge commit that
# GitHub generates when it checks for mergeability. This API technically isn't documented,
# but enough plugins seem to rely on it that it seems unlikely to change anytime soon
# (if it does, we can always upgrade our tests to perform the merge ourselves).
#
# See also: https://developer.github.com/changes/2013-04-25-deprecating-merge-commit-sha/
'sha1': ("origin/pr/%i/merge" % number) if pr.is_mergeable else commit,
}
trigger_url = "%sbuildWithParameters?%s" % (app.config["JENKINS_PRB_JOB_URL"],
urllib.urlencode(query))
logging.debug("Triggering Jenkins with url %s" % trigger_url)
response = urlfetch.fetch(trigger_url, method="POST")
if response.status_code not in (200, 201):
logging.error("Jenkins responded with status code %i" % response.status_code)
return response.content
else:
return redirect(app.config["JENKINS_PRB_JOB_URL"])
@app.route("/admin/add-role", methods=['POST'])
def add_role():
if not g.user or "admin" not in g.user.roles:
return abort(403)
user = User.query(User.github_login == request.form["username"]).get()
if user is None:
user = User(github_login=request.form["username"])
role = request.form["role"]
if role not in user.roles:
user.roles.append(role)
user.put()
return "Updated user %s; now has roles %s" % (user.github_login, user.roles)
@app.route('/admin')
def admin_panel():
if not g.user or "admin" not in g.user.roles:
return abort(403)
return build_response('admin.html')
@app.route('/')
@app.route('/open-prs')
@app.route('/users')
@app.route('/users/<username>')
def main(username=None):
return build_response('index.html')
|
|
"""
Type variables for Parametric polymorphism.
Cretonne instructions and instruction transformations can be specified to be
polymorphic by using type variables.
"""
from __future__ import absolute_import
import math
from . import types, is_power_of_two
from copy import copy
try:
from typing import Tuple, Union, Iterable, Any, Set, TYPE_CHECKING # noqa
if TYPE_CHECKING:
from srcgen import Formatter # noqa
Interval = Tuple[int, int]
# An Interval where `True` means 'everything'
BoolInterval = Union[bool, Interval]
# Set of special types: None, False, True, or iterable.
SpecialSpec = Union[bool, Iterable[types.SpecialType]]
except ImportError:
pass
MAX_LANES = 256
MAX_BITS = 64
MAX_BITVEC = MAX_BITS * MAX_LANES
def int_log2(x):
# type: (int) -> int
return int(math.log(x, 2))
def intersect(a, b):
# type: (Interval, Interval) -> Interval
"""
Given two `(min, max)` inclusive intervals, compute their intersection.
Use `(None, None)` to represent the empty interval on input and output.
"""
if a[0] is None or b[0] is None:
return (None, None)
lo = max(a[0], b[0])
assert lo is not None
hi = min(a[1], b[1])
assert hi is not None
if lo <= hi:
return (lo, hi)
else:
return (None, None)
def is_empty(intv):
# type: (Interval) -> bool
return intv is None or intv is False or intv == (None, None)
def encode_bitset(vals, size):
# type: (Iterable[int], int) -> int
"""
    Encode a set of values (each in the range [0, size)) as a bitset of width size.
"""
res = 0
assert is_power_of_two(size) and size <= 64
for v in vals:
assert 0 <= v and v < size
res |= 1 << v
return res
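# Worked example of the encoding above (illustrative only): each member of the
# set selects one bit position, so encode_bitset([0, 3, 4], 8) yields
# 0b00011001 == 25.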
def pp_set(s):
# type: (Iterable[Any]) -> str
"""
Return a consistent string representation of a set (ordering is fixed)
"""
return '{' + ', '.join([repr(x) for x in sorted(s)]) + '}'
def decode_interval(intv, full_range, default=None):
# type: (BoolInterval, Interval, int) -> Interval
"""
Decode an interval specification which can take the following values:
True
Use the `full_range`.
`False` or `None`
An empty interval
(lo, hi)
An explicit interval
"""
if isinstance(intv, tuple):
# mypy bug here: 'builtins.None' object is not iterable
lo, hi = intv
assert is_power_of_two(lo)
assert is_power_of_two(hi)
assert lo <= hi
assert lo >= full_range[0]
assert hi <= full_range[1]
return intv
if intv:
return full_range
else:
return (default, default)
def interval_to_set(intv):
# type: (Interval) -> Set
if is_empty(intv):
return set()
(lo, hi) = intv
assert is_power_of_two(lo)
assert is_power_of_two(hi)
assert lo <= hi
return set([2**i for i in range(int_log2(lo), int_log2(hi)+1)])
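# For example (illustrative only), interval_to_set((8, 64)) expands the
# inclusive power-of-two range into the set {8, 16, 32, 64}.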
def legal_bool(bits):
# type: (int) -> bool
"""
True iff bits is a legal bit width for a bool type.
    bits == 1 or bits in {8, 16, ..., MAX_BITS}
"""
return bits == 1 or \
(bits >= 8 and bits <= MAX_BITS and is_power_of_two(bits))
class TypeSet(object):
"""
A set of types.
We don't allow arbitrary subsets of types, but use a parametrized approach
instead.
Objects of this class can be used as dictionary keys.
Parametrized type sets are specified in terms of ranges:
- The permitted range of vector lanes, where 1 indicates a scalar type.
- The permitted range of integer types.
- The permitted range of floating point types, and
- The permitted range of boolean types.
The ranges are inclusive from smallest bit-width to largest bit-width.
A typeset representing scalar integer types `i8` through `i32`:
>>> TypeSet(ints=(8, 32))
TypeSet(lanes={1}, ints={8, 16, 32})
Passing `True` instead of a range selects all available scalar types:
>>> TypeSet(ints=True)
TypeSet(lanes={1}, ints={8, 16, 32, 64})
>>> TypeSet(floats=True)
TypeSet(lanes={1}, floats={32, 64})
>>> TypeSet(bools=True)
TypeSet(lanes={1}, bools={1, 8, 16, 32, 64})
Similarly, passing `True` for the lanes selects all possible scalar and
vector types:
>>> TypeSet(lanes=True, ints=True)
TypeSet(lanes={1, 2, 4, 8, 16, 32, 64, 128, 256}, ints={8, 16, 32, 64})
Finally, a type set can contain special types (derived from `SpecialType`)
which can't appear as lane types.
:param lanes: `(min, max)` inclusive range of permitted vector lane counts.
:param ints: `(min, max)` inclusive range of permitted scalar integer
widths.
:param floats: `(min, max)` inclusive range of permitted scalar floating
point widths.
:param bools: `(min, max)` inclusive range of permitted scalar boolean
widths.
:param bitvecs : `(min, max)` inclusive range of permitted bitvector
widths.
:param specials: Sequence of special types to appear in the set.
"""
def __init__(
self,
lanes=None, # type: BoolInterval
ints=None, # type: BoolInterval
floats=None, # type: BoolInterval
bools=None, # type: BoolInterval
bitvecs=None, # type: BoolInterval
specials=None # type: SpecialSpec
):
# type: (...) -> None
self.lanes = interval_to_set(decode_interval(lanes, (1, MAX_LANES), 1))
self.ints = interval_to_set(decode_interval(ints, (8, MAX_BITS)))
self.floats = interval_to_set(decode_interval(floats, (32, 64)))
self.bools = interval_to_set(decode_interval(bools, (1, MAX_BITS)))
self.bools = set(filter(legal_bool, self.bools))
self.bitvecs = interval_to_set(decode_interval(bitvecs,
(1, MAX_BITVEC)))
# Allow specials=None, specials=True, specials=(...)
self.specials = set() # type: Set[types.SpecialType]
if isinstance(specials, bool):
if specials:
self.specials = set(types.ValueType.all_special_types)
elif specials:
self.specials = set(specials)
def copy(self):
# type: (TypeSet) -> TypeSet
"""
        Return a copy of this type set.
"""
n = TypeSet()
n.lanes = copy(self.lanes)
n.ints = copy(self.ints)
n.floats = copy(self.floats)
n.bools = copy(self.bools)
n.bitvecs = copy(self.bitvecs)
n.specials = copy(self.specials)
return n
def typeset_key(self):
# type: () -> Tuple[Tuple, Tuple, Tuple, Tuple, Tuple, Tuple]
"""Key tuple used for hashing and equality."""
return (tuple(sorted(list(self.lanes))),
tuple(sorted(list(self.ints))),
tuple(sorted(list(self.floats))),
tuple(sorted(list(self.bools))),
tuple(sorted(list(self.bitvecs))),
tuple(sorted(s.name for s in self.specials)))
def __hash__(self):
# type: () -> int
h = hash(self.typeset_key())
assert h == getattr(self, 'prev_hash', h), "TypeSet changed!"
self.prev_hash = h
return h
def __eq__(self, other):
# type: (object) -> bool
if isinstance(other, TypeSet):
return self.typeset_key() == other.typeset_key()
else:
return False
def __ne__(self, other):
# type: (object) -> bool
return not self.__eq__(other)
def __repr__(self):
# type: () -> str
s = 'TypeSet(lanes={}'.format(pp_set(self.lanes))
if len(self.ints) > 0:
s += ', ints={}'.format(pp_set(self.ints))
if len(self.floats) > 0:
s += ', floats={}'.format(pp_set(self.floats))
if len(self.bools) > 0:
s += ', bools={}'.format(pp_set(self.bools))
if len(self.bitvecs) > 0:
s += ', bitvecs={}'.format(pp_set(self.bitvecs))
if len(self.specials) > 0:
s += ', specials=[{}]'.format(pp_set(self.specials))
return s + ')'
def emit_fields(self, fmt):
# type: (Formatter) -> None
"""Emit field initializers for this typeset."""
assert len(self.bitvecs) == 0, "Bitvector types are not emitable."
fmt.comment(repr(self))
fields = (('lanes', 16),
('ints', 8),
('floats', 8),
('bools', 8))
for (field, bits) in fields:
vals = [int_log2(x) for x in getattr(self, field)]
fmt.line('{}: BitSet::<u{}>({}),'
.format(field, bits, encode_bitset(vals, bits)))
def __iand__(self, other):
# type: (TypeSet) -> TypeSet
"""
Intersect self with other type set.
>>> a = TypeSet(lanes=True, ints=(16, 32))
>>> a
TypeSet(lanes={1, 2, 4, 8, 16, 32, 64, 128, 256}, ints={16, 32})
>>> b = TypeSet(lanes=(4, 16), ints=True)
>>> a &= b
>>> a
TypeSet(lanes={4, 8, 16}, ints={16, 32})
>>> a = TypeSet(lanes=True, bools=(1, 8))
>>> b = TypeSet(lanes=True, bools=(16, 32))
>>> a &= b
>>> a
TypeSet(lanes={1, 2, 4, 8, 16, 32, 64, 128, 256})
"""
self.lanes.intersection_update(other.lanes)
self.ints.intersection_update(other.ints)
self.floats.intersection_update(other.floats)
self.bools.intersection_update(other.bools)
self.bitvecs.intersection_update(other.bitvecs)
self.specials.intersection_update(other.specials)
return self
def issubset(self, other):
# type: (TypeSet) -> bool
"""
Return true iff self is a subset of other
"""
return self.lanes.issubset(other.lanes) and \
self.ints.issubset(other.ints) and \
self.floats.issubset(other.floats) and \
self.bools.issubset(other.bools) and \
self.bitvecs.issubset(other.bitvecs) and \
self.specials.issubset(other.specials)
def lane_of(self):
# type: () -> TypeSet
"""
Return a TypeSet describing the image of self across lane_of
"""
new = self.copy()
new.lanes = set([1])
new.bitvecs = set()
return new
def as_bool(self):
# type: () -> TypeSet
"""
Return a TypeSet describing the image of self across as_bool
"""
new = self.copy()
new.ints = set()
new.floats = set()
new.bitvecs = set()
if len(self.lanes.difference(set([1]))) > 0:
new.bools = self.ints.union(self.floats).union(self.bools)
if 1 in self.lanes:
new.bools.add(1)
return new
def half_width(self):
# type: () -> TypeSet
"""
Return a TypeSet describing the image of self across halfwidth
"""
new = self.copy()
new.ints = set([x//2 for x in self.ints if x > 8])
new.floats = set([x//2 for x in self.floats if x > 32])
new.bools = set([x//2 for x in self.bools if x > 8])
new.bitvecs = set([x//2 for x in self.bitvecs if x > 1])
new.specials = set()
return new
def double_width(self):
# type: () -> TypeSet
"""
Return a TypeSet describing the image of self across doublewidth
"""
new = self.copy()
new.ints = set([x*2 for x in self.ints if x < MAX_BITS])
new.floats = set([x*2 for x in self.floats if x < MAX_BITS])
new.bools = set(filter(legal_bool,
set([x*2 for x in self.bools if x < MAX_BITS])))
new.bitvecs = set([x*2 for x in self.bitvecs if x < MAX_BITVEC])
new.specials = set()
return new
def half_vector(self):
# type: () -> TypeSet
"""
Return a TypeSet describing the image of self across halfvector
"""
new = self.copy()
new.bitvecs = set()
new.lanes = set([x//2 for x in self.lanes if x > 1])
new.specials = set()
return new
def double_vector(self):
# type: () -> TypeSet
"""
Return a TypeSet describing the image of self across doublevector
"""
new = self.copy()
new.bitvecs = set()
new.lanes = set([x*2 for x in self.lanes if x < MAX_LANES])
new.specials = set()
return new
def to_bitvec(self):
# type: () -> TypeSet
"""
Return a TypeSet describing the image of self across to_bitvec
"""
assert len(self.bitvecs) == 0
all_scalars = self.ints.union(self.floats.union(self.bools))
new = self.copy()
new.lanes = set([1])
new.ints = set()
new.bools = set()
new.floats = set()
new.bitvecs = set([lane_w * nlanes for lane_w in all_scalars
for nlanes in self.lanes])
new.specials = set()
return new
def image(self, func):
# type: (str) -> TypeSet
"""
Return the image of self across the derived function func
"""
if (func == TypeVar.LANEOF):
return self.lane_of()
elif (func == TypeVar.ASBOOL):
return self.as_bool()
elif (func == TypeVar.HALFWIDTH):
return self.half_width()
elif (func == TypeVar.DOUBLEWIDTH):
return self.double_width()
elif (func == TypeVar.HALFVECTOR):
return self.half_vector()
elif (func == TypeVar.DOUBLEVECTOR):
return self.double_vector()
elif (func == TypeVar.TOBITVEC):
return self.to_bitvec()
else:
assert False, "Unknown derived function: " + func
def preimage(self, func):
# type: (str) -> TypeSet
"""
Return the inverse image of self across the derived function func
"""
# The inverse of the empty set is always empty
if (self.size() == 0):
return self
if (func == TypeVar.LANEOF):
new = self.copy()
new.bitvecs = set()
new.lanes = set([2**i for i in range(0, int_log2(MAX_LANES)+1)])
return new
elif (func == TypeVar.ASBOOL):
new = self.copy()
new.bitvecs = set()
if 1 not in self.bools:
new.ints = self.bools.difference(set([1]))
new.floats = self.bools.intersection(set([32, 64]))
                # If b1 is not in our typeset, then lanes=1 cannot be in the
# pre-image, as as_bool() of scalars is always b1.
new.lanes = self.lanes.difference(set([1]))
else:
new.ints = set([2**x for x in range(3, 7)])
new.floats = set([32, 64])
return new
elif (func == TypeVar.HALFWIDTH):
return self.double_width()
elif (func == TypeVar.DOUBLEWIDTH):
return self.half_width()
elif (func == TypeVar.HALFVECTOR):
return self.double_vector()
elif (func == TypeVar.DOUBLEVECTOR):
return self.half_vector()
elif (func == TypeVar.TOBITVEC):
new = TypeSet()
# Start with all possible lanes/ints/floats/bools
lanes = interval_to_set(decode_interval(True, (1, MAX_LANES), 1))
ints = interval_to_set(decode_interval(True, (8, MAX_BITS)))
floats = interval_to_set(decode_interval(True, (32, 64)))
bools = interval_to_set(decode_interval(True, (1, MAX_BITS)))
# See which combinations have a size that appears in self.bitvecs
has_t = set() # type: Set[Tuple[str, int, int]]
for l in lanes:
for i in ints:
if i * l in self.bitvecs:
has_t.add(('i', i, l))
for i in bools:
if i * l in self.bitvecs:
has_t.add(('b', i, l))
for i in floats:
if i * l in self.bitvecs:
has_t.add(('f', i, l))
for (t, width, lane) in has_t:
new.lanes.add(lane)
if (t == 'i'):
new.ints.add(width)
elif (t == 'b'):
new.bools.add(width)
else:
assert t == 'f'
new.floats.add(width)
return new
else:
assert False, "Unknown derived function: " + func
def size(self):
# type: () -> int
"""
Return the number of concrete types represented by this typeset
"""
return (len(self.lanes) * (len(self.ints) + len(self.floats) +
len(self.bools) + len(self.bitvecs)) +
len(self.specials))
def concrete_types(self):
# type: () -> Iterable[types.ValueType]
def by(scalar, lanes):
# type: (types.LaneType, int) -> types.ValueType
if (lanes == 1):
return scalar
else:
return scalar.by(lanes)
for nlanes in self.lanes:
for bits in self.ints:
yield by(types.IntType.with_bits(bits), nlanes)
for bits in self.floats:
yield by(types.FloatType.with_bits(bits), nlanes)
for bits in self.bools:
yield by(types.BoolType.with_bits(bits), nlanes)
for bits in self.bitvecs:
assert nlanes == 1
yield types.BVType.with_bits(bits)
for spec in self.specials:
yield spec
def get_singleton(self):
# type: () -> types.ValueType
"""
        Return the singleton type represented by self. Can only be called on
        typesets containing exactly one type.
"""
types = list(self.concrete_types())
assert len(types) == 1
return types[0]
def widths(self):
# type: () -> Set[int]
""" Return a set of the widths of all possible types in self"""
scalar_w = self.ints.union(self.floats.union(self.bools))
scalar_w = scalar_w.union(self.bitvecs)
return set(w * l for l in self.lanes for w in scalar_w)
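# Hedged sketch (not used by the meta code itself) showing how the image
# functions above compose; the concrete values follow from the doctests and
# set arithmetic above.
def _typeset_image_example():
    # type: () -> TypeSet
    ts = TypeSet(ints=(16, 64))       # scalars only: ints = {16, 32, 64}
    halved = ts.half_width()          # ints become {8, 16, 32}
    return halved.double_width()      # and back to {16, 32, 64}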
class TypeVar(object):
"""
Type variables can be used in place of concrete types when defining
instructions. This makes the instructions *polymorphic*.
A type variable is restricted to vary over a subset of the value types.
This subset is specified by a set of flags that control the permitted base
types and whether the type variable can assume scalar or vector types, or
both.
:param name: Short name of type variable used in instruction descriptions.
:param doc: Documentation string.
:param ints: Allow all integer base types, or `(min, max)` bit-range.
:param floats: Allow all floating point base types, or `(min, max)`
bit-range.
:param bools: Allow all boolean base types, or `(min, max)` bit-range.
:param scalars: Allow type variable to assume scalar types.
:param simd: Allow type variable to assume vector types, or `(min, max)`
lane count range.
:param bitvecs: Allow all BitVec base types, or `(min, max)` bit-range.
"""
def __init__(
self,
name, # type: str
doc, # type: str
ints=False, # type: BoolInterval
floats=False, # type: BoolInterval
bools=False, # type: BoolInterval
scalars=True, # type: bool
simd=False, # type: BoolInterval
bitvecs=False, # type: BoolInterval
base=None, # type: TypeVar
derived_func=None, # type: str
specials=None # type: SpecialSpec
):
# type: (...) -> None
self.name = name
self.__doc__ = doc
self.is_derived = isinstance(base, TypeVar)
if base:
assert self.is_derived
assert derived_func
self.base = base
self.derived_func = derived_func
self.name = '{}({})'.format(derived_func, base.name)
else:
min_lanes = 1 if scalars else 2
lanes = decode_interval(simd, (min_lanes, MAX_LANES), 1)
self.type_set = TypeSet(
lanes=lanes,
ints=ints,
floats=floats,
bools=bools,
bitvecs=bitvecs,
specials=specials)
@staticmethod
def singleton(typ):
# type: (types.ValueType) -> TypeVar
"""Create a type variable that can only assume a single type."""
scalar = None # type: types.ValueType
if isinstance(typ, types.VectorType):
scalar = typ.base
lanes = (typ.lanes, typ.lanes)
elif isinstance(typ, types.LaneType):
scalar = typ
lanes = (1, 1)
elif isinstance(typ, types.SpecialType):
return TypeVar(typ.name, typ.__doc__, specials=[typ])
else:
assert isinstance(typ, types.BVType)
scalar = typ
lanes = (1, 1)
ints = None
floats = None
bools = None
bitvecs = None
if isinstance(scalar, types.IntType):
ints = (scalar.bits, scalar.bits)
elif isinstance(scalar, types.FloatType):
floats = (scalar.bits, scalar.bits)
elif isinstance(scalar, types.BoolType):
bools = (scalar.bits, scalar.bits)
elif isinstance(scalar, types.BVType):
bitvecs = (scalar.bits, scalar.bits)
tv = TypeVar(
typ.name, typ.__doc__,
ints=ints, floats=floats, bools=bools,
bitvecs=bitvecs, simd=lanes)
return tv
def __str__(self):
# type: () -> str
return "`{}`".format(self.name)
def __repr__(self):
# type: () -> str
if self.is_derived:
return (
'TypeVar({}, base={}, derived_func={})'
.format(self.name, self.base, self.derived_func))
else:
return (
'TypeVar({}, {})'
.format(self.name, self.type_set))
def __hash__(self):
# type: () -> int
if (not self.is_derived):
return object.__hash__(self)
return hash((self.derived_func, self.base))
def __eq__(self, other):
# type: (object) -> bool
if not isinstance(other, TypeVar):
return False
if self.is_derived and other.is_derived:
return (
self.derived_func == other.derived_func and
self.base == other.base)
else:
return self is other
def __ne__(self, other):
# type: (object) -> bool
return not self.__eq__(other)
# Supported functions for derived type variables.
# The names here must match the method names on `ir::types::Type`.
# The camel_case of the names must match `enum OperandConstraint` in
# `instructions.rs`.
LANEOF = 'lane_of'
ASBOOL = 'as_bool'
HALFWIDTH = 'half_width'
DOUBLEWIDTH = 'double_width'
HALFVECTOR = 'half_vector'
DOUBLEVECTOR = 'double_vector'
TOBITVEC = 'to_bitvec'
@staticmethod
def is_bijection(func):
# type: (str) -> bool
return func in [
TypeVar.HALFWIDTH,
TypeVar.DOUBLEWIDTH,
TypeVar.HALFVECTOR,
TypeVar.DOUBLEVECTOR]
@staticmethod
def inverse_func(func):
# type: (str) -> str
return {
TypeVar.HALFWIDTH: TypeVar.DOUBLEWIDTH,
TypeVar.DOUBLEWIDTH: TypeVar.HALFWIDTH,
TypeVar.HALFVECTOR: TypeVar.DOUBLEVECTOR,
TypeVar.DOUBLEVECTOR: TypeVar.HALFVECTOR
}[func]
@staticmethod
def derived(base, derived_func):
# type: (TypeVar, str) -> TypeVar
"""Create a type variable that is a function of another."""
# Safety checks to avoid over/underflows.
ts = base.get_typeset()
assert len(ts.specials) == 0, "Can't derive from special types"
if derived_func == TypeVar.HALFWIDTH:
if len(ts.ints) > 0:
assert min(ts.ints) > 8, "Can't halve all integer types"
if len(ts.floats) > 0:
assert min(ts.floats) > 32, "Can't halve all float types"
if len(ts.bools) > 0:
assert min(ts.bools) > 8, "Can't halve all boolean types"
elif derived_func == TypeVar.DOUBLEWIDTH:
if len(ts.ints) > 0:
assert max(ts.ints) < MAX_BITS,\
"Can't double all integer types."
if len(ts.floats) > 0:
assert max(ts.floats) < MAX_BITS,\
"Can't double all float types."
if len(ts.bools) > 0:
assert max(ts.bools) < MAX_BITS, "Can't double all bool types."
elif derived_func == TypeVar.HALFVECTOR:
assert min(ts.lanes) > 1, "Can't halve a scalar type"
elif derived_func == TypeVar.DOUBLEVECTOR:
assert max(ts.lanes) < MAX_LANES, "Can't double 256 lanes."
return TypeVar(None, None, base=base, derived_func=derived_func)
@staticmethod
def from_typeset(ts):
# type: (TypeSet) -> TypeVar
""" Create a type variable from a type set."""
tv = TypeVar(None, None)
tv.type_set = ts
return tv
def lane_of(self):
# type: () -> TypeVar
"""
Return a derived type variable that is the scalar lane type of this
type variable.
When this type variable assumes a scalar type, the derived type will be
the same scalar type.
"""
return TypeVar.derived(self, self.LANEOF)
def as_bool(self):
# type: () -> TypeVar
"""
Return a derived type variable that has the same vector geometry as
this type variable, but with boolean lanes. Scalar types map to `b1`.
"""
return TypeVar.derived(self, self.ASBOOL)
def half_width(self):
# type: () -> TypeVar
"""
Return a derived type variable that has the same number of vector lanes
as this one, but the lanes are half the width.
"""
return TypeVar.derived(self, self.HALFWIDTH)
def double_width(self):
# type: () -> TypeVar
"""
Return a derived type variable that has the same number of vector lanes
as this one, but the lanes are double the width.
"""
return TypeVar.derived(self, self.DOUBLEWIDTH)
def half_vector(self):
# type: () -> TypeVar
"""
Return a derived type variable that has half the number of vector lanes
as this one, with the same lane type.
"""
return TypeVar.derived(self, self.HALFVECTOR)
def double_vector(self):
# type: () -> TypeVar
"""
Return a derived type variable that has twice the number of vector
lanes as this one, with the same lane type.
"""
return TypeVar.derived(self, self.DOUBLEVECTOR)
def to_bitvec(self):
# type: () -> TypeVar
"""
        Return a derived type variable that represents a flat bitvector with
        the same size as self
"""
return TypeVar.derived(self, self.TOBITVEC)
def singleton_type(self):
# type: () -> types.ValueType
"""
If the associated typeset has a single type return it. Otherwise return
None
"""
ts = self.get_typeset()
if ts.size() != 1:
return None
return ts.get_singleton()
def free_typevar(self):
# type: () -> TypeVar
"""
Get the free type variable controlling this one.
"""
if self.is_derived:
return self.base.free_typevar()
elif self.singleton_type() is not None:
# A singleton type variable is not a proper free variable.
return None
else:
return self
def rust_expr(self):
# type: () -> str
"""
Get a Rust expression that computes the type of this type variable.
"""
if self.is_derived:
return '{}.{}()'.format(
self.base.rust_expr(), self.derived_func)
elif self.singleton_type():
return self.singleton_type().rust_name()
else:
return self.name
def constrain_types_by_ts(self, ts):
# type: (TypeSet) -> None
"""
Constrain the range of types this variable can assume to a subset of
those in the typeset ts.
"""
if not self.is_derived:
self.type_set &= ts
else:
self.base.constrain_types_by_ts(ts.preimage(self.derived_func))
def constrain_types(self, other):
# type: (TypeVar) -> None
"""
Constrain the range of types this variable can assume to a subset of
those `other` can assume.
"""
if self is other:
return
self.constrain_types_by_ts(other.get_typeset())
def get_typeset(self):
# type: () -> TypeSet
"""
Returns the typeset for this TV. If the TV is derived, computes it
recursively from the derived function and the base's typeset.
"""
if not self.is_derived:
return self.type_set
else:
return self.base.get_typeset().image(self.derived_func)
def get_fresh_copy(self, name):
# type: (str) -> TypeVar
"""
Get a fresh copy of self. Can only be called on free typevars.
"""
assert not self.is_derived
tv = TypeVar.from_typeset(self.type_set.copy())
tv.name = name
return tv
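# Hedged usage sketch (names are illustrative, not definitions used elsewhere):
# a free integer type variable and a derived half-width variable, as they would
# typically be declared when describing a polymorphic instruction.
def _typevar_sketch():
    # type: () -> TypeVar
    tx = TypeVar('x', 'A scalar or vector integer type',
                 ints=(16, 32), simd=True)
    return tx.half_width()            # same lane count, lanes half as wide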
|
|
"""User API to specify equations."""
import sympy
from cached_property import cached_property
from devito.finite_differences import default_rules
from devito.tools import as_tuple
from devito.types.lazy import Evaluable
__all__ = ['Eq', 'Inc']
class Eq(sympy.Eq, Evaluable):
"""
An equal relation between two objects, the left-hand side and the
right-hand side.
The left-hand side may be a Function or a SparseFunction. The right-hand
    side may be any arbitrary expression with numbers, Dimensions, Constants,
Functions and SparseFunctions as operands.
Parameters
----------
lhs : Function or SparseFunction
The left-hand side.
rhs : expr-like, optional
The right-hand side. Defaults to 0.
subdomain : SubDomain, optional
To restrict the computation of the Eq to a particular sub-region in the
computational domain.
coefficients : Substitutions, optional
Can be used to replace symbolic finite difference weights with user
defined weights.
implicit_dims : Dimension or list of Dimension, optional
An ordered list of Dimensions that do not explicitly appear in either the
left-hand side or in the right-hand side, but that should be honored when
constructing an Operator.
Examples
--------
>>> from devito import Grid, Function, Eq
>>> grid = Grid(shape=(4, 4))
>>> f = Function(name='f', grid=grid)
>>> Eq(f, f + 1)
Eq(f(x, y), f(x, y) + 1)
Any SymPy expressions may be used in the right-hand side.
>>> from devito import sin
>>> Eq(f, sin(f.dx)**2)
Eq(f(x, y), sin(Derivative(f(x, y), x))**2)
Notes
-----
An Eq can be thought of as an assignment in an imperative programming language
(e.g., ``a[i] = b[i]*c``).
"""
is_Increment = False
def __new__(cls, lhs, rhs=0, subdomain=None, coefficients=None, implicit_dims=None,
**kwargs):
kwargs['evaluate'] = False
obj = sympy.Eq.__new__(cls, lhs, rhs, **kwargs)
obj._subdomain = subdomain
obj._substitutions = coefficients
obj._implicit_dims = as_tuple(implicit_dims)
return obj
@property
def subdomain(self):
"""The SubDomain in which the Eq is defined."""
return self._subdomain
@cached_property
def evaluate(self):
"""
Evaluate the Equation or system of Equations.
The RHS of the Equation is evaluated at the indices of the LHS if required.
"""
try:
lhs, rhs = self.lhs.evaluate, self.rhs._eval_at(self.lhs).evaluate
except AttributeError:
lhs, rhs = self._evaluate_args()
eq = self.func(lhs, rhs, subdomain=self.subdomain,
coefficients=self.substitutions,
implicit_dims=self._implicit_dims)
if eq._uses_symbolic_coefficients:
            # NOTE: As Coefficients.py is expanded we will not want
            # all rules to be expunged during this process.
rules = default_rules(eq, eq._symbolic_functions)
try:
eq = eq.xreplace({**eq.substitutions.rules, **rules})
except AttributeError:
if bool(rules):
eq = eq.xreplace(rules)
return eq
@property
def _flatten(self):
"""
Flatten vectorial/tensorial Equation into list of scalar Equations.
"""
if self.lhs.is_Matrix:
# Maps the Equations to retrieve the rhs from relevant lhs
eqs = dict(zip(as_tuple(self.lhs), as_tuple(self.rhs)))
# Get the relevant equations from the lhs structure. .values removes
# the symmetric duplicates and off-diagonal zeros.
lhss = self.lhs.values()
return [self.func(l, eqs[l], subdomain=self.subdomain,
coefficients=self.substitutions,
implicit_dims=self._implicit_dims)
for l in lhss]
else:
return [self]
@property
def substitutions(self):
return self._substitutions
@property
def implicit_dims(self):
return self._implicit_dims
@cached_property
def _uses_symbolic_coefficients(self):
return bool(self._symbolic_functions)
@cached_property
def _symbolic_functions(self):
try:
return self.lhs._symbolic_functions.union(self.rhs._symbolic_functions)
except AttributeError:
pass
try:
return self.lhs._symbolic_functions
except AttributeError:
pass
try:
return self.rhs._symbolic_functions
except AttributeError:
return frozenset()
        else:
            raise TypeError('Failed to retrieve symbolic functions')
@property
def func(self):
return lambda *args, **kwargs:\
self.__class__(*args,
subdomain=kwargs.pop('subdomain', self._subdomain),
coefficients=kwargs.pop('coefficients', self._substitutions),
implicit_dims=kwargs.pop('implicit_dims', self._implicit_dims),
**kwargs)
def xreplace(self, rules):
return self.func(self.lhs.xreplace(rules), self.rhs.xreplace(rules))
def __str__(self):
return "%s(%s, %s)" % (self.__class__.__name__, self.lhs, self.rhs)
__repr__ = __str__
class Inc(Eq):
"""
An increment relation between two objects, the left-hand side and the
right-hand side.
Parameters
----------
lhs : Function or SparseFunction
The left-hand side.
rhs : expr-like
The right-hand side.
subdomain : SubDomain, optional
To restrict the computation of the Eq to a particular sub-region in the
computational domain.
coefficients : Substitutions, optional
Can be used to replace symbolic finite difference weights with user
defined weights.
implicit_dims : Dimension or list of Dimension, optional
An ordered list of Dimensions that do not explicitly appear in either the
left-hand side or in the right-hand side, but that should be honored when
constructing an Operator.
Examples
--------
Inc may be used to express tensor contractions. Below, a summation along
the user-defined Dimension ``i``.
>>> from devito import Grid, Dimension, Function, Inc
>>> grid = Grid(shape=(4, 4))
>>> x, y = grid.dimensions
>>> i = Dimension(name='i')
>>> f = Function(name='f', grid=grid)
>>> g = Function(name='g', shape=(10, 4, 4), dimensions=(i, x, y))
>>> Inc(f, g)
Inc(f(x, y), g(i, x, y))
Notes
-----
An Inc can be thought of as the augmented assignment '+=' in an imperative
programming language (e.g., ``a[i] += c``).
"""
is_Increment = True
def __str__(self):
return "Inc(%s, %s)" % (self.lhs, self.rhs)
__repr__ = __str__
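# Hedged sketch mirroring the Inc docstring above (names are illustrative and
# this helper is not part of the public API): summing g over the user-defined
# Dimension i into f. The import is local to avoid a circular import at module
# load time.
def _example_inc_reduction():
    from devito import Grid, Dimension, Function, Operator
    grid = Grid(shape=(4, 4))
    x, y = grid.dimensions
    i = Dimension(name='i')
    f = Function(name='f', grid=grid)
    g = Function(name='g', shape=(10, 4, 4), dimensions=(i, x, y))
    g.data[:] = 1.
    Operator(Inc(f, g))()  # f(x, y) should now hold the sum over i of g(i, x, y)
    return f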
|
|
'''Trains DCGAN on MNIST using Keras
DCGAN is a Generative Adversarial Network (GAN) using CNN.
The generator tries to fool the discriminator by generating fake images.
The discriminator learns to discriminate real from fake images.
The generator + discriminator form an adversarial network.
DCGAN trains the discriminator and adversarial networks alternately.
During training, the discriminator not only learns to distinguish real from
fake images, it also coaches the generator part of the adversarial network
on how to improve its ability to generate fake images.
[1] Radford, Alec, Luke Metz, and Soumith Chintala.
"Unsupervised representation learning with deep convolutional
generative adversarial networks." arXiv preprint arXiv:1511.06434 (2015).
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import keras
from keras.layers import Activation, Dense, Input
from keras.layers import Conv2D, Flatten
from keras.layers import Reshape, Conv2DTranspose
from keras.layers import LeakyReLU
from keras.layers import BatchNormalization
from keras.optimizers import RMSprop
from keras.models import Model
from keras.datasets import mnist
import numpy as np
import math
import matplotlib.pyplot as plt
def generator(inputs, image_size):
"""Build a Generator Model
    Stacks of BN-ReLU-Conv2DTranspose layers to generate fake images.
    Output activation is sigmoid instead of the tanh used in [1];
    sigmoid converges more easily here.
# Arguments
inputs (Layer): Input layer of the generator (the z-vector)
image_size: Target size of one side (assuming square image)
# Returns
Model: Generator Model
"""
image_resize = image_size // 4
kernel_size = 5
layer_filters = [128, 64, 32, 1]
x = Dense(image_resize * image_resize * layer_filters[0])(inputs)
x = Reshape((image_resize, image_resize, layer_filters[0]))(x)
for filters in layer_filters:
if filters > layer_filters[-2]:
strides = 2
else:
strides = 1
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv2DTranspose(filters=filters,
kernel_size=kernel_size,
strides=strides,
padding='same')(x)
x = Activation('sigmoid')(x)
generator = Model(inputs, x, name='generator')
return generator
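# Worked shape check for the builder above (MNIST, image_size == 28): the Dense
# layer reshapes to 7x7x128, the first two Conv2DTranspose layers use stride 2
# (7 -> 14 -> 28) because their filter counts exceed layer_filters[-2], and the
# last two use stride 1, so the generator outputs a 28x28x1 image.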
def discriminator(inputs):
"""Build a Discriminator Model
Stacks of LeakyReLU-Conv2D to discriminate real from fake
The network does not converge with BN so it is not used here
unlike in [1]
# Arguments
inputs (Layer): Input layer of the discriminator (the image)
# Returns
Model: Discriminator Model
"""
kernel_size = 5
layer_filters = [32, 64, 128, 256]
x = inputs
for filters in layer_filters:
if filters == layer_filters[-1]:
strides = 1
else:
strides = 2
x = LeakyReLU(alpha=0.2)(x)
x = Conv2D(filters=filters,
kernel_size=kernel_size,
strides=strides,
padding='same')(x)
x = Flatten()(x)
x = Dense(1)(x)
x = Activation('sigmoid')(x)
discriminator = Model(inputs, x, name='discriminator')
return discriminator
def train(models, x_train, params):
"""Train the Discriminator and Adversarial Networks
Alternately train Discriminator and Adversarial networks by batch
    Discriminator is trained first with properly labelled real and fake images
Adversarial is trained next with fake images pretending to be real
Generate sample images per save_interval
# Arguments
models (list): Generator, Discriminator, Adversarial models
x_train (tensor): Train images
params (list) : Networks parameters
"""
generator, discriminator, adversarial = models
batch_size, latent_size, train_steps = params
save_interval = 500
noise_input = np.random.uniform(-1.0, 1.0, size=[16, latent_size])
for i in range(train_steps):
# Random real images
rand_indexes = np.random.randint(0, x_train.shape[0], size=batch_size)
train_images = x_train[rand_indexes]
# Generate fake images
noise = np.random.uniform(-1.0, 1.0, size=[batch_size, latent_size])
fake_images = generator.predict(noise)
x = np.concatenate((train_images, fake_images))
# Label real and fake images
y = np.ones([2 * batch_size, 1])
y[batch_size:, :] = 0
# Train the Discriminator network
metrics = discriminator.train_on_batch(x, y)
loss = metrics[0]
acc = metrics[1]
log = "%d: [discriminator loss: %f, acc: %f]" % (i, loss, acc)
# Generate random noise
noise = np.random.uniform(-1.0, 1.0, size=[batch_size, latent_size])
# Label fake images as real
y = np.ones([batch_size, 1])
# Train the Adversarial network
metrics = adversarial.train_on_batch(noise, y)
loss = metrics[0]
acc = metrics[1]
log = "%s [adversarial loss: %f, acc: %f]" % (log, loss, acc)
print(log)
if (i + 1) % save_interval == 0:
if (i + 1) == train_steps:
show = True
else:
show = False
plot_images(generator,
noise_input=noise_input,
show=show,
step=(i + 1))
def plot_images(generator,
noise_input,
show=False,
step=0):
"""Generate fake images and plot them
For visualization purposes, generate fake images
then plot them in a square grid
# Arguments
generator (Model): The Generator Model for fake images generation
noise_input (ndarray): Array of z-vectors
show (bool): Whether to show plot or not
        step (int): Appended to the filename of the saved images
"""
filename = "mnist_dcgan_%d.png" % step
images = generator.predict(noise_input)
plt.figure(figsize=(2.4, 2.4))
num_images = images.shape[0]
image_size = images.shape[1]
rows = int(math.sqrt(noise_input.shape[0]))
for i in range(num_images):
plt.subplot(rows, rows, i + 1)
image = images[i, :, :, :]
image = np.reshape(image, [image_size, image_size])
plt.imshow(image, cmap='gray')
plt.axis('off')
plt.savefig(filename)
if show:
plt.show()
else:
plt.close('all')
# MNIST dataset
(x_train, _), (_, _) = mnist.load_data()
image_size = x_train.shape[1]
x_train = np.reshape(x_train, [-1, image_size, image_size, 1])
x_train = x_train.astype('float32') / 255
# The latent or z vector is 100-dim
latent_size = 100
batch_size = 64
train_steps = 40000
lr = 0.0002
decay = 6e-8
input_shape = (image_size, image_size, 1)
# Build Discriminator Model
inputs = Input(shape=input_shape, name='discriminator_input')
discriminator = discriminator(inputs)
# [1] uses Adam, but discriminator converges easily with RMSprop
optimizer = RMSprop(lr=lr, decay=decay)
discriminator.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
discriminator.summary()
# Build Generator Model
input_shape = (latent_size, )
inputs = Input(shape=input_shape, name='z_input')
generator = generator(inputs, image_size)
generator.summary()
# Build Adversarial Model = Generator + Discriminator
optimizer = RMSprop(lr=lr*0.5, decay=decay*0.5)
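# Freezing below only affects the adversarial model compiled afterwards; the
# discriminator was compiled above while still trainable, so the
# discriminator.train_on_batch calls in train() keep updating its weights.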
discriminator.trainable = False
adversarial = Model(inputs, discriminator(generator(inputs)), name='dcgan')
adversarial.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
adversarial.summary()
# Train Discriminator and Adversarial Networks
models = (generator, discriminator, adversarial)
params = (batch_size, latent_size, train_steps)
train(models, x_train, params)
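# Optional follow-up (a sketch, not part of the listing above): once training
# finishes, the generator alone is enough to sample new digits. The filename
# below is an arbitrary choice for illustration.
generator.save('dcgan_mnist_generator.h5')
final_noise = np.random.uniform(-1.0, 1.0, size=[16, latent_size])
plot_images(generator, noise_input=final_noise, show=False, step=train_steps + 1)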
|
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from unittest import TestCase, main
import numpy as np
import numpy.testing as npt
import pandas as pd
from skbio import DistanceMatrix
from skbio.stats.distance import (DissimilarityMatrixError,
DistanceMatrixError, mantel, pwmantel)
from skbio.stats.distance._mantel import _order_dms
from skbio.util import get_data_path, assert_data_frame_almost_equal
class MantelTestData(TestCase):
def setUp(self):
# Small dataset of minimal size (3x3). Mix of floats and ints in a
# native Python nested list structure.
self.minx = [[0, 1, 2], [1, 0, 3], [2, 3, 0]]
self.miny = [[0, 2, 7], [2, 0, 6], [7, 6, 0]]
self.minz = [[0, 0.5, 0.25], [0.5, 0, 0.1], [0.25, 0.1, 0]]
# Version of the above dataset stored as DistanceMatrix instances.
self.minx_dm = DistanceMatrix(self.minx)
self.miny_dm = DistanceMatrix(self.miny)
self.minz_dm = DistanceMatrix(self.minz)
# Versions of self.minx_dm and self.minz_dm that each have an extra ID
# on the end.
self.minx_dm_extra = DistanceMatrix([[0, 1, 2, 7],
[1, 0, 3, 2],
[2, 3, 0, 4],
[7, 2, 4, 0]],
['0', '1', '2', 'foo'])
self.minz_dm_extra = DistanceMatrix([[0, 0.5, 0.25, 3],
[0.5, 0, 0.1, 24],
[0.25, 0.1, 0, 5],
[3, 24, 5, 0]],
['0', '1', '2', 'bar'])
class MantelTests(MantelTestData):
"""Results were verified with R 3.1.0 and vegan 2.0-10 (vegan::mantel).
vegan::mantel performs a one-sided (greater) test and does not have the
option to specify different alternative hypotheses. In order to test the
other alternative hypotheses, I modified vegan::mantel to perform the
appropriate test, source()'d the file and verified the output.
"""
def setUp(self):
super(MantelTests, self).setUp()
self.methods = ('pearson', 'spearman')
self.alternatives = ('two-sided', 'greater', 'less')
# No variation in distances. Taken from Figure 10.20(b), pg. 603 in L&L
# 3rd edition. Their example is 4x4 but using 3x3 here for easy
# comparison to the minimal dataset above.
self.no_variation = [[0, 0.667, 0.667],
[0.667, 0, 0.667],
[0.667, 0.667, 0]]
# This second dataset is derived from vegan::mantel's example dataset.
# The "veg" distance matrix contains Bray-Curtis distances derived from
# the varespec data (named "veg.dist" in the example). The "env"
# distance matrix contains Euclidean distances derived from scaled
# varechem data (named "env.dist" in the example).
self.veg_dm_vegan = np.loadtxt(
get_data_path('mantel_veg_dm_vegan.txt'))
self.env_dm_vegan = np.loadtxt(
get_data_path('mantel_env_dm_vegan.txt'))
# Expected test statistic when comparing x and y with method='pearson'.
self.exp_x_vs_y = 0.7559289
# Expected test statistic when comparing x and z with method='pearson'.
self.exp_x_vs_z = -0.9897433
def test_statistic_same_across_alternatives_and_permutations(self):
# Varying permutations and alternative hypotheses shouldn't affect the
# computed test statistics.
for n in (0, 99, 999):
for alt in self.alternatives:
for method, exp in (('pearson', self.exp_x_vs_y),
('spearman', 0.5)):
obs = mantel(self.minx, self.miny, method=method,
permutations=n, alternative=alt)[0]
self.assertAlmostEqual(obs, exp)
def test_comparing_same_matrices(self):
for method in self.methods:
obs = mantel(self.minx, self.minx, method=method)[0]
self.assertAlmostEqual(obs, 1)
obs = mantel(self.miny, self.miny, method=method)[0]
self.assertAlmostEqual(obs, 1)
def test_negative_correlation(self):
for method, exp in (('pearson', self.exp_x_vs_z), ('spearman', -1)):
obs = mantel(self.minx, self.minz, method=method)[0]
self.assertAlmostEqual(obs, exp)
def test_zero_permutations(self):
for alt in self.alternatives:
for method, exp in (('pearson', self.exp_x_vs_y),
('spearman', 0.5)):
obs = mantel(self.minx, self.miny, permutations=0,
method=method, alternative=alt)
self.assertAlmostEqual(obs[0], exp)
npt.assert_equal(obs[1], np.nan)
self.assertEqual(obs[2], 3)
# swapping order of matrices should give same result
obs = mantel(self.miny, self.minx, permutations=0,
method=method, alternative=alt)
self.assertAlmostEqual(obs[0], exp)
npt.assert_equal(obs[1], np.nan)
self.assertEqual(obs[2], 3)
def test_distance_matrix_instances_as_input(self):
# Matrices with all matching IDs in the same order.
np.random.seed(0)
obs = mantel(self.minx_dm, self.miny_dm, alternative='less')
self.assertAlmostEqual(obs[0], self.exp_x_vs_y)
self.assertAlmostEqual(obs[1], 0.843)
self.assertEqual(obs[2], 3)
def test_distance_matrix_instances_with_reordering_and_nonmatching(self):
x = self.minx_dm_extra.filter(['1', '0', 'foo', '2'])
y = self.miny_dm.filter(['0', '2', '1'])
# strict=True should disallow IDs that aren't found in both matrices
with self.assertRaises(ValueError):
mantel(x, y, alternative='less', strict=True)
np.random.seed(0)
# strict=False should ignore IDs that aren't found in both matrices
obs = mantel(x, y, alternative='less', strict=False)
self.assertAlmostEqual(obs[0], self.exp_x_vs_y)
self.assertAlmostEqual(obs[1], 0.843)
self.assertEqual(obs[2], 3)
def test_distance_matrix_instances_with_lookup(self):
self.minx_dm.ids = ('a', 'b', 'c')
self.miny_dm.ids = ('d', 'e', 'f')
lookup = {'a': 'A', 'b': 'B', 'c': 'C',
'd': 'A', 'e': 'B', 'f': 'C'}
np.random.seed(0)
obs = mantel(self.minx_dm, self.miny_dm, alternative='less',
lookup=lookup)
self.assertAlmostEqual(obs[0], self.exp_x_vs_y)
self.assertAlmostEqual(obs[1], 0.843)
self.assertEqual(obs[2], 3)
def test_one_sided_greater(self):
np.random.seed(0)
obs = mantel(self.minx, self.miny, alternative='greater')
self.assertAlmostEqual(obs[0], self.exp_x_vs_y)
self.assertAlmostEqual(obs[1], 0.324)
self.assertEqual(obs[2], 3)
obs = mantel(self.minx, self.minx, alternative='greater')
self.assertAlmostEqual(obs[0], 1)
self.assertAlmostEqual(obs[1], 0.172)
self.assertEqual(obs[2], 3)
def test_one_sided_less(self):
# no need to seed here as permuted test statistics will all be less
# than or equal to the observed test statistic (1.0)
for method in self.methods:
obs = mantel(self.minx, self.minx, method=method,
alternative='less')
self.assertEqual(obs, (1, 1, 3))
np.random.seed(0)
obs = mantel(self.minx, self.miny, alternative='less')
self.assertAlmostEqual(obs[0], self.exp_x_vs_y)
self.assertAlmostEqual(obs[1], 0.843)
self.assertEqual(obs[2], 3)
obs = mantel(self.minx, self.minz, alternative='less')
self.assertAlmostEqual(obs[0], self.exp_x_vs_z)
self.assertAlmostEqual(obs[1], 0.172)
self.assertEqual(obs[2], 3)
def test_two_sided(self):
np.random.seed(0)
obs = mantel(self.minx, self.minx, method='spearman',
alternative='two-sided')
self.assertEqual(obs[0], 1)
self.assertAlmostEqual(obs[1], 0.328)
self.assertEqual(obs[2], 3)
obs = mantel(self.minx, self.miny, method='spearman',
alternative='two-sided')
self.assertAlmostEqual(obs[0], 0.5)
self.assertAlmostEqual(obs[1], 1.0)
self.assertEqual(obs[2], 3)
obs = mantel(self.minx, self.minz, method='spearman',
alternative='two-sided')
self.assertAlmostEqual(obs[0], -1)
self.assertAlmostEqual(obs[1], 0.322)
self.assertEqual(obs[2], 3)
def test_vegan_example(self):
np.random.seed(0)
# pearson
obs = mantel(self.veg_dm_vegan, self.env_dm_vegan,
alternative='greater')
self.assertAlmostEqual(obs[0], 0.3047454)
self.assertAlmostEqual(obs[1], 0.002)
self.assertEqual(obs[2], 24)
# spearman
obs = mantel(self.veg_dm_vegan, self.env_dm_vegan,
alternative='greater', method='spearman')
self.assertAlmostEqual(obs[0], 0.283791)
self.assertAlmostEqual(obs[1], 0.003)
self.assertEqual(obs[2], 24)
def test_no_variation_pearson(self):
# Output doesn't match vegan::mantel with method='pearson'. Consider
# revising output and this test depending on outcome of
# https://github.com/scipy/scipy/issues/3728
for alt in self.alternatives:
# test one or both inputs having no variation in their
# distances
obs = mantel(self.miny, self.no_variation, method='pearson',
alternative=alt)
npt.assert_equal(obs, (0.0, 1.0, 3))
obs = mantel(self.no_variation, self.miny, method='pearson',
alternative=alt)
npt.assert_equal(obs, (0.0, 1.0, 3))
obs = mantel(self.no_variation, self.no_variation,
method='pearson', alternative=alt)
npt.assert_equal(obs, (1.0, 1.0, 3))
def test_no_variation_spearman(self):
exp = (np.nan, np.nan, 3)
for alt in self.alternatives:
obs = mantel(self.miny, self.no_variation, method='spearman',
alternative=alt)
npt.assert_equal(obs, exp)
obs = mantel(self.no_variation, self.miny, method='spearman',
alternative=alt)
npt.assert_equal(obs, exp)
obs = mantel(self.no_variation, self.no_variation,
method='spearman', alternative=alt)
npt.assert_equal(obs, exp)
def test_no_side_effects(self):
minx = np.asarray(self.minx, dtype='float')
miny = np.asarray(self.miny, dtype='float')
minx_copy = np.copy(minx)
miny_copy = np.copy(miny)
mantel(minx, miny)
# Make sure we haven't modified the input.
npt.assert_equal(minx, minx_copy)
npt.assert_equal(miny, miny_copy)
def test_invalid_distance_matrix(self):
# Single asymmetric, non-hollow distance matrix.
with self.assertRaises(DissimilarityMatrixError):
mantel([[1, 2], [3, 4]], [[0, 0], [0, 0]])
# Two asymmetric distance matrices.
with self.assertRaises(DistanceMatrixError):
mantel([[0, 2], [3, 0]], [[0, 1], [0, 0]])
def test_invalid_input(self):
# invalid correlation method
with self.assertRaises(ValueError):
mantel([[1]], [[1]], method='brofist')
# invalid permutations
with self.assertRaises(ValueError):
mantel([[1]], [[1]], permutations=-1)
# invalid alternative
with self.assertRaises(ValueError):
mantel([[1]], [[1]], alternative='no cog yay')
# too small dms
with self.assertRaises(ValueError):
mantel([[0, 3], [3, 0]], [[0, 2], [2, 0]])
class PairwiseMantelTests(MantelTestData):
def setUp(self):
super(PairwiseMantelTests, self).setUp()
self.min_dms = (self.minx_dm, self.miny_dm, self.minz_dm)
self.exp_results_minimal = pd.read_csv(
get_data_path('pwmantel_exp_results_minimal.txt'), sep='\t',
index_col=(0, 1))
self.exp_results_minimal_with_labels = pd.read_csv(
get_data_path('pwmantel_exp_results_minimal_with_labels.txt'),
sep='\t', index_col=(0, 1))
self.exp_results_duplicate_dms = pd.read_csv(
get_data_path('pwmantel_exp_results_duplicate_dms.txt'),
sep='\t', index_col=(0, 1))
self.exp_results_na_p_value = pd.read_csv(
get_data_path('pwmantel_exp_results_na_p_value.txt'),
sep='\t', index_col=(0, 1))
self.exp_results_reordered_distance_matrices = pd.read_csv(
get_data_path('pwmantel_exp_results_reordered_distance_matrices'
'.txt'),
sep='\t', index_col=(0, 1))
self.exp_results_dm_dm2 = pd.read_csv(
get_data_path('pwmantel_exp_results_dm_dm2.txt'),
sep='\t', index_col=(0, 1))
self.exp_results_all_dms = pd.read_csv(
get_data_path('pwmantel_exp_results_all_dms.txt'),
sep='\t', index_col=(0, 1))
def test_minimal_compatible_input(self):
# Matrices are already in the correct order and have matching IDs.
np.random.seed(0)
# input as DistanceMatrix instances
obs = pwmantel(self.min_dms, alternative='greater')
assert_data_frame_almost_equal(obs, self.exp_results_minimal)
np.random.seed(0)
# input as array_like
obs = pwmantel((self.minx, self.miny, self.minz),
alternative='greater')
assert_data_frame_almost_equal(obs, self.exp_results_minimal)
def test_minimal_compatible_input_with_labels(self):
np.random.seed(0)
obs = pwmantel(self.min_dms, alternative='greater',
labels=('minx', 'miny', 'minz'))
assert_data_frame_almost_equal(
obs,
self.exp_results_minimal_with_labels)
def test_duplicate_dms(self):
obs = pwmantel((self.minx_dm, self.minx_dm, self.minx_dm),
alternative='less')
assert_data_frame_almost_equal(obs, self.exp_results_duplicate_dms)
def test_na_p_value(self):
obs = pwmantel((self.miny_dm, self.minx_dm), method='spearman',
permutations=0)
assert_data_frame_almost_equal(obs, self.exp_results_na_p_value)
def test_reordered_distance_matrices(self):
# Matrices have matching IDs but they all have different ordering.
x = self.minx_dm.filter(['1', '0', '2'])
y = self.miny_dm.filter(['0', '2', '1'])
z = self.minz_dm.filter(['1', '2', '0'])
np.random.seed(0)
obs = pwmantel((x, y, z), alternative='greater')
assert_data_frame_almost_equal(
obs,
self.exp_results_reordered_distance_matrices)
def test_strict(self):
# Matrices have some matching and nonmatching IDs, with different
# ordering.
x = self.minx_dm_extra.filter(['1', '0', 'foo', '2'])
y = self.miny_dm.filter(['0', '2', '1'])
z = self.minz_dm_extra.filter(['bar', '1', '2', '0'])
np.random.seed(0)
# strict=False should discard IDs that aren't found in both matrices
obs = pwmantel((x, y, z), alternative='greater', strict=False)
assert_data_frame_almost_equal(
obs,
self.exp_results_reordered_distance_matrices)
def test_id_lookup(self):
# Matrices have mismatched IDs but a lookup is provided.
self.minx_dm_extra.ids = ['a', 'b', 'c', 'foo']
self.minz_dm_extra.ids = ['d', 'e', 'f', 'bar']
lookup = {'a': '0', 'b': '1', 'c': '2', 'foo': 'foo',
'd': '0', 'e': '1', 'f': '2', 'bar': 'bar',
'0': '0', '1': '1', '2': '2'}
x = self.minx_dm_extra.filter(['b', 'a', 'foo', 'c'])
y = self.miny_dm.filter(['0', '2', '1'])
z = self.minz_dm_extra.filter(['bar', 'e', 'f', 'd'])
x_copy = x.copy()
y_copy = y.copy()
z_copy = z.copy()
np.random.seed(0)
obs = pwmantel((x, y, z), alternative='greater', strict=False,
lookup=lookup)
assert_data_frame_almost_equal(
obs,
self.exp_results_reordered_distance_matrices)
# Make sure the inputs aren't modified.
self.assertEqual(x, x_copy)
self.assertEqual(y, y_copy)
self.assertEqual(z, z_copy)
def test_too_few_dms(self):
with self.assertRaises(ValueError):
pwmantel([self.miny_dm])
def test_wrong_number_of_labels(self):
with self.assertRaises(ValueError):
pwmantel(self.min_dms, labels=['foo', 'bar'])
def test_duplicate_labels(self):
with self.assertRaises(ValueError):
pwmantel(self.min_dms, labels=['foo', 'bar', 'foo'])
def test_mixed_input_types(self):
# DistanceMatrix, DistanceMatrix, array_like
with self.assertRaises(TypeError):
pwmantel((self.miny_dm, self.minx_dm, self.minz))
def test_filepaths_as_input(self):
dms = [
get_data_path('dm.txt'),
get_data_path('dm2.txt'),
]
np.random.seed(0)
obs = pwmantel(dms)
assert_data_frame_almost_equal(obs, self.exp_results_dm_dm2)
def test_many_filepaths_as_input(self):
dms = [
get_data_path('dm2.txt'),
get_data_path('dm.txt'),
get_data_path('dm4.txt'),
get_data_path('dm3.txt')
]
np.random.seed(0)
obs = pwmantel(dms)
assert_data_frame_almost_equal(obs, self.exp_results_all_dms)
class OrderDistanceMatricesTests(MantelTestData):
def setUp(self):
super(OrderDistanceMatricesTests, self).setUp()
def test_array_like_input(self):
obs = _order_dms(self.minx, self.miny)
self.assertEqual(obs, (self.minx_dm, self.miny_dm))
def test_reordered_distance_matrices(self):
# All matching IDs but with different orderings.
x = self.minx_dm.filter(['1', '0', '2'])
y = self.miny_dm.filter(['0', '2', '1'])
exp = (x, y.filter(['1', '0', '2']))
obs = _order_dms(x, y)
self.assertEqual(obs, exp)
def test_reordered_and_nonmatching_distance_matrices(self):
# Some matching and nonmatching IDs, with different ordering.
x = self.minx_dm_extra.filter(['1', '0', 'foo', '2'])
z = self.minz_dm_extra.filter(['bar', '0', '2', '1'])
exp = (x.filter(['1', '0', '2']), z.filter(['1', '0', '2']))
obs = _order_dms(x, z, strict=False)
self.assertEqual(obs, exp)
def test_id_lookup(self):
# Matrices have mismatched IDs but a lookup is provided.
self.minx_dm_extra.ids = ['a', 'b', 'c', 'foo']
self.minz_dm_extra.ids = ['d', 'e', 'f', 'bar']
lookup = {'a': '0', 'b': '1', 'c': '2', 'foo': 'foo',
'd': '0', 'e': '1', 'f': '2', 'bar': 'bar'}
x = self.minx_dm_extra.filter(['b', 'a', 'foo', 'c'])
z = self.minz_dm_extra.filter(['bar', 'e', 'f', 'd'])
x_copy = x.copy()
z_copy = z.copy()
exp = (self.minx_dm.filter(['1', '0', '2']),
self.minz_dm.filter(['1', '0', '2']))
obs = _order_dms(x, z, strict=False, lookup=lookup)
self.assertEqual(obs, exp)
# Make sure the inputs aren't modified.
self.assertEqual(x, x_copy)
self.assertEqual(z, z_copy)
def test_lookup_with_array_like(self):
lookup = {'0': 'a', '1': 'b', '2': 'c'}
with self.assertRaises(ValueError):
_order_dms(self.minx, self.miny, lookup=lookup)
def test_shape_mismatch(self):
with self.assertRaises(ValueError):
_order_dms(self.minx, [[0, 2], [2, 0]])
def test_missing_ids_in_lookup(self):
# Mapping for '1' is missing. Should get an error while remapping IDs
# for the first distance matrix.
lookup = {'0': 'a', '2': 'c'}
with self.assertRaisesRegexp(KeyError, "first.*(x).*'1'\"$"):
_order_dms(self.minx_dm, self.miny_dm, lookup=lookup)
# Mapping for 'bar' is missing. Should get an error while remapping IDs
# for the second distance matrix.
lookup = {'0': 'a', '1': 'b', '2': 'c',
'foo': 'a', 'baz': 'c'}
self.miny_dm.ids = ('foo', 'bar', 'baz')
with self.assertRaisesRegexp(KeyError, "second.*(y).*'bar'\"$"):
_order_dms(self.minx_dm, self.miny_dm, lookup=lookup)
def test_nonmatching_ids_strict_true(self):
with self.assertRaises(ValueError):
_order_dms(self.minx_dm, self.minz_dm_extra, strict=True)
def test_no_matching_ids(self):
self.minx_dm.ids = ['foo', 'bar', 'baz']
self.miny_dm.ids = ['a', 'b', 'c']
with self.assertRaises(ValueError):
_order_dms(self.minx_dm, self.miny_dm, strict=False)
def test_mixed_input_types(self):
with self.assertRaises(TypeError):
_order_dms(self.minx, self.minz_dm)
with self.assertRaises(TypeError):
_order_dms(self.minz_dm, self.minx)
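# Usage sketch (commentary only): the tuples unpacked by the assertions above
# come straight from the public API, roughly
#
#     coeff, p_value, n = mantel(x, y, method='pearson', permutations=999)
#
# where n is the number of matching IDs actually compared, and pwmantel()
# returns a pandas DataFrame indexed by pairs of distance-matrix labels.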
if __name__ == '__main__':
main()
|
|
"""Tests related to the authorization of api calls.
NOTE: while all of these are conceptually authorization related, some illegal
operations will raise exceptions other than AuthorizationError. This usually
happens when the operation is illegal *in principle*, and would not be fixed by
authenticating as someone else. We were already raising exceptions in
these cases before actually adding authentication and authorization to
the mix. They are still tested here, since they are important for security.
"""
import pytest
import unittest
import json
from hil import api, config, model, deferred
from hil.auth import get_auth_backend
from hil.errors import AuthorizationError, BadArgumentError, \
ProjectMismatchError, BlockedError
from hil.test_common import config_testsuite, config_merge, fresh_database, \
with_request_context, additional_db, fail_on_log_warnings, server_init
MOCK_OBM_API_NAME = 'http://schema.massopencloud.org/haas/v0/obm/mock'
MOCK_SWITCH_API_NAME = 'http://schema.massopencloud.org/haas/v0/switches/mock'
def auth_call_test(fn, error, admin, project, args, kwargs={}):
"""Test the authorization properties of an api call.
    Parameters:
* `fn` - the api function to call
* `error` - The error that should be raised. None if no error should
be raised.
* `admin` - Whether the request should have admin access.
* `project` - The name of the project the request should be
authenticated as. Can be None if `admin` is True.
* `args` - the arguments (as a list) to `fn`.
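    * `kwargs` - keyword arguments (as a dict) passed to `fn`; defaults to {}.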
"""
auth_backend = get_auth_backend()
auth_backend.set_admin(admin)
if not admin:
project = model.Project.query \
.filter_by(label=project).one()
auth_backend.set_project(project)
if error is None:
fn(*args, **kwargs)
else:
with pytest.raises(error):
fn(*args, **kwargs)
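# For example, one entry of the parameter tables below expands to:
#
#     auth_call_test(fn=api.show_network, error=None, admin=False,
#                    project='runway', args=['runway_pxe'])
#
# i.e. authenticated as project 'runway', show_network('runway_pxe') must
# succeed; with error=AuthorizationError the same call must raise instead.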
additional_db = pytest.fixture(additional_db)
fail_on_log_warnings = pytest.fixture(autouse=True)(fail_on_log_warnings)
@pytest.fixture
def configure():
"Configure HIL"
config_testsuite()
config_merge({
'extensions': {
'hil.ext.auth.mock': '',
# This extension is enabled by default in the tests, so we need to
# disable it explicitly:
'hil.ext.auth.null': None,
'hil.ext.switches.mock': '',
'hil.ext.obm.mock': ''
},
})
config.load_extensions()
fresh_database = pytest.fixture(fresh_database)
with_request_context = pytest.yield_fixture(with_request_context)
server_init = pytest.fixture(server_init)
pytestmark = pytest.mark.usefixtures('configure',
'fresh_database',
'additional_db',
'server_init',
'with_request_context')
# We have a *lot* of different parameters with which we're going to invoke
# `test_auth_call`, below. Rather than passing one giant list to the decorator
# in-line, we construct it in stages here:
auth_call_params = [
#
# network_create
#
# Legal Cases:
# Admin creates a public network internal to HIL.
dict(fn=api.network_create,
error=None,
admin=True,
project=None,
args=['pub', 'admin', '', '']),
# Admin creates a public network with an existing net_id.
dict(fn=api.network_create,
error=None,
admin=True,
project=None,
args=['pub', 'admin', '', 'some-id']),
# Admin creates a provider network for some project.
dict(fn=api.network_create,
error=None,
admin=True,
project=None,
args=['pxe', 'admin', 'runway', 'some-id']),
# Admin creates an allocated network on behalf of a project.
dict(fn=api.network_create,
error=None,
admin=True,
project=None,
args=['pxe', 'admin', 'runway', '']),
# Project creates a private network for themselves.
dict(fn=api.network_create,
error=None,
admin=False,
project='runway',
args=['pxe', 'runway', 'runway', '']),
# Illegal Cases:
# Project tries to create a private network for another project.
dict(fn=api.network_create,
error=AuthorizationError,
admin=False,
project='runway',
args=['pxe', 'manhattan', 'manhattan', '']),
# Project tries to specify a net_id.
dict(fn=api.network_create,
error=BadArgumentError,
admin=False,
project='runway',
args=['pxe', 'runway', 'runway', 'some-id']),
# Project tries to create a public network.
dict(fn=api.network_create,
error=AuthorizationError,
admin=False,
project='runway',
args=['pub', 'admin', '', '']),
# Project tries to set owner to 'admin' on its own network:
dict(fn=api.network_create,
error=AuthorizationError,
admin=False,
project='runway',
args=['pxe', 'admin', 'runway', '']),
]
#
# network_delete
#
# Legal Cases
# Admin should be able to delete any network.
for net in [
'stock_int_pub',
'stock_ext_pub',
'runway_pxe',
'runway_provider',
'manhattan_pxe',
'manhattan_provider',
]:
auth_call_params.append(dict(
fn=api.network_delete,
error=None,
admin=True,
project=None,
args=[net]
))
# Project should be able to delete its own (created) network.
auth_call_params.append(dict(
fn=api.network_delete,
error=None,
admin=False,
project='runway',
args=['runway_pxe']
))
# Illegal Cases:
# Project should not be able to delete admin-created networks.
for net in [
'stock_int_pub',
'stock_ext_pub',
'runway_provider', # ... including networks created for said project.
'manhattan_runway_provider',
]:
auth_call_params.append(dict(
fn=api.network_delete,
error=AuthorizationError,
admin=False,
project='runway',
args=[net]
))
# Project should not be able to delete networks created by other projects.
for net in [
'manhattan_pxe',
'manhattan_provider',
'manhattan_runway_pxe',
]:
auth_call_params.append(dict(
fn=api.network_delete,
error=AuthorizationError,
admin=False,
project='runway',
args=[net]))
#
# network_grant_project_access
#
# Legal cases
# admin should be able to add access to a network
# for any project (that does not already have access)
for (project, net) in [
('manhattan', 'runway_provider'),
('runway', 'manhattan_provider'),
('runway', 'manhattan_pxe'),
('manhattan', 'runway_pxe'),
]:
auth_call_params.append(dict(
fn=api.network_grant_project_access,
error=None,
admin=True,
project=None,
args=[project, net]
))
# project that is the owner of the network should
# be able to add access for other projects
for (project, project_access, net) in [
('manhattan', 'runway', 'manhattan_pxe'),
('runway', 'manhattan', 'runway_pxe'),
]:
auth_call_params.append(dict(
fn=api.network_grant_project_access,
error=None,
admin=False,
project=project,
args=[project_access, net]
))
# Illegal cases:
# Projects other than the network owner should not be able to grant access
for (project, project_access, net) in [
('manhattan', 'manhattan', 'runway_pxe'),
('runway', 'runway', 'manhattan_pxe'),
]:
auth_call_params.append(dict(
fn=api.network_grant_project_access,
error=AuthorizationError,
admin=False,
project=project,
args=[project_access, net]
))
#
# network_revoke_project_access
#
# Legal cases
# admin should be able to remove access to a network
# for any project (that was not the owner of the network)
# admin created networks with all the access removed will become
# public networks
for (project, net) in [
('runway', 'runway_provider'),
('runway', 'manhattan_runway_pxe'),
('manhattan', 'manhattan_provider'),
('runway', 'manhattan_runway_provider'),
('manhattan', 'manhattan_runway_provider'),
]:
auth_call_params.append(dict(
fn=api.network_revoke_project_access,
error=None,
admin=True,
project=None,
args=[project, net]
))
# project that is the owner of the network should
# be able to remove the access of other projects
# projects should be able to remove their own access
for (project, project_access, net) in [
('manhattan', 'runway', 'manhattan_runway_pxe'),
('runway', 'runway', 'manhattan_runway_pxe'),
('manhattan', 'manhattan', 'manhattan_runway_provider'),
('runway', 'runway', 'manhattan_runway_provider'),
]:
auth_call_params.append(dict(
fn=api.network_revoke_project_access,
error=None,
admin=False,
project=project,
args=[project_access, net]
))
# Illegal cases:
# Projects other than the network owner or the project
# itself should not be able to remove access of other projects
for (project, project_access, net) in [
('manhattan', 'runway', 'manhattan_runway_provider'),
]:
auth_call_params.append(dict(
fn=api.network_revoke_project_access,
error=AuthorizationError,
admin=False,
project=project,
args=[project_access, net]
))
#
# list_network_attachments
#
# Legal cases
# Admin should be able to list attachments for public network:
for net in ('stock_int_pub', 'stock_ext_pub'):
for project in ('runway', 'manhattan'):
auth_call_params.append(dict(
fn=api.list_network_attachments,
error=None,
admin=True,
project=project,
args=[net]
))
# Projects should be able to view their own nodes in a network:
for (project, net) in [
('runway', 'runway_pxe'),
('runway', 'runway_provider'),
('manhattan', 'manhattan_pxe'),
('manhattan', 'manhattan_provider'),
('manhattan', 'manhattan_runway_pxe'),
('manhattan', 'manhattan_runway_provider'),
('runway', 'manhattan_runway_pxe'),
('runway', 'manhattan_runway_provider'),
]:
auth_call_params.append(dict(
fn=api.list_network_attachments,
error=None,
admin=False,
project=project,
args=[net, project]
))
# owner of a network should be able to view all nodes in the network:
for (project, net) in [
('runway', 'runway_pxe'),
('manhattan', 'manhattan_pxe'),
('manhattan', 'manhattan_runway_pxe'),
]:
auth_call_params.append(dict(
fn=api.list_network_attachments,
error=None,
admin=False,
project=project,
args=[net]
))
# Illegal cases
# Projects should not be able to list nodes that do not belong to them
# (on a network they do not own)
for (project, access_project, net) in [
('runway', 'manhattan', 'manhattan_runway_pxe'),
('runway', 'manhattan', 'manhattan_runway_provider'),
('runway', None, 'manhattan_runway_pxe'),
('runway', None, 'manhattan_runway_provider'),
]:
auth_call_params.append(dict(
fn=api.list_network_attachments,
error=AuthorizationError,
admin=False,
project=project,
args=[net, access_project]
))
# or on networks they do not have access to
for (project, net) in [
('runway', 'manhattan_pxe'),
('runway', 'manhattan_provider'),
('manhattan', 'runway_pxe'),
('manhattan', 'runway_provider'),
]:
auth_call_params.append(dict(
fn=api.list_network_attachments,
error=AuthorizationError,
admin=False,
project=project,
args=[net, project]
))
#
# show_network
#
# Legal Cases
# Public networks should be accessible by anyone.
for net in ('stock_int_pub', 'stock_ext_pub'):
for project in ('runway', 'manhattan'):
for admin in (True, False):
auth_call_params.append(dict(
fn=api.show_network,
error=None,
admin=admin,
project=project,
args=[net]
))
# Projects should be able to view networks they have access to.
for (project, net) in [
('runway', 'runway_pxe'),
('runway', 'runway_provider'),
('manhattan', 'manhattan_pxe'),
('manhattan', 'manhattan_provider'),
('manhattan', 'manhattan_runway_pxe'),
('manhattan', 'manhattan_runway_provider'),
('runway', 'manhattan_runway_pxe'),
('runway', 'manhattan_runway_provider'),
]:
auth_call_params.append(dict(
fn=api.show_network,
error=None,
admin=False,
project=project,
args=[net]
))
# Illegal Cases
# Projects should not be able to access each other's networks.
for (project, net) in [
('runway', 'manhattan_pxe'),
('runway', 'manhattan_provider'),
('manhattan', 'runway_pxe'),
('manhattan', 'runway_provider'),
]:
auth_call_params.append(dict(
fn=api.show_network,
error=AuthorizationError,
admin=False,
project=project,
args=[net]
))
#
# node_connect_network
#
# Legal Cases
# Projects should be able to connect their own nodes to their own networks.
for (project, node, net) in [
('runway', 'runway_node_0', 'runway_pxe'),
('runway', 'runway_node_1', 'runway_provider'),
('manhattan', 'manhattan_node_0', 'manhattan_pxe'),
('manhattan', 'manhattan_node_1', 'manhattan_provider'),
]:
auth_call_params.append(dict(
fn=api.node_connect_network,
error=None,
admin=False,
project=project,
args=[node, 'boot-nic', net]
))
# Projects should be able to connect their nodes to public networks.
for net in ('stock_int_pub', 'stock_ext_pub'):
for (project, node) in [
('runway', 'runway_node_0'),
('runway', 'runway_node_1'),
('manhattan', 'manhattan_node_0'),
('manhattan', 'manhattan_node_1'),
]:
auth_call_params.append(dict(
fn=api.node_connect_network,
error=None,
admin=False,
project=project,
args=[node, 'boot-nic', net]))
# Illegal Cases
# Projects are not able to connect their nodes to each other's networks.
for (node, net) in [
('runway_node_0', 'manhattan_pxe'),
('runway_node_1', 'manhattan_provider'),
]:
auth_call_params.append(dict(
fn=api.node_connect_network,
error=ProjectMismatchError,
admin=False,
project='runway',
args=[node, 'boot-nic', net]
))
auth_call_params += [
# Projects are not able to attach each other's nodes to public networks.
dict(fn=api.node_connect_network,
error=AuthorizationError,
admin=False,
project='runway',
args=['manhattan_node_0', 'boot-nic', 'stock_int_pub']),
# Projects should not be able to attach free nodes to networks.
    # The same note about the exception type as above applies.
dict(fn=api.node_connect_network,
error=ProjectMismatchError,
admin=False,
project='runway',
args=['free_node_0', 'boot-nic', 'stock_int_pub']),
#
# list_project_nodes
#
# Legal Cases
# Admin lists a project's nodes.
dict(fn=api.list_project_nodes,
error=None,
admin=True,
project=None,
args=['runway']),
# Project lists its own nodes.
dict(fn=api.list_project_nodes,
error=None,
admin=False,
project='runway',
args=['runway']),
# Illegal Cases
# Project lists another project's nodes.
dict(fn=api.list_project_nodes,
error=AuthorizationError,
admin=False,
project='runway',
args=['manhattan']),
#
# show_node
#
# Legal Cases:
# Project shows a free node.
dict(fn=api.show_node,
error=None,
admin=False,
project='runway',
args=['free_node_0']),
# Project shows its own node.
dict(fn=api.show_node,
error=None,
admin=False,
project='runway',
args=['runway_node_0']),
# Illegal Cases:
# Project tries to show another project's node.
dict(fn=api.show_node,
error=AuthorizationError,
admin=False,
project='runway',
args=['manhattan_node_0']),
#
# project_connect_node:
#
# Project tries to connect someone else's node to itself. The basic cases
# of connecting a free node are covered by project_calls, below.
dict(fn=api.project_connect_node,
error=BlockedError,
admin=False,
project='runway',
args=['runway', 'manhattan_node_0']),
]
@pytest.mark.parametrize('kwargs', auth_call_params)
def test_auth_call(kwargs):
"""Call auth_call_test on our huge list of cases.
We use auth_call_test in a few other places, hence the separate wrapper
for the actual test case.
"""
return auth_call_test(**kwargs)
# There are a whole bunch of api calls that just unconditionally require admin
# access. This is a list of (function, args, kwargs) triples, each of which
# should succeed as admin and fail as a regular project. The actual test functions for
# these are below.
admin_calls = [
(api.node_register, ['new_node'], {
'obm': {
"type": MOCK_OBM_API_NAME,
"host": "ipmihost",
"user": "root",
"password": "tapeworm",
},
'obmd': {
'uri': 'http://obmd.example.com/node/new_node',
'admin_token': 'secret',
},
}),
(api.node_delete, ['no_nic_node'], {}),
(api.node_register_nic, ['free_node_0', 'extra-nic',
'de:ad:be:ef:20:16'], {}),
(api.node_delete_nic, ['free_node_0', 'boot-nic'], {}),
(api.project_create, ['anvil-nextgen'], {}),
(api.list_projects, [], {}),
# node_power_* and node_set_bootdev, on free nodes only.
# Nodes assigned to a project are tested in project_calls, below.
(api.node_power_cycle, ['free_node_0'], {}),
(api.node_power_off, ['free_node_0'], {}),
    (api.node_set_bootdev, ['free_node_0'], {'bootdev': 'none'}),
(api.project_delete, ['empty-project'], {}),
(api.switch_register, ['new-switch', MOCK_SWITCH_API_NAME], {
'hostname': 'oak-ridge',
'username': 'alice',
'password': 'changeme',
}),
(api.switch_delete, ['empty-switch'], {}),
(api.switch_register_port, ['stock_switch_0', 'gi1/0/13'], {}),
(api.switch_delete_port, ['stock_switch_0', 'free_port_0'], {}),
(api.port_connect_nic, ['stock_switch_0', 'free_port_0',
'free_node_0', 'boot-nic'], {}),
(api.show_port, ['stock_switch_0', 'free_port_0'], {}),
(api.port_detach_nic, ['stock_switch_0', 'free_node_0_port'], {}),
(api.node_set_metadata, ['free_node_0', 'EK', 'pk'], {}),
(api.node_delete_metadata, ['runway_node_0', 'EK'], {}),
(api.port_revert, ['stock_switch_0', 'free_node_0_port'], {}),
(api.list_active_extensions, [], {}),
]
# Similarly, there are a large number of calls that require access to a
# particular project. This is a list of (function, args, kwargs) triples that
# should succeed as project 'runway', and fail as project 'manhattan'.
project_calls = [
# node_power_* and node_set_bootdev, on allocated nodes only.
    # Free nodes are tested in admin_calls, above.
(api.node_power_cycle, ['runway_node_0'], {}),
(api.node_power_off, ['runway_node_0'], {}),
    (api.node_set_bootdev, ['runway_node_0'], {'bootdev': 'none'}),
(api.project_connect_node, ['runway', 'free_node_0'], {}),
(api.project_detach_node, ['runway', 'runway_node_0'], {}),
(api.headnode_create, ['new-headnode', 'runway', 'base-headnode'], {}),
(api.headnode_delete, ['runway_headnode_off'], {}),
(api.headnode_start, ['runway_headnode_off'], {}),
(api.headnode_stop, ['runway_headnode_on'], {}),
(api.headnode_create_hnic, ['runway_headnode_off', 'extra-hnic'], {}),
(api.headnode_delete_hnic, ['runway_headnode_off', 'pxe'], {}),
(api.headnode_connect_network, ['runway_headnode_off',
'pxe', 'stock_int_pub'], {}),
(api.headnode_connect_network, ['runway_headnode_off',
'pxe', 'runway_pxe'], {}),
(api.headnode_detach_network, ['runway_headnode_off', 'public'], {}),
(api.list_project_headnodes, ['runway'], {}),
(api.show_headnode, ['runway_headnode_on'], {}),
]
@pytest.mark.parametrize('fn,args,kwargs', admin_calls)
def test_admin_succeed(fn, args, kwargs):
"""Verify that a call succeeds as admin."""
auth_call_test(fn=fn,
error=None,
admin=True,
project=None,
args=args,
kwargs=kwargs)
@pytest.mark.parametrize('fn,args,kwargs', admin_calls)
def test_admin_fail(fn, args, kwargs):
"""Verify that a call fails when not admin."""
auth_call_test(fn=fn,
error=AuthorizationError,
admin=False,
project='runway',
args=args,
kwargs=kwargs)
@pytest.mark.parametrize('fn,args,kwargs', project_calls)
def test_runway_succeed(fn, args, kwargs):
"""Verify that a call succeeds when run as the 'runway' project."""
auth_call_test(fn=fn,
error=None,
admin=False,
project='runway',
args=args,
kwargs=kwargs)
@pytest.mark.parametrize('fn,args,kwargs', project_calls)
def test_manhattan_fail(fn, args, kwargs):
"""Verify that a call fails when run as the 'manhattan' project."""
auth_call_test(fn=fn,
error=AuthorizationError,
admin=False,
project='manhattan',
args=args,
kwargs=kwargs)
class Test_node_detach_network(unittest.TestCase):
"""Test authorization properties of node_detach_network."""
def setUp(self):
"""Common setup for the tests.
* node 'manhattan_node_0' is attached to network 'stock_int_pub', via
'boot-nic'.
This also sets some properties for easy access to the projects.
"""
self.auth_backend = get_auth_backend()
self.runway = model.Project.query.filter_by(label='runway').one()
self.manhattan = model.Project.query.filter_by(label='manhattan').one()
# The individual tests set the right project, but we need this to
# connect the network during setup:
self.auth_backend.set_project(self.manhattan)
api.node_connect_network('manhattan_node_0',
'boot-nic',
'stock_int_pub')
deferred.apply_networking()
def test_success(self):
"""Project 'manhattan' can detach its own node."""
self.auth_backend.set_project(self.manhattan)
api.node_detach_network('manhattan_node_0',
'boot-nic',
'stock_int_pub')
def test_wrong_project(self):
"""Project 'runway' cannot detach "manhattan"'s node."""
self.auth_backend.set_project(self.runway)
with pytest.raises(AuthorizationError):
api.node_detach_network('manhattan_node_0',
'boot-nic',
'stock_int_pub')
class Test_show_networking_action(unittest.TestCase):
"""Test authorization properties of show_networking_action."""
def setUp(self):
"""Common setup for the tests.
* node 'manhattan_node_0' is attached to network 'stock_int_pub', via
'boot-nic'.
This also sets some properties for easy access to the projects.
"""
self.auth_backend = get_auth_backend()
self.runway = model.Project.query.filter_by(label='runway').one()
self.manhattan = model.Project.query.filter_by(label='manhattan').one()
self.auth_backend.set_project(self.manhattan)
response = api.node_connect_network('manhattan_node_0',
'boot-nic',
'stock_int_pub')
self.status_id = json.loads(response[0])
self.status_id = self.status_id['status_id']
def test_show_networking_action_success(self):
"""Test that project that has access to the node can run
show_networking_action"""
self.auth_backend.set_project(self.manhattan)
response = json.loads(api.show_networking_action(self.status_id))
assert response == {'status': 'PENDING',
'node': 'manhattan_node_0',
'nic': 'boot-nic',
'type': 'modify_port',
'channel': 'null',
'new_network': 'stock_int_pub'}
def test_show_networking_action_failure(self):
"""Test that project with no access to node can't get the status"""
self.auth_backend.set_project(self.runway)
with pytest.raises(AuthorizationError):
api.show_networking_action(self.status_id)
def test_show_networking_action_admin(self):
"""Test that admins can get status"""
self.auth_backend.set_project(self.runway)
self.auth_backend.set_admin(True)
response = json.loads(api.show_networking_action(self.status_id))
assert response == {'status': 'PENDING',
'node': 'manhattan_node_0',
'nic': 'boot-nic',
'type': 'modify_port',
'channel': 'null',
'new_network': 'stock_int_pub'}
|
|
import hashlib
import os
import random
import re
import string
import time
from base64 import decodestring
from contextlib import contextmanager
from datetime import datetime
from django import dispatch, forms
from django.conf import settings
from django.contrib.auth.hashers import BasePasswordHasher, mask_hash
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.core import validators
from django.db import models, transaction
from django.template import Context, loader
from django.utils import translation
from django.utils.crypto import constant_time_compare
from django.utils.datastructures import SortedDict
from django.utils.encoding import smart_str, smart_unicode
from django.utils.functional import lazy
import caching.base as caching
import commonware.log
import tower
from tower import ugettext as _
import waffle
import amo
import amo.models
from access.models import Group, GroupUser
from amo.urlresolvers import reverse
from translations.fields import NoLinksField, save_signal
from translations.models import Translation
from translations.query import order_by_translation
log = commonware.log.getLogger('z.users')
class SHA512PasswordHasher(BasePasswordHasher):
"""
The SHA2 password hashing algorithm, 512 bits.
"""
algorithm = 'sha512'
def encode(self, password, salt):
assert password is not None
assert salt and '$' not in salt
hash = hashlib.new('sha512', smart_str(salt + password)).hexdigest()
return "%s$%s$%s" % (self.algorithm, salt, hash)
def verify(self, password, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
encoded_2 = self.encode(password, salt)
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
return SortedDict([
(_('algorithm'), algorithm),
(_('salt'), mask_hash(salt, show=2)),
(_('hash'), mask_hash(hash)),
])
def get_hexdigest(algorithm, salt, raw_password):
if 'base64' in algorithm:
# These are getpersonas passwords with base64 encoded salts.
salt = decodestring(salt)
algorithm = algorithm.replace('+base64', '')
if algorithm.startswith('sha512+MD5'):
# These are persona specific passwords when we imported
# users from getpersonas.com. The password is md5 hashed
# and then sha512'd.
md5 = hashlib.new('md5', raw_password).hexdigest()
return hashlib.new('sha512', smart_str(salt + md5)).hexdigest()
return hashlib.new(algorithm, smart_str(salt + raw_password)).hexdigest()
def rand_string(length):
return ''.join(random.choice(string.letters) for i in xrange(length))
def create_password(algorithm, raw_password):
salt = get_hexdigest(algorithm, rand_string(12), rand_string(12))[:64]
hsh = get_hexdigest(algorithm, salt, raw_password)
return '$'.join([algorithm, salt, hsh])
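# Stored passwords therefore look like 'algorithm$salt$hash': with the default
# sha512 hasher the salt is the first 64 hex chars of a sha512 digest and the
# hash is the 128-hex-char sha512 of salt + password, which is the format
# UserProfile.check_password() splits apart below.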
class UserForeignKey(models.ForeignKey):
"""
A replacement for models.ForeignKey('users.UserProfile').
This field uses UserEmailField to make form fields key off the user's email
instead of the primary key id. We also hook up autocomplete automatically.
"""
def __init__(self, *args, **kw):
super(UserForeignKey, self).__init__(UserProfile, *args, **kw)
def value_from_object(self, obj):
return getattr(obj, self.name).email
def formfield(self, **kw):
defaults = {'form_class': UserEmailField}
defaults.update(kw)
return models.Field.formfield(self, **defaults)
class UserEmailField(forms.EmailField):
def clean(self, value):
if value in validators.EMPTY_VALUES:
raise forms.ValidationError(self.error_messages['required'])
try:
return UserProfile.objects.get(email=value)
except UserProfile.DoesNotExist:
raise forms.ValidationError(_('No user with that email.'))
def widget_attrs(self, widget):
lazy_reverse = lazy(reverse, str)
return {'class': 'email-autocomplete',
'data-src': lazy_reverse('users.ajax')}
class UserManager(BaseUserManager, amo.models.ManagerBase):
pass
AbstractBaseUser._meta.get_field('password').max_length = 255
class UserProfile(amo.models.OnChangeMixin, amo.models.ModelBase, AbstractBaseUser):
objects = UserManager()
USERNAME_FIELD = 'username'
username = models.CharField(max_length=255, default='', unique=True)
display_name = models.CharField(max_length=255, default='', null=True,
blank=True)
email = models.EmailField(unique=True, null=True)
averagerating = models.CharField(max_length=255, blank=True, null=True)
bio = NoLinksField(short=False)
confirmationcode = models.CharField(max_length=255, default='',
blank=True)
deleted = models.BooleanField(default=False)
display_collections = models.BooleanField(default=False)
display_collections_fav = models.BooleanField(default=False)
emailhidden = models.BooleanField(default=True)
homepage = models.URLField(max_length=255, blank=True, default='')
location = models.CharField(max_length=255, blank=True, default='')
notes = models.TextField(blank=True, null=True)
notifycompat = models.BooleanField(default=True)
notifyevents = models.BooleanField(default=True)
occupation = models.CharField(max_length=255, default='', blank=True)
# This is essentially a "has_picture" flag right now
picture_type = models.CharField(max_length=75, default='', blank=True)
resetcode = models.CharField(max_length=255, default='', blank=True)
resetcode_expires = models.DateTimeField(default=datetime.now, null=True,
blank=True)
read_dev_agreement = models.DateTimeField(null=True, blank=True)
last_login_ip = models.CharField(default='', max_length=45, editable=False)
last_login_attempt = models.DateTimeField(null=True, editable=False)
last_login_attempt_ip = models.CharField(default='', max_length=45,
editable=False)
failed_login_attempts = models.PositiveIntegerField(default=0,
editable=False)
source = models.PositiveIntegerField(default=amo.LOGIN_SOURCE_UNKNOWN,
editable=False, db_index=True)
is_verified = models.BooleanField(default=True)
region = models.CharField(max_length=11, null=True, blank=True,
editable=False)
lang = models.CharField(max_length=5, null=True, blank=True,
default=settings.LANGUAGE_CODE)
class Meta:
db_table = 'users'
def __init__(self, *args, **kw):
super(UserProfile, self).__init__(*args, **kw)
if self.username:
self.username = smart_unicode(self.username)
def __unicode__(self):
return u'%s: %s' % (self.id, self.display_name or self.username)
@property
def is_superuser(self):
return self.groups.filter(rules='*:*').exists()
@property
def is_staff(self):
from access import acl
return acl.action_allowed_user(self, 'Admin', '%')
def has_perm(self, perm, obj=None):
return self.is_superuser
def has_module_perms(self, app_label):
return self.is_superuser
def get_backend(self):
if waffle.switch_is_active('browserid-login'):
return 'django_browserid.auth.BrowserIDBackend'
else:
return 'users.backends.AmoUserBackend'
def set_backend(self, val):
pass
backend = property(get_backend, set_backend)
def is_anonymous(self):
return False
def get_user_url(self, name='profile', src=None, args=None):
"""
We use <username> as the slug, unless it contains gross
characters - in which case use <id> as the slug.
"""
from amo.utils import urlparams
chars = '/<>"\''
slug = self.username
if not self.username or any(x in chars for x in self.username):
slug = self.id
args = args or []
url = reverse('users.%s' % name, args=[slug] + args)
return urlparams(url, src=src)
def get_url_path(self, src=None):
return self.get_user_url('profile', src=src)
def flush_urls(self):
urls = ['*/user/%d/' % self.id,
self.picture_url,
]
return urls
@amo.cached_property
def addons_listed(self):
"""Public add-ons this user is listed as author of."""
return self.addons.reviewed().filter(
addonuser__user=self, addonuser__listed=True)
@property
def num_addons_listed(self):
"""Number of public add-ons this user is listed as author of."""
return self.addons.reviewed().filter(
addonuser__user=self, addonuser__listed=True).count()
def my_addons(self, n=8):
"""Returns n addons"""
qs = order_by_translation(self.addons, 'name')
return qs[:n]
@property
def picture_dir(self):
from amo.helpers import user_media_path
split_id = re.match(r'((\d*?)(\d{0,3}?))\d{1,3}$', str(self.id))
return os.path.join(user_media_path('userpics'), split_id.group(2) or '0',
split_id.group(1) or '0')
@property
def picture_path(self):
return os.path.join(self.picture_dir, str(self.id) + '.png')
@property
def picture_url(self):
from amo.helpers import user_media_url
if not self.picture_type:
return settings.STATIC_URL + '/img/zamboni/anon_user.png'
else:
split_id = re.match(r'((\d*?)(\d{0,3}?))\d{1,3}$', str(self.id))
modified = int(time.mktime(self.modified.timetuple()))
path = "/".join([
split_id.group(2) or '0',
split_id.group(1) or '0',
"%s.png?modified=%s" % (self.id, modified)
])
return user_media_url('userpics') + path
@amo.cached_property
def is_developer(self):
return self.addonuser_set.exists()
@amo.cached_property
def is_addon_developer(self):
return self.addonuser_set.exclude(
addon__type=amo.ADDON_PERSONA).exists()
@amo.cached_property
def is_artist(self):
"""Is this user a Personas Artist?"""
return self.addonuser_set.filter(
addon__type=amo.ADDON_PERSONA).exists()
@amo.cached_property
def needs_tougher_password(user):
if user.source in amo.LOGIN_SOURCE_BROWSERIDS:
return False
from access import acl
return (acl.action_allowed_user(user, 'Admin', '%') or
acl.action_allowed_user(user, 'Addons', 'Edit') or
acl.action_allowed_user(user, 'Addons', 'Review') or
acl.action_allowed_user(user, 'Apps', 'Review') or
acl.action_allowed_user(user, 'Personas', 'Review') or
acl.action_allowed_user(user, 'Users', 'Edit'))
@property
def name(self):
return smart_unicode(self.display_name or self.username)
welcome_name = name
@amo.cached_property
def reviews(self):
"""All reviews that are not dev replies."""
qs = self._reviews_all.filter(reply_to=None)
# Force the query to occur immediately. Several
# reviews-related tests hang if this isn't done.
return qs
def anonymize(self):
log.info(u"User (%s: <%s>) is being anonymized." % (self, self.email))
self.email = None
self.password = "sha512$Anonymous$Password"
self.username = "Anonymous-%s" % self.id # Can't be null
self.display_name = None
self.homepage = ""
self.deleted = True
self.picture_type = ""
self.save()
@transaction.commit_on_success
def restrict(self):
from amo.utils import send_mail
log.info(u'User (%s: <%s>) is being restricted and '
'its user-generated content removed.' % (self, self.email))
g = Group.objects.get(rules='Restricted:UGC')
GroupUser.objects.create(user=self, group=g)
self.reviews.all().delete()
self.collections.all().delete()
t = loader.get_template('users/email/restricted.ltxt')
send_mail(_('Your account has been restricted'),
t.render(Context({})), None, [self.email],
use_blacklist=False, real_email=True)
def unrestrict(self):
log.info(u'User (%s: <%s>) is being unrestricted.' % (self,
self.email))
GroupUser.objects.filter(user=self,
group__rules='Restricted:UGC').delete()
def generate_confirmationcode(self):
if not self.confirmationcode:
self.confirmationcode = ''.join(random.sample(string.letters +
string.digits, 60))
return self.confirmationcode
def save(self, force_insert=False, force_update=False, using=None, **kwargs):
# we have to fix stupid things that we defined poorly in remora
if not self.resetcode_expires:
self.resetcode_expires = datetime.now()
super(UserProfile, self).save(force_insert, force_update, using, **kwargs)
def has_usable_password(self):
"""Override AbstractBaseUser.has_usable_password."""
# We also override the check_password method, and don't rely on
# settings.PASSWORD_HASHERS, and don't use "set_unusable_password", so
# we want to bypass most of AbstractBaseUser.has_usable_password
# checks.
return bool(self.password) # Not None and not empty.
def check_password(self, raw_password):
if '$' not in self.password:
valid = (get_hexdigest('md5', '', raw_password) == self.password)
if valid:
# Upgrade an old password.
self.set_password(raw_password)
self.save()
return valid
algo, salt, hsh = self.password.split('$')
        # Complication due to getpersonas account migration; we don't
        # know if passwords were utf-8 or latin-1 when hashed. If you
        # can prove that they are one or the other, you can delete one
        # of these branches.
if '+base64' in algo and isinstance(raw_password, unicode):
if hsh == get_hexdigest(algo, salt, raw_password.encode('utf-8')):
return True
else:
try:
return hsh == get_hexdigest(algo, salt,
raw_password.encode('latin1'))
except UnicodeEncodeError:
return False
else:
return hsh == get_hexdigest(algo, salt, raw_password)
def set_password(self, raw_password, algorithm='sha512'):
self.password = create_password(algorithm, raw_password)
# Can't do CEF logging here because we don't have a request object.
def email_confirmation_code(self):
from amo.utils import send_mail
log.debug("Sending account confirmation code for user (%s)", self)
url = "%s%s" % (settings.SITE_URL,
reverse('users.confirm',
args=[self.id, self.confirmationcode]))
domain = settings.DOMAIN
t = loader.get_template('users/email/confirm.ltxt')
c = {'domain': domain, 'url': url, }
send_mail(_("Please confirm your email address"),
t.render(Context(c)), None, [self.email],
use_blacklist=False, real_email=True)
def log_login_attempt(self, successful):
"""Log a user's login attempt"""
self.last_login_attempt = datetime.now()
self.last_login_attempt_ip = commonware.log.get_remote_addr()
if successful:
log.debug(u"User (%s) logged in successfully" % self)
self.failed_login_attempts = 0
self.last_login_ip = commonware.log.get_remote_addr()
else:
log.debug(u"User (%s) failed to log in" % self)
if self.failed_login_attempts < 16777216:
self.failed_login_attempts += 1
self.save(update_fields=['last_login_ip', 'last_login_attempt',
'last_login_attempt_ip',
'failed_login_attempts'])
def mobile_collection(self):
return self.special_collection(amo.COLLECTION_MOBILE,
defaults={'slug': 'mobile', 'listed': False,
'name': _('My Mobile Add-ons')})
def favorites_collection(self):
return self.special_collection(amo.COLLECTION_FAVORITES,
defaults={'slug': 'favorites', 'listed': False,
'name': _('My Favorite Add-ons')})
def special_collection(self, type_, defaults):
from bandwagon.models import Collection
c, new = Collection.objects.get_or_create(
author=self, type=type_, defaults=defaults)
if new:
# Do an extra query to make sure this gets transformed.
c = Collection.objects.using('default').get(id=c.id)
return c
@contextmanager
def activate_lang(self):
"""
        Activate the language for the user. If none is set, fall back to the
        site default, which is en-US.
"""
lang = self.lang if self.lang else settings.LANGUAGE_CODE
old = translation.get_language()
tower.activate(lang)
yield
tower.activate(old)
def remove_locale(self, locale):
"""Remove the given locale for the user."""
Translation.objects.remove_for(self, locale)
@classmethod
def get_fallback(cls):
return cls._meta.get_field('lang')
models.signals.pre_save.connect(save_signal, sender=UserProfile,
dispatch_uid='userprofile_translations')
@dispatch.receiver(models.signals.post_save, sender=UserProfile,
dispatch_uid='user.post_save')
def user_post_save(sender, instance, **kw):
if not kw.get('raw'):
from . import tasks
tasks.index_users.delay([instance.id])
@dispatch.receiver(models.signals.post_delete, sender=UserProfile,
dispatch_uid='user.post_delete')
def user_post_delete(sender, instance, **kw):
if not kw.get('raw'):
from . import tasks
tasks.unindex_users.delay([instance.id])
class UserNotification(amo.models.ModelBase):
user = models.ForeignKey(UserProfile, related_name='notifications')
notification_id = models.IntegerField()
enabled = models.BooleanField(default=False)
class Meta:
db_table = 'users_notifications'
@staticmethod
    def update_or_create(update=None, **kwargs):
        # Copy instead of using a mutable default so repeated calls don't
        # share (and mutate) the same dict.
        update = dict(update) if update else {}
        rows = UserNotification.objects.filter(**kwargs).update(**update)
        if not rows:
            update.update(kwargs)
            UserNotification.objects.create(**update)
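    # Illustrative usage (not from the original code): enable notification
    # id 3 for a user, creating the row only if it does not exist yet.
    #   UserNotification.update_or_create(update={'enabled': True},
    #                                     user=some_user, notification_id=3)
    # `some_user` stands in for an existing UserProfile instance.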
class RequestUserManager(amo.models.ManagerBase):
def get_query_set(self):
qs = super(RequestUserManager, self).get_query_set()
return qs.transform(RequestUser.transformer)
class RequestUser(UserProfile):
"""
A RequestUser has extra attributes we don't care about for normal users.
"""
objects = RequestUserManager()
def __init__(self, *args, **kw):
super(RequestUser, self).__init__(*args, **kw)
self.mobile_addons = []
self.favorite_addons = []
self.watching = []
class Meta:
proxy = True
@staticmethod
def transformer(users):
# We don't want to cache these things on every UserProfile; they're
# only used by a user attached to a request.
if not users:
return
# Touch this @cached_property so the answer is cached with the object.
user = users[0]
user.is_developer
from bandwagon.models import CollectionAddon, CollectionWatcher
SPECIAL = amo.COLLECTION_SPECIAL_SLUGS.keys()
qs = CollectionAddon.objects.filter(
collection__author=user, collection__type__in=SPECIAL)
addons = dict((type_, []) for type_ in SPECIAL)
for addon, ctype in qs.values_list('addon', 'collection__type'):
addons[ctype].append(addon)
user.mobile_addons = addons[amo.COLLECTION_MOBILE]
user.favorite_addons = addons[amo.COLLECTION_FAVORITES]
user.watching = list((CollectionWatcher.objects.filter(user=user)
.values_list('collection', flat=True)))
def _cache_keys(self):
# Add UserProfile.cache_key so RequestUser gets invalidated when the
# UserProfile is changed.
keys = super(RequestUser, self)._cache_keys()
return keys + (UserProfile._cache_key(self.id, 'default'),)
class BlacklistedName(amo.models.ModelBase):
"""Blacklisted User usernames and display_names + Collections' names."""
name = models.CharField(max_length=255, unique=True, default='')
class Meta:
db_table = 'users_blacklistedname'
def __unicode__(self):
return self.name
@classmethod
def blocked(cls, name):
"""
Check to see if a given name is in the (cached) blacklist.
Return True if the name contains one of the blacklisted terms.
"""
name = name.lower()
qs = cls.objects.all()
f = lambda: [n.lower() for n in qs.values_list('name', flat=True)]
blacklist = caching.cached_with(qs, f, 'blocked')
return any(n in name for n in blacklist)
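# Illustrative example (not part of the original module): if 'spam' is stored
# in users_blacklistedname, BlacklistedName.blocked('My Spam Add-ons') returns
# True, because the check is a case-insensitive substring match against the
# cached blacklist.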
class BlacklistedEmailDomain(amo.models.ModelBase):
"""Blacklisted user e-mail domains."""
domain = models.CharField(max_length=255, unique=True, default='',
blank=False)
def __unicode__(self):
return self.domain
@classmethod
def blocked(cls, domain):
qs = cls.objects.all()
f = lambda: list(qs.values_list('domain', flat=True))
blacklist = caching.cached_with(qs, f, 'blocked')
# because there isn't a good way to know if the domain is
# "example.com" or "example.co.jp", we'll re-construct it...
# so if it's "bad.example.co.jp", the following check the
# values in ['bad.example.co.jp', 'example.co.jp', 'co.jp']
x = domain.lower().split('.')
for d in ['.'.join(x[y:]) for y in range(len(x) - 1)]:
if d in blacklist:
return True
class BlacklistedPassword(amo.models.ModelBase):
"""Blacklisted passwords"""
password = models.CharField(max_length=255, unique=True, blank=False)
def __unicode__(self):
return self.password
@classmethod
def blocked(cls, password):
        return cls.objects.filter(password=password).exists()
class UserHistory(amo.models.ModelBase):
email = models.EmailField()
user = models.ForeignKey(UserProfile, related_name='history')
class Meta:
db_table = 'users_history'
ordering = ('-created',)
@UserProfile.on_change
def watch_email(old_attr={}, new_attr={}, instance=None,
sender=None, **kw):
new_email, old_email = new_attr.get('email'), old_attr.get('email')
if old_email and new_email != old_email:
log.debug('Creating user history for user: %s' % instance.pk)
UserHistory.objects.create(email=old_email, user_id=instance.pk)
|
|
# File: ticTacToeTests.py
# from chapter 18 of _Genetic Algorithms with Python_
#
# Author: Clinton Sheppard <fluentcoder@gmail.com>
# Copyright (c) 2016 Clinton Sheppard
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import datetime
import random
import unittest
from functools import partial
import genetic
def get_fitness(genes):
localCopy = genes[:]
fitness = get_fitness_for_games(localCopy)
fitness.GeneCount = len(genes)
return fitness
squareIndexes = [1, 2, 3, 4, 5, 6, 7, 8, 9]
def play1on1(xGenes, oGenes):
board = dict((i, Square(i, ContentType.Empty)) for i in range(1, 9 + 1))
empties = [v for v in board.values() if v.Content == ContentType.Empty]
roundData = [[xGenes, ContentType.Mine, genetic.CompetitionResult.Loss,
genetic.CompetitionResult.Win],
[oGenes, ContentType.Opponent, genetic.CompetitionResult.Win,
genetic.CompetitionResult.Loss]]
playerIndex = 0
while len(empties) > 0:
playerData = roundData[playerIndex]
playerIndex = 1 - playerIndex
genes, piece, lossResult, winResult = playerData
moveAndRuleIndex = get_move(genes, board, empties)
if moveAndRuleIndex is None: # could not find a move
return lossResult
index = moveAndRuleIndex[0]
board[index] = Square(index, piece)
mostRecentMoveOnly = [board[index]]
if len(RowContentFilter(piece, 3).get_matches(board, mostRecentMoveOnly)) > 0 or \
len(ColumnContentFilter(piece, 3).get_matches(board, mostRecentMoveOnly)) > 0 or \
len(DiagonalContentFilter(piece, 3).get_matches(board, mostRecentMoveOnly)) > 0:
return winResult
empties = [v for v in board.values() if v.Content == ContentType.Empty]
return genetic.CompetitionResult.Tie
def get_fitness_for_games(genes):
def getBoardString(b):
return ''.join(map(lambda i:
'.' if b[i].Content == ContentType.Empty
else 'x' if b[i].Content == ContentType.Mine
else 'o', squareIndexes))
board = dict((i, Square(i, ContentType.Empty)) for i in range(1, 9 + 1))
queue = [board]
for square in board.values():
        candidateCopy = board.copy()
        candidateCopy[square.Index] = Square(square.Index, ContentType.Opponent)
        queue.append(candidateCopy)
winningRules = {}
wins = ties = losses = 0
while len(queue) > 0:
board = queue.pop()
boardString = getBoardString(board)
empties = [v for v in board.values() if v.Content == ContentType.Empty]
if len(empties) == 0:
ties += 1
continue
candidateIndexAndRuleIndex = get_move(genes, board, empties)
if candidateIndexAndRuleIndex is None: # could not find a move
# there are empties but didn't find a move
losses += 1
# go to next board
continue
# found at least one move
index = candidateIndexAndRuleIndex[0]
board[index] = Square(index, ContentType.Mine)
# newBoardString = getBoardString(board)
# if we now have three MINE in any ROW, COLUMN or DIAGONAL, we won
mostRecentMoveOnly = [board[index]]
if len(iHaveThreeInRow.get_matches(board, mostRecentMoveOnly)) > 0 or \
len(iHaveThreeInColumn.get_matches(board, mostRecentMoveOnly)) > 0 or \
len(iHaveThreeInDiagonal.get_matches(board, mostRecentMoveOnly)) > 0:
ruleId = candidateIndexAndRuleIndex[1]
if ruleId not in winningRules:
winningRules[ruleId] = list()
winningRules[ruleId].append(boardString)
wins += 1
# go to next board
continue
# we lose if any empties have two OPPONENT pieces in ROW, COL or DIAG
empties = [v for v in board.values() if v.Content == ContentType.Empty]
if len(opponentHasTwoInARow.get_matches(board, empties)) > 0:
losses += 1
# go to next board
continue
# queue all possible OPPONENT responses
for square in empties:
            candidateCopy = board.copy()
            candidateCopy[square.Index] = Square(square.Index,
                                                 ContentType.Opponent)
            queue.append(candidateCopy)
return Fitness(wins, ties, losses, len(genes))
def get_move(ruleSet, board, empties, startingRuleIndex=0):
ruleSetCopy = ruleSet[:]
for ruleIndex in range(startingRuleIndex, len(ruleSetCopy)):
gene = ruleSetCopy[ruleIndex]
matches = gene.get_matches(board, empties)
if len(matches) == 0:
continue
if len(matches) == 1:
return [list(matches)[0], ruleIndex]
if len(empties) > len(matches):
empties = [e for e in empties if e.Index in matches]
return None
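# How get_move narrows the choice (editorial sketch, not original code): each
# rule returns the set of empty squares it matches. A rule that matches nothing
# is skipped, a rule that matches exactly one square decides the move, and a
# rule that matches several squares only shrinks the candidate list handed to
# the rules after it. If the last rule still leaves more than one candidate,
# get_move returns None and the caller counts that board as a loss.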
def display(candidate, startTime):
timeDiff = datetime.datetime.now() - startTime
localCopy = candidate.Genes[:]
for i in reversed(range(len(localCopy))):
localCopy[i] = str(localCopy[i])
print("\t{}\n{}\n{}".format(
'\n\t'.join([d for d in localCopy]),
candidate.Fitness,
timeDiff))
def mutate_add(genes, geneset):
index = random.randrange(0, len(genes) + 1) if len(genes) > 0 else 0
genes[index:index] = [random.choice(geneset)]
return True
def mutate_remove(genes):
if len(genes) < 1:
return False
del genes[random.randrange(0, len(genes))]
if len(genes) > 1 and random.randint(0, 1) == 1:
del genes[random.randrange(0, len(genes))]
return True
def mutate_replace(genes, geneset):
if len(genes) < 1:
return False
index = random.randrange(0, len(genes))
genes[index] = random.choice(geneset)
return True
def mutate_swap_adjacent(genes):
if len(genes) < 2:
return False
index = random.choice(range(len(genes) - 1))
genes[index], genes[index + 1] = genes[index + 1], genes[index]
return True
def mutate_move(genes):
if len(genes) < 3:
return False
start = random.choice(range(len(genes)))
stop = start + random.randint(1, 2)
toMove = genes[start:stop]
genes[start:stop] = []
index = random.choice(range(len(genes)))
if index >= start:
index += 1
genes[index:index] = toMove
return True
def mutate(genes, fnGetFitness, mutationOperators, mutationRoundCounts):
initialFitness = fnGetFitness(genes)
count = random.choice(mutationRoundCounts)
for i in range(1, count + 2):
copy = mutationOperators[:]
func = random.choice(copy)
while not func(genes):
copy.remove(func)
func = random.choice(copy)
if fnGetFitness(genes) > initialFitness:
mutationRoundCounts.append(i)
return
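# Note on the adaptive round count (editorial sketch, not original code):
# mutate applies up to count + 1 randomly chosen operators, retrying with a
# different operator whenever one returns False. As soon as the fitness
# improves after i rounds, i is appended to mutationRoundCounts, so round
# counts that paid off in the past become more likely to be drawn by
# random.choice on later calls.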
def create_geneset():
options = [[ContentType.Opponent, [0, 1, 2]],
[ContentType.Mine, [0, 1, 2]]]
geneset = [
RuleMetadata(RowContentFilter, options),
RuleMetadata(lambda expectedContent, count: TopRowFilter(), options),
RuleMetadata(lambda expectedContent, count: MiddleRowFilter(),
options),
RuleMetadata(lambda expectedContent, count: BottomRowFilter(),
options),
RuleMetadata(ColumnContentFilter, options),
RuleMetadata(lambda expectedContent, count: LeftColumnFilter(),
options),
RuleMetadata(lambda expectedContent, count: MiddleColumnFilter(),
options),
RuleMetadata(lambda expectedContent, count: RightColumnFilter(),
options),
RuleMetadata(DiagonalContentFilter, options),
RuleMetadata(lambda expectedContent, count: DiagonalLocationFilter(),
options),
RuleMetadata(lambda expectedContent, count: CornerFilter()),
RuleMetadata(lambda expectedContent, count: SideFilter()),
RuleMetadata(lambda expectedContent, count: CenterFilter()),
RuleMetadata(lambda expectedContent, count:
RowOppositeFilter(expectedContent), options,
needsSpecificContent=True),
RuleMetadata(lambda expectedContent, count: ColumnOppositeFilter(
expectedContent), options, needsSpecificContent=True),
RuleMetadata(lambda expectedContent, count: DiagonalOppositeFilter(
expectedContent), options, needsSpecificContent=True),
]
genes = list()
for gene in geneset:
genes.extend(gene.create_rules())
print("created " + str(len(genes)) + " genes")
return genes
class TicTacToeTests(unittest.TestCase):
def test_perfect_knowledge(self):
minGenes = 10
maxGenes = 20
geneset = create_geneset()
startTime = datetime.datetime.now()
def fnDisplay(candidate):
display(candidate, startTime)
def fnGetFitness(genes):
return get_fitness(genes)
mutationRoundCounts = [1]
mutationOperators = [
partial(mutate_add, geneset=geneset),
partial(mutate_replace, geneset=geneset),
mutate_remove,
mutate_swap_adjacent,
mutate_move,
]
def fnMutate(genes):
mutate(genes, fnGetFitness, mutationOperators, mutationRoundCounts)
def fnCrossover(parent, donor):
child = parent[0:int(len(parent) / 2)] + \
donor[int(len(donor) / 2):]
fnMutate(child)
return child
def fnCreate():
return random.sample(geneset, random.randrange(minGenes, maxGenes))
optimalFitness = Fitness(620, 120, 0, 11)
best = genetic.get_best(fnGetFitness, minGenes, optimalFitness, None,
fnDisplay, fnMutate, fnCreate, maxAge=500,
poolSize=20, crossover=fnCrossover)
self.assertTrue(not optimalFitness > best.Fitness)
    def test_tournament(self):
minGenes = 10
maxGenes = 20
geneset = create_geneset()
startTime = datetime.datetime.now()
def fnDisplay(genes, wins, ties, losses, generation):
print("-- generation {} --".format(generation))
display(genetic.Chromosome(genes,
Fitness(wins, ties, losses, len(genes)),
None), startTime)
mutationRoundCounts = [1]
mutationOperators = [
partial(mutate_add, geneset=geneset),
partial(mutate_replace, geneset=geneset),
mutate_remove,
mutate_swap_adjacent,
mutate_move,
]
def fnMutate(genes):
mutate(genes, lambda x: 0, mutationOperators, mutationRoundCounts)
def fnCrossover(parent, donor):
child = parent[0:int(len(parent) / 2)] + \
donor[int(len(donor) / 2):]
fnMutate(child)
return child
def fnCreate():
return random.sample(geneset, random.randrange(minGenes, maxGenes))
def fnSortKey(genes, wins, ties, losses):
return -1000 * losses - ties + 1 / len(genes)
genetic.tournament(fnCreate, fnCrossover, play1on1, fnDisplay,
fnSortKey, 13)
class ContentType:
Empty = 'EMPTY'
Mine = 'MINE'
Opponent = 'OPPONENT'
class Square:
def __init__(self, index, content=ContentType.Empty):
self.Content = content
self.Index = index
self.Diagonals = []
# board layout is
# 1 2 3
# 4 5 6
# 7 8 9
self.IsCenter = False
self.IsCorner = False
self.IsSide = False
self.IsTopRow = False
self.IsMiddleRow = False
self.IsBottomRow = False
self.IsLeftColumn = False
self.IsMiddleColumn = False
self.IsRightColumn = False
self.Row = None
self.Column = None
self.DiagonalOpposite = None
self.RowOpposite = None
self.ColumnOpposite = None
if index == 1 or index == 2 or index == 3:
self.IsTopRow = True
self.Row = [1, 2, 3]
elif index == 4 or index == 5 or index == 6:
self.IsMiddleRow = True
self.Row = [4, 5, 6]
elif index == 7 or index == 8 or index == 9:
self.IsBottomRow = True
self.Row = [7, 8, 9]
if index % 3 == 1:
self.Column = [1, 4, 7]
self.IsLeftColumn = True
elif index % 3 == 2:
self.Column = [2, 5, 8]
self.IsMiddleColumn = True
elif index % 3 == 0:
self.Column = [3, 6, 9]
self.IsRightColumn = True
if index == 5:
self.IsCenter = True
else:
if index == 1 or index == 3 or index == 7 or index == 9:
self.IsCorner = True
elif index == 2 or index == 4 or index == 6 or index == 8:
self.IsSide = True
if index == 1:
self.RowOpposite = 3
self.ColumnOpposite = 7
self.DiagonalOpposite = 9
elif index == 2:
self.ColumnOpposite = 8
elif index == 3:
self.RowOpposite = 1
self.ColumnOpposite = 9
self.DiagonalOpposite = 7
elif index == 4:
self.RowOpposite = 6
elif index == 6:
self.RowOpposite = 4
elif index == 7:
self.RowOpposite = 9
self.ColumnOpposite = 1
self.DiagonalOpposite = 3
elif index == 8:
self.ColumnOpposite = 2
else: # index == 9
self.RowOpposite = 7
self.ColumnOpposite = 3
self.DiagonalOpposite = 1
if index == 1 or self.DiagonalOpposite == 1 or self.IsCenter:
self.Diagonals.append([1, 5, 9])
if index == 3 or self.DiagonalOpposite == 3 or self.IsCenter:
self.Diagonals.append([7, 5, 3])
class Rule:
def __init__(self, descriptionPrefix, expectedContent=None, count=None):
self.DescriptionPrefix = descriptionPrefix
self.ExpectedContent = expectedContent
self.Count = count
def __str__(self):
result = self.DescriptionPrefix + " "
if self.Count is not None:
result += str(self.Count) + " "
if self.ExpectedContent is not None:
result += self.ExpectedContent + " "
return result
class RuleMetadata:
def __init__(self, create, options=None, needsSpecificContent=True,
needsSpecificCount=True):
if options is None:
needsSpecificContent = False
needsSpecificCount = False
if needsSpecificCount and not needsSpecificContent:
raise ValueError('needsSpecificCount is only valid if needsSpecificContent is true')
self.create = create
self.options = options
self.needsSpecificContent = needsSpecificContent
self.needsSpecificCount = needsSpecificCount
def create_rules(self):
option = None
count = None
seen = set()
if self.needsSpecificContent:
rules = list()
for optionInfo in self.options:
option = optionInfo[0]
if self.needsSpecificCount:
optionCounts = optionInfo[1]
for count in optionCounts:
gene = self.create(option, count)
if str(gene) not in seen:
seen.add(str(gene))
rules.append(gene)
else:
gene = self.create(option, None)
if str(gene) not in seen:
seen.add(str(gene))
rules.append(gene)
return rules
else:
return [self.create(option, count)]
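    # Expansion example (illustrative, not generated by the code above):
    # RuleMetadata(RowContentFilter, options) with
    # options = [[ContentType.Opponent, [0, 1, 2]], [ContentType.Mine, [0, 1, 2]]]
    # yields six distinct rules such as "its ROW has 0 OPPONENT" or
    # "its ROW has 2 MINE"; duplicates are dropped via the str(gene) `seen` set.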
class ContentFilter(Rule):
def __init__(self, description, expectedContent, expectedCount,
getValueFromSquare):
super().__init__(description, expectedContent, expectedCount)
self.getValueFromSquare = getValueFromSquare
def get_matches(self, board, squares):
result = set()
for square in squares:
m = list(map(lambda i: board[i].Content,
self.getValueFromSquare(square)))
if m.count(self.ExpectedContent) == self.Count:
result.add(square.Index)
return result
class RowContentFilter(ContentFilter):
def __init__(self, expectedContent, expectedCount):
super().__init__("its ROW has", expectedContent, expectedCount,
lambda s: s.Row)
class ColumnContentFilter(ContentFilter):
def __init__(self, expectedContent, expectedCount):
super().__init__("its COLUMN has", expectedContent, expectedCount,
lambda s: s.Column)
class LocationFilter(Rule):
def __init__(self, expectedLocation, containerDescription, func):
super().__init__(
"is in " + expectedLocation + " " + containerDescription)
self.func = func
def get_matches(self, board, squares):
result = set()
for square in squares:
if self.func(square):
result.add(square.Index)
return result
class RowLocationFilter(LocationFilter):
def __init__(self, expectedLocation, func):
super().__init__(expectedLocation, "ROW", func)
class ColumnLocationFilter(LocationFilter):
def __init__(self, expectedLocation, func):
super().__init__(expectedLocation, "COLUMN", func)
class TopRowFilter(RowLocationFilter):
def __init__(self):
super().__init__("TOP", lambda square: square.IsTopRow)
class MiddleRowFilter(RowLocationFilter):
def __init__(self):
super().__init__("MIDDLE", lambda square: square.IsMiddleRow)
class BottomRowFilter(RowLocationFilter):
def __init__(self):
super().__init__("BOTTOM", lambda square: square.IsBottomRow)
class LeftColumnFilter(ColumnLocationFilter):
def __init__(self):
super().__init__("LEFT", lambda square: square.IsLeftColumn)
class MiddleColumnFilter(ColumnLocationFilter):
def __init__(self):
super().__init__("MIDDLE", lambda square: square.IsMiddleColumn)
class RightColumnFilter(ColumnLocationFilter):
def __init__(self):
super().__init__("RIGHT", lambda square: square.IsRightColumn)
class DiagonalLocationFilter(LocationFilter):
def __init__(self):
super().__init__("DIAGONAL", "",
lambda square: not (square.IsMiddleRow or
square.IsMiddleColumn) or
square.IsCenter)
class DiagonalContentFilter(Rule):
def __init__(self, expectedContent, count):
super().__init__("its DIAGONAL has", expectedContent, count)
def get_matches(self, board, squares):
result = set()
for square in squares:
for diagonal in square.Diagonals:
m = list(map(lambda i: board[i].Content, diagonal))
if m.count(self.ExpectedContent) == self.Count:
result.add(square.Index)
break
return result
class WinFilter(Rule):
def __init__(self, content):
        super().__init__("WIN" if content == ContentType.Mine
                         else "block OPPONENT WIN")
self.rowRule = RowContentFilter(content, 2)
self.columnRule = ColumnContentFilter(content, 2)
self.diagonalRule = DiagonalContentFilter(content, 2)
def get_matches(self, board, squares):
inDiagonal = self.diagonalRule.get_matches(board, squares)
if len(inDiagonal) > 0:
return inDiagonal
inRow = self.rowRule.get_matches(board, squares)
if len(inRow) > 0:
return inRow
inColumn = self.columnRule.get_matches(board, squares)
return inColumn
class DiagonalOppositeFilter(Rule):
def __init__(self, expectedContent):
super().__init__("DIAGONAL-OPPOSITE is", expectedContent)
def get_matches(self, board, squares):
result = set()
for square in squares:
if square.DiagonalOpposite is None:
continue
if board[square.DiagonalOpposite].Content == self.ExpectedContent:
result.add(square.Index)
return result
class RowOppositeFilter(Rule):
def __init__(self, expectedContent):
super().__init__("ROW-OPPOSITE is", expectedContent)
def get_matches(self, board, squares):
result = set()
for square in squares:
if square.RowOpposite is None:
continue
if board[square.RowOpposite].Content == self.ExpectedContent:
result.add(square.Index)
return result
class ColumnOppositeFilter(Rule):
def __init__(self, expectedContent):
super().__init__("COLUMN-OPPOSITE is", expectedContent)
def get_matches(self, board, squares):
result = set()
for square in squares:
if square.ColumnOpposite is None:
continue
if board[square.ColumnOpposite].Content == self.ExpectedContent:
result.add(square.Index)
return result
class CenterFilter(Rule):
def __init__(self):
super().__init__("is in CENTER")
@staticmethod
def get_matches(board, squares):
result = set()
for square in squares:
if square.IsCenter:
result.add(square.Index)
return result
class CornerFilter(Rule):
def __init__(self):
super().__init__("is a CORNER")
@staticmethod
def get_matches(board, squares):
result = set()
for square in squares:
if square.IsCorner:
result.add(square.Index)
return result
class SideFilter(Rule):
def __init__(self):
super().__init__("is SIDE")
@staticmethod
def get_matches(board, squares):
result = set()
for square in squares:
if square.IsSide:
result.add(square.Index)
return result
iHaveThreeInRow = RowContentFilter(ContentType.Mine, 3)
iHaveThreeInColumn = ColumnContentFilter(ContentType.Mine, 3)
iHaveThreeInDiagonal = DiagonalContentFilter(ContentType.Mine, 3)
opponentHasTwoInARow = WinFilter(ContentType.Opponent)
class Fitness:
def __init__(self, wins, ties, losses, geneCount):
self.Wins = wins
self.Ties = ties
self.Losses = losses
totalGames = wins + ties + losses
percentWins = 100 * round(wins / totalGames, 3)
percentLosses = 100 * round(losses / totalGames, 3)
percentTies = 100 * round(ties / totalGames, 3)
self.PercentTies = percentTies
self.PercentWins = percentWins
self.PercentLosses = percentLosses
self.GeneCount = geneCount
def __gt__(self, other):
if self.PercentLosses != other.PercentLosses:
return self.PercentLosses < other.PercentLosses
if self.Losses > 0:
return False
if self.Ties != other.Ties:
return self.Ties < other.Ties
return self.GeneCount < other.GeneCount
def __str__(self):
return "{:.1f}% Losses ({}), {:.1f}% Ties ({}), {:.1f}% Wins ({}), {} rules".format(
self.PercentLosses,
self.Losses,
self.PercentTies,
self.Ties,
self.PercentWins,
self.Wins,
self.GeneCount)
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
# -*- coding=utf-8 -*-
from __future__ import unicode_literals, absolute_import
import sys
import json
import hashlib
import logging
# noinspection PyProtectedMember, PyPackageRequirements
from pip._vendor import requests
import os
logger = logging.getLogger(__name__)
__version__ = '1.0.4'
def main(args=None):
if args is None:
args = sys.argv[1:]
if '-v' in args:
args.pop(args.index('-v'))
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
logging.getLogger('pip._vendor.requests').setLevel(logging.CRITICAL)
else:
logging.basicConfig(stream=sys.stderr, level=logging.ERROR)
logging.getLogger('pip._vendor.requests').setLevel(logging.CRITICAL)
if '-h' in args or '--help' in args:
if '-h' in args:
args.pop(args.index('-h'))
if '--help' in args:
args.pop(args.index('--help'))
OvhApiCli.help()
return 1
try:
        logger.info('args: %r' % args)
cli = OvhApiCli()
if len(args) > 0 and args[0].startswith('--complete'):
cli.parse_args(args[1:])
cur = ''
if '=' in args[0]:
cur = args[0].split('=', 1)[1]
logger.info('cur: %s' % cur)
options = cli.autocomplete(cur)
print(' '.join(options))
else:
cli.parse_args(args)
try:
data = cli.run()
except AttributeError:
return OvhApiCli.help()
print(json.dumps(data, indent=2))
return 0
except Exception as err:
logger.exception(err)
logger.fatal('Fatal error occurred: %s' % err)
return 1
# noinspection PyMethodMayBeStatic
class OvhApiCli(object):
@staticmethod
def help():
print('Usage: ovhcli METHOD PATH [--args=value,]\n'
'Tab completion can list available path, method and arguments')
return 0
def __init__(self, method=None, path=None, **kwargs):
self.method = method
self.path = path
self.args = kwargs
def __str__(self):
return 'OVH_API_CLI(method=%r, path=%r, **%r)' % (self.method, self.path, self.args)
def _sanitize_arg(self, arg):
if (arg.startswith('"') and arg.endswith('"')) or \
(arg.startswith("'") and arg.endswith("'")):
arg = arg[1:-1]
        if '\\{' in arg or '\\}' in arg or '\\ ' in arg:
            arg = arg.replace('\\{', '{') \
                .replace('\\}', '}') \
                .replace('\\ ', ' ')
return arg
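    # Sanitization example (illustrative): the shell completion helpers hand in
    # arguments with quotes and escaped braces, so
    #   _sanitize_arg("'/hosting/reseller/\{serviceName\}'")
    # yields '/hosting/reseller/{serviceName}'.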
def parse_args(self, args):
for arg in args:
arg = self._sanitize_arg(arg)
if arg.lower() in ['get', 'put', 'post', 'delete']:
self.method = arg.lower()
elif arg.startswith('/'):
self.path = arg
elif arg.startswith('--') and '=' in arg:
k, v = arg.split('=', 1)
self.args[k[2:]] = v
else:
                logger.warning('Ignoring unrecognized argument %r' % arg)
logger.debug(self)
def autocomplete(self, cur):
cur = self._sanitize_arg(cur)
options = self.__autocomplete(cur)
return [o for o in options if o.startswith(cur)]
def run(self, schema=None):
if not self.path:
raise AttributeError('No path to query')
if not self.method:
raise AttributeError('No method to query')
if not schema:
root = requests.get('https://api.ovh.com/1.0/').json()
root_paths = [api.get('path') for api in root.get('apis')]
root_path = next((path for path in root_paths if self.path.startswith(path)), None)
schema = requests.get('https://api.ovh.com/1.0%s.json' % root_path).json()
# retrieve param list
api = next(api for api in schema.get('apis')
if api.get('path') == self.path)
op = next(op for op in api.get('operations')
if op.get('httpMethod').lower() == self.method)
arguments = [param for param in op.get('parameters')]
path_params = {}
query_params = {}
body_params = {}
for arg in arguments:
arg_type = arg.get('paramType')
if arg_type == 'path':
if arg.get('name') not in self.args:
raise Exception('Missing required path parameter %r' % arg.get('name'))
path_params[arg.get('name')] = self.args.get(arg.get('name'))
else:
if arg_type == 'query':
params = query_params
else:
params = body_params
if arg.get('required', 0) and (arg.get('name') in self.args or arg.get('default')):
params[arg.get('name')] = self.args.get(arg.get('name')) or arg.get('default')
else:
params[arg.get('name')] = self.args.get(arg.get('name')) or arg.get('default') or ''
query_path = self.path.format(**path_params)
return self.signed_call(self.method, query_path, query_params, body_params or None)
def signed_call(self, method, path, query_params=None, body_params=None):
if query_params is None:
query_params = {}
credentials = self.get_credentials()
time = requests.get('https://eu.api.ovh.com/1.0/auth/time').content.decode('utf8')
req = requests.Request(
method, 'https://eu.api.ovh.com/1.0%s' % path, headers={
'X-Ovh-Application': credentials.get('AK'),
'X-Ovh-Timestamp': time,
'X-Ovh-Consumer': credentials.get('CK')
}, params=query_params, json=body_params
)
prepped = req.prepare()
signature_str = '+'.join([
credentials.get('AS'),
credentials.get('CK'),
prepped.method,
prepped.url,
prepped.body or '',
time]).encode('utf8')
prepped.headers['X-Ovh-Signature'] = '$1$' + hashlib.sha1(signature_str).hexdigest()
res = requests.Session().send(prepped)
return res.json()
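    # Signature sketch (mirrors the code above; AS/CK are placeholders read
    # from the credentials file):
    #   'X-Ovh-Signature' = '$1$' + sha1(AS + '+' + CK + '+' + METHOD + '+' +
    #                                    FULL_URL + '+' + BODY + '+' + TIMESTAMP)
    # where METHOD, FULL_URL and BODY come from the prepared request and
    # TIMESTAMP from /auth/time.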
def __autocomplete(self, cur):
root = requests.get('https://api.ovh.com/1.0/').json()
root_paths = [api.get('path') for api in root.get('apis')]
root_path = next((path for path in root_paths if (self.path or cur).startswith(path)), None)
if not self.path:
# if we are selecting a path, and root_path already present
if cur and root_path and cur.startswith(root_path):
pass
else:
return root_paths
# we did not match...
if not root_path:
return []
schema = requests.get('https://api.ovh.com/1.0%s.json' % root_path).json()
# we are on a path
if cur.startswith('/'):
if self.path: # if trying to add path twice
return []
else:
return self.__autocomplete_path(schema, cur)
# we are on an arguments
elif cur.startswith('--'):
if not self.path or not self.method:
return []
if '=' in cur:
return self.__autocomplete_arguments_value(schema, cur)
return self.__autocomplete_arguments(schema)
# we are on nothing
elif cur == '':
if self.path and self.method:
return self.__autocomplete_arguments(schema)
elif self.path.endswith('/'):
return self.__autocomplete_path(schema, cur)
        # already got a method, no need to complete it again
if self.method:
return []
return self.__autocomplete_method(schema, cur)
def __autocomplete_path(self, schema, cur):
available_paths = [api.get('path') for api in schema.get('apis') if api.get('path').startswith(cur)]
# reduce with only lowest paths
# only keep /test if present and remove /test*
sorted_path = sorted(available_paths)
available_paths = []
# brute force ... may be refactored
for path in sorted_path:
dodge = False
for available_path in available_paths:
if path.startswith(available_path) and available_path != cur:
dodge = True
if dodge:
continue
available_paths.append(path)
return available_paths
def __autocomplete_method(self, schema, cur):
api = next(api for api in schema.get('apis')
if api.get('path') == self.path)
if not api:
return []
methods = [op.get('httpMethod') for op in api.get('operations')]
if cur.islower():
methods = [m.lower() for m in methods]
return methods
def __autocomplete_arguments(self, schema):
api = next(api for api in schema.get('apis')
if api.get('path') == self.path)
op = next(op for op in api.get('operations')
if op.get('httpMethod').lower() == self.method)
arguments = [param.get('name') for param in op.get('parameters')
if param.get('name') not in self.args]
return ['--%s=' % arg for arg in arguments]
def __autocomplete_arguments_value(self, schema, cur):
api = next(api for api in schema.get('apis')
if api.get('path') == self.path)
op = next(op for op in api.get('operations')
if op.get('httpMethod').lower() == self.method)
param = next(param for param in op.get('parameters')
if cur[2:-1] == param.get('name'))
if param.get('paramType') == 'path':
try:
return self.__autocomplete_arguments_value_path(param)
except Exception as err:
                logger.warning(err)
return [param.get('name')]
        # TODO: add completion for other param types...
return []
def __autocomplete_arguments_value_path(self, param):
arg_path = self.path[0:self.path.index(param.get('name')) - 1]
# noinspection PyBroadException
try:
            arg_path.format(**self.args)  # raises if not all previous params are present
        except Exception:
raise Exception('Not all previous params are present')
data = self.signed_call('GET', arg_path)
if not isinstance(data, list):
            raise Exception('Unable to list for path param %r, API did not return a list' % param.get('name'))
return ['--%s=%s' % (param.get('name'), o) for o in data]
def get_credentials(self):
credentials_path = os.path.expanduser('~/.ovhcli')
with open(credentials_path, 'r+') as f:
data = json.load(f)
for i in ['AK', 'AS', 'CK']:
if i not in data:
raise Exception('Need %r in %s' % (i, credentials_path))
return data
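    # Expected ~/.ovhcli layout (illustrative values):
    #   {"AK": "<application key>", "AS": "<application secret>", "CK": "<consumer key>"}
    # get_credentials() only checks that the three keys are present; any other
    # keys in the file are ignored.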
"""
if __name__ == '__main__':
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
logging.getLogger('pip._vendor.requests').setLevel(logging.CRITICAL)
test_args = [
[],
['/'],
['/hosting/reseller'],
['/hosting/reseller/\{serviceName\}'],
['/hosting/reseller/{serviceName}/serviceInfos'],
['/hosting/reseller/{serviceName}/serviceInfos', 'GET'],
]
test_set = [
'', 'a', 'g', '/', '/h', '/hosting', '/hosting/resel', '/hosting/reseller/\{serviceName\}',
'/hosting/reseller', '/hosting/reseller/', '/hosting/reseller/{serviceName}',
'/hosting/reseller/{serviceName}/', '--serviceName='
]
for a in test_args:
print('\nTESTARGS(%r)' % a)
c = OvhApiCli()
c.parse_args(a)
for t in test_set:
print('TEST(%r): %s' % (t, ' '.join(c.autocomplete(t))))
print('done')
c = OvhApiCli()
c.parse_args(['/hosting/reseller/{serviceName}/serviceInfos', 'GET', '--serviceName=hr-os5651-2'])
data = c.run()
print(json.dumps(data, indent=2))
#"""
|
|
from wpc import db, app, login_manager
from wpc.utils import requests_get_with_retries
from flask_login import UserMixin, current_user
from sqlalchemy.orm.properties import ColumnProperty
from flask import url_for
import humanize
from datetime import datetime, timedelta
from bs4 import BeautifulSoup
@login_manager.user_loader
def load_user(reddit_username):
return Streamer.query.filter_by(reddit_username=reddit_username).first()
stream_tag = db.Table('stream_tag',
db.Column('stream_id', db.Integer(), db.ForeignKey('stream.id')),
db.Column('tag_name', db.String(256), db.ForeignKey('tag.name')))
stream_sub = db.Table('stream_sub',
db.Column('stream_id', db.Integer(), db.ForeignKey('stream.id')),
db.Column('submission_id', db.String(6), db.ForeignKey('submission.submission_id')))
class Submission(db.Model):
submission_id = db.Column(db.String(6), primary_key=True)
recording_available = db.Column(db.Boolean())
def __repr__(self):
return '<Submission %r>' % (self.submission_id)
class Stream(db.Model):
id = db.Column(db.Integer, primary_key=True)
type = db.Column(db.String(50))
scheduled_start_time = db.Column(db.DateTime())
actual_start_time = db.Column(db.DateTime())
status = db.Column(db.Enum('upcoming', 'live', 'completed', name='stream_status'))
title = db.Column(db.String(200))
submissions = db.relationship('Submission', secondary=stream_sub, backref=db.backref('streams', lazy='dynamic'))
streamer_id = db.Column('streamer_id', db.Integer(), db.ForeignKey('streamer.id'))
streamer = db.relationship('Streamer', backref=db.backref('streams', lazy='dynamic'))
tags = db.relationship('Tag', secondary=stream_tag, backref=db.backref('streams', lazy='dynamic'))
current_viewers = db.Column(db.Integer)
confstream = db.Column(db.Boolean(), default=False)
__mapper_args__ = {
'polymorphic_on': type,
'polymorphic_identity': 'stream'
}
def format_start_time(self, countdown=True, start_time=True):
if not self.scheduled_start_time or (not countdown and not start_time):
return None
if countdown:
return humanize.naturaltime(datetime.utcnow() - self.scheduled_start_time) +\
((", " + datetime.strftime(self.scheduled_start_time, "%Y-%m-%d %H:%M UTC")) if start_time else "")
else:
return datetime.strftime(self.scheduled_start_time, "%Y-%m-%d %H:%M UTC")
def add_submission(self, submission):
if submission not in self.submissions:
self.submissions.append(submission)
class WPCStream(Stream):
channel_name = db.Column(db.String(30), unique=True)
def __init__(self, channel_name):
self.status = 'upcoming'
self.channel_name = channel_name
self.submissions = []
def __eq__(self, other):
return type(self) == type(other) and self.channel_name == other.channel_name
def __hash__(self):
return hash(self.channel_name)
def __repr__(self):
return '<WPC Stream %d %r>' % (self.id, self.channel_name)
def _update_status(self):
app.logger.info("Updating status for {}".format(self))
try:
r = requests_get_with_retries(
"http://{}:{}@{}/stat".format(
app.config['RTMP_LOGIN'], app.config['RTMP_PASSWORD'], app.config['RTMP_SERVER']))
r.raise_for_status()
except Exception as e:
app.logger.error("Error while updating {}".format(self))
app.logger.exception(e)
raise
soup = BeautifulSoup(r.content, 'xml')
for stream in soup.find_all('stream'):
if stream.find('name').string == self.channel_name:
client_num = int(stream.find('nclients').string)
is_live = stream.find('codec')
if is_live:
self.status = 'live'
self.current_viewers = client_num - 1
if self.actual_start_time is None:
self.actual_start_time = datetime.utcnow()
# workaround for situations when update_state changes status before streamer get authorization
elif self.status == 'live' and (self.actual_start_time is None or datetime.utcnow() - self.actual_start_time > timedelta(seconds=30)):
self.status = 'completed'
self.actual_start_time = None
self.current_viewers = None
break
# same workaround
else:
if self.status == 'live' and (self.actual_start_time is None or datetime.utcnow() - self.actual_start_time > timedelta(seconds=30)):
self.status = 'completed'
self.actual_start_time = None
self.current_viewers = None
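    # Shape of the nginx-rtmp /stat XML the parser above relies on (simplified,
    # element names taken from the lookups above):
    #   <stream><name>channel</name><nclients>2</nclients><codec>...</codec></stream>
    # A <codec> element marks a live publisher, and nclients counts the
    # publisher plus the viewers, hence the `client_num - 1` above.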
def normal_url(self):
return url_for('.streamer_page', streamer_name=self.streamer.reddit_username, _external=True)
def html_code(self, autoplay=False):
return """
<div id="{0}">Loading the player...</div>
<script type="text/javascript">
jwplayer("{0}").setup({{
playlist: [{{
sources: [{{
file: 'rtmp://{3}/live/flv:{0}'
}},{{
file: "http://{3}/hls/{0}.m3u8"
}}]
}}],
width: "640",
height: "390",
autostart: {1},
androidhls: true,
rtmp: {{
bufferlength: 0.4
}}
}});
jwplayer("{0}").onBuffer(function(){{
theTimeout{0} = setTimeout(function(){{
var playlistItem = jwplayer("{0}").getPlaylistItem(0);
playlistItem.image = "{2}";
jwplayer("{0}").load([playlistItem]);
}},7000);
}});
jwplayer("{0}").onPlay(function(){{
clearTimeout(theTimeout{0});
}});
</script>
""".format(self.channel_name, "true" if autoplay else "false", url_for("static", filename="dragon_is_offline.png"), app.config['RTMP_SERVER'])
def _get_flair(self):
fst = self.format_start_time(start_time=False)
status_to_flair = {"live": (u"Live", u"one"),
"completed": (u"Finished", u"three"),
"upcoming": (fst if fst else u"Upcoming", u"two"),
None: (None, None)}
return status_to_flair[self.status]
def add_submission(self, submission):
if submission not in self.submissions:
self.status = 'upcoming'
self.scheduled_start_time = None
self.actual_start_time = None
Stream.add_submission(self, submission)
__mapper_args__ = {
'polymorphic_identity': 'wpc_stream'
}
class YoutubeStream(Stream):
ytid = db.Column(db.String(11), unique=True)
def __init__(self, id):
self.ytid = id
self.submissions = []
def __eq__(self, other):
return type(self) == type(other) and self.ytid == other.ytid
def __hash__(self):
return hash(self.ytid)
def __repr__(self):
return '<YoutubeStream %d %r>' % (self.id, self.ytid)
def _update_status(self):
app.logger.info("Updating status for {}".format(self))
try:
r = requests_get_with_retries(
"https://www.googleapis.com/youtube/v3/videos?id={}&part=snippet,liveStreamingDetails&key={}".format(
self.ytid, app.config['YOUTUBE_KEY']), retries_num=15)
r.raise_for_status()
except Exception as e:
app.logger.error("Error while updating {}".format(self))
app.logger.exception(e)
raise
if not r.json()['items']:
self.status = 'completed'
self.current_viewers = None
return
for item in r.json()['items']:
self.title = item['snippet']['title']
if 'liveStreamingDetails' in item:
self.scheduled_start_time = item['liveStreamingDetails']['scheduledStartTime']
if 'concurrentViewers' in item['liveStreamingDetails']:
self.current_viewers = item['liveStreamingDetails']['concurrentViewers']
if item['snippet']['liveBroadcastContent'] == 'live':
self.status = 'live'
if 'actualStartTime' in item['liveStreamingDetails']:
self.actual_start_time = item['liveStreamingDetails']['actualStartTime']
else: # Youtube is weird, and sometimes this happens. If there is no actual start time, then we fall back to scheduledStartTime
self.actual_start_time = item['liveStreamingDetails']['scheduledStartTime']
elif item['snippet']['liveBroadcastContent'] == 'upcoming':
self.status = 'upcoming'
else:
self.status = 'completed'
self.current_viewers = None
# add channel to streamer table if it's needed and fix if it's needed
if self.streamer is not None:
yc = item['snippet']['channelId']
streamer = Streamer.query.filter_by(youtube_channel=yc).first()
# if there is streamer with that channel
if streamer:
self.streamer = streamer
# there is no streamer with that channel
elif not self.streamer.checked:
self.streamer.youtube_channel = yc
self.streamer.youtube_name = item['snippet']['channelTitle']
def _get_flair(self):
fst = self.format_start_time(start_time=False)
status_to_flair = {"live": (u"Live", u"one"),
"completed": (u"Recording Available", u"four"),
"upcoming": (fst if fst else u"Upcoming", u"two"),
None: (None, None)}
return status_to_flair[self.status]
def normal_url(self):
return "http://www.youtube.com/watch?v={}".format(self.ytid)
def html_code(self, autoplay=False):
return """
<iframe width="640" height="390"
src="http://www.youtube.com/embed/{}?rel=0&autoplay={}">
</iframe>
""".format(self.ytid, int(autoplay))
__mapper_args__ = {
'polymorphic_identity': 'youtube_stream'
}
class TwitchStream(Stream):
channel = db.Column(db.String(25), unique=True)
last_time_live = db.Column(db.DateTime())
def __init__(self, channel):
self.channel = channel
self.status = 'upcoming'
self.submissions = []
def __eq__(self, other):
return type(self) == type(other) and self.channel == other.channel
def __hash__(self):
return hash(self.channel)
def __repr__(self):
return '<TwitchStream {} {}>'.format(self.id, self.channel)
def _update_title_from_channel(self):
r = requests_get_with_retries("https://api.twitch.tv/kraken/channels/{}".format(self.channel))
r.raise_for_status()
stream = r.json()
if stream is not None:
if stream['status'] is not None:
self.title = stream['status']
def _update_status(self):
app.logger.info("Updating status for {}".format(self))
try:
r = requests_get_with_retries("https://api.twitch.tv/kraken/streams/{}".format(self.channel))
r.raise_for_status()
except Exception as e:
app.logger.error("Error while updating {}".format(self))
app.logger.exception(e)
raise
stream = r.json()['stream']
if stream is not None:
self.status = 'live'
self.title = stream['channel']['status']
self.current_viewers = stream['viewers']
self.last_time_live = datetime.utcnow()
if self.actual_start_time is None:
self.actual_start_time = self.last_time_live
else:
if self.status == 'live':
# this is workaround for situations like stream going offline shortly
if datetime.utcnow() - self.last_time_live > timedelta(minutes=12):
self.status = 'completed'
self.current_viewers = None
if self.status == 'upcoming':
self._update_title_from_channel()
# add channel to streamer table if it's needed and fix if it's needed
if self.streamer is not None:
streamer = Streamer.query.filter_by(twitch_channel=self.channel).first()
# if there is streamer with that channel
if streamer:
self.streamer = streamer
# there is no streamer with that channel
elif not self.streamer.checked:
self.streamer.twitch_channel = self.channel
def _get_flair(self):
fst = self.format_start_time(start_time=False)
status_to_flair = {"live": (u"Live", u"one"),
"completed": (u"Finished", u"three"),
"upcoming": (fst if fst else u"Upcoming", u"two"),
None: (None, None)}
return status_to_flair[self.status]
def add_submission(self, submission):
if submission not in self.submissions:
self.status = 'upcoming'
self.scheduled_start_time = None
self.actual_start_time = None
Stream.add_submission(self, submission)
def normal_url(self):
return "http://www.twitch.tv/" + self.channel
def html_code(self, autoplay=False):
return """
<object type="application/x-shockwave-flash"
height="390"
width="640"
id="live_embed_player_flash"
data="http://www.twitch.tv/widgets/live_embed_player.swf?channel={}"
bgcolor="#000000">
<param name="allowFullScreen"
value="true" />
<param name="allowScriptAccess"
value="always" />
<param name="allowNetworking"
value="all" />
<param name="movie"
value="http://www.twitch.tv/widgets/live_embed_player.swf" />
<param name="flashvars"
value="hostname=www.twitch.tv&channel={}&auto_play={}" />
</object>
""".format(self.channel, self.channel, "true" if autoplay else "false")
__mapper_args__ = {
'polymorphic_identity': 'twitch_stream'
}
class MozillaStreamHack(object):
def html_code(self, autoplay=None):
return '''<iframe src="https://air.mozilla.org/the-joy-of-coding-mconley-livehacks-on-firefox-episode-6/video/" width="640" height="380" frameborder="0" allowfullscreen></iframe>''' # NOQA
def normal_url(self):
return "https://air.mozilla.org/the-joy-of-coding-mconley-livehacks-on-firefox-episode-6/"
class CaseInsensitiveComparator(ColumnProperty.Comparator):
def __eq__(self, other):
return db.func.lower(self.__clause_element__()) == db.func.lower(other)
class Subscriber(db.Model):
id = db.Column(db.Integer, primary_key=True)
email = db.column_property(db.Column(db.String(256), unique=True, nullable=False), comparator_factory=CaseInsensitiveComparator)
def __repr__(self):
return '<Subscriber %d %r>' % (self.id, self.email)
class Idea(db.Model):
id = db.Column(db.Integer, primary_key=True)
description = db.Column(db.Text(), nullable=False)
class ChatMessage(db.Model):
id = db.Column(db.Integer, primary_key=True)
sent_on = db.Column(db.DateTime, default=db.func.now())
streamer = db.relationship('Streamer', backref=db.backref('chat_messages', lazy='dynamic'))
streamer_id = db.Column('streamer_id', db.Integer(), db.ForeignKey('streamer.id'))
sender = db.Column(db.String())
text = db.Column(db.String())
class Streamer(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
reddit_username = db.column_property(db.Column(db.String(20), unique=True), comparator_factory=CaseInsensitiveComparator)
twitch_channel = db.column_property(db.Column(db.String(25), unique=True), comparator_factory=CaseInsensitiveComparator)
youtube_channel = db.Column(db.String(24), unique=True)
youtube_name = db.Column(db.String(30))
info = db.Column(db.Text())
checked = db.Column(db.Boolean(), default=False)
rtmp_secret = db.Column(db.String(50))
test = db.Column(db.Boolean(), default=False)
# XXX: this is kinda ugly, but simple
# nginx-rtmp supports only fixed number of redirects
# TODO: This should be fixed later
rtmp_redirect_1 = db.Column(db.String())
rtmp_redirect_2 = db.Column(db.String())
rtmp_redirect_3 = db.Column(db.String())
def __init__(self, reddit_username, checked=False):
self.reddit_username = reddit_username
self.checked = checked
def __repr__(self):
return '<Streamer %d %r>' % (self.id, self.reddit_username)
def get_id(self):
return self.reddit_username
def populate(self, form):
self.info = form.info.data
tc = form.twitch_channel_extract()
        # delete inappropriate tstream
if tc != self.twitch_channel:
ts = self.streams.filter_by(type='twitch_stream').first()
if ts:
ts.streamer = None
# rebind tstream
streamer = Streamer.query.filter_by(twitch_channel=tc).first()
if streamer and streamer != current_user:
streamer.twitch_channel = None
for ts in streamer.streams.filter_by(type='twitch_stream'):
ts.streamer = self
self.twitch_channel = tc if tc else None
yc = form.youtube_channel_extract()
        # delete inappropriate ystreams
if yc != self.youtube_channel:
for ys in self.streams.filter_by(type='youtube_stream'):
ys.streamer = None
# rebind ystreams
streamer = Streamer.query.filter_by(youtube_channel=yc).first()
if streamer and streamer != current_user:
# to not make api-requests
yn = streamer.youtube_name
if yn is not None:
self.youtube_name = yn
self.youtube_channel = streamer.youtube_channel
streamer.youtube_name = None
streamer.youtube_channel = None
for ys in streamer.streams.filter_by(type='youtube_stream'):
ys.streamer = self
# get yc name
if yc and (yc != self.youtube_channel or self.youtube_name is None):
try:
r = requests_get_with_retries(
"https://www.googleapis.com/youtube/v3/channels?id={}&part=snippet&key={}".format(
yc, app.config['YOUTUBE_KEY']), retries_num=15)
r.raise_for_status()
except Exception as e:
app.logger.error("Error while updating {}".format(self))
app.logger.exception(e)
raise
for item in r.json()['items']:
self.youtube_name = item['snippet']['title']
self.youtube_channel = yc if yc else None
class Tag(db.Model):
__tablename__ = 'tag'
name = db.column_property(db.Column(db.String(256), primary_key=True), comparator_factory=CaseInsensitiveComparator)
def __init__(self, name):
self.name = name
def __repr__(self):
return '<Tag {}>'.format(self.name)
def get_or_create(model, **kwargs):
instance = model.query.filter_by(**kwargs).first()
if instance is None:
instance = model(**kwargs)
db.session.add(instance)
return instance
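# Illustrative use of get_or_create (not part of the original module): fetch or
# create a Tag by its case-insensitive primary key and attach it to a stream.
#   tag = get_or_create(Tag, name='python')
#   stream.tags.append(tag)
#   db.session.commit()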
|
|
#!/usr/bin/env python
#
# Copyright (c) 2014 Chris Maxwell <chris@wrathofchris.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Thanks to Mahesh Paolini-Subramanya (@dieswaytoofast) for his help
#
# CONFIGURATION
# -------------
#
# Environment: (optional)
# SFL_USERNAME
# SFL_APIKEY
# SFL_CREDS_FILE
#
# ~/.softlayer_credentials:
# [softlayer]
# username=
# apikey=
#
import ConfigParser
import os
import os.path
import sys
from pprint import pprint
try:
import json
except ImportError:
import simplejson as json
try:
import SoftLayer
except ImportError:
print('SoftLayer required')
sys.exit(1)
SFL_CONFIG_SECTION = 'softlayer'
SFL_DEFAULT_CREDS_FILE = '~/.softlayer_credentials'
class SoftLayerInventory(object):
def __init__(self):
self.inventory = {} # Ansible Inventory
self.inventory['_meta'] = {}
self.inventory['_meta']['hostvars'] = {}
self.username = None
self.apikey = None
self.credsfile = None
self.setup_creds()
self.client = SoftLayer.Client(username=self.username,
api_key=self.apikey)
self.get_inventory()
print json.dumps(self.inventory)
def setup_creds(self):
if 'SFL_CREDS_FILE' in os.environ:
self.credsfile = os.path.expanduser(os.environ['SFL_CREDS_FILE'])
if not os.path.isfile(self.credsfile):
self.credsfile = None
if not self.credsfile:
self.credsfile = os.path.expanduser(SFL_DEFAULT_CREDS_FILE)
if not os.path.isfile(self.credsfile):
self.credsfile = None
if self.credsfile:
config = ConfigParser.SafeConfigParser()
config.read(self.credsfile)
self.username = config.get(SFL_CONFIG_SECTION, 'username')
self.apikey = config.get(SFL_CONFIG_SECTION, 'apikey')
# environment overrides config
if 'SFL_USERNAME' in os.environ:
self.username = os.environ['SFL_USERNAME']
if 'SFL_APIKEY' in os.environ:
self.apikey = os.environ['SFL_APIKEY']
if not self.username or not self.apikey:
sys.stderr.write('No environment set or no creds file %s\n'
% SFL_DEFAULT_CREDS_FILE)
sys.exit(1)
def get_inventory(self):
# NOTE: API is eventually consistent, but returns partial data during
# creation and deletion of instances
for v in self.client['Account'].getVirtualGuests(mask='datacenter, \
host, operatingSystem, orderedPackageId, powerState, \
serverRoom, sshKeys, status, tagReferences, userData, \
networkComponents'):
self.host = {}
self.host['sfl_launch_time'] = ''
if 'createDate' in v:
self.host['sfl_launch_time'] = v['createDate']
self.host['sfl_dns_name'] = ''
if 'fullyQualifiedDomainName' in v:
self.host['sfl_dns_name'] = v['fullyQualifiedDomainName']
self.host['sfl_id'] = v['id']
self.host['sfl_guid'] = v['globalIdentifier']
self.host['sfl_uuid'] = v['uuid']
self.host['sfl_state'] = v['powerState']['name']
self.host['sfl_ip_address'] = ''
if 'primaryIpAddress' in v:
self.host['sfl_ip_address'] = v['primaryIpAddress']
self.host['sfl_private_ip_address'] = ''
if 'primaryBackendIpAddress' in v:
self.host['sfl_private_ip_address'] = v['primaryBackendIpAddress']
self.host['sfl_cpu'] = v['maxCpu']
self.host['sfl_mem'] = v['maxMemory']
self.host['sfl_hostname'] = ''
if 'hostname' in v:
self.host['sfl_hostname'] = v['hostname']
self.host['sfl_domain'] = ''
if 'domain' in v:
self.host['sfl_domain'] = v['domain']
self.host['sfl_region'] = ''
if 'datacenter' in v:
self.host['sfl_region'] = v['datacenter']['name']
self.host['sfl_rack'] = ''
if 'serverRoom' in v:
self.host['sfl_rack'] = v['serverRoom']['name']
self.host['sfl_key_name'] = ''
if len(v['sshKeys']) > 0:
self.host['sfl_key_name'] = v['sshKeys'][0]['label']
self.host['sfl_kernel'] = ''
if 'operatingSystem' in v:
self.host['sfl_kernel'] = \
v['operatingSystem']['softwareLicense']['softwareDescription']['referenceCode']
# Create a usable type by mashing cpu/memory/network
# ie: 4 CPU, 8GB RAM, 100Mbit Net ==> c4m8n100
self.host['sfl_type'] = 'c%sm%s' % (v['maxCpu'],
v['maxMemory'] / 1024)
if 'networkComponents' in v:
if len(v['networkComponents']) > 0:
self.host['sfl_type'] += \
'n%s' % v['networkComponents'][0]['maxSpeed']
#
# Inventory Mappings
#
# XXX really want a reachable hostname here
hostkey = self.host['sfl_ip_address']
# host -> _meta.hostvars.fqdn
self.inventory['_meta']['hostvars'][hostkey] = self.host
# host -> RPTR (a.b.c.d-static.reverse.softlayer.com.)
ipbytes = self.host['sfl_ip_address'].split('.')
rptr = "%s-%s-%s-%s-static.reverse.softlayer.com" % (
ipbytes[3], ipbytes[2], ipbytes[1], ipbytes[0])
self.inventory[rptr] = list()
self.inventory[rptr].append(hostkey)
# host -> fqdn
if self.host['sfl_dns_name'] not in self.inventory:
self.inventory[ self.host['sfl_dns_name'] ] = list()
self.inventory[ self.host['sfl_dns_name'] ].append(hostkey)
# host -> domain
if self.host['sfl_domain'] not in self.inventory:
self.inventory[ self.host['sfl_domain'] ] = list()
self.inventory[ self.host['sfl_domain'] ].append(hostkey)
# host -> tags
if 'tagReferences' in v:
for t in v['tagReferences']:
if 'tag_' + t['tag']['name'] not in self.inventory:
self.inventory[ 'tag_' + t['tag']['name'] ] = list()
self.inventory[ 'tag_' + t['tag']['name'] ].append(hostkey)
# host -> DC
if self.host['sfl_region'] not in self.inventory:
self.inventory[ self.host['sfl_region'] ] = list()
self.inventory[ self.host['sfl_region'] ].append(hostkey)
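    # Rough shape of the emitted inventory (illustrative values): hosts are
    # keyed by their public IP and every group above is a list of those keys.
    #   {"_meta": {"hostvars": {"198.51.100.7": {"sfl_id": 123, "...": "..."}}},
    #    "7-100-51-198-static.reverse.softlayer.com": ["198.51.100.7"],
    #    "host1.example.com": ["198.51.100.7"],
    #    "example.com": ["198.51.100.7"],
    #    "tag_web": ["198.51.100.7"],
    #    "dal05": ["198.51.100.7"]}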
# Run!
SoftLayerInventory()
|
|
from numpy import mean
from cssvmutil import *
from eval import *
from subprocess import Popen,PIPE
cssvm_train='/home/arya/workspace/cssvm/svm-train'
cssvm_classify='/home/arya/workspace/cssvm/svm-predict'
path= '/home/arya/datasets/cssvm/'
measure={'CSA':'Risk','CSU':'AUC','IDL':'AUC','CSE':'Income'}
algs=['BM','BP','CS']
def get_range_for_algorithm(param):
RangeC, RangeG, RangeCp, RangeK= param['RangeC'], param['RangeG'], param['RangeCp'], param['RangeK']
if param['alg']=='BM':
param['SearchC'] =True
param['SearchG'] =True
param['SearchCp']=False
param['SearchK'] =False
if param['alg']=='BP':
param['SearchC'] =True
param['SearchG'] =False
param['SearchCp']=True
param['SearchK'] =False
if param['alg']=='CS':
param['SearchC'] =True
param['SearchG'] =False
param['SearchCp']=True
param['SearchK'] =True
# param['SearchG'] =True
if not param['SearchC']:
with open( get_out_file_name(param, 'BM')) as filein:
fields = filein.readlines()[-1].split()
RangeC = [float(fields[1])]
if not param['SearchG']:
with open( get_out_file_name(param, 'BM')) as filein:
fields = filein.readlines()[-1].split()
RangeG = [float(fields[2])]
if not param['SearchK']:
RangeK=[1]
if not param['SearchCp']:
RangeCp=[1]
return RangeC, RangeG, RangeCp, RangeK
def get_out_file_name(param, alg=None):
if alg==None:
alg=param['alg']
out='{0}.{1}.{2}'.format( param['dataset'] , alg, param['measure'])
if param['measure'] == 'AUC':
out+='-{0}'.format(param['t'])
out+='.out'
return out
def set_default_params(param):
keys=param.keys()
if 'fold' not in keys:
param['fold']=10
if 'verb' not in keys:
param['verb']=0
if 'measure' not in keys:
param['measure']='AUC'
if param['measure']=='AUC':
if 't' not in keys:
param['t']=0.9
if 'RangeC' not in keys:
param['RangeC'] = [1e-2, 1e-1, 1e0, 1e1, 1e2]
if 'RangeG' not in keys:
param['RangeG'] = [1e-2, 1e-1, 1e0, 1e1, 1e2]
if 'RangeCp' not in keys:
param['RangeCp'] = [1, 5, 10, 50, 100]
if 'RangeK' not in keys:
param['RangeK'] = [1, 0.975, 0.95, 0.925, 0.9, 0.7, 0.6, 0.5, 0.3, 0.4, 0.2, 0.1, 0.01]
if 'cmdline' not in keys:
param['cmdline'] = False
return param
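# Minimal param dict sketch (illustrative; the key names are the ones read
# elsewhere in this module, the values are placeholders):
#   param = set_default_params({
#       'dataset': path + 'some_dataset',   # used to build the .out file name
#       'dataset_name': 'some_dataset',
#       'alg': 'CS',                        # one of algs: 'BM', 'BP', 'CS'
#       'measure': 'AUC',
#   })
# Everything not supplied (fold, verb, t, the C/Gamma/Cp/Kappa ranges, cmdline)
# is filled in with the defaults above before grid_search(param) is called.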
def performance_is_better(param, performance, best, best_Cp, best_Cn): #sometimes we want to minimize and sometimes maximize
if param['measure'] == 'Risk' or param['measure'] == 'Error' or param['measure'] == 'PError':
if (performance < best) or (performance == best and param['Cp'] < best_Cp) or (performance == best and param['Cn'] < best_Cn):
return True
if param['measure'] == 'AUC' or param['measure'] == 'Income' or param['measure'] == 'Accuracy':
if (performance > best) or (performance == best and param['Cp'] < best_Cp) or (performance == best and param['Cn'] < best_Cn):
return True
def grid_search(param):
    # Risk/Error/PError are minimised, the other measures are maximised, so the
    # running best must start at the corresponding extreme value.
    minimise = param['measure'] in ('Risk', 'Error', 'PError')
    best_performance = float('inf') if minimise else float('-inf')
    best_Cp, best_Cn, best_c, best_g, best_TH = 1, 1, 1, 1, 0
RangeC, RangeG, RangeCp, RangeK= get_range_for_algorithm(param)
if param['verb']>0:
print 'RangeC =', RangeC, 'RangeG =', RangeG, 'RangeCp =', RangeCp, 'RangeK =', RangeK
with open(get_out_file_name(param), 'a') as out_file:
        print >> out_file, 'C\tGamma\tCp\tCn\tThreshold\tPerformance'
for i in RangeK:
for j in RangeCp:
for k in RangeC:
for l in RangeG:
if 1/i > j:
continue
param['Cn'], param['Cp'], param['C'], param['gamma'] = 1 / i, j, k, l #************** C_n = 1/Kappa
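                        # Illustrative check of the constraint above: Kappa = 0.5
                        # gives C_n = 2, so any Cp < 2 is skipped and the positive
                        # class is never weighted less than the negative class.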
performance, th = train(param.copy())
if performance_is_better(param, performance, best_performance, best_Cp, best_Cn):
best_performance,best_Cp, best_Cn, best_c, best_g, best_TH = performance, param['Cp'], param['Cn'], param['C'], param['gamma'], th
print >> out_file, '{0}\t{1}\t{2}\t{3}\t{4}\t{5}'.format( param['C'], param['gamma'],param['Cp'], param['Cn'], th, performance)
print >> out_file, 'Bests:\t{0}\t{1}\t{2}\t{3}\t{4}\t{5}'.format(best_c, best_g, best_Cp, best_Cn, best_TH, best_performance)
if param['verb']>0:
print"{0} Grid on {1} Finished in {2} Iterations. C={3} Gamma={4} Cp={5} Kappa={6} Threshold:{7:.2f} \t {8}={9:.3f}\n".format(param['alg'], param['dataset_name'],len(RangeG)*len(RangeC)*len(RangeCp)*len(RangeK), best_c, best_g, best_Cp, 1./best_Cn, best_TH, param['measure'], best_performance)
return best_performance, best_c, best_g, best_Cp, best_Cn, best_TH
def train(param, do_test=False):
"""
    Trains and computes the performance of the model.
    The 'fold' entry of param selects the evaluation target:
        -1 it targets the test dataset (*.test) for evaluation
         0 it targets the validation dataset (*.val) for evaluation
         1 it targets the training dataset (*.train) for evaluation
        any other natural number k performs k-fold cross validation on the training set
"""
if(do_test):
param['fold']=-1
param['Pp'] = mean(array(param['train_y']) == 1)
param['Pn'] = 1 - param['Pp']
deci = None
cv_th=0
if param['cmdline']:
deci, label = get_deci_cmdline(param)
else:
deci, label = get_cv_deci(param)
assert deci and label and len(deci)==len(label)
if param['measure']=='Income':
performance = get_income(deci, label, param)
elif param['measure']=='AUC':
performance = get_auc(deci, label, param)
elif param['measure']=='Risk':
performance, cv_th = get_risk(deci, label, param['Pn'], param['Pp'], 1, 5, do_test, do_test, param) #plots in the test phase
elif param['measure']=='Accuracy':
performance = get_acc(deci, label)
elif param['measure']=='Error':
performance = get_error(deci, label)
elif param['measure']=='PError':
performance = get_perror(deci, label, param['Pn'], param['Pp'])
if param['verb']>1:
print"{0} Train on {1} with C={2} Gamma={3} Cp={4} Kappa={5} Threshold= {6} \t {7}={8} ".format(param['alg'], param['dataset_name'], param['C'], param['gamma'], param['Cp'], 1./param['Cn'], th, param['measure'], performance)
return performance,cv_th
def get_pos_deci(param):
params = '-q -h 0 -m 2000 -c {0} -g {1} -w1 {2} -w-1 {3} '.format(param['C'], param['gamma'], param['Cp'], param['Cn'])
if param['alg'] == 'EDBP' or param['alg'] == 'EDCS':
params = '-C 2 ' + params
        model = svm_train(param['train_y'], param['train_x'], params, param['train_costs'])
else:
if param['alg'] == 'BM' or param['alg'] == 'BP' or param['alg'] == 'EDBM':
params = '-C 0 ' + params
elif param['alg'] == 'CS':
params = '-C 1 ' + params
model = svm_train(param['train_y'], param['train_x'], params)
labels = model.get_labels()
py, evals, deci = svm_predict(param['test_y'], param['test_x'], model)
if model.get_labels() != [1, -1] and model.get_labels() != [-1, 1]:
return None
decV = [ labels[0]*val[0] for val in deci]
return decV
def get_deci_cmdline(param):
cv_option=""
model_file= '{0}.{1}.model'.format(param['dataset'], param['alg'])
pred_file= '{0}.{1}.pred'.format(param['dataset'], param['alg'])
train_file= param['dataset']+'.train'
if param['fold'] == -1:
test_file= param['dataset']+'.test'
elif param['fold'] == 0 :
test_file= param['dataset']+'.val'
elif param['fold'] == 1 :
test_file= train_file
else:
test_file= ""
cv_option = "-v {0}".format(param['fold'])
    cmd = '{0} -h 0 {1} -m 2000 -c {2} -g {3} {4}'.format(cssvm_train, ('-q', '')[param['verb'] > 4], param['C'], param['gamma'], cv_option)
if param['alg'] == 'EDBP' or param['alg'] == 'EDCS':
cmd += ' -C 2 -W {0}.train.cost'.format(param['name'])
if param['alg'] == 'CS':
cmd += ' -C 1 '
cmd += ' -w1 {0} -w-1 {1} {2} {3} '.format(param['Cp'], param['Cn'], train_file, model_file)
p = Popen(cmd, shell=True, stdout=PIPE)
p.wait()
if cv_option == "":
cmd = '{0} {1} {2} {3} '.format(cssvm_classify,test_file, model_file, pred_file)
p = Popen(cmd, shell=True, stdout=PIPE)
p.wait()
deci=read_deci(pred_file)
model = svm_load_model(model_file)
labels = model.get_labels()
deci = [labels[0]*val for val in deci]
label=read_labels(test_file)
else:
deci=read_deci(model_file+".cv")
label=read_labels(train_file)
return deci, label
def get_cv_deci(param):
seed(0)
if param['fold'] == -1:
deci = get_pos_deci(param)
label= param['test_y']
elif param['fold'] == 0 :
param['test_y'], param['test_x']=param['val_y'], param['val_x']
deci = get_pos_deci(param)
label= param['val_y']
elif param['fold'] == 1 :
param['test_y'], param['test_x'] = param['train_y'], param['train_x']
deci = get_pos_deci(param)
label= param['train_y']
else:
deci, model, label = [], [], []
subparam = param.copy()
prob_l = len(param['train_y']) #random permutation by swapping i and j instance
for i in range(prob_l):
j = randrange(i, prob_l)
param['train_x'][i], param['train_x'][j] = param['train_x'][j], param['train_x'][i]
param['train_y'][i], param['train_y'][j] = param['train_y'][j], param['train_y'][i]
if param['alg'] == 'EDBP' or param['alg'] == 'EDCS' or param['alg'] == 'EDBM':
param['costs'][i], param['costs'][j] = param['costs'][j], param['costs'][i]
for i in range(param['fold']): #cross training : folding
begin = i * prob_l // param['fold']
end = (i + 1) * prob_l // param['fold']
subparam['train_x'] = param['train_x'][:begin] + param['train_x'][end:]
subparam['train_y'] = param['train_y'][:begin] + param['train_y'][end:]
subparam['test_x'] = param['train_x'][begin:end]
subparam['test_y'] = param['train_y'][begin:end]
subdeci = get_pos_deci(subparam)
assert subdeci
deci += subdeci
label+=subparam['test_y']
return deci, label
def read_costs(dataset):
records = open(dataset).readlines()
costs = []
for record in records:
costs.append(float(record.split()[0].strip()))
return costs
def read_labels(dataset):
labels=[]
with open(dataset) as filein:
for line in filein:
labels.append( int(line.split()[0]))
return labels
def read_deci(dataset):
labels=[]
with open(dataset) as filein:
for line in filein:
labels.append(float(line.split()[0]))
return labels
|
|
from typing import Dict, List, Optional, Union, cast
from django.db.models.query import QuerySet
from django.utils.translation import ugettext as _
from django.conf import settings
from zerver.lib.cache import generic_bulk_cached_fetch, user_profile_cache_key_id, \
user_profile_by_id_cache_key
from zerver.lib.request import JsonableError
from zerver.lib.avatar import avatar_url
from zerver.models import UserProfile, Service, Realm, \
get_user_profile_by_id, query_for_ids, get_user_profile_by_id_in_realm, \
CustomProfileField
from zulip_bots.custom_exceptions import ConfigValidationError
def check_full_name(full_name_raw: str) -> str:
full_name = full_name_raw.strip()
if len(full_name) > UserProfile.MAX_NAME_LENGTH:
raise JsonableError(_("Name too long!"))
if len(full_name) < UserProfile.MIN_NAME_LENGTH:
raise JsonableError(_("Name too short!"))
if list(set(full_name).intersection(UserProfile.NAME_INVALID_CHARS)):
raise JsonableError(_("Invalid characters in name!"))
return full_name
# NOTE: We don't try to absolutely prevent 2 bots from having the same
# name (e.g. you can get there by reactivating a deactivated bot after
# making a new bot with the same name). This is just a check designed
# to make it unlikely to happen by accident.
def check_bot_name_available(realm_id: int, full_name: str) -> None:
dup_exists = UserProfile.objects.filter(
realm_id=realm_id,
full_name=full_name.strip(),
is_active=True,
).exists()
if dup_exists:
raise JsonableError(_("Name is already in use!"))
def check_short_name(short_name_raw: str) -> str:
short_name = short_name_raw.strip()
if len(short_name) == 0:
raise JsonableError(_("Bad name or username"))
return short_name
def check_valid_bot_config(service_name: str, config_data: Dict[str, str]) -> None:
try:
from zerver.lib.bot_lib import get_bot_handler
bot_handler = get_bot_handler(service_name)
if hasattr(bot_handler, 'validate_config'):
bot_handler.validate_config(config_data)
except ConfigValidationError:
# The exception provides a specific error message, but that
# message is not tagged translatable, because it is
# triggered in the external zulip_bots package.
# TODO: Think of some clever way to provide a more specific
# error message.
raise JsonableError(_("Invalid configuration data!"))
# Adds an outgoing webhook or embedded bot service.
def add_service(name: str, user_profile: UserProfile, base_url: Optional[str]=None,
interface: Optional[int]=None, token: Optional[str]=None) -> None:
Service.objects.create(name=name,
user_profile=user_profile,
base_url=base_url,
interface=interface,
token=token)
def check_bot_creation_policy(user_profile: UserProfile, bot_type: int) -> None:
    # Realm administrators can always add bots
if user_profile.is_realm_admin:
return
if user_profile.realm.bot_creation_policy == Realm.BOT_CREATION_EVERYONE:
return
if user_profile.realm.bot_creation_policy == Realm.BOT_CREATION_ADMINS_ONLY:
raise JsonableError(_("Must be an organization administrator"))
if user_profile.realm.bot_creation_policy == Realm.BOT_CREATION_LIMIT_GENERIC_BOTS and \
bot_type == UserProfile.DEFAULT_BOT:
raise JsonableError(_("Must be an organization administrator"))
def check_valid_bot_type(user_profile: UserProfile, bot_type: int) -> None:
if bot_type not in user_profile.allowed_bot_types:
raise JsonableError(_('Invalid bot type'))
def check_valid_interface_type(interface_type: Optional[int]) -> None:
if interface_type not in Service.ALLOWED_INTERFACE_TYPES:
raise JsonableError(_('Invalid interface type'))
def bulk_get_users(emails: List[str], realm: Optional[Realm],
base_query: 'QuerySet[UserProfile]'=None) -> Dict[str, UserProfile]:
if base_query is None:
assert realm is not None
query = UserProfile.objects.filter(realm=realm, is_active=True)
realm_id = realm.id
else:
# WARNING: Currently, this code path only really supports one
# version of `base_query` being used (because otherwise,
# they'll share the cache, which can screw up the filtering).
# If you're using this flow, you'll need to re-do any filters
# in base_query in the code itself; base_query is just a perf
# optimization.
query = base_query
realm_id = 0
def fetch_users_by_email(emails: List[str]) -> List[UserProfile]:
# This should be just
#
# UserProfile.objects.select_related("realm").filter(email__iexact__in=emails,
# realm=realm)
#
# But chaining __in and __iexact doesn't work with Django's
# ORM, so we have the following hack to construct the relevant where clause
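        # For two emails, for example, the generated fragment is (illustrative):
        #   UPPER(zerver_userprofile.email::text) IN (UPPER(%s), UPPER(%s))
        # with the raw email strings passed separately as query parameters.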
if len(emails) == 0:
return []
upper_list = ", ".join(["UPPER(%s)"] * len(emails))
where_clause = "UPPER(zerver_userprofile.email::text) IN (%s)" % (upper_list,)
return query.select_related("realm").extra(
where=[where_clause],
params=emails)
return generic_bulk_cached_fetch(
# Use a separate cache key to protect us from conflicts with
# the get_user cache.
lambda email: 'bulk_get_users:' + user_profile_cache_key_id(email, realm_id),
fetch_users_by_email,
[email.lower() for email in emails],
id_fetcher=lambda user_profile: user_profile.email.lower()
)
def user_ids_to_users(user_ids: List[int], realm: Realm) -> List[UserProfile]:
# TODO: Consider adding a flag to control whether deactivated
# users should be included.
def fetch_users_by_id(user_ids: List[int]) -> List[UserProfile]:
if len(user_ids) == 0:
return []
return list(UserProfile.objects.filter(id__in=user_ids).select_related())
user_profiles_by_id = generic_bulk_cached_fetch(
cache_key_function=user_profile_by_id_cache_key,
query_function=fetch_users_by_id,
object_ids=user_ids
) # type: Dict[int, UserProfile]
found_user_ids = user_profiles_by_id.keys()
missed_user_ids = [user_id for user_id in user_ids if user_id not in found_user_ids]
if missed_user_ids:
raise JsonableError(_("Invalid user ID: %s" % (missed_user_ids[0])))
user_profiles = list(user_profiles_by_id.values())
for user_profile in user_profiles:
if user_profile.realm != realm:
raise JsonableError(_("Invalid user ID: %s" % (user_profile.id,)))
return user_profiles
def access_bot_by_id(user_profile: UserProfile, user_id: int) -> UserProfile:
try:
target = get_user_profile_by_id_in_realm(user_id, user_profile.realm)
except UserProfile.DoesNotExist:
raise JsonableError(_("No such bot"))
if not target.is_bot:
raise JsonableError(_("No such bot"))
if not user_profile.can_admin_user(target):
raise JsonableError(_("Insufficient permission"))
return target
def access_user_by_id(user_profile: UserProfile, user_id: int,
allow_deactivated: bool=False, allow_bots: bool=False) -> UserProfile:
try:
target = get_user_profile_by_id_in_realm(user_id, user_profile.realm)
except UserProfile.DoesNotExist:
raise JsonableError(_("No such user"))
if target.is_bot and not allow_bots:
raise JsonableError(_("No such user"))
if not target.is_active and not allow_deactivated:
raise JsonableError(_("User is deactivated"))
if not user_profile.can_admin_user(target):
raise JsonableError(_("Insufficient permission"))
return target
def get_accounts_for_email(email: str) -> List[Dict[str, Optional[str]]]:
if settings.PRODUCTION: # nocoverage
return []
profiles = UserProfile.objects.select_related('realm').filter(delivery_email__iexact=email.strip(),
is_active=True,
is_bot=False,
realm__deactivated=False)
return [{"realm_name": profile.realm.name,
"string_id": profile.realm.string_id,
"full_name": profile.full_name,
"avatar": avatar_url(profile)}
for profile in profiles]
def get_api_key(user_profile: UserProfile) -> str:
return user_profile.api_key
def get_all_api_keys(user_profile: UserProfile) -> List[str]:
# Users can only have one API key for now
return [user_profile.api_key]
def validate_user_custom_profile_data(realm_id: int,
profile_data: List[Dict[str, Union[int, str, List[int]]]]) -> None:
    # This function validates all custom field values according to their field type.
for item in profile_data:
field_id = item['id']
try:
field = CustomProfileField.objects.get(id=field_id)
except CustomProfileField.DoesNotExist:
raise JsonableError(_('Field id {id} not found.').format(id=field_id))
validators = CustomProfileField.FIELD_VALIDATORS
field_type = field.field_type
var_name = '{}'.format(field.name)
value = item['value']
if field_type in validators:
validator = validators[field_type]
result = validator(var_name, value)
elif field_type == CustomProfileField.CHOICE:
choice_field_validator = CustomProfileField.CHOICE_FIELD_VALIDATORS[field_type]
field_data = field.field_data
result = choice_field_validator(var_name, field_data, value)
elif field_type == CustomProfileField.USER:
user_field_validator = CustomProfileField.USER_FIELD_VALIDATORS[field_type]
result = user_field_validator(realm_id, cast(List[int], value),
False)
else:
raise AssertionError("Invalid field type")
if result is not None:
raise JsonableError(result)
|
|
from sympy.core.sympify import _sympify, sympify
from sympy.core.basic import Basic
from sympy.core.singleton import Singleton, S
from sympy.core.evalf import EvalfMixin
from sympy.core.numbers import Float
from sympy.core.compatibility import iterable
from sympy.core.decorators import deprecated
from sympy.mpmath import mpi, mpf
from sympy.assumptions import ask
from sympy.logic.boolalg import And, Or
from sympy.utilities import default_sort_key
class Set(Basic):
"""
The base class for any kind of set.
This is not meant to be used directly as a container of items.
It does not behave like the builtin set; see FiniteSet for that.
Real intervals are represented by the Interval class and unions of sets
by the Union class. The empty set is represented by the EmptySet class
and available as a singleton as S.EmptySet.
"""
is_number = False
is_iterable = False
is_interval = False
is_FiniteSet = False
is_Interval = False
is_ProductSet = False
is_Union = False
is_Intersection = None
is_EmptySet = None
is_UniversalSet = None
def sort_key(self, order=None):
"""
Give sort_key of infimum (if possible) else sort_key of the set.
"""
try:
infimum = self.inf
if infimum.is_comparable:
return default_sort_key(infimum, order)
except (NotImplementedError, ValueError):
pass
args = tuple([default_sort_key(a, order) for a in self._sorted_args])
return self.class_key(), (len(args), args), S.One.class_key(), S.One
def union(self, other):
"""
Returns the union of 'self' and 'other'.
As a shortcut it is possible to use the '+' operator:
>>> from sympy import Interval, FiniteSet
>>> Interval(0, 1).union(Interval(2, 3))
[0, 1] U [2, 3]
>>> Interval(0, 1) + Interval(2, 3)
[0, 1] U [2, 3]
>>> Interval(1, 2, True, True) + FiniteSet(2, 3)
(1, 2] U {3}
Similarly it is possible to use the '-' operator for set differences:
>>> Interval(0, 2) - Interval(0, 1)
(1, 2]
>>> Interval(1, 3) - FiniteSet(2)
[1, 2) U (2, 3]
"""
return Union(self, other)
def intersect(self, other):
"""
Returns the intersection of 'self' and 'other'.
>>> from sympy import Interval
>>> Interval(1, 3).intersect(Interval(1, 2))
[1, 2]
"""
return Intersection(self, other)
def _intersect(self, other):
"""
This function should only be used internally
self._intersect(other) returns a new, intersected set if self knows how
to intersect itself with other, otherwise it returns None
When making a new set class you can be assured that other will not
be a Union, FiniteSet, or EmptySet
Used within the Intersection class
"""
return None
def _union(self, other):
"""
This function should only be used internally
self._union(other) returns a new, joined set if self knows how
to join itself with other, otherwise it returns None.
It may also return a python set of SymPy Sets if they are somehow
        simpler. If it does this it must be idempotent, i.e. the sets returned
        must return None when _union'ed with each other
Used within the Union class
"""
return None
@property
def complement(self):
"""
The complement of 'self'.
As a shortcut it is possible to use the '~' or '-' operators:
>>> from sympy import Interval
>>> Interval(0, 1).complement
(-oo, 0) U (1, oo)
>>> ~Interval(0, 1)
(-oo, 0) U (1, oo)
>>> -Interval(0, 1)
(-oo, 0) U (1, oo)
"""
return self._complement
@property
def _complement(self):
raise NotImplementedError("(%s)._complement" % self)
@property
def inf(self):
"""
The infimum of 'self'
>>> from sympy import Interval, Union
>>> Interval(0, 1).inf
0
>>> Union(Interval(0, 1), Interval(2, 3)).inf
0
"""
return self._inf
@property
def _inf(self):
raise NotImplementedError("(%s)._inf" % self)
@property
def sup(self):
"""
The supremum of 'self'
>>> from sympy import Interval, Union
>>> Interval(0, 1).sup
1
>>> Union(Interval(0, 1), Interval(2, 3)).sup
3
"""
return self._sup
@property
def _sup(self):
raise NotImplementedError("(%s)._sup" % self)
def contains(self, other):
"""
Returns True if 'other' is contained in 'self' as an element.
As a shortcut it is possible to use the 'in' operator:
>>> from sympy import Interval
>>> Interval(0, 1).contains(0.5)
True
>>> 0.5 in Interval(0, 1)
True
"""
return self._contains(sympify(other, strict=True))
def _contains(self, other):
raise NotImplementedError("(%s)._contains(%s)" % (self, other))
def subset(self, other):
"""
Returns True if 'other' is a subset of 'self'.
>>> from sympy import Interval
        >>> Interval(0, 2).subset(Interval(0, 1))
        True
        >>> Interval(0, 1).subset(Interval(0, 2))
        False
"""
if isinstance(other, Set):
return self.intersect(other) == other
else:
raise ValueError("Unknown argument '%s'" % other)
@property
def measure(self):
"""
The (Lebesgue) measure of 'self'
>>> from sympy import Interval, Union
>>> Interval(0, 1).measure
1
>>> Union(Interval(0, 1), Interval(2, 3)).measure
2
"""
return self._measure
@property
def _measure(self):
raise NotImplementedError("(%s)._measure" % self)
def __add__(self, other):
return self.union(other)
def __or__(self, other):
return self.union(other)
def __and__(self, other):
return self.intersect(other)
def __mul__(self, other):
return ProductSet(self, other)
def __pow__(self, exp):
        if not (sympify(exp).is_Integer and exp >= 0):
            raise ValueError("%s: Exponent must be a non-negative Integer" % exp)
return ProductSet([self]*exp)
def __sub__(self, other):
return self.intersect(other.complement)
def __neg__(self):
return self.complement
def __invert__(self):
return self.complement
def __contains__(self, other):
symb = self.contains(other)
result = ask(symb)
if result is None:
raise TypeError('contains did not evaluate to a bool: %r' % symb)
return result
@property
def is_real(self):
return None
class ProductSet(Set):
"""
Represents a Cartesian Product of Sets.
Returns a Cartesian product given several sets as either an iterable
or individual arguments.
Can use '*' operator on any sets for convenient shorthand.
Examples
========
>>> from sympy import Interval, FiniteSet, ProductSet
>>> I = Interval(0, 5); S = FiniteSet(1, 2, 3)
>>> ProductSet(I, S)
[0, 5] x {1, 2, 3}
>>> (2, 2) in ProductSet(I, S)
True
>>> Interval(0, 1) * Interval(0, 1) # The unit square
[0, 1] x [0, 1]
>>> coin = FiniteSet('H', 'T')
>>> set(coin**2)
set([(H, H), (H, T), (T, H), (T, T)])
Notes
=====
- Passes most operations down to the argument sets
- Flattens Products of ProductSets
References
==========
http://en.wikipedia.org/wiki/Cartesian_product
"""
is_ProductSet = True
def __new__(cls, *sets, **assumptions):
def flatten(arg):
if isinstance(arg, Set):
if arg.is_ProductSet:
return sum(map(flatten, arg.args), [])
else:
return [arg]
elif iterable(arg):
return sum(map(flatten, arg), [])
raise TypeError("Input must be Sets or iterables of Sets")
sets = flatten(list(sets))
if EmptySet() in sets or len(sets) == 0:
return EmptySet()
return Basic.__new__(cls, *sets, **assumptions)
def _contains(self, element):
"""
'in' operator for ProductSets
>>> from sympy import Interval
>>> (2, 3) in Interval(0, 5) * Interval(0, 5)
True
>>> (10, 10) in Interval(0, 5) * Interval(0, 5)
False
Passes operation on to constituent sets
"""
try:
if len(element) != len(self.args):
return False
except TypeError: # maybe element isn't an iterable
return False
return And(*[set.contains(item) for set, item in zip(self.sets, element)])
def _intersect(self, other):
"""
This function should only be used internally
See Set._intersect for docstring
"""
if not other.is_ProductSet:
return None
if len(other.args) != len(self.args):
return S.EmptySet
return ProductSet(a.intersect(b)
for a, b in zip(self.sets, other.sets))
@property
def sets(self):
return self.args
@property
def _complement(self):
        # For each set consider it or its complement
        # We need at least one of the sets to be complemented
        # Consider all 2^n combinations.
        # We can conveniently represent these options using a ProductSet
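        # Sketch (illustrative): for A x B this builds the four products from
        # {A, A'} x {B, B'} and unions every one except A x B itself, i.e.
        # (A x B') U (A' x B) U (A' x B').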
switch_sets = ProductSet(FiniteSet(s, s.complement) for s in self.sets)
product_sets = (ProductSet(*set) for set in switch_sets)
# Union of all combinations but this one
return Union(p for p in product_sets if p != self)
@property
def is_real(self):
return all(set.is_real for set in self.sets)
@property
def is_iterable(self):
return all(set.is_iterable for set in self.sets)
def __iter__(self):
if self.is_iterable:
from sympy.core.compatibility import product
return product(*self.sets)
else:
raise TypeError("Not all constituent sets are iterable")
@property
def _measure(self):
measure = 1
for set in self.sets:
measure *= set.measure
return measure
class Interval(Set, EvalfMixin):
"""
Represents a real interval as a Set.
Usage:
Returns an interval with end points "start" and "end".
For left_open=True (default left_open is False) the interval
will be open on the left. Similarly, for right_open=True the interval
will be open on the right.
Examples
========
>>> from sympy import Symbol, Interval, sets
>>> Interval(0, 1)
[0, 1]
>>> Interval(0, 1, False, True)
[0, 1)
>>> a = Symbol('a', real=True)
>>> Interval(0, a)
[0, a]
Notes
=====
- Only real end points are supported
- Interval(a, b) with a > b will return the empty set
- Use the evalf() method to turn an Interval into an mpmath
'mpi' interval instance
References
==========
<http://en.wikipedia.org/wiki/Interval_(mathematics)>
"""
is_Interval = True
is_real = True
def __new__(cls, start, end, left_open=False, right_open=False):
start = _sympify(start)
end = _sympify(end)
# Only allow real intervals (use symbols with 'is_real=True').
if not start.is_real or not end.is_real:
raise ValueError("Only real intervals are supported")
# Make sure that the created interval will be valid.
if end.is_comparable and start.is_comparable:
if end < start:
return S.EmptySet
if end == start and (left_open or right_open):
return S.EmptySet
if end == start and not (left_open or right_open):
return FiniteSet(end)
# Make sure infinite interval end points are open.
if start == S.NegativeInfinity:
left_open = True
if end == S.Infinity:
right_open = True
return Basic.__new__(cls, start, end, left_open, right_open)
@property
def start(self):
"""
The left end point of 'self'.
This property takes the same value as the 'inf' property.
>>> from sympy import Interval
>>> Interval(0, 1).start
0
"""
return self._args[0]
_inf = left = start
@property
def end(self):
"""
The right end point of 'self'.
This property takes the same value as the 'sup' property.
>>> from sympy import Interval
>>> Interval(0, 1).end
1
"""
return self._args[1]
_sup = right = end
@property
def left_open(self):
"""
True if 'self' is left-open.
>>> from sympy import Interval
>>> Interval(0, 1, left_open=True).left_open
True
>>> Interval(0, 1, left_open=False).left_open
False
"""
return self._args[2]
@property
def right_open(self):
"""
True if 'self' is right-open.
>>> from sympy import Interval
>>> Interval(0, 1, right_open=True).right_open
True
>>> Interval(0, 1, right_open=False).right_open
False
"""
return self._args[3]
def _intersect(self, other):
"""
This function should only be used internally
See Set._intersect for docstring
"""
# We only know how to intersect with other intervals
if not other.is_Interval:
return None
# We can't intersect [0,3] with [x,6] -- we don't know if x>0 or x<0
if not self._is_comparable(other):
return None
empty = False
if self.start <= other.end and other.start <= self.end:
# Get topology right.
if self.start < other.start:
start = other.start
left_open = other.left_open
elif self.start > other.start:
start = self.start
left_open = self.left_open
else:
start = self.start
left_open = self.left_open or other.left_open
if self.end < other.end:
end = self.end
right_open = self.right_open
elif self.end > other.end:
end = other.end
right_open = other.right_open
else:
end = self.end
right_open = self.right_open or other.right_open
if end - start == 0 and (left_open or right_open):
empty = True
else:
empty = True
if empty:
return S.EmptySet
return Interval(start, end, left_open, right_open)
def _union(self, other):
"""
This function should only be used internally
See Set._union for docstring
"""
if other.is_Interval and self._is_comparable(other):
from sympy.functions.elementary.miscellaneous import Min, Max
# Non-overlapping intervals
end = Min(self.end, other.end)
start = Max(self.start, other.start)
if (end < start or
(end == start and (end not in self and end not in other))):
return None
else:
start = Min(self.start, other.start)
end = Max(self.end, other.end)
left_open = ((self.start != start or self.left_open) and
(other.start != start or other.left_open))
right_open = ((self.end != end or self.right_open) and
(other.end != end or other.right_open))
return Interval(start, end, left_open, right_open)
# If I have open end points and these endpoints are contained in other
if ((self.left_open and other.contains(self.start) is True) or
(self.right_open and other.contains(self.end) is True)):
# Fill in my end points and return
open_left = self.left_open and self.start not in other
open_right = self.right_open and self.end not in other
new_self = Interval(self.start, self.end, open_left, open_right)
return set((new_self, other))
return None
@property
def _complement(self):
a = Interval(S.NegativeInfinity, self.start, True, not self.left_open)
b = Interval(self.end, S.Infinity, not self.right_open, True)
return Union(a, b)
def _contains(self, other):
if self.left_open:
expr = other > self.start
else:
expr = other >= self.start
if self.right_open:
expr = And(expr, other < self.end)
else:
expr = And(expr, other <= self.end)
return expr
@property
def _measure(self):
return self.end - self.start
def to_mpi(self, prec=53):
return mpi(mpf(self.start.evalf(prec)), mpf(self.end.evalf(prec)))
def _eval_evalf(self, prec):
return Interval(self.left.evalf(), self.right.evalf(),
left_open=self.left_open, right_open=self.right_open)
def _is_comparable(self, other):
is_comparable = self.start.is_comparable
is_comparable &= self.end.is_comparable
is_comparable &= other.start.is_comparable
is_comparable &= other.end.is_comparable
return is_comparable
@property
def is_left_unbounded(self):
"""Return ``True`` if the left endpoint is negative infinity. """
return self.left is S.NegativeInfinity or self.left == Float("-inf")
@property
def is_right_unbounded(self):
"""Return ``True`` if the right endpoint is positive infinity. """
return self.right is S.Infinity or self.right == Float("+inf")
def as_relational(self, symbol):
"""Rewrite an interval in terms of inequalities and logic operators. """
from sympy.core.relational import Lt, Le
if not self.is_left_unbounded:
if self.left_open:
left = Lt(self.start, symbol)
else:
left = Le(self.start, symbol)
if not self.is_right_unbounded:
if self.right_open:
right = Lt(symbol, self.right)
else:
right = Le(symbol, self.right)
if self.is_left_unbounded and self.is_right_unbounded:
return True # XXX: Contained(symbol, Floats)
elif self.is_left_unbounded:
return right
elif self.is_right_unbounded:
return left
else:
return And(left, right)
@property
def free_symbols(self):
return self.start.free_symbols | self.end.free_symbols
class Union(Set, EvalfMixin):
"""
Represents a union of sets as a Set.
Examples
========
>>> from sympy import Union, Interval
>>> Union(Interval(1, 2), Interval(3, 4))
[1, 2] U [3, 4]
The Union constructor will always try to merge overlapping intervals,
if possible. For example:
>>> Union(Interval(1, 2), Interval(2, 3))
[1, 3]
See Also
========
Intersection
References
==========
<http://en.wikipedia.org/wiki/Union_(set_theory)>
"""
is_Union = True
def __new__(cls, *args, **kwargs):
evaluate = kwargs.get('evaluate', True)
# flatten inputs to merge intersections and iterables
args = list(args)
def flatten(arg):
if isinstance(arg, Set):
if arg.is_Union:
return sum(map(flatten, arg.args), [])
else:
return [arg]
if iterable(arg): # and not isinstance(arg, Set) (implicit)
return sum(map(flatten, arg), [])
raise TypeError("Input must be Sets or iterables of Sets")
args = flatten(args)
# Union of no sets is EmptySet
if len(args) == 0:
return S.EmptySet
args = sorted(args, key=default_sort_key)
# Reduce sets using known rules
if evaluate:
return Union.reduce(args)
return Basic.__new__(cls, *args)
@staticmethod
def reduce(args):
"""
Simplify a Union using known rules
We first start with global rules like
'Merge all FiniteSets'
Then we iterate through all pairs and ask the constituent sets if they
can simplify themselves with any other constituent
"""
# ===== Global Rules =====
# Merge all finite sets
finite_sets = [x for x in args if x.is_FiniteSet]
if len(finite_sets) > 1:
finite_set = FiniteSet(x for set in finite_sets for x in set)
args = [finite_set] + [x for x in args if not x.is_FiniteSet]
# ===== Pair-wise Rules =====
# Here we depend on rules built into the constituent sets
args = set(args)
new_args = True
while(new_args):
for s in args:
new_args = False
for t in args - set((s,)):
new_set = s._union(t)
# This returns None if s does not know how to intersect
# with t. Returns the newly intersected set otherwise
if new_set is not None:
if not isinstance(new_set, set):
new_set = set((new_set, ))
new_args = (args - set((s, t))).union(new_set)
break
if new_args:
args = new_args
break
if len(args) == 1:
return args.pop()
else:
return Union(args, evaluate=False)
@property
def _inf(self):
        # We use Min so that inf is meaningful in combination with symbolic
        # interval end points.
from sympy.functions.elementary.miscellaneous import Min
return Min(*[set.inf for set in self.args])
@property
def _sup(self):
# We use Max so that sup is meaningful in combination with symbolic
# end points.
from sympy.functions.elementary.miscellaneous import Max
return Max(*[set.sup for set in self.args])
@property
def _complement(self):
# De Morgan's formula.
complement = self.args[0].complement
for set in self.args[1:]:
complement = complement.intersect(set.complement)
return complement
def _contains(self, other):
or_args = [the_set.contains(other) for the_set in self.args]
return Or(*or_args)
@property
def _measure(self):
# Measure of a union is the sum of the measures of the sets minus
# the sum of their pairwise intersections plus the sum of their
# triple-wise intersections minus ... etc...
# Sets is a collection of intersections and a set of elementary
# sets which made up those intersections (called "sos" for set of sets)
        # An example element of this list might be:
# ( {A,B,C}, A.intersect(B).intersect(C) )
# Start with just elementary sets ( ({A}, A), ({B}, B), ... )
# Then get and subtract ( ({A,B}, (A int B), ... ) while non-zero
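        # Illustrative inclusion-exclusion for two overlapping intervals
        # A = [0, 2] and B = [1, 3]: measure = |A| + |B| - |A n B| = 2 + 2 - 1 = 3.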
sets = [(FiniteSet(s), s) for s in self.args]
measure = 0
parity = 1
while sets:
# Add up the measure of these sets and add or subtract it to total
measure += parity * sum(inter.measure for sos, inter in sets)
# For each intersection in sets, compute the intersection with every
# other set not already part of the intersection.
sets = ((sos + FiniteSet(newset), newset.intersect(intersection))
for sos, intersection in sets for newset in self.args
if newset not in sos)
# Clear out sets with no measure
sets = [(sos, inter) for sos, inter in sets if inter.measure != 0]
# Clear out duplicates
sos_list = []
sets_list = []
for set in sets:
if set[0] in sos_list:
continue
else:
sos_list.append(set[0])
sets_list.append(set)
sets = sets_list
# Flip Parity - next time subtract/add if we added/subtracted here
parity *= -1
return measure
def as_relational(self, symbol):
"""Rewrite a Union in terms of equalities and logic operators. """
return Or(*[set.as_relational(symbol) for set in self.args])
@property
def is_iterable(self):
return all(arg.is_iterable for arg in self.args)
def _eval_evalf(self, prec):
try:
return Union(set.evalf() for set in self.args)
        except Exception:
raise TypeError("Not all sets are evalf-able")
def __iter__(self):
import itertools
if all(set.is_iterable for set in self.args):
return itertools.chain(*(iter(arg) for arg in self.args))
else:
raise TypeError("Not all constituent sets are iterable")
@property
def is_real(self):
return all(set.is_real for set in self.args)
class Intersection(Set):
"""
Represents an intersection of sets as a Set.
Examples
========
>>> from sympy import Intersection, Interval
>>> Intersection(Interval(1, 3), Interval(2, 4))
[2, 3]
We often use the .intersect method
>>> Interval(1,3).intersect(Interval(2,4))
[2, 3]
See Also
========
Union
References
==========
<http://en.wikipedia.org/wiki/Intersection_(set_theory)>
"""
is_Intersection = True
def __new__(cls, *args, **kwargs):
evaluate = kwargs.get('evaluate', True)
# flatten inputs to merge intersections and iterables
args = list(args)
def flatten(arg):
if isinstance(arg, Set):
if arg.is_Intersection:
return sum(map(flatten, arg.args), [])
else:
return [arg]
if iterable(arg): # and not isinstance(arg, Set) (implicit)
return sum(map(flatten, arg), [])
raise TypeError("Input must be Sets or iterables of Sets")
args = flatten(args)
# Intersection of no sets is everything
if len(args) == 0:
return S.UniversalSet
args = sorted(args, key=default_sort_key)
# Reduce sets using known rules
if evaluate:
return Intersection.reduce(args)
return Basic.__new__(cls, *args)
@property
def is_iterable(self):
return any(arg.is_iterable for arg in self.args)
@property
def _inf(self):
raise NotImplementedError()
@property
def _sup(self):
raise NotImplementedError()
@property
def _complement(self):
raise NotImplementedError()
def _contains(self, other):
from sympy.logic.boolalg import And
return And(*[set.contains(other) for set in self.args])
def __iter__(self):
for s in self.args:
if s.is_iterable:
other_sets = set(self.args) - set((s,))
other = Intersection(other_sets, evaluate=False)
return (x for x in s if x in other)
raise ValueError("None of the constituent sets are iterable")
@staticmethod
def reduce(args):
"""
Simplify an intersection using known rules
We first start with global rules like
'if any empty sets return empty set' and 'distribute any unions'
Then we iterate through all pairs and ask the constituent sets if they
can simplify themselves with any other constituent
"""
# ===== Global Rules =====
# If any EmptySets return EmptySet
if any(s.is_EmptySet for s in args):
return S.EmptySet
# If any FiniteSets see which elements of that finite set occur within
# all other sets in the intersection
for s in args:
if s.is_FiniteSet:
return s.__class__(x for x in s
if all(x in other for other in args))
# If any of the sets are unions, return a Union of Intersections
for s in args:
if s.is_Union:
other_sets = set(args) - set((s,))
other = Intersection(other_sets)
return Union(Intersection(arg, other) for arg in s.args)
# At this stage we are guaranteed not to have any
# EmptySets, FiniteSets, or Unions in the intersection
# ===== Pair-wise Rules =====
# Here we depend on rules built into the constituent sets
args = set(args)
new_args = True
while(new_args):
for s in args:
new_args = False
for t in args - set((s,)):
new_set = s._intersect(t)
# This returns None if s does not know how to intersect
# with t. Returns the newly intersected set otherwise
if new_set is not None:
new_args = (args - set((s, t))).union(set((new_set, )))
break
if new_args:
args = new_args
break
if len(args) == 1:
return args.pop()
else:
return Intersection(args, evaluate=False)
def as_relational(self, symbol):
"""Rewrite an Intersection in terms of equalities and logic operators"""
return And(*[set.as_relational(symbol) for set in self.args])
class EmptySet(Set):
"""
Represents the empty set. The empty set is available as a singleton
as S.EmptySet.
Examples
========
>>> from sympy import S, Interval
>>> S.EmptySet
EmptySet()
>>> Interval(1, 2).intersect(S.EmptySet)
EmptySet()
See Also
========
UniversalSet
References
==========
http://en.wikipedia.org/wiki/Empty_set
"""
__metaclass__ = Singleton
is_EmptySet = True
def _intersect(self, other):
return S.EmptySet
@property
def _complement(self):
return S.UniversalSet
@property
def _measure(self):
return 0
def _contains(self, other):
return False
def as_relational(self, symbol):
return False
def __len__(self):
return 0
def _union(self, other):
return other
def __iter__(self):
return iter([])
class UniversalSet(Set):
"""
Represents the set of all things.
The universal set is available as a singleton as S.UniversalSet
Examples
========
>>> from sympy import S, Interval
>>> S.UniversalSet
UniversalSet()
>>> Interval(1, 2).intersect(S.UniversalSet)
[1, 2]
See Also
========
EmptySet
References
==========
http://en.wikipedia.org/wiki/Universal_set
"""
__metaclass__ = Singleton
is_UniversalSet = True
def _intersect(self, other):
return other
@property
def _complement(self):
return S.EmptySet
@property
def _measure(self):
return S.Infinity
def _contains(self, other):
return True
def as_relational(self, symbol):
return True
def _union(self, other):
return self
class FiniteSet(Set, EvalfMixin):
"""
Represents a finite set of discrete numbers
Examples
========
>>> from sympy import Symbol, FiniteSet, sets
>>> FiniteSet(1, 2, 3, 4)
{1, 2, 3, 4}
>>> 3 in FiniteSet(1, 2, 3, 4)
True
References
==========
http://en.wikipedia.org/wiki/Finite_set
"""
is_FiniteSet = True
is_iterable = True
def __new__(cls, *args, **kwargs):
evaluate = kwargs.get('evaluate', True)
if evaluate:
if len(args) == 1 and iterable(args[0]):
args = args[0]
args = map(sympify, args)
if len(args) == 0:
return EmptySet()
args = frozenset(args) # remove duplicates
obj = Basic.__new__(cls, *args)
obj._elements = args
return obj
def __iter__(self):
return iter(self.args)
def _intersect(self, other):
"""
This function should only be used internally
See Set._intersect for docstring
"""
if isinstance(other, self.__class__):
return self.__class__(*(self._elements & other._elements))
return self.__class__(el for el in self if el in other)
def _union(self, other):
"""
This function should only be used internally
See Set._union for docstring
"""
if other.is_FiniteSet:
return FiniteSet(*(self._elements | other._elements))
# If other set contains one of my elements, remove it from myself
if any(other.contains(x) is True for x in self):
return set((
FiniteSet(x for x in self if other.contains(x) is not True),
other))
return None
def _contains(self, other):
"""
Tests whether an element, other, is in the set.
Relies on Python's set class. This tests for object equality
All inputs are sympified
>>> from sympy import FiniteSet
>>> 1 in FiniteSet(1, 2)
True
>>> 5 in FiniteSet(1, 2)
False
"""
return other in self._elements
@property
def _complement(self):
"""
The complement of a real finite set is the Union of open Intervals
between the elements of the set.
>>> from sympy import FiniteSet
>>> FiniteSet(1, 2, 3).complement
(-oo, 1) U (1, 2) U (2, 3) U (3, oo)
"""
if not all(elem.is_number for elem in self):
raise ValueError("%s: Complement not defined for symbolic inputs"
% self)
# as there are only numbers involved, a straight sort is sufficient;
# default_sort_key is not needed
args = sorted(self.args)
intervals = [] # Build up a list of intervals between the elements
intervals += [Interval(S.NegativeInfinity, args[0], True, True)]
for a, b in zip(args[:-1], args[1:]):
intervals.append(Interval(a, b, True, True)) # open intervals
intervals.append(Interval(args[-1], S.Infinity, True, True))
return Union(intervals, evaluate=False)
@property
def _inf(self):
from sympy.functions.elementary.miscellaneous import Min
return Min(*self)
@property
def _sup(self):
from sympy.functions.elementary.miscellaneous import Max
return Max(*self)
@property
def measure(self):
return 0
def __len__(self):
return len(self.args)
def __sub__(self, other):
return FiniteSet(el for el in self if el not in other)
def as_relational(self, symbol):
"""Rewrite a FiniteSet in terms of equalities and logic operators. """
from sympy.core.relational import Eq
return Or(*[Eq(symbol, elem) for elem in self])
@property
def is_real(self):
return all(el.is_real for el in self)
def compare(self, other):
return (hash(self) - hash(other))
def _eval_evalf(self, prec):
return FiniteSet(elem.evalf(prec) for elem in self)
def _hashable_content(self):
return (self._elements,)
@property
def _sorted_args(self):
from sympy.utilities import default_sort_key
return sorted(self.args, key=default_sort_key)
|
|
import os
import tempfile
import pytest
from opentaxii.config import ServerConfig
BACKWARDS_COMPAT_CONFIG = """
---
dummy: some
persistence_api:
class: some.test.PersistenceClass
parameters:
a: 1
b: 2
auth_api:
class: other.test.AuthClass
parameters:
c: 3
"""
COMBINED_CONFIG = """
---
dummy: some
auth_api:
class: other.test.AuthClass
parameters:
c: 3
taxii1:
persistence_api:
class: some.test.PersistenceClass
parameters:
a: 1
b: 2
taxii2:
persistence_api:
class: some.test.Taxii2PersistenceClass
parameters:
a: 1
b: 2
max_content_length: 1024
"""
TAXII2_CONFIG = """
---
dummy: some
persistence_api:
class: some.test.PersistenceClass
auth_api:
class: other.test.AuthClass
parameters:
c: 3
taxii1:
taxii2:
persistence_api:
class: some.test.Taxii2PersistenceClass
parameters:
a: 1
b: 2
max_content_length: 1024
"""
DEFAULT_BASE_VALUES = {
"domain": "localhost:9000",
"support_basic_auth": True,
"return_server_error_details": False,
"logging": {"opentaxii": "info", "root": "info"},
"auth_api": {
"class": "other.test.AuthClass",
"parameters": {
"c": 3,
"create_tables": True,
"db_connection": "sqlite:////tmp/auth.db",
"secret": "SECRET-STRING-NEEDS-TO-BE-CHANGED",
"token_ttl_secs": 3600,
},
},
}
DEFAULT_TAXII1_VALUES = {
"save_raw_inbox_messages": True,
"xml_parser_supports_huge_tree": True,
"unauthorized_status": "UNAUTHORIZED",
"hooks": None,
"count_blocks_in_poll_responses": False,
}
TAXII1_VALUES = {
"persistence_api": {
"class": "some.test.PersistenceClass",
"parameters": {
"a": 1,
"b": 2,
"create_tables": True,
"db_connection": "sqlite:////tmp/data.db",
},
},
}
TAXII2_VALUES = {
"persistence_api": {
"class": "some.test.Taxii2PersistenceClass",
"parameters": {
"a": 1,
"b": 2,
},
},
"max_content_length": 1024,
}
EXPECTED_VALUES = {
BACKWARDS_COMPAT_CONFIG: {
**DEFAULT_BASE_VALUES,
"taxii1": {
**DEFAULT_TAXII1_VALUES,
**TAXII1_VALUES,
},
"taxii2": None,
},
COMBINED_CONFIG: {
**DEFAULT_BASE_VALUES,
"taxii1": {
**DEFAULT_TAXII1_VALUES,
**TAXII1_VALUES,
},
"taxii2": {
**TAXII2_VALUES,
},
},
TAXII2_CONFIG: {
**DEFAULT_BASE_VALUES,
"taxii1": None,
"taxii2": {
**TAXII2_VALUES,
},
},
}
DEPRECATION_WARNING = {
BACKWARDS_COMPAT_CONFIG: True,
COMBINED_CONFIG: False,
TAXII2_CONFIG: False,
}
TAXII2_ONLY_WARNING = {
BACKWARDS_COMPAT_CONFIG: False,
COMBINED_CONFIG: False,
TAXII2_CONFIG: True,
}
@pytest.fixture(
scope="module",
params=[BACKWARDS_COMPAT_CONFIG, COMBINED_CONFIG, TAXII2_CONFIG],
ids=["BACKWARDS_COMPAT_CONFIG", "COMBINED_CONFIG", "TAXII2_CONFIG"],
)
def config_file_name_expected_value(request):
config = request.param
f = tempfile.NamedTemporaryFile(delete=False)
f.write(config.encode("utf-8"))
f.close()
yield f.name, EXPECTED_VALUES[config], DEPRECATION_WARNING[
config
], TAXII2_ONLY_WARNING[config]
os.unlink(f.name)
def test_custom_config_file(config_file_name_expected_value):
(
config_file_name,
expected_value,
deprecation_warning,
taxii2_only_warning,
) = config_file_name_expected_value
warning_classes = (UserWarning,)
if deprecation_warning or taxii2_only_warning:
warning_classes += (DeprecationWarning,)
expected_warnings = {"Ignoring invalid configuration item 'dummy'."}
if deprecation_warning:
expected_warnings |= {
f"Setting taxii1 attributes at top level is deprecated. Please nest '{key}' inside 'taxii1'."
for key in ["persistence_api"]
}
if taxii2_only_warning:
expected_warnings |= {
f"Running in taxii2-only mode. Dropping deprecated top level taxii1 attribute '{key}'."
for key in ["persistence_api"]
}
with pytest.warns(warning_classes) as warnings:
config = ServerConfig(extra_configs=[config_file_name])
assert dict(config) == expected_value
assert set(str(warning.message) for warning in warnings) == expected_warnings
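# The environment-variable fixtures below exercise ServerConfig's env parsing:
# keys use the OPENTAXII prefix and double underscores for nesting, so (for
# example) OPENTAXII__TAXII1__PERSISTENCE_API__CLASS ends up as
# config["taxii1"]["persistence_api"]["class"] in the cleaned options.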
BACKWARDS_COMPAT_ENVVARS = {
"input": {
"OPENTAXII_DOMAIN": "hostname:1337",
"OPENTAXII__SUPPORT_BASIC_AUTH": "yes",
"OPENTAXII__PERSISTENCE_API__CLASS": "something.Else",
"OPENTAXII__PERSISTENCE_API__OTHER": "1",
},
"expected": {
"domain": "hostname:1337",
"support_basic_auth": True,
"taxii1": {"persistence_api": {"class": "something.Else", "other": 1}},
},
}
COMBINED_ENVVARS = {
"input": {
"OPENTAXII__TAXII1__PERSISTENCE_API__CLASS": "something.Else",
"OPENTAXII__TAXII1__PERSISTENCE_API__OTHER": "1",
"OPENTAXII__TAXII2__PERSISTENCE_API__CLASS": "something.Else2",
"OPENTAXII__TAXII2__PERSISTENCE_API__OTHER": "2",
"OPENTAXII__TAXII2__MAX_CONTENT_LENGTH": "1024",
},
"expected": {
"taxii1": {"persistence_api": {"class": "something.Else", "other": 1}},
"taxii2": {"persistence_api": {"class": "something.Else2", "other": 2}, "max_content_length": 1024},
},
}
TAXII2_ENVVARS = {
"input": {
"OPENTAXII__TAXII2__PERSISTENCE_API__CLASS": "something.Else2",
"OPENTAXII__TAXII2__PERSISTENCE_API__OTHER": "2",
"OPENTAXII__TAXII2__MAX_CONTENT_LENGTH": "1024",
},
"expected": {
"taxii2": {"persistence_api": {"class": "something.Else2", "other": 2}, "max_content_length": 1024},
},
}
@pytest.fixture(
scope="module",
params=[BACKWARDS_COMPAT_ENVVARS, COMBINED_ENVVARS, TAXII2_ENVVARS],
ids=["BACKWARDS_COMPAT_ENVVARS", "COMBINED_ENVVARS", "TAXII2_ENVVARS"],
)
def envvars_expected_value(request):
yield request.param["input"], request.param["expected"]
def test_env_vars_config(envvars_expected_value):
envvars, expected_value = envvars_expected_value
assert (
ServerConfig._clean_options(ServerConfig._get_env_config(env=envvars))
== expected_value
)
|
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for compute resource tracking."""
import copy
import six
import uuid
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from nova.compute import resource_tracker
from nova.compute import resources
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova.objects import pci_device_pool
from nova import rpc
from nova import test
from nova.tests.unit.compute.monitors import test_monitors
from nova.tests.unit.pci import fakes as pci_fakes
from nova.virt import driver
FAKE_VIRT_MEMORY_MB = 5
FAKE_VIRT_MEMORY_OVERHEAD = 1
FAKE_VIRT_MEMORY_WITH_OVERHEAD = (
FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD)
FAKE_VIRT_NUMA_TOPOLOGY = objects.NUMATopology(
cells=[objects.NUMACell(id=0, cpuset=set([1, 2]), memory=3072,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([])),
objects.NUMACell(id=1, cpuset=set([3, 4]), memory=3072,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([]))])
FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD = objects.NUMATopologyLimits(
cpu_allocation_ratio=2, ram_allocation_ratio=2)
ROOT_GB = 5
EPHEMERAL_GB = 1
FAKE_VIRT_LOCAL_GB = ROOT_GB + EPHEMERAL_GB
FAKE_VIRT_VCPUS = 1
FAKE_VIRT_STATS = {'virt_stat': 10}
FAKE_VIRT_STATS_COERCED = {'virt_stat': '10'}
FAKE_VIRT_STATS_JSON = jsonutils.dumps(FAKE_VIRT_STATS)
RESOURCE_NAMES = ['vcpu']
CONF = cfg.CONF
class UnsupportedVirtDriver(driver.ComputeDriver):
"""Pretend version of a lame virt driver."""
def __init__(self):
super(UnsupportedVirtDriver, self).__init__(None)
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
# no support for getting resource usage info
return {}
class FakeVirtDriver(driver.ComputeDriver):
def __init__(self, pci_support=False, stats=None,
numa_topology=FAKE_VIRT_NUMA_TOPOLOGY):
super(FakeVirtDriver, self).__init__(None)
self.memory_mb = FAKE_VIRT_MEMORY_MB
self.local_gb = FAKE_VIRT_LOCAL_GB
self.vcpus = FAKE_VIRT_VCPUS
self.numa_topology = numa_topology
self.memory_mb_used = 0
self.local_gb_used = 0
self.pci_support = pci_support
self.pci_devices = [
{
'label': 'label_8086_0443',
'dev_type': 'type-VF',
'compute_node_id': 1,
'address': '0000:00:01.1',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1
},
{
'label': 'label_8086_0443',
'dev_type': 'type-VF',
'compute_node_id': 1,
'address': '0000:00:01.2',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1
},
{
'label': 'label_8086_0443',
'dev_type': 'type-PF',
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1
},
{
'label': 'label_8086_0123',
'dev_type': 'type-PCI',
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '0123',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1
},
{
'label': 'label_8086_7891',
'dev_type': 'type-VF',
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '7891',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': None
},
] if self.pci_support else []
self.pci_stats = [
{
'count': 2,
'vendor_id': '8086',
'product_id': '0443',
'numa_node': 1
},
{
'count': 1,
'vendor_id': '8086',
'product_id': '7891',
'numa_node': None
},
] if self.pci_support else []
if stats is not None:
self.stats = stats
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
d = {
'vcpus': self.vcpus,
'memory_mb': self.memory_mb,
'local_gb': self.local_gb,
'vcpus_used': 0,
'memory_mb_used': self.memory_mb_used,
'local_gb_used': self.local_gb_used,
'hypervisor_type': 'fake',
'hypervisor_version': 0,
'hypervisor_hostname': 'fakehost',
'cpu_info': '',
'numa_topology': (
self.numa_topology._to_json() if self.numa_topology else None),
}
if self.pci_support:
d['pci_passthrough_devices'] = jsonutils.dumps(self.pci_devices)
if hasattr(self, 'stats'):
d['stats'] = self.stats
return d
def estimate_instance_overhead(self, instance_info):
instance_info['memory_mb'] # make sure memory value is present
overhead = {
'memory_mb': FAKE_VIRT_MEMORY_OVERHEAD
}
return overhead # just return a constant value for testing
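    # Because the overhead above is a constant FAKE_VIRT_MEMORY_OVERHEAD, a claim
    # for the fake flavor's FAKE_VIRT_MEMORY_MB of RAM is expected to consume
    # FAKE_VIRT_MEMORY_WITH_OVERHEAD (5 + 1 = 6) MB in these resource tracker tests.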
class BaseTestCase(test.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
self.context = context.get_admin_context()
self.flags(pci_passthrough_whitelist=[
'{"vendor_id": "8086", "product_id": "0443"}',
'{"vendor_id": "8086", "product_id": "7891"}'])
self.flags(use_local=True, group='conductor')
self.conductor = self.start_service('conductor',
manager=CONF.conductor.manager)
self._instances = {}
self._instance_types = {}
self.stubs.Set(objects.InstanceList, 'get_by_host_and_node',
self._fake_instance_get_by_host_and_node)
self.stubs.Set(self.conductor.db,
'flavor_get', self._fake_flavor_get)
self.host = 'fakehost'
self.compute = self._create_compute_node()
self.updated = False
self.deleted = False
self.update_call_count = 0
def _create_compute_node(self, values=None):
# This creates a db representation of a compute_node.
compute = {
"id": 1,
"service_id": 1,
"host": "fakehost",
"vcpus": 1,
"memory_mb": 1,
"local_gb": 1,
"vcpus_used": 1,
"memory_mb_used": 1,
"local_gb_used": 1,
"free_ram_mb": 1,
"free_disk_gb": 1,
"current_workload": 1,
"running_vms": 0,
"cpu_info": None,
"numa_topology": None,
"stats": '{"num_instances": "1"}',
"hypervisor_hostname": "fakenode",
'hypervisor_version': 1,
'hypervisor_type': 'fake-hyp',
'disk_available_least': None,
'host_ip': None,
'metrics': None,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
}
if values:
compute.update(values)
return compute
def _create_compute_node_obj(self, context):
# Use the db representation of a compute node returned
# by _create_compute_node() to create an equivalent compute
# node object.
compute = self._create_compute_node()
compute_obj = objects.ComputeNode()
compute_obj = objects.ComputeNode._from_db_object(
context, compute_obj, compute)
return compute_obj
def _create_service(self, host="fakehost", compute=None):
if compute:
compute = [compute]
service = {
"id": 1,
"host": host,
"binary": "nova-compute",
"topic": "compute",
"compute_node": compute,
"report_count": 0,
'disabled': False,
'disabled_reason': None,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'last_seen_up': None,
}
return service
def _fake_instance_obj(self, stash=True, flavor=None, **kwargs):
# Default to an instance ready to resize to or from the same
# instance_type
flavor = flavor or self._fake_flavor_create()
if not isinstance(flavor, objects.Flavor):
flavor = objects.Flavor(**flavor)
instance_uuid = str(uuid.uuid1())
instance = objects.Instance(context=self.context, uuid=instance_uuid,
flavor=flavor)
instance.update({
'vm_state': vm_states.RESIZED,
'task_state': None,
'ephemeral_key_uuid': None,
'os_type': 'Linux',
'project_id': '123456',
'host': None,
'node': None,
'instance_type_id': flavor['id'],
'memory_mb': flavor['memory_mb'],
'vcpus': flavor['vcpus'],
'root_gb': flavor['root_gb'],
'ephemeral_gb': flavor['ephemeral_gb'],
'launched_on': None,
'system_metadata': {},
'availability_zone': None,
'vm_mode': None,
'reservation_id': None,
'display_name': None,
'default_swap_device': None,
'power_state': None,
'scheduled_at': None,
'access_ip_v6': None,
'access_ip_v4': None,
'key_name': None,
'updated_at': None,
'cell_name': None,
'locked': None,
'locked_by': None,
'launch_index': None,
'architecture': None,
'auto_disk_config': None,
'terminated_at': None,
'ramdisk_id': None,
'user_data': None,
'cleaned': None,
'deleted_at': None,
'id': 333,
'disable_terminate': None,
'hostname': None,
'display_description': None,
'key_data': None,
'deleted': None,
'default_ephemeral_device': None,
'progress': None,
'launched_at': None,
'config_drive': None,
'kernel_id': None,
'user_id': None,
'shutdown_terminate': None,
'created_at': None,
'image_ref': None,
'root_device_name': None,
})
if stash:
instance.old_flavor = flavor
instance.new_flavor = flavor
instance.numa_topology = kwargs.pop('numa_topology', None)
instance.update(kwargs)
self._instances[instance_uuid] = instance
return instance
def _fake_flavor_create(self, **kwargs):
instance_type = {
'id': 1,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'disabled': False,
'is_public': True,
'name': 'fakeitype',
'memory_mb': FAKE_VIRT_MEMORY_MB,
'vcpus': FAKE_VIRT_VCPUS,
'root_gb': ROOT_GB,
'ephemeral_gb': EPHEMERAL_GB,
'swap': 0,
'rxtx_factor': 1.0,
'vcpu_weight': 1,
'flavorid': 'fakeflavor',
'extra_specs': {},
}
instance_type.update(**kwargs)
id_ = instance_type['id']
self._instance_types[id_] = instance_type
return instance_type
def _fake_instance_get_by_host_and_node(self, context, host, nodename,
expected_attrs=None):
return objects.InstanceList(
objects=[i for i in self._instances.values() if i['host'] == host])
def _fake_flavor_get(self, ctxt, id_):
return self._instance_types[id_]
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
self.update_call_count += 1
self.updated = True
self.compute.update(values)
return self.compute
def _driver(self):
return FakeVirtDriver()
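    # Build a ResourceTracker for the fake host/node, wired to the driver
    # returned by _driver() and seeded with a compute node object;
    # subclasses override _driver() to change the virt driver behaviour.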
def _tracker(self, host=None):
if host is None:
host = self.host
node = "fakenode"
driver = self._driver()
tracker = resource_tracker.ResourceTracker(host, driver, node)
tracker.compute_node = self._create_compute_node_obj(self.context)
tracker.ext_resources_handler = \
resources.ResourceHandler(RESOURCE_NAMES, True)
return tracker
class UnsupportedDriverTestCase(BaseTestCase):
"""Resource tracking should be disabled when the virt driver doesn't
support it.
"""
def setUp(self):
super(UnsupportedDriverTestCase, self).setUp()
self.tracker = self._tracker()
# seed tracker with data:
self.tracker.update_available_resource(self.context)
def _driver(self):
return UnsupportedVirtDriver()
def test_disabled(self):
# disabled = no compute node stats
self.assertTrue(self.tracker.disabled)
self.assertIsNone(self.tracker.compute_node)
def test_disabled_claim(self):
# basic claim:
instance = self._fake_instance_obj()
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
def test_disabled_instance_claim(self):
# instance variation:
instance = self._fake_instance_obj()
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
@mock.patch('nova.objects.Instance.save')
def test_disabled_instance_context_claim(self, mock_save):
# instance context manager variation:
instance = self._fake_instance_obj()
self.tracker.instance_claim(self.context, instance)
with self.tracker.instance_claim(self.context, instance) as claim:
self.assertEqual(0, claim.memory_mb)
def test_disabled_updated_usage(self):
instance = self._fake_instance_obj(host='fakehost', memory_mb=5,
root_gb=10)
self.tracker.update_usage(self.context, instance)
def test_disabled_resize_claim(self):
instance = self._fake_instance_obj()
instance_type = self._fake_flavor_create()
claim = self.tracker.resize_claim(self.context, instance,
instance_type)
self.assertEqual(0, claim.memory_mb)
self.assertEqual(instance['uuid'], claim.migration['instance_uuid'])
self.assertEqual(instance_type['id'],
claim.migration['new_instance_type_id'])
def test_disabled_resize_context_claim(self):
instance = self._fake_instance_obj()
instance_type = self._fake_flavor_create()
with self.tracker.resize_claim(self.context, instance, instance_type) \
as claim:
self.assertEqual(0, claim.memory_mb)
class MissingServiceTestCase(BaseTestCase):
def setUp(self):
super(MissingServiceTestCase, self).setUp()
self.context = context.get_admin_context()
self.tracker = self._tracker()
def test_missing_service(self):
self.tracker.compute_node = None
self.tracker._get_service = mock.Mock(return_value=None)
self.tracker.update_available_resource(self.context)
self.assertTrue(self.tracker.disabled)
class MissingComputeNodeTestCase(BaseTestCase):
def setUp(self):
super(MissingComputeNodeTestCase, self).setUp()
self.tracker = self._tracker()
self.stubs.Set(db, 'service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_get_by_host_and_nodename',
self._fake_compute_node_get_by_host_and_nodename)
self.stubs.Set(db, 'compute_node_create',
self._fake_create_compute_node)
self.tracker.scheduler_client.update_resource_stats = mock.Mock()
def _fake_create_compute_node(self, context, values):
self.created = True
return self._create_compute_node(values)
def _fake_service_get_by_compute_host(self, ctx, host):
# return a service with no joined compute
service = self._create_service()
return service
def _fake_compute_node_get_by_host_and_nodename(self, ctx, host, nodename):
# return no compute node
raise exception.ComputeHostNotFound(host=host)
def test_create_compute_node(self):
self.tracker.compute_node = None
self.tracker.update_available_resource(self.context)
self.assertTrue(self.created)
def test_enabled(self):
self.tracker.update_available_resource(self.context)
self.assertFalse(self.tracker.disabled)
class BaseTrackerTestCase(BaseTestCase):
def setUp(self):
# setup plumbing for a working resource tracker with required
# database models and a compatible compute driver:
super(BaseTrackerTestCase, self).setUp()
self.tracker = self._tracker()
self._migrations = {}
self.stubs.Set(db, 'service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_get_by_host_and_nodename',
self._fake_compute_node_get_by_host_and_nodename)
self.stubs.Set(db, 'compute_node_update',
self._fake_compute_node_update)
self.stubs.Set(db, 'compute_node_delete',
self._fake_compute_node_delete)
self.stubs.Set(db, 'migration_update',
self._fake_migration_update)
self.stubs.Set(db, 'migration_get_in_progress_by_host_and_node',
self._fake_migration_get_in_progress_by_host_and_node)
# Note that this must be called before the call to _init_tracker()
patcher = pci_fakes.fake_pci_whitelist()
self.addCleanup(patcher.stop)
self._init_tracker()
self.limits = self._limits()
def _fake_service_get_by_compute_host(self, ctx, host):
self.service = self._create_service(host, compute=self.compute)
return self.service
def _fake_compute_node_get_by_host_and_nodename(self, ctx, host, nodename):
self.compute = self._create_compute_node()
return self.compute
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
self.update_call_count += 1
self.updated = True
self.compute.update(values)
return self.compute
def _fake_compute_node_delete(self, ctx, compute_node_id):
self.deleted = True
self.compute.update({'deleted': 1})
return self.compute
def _fake_migration_get_in_progress_by_host_and_node(self, ctxt, host,
node):
status = ['confirmed', 'reverted', 'error']
migrations = []
for migration in self._migrations.values():
migration = obj_base.obj_to_primitive(migration)
if migration['status'] in status:
continue
uuid = migration['instance_uuid']
migration['instance'] = self._instances[uuid]
migrations.append(migration)
return migrations
def _fake_migration_update(self, ctxt, migration_id, values):
# cheat and assume there's only 1 migration present
        migration = list(self._migrations.values())[0]
migration.update(values)
return migration
def _init_tracker(self):
self.tracker.update_available_resource(self.context)
def _limits(self, memory_mb=FAKE_VIRT_MEMORY_WITH_OVERHEAD,
disk_gb=FAKE_VIRT_LOCAL_GB,
vcpus=FAKE_VIRT_VCPUS,
numa_topology=FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD):
"""Create limits dictionary used for oversubscribing resources."""
return {
'memory_mb': memory_mb,
'disk_gb': disk_gb,
'vcpu': vcpus,
'numa_topology': numa_topology,
}
def assertEqualNUMAHostTopology(self, expected, got):
attrs = ('cpuset', 'memory', 'id', 'cpu_usage', 'memory_usage')
if None in (expected, got):
if expected != got:
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
else:
return
if len(expected) != len(got):
raise AssertionError("Topologies don't match due to different "
"number of cells. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
for exp_cell, got_cell in zip(expected.cells, got.cells):
for attr in attrs:
if getattr(exp_cell, attr) != getattr(got_cell, attr):
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
def assertEqualPciDevicePool(self, expected, observed):
self.assertEqual(expected.product_id, observed.product_id)
self.assertEqual(expected.vendor_id, observed.vendor_id)
self.assertEqual(expected.tags, observed.tags)
self.assertEqual(expected.count, observed.count)
def assertEqualPciDevicePoolList(self, expected, observed):
ex_objs = expected.objects
ob_objs = observed.objects
self.assertEqual(len(ex_objs), len(ob_objs))
for i in range(len(ex_objs)):
self.assertEqualPciDevicePool(ex_objs[i], ob_objs[i])
def _assert(self, value, field, tracker=None):
if tracker is None:
tracker = self.tracker
if field not in tracker.compute_node:
raise test.TestingException(
"'%(field)s' not in compute node." % {'field': field})
x = tracker.compute_node[field]
if field == 'numa_topology':
self.assertEqualNUMAHostTopology(
value, objects.NUMATopology.obj_from_db_obj(x))
else:
self.assertEqual(value, x)
class TrackerTestCase(BaseTrackerTestCase):
def test_free_ram_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.memory_mb - driver.memory_mb_used
self.assertEqual(mem_free, self.tracker.compute_node.free_ram_mb)
def test_free_disk_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.local_gb - driver.local_gb_used
self.assertEqual(mem_free, self.tracker.compute_node.free_disk_gb)
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
driver = self._driver()
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node.current_workload)
expected = pci_device_pool.from_pci_stats(driver.pci_stats)
self.assertEqual(expected,
self.tracker.compute_node.pci_device_pools)
def test_set_instance_host_and_node(self):
inst = objects.Instance()
with mock.patch.object(inst, 'save') as mock_save:
self.tracker._set_instance_host_and_node(self.context, inst)
mock_save.assert_called_once_with()
self.assertEqual(self.tracker.host, inst.host)
self.assertEqual(self.tracker.nodename, inst.node)
self.assertEqual(self.tracker.host, inst.launched_on)
class SchedulerClientTrackerTestCase(BaseTrackerTestCase):
def setUp(self):
super(SchedulerClientTrackerTestCase, self).setUp()
self.tracker.scheduler_client.update_resource_stats = mock.Mock()
def test_update_resource(self):
        # NOTE(pmurray): we are not doing a full pass through the resource
        # tracker's update path, so it is safer to do two updates and look
        # for differences than to rely on the initial state being the same
        # as an update.
urs_mock = self.tracker.scheduler_client.update_resource_stats
self.tracker._update(self.context)
urs_mock.reset_mock()
# change a compute node value to simulate a change
self.tracker.compute_node.local_gb_used += 1
self.tracker._update(self.context)
urs_mock.assert_called_once_with(self.tracker.compute_node)
def test_no_update_resource(self):
        # NOTE(pmurray): we are not doing a full pass through the resource
        # tracker's update path, so it is safer to do two updates and look
        # for differences than to rely on the initial state being the same
        # as an update.
self.tracker._update(self.context)
update = self.tracker.scheduler_client.update_resource_stats
update.reset_mock()
self.tracker._update(self.context)
self.assertFalse(update.called, "update_resource_stats should not be "
"called when there is no change")
class TrackerPciStatsTestCase(BaseTrackerTestCase):
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
driver = self._driver()
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node.current_workload)
expected_pools = pci_device_pool.from_pci_stats(driver.pci_stats)
observed_pools = self.tracker.compute_node.pci_device_pools
self.assertEqualPciDevicePoolList(expected_pools, observed_pools)
def _driver(self):
return FakeVirtDriver(pci_support=True)
class TrackerExtraResourcesTestCase(BaseTrackerTestCase):
def test_set_empty_ext_resources(self):
resources = self._create_compute_node_obj(self.context)
del resources.stats
self.tracker._write_ext_resources(resources)
self.assertEqual({}, resources.stats)
def test_set_extra_resources(self):
def fake_write_resources(resources):
resources['stats']['resA'] = '123'
resources['stats']['resB'] = 12
self.stubs.Set(self.tracker.ext_resources_handler,
'write_resources',
fake_write_resources)
resources = self._create_compute_node_obj(self.context)
del resources.stats
self.tracker._write_ext_resources(resources)
expected = {"resA": "123", "resB": "12"}
self.assertEqual(sorted(expected),
sorted(resources.stats))
class InstanceClaimTestCase(BaseTrackerTestCase):
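    # _instance_topology/_claim_topology build matching guest and host NUMA
    # views so the claim tests can assert per-cell memory and CPU usage.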
def _instance_topology(self, mem):
mem = mem * 1024
return objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=0, cpuset=set([1]), memory=mem),
objects.InstanceNUMACell(
id=1, cpuset=set([3]), memory=mem)])
def _claim_topology(self, mem, cpus=1):
if self.tracker.driver.numa_topology is None:
return None
mem = mem * 1024
return objects.NUMATopology(
cells=[objects.NUMACell(
id=0, cpuset=set([1, 2]), memory=3072, cpu_usage=cpus,
memory_usage=mem, mempages=[], siblings=[],
pinned_cpus=set([])),
objects.NUMACell(
id=1, cpuset=set([3, 4]), memory=3072, cpu_usage=cpus,
memory_usage=mem, mempages=[], siblings=[],
pinned_cpus=set([]))])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_update_usage_only_for_tracked(self, mock_get):
flavor = self._fake_flavor_create()
claim_mem = flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD
claim_gb = flavor['root_gb'] + flavor['ephemeral_gb']
claim_topology = self._claim_topology(claim_mem / 2)
instance_topology = self._instance_topology(claim_mem / 2)
instance = self._fake_instance_obj(
flavor=flavor, task_state=None,
numa_topology=instance_topology)
self.tracker.update_usage(self.context, instance)
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'current_workload')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance,
self.limits)
self.assertNotEqual(0, claim.memory_mb)
self._assert(claim_mem, 'memory_mb_used')
self._assert(claim_gb, 'local_gb_used')
self._assert(claim_topology, 'numa_topology')
# now update should actually take effect
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self._assert(claim_mem, 'memory_mb_used')
self._assert(claim_gb, 'local_gb_used')
self._assert(claim_topology, 'numa_topology')
self._assert(1, 'current_workload')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim_and_abort(self, mock_get):
claim_mem = 3
claim_mem_total = 3 + FAKE_VIRT_MEMORY_OVERHEAD
claim_disk = 2
claim_topology = self._claim_topology(claim_mem_total / 2)
instance_topology = self._instance_topology(claim_mem_total / 2)
instance = self._fake_instance_obj(memory_mb=claim_mem,
root_gb=claim_disk, ephemeral_gb=0,
numa_topology=instance_topology)
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance,
self.limits)
self.assertIsNotNone(claim)
self.assertEqual(claim_mem_total, self.compute["memory_mb_used"])
self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
self.compute["free_ram_mb"])
self.assertEqualNUMAHostTopology(
claim_topology, objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(claim_disk, self.compute["local_gb_used"])
self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
self.compute["free_disk_gb"])
claim.abort()
self.assertEqual(0, self.compute["memory_mb_used"])
self.assertEqual(FAKE_VIRT_MEMORY_MB, self.compute["free_ram_mb"])
self.assertEqualNUMAHostTopology(
FAKE_VIRT_NUMA_TOPOLOGY,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(0, self.compute["local_gb_used"])
self.assertEqual(FAKE_VIRT_LOCAL_GB, self.compute["free_disk_gb"])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_instance_claim_with_oversubscription(self, mock_get):
memory_mb = FAKE_VIRT_MEMORY_MB * 2
root_gb = ephemeral_gb = FAKE_VIRT_LOCAL_GB
vcpus = FAKE_VIRT_VCPUS * 2
claim_topology = self._claim_topology(3)
instance_topology = self._instance_topology(3)
limits = {'memory_mb': memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
'disk_gb': root_gb * 2,
'vcpu': vcpus,
'numa_topology': FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD}
instance = self._fake_instance_obj(memory_mb=memory_mb,
root_gb=root_gb, ephemeral_gb=ephemeral_gb,
numa_topology=instance_topology)
with mock.patch.object(instance, 'save'):
self.tracker.instance_claim(self.context, instance, limits)
self.assertEqual(memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node.memory_mb_used)
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(root_gb * 2,
self.tracker.compute_node.local_gb_used)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
def test_additive_claims(self, mock_save, mock_get):
self.limits['vcpu'] = 2
claim_topology = self._claim_topology(2, cpus=2)
flavor = self._fake_flavor_create(
memory_mb=1, root_gb=1, ephemeral_gb=0)
instance_topology = self._instance_topology(1)
instance = self._fake_instance_obj(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
instance = self._fake_instance_obj(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
self.assertEqual(2 * (flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD),
self.tracker.compute_node.memory_mb_used)
self.assertEqual(2 * (flavor['root_gb'] + flavor['ephemeral_gb']),
self.tracker.compute_node.local_gb_used)
self.assertEqual(2 * flavor['vcpus'],
self.tracker.compute_node.vcpus_used)
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
def test_context_claim_with_exception(self, mock_save, mock_get):
instance = self._fake_instance_obj(memory_mb=1, root_gb=1,
ephemeral_gb=1)
try:
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
raise test.TestingException()
except test.TestingException:
pass
self.assertEqual(0, self.tracker.compute_node.memory_mb_used)
self.assertEqual(0, self.tracker.compute_node.local_gb_used)
self.assertEqual(0, self.compute['memory_mb_used'])
self.assertEqual(0, self.compute['local_gb_used'])
self.assertEqualNUMAHostTopology(
FAKE_VIRT_NUMA_TOPOLOGY,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_instance_context_claim(self, mock_get_all, mock_save, mock_get):
flavor = self._fake_flavor_create(
memory_mb=1, root_gb=2, ephemeral_gb=3)
claim_topology = self._claim_topology(1)
instance_topology = self._instance_topology(1)
instance = self._fake_instance_obj(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node.memory_mb_used)
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.tracker.compute_node.local_gb_used)
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute['memory_mb_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.compute['local_gb_used'])
# after exiting claim context, build is marked as finished. usage
# totals should be same:
mock_get_all.return_value = [instance]
self.tracker.update_available_resource(self.context)
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node.memory_mb_used)
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.tracker.compute_node.local_gb_used)
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute['memory_mb_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.compute['local_gb_used'])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_update_load_stats_for_instance(self, mock_get):
instance = self._fake_instance_obj(task_state=task_states.SCHEDULING)
with mock.patch.object(instance, 'save'):
with self.tracker.instance_claim(self.context, instance):
pass
self.assertEqual(1, self.tracker.compute_node.current_workload)
instance['vm_state'] = vm_states.ACTIVE
instance['task_state'] = None
instance['host'] = 'fakehost'
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node.current_workload)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
def test_cpu_stats(self, mock_save, mock_get):
limits = {'disk_gb': 100, 'memory_mb': 100}
self.assertEqual(0, self.tracker.compute_node.vcpus_used)
vcpus = 1
instance = self._fake_instance_obj(vcpus=vcpus)
# should not do anything until a claim is made:
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node.vcpus_used)
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
# instance state can change without modifying vcpus in use:
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
add_vcpus = 10
vcpus += add_vcpus
instance = self._fake_instance_obj(vcpus=add_vcpus)
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
instance['vm_state'] = vm_states.DELETED
self.tracker.update_usage(self.context, instance)
vcpus -= add_vcpus
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
def test_skip_deleted_instances(self):
# ensure that the audit process skips instances that have vm_state
# DELETED, but the DB record is not yet deleted.
self._fake_instance_obj(vm_state=vm_states.DELETED, host=self.host)
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node.memory_mb_used)
self.assertEqual(0, self.tracker.compute_node.local_gb_used)
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_deleted_instances_with_migrations(self, mock_migration_list):
migration = objects.Migration(context=self.context,
migration_type='resize',
instance_uuid='invalid')
mock_migration_list.return_value = [migration]
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node.memory_mb_used)
self.assertEqual(0, self.tracker.compute_node.local_gb_used)
mock_migration_list.assert_called_once_with(self.context,
"fakehost",
"fakenode")
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_instances_with_live_migrations(self, mock_migration_list):
instance = self._fake_instance_obj()
migration = objects.Migration(context=self.context,
migration_type='live-migration',
instance_uuid=instance.uuid)
mock_migration_list.return_value = [migration]
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
mock_migration_list.assert_called_once_with(self.context,
"fakehost",
"fakenode")
@mock.patch('nova.compute.claims.Claim')
@mock.patch('nova.objects.Instance.save')
def test_claim_saves_numa_topology(self, mock_save, mock_claim):
def fake_save():
self.assertEqual(set(['numa_topology', 'host', 'node',
'launched_on']),
inst.obj_what_changed())
mock_save.side_effect = fake_save
inst = objects.Instance(host=None, node=None, memory_mb=1024)
inst.obj_reset_changes()
numa = objects.InstanceNUMATopology()
claim = mock.MagicMock()
claim.claimed_numa_topology = numa
mock_claim.return_value = claim
with mock.patch.object(self.tracker, '_update_usage_from_instance'):
self.tracker.instance_claim(self.context, inst)
mock_save.assert_called_once_with()
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim_sets_instance_host_and_node(self, mock_get):
instance = self._fake_instance_obj()
self.assertIsNone(instance['host'])
self.assertIsNone(instance['launched_on'])
self.assertIsNone(instance['node'])
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance)
self.assertNotEqual(0, claim.memory_mb)
self.assertEqual('fakehost', instance['host'])
self.assertEqual('fakehost', instance['launched_on'])
self.assertEqual('fakenode', instance['node'])
class ResizeClaimTestCase(BaseTrackerTestCase):
def setUp(self):
super(ResizeClaimTestCase, self).setUp()
self.instance = self._fake_instance_obj()
self.instance_type = self._fake_flavor_create()
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim(self, mock_get):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
self.assertEqual(1, len(self.tracker.tracked_migrations))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_abort(self, mock_get):
try:
with self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits):
raise test.TestingException("abort")
except test.TestingException:
pass
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self.assertEqual(0, len(self.tracker.tracked_migrations))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_additive_claims(self, mock_get):
limits = self._limits(
2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
2 * FAKE_VIRT_LOCAL_GB,
2 * FAKE_VIRT_VCPUS)
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, limits)
instance2 = self._fake_instance_obj()
self.tracker.resize_claim(self.context, instance2, self.instance_type,
limits)
self._assert(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(2 * FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(2 * FAKE_VIRT_VCPUS, 'vcpus_used')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_revert(self, mock_get):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, {}, self.limits)
self.tracker.drop_resize_claim(self.context, self.instance)
self.assertEqual(0, len(self.tracker.tracked_instances))
self.assertEqual(0, len(self.tracker.tracked_migrations))
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
def test_resize_filter(self):
instance = self._fake_instance_obj(vm_state=vm_states.ACTIVE,
task_state=task_states.SUSPENDING)
self.assertFalse(self.tracker._instance_in_resize_state(instance))
instance = self._fake_instance_obj(vm_state=vm_states.RESIZED,
task_state=task_states.SUSPENDING)
self.assertTrue(self.tracker._instance_in_resize_state(instance))
states = [task_states.RESIZE_PREP, task_states.RESIZE_MIGRATING,
task_states.RESIZE_MIGRATED, task_states.RESIZE_FINISH]
for vm_state in [vm_states.ACTIVE, vm_states.STOPPED]:
for task_state in states:
instance = self._fake_instance_obj(vm_state=vm_state,
task_state=task_state)
result = self.tracker._instance_in_resize_state(instance)
self.assertTrue(result)
class OrphanTestCase(BaseTrackerTestCase):
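    # The driver reports per-instance usage for two UUIDs the tracker has
    # never claimed, so they show up as orphans in the usage accounting.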
def _driver(self):
class OrphanVirtDriver(FakeVirtDriver):
def get_per_instance_usage(self):
return {
'1-2-3-4-5': {'memory_mb': FAKE_VIRT_MEMORY_MB,
'uuid': '1-2-3-4-5'},
'2-3-4-5-6': {'memory_mb': FAKE_VIRT_MEMORY_MB,
'uuid': '2-3-4-5-6'},
}
return OrphanVirtDriver()
def test_usage(self):
self.assertEqual(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
self.tracker.compute_node.memory_mb_used)
def test_find(self):
# create one legit instance and verify the 2 orphans remain
self._fake_instance_obj()
orphans = self.tracker._find_orphaned_instances()
self.assertEqual(2, len(orphans))
class ComputeMonitorTestCase(BaseTestCase):
def setUp(self):
super(ComputeMonitorTestCase, self).setUp()
fake_monitors = [
'nova.tests.unit.compute.monitors.test_monitors.FakeMonitorClass1',
'nova.tests.unit.compute.monitors.test_monitors.FakeMonitorClass2']
self.flags(compute_available_monitors=fake_monitors)
self.tracker = self._tracker()
self.node_name = 'nodename'
self.user_id = 'fake'
self.project_id = 'fake'
self.info = {}
self.context = context.RequestContext(self.user_id,
self.project_id)
def test_get_host_metrics_none(self):
self.flags(compute_monitors=['FakeMontorClass1', 'FakeMonitorClass4'])
self.tracker.monitors = []
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
self.assertEqual(len(metrics), 0)
def test_get_host_metrics_one_failed(self):
self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass4'])
class1 = test_monitors.FakeMonitorClass1(self.tracker)
class4 = test_monitors.FakeMonitorClass4(self.tracker)
self.tracker.monitors = [class1, class4]
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
self.assertTrue(len(metrics) > 0)
@mock.patch.object(resource_tracker.LOG, 'warning')
def test_get_host_metrics_exception(self, mock_LOG_warning):
        self.flags(compute_monitors=['FakeMonitorClass1'])
class1 = test_monitors.FakeMonitorClass1(self.tracker)
self.tracker.monitors = [class1]
with mock.patch.object(class1, 'get_metrics',
side_effect=test.TestingException()):
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
mock_LOG_warning.assert_called_once_with(
u'Cannot get the metrics from %s.', class1)
self.assertEqual(0, len(metrics))
def test_get_host_metrics(self):
self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass2'])
class1 = test_monitors.FakeMonitorClass1(self.tracker)
class2 = test_monitors.FakeMonitorClass2(self.tracker)
self.tracker.monitors = [class1, class2]
mock_notifier = mock.Mock()
with mock.patch.object(rpc, 'get_notifier',
return_value=mock_notifier) as mock_get:
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
mock_get.assert_called_once_with(service='compute',
host=self.node_name)
expected_metrics = [{
'timestamp': 1232,
'name': 'key1',
'value': 2600,
'source': 'libvirt'
}, {
'name': 'key2',
'source': 'libvirt',
'timestamp': 123,
'value': 1600
}]
payload = {
'metrics': expected_metrics,
'host': self.tracker.host,
'host_ip': CONF.my_ip,
'nodename': self.node_name
}
mock_notifier.info.assert_called_once_with(
self.context, 'compute.metrics.update', payload)
self.assertEqual(metrics, expected_metrics)
class TrackerPeriodicTestCase(BaseTrackerTestCase):
def test_periodic_status_update(self):
# verify update called on instantiation
self.assertEqual(1, self.update_call_count)
# verify update not called if no change to resources
self.tracker.update_available_resource(self.context)
self.assertEqual(1, self.update_call_count)
# verify update is called when resources change
driver = self.tracker.driver
driver.memory_mb += 1
self.tracker.update_available_resource(self.context)
self.assertEqual(2, self.update_call_count)
def test_update_available_resource_calls_locked_inner(self):
@mock.patch.object(self.tracker, 'driver')
@mock.patch.object(self.tracker,
'_update_available_resource')
@mock.patch.object(self.tracker, '_verify_resources')
@mock.patch.object(self.tracker, '_report_hypervisor_resource_view')
def _test(mock_rhrv, mock_vr, mock_uar, mock_driver):
resources = {'there is someone in my head': 'but it\'s not me'}
mock_driver.get_available_resource.return_value = resources
self.tracker.update_available_resource(self.context)
mock_uar.assert_called_once_with(self.context, resources)
_test()
class StatsDictTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
stats as a dictionary.
"""
def _driver(self):
return FakeVirtDriver(stats=FAKE_VIRT_STATS)
def test_virt_stats(self):
# start with virt driver stats
stats = self.tracker.compute_node.stats
self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
# adding an instance should keep virt driver stats
self._fake_instance_obj(vm_state=vm_states.ACTIVE, host=self.host)
self.tracker.update_available_resource(self.context)
stats = self.tracker.compute_node.stats
# compute node stats are coerced to strings
expected_stats = copy.deepcopy(FAKE_VIRT_STATS_COERCED)
        for k, v in self.tracker.stats.items():
expected_stats[k] = six.text_type(v)
self.assertEqual(expected_stats, stats)
# removing the instances should keep only virt driver stats
self._instances = {}
self.tracker.update_available_resource(self.context)
stats = self.tracker.compute_node.stats
self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
class StatsJsonTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
stats as a json string.
"""
def _driver(self):
return FakeVirtDriver(stats=FAKE_VIRT_STATS_JSON)
def test_virt_stats(self):
# start with virt driver stats
stats = self.tracker.compute_node.stats
self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
# adding an instance should keep virt driver stats
# and add rt stats
self._fake_instance_obj(vm_state=vm_states.ACTIVE, host=self.host)
self.tracker.update_available_resource(self.context)
stats = self.tracker.compute_node.stats
# compute node stats are coerced to strings
expected_stats = copy.deepcopy(FAKE_VIRT_STATS_COERCED)
        for k, v in self.tracker.stats.items():
expected_stats[k] = six.text_type(v)
self.assertEqual(expected_stats, stats)
# removing the instances should keep only virt driver stats
self._instances = {}
self.tracker.update_available_resource(self.context)
stats = self.tracker.compute_node.stats
self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
class StatsInvalidJsonTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
an invalid type for stats.
"""
def _driver(self):
return FakeVirtDriver(stats='this is not json')
def _init_tracker(self):
# do not do initial update in setup
pass
def test_virt_stats(self):
# should throw exception for string that does not parse as json
self.assertRaises(ValueError,
self.tracker.update_available_resource,
context=self.context)
class StatsInvalidTypeTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
an invalid type for stats.
"""
def _driver(self):
return FakeVirtDriver(stats=10)
def _init_tracker(self):
# do not do initial update in setup
pass
def test_virt_stats(self):
# should throw exception for incorrect stats value type
self.assertRaises(ValueError,
self.tracker.update_available_resource,
context=self.context)
|
|
import pygame as pg
import lib.inputbox as textbox
import pyglet, os, json, time
import main
pg.init()
clock = pg.time.Clock()
global player_name, FULLSCREEN, levels_number, view, menu
view = "main"
menu = True
player_name = ""
FULLSCREEN = "True"
levels_number = 0
pg.mixer.init()
pg.mixer.music.load("sounds/main_music.mp3")
pg.mixer.music.play()
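# Game only handles menu presentation: it loads the fonts, the background
# and the button images, then creates the fullscreen/borderless display.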
class Game:
def __init__(self, FULLSCREEN):
pg.mouse.set_visible(False)
self.font = pg.font.Font('fonts/SqueakyChalkSound.ttf', 35)
self.font_small = pg.font.Font('fonts/SqueakyChalkSound.ttf', 25)
self.cursor = pg.image.load("images/cursor.png")
self.FULLSCREEN = FULLSCREEN
self.create_bg()
self.get_monitor_surface()
self.set_screen()
self.create_button()
def get_monitor_surface(self):
platform = pyglet.window.get_platform()
display = platform.get_default_display()
screen = display.get_default_screen()
screen_width = screen.width
screen_height = screen.height
return [screen_width, screen_height]
def set_screen(self):
if self.FULLSCREEN == "True":
self.screen = pg.display.set_mode((self.get_monitor_surface()[0], self.get_monitor_surface()[1]), pg.FULLSCREEN)
else:
self.screen = pg.display.set_mode((self.get_monitor_surface()[0], self.get_monitor_surface()[1]), pg.NOFRAME)
self.screen_width = pg.display.Info().current_w
self.screen_height = pg.display.Info().current_h
def create_bg(self):
self.bg = pg.image.load("images/bg.jpg")
self.rect = self.bg.get_rect()
self.rect.left, self.rect.top = [0,0]
def create_button(self):
self.but_play = pg.image.load("images/buttons/play.png")
self.but_rules = pg.image.load("images/buttons/rules.png")
self.but_scores = pg.image.load("images/buttons/scores.png")
self.but_options = pg.image.load("images/buttons/options.png")
self.but_credit = pg.image.load("images/buttons/credit.png")
self.but_quit = pg.image.load("images/buttons/quit.png")
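# The helpers below read and write the JSON files under ressources/.
# get_params() depends on the key order of main_conf.json; judging from
# save_user() and save_fullscreen_state() the keys are presumably
# "last_player", "fullscreen" and a level-count entry (assumption).
# All of these helpers assume the files exist: a failed open() is only
# printed and would leave `f` undefined.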
def get_params():
try:
f = open("ressources/main_conf.json")
except IOError as e:
print(e)
with f as json_file:
params_list = json.load(json_file)
params = [key for key in params_list]
player_name = params_list[params[0]]
FULLSCREEN = params_list[params[1]]
levels_number = int(params_list[params[2]])
return(player_name, FULLSCREEN, levels_number)
def save_fullscreen_state():
try:
f = open("ressources/main_conf.json")
except IOError as e:
print(e)
with f as json_file:
params_list = json.load(json_file)
params_list["fullscreen"] = FULLSCREEN
try:
f = open("ressources/main_conf.json", "w")
except IOError as e:
print(e)
with f as json_file:
json.dump(params_list, json_file)
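# users.json maps a player name to progress data shaped like
# {"level": 1, "levels_stars": {"1": "0"}} (see save_user() below).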
def get_levels():
try:
f = open("ressources/users.json")
except IOError as e:
print(e)
with f as json_file:
level_list = json.load(json_file)
player = [key for key in level_list if key == player_name][0]
player_level = int(level_list[player]["level"])
player_stars = []
for i in range(1, levels_number):
try:
player_stars.append(int(level_list[player]["levels_stars"][str(i)]))
except:
pass
return (player_level, player_stars)
def get_stats():
try:
f = open("ressources/stats.json")
except IOError as e:
print(e)
with f as json_file:
stats = json.load(json_file)
return stats
def save_user(player_name, new_name):
try:
f = open("ressources/users.json")
except IOError as e:
print(e)
with f as json_file:
users_file = json.load(json_file)
try:
f = open("ressources/main_conf.json")
except IOError as e:
print(e)
with f as json_file:
config_file = json.load(json_file)
player = [key for key in users_file if key == new_name]
if len(player) == 0:
users_file[new_name] = {"level":1, "levels_stars": {"1": "0"}}
config_file["last_player"] = new_name
try:
f = open("ressources/users.json", "w")
except IOError as e:
print(e)
with f as json_file:
json.dump(users_file, json_file)
try:
f = open("ressources/main_conf.json", "w")
except IOError as e:
print(e)
with f as json_file:
json.dump(config_file, json_file)
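# launcher() runs the menu loop: it draws the view selected by `view`
# ("main", "level", "options", "scores", "credits" or "rules"), reacts to
# mouse clicks on the blitted buttons, and once a level button is clicked
# it leaves the loop and hands the selection to main.start_party().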
def launcher(v="main"):
global player_name, FULLSCREEN, levels_number, view, menu
view = v
selected_level = 0
player_name, FULLSCREEN, levels_number = get_params()
player_level, player_stars = get_levels()
levels_buttons = []
levels_buttons_blit = []
but_modify = pg.image.load("images/buttons/modify.png")
but_modify_blit = but_modify.get_rect()
username = ""
username_blit = but_modify.get_rect()
for i in range(0, player_level):
if player_stars[i] == 0:
levels_buttons.append(pg.image.load("images/buttons/no_stars.png"))
elif player_stars[i] == 1:
levels_buttons.append(pg.image.load("images/buttons/1_stars.png"))
elif player_stars[i] == 2:
levels_buttons.append(pg.image.load("images/buttons/2_stars.png"))
elif player_stars[i] == 3:
levels_buttons.append(pg.image.load("images/buttons/3_stars.png"))
game = Game(FULLSCREEN)
if view != "main":
game.bg = pg.image.load("images/bg2.jpg")
but_ret = pg.image.load("images/buttons/return.png")
but_ret_blit = pg.image.load("images/buttons/return.png").get_rect()
but_lvl = {}
load_rules_img = pg.image.load("images/rules.png")
load_credits_img = pg.image.load("images/credits.png")
blackboard = pg.image.load("images/options.png")
red_chalk = pg.image.load("images/chalk_line_red.png")
chalk = pg.image.load("images/chalk_line.png")
chalk_blit = chalk.get_rect()
fullscreen_blit, red_chalk_blit = red_chalk.get_rect(), red_chalk.get_rect()
stats = get_stats()
sound_button = pg.mixer.Sound("sounds/buttons.ogg")
while menu:
if not pg.mixer.music.get_busy():
pg.mixer.music.play()
game.screen.blit(pg.transform.scale(game.bg, (game.screen_width, game.screen_height)), (0, 0))
#Main menu
if view == "main":
but_play = game.screen.blit(game.but_play, (game.screen_width/10, 250))
but_rules = game.screen.blit(game.but_rules, (game.screen_width/10, 350))
but_scores = game.screen.blit(game.but_scores, (game.screen_width/10, 450))
but_options = game.screen.blit(game.but_options, (game.screen_width/10, 550))
but_credit = game.screen.blit(game.but_credit, (game.screen_width/10, 650))
but_quit = game.screen.blit(game.but_quit, (game.screen_width/10, 750))
elif view == "level":
but_ret_blit = game.screen.blit(but_ret, (225, game.screen_height-200))
x = 225
y = 350
for i in range(0, player_level):
                #start a new row after every 6 level buttons
                if i % 6 == 0 and i != 0:
y += 250
x = 225
but_lvl[i] = game.screen.blit(levels_buttons[i], (x, y))
num_text = pg.transform.scale(pg.image.load("images/buttons/text/{}.png".format(i+1)), (64,64))
num_text_blit = game.screen.blit(num_text, (x+55,y+55))
x += 250
elif view == "options":
but_ret_blit = game.screen.blit(but_ret, (225, game.screen_height-200))
bb_blit = game.screen.blit(blackboard, (game.screen.get_width() / 2 - 200, game.screen.get_height() / 11))
game.screen.blit(game.font.render("Nom du joueur", 1, (255,255,255)), ((game.screen.get_width() / 2) -50, (game.screen.get_height() / 2) -185))
username_blit = game.screen.blit(game.font_small.render(player_name, 1, (255,255,255)), ((game.screen.get_width() / 2) -50, (game.screen.get_height() / 2) - 120))
but_modify_blit = game.screen.blit(but_modify, ((game.screen.get_width() / 2) + 230, (game.screen.get_height() / 2) - 130))
if FULLSCREEN == "True":
fullscreen_blit = game.screen.blit(game.font.render("Fullscreen On", 1, (255,255,255)), ((game.screen.get_width() / 2)-50, (game.screen.get_height() / 2) - 55))
else:
fullscreen_blit = game.screen.blit(game.font.render("Fullscreen Off", 1, (255,255,255)), ((game.screen.get_width() / 2) -50, (game.screen.get_height() / 2) - 55))
red_chalk_blit = game.screen.blit(red_chalk, ((game.screen.get_width() / 2)-50, (game.screen.get_height() / 2) - 30))
elif view == "scores":
but_ret_blit = game.screen.blit(but_ret, (225, game.screen_height-200))
bb_blit = game.screen.blit(blackboard, (game.screen.get_width() / 2 - 200, game.screen.get_height() / 11))
game.screen.blit(game.font.render("Scores", 1, (255,255,255)), ((game.screen.get_width() / 2) +40, (game.screen.get_height() / 4)+30))
chalk_blit = game.screen.blit(chalk, ((game.screen.get_width() / 2)+20, (game.screen.get_height() / 4) +80))
stats["best"].sort(reverse=True)
stats["player"].sort(reverse=True)
for i in range(0, len(stats["best"])):
game.screen.blit(game.font_small.render(stats["player"][i], 1, (255,255,255)), ((game.screen.get_width() / 2) -100, (game.screen.get_height() / 2) - 110+i*50))
game.screen.blit(game.font_small.render(str(stats["best"][i]), 1, (255,255,255)), ((game.screen.get_width() / 2) +250, (game.screen.get_height() / 2) - 110+i*50))
elif view == "credits":
but_ret_blit = game.screen.blit(but_ret, (225, game.screen_height-200))
width = int(game.screen_width/3)
credits_img = pg.transform.scale(load_credits_img, (width,int(width*(848/600))))
credits_blit = game.screen.blit(credits_img, (game.screen_width/2, game.screen_height-game.screen_height+50))
elif view == "rules":
but_ret_blit = game.screen.blit(but_ret, (225, game.screen_height-200))
width = int(game.screen_width/3)
rules_img = pg.transform.scale(load_rules_img, (width,int(width*(848/600))))
rules_blit = game.screen.blit(rules_img, (game.screen_width/2, game.screen_height-game.screen_height+50))
for event in pg.event.get():
if event.type == pg.KEYDOWN:
if event.key == pg.K_ESCAPE:
os._exit(0)
elif event.type == pg.MOUSEBUTTONUP:
if view == "main":
if but_play.collidepoint(pg.mouse.get_pos()):
game.bg = pg.image.load("images/bg2.jpg")
view = "level"
sound_button.play()
if but_quit.collidepoint(pg.mouse.get_pos()):
sound_button.play()
os._exit(0)
if but_credit.collidepoint(pg.mouse.get_pos()):
game.bg = pg.image.load("images/bg2.jpg")
view = "credits"
sound_button.play()
if but_options.collidepoint(pg.mouse.get_pos()):
game.bg = pg.image.load("images/bg2.jpg")
view = "options"
sound_button.play()
if but_rules.collidepoint(pg.mouse.get_pos()):
game.bg = pg.image.load("images/bg2.jpg")
view = "rules"
sound_button.play()
if but_scores.collidepoint(pg.mouse.get_pos()):
game.bg = pg.image.load("images/bg2.jpg")
view = "scores"
sound_button.play()
if view == "credits":
if but_ret_blit.collidepoint(pg.mouse.get_pos()):
game.bg = pg.image.load("images/bg.jpg")
view = "main"
sound_button.play()
if view == "scores":
if but_ret_blit.collidepoint(pg.mouse.get_pos()):
game.bg = pg.image.load("images/bg.jpg")
view = "main"
sound_button.play()
if view == "options":
if but_ret_blit.collidepoint(pg.mouse.get_pos()):
game.bg = pg.image.load("images/bg.jpg")
view = "main"
sound_button.play()
                    if (but_modify_blit.collidepoint(pg.mouse.get_pos()) or username_blit.collidepoint(pg.mouse.get_pos())) and username == "":
sound_button.play()
username = textbox.ask(game.screen, '')
if username != "":
save_user(player_name, username)
player_name = username
username = ""
if fullscreen_blit.collidepoint(pg.mouse.get_pos()) or red_chalk_blit.collidepoint(pg.mouse.get_pos()):
sound_button.play()
if FULLSCREEN == "True":
FULLSCREEN = "False"
game.screen = pg.display.set_mode((game.get_monitor_surface()[0], game.get_monitor_surface()[1]))
else:
FULLSCREEN = "True"
game.screen = pg.display.set_mode((game.get_monitor_surface()[0], game.get_monitor_surface()[1]), pg.FULLSCREEN)
save_fullscreen_state()
if view == "rules":
if but_ret_blit.collidepoint(pg.mouse.get_pos()):
sound_button.play()
game.bg = pg.image.load("images/bg.jpg")
view = "main"
if view == "level":
if but_ret_blit.collidepoint(pg.mouse.get_pos()):
sound_button.play()
game.bg = pg.image.load("images/bg.jpg")
view = "main"
#Detect start game
for but in but_lvl:
if but_lvl[but].collidepoint(pg.mouse.get_pos()):
sound_button.play()
menu = False
selected_level = but +1
for i in range(0, 4):
sound_button.play()
time.sleep(1)
game.screen.blit(game.cursor, pg.mouse.get_pos())
pg.display.flip()
clock.tick(60)
main.start_party(selected_level, levels_number, FULLSCREEN, player_name)
if __name__ == "__main__":
launcher()
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import json
from datetime import datetime
from xml.dom import minidom
from StringIO import StringIO
from django.conf import settings
from django.core import serializers
from django.db import transaction, connection
from django.test import TestCase, TransactionTestCase, Approximate
from django.utils import six
from django.utils import unittest
from .models import (Category, Author, Article, AuthorProfile, Actor, Movie,
Score, Player, Team)
class SerializerRegistrationTests(unittest.TestCase):
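    """Tests for registering serializers through SERIALIZATION_MODULES and
    the public/private split of the serializer registry."""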
def setUp(self):
self.old_SERIALIZATION_MODULES = getattr(settings, 'SERIALIZATION_MODULES', None)
self.old_serializers = serializers._serializers
serializers._serializers = {}
settings.SERIALIZATION_MODULES = {
"json2" : "django.core.serializers.json",
}
def tearDown(self):
serializers._serializers = self.old_serializers
if self.old_SERIALIZATION_MODULES:
settings.SERIALIZATION_MODULES = self.old_SERIALIZATION_MODULES
else:
delattr(settings, 'SERIALIZATION_MODULES')
def test_register(self):
"Registering a new serializer populates the full registry. Refs #14823"
serializers.register_serializer('json3', 'django.core.serializers.json')
public_formats = serializers.get_public_serializer_formats()
self.assertIn('json3', public_formats)
self.assertIn('json2', public_formats)
self.assertIn('xml', public_formats)
def test_unregister(self):
"Unregistering a serializer doesn't cause the registry to be repopulated. Refs #14823"
serializers.unregister_serializer('xml')
serializers.register_serializer('json3', 'django.core.serializers.json')
public_formats = serializers.get_public_serializer_formats()
self.assertNotIn('xml', public_formats)
self.assertIn('json3', public_formats)
def test_builtin_serializers(self):
"Requesting a list of serializer formats popuates the registry"
all_formats = set(serializers.get_serializer_formats())
public_formats = set(serializers.get_public_serializer_formats())
        self.assertIn('xml', all_formats)
self.assertIn('xml', public_formats)
self.assertIn('json2', all_formats)
self.assertIn('json2', public_formats)
self.assertIn('python', all_formats)
self.assertNotIn('python', public_formats)
class SerializersTestBase(object):
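    """Serializer-agnostic test mixin.

    Concrete subclasses (e.g. XmlSerializerTestCase below) are expected to
    provide serializer_name, pkless_str and the _validate_output,
    _get_field_values and _get_pk_values helpers used by these tests.
    """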
@staticmethod
def _comparison_value(value):
return value
def setUp(self):
sports = Category.objects.create(name="Sports")
music = Category.objects.create(name="Music")
op_ed = Category.objects.create(name="Op-Ed")
self.joe = Author.objects.create(name="Joe")
self.jane = Author.objects.create(name="Jane")
self.a1 = Article(
author=self.jane,
headline="Poker has no place on ESPN",
pub_date=datetime(2006, 6, 16, 11, 00)
)
self.a1.save()
self.a1.categories = [sports, op_ed]
self.a2 = Article(
author=self.joe,
headline="Time to reform copyright",
pub_date=datetime(2006, 6, 16, 13, 00, 11, 345)
)
self.a2.save()
self.a2.categories = [music, op_ed]
def test_serialize(self):
"""Tests that basic serialization works."""
serial_str = serializers.serialize(self.serializer_name,
Article.objects.all())
self.assertTrue(self._validate_output(serial_str))
def test_serializer_roundtrip(self):
"""Tests that serialized content can be deserialized."""
serial_str = serializers.serialize(self.serializer_name,
Article.objects.all())
models = list(serializers.deserialize(self.serializer_name, serial_str))
self.assertEqual(len(models), 2)
def test_altering_serialized_output(self):
"""
Tests the ability to create new objects by
modifying serialized content.
"""
old_headline = b"Poker has no place on ESPN"
new_headline = b"Poker has no place on television"
serial_str = serializers.serialize(self.serializer_name,
Article.objects.all())
serial_str = serial_str.replace(old_headline, new_headline)
models = list(serializers.deserialize(self.serializer_name, serial_str))
# Prior to saving, old headline is in place
self.assertTrue(Article.objects.filter(headline=old_headline))
self.assertFalse(Article.objects.filter(headline=new_headline))
for model in models:
model.save()
# After saving, new headline is in place
self.assertTrue(Article.objects.filter(headline=new_headline))
self.assertFalse(Article.objects.filter(headline=old_headline))
def test_one_to_one_as_pk(self):
"""
Tests that if you use your own primary key field
(such as a OneToOneField), it doesn't appear in the
serialized field list - it replaces the pk identifier.
"""
profile = AuthorProfile(author=self.joe,
date_of_birth=datetime(1970,1,1))
profile.save()
serial_str = serializers.serialize(self.serializer_name,
AuthorProfile.objects.all())
self.assertFalse(self._get_field_values(serial_str, 'author'))
for obj in serializers.deserialize(self.serializer_name, serial_str):
self.assertEqual(obj.object.pk, self._comparison_value(self.joe.pk))
def test_serialize_field_subset(self):
"""Tests that output can be restricted to a subset of fields"""
valid_fields = ('headline','pub_date')
invalid_fields = ("author", "categories")
serial_str = serializers.serialize(self.serializer_name,
Article.objects.all(),
fields=valid_fields)
for field_name in invalid_fields:
self.assertFalse(self._get_field_values(serial_str, field_name))
for field_name in valid_fields:
self.assertTrue(self._get_field_values(serial_str, field_name))
def test_serialize_unicode(self):
"""Tests that unicode makes the roundtrip intact"""
actor_name = "Za\u017c\u00f3\u0142\u0107"
movie_title = 'G\u0119\u015bl\u0105 ja\u017a\u0144'
ac = Actor(name=actor_name)
mv = Movie(title=movie_title, actor=ac)
ac.save()
mv.save()
serial_str = serializers.serialize(self.serializer_name, [mv])
self.assertEqual(self._get_field_values(serial_str, "title")[0], movie_title)
self.assertEqual(self._get_field_values(serial_str, "actor")[0], actor_name)
obj_list = list(serializers.deserialize(self.serializer_name, serial_str))
mv_obj = obj_list[0].object
self.assertEqual(mv_obj.title, movie_title)
def test_serialize_superfluous_queries(self):
"""Ensure no superfluous queries are made when serializing ForeignKeys
#17602
"""
ac = Actor(name='Actor name')
ac.save()
mv = Movie(title='Movie title', actor_id=ac.pk)
mv.save()
with self.assertNumQueries(0):
serial_str = serializers.serialize(self.serializer_name, [mv])
def test_serialize_with_null_pk(self):
"""
Tests that serialized data with no primary key results
in a model instance with no id
"""
category = Category(name="Reference")
serial_str = serializers.serialize(self.serializer_name, [category])
pk_value = self._get_pk_values(serial_str)[0]
self.assertFalse(pk_value)
cat_obj = list(serializers.deserialize(self.serializer_name,
serial_str))[0].object
self.assertEqual(cat_obj.id, None)
def test_float_serialization(self):
"""Tests that float values serialize and deserialize intact"""
sc = Score(score=3.4)
sc.save()
serial_str = serializers.serialize(self.serializer_name, [sc])
deserial_objs = list(serializers.deserialize(self.serializer_name,
serial_str))
self.assertEqual(deserial_objs[0].object.score, Approximate(3.4, places=1))
def test_custom_field_serialization(self):
"""Tests that custom fields serialize and deserialize intact"""
team_str = "Spartak Moskva"
player = Player()
player.name = "Soslan Djanaev"
player.rank = 1
player.team = Team(team_str)
player.save()
serial_str = serializers.serialize(self.serializer_name,
Player.objects.all())
team = self._get_field_values(serial_str, "team")
self.assertTrue(team)
self.assertEqual(team[0], team_str)
deserial_objs = list(serializers.deserialize(self.serializer_name, serial_str))
self.assertEqual(deserial_objs[0].object.team.to_string(),
player.team.to_string())
def test_pre_1000ad_date(self):
"""Tests that year values before 1000AD are properly formatted"""
# Regression for #12524 -- dates before 1000AD get prefixed
# 0's on the year
a = Article.objects.create(
author = self.jane,
headline = "Nobody remembers the early years",
pub_date = datetime(1, 2, 3, 4, 5, 6))
serial_str = serializers.serialize(self.serializer_name, [a])
date_values = self._get_field_values(serial_str, "pub_date")
self.assertEqual(date_values[0].replace('T', ' '), "0001-02-03 04:05:06")
def test_pkless_serialized_strings(self):
"""
Tests that serialized strings without PKs
can be turned into models
"""
deserial_objs = list(serializers.deserialize(self.serializer_name,
self.pkless_str))
for obj in deserial_objs:
self.assertFalse(obj.object.id)
obj.save()
self.assertEqual(Category.objects.all().count(), 4)
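# Note: helpers such as _get_pk_values, _get_field_values and _validate_output
# are deliberately left undefined here; each per-format subclass further down
# (XmlSerializerTestCase, JsonSerializerTestCase, YamlSerializerTestCase)
# supplies its own implementation, so the exact same test bodies exercise
# every serializer, with format-specific tweaks such as the XML
# _comparison_value override where needed.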
class SerializersTransactionTestBase(object):
def test_forward_refs(self):
"""
Tests that objects ids can be referenced before they are
defined in the serialization data.
"""
# The deserialization process needs to be contained
# within a transaction in order to test forward reference
# handling.
transaction.enter_transaction_management()
transaction.managed(True)
objs = serializers.deserialize(self.serializer_name, self.fwd_ref_str)
with connection.constraint_checks_disabled():
for obj in objs:
obj.save()
transaction.commit()
transaction.leave_transaction_management()
for model_cls in (Category, Author, Article):
self.assertEqual(model_cls.objects.all().count(), 1)
art_obj = Article.objects.all()[0]
self.assertEqual(art_obj.categories.all().count(), 1)
self.assertEqual(art_obj.author.name, "Agnes")
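# A minimal sketch (not used by the suite above) of the same forward-reference
# handling written with the context-manager transaction API; it assumes a
# Django version where `transaction.atomic` exists (1.6+), unlike the older
# enter/leave_transaction_management calls used in test_forward_refs.
def _deserialize_with_forward_refs(serializer_name, data):
    # Constraint checks stay disabled until every referenced object has been
    # saved, so the whole loop must run inside a single transaction.
    with transaction.atomic():
        with connection.constraint_checks_disabled():
            for obj in serializers.deserialize(serializer_name, data):
                obj.save()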
class XmlSerializerTestCase(SerializersTestBase, TestCase):
serializer_name = "xml"
pkless_str = b"""<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object model="serializers.category">
<field type="CharField" name="name">Reference</field>
</object>
</django-objects>"""
@staticmethod
def _comparison_value(value):
# The XML serializer handles everything as strings, so comparisons
# need to be performed on the stringified value
return six.text_type(value)
@staticmethod
def _validate_output(serial_str):
try:
minidom.parseString(serial_str)
except Exception:
return False
else:
return True
@staticmethod
def _get_pk_values(serial_str):
ret_list = []
dom = minidom.parseString(serial_str)
fields = dom.getElementsByTagName("object")
for field in fields:
ret_list.append(field.getAttribute("pk"))
return ret_list
@staticmethod
def _get_field_values(serial_str, field_name):
ret_list = []
dom = minidom.parseString(serial_str)
fields = dom.getElementsByTagName("field")
for field in fields:
if field.getAttribute("name") == field_name:
temp = []
for child in field.childNodes:
temp.append(child.nodeValue)
ret_list.append("".join(temp))
return ret_list
class XmlSerializerTransactionTestCase(SerializersTransactionTestBase, TransactionTestCase):
serializer_name = "xml"
fwd_ref_str = b"""<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object pk="1" model="serializers.article">
<field to="serializers.author" name="author" rel="ManyToOneRel">1</field>
<field type="CharField" name="headline">Forward references pose no problem</field>
<field type="DateTimeField" name="pub_date">2006-06-16T15:00:00</field>
<field to="serializers.category" name="categories" rel="ManyToManyRel">
<object pk="1"></object>
</field>
</object>
<object pk="1" model="serializers.author">
<field type="CharField" name="name">Agnes</field>
</object>
<object pk="1" model="serializers.category">
<field type="CharField" name="name">Reference</field></object>
</django-objects>"""
class JsonSerializerTestCase(SerializersTestBase, TestCase):
serializer_name = "json"
pkless_str = b"""[{"pk": null, "model": "serializers.category", "fields": {"name": "Reference"}}]"""
@staticmethod
def _validate_output(serial_str):
try:
json.loads(serial_str)
except Exception:
return False
else:
return True
@staticmethod
def _get_pk_values(serial_str):
ret_list = []
serial_list = json.loads(serial_str)
for obj_dict in serial_list:
ret_list.append(obj_dict["pk"])
return ret_list
@staticmethod
def _get_field_values(serial_str, field_name):
ret_list = []
serial_list = json.loads(serial_str)
for obj_dict in serial_list:
if field_name in obj_dict["fields"]:
ret_list.append(obj_dict["fields"][field_name])
return ret_list
class JsonSerializerTransactionTestCase(SerializersTransactionTestBase, TransactionTestCase):
serializer_name = "json"
fwd_ref_str = b"""[
{
"pk": 1,
"model": "serializers.article",
"fields": {
"headline": "Forward references pose no problem",
"pub_date": "2006-06-16T15:00:00",
"categories": [1],
"author": 1
}
},
{
"pk": 1,
"model": "serializers.category",
"fields": {
"name": "Reference"
}
},
{
"pk": 1,
"model": "serializers.author",
"fields": {
"name": "Agnes"
}
}]"""
try:
import yaml
except ImportError:
pass
else:
class YamlSerializerTestCase(SerializersTestBase, TestCase):
serializer_name = "yaml"
fwd_ref_str = b"""- fields:
headline: Forward references pose no problem
pub_date: 2006-06-16 15:00:00
categories: [1]
author: 1
pk: 1
model: serializers.article
- fields:
name: Reference
pk: 1
model: serializers.category
- fields:
name: Agnes
pk: 1
model: serializers.author"""
pkless_str = b"""- fields:
name: Reference
pk: null
model: serializers.category"""
@staticmethod
def _validate_output(serial_str):
try:
yaml.safe_load(StringIO(serial_str))
except Exception:
return False
else:
return True
@staticmethod
def _get_pk_values(serial_str):
ret_list = []
stream = StringIO(serial_str)
for obj_dict in yaml.safe_load(stream):
ret_list.append(obj_dict["pk"])
return ret_list
@staticmethod
def _get_field_values(serial_str, field_name):
ret_list = []
stream = StringIO(serial_str)
for obj_dict in yaml.safe_load(stream):
if "fields" in obj_dict and field_name in obj_dict["fields"]:
field_value = obj_dict["fields"][field_name]
# yaml.safe_load will return non-string objects for some
# of the fields we are interested in; this ensures that
# everything comes back as a string.
if isinstance(field_value, six.string_types):
ret_list.append(field_value)
else:
ret_list.append(str(field_value))
return ret_list
class YamlSerializerTransactionTestCase(SerializersTransactionTestBase, TransactionTestCase):
serializer_name = "yaml"
fwd_ref_str = b"""- fields:
headline: Forward references pose no problem
pub_date: 2006-06-16 15:00:00
categories: [1]
author: 1
pk: 1
model: serializers.article
- fields:
name: Reference
pk: 1
model: serializers.category
- fields:
name: Agnes
pk: 1
model: serializers.author"""
import json
import os
import shutil
import tempfile
from contextlib import contextmanager
from datetime import datetime, timedelta
from decimal import Decimal
from django.conf import settings
from django.core import mail
import mock
import pytest
from PIL import Image
from olympia import amo
from olympia.addons.models import Addon, AddonUser
from olympia.amo.templatetags.jinja_helpers import user_media_path
from olympia.amo.tests import (
TestCase, addon_factory, user_factory, version_factory)
from olympia.amo.tests.test_helpers import get_addon_file, get_image_path
from olympia.amo.utils import image_size, utc_millesecs_from_epoch
from olympia.api.models import SYMMETRIC_JWT_TYPE, APIKey
from olympia.applications.models import AppVersion
from olympia.constants.base import VALIDATOR_SKELETON_RESULTS
from olympia.devhub import tasks
from olympia.files.models import FileUpload
from olympia.versions.models import Version
pytestmark = pytest.mark.django_db
def test_resize_icon_shrink():
""" Image should be shrunk so that the longest side is 32px. """
resize_size = 32
final_size = (32, 12)
_uploader(resize_size, final_size)
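# The expected 32x12 comes from scaling 339x128 proportionally until the
# longest side is 32: 128 * 32 / 339 ~= 12.08, i.e. 12 once reduced to
# whole pixels.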
def test_resize_icon_enlarge():
""" Image stays the same, since the new size is bigger than both sides. """
resize_size = 350
final_size = (339, 128)
_uploader(resize_size, final_size)
def test_resize_icon_same():
""" Image stays the same, since the new size is the same. """
resize_size = 339
final_size = (339, 128)
_uploader(resize_size, final_size)
def test_resize_icon_list():
""" Resize multiple images at once. """
resize_size = [32, 339, 350]
final_size = [(32, 12), (339, 128), (339, 128)]
_uploader(resize_size, final_size)
def _uploader(resize_size, final_size):
img = get_image_path('mozilla.png')
original_size = (339, 128)
src = tempfile.NamedTemporaryFile(
mode='r+w+b', suffix='.png', delete=False, dir=settings.TMP_PATH)
if not isinstance(final_size, list):
final_size = [final_size]
resize_size = [resize_size]
uploadto = user_media_path('addon_icons')
try:
os.makedirs(uploadto)
except OSError:
pass
for rsize, expected_size in zip(resize_size, final_size):
# resize_icon moves the original
shutil.copyfile(img, src.name)
src_image = Image.open(src.name)
assert src_image.size == original_size
dest_name = os.path.join(uploadto, '1234')
with mock.patch('olympia.amo.utils.pngcrush_image') as pngcrush_mock:
return_value = tasks.resize_icon(src.name, dest_name, [rsize])
dest_image = '%s-%s.png' % (dest_name, rsize)
assert pngcrush_mock.call_count == 1
assert pngcrush_mock.call_args_list[0][0][0] == dest_image
assert image_size(dest_image) == expected_size
# original should have been moved to -original
orig_image = '%s-original.png' % dest_name
assert os.path.exists(orig_image)
# Return value of the task should be a dict with an icon_hash key
# containing the 8 first chars of the md5 hash of the source file,
# which is bb362450b00f0461c6bddc6b97b3c30b.
assert return_value == {'icon_hash': 'bb362450'}
os.remove(dest_image)
assert not os.path.exists(dest_image)
os.remove(orig_image)
assert not os.path.exists(orig_image)
shutil.rmtree(uploadto)
assert not os.path.exists(src.name)
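# For reference, the expected icon_hash is just the first 8 characters of the
# md5 of the source image; a quick way to recompute it by hand (a sketch, not
# part of the tests):
#
#     import hashlib
#     with open(get_image_path('mozilla.png'), 'rb') as fp:
#         hashlib.md5(fp.read()).hexdigest()[:8]  # -> 'bb362450'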
class ValidatorTestCase(TestCase):
def setUp(self):
# Because the validator calls dump_apps() once and then uses the json
# file to find out which appversions are valid, all tests running the
# validator need to create *all* the appversions that any test using
# this class might need.
# 3.7a1pre is somehow required to exist by amo-validator.
# The other ones are app-versions we're using in our tests.
self.create_appversion('firefox', '2.0')
self.create_appversion('firefox', '3.6')
self.create_appversion('firefox', '3.7a1pre')
self.create_appversion('firefox', '38.0a1')
# Required for WebExtensions tests.
self.create_appversion('firefox', '*')
self.create_appversion('firefox', '42.0')
self.create_appversion('firefox', '42.*')
self.create_appversion('firefox', '43.0')
# Required for 57-specific tests.
self.create_appversion('android', '38.0a1')
self.create_appversion('android', '*')
self.create_appversion('firefox', '57.0')
# Required for Thunderbird tests.
self.create_appversion('thunderbird', '42.0')
self.create_appversion('thunderbird', '45.0')
def create_appversion(self, name, version):
return AppVersion.objects.create(
application=amo.APPS[name].id, version=version)
class TestValidator(ValidatorTestCase):
mock_sign_addon_warning = json.dumps({
"warnings": 1,
"errors": 0,
"messages": [
{"context": None,
"editors_only": False,
"description": "Add-ons which are already signed will be "
"re-signed when published on AMO. This will "
"replace any existing signatures on the add-on.",
"column": None,
"type": "warning",
"id": ["testcases_content", "signed_xpi"],
"file": "",
"tier": 2,
"for_appversions": None,
"message": "Package already signed",
"uid": "87326f8f699f447e90b3d5a66a78513e",
"line": None,
"compatibility_type": None},
]
})
def setUp(self):
super(TestValidator, self).setUp()
self.upload = FileUpload.objects.create(
path=get_addon_file('desktop.xpi'))
assert not self.upload.valid
def get_upload(self):
return FileUpload.objects.get(pk=self.upload.pk)
@mock.patch('olympia.devhub.tasks.run_validator')
def test_pass_validation(self, _mock):
_mock.return_value = '{"errors": 0}'
tasks.validate(self.upload, listed=True)
assert self.get_upload().valid
@mock.patch('olympia.devhub.tasks.run_validator')
def test_fail_validation(self, _mock):
_mock.return_value = '{"errors": 2}'
tasks.validate(self.upload, listed=True)
assert not self.get_upload().valid
@mock.patch('validator.submain.test_package')
def test_validation_error(self, _mock):
_mock.side_effect = Exception
self.upload.update(path=get_addon_file('desktop.xpi'))
assert self.upload.validation is None
tasks.validate(self.upload, listed=True)
self.upload.reload()
validation = self.upload.processed_validation
assert validation
assert validation['errors'] == 1
assert validation['messages'][0]['id'] == ['validator',
'unexpected_exception']
assert not self.upload.valid
@mock.patch('olympia.devhub.tasks.run_addons_linter')
def test_validation_error_webextension(self, _mock):
_mock.side_effect = Exception
self.upload.update(path=get_addon_file('valid_webextension.xpi'))
assert self.upload.validation is None
tasks.validate(self.upload, listed=True)
self.upload.reload()
validation = self.upload.processed_validation
assert validation
assert validation['errors'] == 1
assert validation['messages'][0]['id'] == [
'validator', 'unexpected_exception']
assert 'WebExtension' in validation['messages'][0]['message']
assert not self.upload.valid
@mock.patch('olympia.devhub.tasks.run_validator')
def test_validation_signing_warning(self, _mock):
"""If we sign addons, warn on signed addon submission."""
_mock.return_value = self.mock_sign_addon_warning
tasks.validate(self.upload, listed=True)
validation = json.loads(self.get_upload().validation)
assert validation['warnings'] == 1
assert len(validation['messages']) == 1
@mock.patch('validator.validate.validate')
@mock.patch('olympia.devhub.tasks.track_validation_stats')
def test_track_validation_stats(self, mock_track, mock_validate):
mock_validate.return_value = '{"errors": 0}'
tasks.validate(self.upload, listed=True)
mock_track.assert_called_with(mock_validate.return_value)
class TestMeasureValidationTime(TestValidator):
def setUp(self):
super(TestMeasureValidationTime, self).setUp()
# Set created time back (just for sanity) otherwise the delta
# would be in the microsecond range.
self.upload.update(created=datetime.now() - timedelta(days=1))
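# statsd_timing_mock() below records every statsd.timing(metric, value) call
# made inside the `with` block into a dict keyed by metric name, so the tests
# can assert on the durations that were reported.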
@contextmanager
def statsd_timing_mock(self):
statsd_calls = {}
def capture_timing_call(metric, value):
statsd_calls[metric] = value
with mock.patch('olympia.devhub.tasks.statsd.timing') as mock_timing:
mock_timing.side_effect = capture_timing_call
yield statsd_calls
def approximate_upload_time(self):
upload_start = utc_millesecs_from_epoch(self.upload.created)
now = utc_millesecs_from_epoch()
return now - upload_start
def assert_milleseconds_are_close(self, actual_ms, calculated_ms,
fuzz=None):
if fuzz is None:
fuzz = Decimal(300)
assert (actual_ms >= (calculated_ms - fuzz) and
actual_ms <= (calculated_ms + fuzz))
def handle_upload_validation_result(self,
channel=amo.RELEASE_CHANNEL_LISTED):
validation = amo.VALIDATOR_SKELETON_RESULTS.copy()
tasks.handle_upload_validation_result(validation, self.upload.pk,
channel, False)
def test_track_upload_validation_results_time(self):
with self.statsd_timing_mock() as statsd_calls:
self.handle_upload_validation_result()
rough_delta = self.approximate_upload_time()
actual_delta = statsd_calls['devhub.validation_results_processed']
self.assert_milleseconds_are_close(actual_delta, rough_delta)
def test_track_upload_validation_results_with_file_size(self):
with self.statsd_timing_mock() as statsd_calls:
self.handle_upload_validation_result()
# This test makes sure storage.size() works on a real file.
rough_delta = self.approximate_upload_time()
actual_delta = statsd_calls[
'devhub.validation_results_processed_per_mb']
# This value should not be scaled because this package is under 1MB.
self.assert_milleseconds_are_close(actual_delta, rough_delta)
def test_scale_large_xpi_times_per_megabyte(self):
megabyte = Decimal(1024 * 1024)
file_size_in_mb = Decimal(5)
with mock.patch('olympia.devhub.tasks.storage.size') as mock_size:
mock_size.return_value = file_size_in_mb * megabyte
with self.statsd_timing_mock() as statsd_calls:
self.handle_upload_validation_result()
# Validation times for files larger than 1MB should be scaled.
rough_delta = self.approximate_upload_time()
rough_scaled_delta = Decimal(rough_delta) / file_size_in_mb
actual_scaled_delta = statsd_calls[
'devhub.validation_results_processed_per_mb']
self.assert_milleseconds_are_close(actual_scaled_delta,
rough_scaled_delta)
def test_measure_small_files_in_separate_bucket(self):
with mock.patch('olympia.devhub.tasks.storage.size') as mock_size:
mock_size.return_value = 500 # less than 1MB
with self.statsd_timing_mock() as statsd_calls:
self.handle_upload_validation_result()
rough_delta = self.approximate_upload_time()
actual_delta = statsd_calls[
'devhub.validation_results_processed_under_1mb']
self.assert_milleseconds_are_close(actual_delta, rough_delta)
def test_measure_large_files_in_separate_bucket(self):
with mock.patch('olympia.devhub.tasks.storage.size') as mock_size:
mock_size.return_value = (1024 * 1024) * 5  # 5MB
with self.statsd_timing_mock() as statsd_calls:
self.handle_upload_validation_result()
rough_delta = self.approximate_upload_time()
actual_delta = statsd_calls[
'devhub.validation_results_processed_over_1mb']
self.assert_milleseconds_are_close(actual_delta, rough_delta)
def test_do_not_calculate_scaled_time_for_empty_files(self):
with mock.patch('olympia.devhub.tasks.storage.size') as mock_size:
mock_size.return_value = 0
with self.statsd_timing_mock() as statsd_calls:
self.handle_upload_validation_result()
assert 'devhub.validation_results_processed_per_mb' not in statsd_calls
def test_ignore_missing_upload_paths_for_now(self):
with mock.patch('olympia.devhub.tasks.storage.exists') as mock_exists:
mock_exists.return_value = False
with self.statsd_timing_mock() as statsd_calls:
self.handle_upload_validation_result()
assert 'devhub.validation_results_processed' in statsd_calls
assert 'devhub.validation_results_processed_per_mb' not in statsd_calls
assert ('devhub.validation_results_processed_under_1mb' not in
statsd_calls)
class TestTrackValidatorStats(TestCase):
def setUp(self):
super(TestTrackValidatorStats, self).setUp()
patch = mock.patch('olympia.devhub.tasks.statsd.incr')
self.mock_incr = patch.start()
self.addCleanup(patch.stop)
def result(self, **overrides):
result = VALIDATOR_SKELETON_RESULTS.copy()
result.update(overrides)
return json.dumps(result)
def test_count_all_successes(self):
tasks.track_validation_stats(self.result(errors=0))
self.mock_incr.assert_any_call(
'devhub.validator.results.all.success'
)
def test_count_all_errors(self):
tasks.track_validation_stats(self.result(errors=1))
self.mock_incr.assert_any_call(
'devhub.validator.results.all.failure'
)
def test_count_listed_results(self):
tasks.track_validation_stats(self.result(metadata={'listed': True}))
self.mock_incr.assert_any_call(
'devhub.validator.results.listed.success'
)
def test_count_unlisted_results(self):
tasks.track_validation_stats(self.result(metadata={'listed': False}))
self.mock_incr.assert_any_call(
'devhub.validator.results.unlisted.success'
)
class TestRunAddonsLinter(ValidatorTestCase):
def setUp(self):
super(TestRunAddonsLinter, self).setUp()
valid_path = get_addon_file('valid_webextension.xpi')
invalid_path = get_addon_file('invalid_webextension_invalid_id.xpi')
self.valid_upload = FileUpload.objects.create(path=valid_path)
self.invalid_upload = FileUpload.objects.create(path=invalid_path)
def get_upload(self, upload):
return FileUpload.objects.get(pk=upload.pk)
@mock.patch('olympia.devhub.tasks.run_addons_linter')
def test_calls_run_linter(self, run_linter):
run_linter.return_value = '{"errors": 0}'
assert not self.valid_upload.valid
tasks.validate(self.valid_upload, listed=True)
upload = self.get_upload(self.valid_upload)
assert upload.valid, upload.validation
def test_run_linter_fail(self):
tasks.validate(self.invalid_upload, listed=True)
assert not self.get_upload(self.invalid_upload).valid
def test_run_linter_path_doesnt_exist(self):
with pytest.raises(ValueError) as exc:
tasks.run_addons_linter('doesntexist')
assert str(exc.value) == (
'Path "doesntexist" is not a file or directory or '
'does not exist.')
def test_run_linter_use_temporary_file(self):
TemporaryFile = tempfile.TemporaryFile
with mock.patch('olympia.devhub.tasks.tempfile.TemporaryFile') as tmpf:
tmpf.side_effect = lambda *a, **kw: TemporaryFile(*a, **kw)
# This is a relatively small add-on (1.2M) but we are using
# a temporary file for all our linter output.
result = json.loads(tasks.run_addons_linter(
get_addon_file('typo-gecko.xpi')
))
assert tmpf.call_count == 2
assert result['success']
assert result['warnings'] == 24
assert not result['errors']
class TestValidateFilePath(ValidatorTestCase):
def test_amo_validator_success(self):
result = tasks.validate_file_path(
get_addon_file('valid_firefox_addon.xpi'),
hash_=None, listed=True)
assert result['success']
assert not result['errors']
assert not result['warnings']
def test_amo_validator_fail_warning(self):
result = tasks.validate_file_path(
get_addon_file('invalid_firefox_addon_warning.xpi'),
hash_=None, listed=True)
assert not result['success']
assert not result['errors']
assert result['warnings']
def test_amo_validator_fail_error(self):
result = tasks.validate_file_path(
get_addon_file('invalid_firefox_addon_error.xpi'),
hash_=None, listed=True)
assert not result['success']
assert result['errors']
assert not result['warnings']
def test_amo_validator_addons_linter_success(self):
result = tasks.validate_file_path(
get_addon_file('valid_webextension.xpi'),
hash_=None, listed=True, is_webextension=True)
assert result['success']
assert not result['errors']
assert not result['warnings']
def test_amo_validator_addons_linter_error(self):
# This test assumes that `amo-validator` doesn't correctly
# validate an invalid id in manifest.json.
result = tasks.validate_file_path(
get_addon_file('invalid_webextension_invalid_id.xpi'),
hash_=None, listed=True, is_webextension=True)
assert not result['success']
assert result['errors']
assert not result['warnings']
class TestWebextensionIncompatibilities(ValidatorTestCase):
fixtures = ['base/addon_3615']
def setUp(self):
self.addon = Addon.objects.get(pk=3615)
# valid_webextension.xpi has version 1.0 so mock the original version
self.addon.update(guid='beastify@mozilla.org')
self.addon.current_version.update(version='0.9')
self.update_files(
version=self.addon.current_version,
filename='delicious_bookmarks-2.1.106-fx.xpi')
def update_files(self, **kw):
for version in self.addon.versions.all():
for file in version.files.all():
file.update(**kw)
def test_webextension_upgrade_is_annotated(self):
assert all(f.is_webextension is False
for f in self.addon.current_version.all_files)
file_ = get_addon_file('valid_webextension.xpi')
upload = FileUpload.objects.create(path=file_, addon=self.addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['is_upgrade_to_webextension']
expected = ['validation', 'messages', 'webext_upgrade']
assert upload.processed_validation['messages'][0]['id'] == expected
assert upload.processed_validation['warnings'] == 1
assert upload.valid
def test_new_webextension_is_not_annotated(self):
"""https://github.com/mozilla/addons-server/issues/3679"""
previous_file = self.addon.current_version.all_files[-1]
previous_file.is_webextension = True
previous_file.status = amo.STATUS_AWAITING_REVIEW
previous_file.save()
file_ = get_addon_file('valid_webextension.xpi')
upload = FileUpload.objects.create(path=file_, addon=self.addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
validation = upload.processed_validation
assert 'is_upgrade_to_webextension' not in validation
expected = ['validation', 'messages', 'webext_upgrade']
assert not any(msg['id'] == expected for msg in validation['messages'])
assert validation['warnings'] == 0
assert upload.valid
def test_webextension_webext_to_webext_not_annotated(self):
previous_file = self.addon.current_version.all_files[-1]
previous_file.is_webextension = True
previous_file.save()
file_ = get_addon_file('valid_webextension.xpi')
upload = FileUpload.objects.create(path=file_, addon=self.addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
validation = upload.processed_validation
assert 'is_upgrade_to_webextension' not in validation
expected = ['validation', 'messages', 'webext_upgrade']
assert not any(msg['id'] == expected for msg in validation['messages'])
assert validation['warnings'] == 0
assert upload.valid
def test_webextension_no_webext_no_warning(self):
file_ = amo.tests.AMOPaths().file_fixture_path(
'delicious_bookmarks-2.1.106-fx.xpi')
upload = FileUpload.objects.create(path=file_, addon=self.addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
validation = upload.processed_validation
assert 'is_upgrade_to_webextension' not in validation
expected = ['validation', 'messages', 'webext_upgrade']
assert not any(msg['id'] == expected for msg in validation['messages'])
def test_webextension_cannot_be_downgraded(self):
self.update_files(is_webextension=True)
file_ = amo.tests.AMOPaths().file_fixture_path(
'delicious_bookmarks-2.1.106-fx.xpi')
upload = FileUpload.objects.create(path=file_, addon=self.addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
expected = ['validation', 'messages', 'webext_downgrade']
validation = upload.processed_validation
assert validation['messages'][0]['id'] == expected
assert validation['messages'][0]['type'] == 'error'
def test_webextension_downgrade_only_warning_unlisted(self):
self.update_files(is_webextension=True)
self.make_addon_unlisted(self.addon)
file_ = amo.tests.AMOPaths().file_fixture_path(
'delicious_bookmarks-2.1.106-fx.xpi')
upload = FileUpload.objects.create(path=file_, addon=self.addon)
tasks.validate(upload, listed=False)
upload.refresh_from_db()
expected = ['validation', 'messages', 'webext_downgrade']
validation = upload.processed_validation
assert validation['messages'][0]['id'] == expected
assert validation['messages'][0]['type'] == 'warning'
assert validation['errors'] == 0
def test_webextension_cannot_be_downgraded_ignore_deleted_version(self):
"""Make sure even deleting the previous version does not prevent
the downgrade error."""
file_ = amo.tests.AMOPaths().file_fixture_path(
'delicious_bookmarks-2.1.106-fx.xpi')
self.update_files(is_webextension=True)
deleted_version = version_factory(
addon=self.addon, file_kw={'is_webextension': False})
deleted_version.delete()
upload = FileUpload.objects.create(path=file_, addon=self.addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
expected = ['validation', 'messages', 'webext_downgrade']
validation = upload.processed_validation
assert validation['messages'][0]['id'] == expected
assert validation['messages'][0]['type'] == 'error'
def test_no_upgrade_annotation_no_version(self):
"""Make sure there's no workaround the downgrade error."""
self.addon.update(guid='guid@xpi')
file_ = amo.tests.AMOPaths().file_fixture_path(
'delicious_bookmarks-no-version.xpi')
self.update_files(is_webextension=True)
deleted_version = version_factory(
addon=self.addon, file_kw={'is_webextension': False})
deleted_version.delete()
upload = FileUpload.objects.create(path=file_, addon=self.addon)
upload.addon.version = None
upload.addon.save()
upload.save(update_fields=('version',))
upload.refresh_from_db()
tasks.validate(upload, listed=True)
upload.refresh_from_db()
expected = [u'testcases_installrdf', u'_test_rdf', u'missing_addon']
validation = upload.processed_validation
assert validation['messages'][0]['id'] == expected
assert validation['messages'][0]['type'] == 'error'
class TestLegacyAddonRestrictions(ValidatorTestCase):
def setUp(self):
super(TestLegacyAddonRestrictions, self).setUp()
def test_submit_legacy_addon_restricted(self):
file_ = get_addon_file('valid_firefox_addon.xpi')
upload = FileUpload.objects.create(path=file_)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 1
expected = ['validation', 'messages', 'legacy_addons_restricted']
assert upload.processed_validation['messages'][0]['id'] == expected
assert not upload.valid
def test_submit_legacy_extension_not_a_new_addon(self):
file_ = get_addon_file('valid_firefox_addon.xpi')
addon = addon_factory(version_kw={'version': '0.1'})
upload = FileUpload.objects.create(path=file_, addon=addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
assert upload.processed_validation['messages'] == []
assert upload.valid
def test_submit_legacy_extension_1st_version_in_that_channel(self):
file_ = get_addon_file('valid_firefox_addon.xpi')
addon = addon_factory(
version_kw={'version': '0.1',
'channel': amo.RELEASE_CHANNEL_UNLISTED})
upload = FileUpload.objects.create(path=file_, addon=addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 1
expected = ['validation', 'messages', 'legacy_addons_restricted']
assert upload.processed_validation['messages'][0]['id'] == expected
assert not upload.valid
def test_submit_legacy_extension_1st_version_in_that_channel_reverse(self):
file_ = get_addon_file('valid_firefox_addon.xpi')
addon = addon_factory(
version_kw={'version': '0.1',
'channel': amo.RELEASE_CHANNEL_LISTED})
upload = FileUpload.objects.create(path=file_, addon=addon)
tasks.validate(upload, listed=False)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 1
expected = ['validation', 'messages', 'legacy_addons_restricted']
assert upload.processed_validation['messages'][0]['id'] == expected
assert not upload.valid
def test_submit_webextension(self):
file_ = get_addon_file('valid_webextension.xpi')
upload = FileUpload.objects.create(path=file_)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
assert upload.processed_validation['messages'] == []
assert upload.valid
def test_submit_legacy_extension_targets_older_firefox_strictly(self):
file_ = get_addon_file('valid_firefox_addon_strict_compatibility.xpi')
upload = FileUpload.objects.create(path=file_)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
assert upload.processed_validation['messages'] == []
assert upload.valid
def test_submit_non_extension(self):
file_ = get_addon_file('searchgeek-20090701.xml')
upload = FileUpload.objects.create(path=file_)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
assert upload.processed_validation['messages'] == []
assert upload.valid
def test_submit_thunderbird_extension(self):
file_ = get_addon_file('valid_firefox_and_thunderbird_addon.xpi')
upload = FileUpload.objects.create(path=file_)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
assert upload.processed_validation['messages'] == []
assert upload.valid
def test_restrict_firefox_53_alpha(self):
data = {
'messages': [],
'errors': 0,
'detected_type': 'extension',
'metadata': {
'is_webextension': False,
'is_extension': True,
'strict_compatibility': True,
'applications': {
'firefox': {
'max': '53a1'
}
}
}
}
results = tasks.annotate_legacy_addon_restrictions(
data, is_new_upload=True)
assert results['errors'] == 1
assert len(results['messages']) > 0
assert results['messages'][0]['id'] == [
'validation', 'messages', 'legacy_addons_restricted']
def test_restrict_themes(self):
data = {
'messages': [],
'errors': 0,
'detected_type': 'theme',
'metadata': {
'is_extension': False,
'strict_compatibility': False,
'applications': {
'firefox': {
'max': '54.0'
}
}
}
}
results = tasks.annotate_legacy_addon_restrictions(
data, is_new_upload=True)
assert results['errors'] == 1
assert len(results['messages']) > 0
assert results['messages'][0]['id'] == [
'validation', 'messages', 'legacy_addons_restricted']
def test_submit_legacy_upgrade(self):
# Works because it's not targeting >= 57.
file_ = get_addon_file('valid_firefox_addon.xpi')
addon = addon_factory(version_kw={'version': '0.1'})
upload = FileUpload.objects.create(path=file_, addon=addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
assert upload.processed_validation['messages'] == []
assert upload.valid
def test_submit_legacy_upgrade_targeting_firefox_57(self):
# Should error since it's a legacy extension targeting 57.
file_ = get_addon_file('valid_firefox_addon_targeting_57.xpi')
addon = addon_factory(version_kw={'version': '0.1'})
upload = FileUpload.objects.create(path=file_, addon=addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 1
assert len(upload.processed_validation['messages']) == 1
assert upload.processed_validation['messages'][0]['type'] == 'error'
assert upload.processed_validation['messages'][0]['id'] == [
'validation', 'messages', 'legacy_addons_max_version']
assert not upload.valid
def test_submit_legacy_upgrade_targeting_57_strict_compatibility(self):
# Should error just as if it didn't have strict compatibility; that
# does not matter: it's a legacy extension, so it should not target 57.
file_ = get_addon_file(
'valid_firefox_addon_targeting_57_strict_compatibility.xpi')
addon = addon_factory(version_kw={'version': '0.1'})
upload = FileUpload.objects.create(path=file_, addon=addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 1
assert len(upload.processed_validation['messages']) == 1
assert upload.processed_validation['messages'][0]['type'] == 'error'
assert upload.processed_validation['messages'][0]['id'] == [
'validation', 'messages', 'legacy_addons_max_version']
assert not upload.valid
def test_submit_legacy_upgrade_targeting_star(self):
# Should not error: extensions with a maxversion of '*' don't get the
# error; the manifest parsing code will rewrite it as '56.*' instead.
file_ = get_addon_file('valid_firefox_addon_targeting_star.xpi')
addon = addon_factory(version_kw={'version': '0.1'})
upload = FileUpload.objects.create(path=file_, addon=addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
assert upload.processed_validation['messages'] == []
assert upload.valid
def test_submit_webextension_upgrade_targeting_firefox_57(self):
# Should not error: it's targeting 57 but it's a webextension.
file_ = get_addon_file('valid_webextension_targeting_57.xpi')
addon = addon_factory(version_kw={'version': '0.1'},
file_kw={'is_webextension': True})
upload = FileUpload.objects.create(path=file_, addon=addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
messages = upload.processed_validation['messages']
assert len(messages) == 1
assert messages[0]['message'] == ('"strict_max_version" '
'not required.')
assert upload.valid
def test_submit_dictionary_upgrade_targeting_firefox_57(self):
# Should not error: non-extensions types are not affected by the
# restriction, even if they target 57.
file_ = get_addon_file('dictionary_targeting_57.xpi')
addon = addon_factory(version_kw={'version': '0.1'},
type=amo.ADDON_DICT)
upload = FileUpload.objects.create(path=file_, addon=addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
assert upload.processed_validation['messages'] == []
assert upload.valid
def test_submit_legacy_targeting_multiple_including_firefox_57(self):
# By submitting a legacy extension targeting multiple apps, this add-on
# avoids the restriction for new uploads, but it should still trigger
# the one for legacy extensions targeting 57 or higher.
data = {
'messages': [],
'errors': 0,
'detected_type': 'extension',
'metadata': {
'is_webextension': False,
'is_extension': True,
'applications': {
'firefox': {
'max': '57.0'
},
'thunderbird': {
'max': '45.0'
}
}
}
}
results = tasks.annotate_legacy_addon_restrictions(
data.copy(), is_new_upload=True)
assert results['errors'] == 1
assert len(results['messages']) > 0
assert results['messages'][0]['id'] == [
'validation', 'messages', 'legacy_addons_max_version']
results = tasks.annotate_legacy_addon_restrictions(
data.copy(), is_new_upload=False)
assert results['errors'] == 1
assert len(results['messages']) > 0
assert results['messages'][0]['id'] == [
'validation', 'messages', 'legacy_addons_max_version']
@mock.patch('olympia.devhub.tasks.send_html_mail_jinja')
def test_send_welcome_email(send_html_mail_jinja_mock):
tasks.send_welcome_email(3615, ['del@icio.us'], {'omg': 'yes'})
send_html_mail_jinja_mock.assert_called_with(
('Mozilla Add-ons: Your add-on has been submitted to'
' addons.mozilla.org!'),
'devhub/email/submission.html',
'devhub/email/submission.txt',
{'omg': 'yes'},
recipient_list=['del@icio.us'],
from_email=settings.NOBODY_EMAIL,
use_deny_list=False,
perm_setting='individual_contact')
class TestSubmitFile(TestCase):
fixtures = ['base/addon_3615']
def setUp(self):
super(TestSubmitFile, self).setUp()
self.addon = Addon.objects.get(pk=3615)
patcher = mock.patch('olympia.devhub.tasks.create_version_for_upload')
self.create_version_for_upload = patcher.start()
self.addCleanup(patcher.stop)
def create_upload(self, version='1.0'):
return FileUpload.objects.create(
addon=self.addon, version=version, validation='{"errors":0}',
automated_signing=False)
@mock.patch('olympia.devhub.tasks.FileUpload.passed_all_validations', True)
def test_file_passed_all_validations(self):
upload = self.create_upload()
tasks.submit_file(self.addon.pk, upload.pk, amo.RELEASE_CHANNEL_LISTED)
self.create_version_for_upload.assert_called_with(
self.addon, upload, amo.RELEASE_CHANNEL_LISTED)
@mock.patch('olympia.devhub.tasks.FileUpload.passed_all_validations',
False)
def test_file_not_passed_all_validations(self):
upload = self.create_upload()
tasks.submit_file(self.addon.pk, upload.pk, amo.RELEASE_CHANNEL_LISTED)
assert not self.create_version_for_upload.called
class TestCreateVersionForUpload(TestCase):
fixtures = ['base/addon_3615']
def setUp(self):
super(TestCreateVersionForUpload, self).setUp()
self.addon = Addon.objects.get(pk=3615)
self.create_version_for_upload = (
tasks.create_version_for_upload.non_atomic)
self.mocks = {}
for key in ['Version.from_upload', 'parse_addon']:
patcher = mock.patch('olympia.devhub.tasks.%s' % key)
self.mocks[key] = patcher.start()
self.addCleanup(patcher.stop)
self.user = user_factory()
def create_upload(self, version='1.0'):
return FileUpload.objects.create(
addon=self.addon, version=version, user=self.user,
validation='{"errors":0}', automated_signing=False)
def test_file_passed_all_validations_not_most_recent(self):
upload = self.create_upload()
newer_upload = self.create_upload()
newer_upload.update(created=datetime.today() + timedelta(hours=1))
# Check that the older file won't turn into a Version.
self.create_version_for_upload(self.addon, upload,
amo.RELEASE_CHANNEL_LISTED)
assert not self.mocks['Version.from_upload'].called
# But the newer one will.
self.create_version_for_upload(self.addon, newer_upload,
amo.RELEASE_CHANNEL_LISTED)
self.mocks['Version.from_upload'].assert_called_with(
newer_upload, self.addon, [amo.PLATFORM_ALL.id],
amo.RELEASE_CHANNEL_LISTED,
parsed_data=self.mocks['parse_addon'].return_value)
def test_file_passed_all_validations_version_exists(self):
upload = self.create_upload()
Version.objects.create(addon=upload.addon, version=upload.version)
# Check that the older file won't turn into a Version.
self.create_version_for_upload(self.addon, upload,
amo.RELEASE_CHANNEL_LISTED)
assert not self.mocks['Version.from_upload'].called
def test_file_passed_all_validations_most_recent_failed(self):
upload = self.create_upload()
newer_upload = self.create_upload()
newer_upload.update(created=datetime.today() + timedelta(hours=1),
valid=False,
validation=json.dumps({"errors": 5}))
self.create_version_for_upload(self.addon, upload,
amo.RELEASE_CHANNEL_LISTED)
assert not self.mocks['Version.from_upload'].called
def test_file_passed_all_validations_most_recent(self):
upload = self.create_upload(version='1.0')
newer_upload = self.create_upload(version='0.5')
newer_upload.update(created=datetime.today() + timedelta(hours=1))
# The Version is created because the newer upload is for a different
# version_string.
self.create_version_for_upload(self.addon, upload,
amo.RELEASE_CHANNEL_LISTED)
self.mocks['parse_addon'].assert_called_with(
upload, self.addon, user=self.user)
self.mocks['Version.from_upload'].assert_called_with(
upload, self.addon, [amo.PLATFORM_ALL.id],
amo.RELEASE_CHANNEL_LISTED,
parsed_data=self.mocks['parse_addon'].return_value)
def test_file_passed_all_validations_beta_string(self):
upload = self.create_upload(version='1.0-beta1')
self.create_version_for_upload(self.addon, upload,
amo.RELEASE_CHANNEL_LISTED)
self.mocks['parse_addon'].assert_called_with(
upload, self.addon, user=self.user)
self.mocks['Version.from_upload'].assert_called_with(
upload, self.addon, [amo.PLATFORM_ALL.id],
amo.RELEASE_CHANNEL_LISTED,
parsed_data=self.mocks['parse_addon'].return_value)
def test_file_passed_all_validations_no_version(self):
upload = self.create_upload(version=None)
self.create_version_for_upload(self.addon, upload,
amo.RELEASE_CHANNEL_LISTED)
self.mocks['parse_addon'].assert_called_with(
upload, self.addon, user=self.user)
self.mocks['Version.from_upload'].assert_called_with(
upload, self.addon, [amo.PLATFORM_ALL.id],
amo.RELEASE_CHANNEL_LISTED,
parsed_data=self.mocks['parse_addon'].return_value)
class TestAPIKeyInSubmission(TestCase):
def setUp(self):
self.user = user_factory()
s = '656b16a8ab71686fcfcd04d574bc28be9a1d8252141f54cfb5041709262b84f4'
self.key = APIKey.objects.create(
user=self.user,
type=SYMMETRIC_JWT_TYPE,
key='user:12345:678',
secret=s)
self.addon = addon_factory(users=[self.user],
version_kw={'version': '0.1'},
file_kw={'is_webextension': True})
self.file = get_addon_file('webextension_containing_api_key.xpi')
def test_api_key_in_new_submission_is_found(self):
upload = FileUpload.objects.create(path=self.file, user=self.user)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 1
messages = upload.processed_validation['messages']
assert len(messages) == 1
assert messages[0]['id'] == [
u'validation', u'messages', u'api_key_detected']
assert ('Your developer API key was found in the submitted '
'file.' in messages[0]['message'])
assert not upload.valid
# If the key has been revoked, there is no active key,
# so `get_jwt_key` raises `DoesNotExist`.
with pytest.raises(APIKey.DoesNotExist):
APIKey.get_jwt_key(user_id=self.user.id)
assert len(mail.outbox) == 1
assert ('Your AMO API credentials have been revoked'
in mail.outbox[0].subject)
assert mail.outbox[0].to[0] == self.user.email
def test_api_key_in_submission_is_found(self):
upload = FileUpload.objects.create(path=self.file, addon=self.addon,
user=self.user)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 1
messages = upload.processed_validation['messages']
assert len(messages) == 1
assert messages[0]['id'] == [
u'validation', u'messages', u'api_key_detected']
assert ('Your developer API key was found in the submitted '
'file.' in messages[0]['message'])
assert not upload.valid
# If the key has been revoked, there is no active key,
# so `get_jwt_key` raises `DoesNotExist`.
with pytest.raises(APIKey.DoesNotExist):
APIKey.get_jwt_key(user_id=self.user.id)
assert len(mail.outbox) == 1
assert ('Your AMO API credentials have been revoked'
in mail.outbox[0].subject)
assert ('never share your credentials' in mail.outbox[0].body)
assert mail.outbox[0].to[0] == self.user.email
def test_coauthor_api_key_in_submission_is_found(self):
coauthor = user_factory()
AddonUser.objects.create(addon=self.addon, user_id=coauthor.id)
upload = FileUpload.objects.create(path=self.file, addon=self.addon,
user=coauthor)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 1
messages = upload.processed_validation['messages']
assert len(messages) == 1
assert messages[0]['id'] == [
u'validation', u'messages', u'api_key_detected']
assert ('The developer API key of a coauthor was found in the '
'submitted file.' in messages[0]['message'])
assert not upload.valid
# If the key has been revoked, there is no active key,
# so `get_jwt_key` raises `DoesNotExist`.
with pytest.raises(APIKey.DoesNotExist):
APIKey.get_jwt_key(user_id=self.user.id)
assert len(mail.outbox) == 1
assert ('Your AMO API credentials have been revoked'
in mail.outbox[0].subject)
assert ('never share your credentials' in mail.outbox[0].body)
# We submit as the coauthor; the leaked key is the one from 'self.user'.
assert mail.outbox[0].to[0] == self.user.email
def test_api_key_already_revoked_by_developer(self):
self.key.update(is_active=None)
tasks.revoke_api_key(self.key.id)
# If the key has already been revoked, there is no active key,
# so `get_jwt_key` raises `DoesNotExist`.
with pytest.raises(APIKey.DoesNotExist):
APIKey.get_jwt_key(user_id=self.user.id)
def test_api_key_already_regenerated_by_developer(self):
self.key.update(is_active=None)
current_key = APIKey.new_jwt_credentials(user=self.user)
tasks.revoke_api_key(self.key.id)
key_from_db = APIKey.get_jwt_key(user_id=self.user.id)
assert current_key.key == key_from_db.key
assert current_key.secret == key_from_db.secret
def test_revoke_task_is_called(self):
mock_str = 'olympia.devhub.tasks.revoke_api_key'
wrapped = tasks.revoke_api_key
with mock.patch(mock_str, wraps=wrapped) as mock_revoke:
upload = FileUpload.objects.create(path=self.file, user=self.user)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
mock_revoke.apply_async.assert_called_with(
kwargs={'key_id': self.key.id}, countdown=120)
assert not upload.valid
def test_does_not_revoke_for_different_author(self):
different_author = user_factory()
upload = FileUpload.objects.create(path=self.file,
user=different_author)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
assert upload.valid
def test_does_not_revoke_safe_webextension(self):
file_ = get_addon_file('valid_webextension.xpi')
upload = FileUpload.objects.create(path=file_, user=self.user)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
assert upload.processed_validation['messages'] == []
assert upload.valid
def test_validation_finishes_if_containing_binary_content(self):
file_ = get_addon_file('webextension_containing_binary_files.xpi')
upload = FileUpload.objects.create(path=file_, user=self.user)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
assert upload.processed_validation['messages'] == []
assert upload.valid
def test_validation_finishes_if_containing_invalid_filename(self):
file_ = get_addon_file('invalid_webextension.xpi')
upload = FileUpload.objects.create(path=file_, user=self.user)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
# https://github.com/mozilla/addons-server/issues/8208
# causes this to be 2 (and invalid) instead of 0 (and valid).
# The invalid filename error is caught and raised outside of this
# validation task.
assert upload.processed_validation['errors'] == 2
assert not upload.valid
"""
Tests that should run in both Python and JS.
This helps ensure that both implementations work in the same way.
Focus on use-cases rather than coverage.
These tests are a bit awkward to write, but it is very useful to be able to
test that the two systems work in exactly the same way. You define a class
with signals, and then provide that class to a test function using a
decorator. The test function is then run both in Python and in JS.
The test function should return an object that, when converted to a string,
matches the reference string given to the decorator. The result string is
made lowercase, and double quotes are converted to single quotes.
"""
from pytest import raises
from flexx.util.testing import run_tests_if_main
from flexx.react import source, input, connect, lazy, HasSignals, undefined
from flexx.react.pyscript import create_js_signals_class, HasSignalsJS
from flexx.pyscript.functions import py2js, evaljs, evalpy, js_rename
def run_in_both(cls, reference, extra_classes=()):
if reference.lower() != reference:
raise ValueError('Test reference should be lowercase!')
def wrapper(func):
def runner():
# Run in JS
code = js_rename(HasSignalsJS.JSCODE, 'HasSignalsJS', 'HasSignals')
for c in cls.mro()[1:]:
if c is HasSignals:
break
code += create_js_signals_class(c, c.__name__, c.__bases__[0].__name__+'.prototype')
for c in extra_classes:
code += create_js_signals_class(c, c.__name__)
code += create_js_signals_class(cls, cls.__name__, cls.__bases__[0].__name__+'.prototype')
code += py2js(func, 'test')
code += 'test(%s);' % cls.__name__
jsresult = evaljs(code)
jsresult = jsresult.replace('[ ', '[').replace(' ]', ']')
jsresult = jsresult.replace('"', "'")
print('js:', jsresult)
# Run in Python
pyresult = str(func(cls))
pyresult = pyresult.replace('"', "'")
print('py:', pyresult)
#
assert pyresult.lower() == reference
assert jsresult.lower() == reference
return runner
return wrapper
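# In short: run_in_both() compiles `cls` (plus its HasSignals bases and any
# extra_classes) and the decorated test function to JS with py2js, evaluates
# that with evaljs, then runs the same function in Python, and asserts that
# both lowercased results equal the given reference string.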
class Name(HasSignals):
_foo = 3
_bar = 'bar'
spam = [1, 2, 3]
def __init__(self):
self.r = []
super().__init__()
@input
def first_name(v='john'):
return str(v)
@input
def last_name(v='doe'):
return str(v)
@lazy('first_name', 'last_name')
def full_name(self, n1, n2):
self.r.append('')
return n1 + ' ' + n2
@run_in_both(Name, "['', 'john doe', '', 'almar klein', '', 'jorik klein']")
def test_pull(Name):
name = Name()
name.r.append(name.full_name())
name.first_name('almar')
name.last_name('klein')
name.r.append(name.full_name())
name.first_name('jorik')
name.r.append(name.full_name())
return name.r
@run_in_both(Name, "['', 'john doe', '', 'jane doe']")
def test_disconnecting_signal(Name):
s = Name()
s.r.append(s.full_name())
# Disconnect, but because it's a react signal, it re-connects at once
s.full_name.disconnect(False) # no destroy
s.first_name('almar')
s.first_name('jorik')
s.first_name('jane')
s.r.append(s.full_name()) # connects now
return s.r
@run_in_both(Name, "[true, true, '', true, true, true, true, '', true, true]")
def test_signal_attributes(Name):
s = Name()
s.r.append(s.full_name._timestamp == 0)
s.r.append(s.full_name._value is undefined)
s.full_name()
s.r.append(s.full_name._timestamp > 0)
s.r.append(s.full_name._last_timestamp == 0)
s.r.append(s.full_name._value == 'john doe')
s.r.append(s.full_name._last_value is undefined)
s.first_name('jane')
s.full_name()
s.r.append(s.full_name._last_timestamp > 0)
s.r.append(s.full_name._last_value == 'john doe')
return s.r
@run_in_both(Name, "[3, 'bar', [1, 2, 3], 2, 'err', 'err', 'john']")
def test_hassignal_attributes(Name):
s = Name()
# class attributes
s.r.append(s._foo)
s.r.append(s._bar)
s.r.append(s.spam)
# can set other attributes
s.eggs = 2
s.r.append(s.eggs)
# cannot overwrite signals
try:
s.first_name = 2
s.r.append(s.first_name)
except Exception:
s.r.append('err')
# cannot overwrite signal attributes
try:
s.first_name.value = 2
s.r.append(s.first_name.value)
except Exception:
s.r.append('err')
# cannot delete signals
try:
del s.first_name
except Exception:
pass # on Python it raises, on JS it ignores
s.r.append(s.first_name.value)
return s.r
@run_in_both(Name, "['first_name', 'full_name', 'last_name']")
def test_hassignal__signals__(Name):
s = Name()
return s.__signals__
@run_in_both(Name, "[2, 2]")
def test_reconnect_no_doubles(Name):
s = Name()
s.r.append(len(s.full_name._upstream))
s.full_name.connect()
s.r.append(len(s.full_name._upstream))
return s.r
class NoDefaults(HasSignals):
def __init__(self):
self.r = []
super().__init__()
@input
def in1(v):
return v
@connect('in1')
def s1a(v):
return v
@connect('s1a')
def s1b(v):
return v
# ---
@input
def in2(v):
return v
@connect('in2')
def s2a(self, v):
return v
@connect('s2a')
def s2b(self, v):
self.r.append(v)
#
@input
def in3(v):
return v
@connect('in3')
def aa_s3a(self, v): # name mangling to make these connect first
self.r.append(v)
return v
@connect('aa_s3a')
def aa_s3b(self, v):
self.r.append(v)
@run_in_both(NoDefaults, "['err', '', 'x', 'y', 'z', 'z']")
def test_pull_no_defaults(Cls):
s = Cls()
try:
s.s1b()
except Exception:
s.r.append('err')
s.r.append('')
s.in1('x')
s.r.append(s.s1b())
s.in2('y')
s.in3('z')
return s.r
class Title(HasSignals):
def __init__(self):
self.r = []
super().__init__()
@input
def title(v=''):
return v
@connect('title')
def title_len(v):
return len(v)
@connect('title_len')
def show_title(self, v):
self.r.append(v)
@run_in_both(Title, '[0, 2, 4, false]')
def test_push(Title):
foo = Title()
foo.title('xx')
foo.title('xxxx')
foo.r.append(foo.show_title.not_connected)
return foo.r
@run_in_both(Title, "[0]")
def test_disconnecting_react(Title):
s = Title()
# Disconnect. (Re-connecting at once used to be the behaviour for react
# signals, but that is no longer the case: disconnect() really disconnects.)
s.show_title.disconnect()
s.title('xx')
return s.r
class Unconnected(HasSignals):
@input
def s0(v=''):
return v
@connect('nope')
def s1(v):
return v
@connect('button.title')
def s2(v):
return v
@connect('s2')
def s3(v):
return v
@connect('s3')
def s4(v):
return v
@run_in_both(Unconnected, "[false, true, 'signal 'button.title' does not exist.']")
def test_unconnected1(Cls):
s = Cls()
r = []
r.append(bool(s.s0.not_connected))
r.append(bool(s.s1.not_connected))
r.append(s.s2.not_connected)
return r
@run_in_both(Unconnected, "[true, 'object 'nope' is not a signal.']")
def test_unconnected2(Cls):
s = Cls()
r = []
s.nope = 4
s.s1.connect(False)
r.append(bool(s.s1.not_connected))
r.append(s.s1.not_connected)
return r
@run_in_both(Unconnected, "[true, false, 'err2', 'err3', 'err4']")
def test_unconnected_handling(Cls):
s = Cls()
r = []
r.append(bool(s.s2.not_connected))
r.append(bool(s.s3.not_connected))
#
try:
s.s2()
except Exception:
r.append('err2') # error, because this signal is not connected
try:
s.s3()
except Exception:
r.append('err3') # error, because an upstream signal is not connected
try:
s.s4()
except Exception:
r.append('err4') # error, because an upstream signal is not connected
return r
@run_in_both(Unconnected, "['err4', 'ha', 'ho', 'err4']", extra_classes=(Title,))
def test_unconnected_connect(Cls):
s = Cls()
r = []
# We add an object named 'button' with signal 'title', exactly what s2 needs
button = Title()
s.button = button
button.title('ha')
# Now calling s4 will fail
try:
s.s4()
except Exception:
r.append('err4') # error, because an upstream signal is not connected
# We connect it
s.s2.connect()
r.append(s.s4())
# Now we remove 'button'
del s.button
# This should still work, since connections are in place
button.title('ho')
r.append(s.s4())
# And we break connections
s.s2.disconnect()
try:
s.s4()
except Exception:
r.append('err4') # error, because an upstream signal is not connected
return r
class SignalTypes(HasSignals):
@input
def s1(v=None):
return v
@source
def s2(v=None):
return v
@connect('s2')
def s3(v):
return v
@connect('s2')
def s4(v):
return v
@run_in_both(SignalTypes, "['s2', 's3', 's4', 's3', 's4']")
def test_setting_inputs(Cls):
s = Cls()
r = []
# These do not error
s.s1('foo')
s.s1._set('foo')
s.s2._set('foo')
# But these do
try:
s.s2('foo')
except Exception:
r.append('s2')
try:
s.s3('foo')
except Exception:
r.append('s3')
try:
s.s4('foo')
except Exception:
r.append('s4')
# And these too
try:
s.s3._set('foo')
except Exception:
r.append('s3')
try:
s.s4._set('foo')
except Exception:
r.append('s4')
return r
@run_in_both(SignalTypes, "[true, 'foo', 'bar']")
def test_setting_inputs2(Cls):
s = Cls()
r = []
r.append(s.s1() is None) # test no default value
s.s1('foo')
s.s2._set('bar')
r.append(s.s1())
r.append(s.s2())
return r
class UndefinedSignalValues(HasSignals):
def __init__(self):
self.r = []
super().__init__()
@input
def number1(v=1):
if v > 0:
return v
return undefined
@connect('number1')
def number2(v):
if v > 5:
return v
return undefined
@connect('number2')
def reg(self, v):
self.r.append(v)
@run_in_both(UndefinedSignalValues, "[9, 8, 7]")
def test_undefined_values(Cls):
s = Cls()
s.number1(9)
s.number1(-2)
s.number1(-3)
s.number1(8)
s.number1(3)
s.number1(4)
s.number1(7)
return s.r
class Circular(HasSignals):
@input('s3')
def s1(v1=10, v3=None):
if v3 is None:
return v1
else:
return v3 + 1
@lazy('s1')
def s2(v):
return v + 1
@lazy('s2')
def s3(v):
return v + 1
@run_in_both(Circular, "[10, 11, 12, '', 2, 3, 4]")
def test_circular(Cls):
s = Cls()
r = []
r.append(s.s1())
r.append(s.s2())
r.append(s.s3())
r.append('')
s.s1(2)
r.append(s.s1())
r.append(s.s2())
r.append(s.s3())
return r
# todo: this is not pretty. Do we need it? Can this be done differently?
class Temperature(HasSignals):  # the relation is simplified to avoid rounding errors
@input('f')
def c(v=0, f=None):
if f is None:
return int(v)
else:
return f - 32
@input('c')
def f(v=32, c=None):
if c is None:
return int(v)
else:
return c + 32
@run_in_both(Temperature, "[0, 32, '', 10, 42, '', -22, 10]")
def test_circular_temperature(Cls):
s = Cls()
r = []
r.append(s.c())
r.append(s.f())
r.append('')
s.c(10)
r.append(s.c())
r.append(s.f())
r.append('')
s.f(10)
r.append(s.c())
r.append(s.f())
return r
# todo: this does not work, but maybe it should? Although making it work would close the door to async, I think
class Temperature2(HasSignals):  # the relation is simplified to avoid rounding errors
@input
def c(v=32):
return int(v)
@input
def f(v=0):
return int(v)
@connect('f')
def _f(self, v):
self.c(v+32)
@connect('c')
def _c(self, v):
self.f(v-32)
class Name2(Name):
@connect('full_name')
def name_length(v):
return len(v)
@input
def aa():
return len(v)
@run_in_both(Name2, "['aa', 'first_name', 'full_name', 'last_name', 'name_length']")
def test_hassignal__signals__(Name2):
s = Name2()
return s.__signals__
@run_in_both(Name2, "[8, 3]")
def test_inheritance(Cls):
s = Cls()
r = []
r.append(s.name_length())
s.first_name('a')
s.last_name('b')
r.append(s.name_length())
return r
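# The classes below exercise dynamic connections: a connection string such as
# 'current_person.first_name' appears to be resolved against the *value* of the
# upstream signal at runtime, so (re)setting current_person rebinds the
# downstream signals (see the expected outputs of the two tests below).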
class Dynamism(HasSignals):
def __init__(self):
self.r = []
super().__init__()
@input
def current_person(v):
return v
@connect('current_person')
def current_person_proxy(v): # need this to cover more code
return v
@input
def current_persons(v):
return v
@connect('current_person.first_name')
def current_name1(v):
return v
@connect('current_person_proxy.first_name')
def current_name2(self, v):
self.r.append(v)
@connect('current_persons.*.first_name')
def current_name3(self, *names):
v = ''
for n in names:
v += n
self.r.append(v)
@connect('current_persons.*.bla')
def current_name4(self, *names):
pass
@run_in_both(Dynamism, "[3, 'err', 'john', 'john', 0, 3, 'john', 0, 'jane', 'jane']", extra_classes=(Name,))
def test_dynamism1(Cls):
d = Dynamism()
n = Name()
d.r.append(d.current_name2._status)
try:
d.r.append(d.current_name1())
except Exception:
d.r.append('err')
d.current_person(n)
d.r.append(d.current_name1())
d.r.append(d.current_name2._status) # 0
# Set to None, signal will not be updated
d.current_person(None)
d.r.append(d.current_name2._status) # 3
# Set back, but signal will update
d.current_person(n)
d.r.append(d.current_name2._status) # 0
# Normal update
n.first_name('jane')
d.r.append(d.current_name1())
return d.r
@run_in_both(Dynamism, "[3, 'err', 'john', 'johnjohn', 'janejohn', 'janejane', '', 3, '']", extra_classes=(Name,))
def test_dynamism2(Cls):
d = Dynamism()
n1, n2 = Name(), Name()
assert d.current_name4.not_connected
d.r.append(d.current_name3._status)
try:
d.r.append(d.current_name3())
except Exception:
d.r.append('err')
# Set persons
d.current_persons((n1, ))
d.current_persons((n1, n2))
n1.first_name('jane')
n2.first_name('jane')
d.current_persons(())
# Now set to something that has no first_name
d.current_persons(None)
d.r.append(d.current_name3._status) # 3
d.current_persons(())
return d.r
run_tests_if_main()
|
|
import datetime
import re
import urllib2
from bs4 import BeautifulSoup
from regional_poll_interpolator import RegionalPollInterpolator
party_names = {
'Cons.': 'cpc',
'NDP': 'ndp',
'Liberal': 'lpc',
'BQ': 'bq',
'Green': 'gpc',
'Other': 'oth',
}
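# Two-digit Statistics Canada province/territory codes (the first two digits of
# each riding number) mapped to the regional poll codes used by the
# interpolator; the territories ('60'-'62') fall back to the Canada-wide numbers.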
province_code_to_region = {
'10': 'ATL',
'11': 'ATL',
'12': 'ATL',
'13': 'ATL',
'24': 'QC',
'35': 'ON',
'46': 'SK_MB',
'47': 'SK_MB',
'48': 'AB',
'59': 'BC',
'60': 'Canada',
'61': 'Canada',
'62': 'Canada',
}
# Regex patterns matching riding names as they appear on Wikipedia, mapped to
# Elections Canada riding numbers; the Wikipedia names are not necessarily
# formatted in the exact way that Elections Canada has them.
riding_names_and_numbers = [
('Pitt Meadows.*Maple Ridge', '59022'),
('Langley.*Aldergrove', '59016'),
('South Surrey.*White Rock', '59030'),
('West Nova', '12011'),
('Courtenay.*Alberni', '59009'),
('Mission.*Matsqui.*Fraser Canyon', '59017'),
('Hochelaga', '24028'),
('M.*gantic.*L.*rable', '24047'),
('Manicouagan', '24046'),
('Louis-Saint-Laurent', '24045'),
('Louis-H.*bert', '24044'),
('Longueuil.*Saint-Hubert', '24043'),
('L.*vis.*Lotbini.*re', '24042'),
('Longueuil.*Charles-LeMoyne', '24041'),
('Laval.*Les .*les', '24040'),
('Abbotsford', '59001'),
('Esquimalt.*Saanich.*Sooke', '59026'),
('Montarville', '24049'),
('Mirabel', '24048'),
('Edmonton Griesbach', '48015'),
('New Westminster.*Burnaby', '59019'),
('Winnipeg North', '46012'),
('Richmond Centre', '59025'),
('Lethbridge', '48026'),
('Winnipeg South', '46013'),
('Madawaska.*Restigouche', '13005'),
('Fundy Royal', '13004'),
('Moncton.*Riverview.*Dieppe', '13007'),
('Miramichi.*Grand Lake', '13006'),
('Acadie.*Bathurst', '13001'),
('Chatham-Kent.*Leamington', '35017'),
('Fredericton', '13003'),
('Beaus.*jour', '13002'),
('Davenport', '35018'),
('Don Valley East', '35019'),
('Saint John.*Rothesay', '13009'),
('New Brunswick Southwest', '13008'),
('Edmonton West', '48020'),
('Prince George.*Peace River.*Northern Rockies', '59024'),
('Calgary Shepard', '48011'),
('Montcalm', '24050'),
('Montmagny.*Islet.*Kamouraska.*Rivi.*re-du-Loup', '24051'),
('Mount Royal', '24052'),
('Notre-Dame-de-Gr.*ce.*Westmount', '24053'),
('Outremont', '24054'),
('Papineau', '24055'),
('Pierrefonds.*Dollard', '24056'),
('Pontiac', '24057'),
('Portneuf.*Jacques-Cartier', '24058'),
('Qu.*bec', '24059'),
('Brandon.*Souris', '46001'),
('Calgary Midnapore', '48008'),
('Cloverdale.*Langley City', '59007'),
('Saanich.*Gulf Islands', '59027'),
('Victoria', '59041'),
('Chilliwack.*Hope', '59006'),
('Portage.*Lisgar', '46007'),
('Central Okanagan.*Similkameen.*Nicola', '59005'),
('Brantford.*Brant', '35013'),
('Kildonan.*St. Paul', '46006'),
('Kelowna.*Lake Country', '59014'),
('Cariboo.*Prince George', '59004'),
('Elmwood.*Transcona', '46005'),
('Tobique.*Mactaquac', '13010'),
('Etobicoke.*Lakeshore', '35028'),
('Etobicoke Centre', '35027'),
('Essex', '35026'),
('Elgin.*Middlesex.*London', '35025'),
('Eglinton.*Lawrence', '35024'),
('Durham', '35023'),
('Dufferin.*Caledon', '35022'),
('Don Valley West', '35021'),
('Joliette', '24031'),
('Brossard.*Saint-Lambert', '24017'),
('Lanark.*Frontenac.*Kingston', '35049'),
('Lambton.*Kent.*Middlesex', '35048'),
('Kitchener Centre', '35045'),
('Kingston and the Islands', '35044'),
('Kitchener South.*Hespeler', '35047'),
('Kitchener.*Conestoga', '35046'),
('Kanata.*Carleton', '35041'),
('Huron.*Bruce', '35040'),
('King.*Vaughan', '35043'),
('Kenora', '35042'),
('Calgary Signal Hill', '48012'),
('Calgary Skyview', '48013'),
('Churchill.*Keewatinook Aski', '46003'),
('Burnaby North.*Seymour', '59002'),
('Calgary Rocky Ridge', '48010'),
('Drummond', '24025'),
('Dorval.*Lachine.*LaSalle', '24024'),
('Gatineau', '24027'),
('Gasp.*sie.*Les .*les-de-la-Madeleine', '24026'),
('Ch.*teauguay.*Lacolle', '24021'),
('Beauport-C.*te-de-Beaupr.*Orl.*ans-Charlevoix', '24020'),
('Compton.*Stanstead', '24023'),
('Chicoutimi.*Le Fjord', '24022'),
('Yukon', '60001'),
('Dauphin.*Swan River.*Neepawa', '46004'),
('Honor.*-Mercier', '24029'),
('Sydney.*Victoria', '12010'),
('Edmonton Mill Woods', '48017'),
('Hamilton West.*Ancaster.*Dundas', '35038'),
('Hastings.*Lennox and Addington', '35039'),
('Saint Boniface.*Saint Vital', '46009'),
('Provencher', '46008'),
('Edmonton Centre', '48014'),
('Flamborough.*Glanbrook', '35030'),
('Glengarry.*Prescott.*Russell', '35031'),
('Guelph', '35032'),
('Haldimand.*Norfolk', '35033'),
('Haliburton.*Kawartha Lakes.*Brock', '35034'),
('Hamilton Centre', '35035'),
('Hamilton East.*Stoney Creek', '35036'),
('Hamilton Mountain', '35037'),
('Etobicoke North', '35029'),
('Mississauga Centre', '35058'),
('Mississauga East.*Cooksville', '35059'),
('Markham.*Unionville', '35056'),
('Milton', '35057'),
('Markham.*Stouffville', '35054'),
('Markham.*Thornhill', '35055'),
('London North Centre', '35052'),
('London West', '35053'),
('Leeds-Grenville-Thousand Islands and Rideau Lakes', '35050'),
('London.*Fanshawe', '35051'),
('Souris.*Moose Mountain', '47013'),
('Saskatoon West', '47012'),
('Saskatoon.*University', '47011'),
('Saskatoon.*Grasswood', '47010'),
('Yorkton.*Melville', '47014'),
('Banff.*Airdrie', '48001'),
('Bow River', '48003'),
('Battle River.*Crowfoot', '48002'),
('Calgary Confederation', '48005'),
('Calgary Centre', '48004'),
('Calgary Heritage', '48007'),
('Calgary Forest Lawn', '48006'),
('Egmont', '11003'),
('LaSalle.*mard.*Verdun', '24037'),
('La Prairie', '24034'),
('Lac-Saint-Jean', '24035'),
('Jonqui.*re', '24032'),
('La Pointe-de-.*le', '24033'),
('Hull.*Aylmer', '24030'),
('Malpeque', '11004'),
('Halifax West', '12006'),
('Kings.*Hants', '12007'),
('Dartmouth.*Cole Harbour', '12004'),
('Halifax', '12005'),
('Central Nova', '12002'),
('Cumberland.*Colchester', '12003'),
('Laurentides.*Labelle', '24038'),
('Laurier.*Sainte-Marie', '24039'),
('York South.*Weston', '35120'),
('Humber River.*Black Creek', '35121'),
('Vancouver East', '59035'),
('Don Valley North', '35020'),
('Foothills', '48022'),
('Red Deer.*Lacombe', '48030'),
('Nickel Belt', '35069'),
('Niagara West', '35068'),
('St. Albert.*Edmonton', '48031'),
('Mississauga.*Streetsville', '35063'),
('Mississauga.*Malton', '35062'),
('Mississauga.*Lakeshore', '35061'),
('Mississauga.*Erin Mills', '35060'),
('Niagara Falls', '35067'),
('Niagara Centre', '35066'),
('Newmarket.*Aurora', '35065'),
('Nepean', '35064'),
('Sackville.*Preston.*Chezzetcook', '12008'),
('Edmonton Riverbend', '48018'),
('Edmonton Strathcona', '48019'),
('Regina.*Appelle', '47008'),
('Regina.*Wascana', '47009'),
('Carlton Trail.*Eagle Creek', '47004'),
('Moose Jaw.*Lake Centre.*Lanigan', '47005'),
('Prince Albert', '47006'),
('Regina.*Lewvan', '47007'),
('Edmonton Manning', '48016'),
('Battlefords.*Lloydminster', '47001'),
('Cypress Hills.*Grasslands', '47002'),
('Desneth.*Missinippi.*Churchill River', '47003'),
('Ahuntsic-Cartierville', '24003'),
('Abitibi.*T.*miscamingue', '24002'),
('Abitibi.*Baie-James.*Nunavik.*Eeyou', '24001'),
('Vancouver South', '59040'),
('Beauce', '24007'),
('Avignon.*La Mitis.*Matane.*Matap.*dia', '24006'),
('Argenteuil.*La Petite-Nation', '24005'),
('Alfred-Pellan', '24004'),
('B.*cancour.*Nicolet.*Saurel', '24009'),
('Beauport.*Limoilou', '24008'),
('Brampton North', '35010'),
('North Vancouver', '59021'),
('Brampton South', '35011'),
('Nanaimo.*Ladysmith', '59018'),
('York.*Simcoe', '35119'),
('York Centre', '35118'),
('Windsor West', '35117'),
('Cambridge', '35016'),
('Willowdale', '35115'),
('Whitby', '35114'),
('Wellington.*Halton Hills', '35113'),
('Waterloo', '35112'),
('Vaughan.*Woodbridge', '35111'),
('University.*Rosedale', '35110'),
('Medicine Hat.*Cardston.*Warner', '48027'),
('Lac-Saint-Louis', '24036'),
('Lakeland', '48025'),
('Grande Prairie.*Mackenzie', '48024'),
('Fort McMurray.*Cold Lake', '48023'),
('Bruce.*Grey.*Owen Sound', '35014'),
('Edmonton.*Wetaskiwin', '48021'),
('Charlottetown', '11002'),
('Burlington', '35015'),
('Cardigan', '11001'),
('Red Deer.*Mountain View', '48029'),
('Peace River.*Westlock', '48028'),
('Sault Ste. Marie', '35092'),
('Vancouver Kingsway', '59038'),
('Windsor.*Tecumseh', '35116'),
('Selkirk.*Interlake.*Eastman', '46010'),
('Winnipeg South Centre', '46014'),
('Winnipeg Centre', '46011'),
('Charleswood.*St. James.*Assiniboia.*Headingley', '46002'),
('Rimouski-Neigette.*T.*miscouata.*Les Basques', '24018'),
('Charlesbourg.*Haute-Saint-Charles', '24019'),
('Pierre-Boucher.*Les Patriotes.*Verch.*res', '24014'),
('Bourassa', '24015'),
('Brome.*Missisquoi', '24016'),
('South Shore.*St. Margarets', '12009'),
('Bellechasse.*Les Etchemins.*L.*vis', '24010'),
('Beloeil.*Chambly', '24011'),
('Berthier.*Maskinong.*', '24012'),
('Th.*r.*se-De Blainville', '24013'),
('Toronto Centre', '35108'),
('Toronto.*Danforth', '35109'),
('Calgary Nose Hill', '48009'),
('Surrey Centre', '59032'),
('Surrey.*Newton', '59033'),
('Simcoe North', '35100'),
('Spadina.*Fort York', '35101'),
('Stormont.*Dundas.*South Glengarry', '35102'),
('Sudbury', '35103'),
('Thornhill', '35104'),
('Thunder Bay.*Rainy River', '35105'),
('Thunder Bay.*Superior North', '35106'),
('Timmins.*James Bay', '35107'),
('Labrador', '10004'),
('Long Range Mountains', '10005'),
('St. John\'s East', '10006'),
('St. John\'s South.*Mount Pearl', '10007'),
('Yellowhead', '48034'),
('Avalon', '10001'),
('Bonavista.*Burin.*Trinity', '10002'),
('Coast of Bays.*Central.*Notre Dame', '10003'),
('Vancouver Granville', '59036'),
('Steveston.*Richmond East', '59031'),
('North Island.*Powell River', '59037'),
('St. Catharines', '35089'),
('Carleton', '35088'),
('Vancouver Centre', '59034'),
('Burnaby South', '59003'),
('Parkdale.*High Park', '35081'),
('Oxford', '35080'),
('Perth.*Wellington', '35083'),
('Cape Breton.*Canso', '12001'),
('Pickering.*Uxbridge', '35085'),
('Peterborough.*Kawartha', '35084'),
('Richmond Hill', '35087'),
('Renfrew.*Nipissing.*Pembroke', '35086'),
('Saint-L.*onard.*Saint-Michel', '24069'),
('Saint-Laurent', '24068'),
('Parry Sound.*Muskoka', '35082'),
('Nunavut', '62001'),
('Richmond.*Arthabaska', '24061'),
('Repentigny', '24060'),
('Rivi.*re-du-Nord', '24063'),
('Rivi.*re-des-Mille-.*les', '24062'),
('Marc-Aur.*le-Fortin', '24065'),
('Rosemont.*La Petite-Patrie', '24064'),
('Saint-Jean', '24067'),
('Saint-Hyacinthe.*Bagot', '24066'),
('Oshawa', '35074'),
('Ottawa Centre', '35075'),
('Orl.*ans', '35076'),
('Ottawa South', '35077'),
('Nipissing.*Timiskaming', '35070'),
('Northumberland.*Peterborough South', '35071'),
('Oakville', '35072'),
('Oakville North.*Burlington', '35073'),
('Ottawa.*Vanier', '35078'),
('Ottawa West.*Nepean', '35079'),
('Northwest Territories', '61001'),
('West Vancouver.*Sunshine Coast.*Sea to Sky Country', '59042'),
('South Okanagan.*West Kootenay', '59029'),
('Sherwood Park.*Fort Saskatchewan', '48032'),
('Skeena.*Bulkley Valley', '59028'),
('Coquitlam.*Port Coquitlam', '59008'),
('North Okanagan.*Shuswap', '59020'),
('Scarborough Southwest', '35098'),
('Simcoe.*Grey', '35099'),
('Fleetwood.*Port Kells', '59012'),
('Brampton West', '35012'),
('Scarborough.*Agincourt', '35093'),
('Toronto.*St.*s', '35090'),
('Sarnia.*Lambton', '35091'),
('Scarborough North', '35096'),
('Scarborough.*Rouge Park', '35097'),
('Scarborough Centre', '35094'),
('Scarborough.*Guildwood', '35095'),
('Cowichan.*Malahat.*Langford', '59010'),
('Vimy', '24078'),
('Vancouver Quadra', '59039'),
('Delta', '59011'),
('Shefford', '24072'),
('Sherbrooke', '24073'),
('Saint-Maurice.*Champlain', '24070'),
('Salaberry.*Suro.*t', '24071'),
('Trois-Rivi.*res', '24076'),
('Ville-Marie.*Le Sud-Ouest.*le-des-Soeurs', '24077'),
('Vaudreuil.*Soulanges', '24074'),
('Terrebonne', '24075'),
('Ajax', '35001'),
('Kamloops.*Thompson.*Cariboo', '59013'),
('Aurora.*Oak Ridges.*Richmond Hill', '35003'),
('Algoma.*Manitoulin.*Kapuskasing', '35002'),
('Barrie.*Springwater.*Oro-Medonte', '35005'),
('Barrie.*Innisfil', '35004'),
('Beaches.*East York', '35007'),
('Bay of Quinte', '35006'),
('Brampton East', '35009'),
('Brampton Centre', '35008'),
('Sturgeon River.*Parkland', '48033'),
('Port Moody.*Coquitlam', '59023'),
('Kootenay.*Columbia', '59015'),
]
def RidingNameToNumber(riding_name):
for name_pattern, number in riding_names_and_numbers:
if re.match(name_pattern, riding_name):
return number
return None
def RidingNumberToRegionCode(riding_number):
province_code = str(riding_number)[0:2]
return province_code_to_region[province_code]
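# Illustrative example: RidingNameToNumber('Abbotsford') returns '59001', and
# RidingNumberToRegionCode('59001') returns 'BC' (leading province code '59').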
def DictVectorToString(vector):
strings = []
for k, v in sorted(vector.items()):
strings.append('%s %.2f' % (k, v))
return ' '.join(strings)
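# Illustrative example: DictVectorToString({'cpc': 0.351, 'lpc': 0.3}) returns
# 'cpc 0.35 lpc 0.30' (keys sorted, values formatted to two decimal places).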
# The projections based on riding polls are stored in here, keyed by riding
# number as a string.
projections_by_riding_number = {}
# Load the poll data interpolator.
interpolator = RegionalPollInterpolator()
interpolator.LoadFromCsv('regional_poll_averages.csv')
interpolator.LoadFromCsv('regional_baseline.csv')
# Download riding polls from Wikipedia.
url = ('https://en.wikipedia.org/wiki/Opinion_polling_in_the_Canadian_' +
'federal_election,_2015_by_constituency')
response = urllib2.urlopen(url)
html = response.read()
# Parse the HTML from Wikipedia to extract riding names and data tables.
soup = BeautifulSoup(html, 'html.parser')
tables = soup.find_all('table', 'wikitable sortable')
riding_titles = soup.find_all('h4')
assert len(riding_titles) == len(tables)
# Process each riding.
ridings_with_local_poll_data = 0
poll_counter = 0
for riding_title, table in zip(riding_titles, tables):
riding_name = riding_title.find('a').get_text()
riding_number = RidingNameToNumber(riding_name)
assert riding_number, 'No mapping for riding ' + riding_name
ridings_with_local_poll_data += 1
region = RidingNumberToRegionCode(riding_number)
rows = table.find_all('tr')
header_row = rows[0]
party_columns = {n: -1 for n in party_names}
date_column = -1
sample_size_column = -1
for column_index, column_title in enumerate(header_row.find_all('th')):
column_title = column_title.get_text().replace('\n', ' ')
if column_title == 'Last Date of Polling':
date_column = column_index
if column_title.startswith('Sample Size'):
sample_size_column = column_index
if column_title in party_columns:
party_columns[column_title] = column_index
assert date_column >= 0
assert sample_size_column >= 0
weighted_projection = {}
total_weight = 0
data_rows = rows[1:]
for row in data_rows:
columns = row.find_all('td')
party_numbers = {}
for party_name, party_index in party_columns.items():
if party_index >= 0:
number_string = columns[party_index].get_text()
party_code = party_names[party_name]
party_numbers[party_code] = float(number_string) / 100
date_string = columns[date_column].find('span', '').get_text()
parsed_date = datetime.datetime.strptime(date_string, '%B %d, %Y')
sample_size_string = columns[sample_size_column].get_text().strip()
if sample_size_string:
sample_size = float(sample_size_string.replace(',', ''))
else:
sample_size = 0
poll_projection = interpolator.ProportionalSwingProjection(
region, parsed_date, party_numbers)
age_seconds = (datetime.datetime.now() - parsed_date).total_seconds()
age_days = float(age_seconds) / (24 * 3600)
age_years = age_days / 365.25
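    # Weight each poll by its sample size, decayed exponentially with age:
    # every year of age multiplies the weight by 0.25.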
weight = sample_size * (0.25 ** age_years)
total_weight += weight
poll_counter += 1
for party, support in poll_projection.items():
if party not in weighted_projection:
weighted_projection[party] = 0
weighted_projection[party] += weight * support
for party in weighted_projection:
weighted_projection[party] /= total_weight
projections_by_riding_number[str(riding_number)] = weighted_projection
print 'ridings with local poll data:', ridings_with_local_poll_data
print 'num polls:', poll_counter
|
|
import subprocess
from subprocess import call, STDOUT
class ReportPDF(object):
"""PDF generated from an automatically generated LaTeX source containing results of experiments.
Content of the PDF is defined by a template defined by a user.
"""
GEOM_PARAMS = "[paperwidth=65cm, paperheight=40cm, margin=0.3cm]"
def __init__(self, contents = None, packages=None, geometry_params=GEOM_PARAMS, user_declarations="", tabcolsep=8):
if contents is None:
contents = []
if packages is None:
packages = []
else:
packages = [p if p[0]=="{" and p[-1]=="}" else "{" + p + "}" for p in packages]
assert isinstance(contents, list)
assert isinstance(packages, list)
self.tabcolsep = tabcolsep
self.geometry_params = geometry_params
self.user_declarations = user_declarations
self.root = BlockEnvironment("document", [BlockBundle(contents)])
self.packages = ["[utf8]{inputenc}",
self.geometry_params + "{geometry}",
"[usenames,dvipsnames,table]{xcolor}",
"{hyperref}",
"{graphicx}",
"{booktabs}",
"{float}"]
self.packages.extend(packages)
self.blocks = [self.get_preamble(), self.root]
def get_packages_list(self):
return self.packages
def add(self, block):
"""Adds a block inside a document environment."""
self.root.append(block)
def apply(self):
"""Creates report for the data and returns its LaTeX code."""
text = ""
for b in self.blocks:
text += b.getText(opts={})
return text
def save(self, filename):
"""Saves LaTeX source file under the given name."""
file_ = open(filename, 'w')
file_.write(self.apply())
file_.close()
def save_and_compile(self, filename):
"""Saves LaTeX source file under the given name and compiles it using pdflatex."""
self.save(filename)
try:
subprocess.check_output(["pdflatex", "-interaction=nonstopmode", filename], stderr=STDOUT, universal_newlines=True)
subprocess.check_output(["pdflatex", "-interaction=nonstopmode", filename], stderr=STDOUT, universal_newlines=True) # for index to catch up
except subprocess.CalledProcessError as exc:
print("Status: FAIL, return code: {0}, msg: {1}".format(exc.returncode, exc.output.replace("\\n", "\n")))
noext = filename[:filename.rfind('.')]
call(["rm", "-f", noext+".aux", noext+".log", noext+".bbl", noext+".blg", noext+".out"])
def get_preamble(self):
text = r"\documentclass[12pt]{article}" + "\n\n"
for p in self.get_packages_list():
text += r"\usepackage" + p + "\n"
text += "\n"
text += r"\DeclareUnicodeCharacter{00A0}{~} % replacing non-breaking spaces" + "\n"
text += r"\setlength{\tabcolsep}{" + str(self.tabcolsep) + "pt}" + "\n"
text += "\n"
text += self.user_declarations + "\n"
text += "\n\n"
return BlockLatex(text)
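# The block classes below share a minimal interface: a getText(opts) method
# returning their LaTeX code (ColorScheme3 is converted via toBlockLatex()),
# so blocks can be nested and mixed freely.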
class BlockBundle(object):
"""Simply stores several blocks in a collection."""
def __init__(self, contents):
assert isinstance(contents, list)
self.contents = contents
def getText(self, opts):
return self.merge_items(opts=opts)
def merge_items(self, opts):
text = ""
d = opts
for b in self.contents:
if isinstance(b, str):
text += b
else:
text += b.getText(opts=d)
return text
def add(self, b):
self.contents.append(b)
class BlockLatex(object):
"""Simply stores as a single string blob several LaTeX instructions or whole text paragraphs."""
def __init__(self, text):
self.text = text
def __str__(self):
return self.text
def getText(self, opts):
return self.text
class BlockEnvironment(object):
def __init__(self, name, contents):
assert isinstance(contents, list)
self.name = name
self.contents = contents
def getText(self, opts):
text = r"\begin{" + self.name + "}\n\n"
for b in self.contents:
if isinstance(b, str):
text += b
else:
text += b.getText(opts=opts)
text += r"\end{" + self.name + "}\n"
return text
def append(self, block):
self.contents.append(block)
class Section(BlockBundle):
def __init__(self, title, contents=None):
if contents is None:
contents = []
assert isinstance(contents, list)
BlockBundle.__init__(self, contents)
self.title = title
self.level = 0
self.cmd = "section"
def getText(self, opts):
text = "\\" + self.cmd + "{" + self.title + "}\n"
opts["section_level"] = self.level + 1 # to pass deeper
text += self.merge_items(opts=opts)
opts["section_level"] = self.level # retract for the other cmds on the same level
return text
class SectionRelative(BlockBundle):
"""Section which detects the current level of nested sections and posits itself
either under the last section or on the same level, depending on user's options.
move argument in constructor defines, on which level relative to the current
"""
def __init__(self, title, contents=None, move=0):
if contents is None:
contents = []
assert isinstance(contents, list)
BlockBundle.__init__(self, contents)
self.title = title
self.move = move
def getText(self, opts):
opts["section_level"] = opts.get("section_level", 0) + self.move
sect_level = opts["section_level"] # remember current section level
assert sect_level <= 2, "Latex supports nested sections only up to subsubsection."
subs = "sub" * opts["section_level"]
text = "\\" + subs + "section{" + self.title + "}\n"
opts["section_level"] += 1 # to pass deeper
text += self.merge_items(opts)
opts["section_level"] = sect_level # retract for the other cmds on the same level
return text
class Subsection(Section):
def __init__(self, title, contents=None):
if contents is None:
contents = []
assert isinstance(contents, list)
Section.__init__(self, title, contents)
self.level = 1
self.cmd = "subsection"
class Subsubsection(Section):
def __init__(self, title, contents=None):
if contents is None:
contents = []
assert isinstance(contents, list)
Section.__init__(self, title, contents)
self.level = 2
self.cmd = "subsubsection"
class FloatFigure:
def __init__(self, path, caption=None, label=None, pos="H", graphics_opts=""):
self.path = path
self.caption = caption
self.label = label
self.pos = pos
self.graphics_opts = graphics_opts
def getText(self, opts):
text = r"\begin{figure}[" + self.pos + "]\n"
text += r"\includegraphics[" + self.graphics_opts + "]{" + self.path + "}\n"
if self.caption is not None:
text += r"\caption{" + self.caption + "}\n"
if self.label is not None:
text += r"\label{" + self.label + "}\n"
text += r"\end{figure}" + "\n\n"
return text
class ColorScheme3:
def __init__(self, colors, comments=None, nameLow="colorLow", nameMedium="colorMedium", nameHigh="colorHigh"):
if comments is None:
comments = ["", "", ""]
assert isinstance(colors, list), "ColorScheme3 expects a list with RGB values."
assert len(colors) == 3, "ColorScheme3 must be composed from exactly three colors."
assert isinstance(comments, list)
assert len(comments) == 3
self.colors = colors
self.comments = comments
self.nameLow = nameLow
self.nameMedium = nameMedium
self.nameHigh = nameHigh
def getColorNames(self):
return [self.nameLow, self.nameMedium, self.nameHigh]
def toBlockLatex(self):
return BlockLatex(self.__str__())
def __reversed__(self):
newColors = list(reversed(self.colors[:]))
newComments = list(reversed(self.comments[:]))
return ColorScheme3(newColors, comments=newComments, nameLow=self.nameLow,
nameMedium=self.nameMedium, nameHigh=self.nameHigh)
def __str__(self):
text = ""
for rgb, comment, colorName in zip(self.colors, self.comments, self.getColorNames()):
comment = " % {0}".format(comment) if comment != "" else ""
text += "\definecolor{" + str(colorName) + "}{rgb}{" + str(rgb) + "}" + str(comment) + "\n"
return text
color_scheme_darkgreen = ColorScheme3(["1.0, 1.0, 1.0", "0.3, 0.6, 0.3", "0.0, 0.4, 0.0"],
["white", "light green", "dark green"])
color_scheme_gray_light = ColorScheme3(["1.0, 1.0, 1.0", "0.9, 0.9, 0.9", "0.75, 0.75, 0.75"],
["white", "light gray", "gray"])
color_scheme_gray_dark = ColorScheme3(["1.0, 1.0, 1.0", "0.75, 0.75, 0.75", "0.5, 0.5, 0.5"],
["white", "light gray", "gray"])
color_scheme_blue = ColorScheme3(["1.0, 1.0, 1.0", "0.83, 0.89, 0.98", "0.63, 0.79, 0.95"],
["white", "light blue", "blue"])
color_scheme_green = ColorScheme3(["1.0, 1.0, 1.0", "0.76, 0.98, 0.76", "0.66, 0.90, 0.66"],
["white", "light green", "green"])
color_scheme_yellow = ColorScheme3(["1.0, 1.0, 1.0", "0.98, 0.91, 0.71", "1.0, 0.75, 0.0"],
["white", "light yellow", "yellow"])
color_scheme_violet = ColorScheme3(["1.0, 1.0, 1.0", "0.85, 0.65, 0.92", "0.65, 0.45, 0.85"],
["white", "light violet", "violet"])
color_scheme_teal = ColorScheme3(["1.0, 1.0, 1.0", "0.67, 0.87, 0.88", "0.47, 0.72, 0.73"],
["white", "light teal", "teal"])
color_scheme_brown = ColorScheme3(["1.0, 1.0, 1.0", "0.96, 0.8, 0.62", "0.76, 0.6, 0.42"],
["white", "light brown", "brown"])
color_scheme_red = ColorScheme3(["1.0, 1.0, 1.0", "0.95, 0.6, 0.6", "0.8, 0, 0"],
["white", "light red", "red"])
color_scheme_red2yellow2green = ColorScheme3(["0.94, 0.5, 0.5", "1.0, 1.0, 0.0", "0.56, 0.93, 0.56"],
["red", "yellow", "green"])
color_scheme_red2white2green = ColorScheme3(["0.94, 0.5, 0.5", "1.0, 1.0, 1.0", "0.56, 0.93, 0.56"],
["red", "white", "green"])
color_scheme_red2white2darkgreen = ColorScheme3(["0.94, 0.5, 0.5", "1.0, 1.0, 1.0", "0.0, 0.4, 0.0"],
["red", "white", "green"])
|
|
# -*- coding: utf-8 -*-
"""
gspread.client
~~~~~~~~~~~~~~
This module contains Client class responsible for communicating with
Google Data API.
"""
import json
try:
import xml.etree.cElementTree as ElementTree
except ImportError:
from xml.etree import ElementTree
from . import urlencode
from .ns import _ns
from .httpsession import HTTPSession
from .exceptions import RequestError
from .models import Spreadsheet
from .urls import (
construct_url,
DRIVE_FILES_API_V2_URL,
DRIVE_FILES_UPLOAD_API_V2_URL
)
from .utils import finditem, extract_id_from_url
from .exceptions import (SpreadsheetNotFound, UpdateCellError)
class Client(object):
"""An instance of this class communicates with Google Data API.
:param auth: An OAuth2 credential object. Credential objects are those created by the
oauth2client library. https://github.com/google/oauth2client
:param http_session: (optional) A session object capable of making HTTP requests while persisting headers.
Defaults to :class:`~gspread.httpsession.HTTPSession`.
>>> c = gspread.Client(auth=OAuthCredentialObject)
"""
def __init__(self, auth, http_session=None):
self.auth = auth
self.session = http_session or HTTPSession()
def _ensure_xml_header(self, data):
if data.startswith(b'<?xml'):
return data
else:
return b'<?xml version="1.0" encoding="utf8"?>' + data
def login(self):
"""Authorize client."""
if not self.auth.access_token or \
(hasattr(self.auth, 'access_token_expired') and self.auth.access_token_expired):
import httplib2
http = httplib2.Http()
self.auth.refresh(http)
self.session.add_header('Authorization', "Bearer " + self.auth.access_token)
def open(self, title):
"""Opens a spreadsheet.
:param title: A title of a spreadsheet.
:returns: a :class:`~gspread.Spreadsheet` instance.
        If there's more than one spreadsheet with the same title, the first
        one will be opened.
:raises gspread.SpreadsheetNotFound: if no spreadsheet with
specified `title` is found.
>>> c = gspread.authorize(credentials)
>>> c.open('My fancy spreadsheet')
"""
feed = self.get_spreadsheets_feed()
for elem in feed.findall(_ns('entry')):
elem_title = elem.find(_ns('title')).text
if elem_title.strip() == title:
return Spreadsheet(self, elem)
else:
raise SpreadsheetNotFound
def open_by_key(self, key):
"""Opens a spreadsheet specified by `key`.
:param key: A key of a spreadsheet as it appears in a URL in a browser.
:returns: a :class:`~gspread.Spreadsheet` instance.
:raises gspread.SpreadsheetNotFound: if no spreadsheet with
specified `key` is found.
>>> c = gspread.authorize(credentials)
>>> c.open_by_key('0BmgG6nO_6dprdS1MN3d3MkdPa142WFRrdnRRUWl1UFE')
"""
feed = self.get_spreadsheets_feed()
for elem in feed.findall(_ns('entry')):
alter_link = finditem(lambda x: x.get('rel') == 'alternate',
elem.findall(_ns('link')))
spreadsheet_id = extract_id_from_url(alter_link.get('href'))
if spreadsheet_id == key:
return Spreadsheet(self, elem)
else:
raise SpreadsheetNotFound
def open_by_url(self, url):
"""Opens a spreadsheet specified by `url`.
:param url: URL of a spreadsheet as it appears in a browser.
:returns: a :class:`~gspread.Spreadsheet` instance.
:raises gspread.SpreadsheetNotFound: if no spreadsheet with
specified `url` is found.
>>> c = gspread.authorize(credentials)
>>> c.open_by_url('https://docs.google.com/spreadsheet/ccc?key=0Bm...FE&hl')
"""
return self.open_by_key(extract_id_from_url(url))
def openall(self, title=None):
"""Opens all available spreadsheets.
:param title: (optional) If specified can be used to filter
spreadsheets by title.
:returns: a list of :class:`~gspread.Spreadsheet` instances.
"""
feed = self.get_spreadsheets_feed()
result = []
for elem in feed.findall(_ns('entry')):
if title is not None:
elem_title = elem.find(_ns('title')).text
if elem_title.strip() != title:
continue
result.append(Spreadsheet(self, elem))
return result
def get_spreadsheets_feed(self, visibility='private', projection='full'):
url = construct_url('spreadsheets',
visibility=visibility, projection=projection)
r = self.session.get(url)
return ElementTree.fromstring(r.content)
def get_worksheets_feed(self, spreadsheet,
visibility='private', projection='full'):
url = construct_url('worksheets', spreadsheet,
visibility=visibility, projection=projection)
r = self.session.get(url)
return ElementTree.fromstring(r.content)
def get_cells_feed(self, worksheet,
visibility='private', projection='full', params=None):
url = construct_url('cells', worksheet,
visibility=visibility, projection=projection)
if params:
params = urlencode(params)
url = '%s?%s' % (url, params)
r = self.session.get(url)
return ElementTree.fromstring(r.content)
def get_feed(self, url):
r = self.session.get(url)
return ElementTree.fromstring(r.content)
def del_spreadsheet(self, file_id):
"""Deletes a spreadsheet.
:param file_id: a spreadsheet ID (aka file ID.)
"""
url = '{0}/{1}'.format(
DRIVE_FILES_API_V2_URL,
file_id
)
self.session.delete(url)
def del_worksheet(self, worksheet):
url = construct_url(
'worksheet',
worksheet,
'private',
'full',
worksheet_version=worksheet.version
)
self.session.delete(url)
def get_cells_cell_id_feed(self, worksheet, cell_id,
visibility='private', projection='full'):
url = construct_url('cells_cell_id', worksheet, cell_id=cell_id,
visibility=visibility, projection=projection)
r = self.session.get(url)
return ElementTree.fromstring(r.content)
def put_feed(self, url, data):
headers = {'Content-Type': 'application/atom+xml',
'If-Match': '*'}
data = self._ensure_xml_header(data)
try:
r = self.session.put(url, data, headers=headers)
except RequestError as ex:
if ex[0] == 403:
raise UpdateCellError(ex[1])
else:
raise
return ElementTree.fromstring(r.content)
def post_feed(self, url, data):
headers = {'Content-Type': 'application/atom+xml'}
data = self._ensure_xml_header(data)
r = self.session.post(url, data, headers=headers)
return ElementTree.fromstring(r.content)
def post_cells(self, worksheet, data):
headers = {'Content-Type': 'application/atom+xml',
'If-Match': '*'}
data = self._ensure_xml_header(data)
url = construct_url('cells_batch', worksheet)
r = self.session.post(url, data, headers=headers)
return ElementTree.fromstring(r.content)
def create(self, title):
"""Creates a new spreadsheet.
:param title: A title of a new spreadsheet.
:returns: a :class:`~gspread.Spreadsheet` instance.
.. note::
In order to use this method, you need to add
``https://www.googleapis.com/auth/drive`` to your oAuth scope.
Example::
scope = [
'https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive'
]
Otherwise you will get an ``Insufficient Permission`` error
when you try to create a new spreadsheet.
"""
headers = {'Content-Type': 'application/json'}
data = {
'title': title,
'mimeType': 'application/vnd.google-apps.spreadsheet'
}
r = self.session.post(
DRIVE_FILES_API_V2_URL,
json.dumps(data),
headers=headers
)
spreadsheet_id = r.json()['id']
return self.open_by_key(spreadsheet_id)
def import_csv(self, file_id, data):
"""Imports data into the first page of the spreadsheet.
:param data: A CSV string of data.
"""
headers = {'Content-Type': 'text/csv'}
url = '{0}/{1}'.format(DRIVE_FILES_UPLOAD_API_V2_URL, file_id)
self.session.put(
url,
data=data,
params={
'uploadType': 'media',
'convert': True
},
headers=headers
)
def list_permissions(self, file_id):
"""Retrieve a list of permissions for a file.
:param file_id: a spreadsheet ID (aka file ID.)
"""
url = '{0}/{1}/permissions'.format(DRIVE_FILES_API_V2_URL, file_id)
headers = {'Content-Type': 'application/json'}
r = self.session.get(url, headers=headers)
return r.json()['items']
def insert_permission(
self,
file_id,
value,
perm_type,
role,
notify=True,
email_message=None
):
"""Creates a new permission for a file.
:param file_id: a spreadsheet ID (aka file ID.)
:param value: user or group e-mail address, domain name
or None for 'default' type.
:param perm_type: the account type.
Allowed values are: ``user``, ``group``, ``domain``,
``anyone``
:param role: the primary role for this user.
Allowed values are: ``owner``, ``writer``, ``reader``
:param notify: Whether to send an email to the target user/domain.
:param email_message: an email message to be sent if notify=True.
Examples::
# Give write permissions to otto@example.com
gc.insert_permission(
'0BmgG6nO_6dprnRRUWl1UFE',
'otto@example.org',
perm_type='user',
role='writer'
)
# Make the spreadsheet publicly readable
gc.insert_permission(
'0BmgG6nO_6dprnRRUWl1UFE',
None,
perm_type='anyone',
role='reader'
)
"""
url = '{0}/{1}/permissions'.format(DRIVE_FILES_API_V2_URL, file_id)
data = {
'value': value,
'type': perm_type,
'role': role,
}
params = {
'sendNotificationEmails': notify,
'emailMessage': email_message
}
headers = {'Content-Type': 'application/json'}
self.session.post(
url,
json.dumps(data),
params=params,
headers=headers
)
def remove_permission(self, file_id, permission_id):
"""Deletes a permission from a file.
:param file_id: a spreadsheet ID (aka file ID.)
:param permission_id: an ID for the permission.
"""
url = '{0}/{1}/permissions/{2}'.format(
DRIVE_FILES_API_V2_URL,
file_id,
permission_id
)
headers = {'Content-Type': 'application/json'}
self.session.delete(url, headers=headers)
def authorize(credentials):
"""Login to Google API using OAuth2 credentials.
This is a shortcut function which instantiates :class:`Client`
and performs login right away.
:returns: :class:`Client` instance.
"""
client = Client(auth=credentials)
client.login()
return client
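# Usage sketch (illustrative; `credentials` is an oauth2client credential object
# and the spreadsheet title is made up):
#
#     gc = authorize(credentials)
#     sh = gc.open('My fancy spreadsheet')   # or open_by_key() / open_by_url()
#     gc.import_csv(<file_id>, csv_string)   # replace sheet contents with CSV data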
|
|
#!/usr/bin/env python2
# transpiled with BefunCompile v1.3.0 (c) 2017
import random
import sys
import zlib, base64
_g = ("AR+LCAAAAAAABACtVMFy4yAM/RXVuLMzMN4FDGnCUKWf0YOH9uarT5zcf18JO8ROsrs9rA5PSAEhPT+SAeD9Hf6XvX7TeG9ejsSCOd4XO3/T6HAsJWKcu+apCeTU6Rg2"
+ "JVE7PTVfX2+NlP4o8+4ejj6ygM5ErmJ0yRmNS4ulTJzj7ZGLPeicK8AcVLTnJKQrrr3uS/sjaLpglR7MtO/rxlIfN25j3TaYw74vRMwLbCgucXXVs9nNjO3tTcYyMQwi"
+ "TdpF5X0btSl0rG7xa73aC6DTY6ApzWjUQG5Ckc79AkrRJ3kO9IU4tuhkceriZwz+ILv5OuMDdtClHAlA1Dkp7lP5bXHV7w+2/pj3xEcefe7kyRUpSbssXsJKagYavsy+"
+ "ZZpNZJBxz2/a7KgbH2jm3gp7Lez4FUvh5bPlOxC3uarUIpI9vxBmlh4BdHX5NwCxFjMcBpt3/KL3KqM/AOHByYzNj4bizK8vw+YtLc2TIPquRHOYrIKIIRhlR9bIAJZW"
+ "BAZGGD7/yFCuuqXO8GDpYWNbUokedIidmewwgvbKQugMTFx3AL4DQqBQe7qAjNLhIwluyp2sVFVsqEl8R0lNzCwVa2H82TM4YHy7a4lLHJ5pkF9WekmMtJR7cetfTqKL"
+ "mQQUMV3IRFHxAtdgnW7VC17mTg8E9A9x3Wcp0/Ir/g3VSFYoBAYAAA==")
g = base64.b64decode(_g)[1:]
for i in range(ord(base64.b64decode(_g)[0])):
g = zlib.decompress(g, 16+zlib.MAX_WBITS)
g=list(map(ord, g))
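# Helpers emulating the Befunge machine this program was transpiled from:
# gr/gw read and write cells of the 77x20 playfield stored in g, rd() is a
# random coin flip, td/tm are division and modulo returning 0 on division by
# zero, and sp/sa/sr pop, push and peek the value stack.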
def gr(x,y):
if(x>=0 and y>=0 and x<77 and y<20):
return g[y*77 + x];
return 0;
def gw(x,y,v):
if(x>=0 and y>=0 and x<77 and y<20):
g[y*77 + x]=v;
def rd():
return bool(random.getrandbits(1))
def td(a,b):
return ((0)if(b==0)else(a//b))
def tm(a,b):
return ((0)if(b==0)else(a%b))
s=[]
def sp():
global s
if (len(s) == 0):
return 0
return s.pop()
def sa(v):
global s
s.append(v)
def sr():
global s
if (len(s) == 0):
return 0
return s[-1]
def _0():
gw(4,0,0)
gw(41,1,0)
sa(1000000)
sa(39)
sa(39)
return 1
def _1():
return (98)if(sp()!=0)else(2)
def _2():
sp();
return 3
def _3():
sa(gr(4,0))
gw(gr(4,0)+2,1,gr(gr(4,0)+2,1)+1)
return 4
def _4():
return (((97)if(rd())else(96))if(rd())else((95)if(rd())else(5)))
def _5():
sa(2)
return 6
def _6():
return (((94)if(rd())else(93))if(rd())else((92)if(rd())else(7)))
def _7():
sa(sp()+3)
return 8
def _8():
sa(sp()+sp());
sa(sp()%40);
sa(sr());
gw(4,0,sp())
return 9
def _9():
return (((91)if(rd())else(90))if(rd())else((89)if(rd())else(10)))
def _10():
sa(8)
return 11
def _11():
return (((88)if(rd())else(87))if(rd())else((86)if(rd())else(12)))
def _12():
sa(sp()+2)
return 13
def _13():
sa(sp()*4)
return 14
def _14():
return (((85)if(rd())else(84))if(rd())else((83)if(rd())else(15)))
def _15():
sa(sp()+2)
return 16
def _16():
return (29)if(sp()!=0)else(17)
def _17():
gw(4,0,10)
sp();
return 18
def _18():
sa(sp()-1)
sa(sr());
return (3)if(sp()!=0)else(19)
def _19():
gw(41,2,39)
sp();
sa(39)
sa(39)
return 20
def _20():
return (28)if(sp()!=0)else(21)
def _21():
sp();
sa(0)
sa(1)
return 22
def _22():
return (23)if(sp()!=0)else(27)
def _23():
sa(sr());
return (24)if(sp()!=0)else(26)
def _24():
global t0
global t1
global t2
sa(sr());
t0=gr(gr(sr()+1,2)+2,1)
sa(sp()+2)
sa(2)
v0=sp()
sa(gr(sp(),v0))
sa(sp()+2)
sa(1)
v0=sp()
t1=gr(sp(),v0)
t2=(1)if(t0<t1)else(0)
return (25)if((t2)!=0)else(26)
def _25():
gw(5,0,gr(sr()+2,2))
sa(sr());
sa(gr(sr()+1,2))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(sp()+2)
sa(2)
v0=sp()
v1=sp()
gw(v1,v0,sp())
sa(sp()-1)
sa(sr()+2)
sa(gr(5,0))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(2)
v0=sp()
v1=sp()
gw(v1,v0,sp())
sa(sp()-1)
return 23
def _26():
sa(sp()+1)
sa((1)if(sr()<40)else(0))
return 22
def _27():
sys.stdout.write(str(gr(2,2))+" ")
sys.stdout.flush()
sys.stdout.write(str(gr(3,2))+" ")
sys.stdout.flush()
sys.stdout.write(str(gr(4,2))+" ")
sys.stdout.flush()
sp();
return 99
def _28():
sa(sp()-1)
sa(sr());
sa(sr()+2)
sa(2)
v0=sp()
v1=sp()
gw(v1,v0,sp())
sa(sr());
return 20
def _29():
return (30)if(sr()!=30)else(17)
def _30():
return (45)if(sr()!=2)else(31)
def _31():
sp();
return 32
def _32():
return (((44)if(rd())else(43))if(rd())else((42)if(rd())else(33)))
def _33():
sa(8)
return 34
def _34():
return (((41)if(rd())else(40))if(rd())else((39)if(rd())else(35)))
def _35():
sa(sp()+2)
return 36
def _36():
sa(sr());
return (37)if(sp()!=0)else(17)
def _37():
sa(sp()-1)
return (18)if(sp()!=0)else(38)
def _38():
gw(4,0,0)
return 18
def _39():
sa(sp()+1)
return 36
def _40():
sa(sp()+0)
return 36
def _41():
sa(sp()+3)
return 36
def _42():
sa(4)
return 34
def _43():
sa(0)
return 34
def _44():
sa(12)
return 34
def _45():
return (46)if(sr()!=17)else(31)
def _46():
return (47)if(sr()!=33)else(31)
def _47():
return (81)if(sr()!=7)else(48)
def _48():
sp();
return 49
def _49():
return (((80)if(rd())else(79))if(rd())else((78)if(rd())else(50)))
def _50():
sa(8)
return 51
def _51():
return (((77)if(rd())else(76))if(rd())else((75)if(rd())else(52)))
def _52():
sa(sp()+2)
return 53
def _53():
sa(sr());
return (54)if(sp()!=0)else(74)
def _54():
sa(sp()-1)
sa(sr());
return (55)if(sp()!=0)else(73)
def _55():
sa(sp()-1)
sa(sr());
return (56)if(sp()!=0)else(72)
def _56():
sa(sp()-1)
sa(sr());
return (57)if(sp()!=0)else(71)
def _57():
sa(sp()-1)
sa(sr());
return (58)if(sp()!=0)else(70)
def _58():
sa(sp()-1)
sa(sr());
return (59)if(sp()!=0)else(69)
def _59():
sa(sp()-1)
sa(sr());
return (60)if(sp()!=0)else(68)
def _60():
sa(sp()-1)
sa(sr());
return (61)if(sp()!=0)else(68)
def _61():
sa(sp()-1)
sa(sr());
return (62)if(sp()!=0)else(65)
def _62():
sa(sp()-1)
sa(sr());
return (63)if(sp()!=0)else(64)
def _63():
sp();
return 18
def _64():
gw(4,0,gr(4,0)-3)
return 63
def _65():
return (66)if(gr(4,0)!=22)else(67)
def _66():
gw(4,0,12)
return 63
def _67():
gw(4,0,28)
return 63
def _68():
gw(4,0,(10*(((gr(4,0)%6)+1)/2))+5)
return 63
def _69():
gw(4,0,0)
return 63
def _70():
gw(4,0,5)
return 63
def _71():
gw(4,0,39)
return 63
def _72():
gw(4,0,24)
return 63
def _73():
gw(4,0,11)
return 63
def _74():
gw(4,0,10)
return 63
def _75():
sa(sp()+1)
return 53
def _76():
sa(sp()+3)
return 53
def _77():
sa(sp()+0)
return 53
def _78():
sa(4)
return 51
def _79():
sa(12)
return 51
def _80():
sa(0)
return 51
def _81():
return (82)if(sr()!=22)else(48)
def _82():
return (63)if(sr()!=36)else(48)
def _83():
sa(sp()+1)
return 16
def _84():
sa(sp()+3)
return 16
def _85():
sa(sp()+0)
return 16
def _86():
sa(sp()+1)
return 13
def _87():
sa(sp()+3)
return 13
def _88():
sa(sp()+0)
return 13
def _89():
sa(4)
return 11
def _90():
sa(12)
return 11
def _91():
sa(0)
return 11
def _92():
sa(sp()+2)
return 8
def _93():
sa(sp()+4)
return 8
def _94():
sa(sp()+1)
return 8
def _95():
sa(3)
return 6
def _96():
sa(4)
return 6
def _97():
sa(1)
return 6
def _98():
sa(sp()-1)
sa(sr()+2)
sa(0)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(1)
v0=sp()
v1=sp()
gw(v1,v0,sp())
sa(sr());
return 1
m=[_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,_11,_12,_13,_14,_15,_16,_17,_18,_19,_20,_21,_22,_23,_24,_25,_26,_27,_28,_29,_30,_31,_32,_33,_34,_35,_36,_37,_38,_39,_40,_41,_42,_43,_44,_45,_46,_47,_48,_49,_50,_51,_52,_53,_54,_55,_56,_57,_58,_59,_60,_61,_62,_63,_64,_65,_66,_67,_68,_69,_70,_71,_72,_73,_74,_75,_76,_77,_78,_79,_80,_81,_82,_83,_84,_85,_86,_87,_88,_89,_90,_91,_92,_93,_94,_95,_96,_97,_98]
c=0
while c<99:
c=m[c]()
|
|
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import numpy as np
import pytest
import cirq
def test_equals():
eq = cirq.testing.EqualsTester()
eq.add_equality_group(cirq.X, cirq.ops.pauli_gates.X, cirq.XPowGate())
eq.add_equality_group(cirq.Y, cirq.ops.pauli_gates.Y, cirq.YPowGate())
eq.add_equality_group(cirq.Z, cirq.ops.pauli_gates.Z, cirq.ZPowGate())
def test_phased_pauli_product():
assert cirq.X.phased_pauli_product(cirq.I) == (1, cirq.X)
assert cirq.X.phased_pauli_product(cirq.X) == (1, cirq.I)
assert cirq.X.phased_pauli_product(cirq.Y) == (1j, cirq.Z)
assert cirq.X.phased_pauli_product(cirq.Z) == (-1j, cirq.Y)
assert cirq.Y.phased_pauli_product(cirq.I) == (1, cirq.Y)
assert cirq.Y.phased_pauli_product(cirq.X) == (-1j, cirq.Z)
assert cirq.Y.phased_pauli_product(cirq.Y) == (1, cirq.I)
assert cirq.Y.phased_pauli_product(cirq.Z) == (1j, cirq.X)
assert cirq.Z.phased_pauli_product(cirq.I) == (1, cirq.Z)
assert cirq.Z.phased_pauli_product(cirq.X) == (1j, cirq.Y)
assert cirq.Z.phased_pauli_product(cirq.Y) == (-1j, cirq.X)
assert cirq.Z.phased_pauli_product(cirq.Z) == (1, cirq.I)
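# The coefficients above follow the single-qubit Pauli algebra: X*Y = iZ,
# Y*Z = iX, Z*X = iY, the reversed order picks up a -i phase, and P*P = I.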
def test_isinstance():
assert isinstance(cirq.X, cirq.XPowGate)
assert isinstance(cirq.Y, cirq.YPowGate)
assert isinstance(cirq.Z, cirq.ZPowGate)
assert not isinstance(cirq.X, cirq.YPowGate)
assert not isinstance(cirq.X, cirq.ZPowGate)
assert not isinstance(cirq.Y, cirq.XPowGate)
assert not isinstance(cirq.Y, cirq.ZPowGate)
assert not isinstance(cirq.Z, cirq.XPowGate)
assert not isinstance(cirq.Z, cirq.YPowGate)
def test_by_index():
eq = cirq.testing.EqualsTester()
eq.add_equality_group(cirq.X, *[cirq.Pauli.by_index(i) for i in (-3, 0, 3, 6)])
eq.add_equality_group(cirq.Y, *[cirq.Pauli.by_index(i) for i in (-2, 1, 4, 7)])
eq.add_equality_group(cirq.Z, *[cirq.Pauli.by_index(i) for i in (-1, 2, 5, 8)])
def test_relative_index():
assert cirq.X.relative_index(cirq.X) == 0
assert cirq.X.relative_index(cirq.Y) == -1
assert cirq.X.relative_index(cirq.Z) == 1
assert cirq.Y.relative_index(cirq.X) == 1
assert cirq.Y.relative_index(cirq.Y) == 0
assert cirq.Y.relative_index(cirq.Z) == -1
assert cirq.Z.relative_index(cirq.X) == -1
assert cirq.Z.relative_index(cirq.Y) == 1
assert cirq.Z.relative_index(cirq.Z) == 0
def test_by_relative_index():
assert cirq.Pauli.by_relative_index(cirq.X, -1) == cirq.Z
assert cirq.Pauli.by_relative_index(cirq.X, 0) == cirq.X
assert cirq.Pauli.by_relative_index(cirq.X, 1) == cirq.Y
assert cirq.Pauli.by_relative_index(cirq.X, 2) == cirq.Z
assert cirq.Pauli.by_relative_index(cirq.X, 3) == cirq.X
assert cirq.Pauli.by_relative_index(cirq.Y, -1) == cirq.X
assert cirq.Pauli.by_relative_index(cirq.Y, 0) == cirq.Y
assert cirq.Pauli.by_relative_index(cirq.Y, 1) == cirq.Z
assert cirq.Pauli.by_relative_index(cirq.Y, 2) == cirq.X
assert cirq.Pauli.by_relative_index(cirq.Y, 3) == cirq.Y
assert cirq.Pauli.by_relative_index(cirq.Z, -1) == cirq.Y
assert cirq.Pauli.by_relative_index(cirq.Z, 0) == cirq.Z
assert cirq.Pauli.by_relative_index(cirq.Z, 1) == cirq.X
assert cirq.Pauli.by_relative_index(cirq.Z, 2) == cirq.Y
assert cirq.Pauli.by_relative_index(cirq.Z, 3) == cirq.Z
def test_too_many_qubits():
a, b = cirq.LineQubit.range(2)
with pytest.raises(ValueError, match='single qubit'):
_ = cirq.X.on(a, b)
x = cirq.X(a)
with pytest.raises(ValueError, match=r'len\(new_qubits\)'):
_ = x.with_qubits(a, b)
def test_relative_index_consistency():
for pauli_1 in (cirq.X, cirq.Y, cirq.Z):
for pauli_2 in (cirq.X, cirq.Y, cirq.Z):
shift = pauli_2.relative_index(pauli_1)
assert cirq.Pauli.by_relative_index(pauli_1, shift) == pauli_2
def test_gt():
assert not cirq.X > cirq.X
assert not cirq.X > cirq.Y
assert cirq.X > cirq.Z
assert cirq.Y > cirq.X
assert not cirq.Y > cirq.Y
assert not cirq.Y > cirq.Z
assert not cirq.Z > cirq.X
assert cirq.Z > cirq.Y
assert not cirq.Z > cirq.Z
def test_gt_other_type():
with pytest.raises(TypeError):
_ = cirq.X > object()
def test_lt():
assert not cirq.X < cirq.X
assert cirq.X < cirq.Y
assert not cirq.X < cirq.Z
assert not cirq.Y < cirq.X
assert not cirq.Y < cirq.Y
assert cirq.Y < cirq.Z
assert cirq.Z < cirq.X
assert not cirq.Z < cirq.Y
assert not cirq.Z < cirq.Z
def test_lt_other_type():
with pytest.raises(TypeError):
_ = cirq.X < object()
def test_str():
assert str(cirq.X) == 'X'
assert str(cirq.Y) == 'Y'
assert str(cirq.Z) == 'Z'
def test_repr():
assert repr(cirq.X) == 'cirq.X'
assert repr(cirq.Y) == 'cirq.Y'
assert repr(cirq.Z) == 'cirq.Z'
def test_third():
assert cirq.X.third(cirq.Y) == cirq.Z
assert cirq.Y.third(cirq.X) == cirq.Z
assert cirq.Y.third(cirq.Z) == cirq.X
assert cirq.Z.third(cirq.Y) == cirq.X
assert cirq.Z.third(cirq.X) == cirq.Y
assert cirq.X.third(cirq.Z) == cirq.Y
assert cirq.X.third(cirq.X) == cirq.X
assert cirq.Y.third(cirq.Y) == cirq.Y
assert cirq.Z.third(cirq.Z) == cirq.Z
def test_commutes():
for A, B in itertools.product([cirq.X, cirq.Y, cirq.Z], repeat=2):
assert cirq.commutes(A, B) == (A == B)
with pytest.raises(TypeError):
assert cirq.commutes(cirq.X, 'X')
assert cirq.commutes(cirq.X, 'X', default='default') == 'default'
assert cirq.commutes(cirq.Z, cirq.read_json(json_text=cirq.to_json(cirq.Z)))
def test_unitary():
np.testing.assert_equal(cirq.unitary(cirq.X), cirq.unitary(cirq.X))
np.testing.assert_equal(cirq.unitary(cirq.Y), cirq.unitary(cirq.Y))
np.testing.assert_equal(cirq.unitary(cirq.Z), cirq.unitary(cirq.Z))
def test_apply_unitary():
cirq.testing.assert_has_consistent_apply_unitary(cirq.X)
cirq.testing.assert_has_consistent_apply_unitary(cirq.Y)
cirq.testing.assert_has_consistent_apply_unitary(cirq.Z)
def test_identity_multiplication():
a, b, c = cirq.LineQubit.range(3)
assert cirq.X(a) * cirq.I(a) == cirq.X(a)
assert cirq.X(a) * cirq.I(b) == cirq.X(a)
assert cirq.X(a) * cirq.Y(b) * cirq.I(c) == cirq.X(a) * cirq.Y(b)
assert cirq.I(c) * cirq.X(a) * cirq.Y(b) == cirq.X(a) * cirq.Y(b)
with pytest.raises(TypeError):
_ = cirq.H(c) * cirq.X(a) * cirq.Y(b)
with pytest.raises(TypeError):
_ = cirq.X(a) * cirq.Y(b) * cirq.H(c)
with pytest.raises(TypeError):
_ = cirq.I(a) * str(cirq.Y(b))
def test_powers():
assert isinstance(cirq.X, cirq.Pauli)
assert isinstance(cirq.Y, cirq.Pauli)
assert isinstance(cirq.Z, cirq.Pauli)
assert not isinstance(cirq.X ** -0.5, cirq.Pauli)
assert not isinstance(cirq.Y ** 0.2, cirq.Pauli)
assert not isinstance(cirq.Z ** 0.5, cirq.Pauli)
assert isinstance(cirq.X ** -0.5, cirq.XPowGate)
assert isinstance(cirq.Y ** 0.2, cirq.YPowGate)
assert isinstance(cirq.Z ** 0.5, cirq.ZPowGate)
assert isinstance(cirq.X ** 1, cirq.Pauli)
assert isinstance(cirq.Y ** 1, cirq.Pauli)
assert isinstance(cirq.Z ** 1, cirq.Pauli)
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import ldappool
from oslo_config import cfg
from keystone.common.ldap import core as ldap_core
from keystone.identity.backends import ldap
from keystone.tests import unit
from keystone.tests.unit import fakeldap
from keystone.tests.unit import test_backend_ldap_pool
from keystone.tests.unit import test_ldap_livetest
CONF = cfg.CONF
class LiveLDAPPoolIdentity(test_backend_ldap_pool.LdapPoolCommonTestMixin,
test_ldap_livetest.LiveLDAPIdentity):
"""Executes existing LDAP live test with pooled LDAP handler.
Also executes common pool specific tests via Mixin class.
"""
def setUp(self):
super(LiveLDAPPoolIdentity, self).setUp()
self.addCleanup(self.cleanup_pools)
# storing to local variable to avoid long references
self.conn_pools = ldap_core.PooledLDAPHandler.connection_pools
def config_files(self):
config_files = super(LiveLDAPPoolIdentity, self).config_files()
config_files.append(unit.dirs.tests_conf('backend_pool_liveldap.conf'))
return config_files
def test_assert_connector_used_not_fake_ldap_pool(self):
handler = ldap_core._get_connection(CONF.ldap.url, use_pool=True)
self.assertNotEqual(type(handler.Connector),
type(fakeldap.FakeLdapPool))
self.assertEqual(type(ldappool.StateConnector),
type(handler.Connector))
def test_async_search_and_result3(self):
self.config_fixture.config(group='ldap', page_size=1)
self.test_user_enable_attribute_mask()
def test_pool_size_expands_correctly(self):
who = CONF.ldap.user
cred = CONF.ldap.password
# get related connection manager instance
ldappool_cm = self.conn_pools[CONF.ldap.url]
def _get_conn():
return ldappool_cm.connection(who, cred)
        with _get_conn() as c1:  # conn1
            self.assertEqual(1, len(ldappool_cm))
            self.assertTrue(c1.connected)
            self.assertTrue(c1.active)
with _get_conn() as c2: # conn2
self.assertEqual(2, len(ldappool_cm))
self.assertTrue(c2.connected)
self.assertTrue(c2.active)
self.assertEqual(2, len(ldappool_cm))
# c2 went out of context, its connected but not active
self.assertTrue(c2.connected)
self.assertFalse(c2.active)
with _get_conn() as c3: # conn3
self.assertEqual(2, len(ldappool_cm))
self.assertTrue(c3.connected)
self.assertTrue(c3.active)
self.assertTrue(c3 is c2) # same connection is reused
self.assertTrue(c2.active)
with _get_conn() as c4: # conn4
self.assertEqual(3, len(ldappool_cm))
self.assertTrue(c4.connected)
self.assertTrue(c4.active)
def test_password_change_with_auth_pool_disabled(self):
self.config_fixture.config(group='ldap', use_auth_pool=False)
old_password = self.user_sna['password']
self.test_password_change_with_pool()
self.assertRaises(AssertionError,
self.identity_api.authenticate,
context={},
user_id=self.user_sna['id'],
password=old_password)
def _create_user_and_authenticate(self, password):
# TODO(shaleh): port to new_user_ref()
user_dict = {
'domain_id': CONF.identity.default_domain_id,
'name': uuid.uuid4().hex,
'password': password}
user = self.identity_api.create_user(user_dict)
self.identity_api.authenticate(
context={},
user_id=user['id'],
password=password)
return self.identity_api.get_user(user['id'])
def _get_auth_conn_pool_cm(self):
pool_url = ldap_core.PooledLDAPHandler.auth_pool_prefix + CONF.ldap.url
return self.conn_pools[pool_url]
def _do_password_change_for_one_user(self, password, new_password):
self.config_fixture.config(group='ldap', use_auth_pool=True)
self.cleanup_pools()
self.load_backends()
user1 = self._create_user_and_authenticate(password)
auth_cm = self._get_auth_conn_pool_cm()
self.assertEqual(1, len(auth_cm))
user2 = self._create_user_and_authenticate(password)
self.assertEqual(1, len(auth_cm))
user3 = self._create_user_and_authenticate(password)
self.assertEqual(1, len(auth_cm))
user4 = self._create_user_and_authenticate(password)
self.assertEqual(1, len(auth_cm))
user5 = self._create_user_and_authenticate(password)
self.assertEqual(1, len(auth_cm))
        # The connection pool size remains 1 even though different users bind,
        # because there is only one active connection at a time.
user_api = ldap.UserApi(CONF)
u1_dn = user_api._id_to_dn_string(user1['id'])
u2_dn = user_api._id_to_dn_string(user2['id'])
u3_dn = user_api._id_to_dn_string(user3['id'])
u4_dn = user_api._id_to_dn_string(user4['id'])
u5_dn = user_api._id_to_dn_string(user5['id'])
        # Now create multiple active connections for the end-user auth case,
        # which forces them to be kept in the pool. After that, modify one
        # user's password, making sure that user's connection sits in the
        # middle of the pool list.
auth_cm = self._get_auth_conn_pool_cm()
with auth_cm.connection(u1_dn, password) as _:
with auth_cm.connection(u2_dn, password) as _:
with auth_cm.connection(u3_dn, password) as _:
with auth_cm.connection(u4_dn, password) as _:
with auth_cm.connection(u5_dn, password) as _:
self.assertEqual(5, len(auth_cm))
_.unbind_s()
user3['password'] = new_password
self.identity_api.update_user(user3['id'], user3)
return user3
def test_password_change_with_auth_pool_enabled_long_lifetime(self):
self.config_fixture.config(group='ldap',
auth_pool_connection_lifetime=600)
old_password = 'my_password'
new_password = 'new_password'
user = self._do_password_change_for_one_user(old_password,
new_password)
user.pop('password')
# with a long connection lifetime the auth_pool can still bind with the
# old password, which is not desired when frequent password changes are
# a use case in a deployment.
# This can happen only in the multiple-concurrent-connections case.
user_ref = self.identity_api.authenticate(
context={}, user_id=user['id'], password=old_password)
self.assertDictEqual(user, user_ref)
def test_password_change_with_auth_pool_enabled_no_lifetime(self):
self.config_fixture.config(group='ldap',
auth_pool_connection_lifetime=0)
old_password = 'my_password'
new_password = 'new_password'
user = self._do_password_change_for_one_user(old_password,
new_password)
# now, as the connection lifetime is zero, authentication
# with the old password will always fail.
self.assertRaises(AssertionError,
self.identity_api.authenticate,
context={}, user_id=user['id'],
password=old_password)
|
|
#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Enable 'with' statements in Python 2.5
from __future__ import with_statement
import os.path
import platform
import re
import shutil
import subprocess
import sys
import time
from buildbot_lib import (
BuildContext, BuildStatus, Command, EnsureDirectoryExists,
ParseStandardCommandLine, RemoveDirectory, RemovePath,
RemoveGypBuildDirectories, RemoveSconsBuildDirectories, RunBuild, SCons,
SetupLinuxEnvironment, SetupMacEnvironment, SetupWindowsEnvironment,
SetupAndroidEnvironment, Step, StepLink, StepText, TryToCleanContents,
RunningOnBuildbot)
def SetupContextVars(context):
# The branch is set to native_client on the main bots; on the trybots it's
# set to ''. Otherwise, we should assume a particular branch is being used.
context['branch'] = os.environ.get('BUILDBOT_BRANCH', 'native_client')
context['off_trunk'] = context['branch'] not in ['native_client', '']
def ValidatorTest(context, architecture, validator, warn_only=False):
cmd = [
sys.executable,
'tests/abi_corpus/validator_regression_test.py',
'--keep-going',
'--validator', validator,
'--arch', architecture
]
if warn_only:
cmd.append('--warn-only')
Command(context, cmd=cmd)
def SummarizeCoverage(context):
Command(context, [
sys.executable,
'tools/coverage_summary.py',
context['platform'] + '-' + context['default_scons_platform'],
])
def ArchiveCoverage(context):
gsutil = '/b/build/third_party/gsutil/gsutil'
gsd_url = 'http://gsdview.appspot.com/nativeclient-coverage2/revs'
variant_name = ('coverage-' + context['platform'] + '-' +
context['default_scons_platform'])
coverage_path = variant_name + '/html/index.html'
revision = os.environ.get('BUILDBOT_REVISION', 'None')
link_url = gsd_url + '/' + revision + '/' + coverage_path
gsd_base = 'gs://nativeclient-coverage2/revs'
gs_path = gsd_base + '/' + revision + '/' + variant_name
cov_dir = 'scons-out/' + variant_name + '/coverage'
# Copy lcov file.
Command(context, [
sys.executable, gsutil,
'cp', '-a', 'public-read',
cov_dir + '/coverage.lcov',
gs_path + '/coverage.lcov',
])
# Copy html.
Command(context, [
sys.executable, gsutil,
'cp', '-R', '-a', 'public-read',
'html', gs_path,
], cwd=cov_dir)
print '@@@STEP_LINK@view@%s@@@' % link_url
def CommandGypBuild(context):
# Do not use goma when inside a toolchain build, because the
# freshly-built NaCl compilers will never be available via goma.
# This sacrifices the benefits of goma for building the trusted
# code too, but it's not clear how to teach Gyp to use goma for
# some compilers and not others.
use_goma = (RunningOnBuildbot() and
not context['no_goma'] and
not context['inside_toolchain'])
if use_goma:
# Since this is for the buildbot, it is not good to use results
# generated by a different compiler version.
os.environ['GOMA_HERMETIC'] = 'fallback'
runtest_py = os.environ.get('RUNTEST')
alt_runtest_py = '/b/build/scripts/slave/runtest.py'
if runtest_py is None and os.path.exists(alt_runtest_py):
runtest_py = alt_runtest_py
# TODO(bradnelson): Figure out why win64 trybots can't upload goma logs.
buildername = os.environ.get('BUILDBOT_BUILDERNAME', '')
excluded_os = False
for name in ['win64', 'vista', 'win7-64', 'win8-64']:
if name in buildername:
excluded_os = True
if runtest_py is None or excluded_os:
# Fall back to direct goma + ninja if not run on bots.
try:
if use_goma:
Command(context, cmd=[
sys.executable, '/b/build/goma/goma_ctl.py', 'restart'])
cmd = ['ninja', '-v', '-k', '0', '-C', '../out/' + context['gyp_mode']]
if use_goma:
cmd += ['-j50']
Command(context, cmd=cmd)
finally:
if use_goma:
Command(context, cmd=[
sys.executable, '/b/build/goma/goma_ctl.py', 'stop'])
else:
# Infer the location of compile.py from runtest.py.
compile_py = os.path.join(os.path.dirname(runtest_py), 'compile.py')
cmd = [sys.executable, compile_py, '--target', context['gyp_mode'],
'--src-dir', '../', '--build-tool', 'ninja',
'--ninja-ensure-up-to-date']
if use_goma:
cmd += ['--compiler', 'goma']
cmd += ['--goma-dir', '/b/build/goma']
# Verbose, and keep going past failures (-k 0).
cmd += ['--', '-v', '-k', '0']
Command(context, cmd=cmd)
def CommandGypGenerate(context):
Command(
context,
cmd=[sys.executable, 'native_client/build/gyp_nacl'],
cwd='..')
def CommandGclientRunhooks(context):
if context.Windows():
gclient = 'gclient.bat'
else:
gclient = 'gclient'
print 'Running gclient runhooks...'
print 'GYP_CROSSCOMPILE=' + context.GetEnv('GYP_CROSSCOMPILE', '')
print 'GYP_GENERATORS=' + context.GetEnv('GYP_GENERATORS', '')
print 'GYP_MSVS_VERSION=' + context.GetEnv('GYP_MSVS_VERSION', '')
print 'GYP_DEFINES=' + context.GetEnv('GYP_DEFINES', '')
Command(context, cmd=[gclient, 'runhooks', '--force'])
def BuildScript(status, context):
inside_toolchain = context['inside_toolchain']
# Clean out build directories.
with Step('clobber', status):
RemoveSconsBuildDirectories()
RemoveGypBuildDirectories()
with Step('cleanup_temp', status):
# Picking out drive letter on which the build is happening so we can use
# it for the temp directory.
if context.Windows():
build_drive = os.path.splitdrive(os.path.abspath(__file__))[0]
tmp_dir = os.path.join(build_drive, os.path.sep + 'temp')
context.SetEnv('TEMP', tmp_dir)
context.SetEnv('TMP', tmp_dir)
else:
tmp_dir = '/tmp'
print 'Making sure %s exists...' % tmp_dir
EnsureDirectoryExists(tmp_dir)
print 'Cleaning up the contents of %s...' % tmp_dir
# Only delete files and directories like:
# */nacl_tmp/*
# TODO(bradnelson): Drop this after a bit.
# Also drop files and directories like these to clean up current state:
# */nacl_tmp*
# */nacl*
# 83C4.tmp
# .org.chromium.Chromium.EQrEzl
# tmp_platform*
# tmp_mmap*
# tmp_pwrite*
# tmp_syscalls*
# workdir*
# nacl_chrome_download_*
# browserprofile_*
# tmp*
file_name_re = re.compile(
r'[\\/\A]('
r'tmp_nacl[\\/].+|'
r'tmp_nacl.+|'
r'nacl.+|'
r'[0-9a-fA-F]+\.tmp|'
r'\.org\.chrom\w+\.Chrom\w+\.[^\\/]+|'
r'tmp_platform[^\\/]+|'
r'tmp_mmap[^\\/]+|'
r'tmp_pwrite[^\\/]+|'
r'tmp_syscalls[^\\/]+|'
r'workdir[^\\/]+|'
r'nacl_chrome_download_[^\\/]+|'
r'browserprofile_[^\\/]+|'
r'tmp[^\\/]+'
r')$')
file_name_filter = lambda fn: file_name_re.search(fn) is not None
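# Quick sanity sketch (illustrative, not part of the original script):
#   file_name_filter('/tmp/tmp_nacl1234')  -> True   (matches 'tmp_nacl.+')
#   file_name_filter('/tmp/83C4.tmp')      -> True   (hex name ending in .tmp)
#   file_name_filter('/tmp/unrelated.txt') -> False
# Exact results depend on the paths TryToCleanContents passes to the filter.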
# Clean nacl_tmp/* separately, so we get a list of leaks.
nacl_tmp = os.path.join(tmp_dir, 'nacl_tmp')
if os.path.exists(nacl_tmp):
for bot in os.listdir(nacl_tmp):
bot_path = os.path.join(nacl_tmp, bot)
print 'Cleaning prior build temp dir: %s' % bot_path
sys.stdout.flush()
if os.path.isdir(bot_path):
for d in os.listdir(bot_path):
path = os.path.join(bot_path, d)
print 'Removing leftover: %s' % path
sys.stdout.flush()
RemovePath(path)
os.rmdir(bot_path)
else:
print 'Removing rogue file: %s' % bot_path
RemovePath(bot_path)
os.rmdir(nacl_tmp)
# Clean /tmp so we get a list of what's accumulating.
TryToCleanContents(tmp_dir, file_name_filter)
# Recreate TEMP, as it may have been clobbered.
if 'TEMP' in os.environ and not os.path.exists(os.environ['TEMP']):
os.makedirs(os.environ['TEMP'])
# Mac has an additional temporary directory; clean it up.
# TODO(bradnelson): Fix Mac Chromium so that these temp files are created
# with open() + unlink() so that they will not get left behind.
if context.Mac():
subprocess.call(
"find /var/folders -name '.org.chromium.*' -exec rm -rfv '{}' ';'",
shell=True)
subprocess.call(
"find /var/folders -name '.com.google.Chrome*' -exec rm -rfv '{}' ';'",
shell=True)
# Skip over hooks when run inside the toolchain build because
# package_version would overwrite the toolchain build.
if inside_toolchain:
with Step('gyp_generate_only', status):
CommandGypGenerate(context)
else:
with Step('gclient_runhooks', status):
CommandGclientRunhooks(context)
# Make sure our GN build is working.
can_use_gn = context.Linux() and context['arch'] != 'arm'
gn_out = '../out'
if can_use_gn:
def BoolFlag(cond):
return 'true' if cond else 'false'
gn_x86 = 'false'
gn_x64 = 'false'
gn_arm = 'false'
if context['arch'] == '32':
gn_x86 = 'true'
elif context['arch'] == '64':
gn_x64 = 'true'
elif context['arch'] == 'arm':
gn_arm = 'true'
else:
raise Exception("Unexpected arch: " + context['arch'])
gn_newlib = BoolFlag(not context['use_glibc'])
gn_glibc = BoolFlag(context['use_glibc'])
gn_gen_args = [
'is_debug=' + context['gn_is_debug'],
'use_trusted_x86=' + gn_x86,
'use_nacl_x86=' + gn_x86,
'use_trusted_x64=' + gn_x64,
'use_nacl_x64=' + gn_x64,
'use_trusted_arm=' + gn_arm,
'use_nacl_arm=' + gn_arm,
'use_gcc_newlib=' + gn_newlib,
'use_gcc_glibc=' + gn_glibc,
'use_clang_newlib=' + gn_newlib,
]
# If this is a 32-bit build but the kernel reports as 64-bit,
# then gn will set host_cpu=x64 when we want host_cpu=x86.
if context['arch'] == '32':
gn_gen_args.append('host_cpu="x86"')
gn_cmd = [
'gn',
'--dotfile=../native_client/.gn', '--root=..',
# Note: quotes are not needed around this space-separated
# list of args. The shell would remove them before passing
# them to a program, and Python bypasses the shell. Adding
# quotes will cause an error because GN will see unexpected
# double quotes.
'--args=%s' % ' '.join(gn_gen_args),
'gen', gn_out,
]
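# Illustrative example (hypothetical values): for a 32-bit debug Linux build
# the argv assembled above is roughly
#   ['gn', '--dotfile=../native_client/.gn', '--root=..',
#    '--args=is_debug=true use_trusted_x86=true use_nacl_x86=true ... host_cpu="x86"',
#    'gen', '../out']
# i.e. the whole --args payload travels as a single argv element.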
with Step('gn_compile', status):
Command(context, cmd=gn_cmd)
Command(context, cmd=['ninja', '-C', gn_out])
if context['clang']:
with Step('update_clang', status):
Command(context, cmd=['../tools/clang/scripts/update.sh'])
# Just build both bit widths of the validator and test for --validator mode.
if context['validator']:
with Step('build ragel_validator-32', status):
SCons(context, platform='x86-32', parallel=True, args=['ncval_new'])
with Step('build ragel_validator-64', status):
SCons(context, platform='x86-64', parallel=True, args=['ncval_new'])
# Check validator trie proofs on both 32 + 64 bits.
with Step('check validator proofs', status):
SCons(context, platform='x86-64', parallel=False, args=['dfachecktries'])
with Step('predownload validator corpus', status):
Command(context,
cmd=[sys.executable,
'tests/abi_corpus/validator_regression_test.py',
'--download-only'])
with Step('validator_regression_test ragel x86-32', status,
halt_on_fail=False):
ValidatorTest(
context, 'x86-32',
'scons-out/opt-linux-x86-32/staging/ncval_new')
with Step('validator_regression_test ragel x86-64', status,
halt_on_fail=False):
ValidatorTest(
context, 'x86-64',
'scons-out/opt-linux-x86-64/staging/ncval_new')
return
# Run checkdeps script to vet #includes.
with Step('checkdeps', status):
Command(context, cmd=[sys.executable, 'tools/checkdeps/checkdeps.py'])
# Make sure our Gyp build is working.
if not context['no_gyp']:
with Step('gyp_compile', status):
CommandGypBuild(context)
# On a subset of Linux builds, build Breakpad tools for testing.
if context['use_breakpad_tools']:
with Step('breakpad configure', status):
Command(context, cmd=['mkdir', '-p', 'breakpad-out'])
Command(context, cwd='breakpad-out',
cmd=['bash', '../../breakpad/configure',
'CXXFLAGS=-I../..']) # For third_party/lss
with Step('breakpad make', status):
Command(context, cmd=['make', '-j%d' % context['max_jobs'],
# This avoids a broken dependency on
# src/third_party/lss files within the breakpad
# source directory. We are not putting lss
# there, but using the -I switch above to
# find the lss in ../third_party instead.
'includelss_HEADERS=',
],
cwd='breakpad-out')
# The main compile step.
with Step('scons_compile', status):
SCons(context, parallel=True, args=[])
if context['coverage']:
with Step('collect_coverage', status, halt_on_fail=True):
SCons(context, args=['coverage'])
with Step('summarize_coverage', status, halt_on_fail=False):
SummarizeCoverage(context)
slave_type = os.environ.get('BUILDBOT_SLAVE_TYPE')
if slave_type != 'Trybot' and slave_type is not None:
with Step('archive_coverage', status, halt_on_fail=True):
ArchiveCoverage(context)
return
# Android bots don't run tests for now.
if context['android']:
return
### BEGIN tests ###
if not context['use_glibc']:
# Bypassing the IRT with glibc is not a supported case,
# and in fact does not work at all with the new glibc.
with Step('small_tests', status, halt_on_fail=False):
SCons(context, args=['small_tests'])
with Step('medium_tests', status, halt_on_fail=False):
SCons(context, args=['medium_tests'])
with Step('large_tests', status, halt_on_fail=False):
SCons(context, args=['large_tests'])
with Step('compile IRT tests', status):
SCons(context, parallel=True, mode=['nacl_irt_test'])
with Step('small_tests under IRT', status, halt_on_fail=False):
SCons(context, mode=context['default_scons_mode'] + ['nacl_irt_test'],
args=['small_tests_irt'])
with Step('medium_tests under IRT', status, halt_on_fail=False):
SCons(context, mode=context['default_scons_mode'] + ['nacl_irt_test'],
args=['medium_tests_irt'])
with Step('large_tests under IRT', status, halt_on_fail=False):
SCons(context, mode=context['default_scons_mode'] + ['nacl_irt_test'],
args=['large_tests_irt'])
### END tests ###
### BEGIN GN tests ###
if can_use_gn:
arch_name = {
'arm': 'arm',
'32': 'x86',
'64': 'x64'
}[context['arch']]
gn_sel_ldr = os.path.join(gn_out, 'trusted_' + arch_name, 'sel_ldr')
gn_extra = [
'force_sel_ldr=' + gn_sel_ldr,
'perf_prefix=gn_',
]
with Step('small_tests under GN', status, halt_on_fail=False):
SCons(context, args=['small_tests'] + gn_extra)
with Step('medium_tests under GN', status, halt_on_fail=False):
SCons(context, args=['medium_tests'] + gn_extra)
with Step('large_tests under GN', status, halt_on_fail=False):
SCons(context, args=['large_tests'] + gn_extra)
### END GN tests ###
def Main():
# TODO(ncbray) make buildbot scripts composable to support toolchain use case.
context = BuildContext()
status = BuildStatus(context)
ParseStandardCommandLine(context)
SetupContextVars(context)
if context.Windows():
SetupWindowsEnvironment(context)
elif context.Linux():
if context['android']:
SetupAndroidEnvironment(context)
else:
SetupLinuxEnvironment(context)
elif context.Mac():
SetupMacEnvironment(context)
else:
raise Exception("Unsupported platform.")
RunBuild(BuildScript, status)
def TimedMain():
start_time = time.time()
try:
Main()
finally:
time_taken = time.time() - start_time
print 'RESULT BuildbotTime: total= %.3f minutes' % (time_taken / 60)
if __name__ == '__main__':
TimedMain()
|
|
import os
import sys
import textwrap
import warnings
import email
from svb import error, util
from svb.six import iteritems, string_types
from svb.six.moves import urllib
# - Requests is the preferred HTTP library
# - Google App Engine has urlfetch
# - Use Pycurl if it's there (at least it verifies SSL certs)
# - Fall back to urllib2 with a warning if needed
try:
import urllib2
except ImportError:
# Try to load in urllib2, but don't sweat it if it's not available.
pass
try:
import pycurl
except ImportError:
# Try to load in pycurl, but don't sweat it if it's not available.
pycurl = None
try:
import requests
except ImportError:
# Try to load in requests, but don't sweat it if it's not available.
requests = None
else:
try:
# Require version 0.8.8, but don't want to depend on distutils
version = requests.__version__
major, minor, patch = [int(i) for i in version.split('.')]
except Exception:
# Probably some new-fangled version, so it should support verify
pass
else:
if (major, minor, patch) < (0, 8, 8):
sys.stderr.write(
'Warning: the SVB library requires that your Python '
'"requests" library be newer than version 0.8.8, but your '
'"requests" library is version %s. SVB will fall back to '
'an alternate HTTP library so everything should work. We '
'recommend upgrading your "requests" library. If you have any '
'questions, please contact support@svb.com. (HINT: running '
'"pip install -U requests" should upgrade your requests '
'library to the latest version.)' % (version,))
requests = None
try:
from google.appengine.api import urlfetch
except ImportError:
urlfetch = None
def new_default_http_client(*args, **kwargs):
if urlfetch:
impl = UrlFetchClient
elif requests:
impl = RequestsClient
elif pycurl:
impl = PycurlClient
else:
impl = Urllib2Client
warnings.warn(
"Warning: the SVB library is falling back to urllib2/urllib "
"because neither requests nor pycurl are installed. "
"urllib2's SSL implementation doesn't verify server "
"certificates. For improved security, we suggest installing "
"requests.")
return impl(*args, **kwargs)
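# Minimal usage sketch (not part of the SVB library itself); the URL path and
# header below are made-up placeholders.
def _example_default_client_usage():
    client = new_default_http_client(verify_ssl_certs=True)
    # Every client class returns a (body, status_code, headers) tuple.
    body, status, headers = client.request(
        'get', 'https://api.svb.com/v1/example', {'Accept': 'application/json'})
    return status, body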
class HTTPClient(object):
def __init__(self, verify_ssl_certs=True, proxy=None):
self._verify_ssl_certs = verify_ssl_certs
if proxy:
if type(proxy) is str:
proxy = {"http": proxy, "https": proxy}
if not (type(proxy) is dict):
raise ValueError(
"Proxy(ies) must be specified as either a string "
"URL or a dict() with string URL under the "
"\"https\" and/or \"http\" keys.")
self._proxy = proxy.copy() if proxy else None
def request(self, method, url, headers, post_data=None):
raise NotImplementedError(
'HTTPClient subclasses must implement `request`')
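# Illustrative note (added; the proxy host below is a made-up placeholder):
# a plain string proxy is normalized by __init__ above, so
#   HTTPClient(proxy="http://proxy.internal:3128")
# behaves the same as
#   HTTPClient(proxy={"http": "http://proxy.internal:3128",
#                     "https": "http://proxy.internal:3128"})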
class RequestsClient(HTTPClient):
name = 'requests'
def __init__(self, timeout=80, session=None, **kwargs):
super(RequestsClient, self).__init__(**kwargs)
self._timeout = timeout
self._session = session or requests.Session()
def request(self, method, url, headers, post_data=None):
kwargs = {}
if self._verify_ssl_certs:
kwargs['verify'] = os.path.join(
os.path.dirname(__file__), 'data/ca-certificates.crt')
else:
kwargs['verify'] = False
if self._proxy:
kwargs['proxies'] = self._proxy
try:
try:
result = self._session.request(method,
url,
headers=headers,
data=post_data,
timeout=self._timeout,
**kwargs)
except TypeError as e:
raise TypeError(
'Warning: It looks like your installed version of the '
'"requests" library is not compatible with SVB\'s '
'usage thereof. (HINT: The most likely cause is that '
'your "requests" library is out of date. You can fix '
'that by running "pip install -U requests".) The '
'underlying error was: %s' % (e,))
# This causes the content to actually be read, which could cause
# e.g. a socket timeout. TODO: The other fetch methods probably
# are susceptible to the same and should be updated.
content = result.content
status_code = result.status_code
except Exception as e:
# Would catch just requests.exceptions.RequestException, but can
# also raise ValueError, RuntimeError, etc.
self._handle_request_error(e)
return content, status_code, result.headers
def _handle_request_error(self, e):
if isinstance(e, requests.exceptions.RequestException):
msg = ("Unexpected error communicating with SVB. "
"If this problem persists, let us know at "
"support@svb.com.")
err = "%s: %s" % (type(e).__name__, str(e))
else:
msg = ("Unexpected error communicating with SVB. "
"It looks like there's probably a configuration "
"issue locally. If this problem persists, let us "
"know at support@svb.com.")
err = "A %s was raised" % (type(e).__name__,)
if str(e):
err += " with error message %s" % (str(e),)
else:
err += " with no error message"
msg = textwrap.fill(msg) + "\n\n(Network error: %s)" % (err,)
raise error.APIConnectionError(msg)
class UrlFetchClient(HTTPClient):
name = 'urlfetch'
def __init__(self, verify_ssl_certs=True, proxy=None, deadline=55):
super(UrlFetchClient, self).__init__(
verify_ssl_certs=verify_ssl_certs, proxy=proxy)
# no proxy support in urlfetch. for a patch, see:
# https://code.google.com/p/googleappengine/issues/detail?id=544
if proxy:
raise ValueError(
"No proxy support in urlfetch library. "
"Set svb.default_http_client to either RequestsClient, "
"PycurlClient, or Urllib2Client instance to use a proxy.")
self._verify_ssl_certs = verify_ssl_certs
# GAE requests time out after 60 seconds, so make sure to default
# to 55 seconds to allow for a slow SVB response
self._deadline = deadline
def request(self, method, url, headers, post_data=None):
try:
result = urlfetch.fetch(
url=url,
method=method,
headers=headers,
# Google App Engine doesn't let us specify our own cert bundle.
# However, that's ok because the CA bundle they use recognizes
# api.svb.com.
validate_certificate=self._verify_ssl_certs,
deadline=self._deadline,
payload=post_data
)
except urlfetch.Error as e:
self._handle_request_error(e, url)
return result.content, result.status_code, result.headers
def _handle_request_error(self, e, url):
if isinstance(e, urlfetch.InvalidURLError):
msg = ("The SVB library attempted to fetch an "
"invalid URL (%r). This is likely due to a bug "
"in the SVB Python bindings. Please let us know "
"at support@svb.com." % (url,))
elif isinstance(e, urlfetch.DownloadError):
msg = "There was a problem retrieving data from SVB."
elif isinstance(e, urlfetch.ResponseTooLargeError):
msg = ("There was a problem receiving all of your data from "
"SVB. This is likely due to a bug in SVB. "
"Please let us know at support@svb.com.")
else:
msg = ("Unexpected error communicating with SVB. If this "
"problem persists, let us know at support@svb.com.")
msg = textwrap.fill(msg) + "\n\n(Network error: " + str(e) + ")"
raise error.APIConnectionError(msg)
class PycurlClient(HTTPClient):
name = 'pycurl'
def __init__(self, verify_ssl_certs=True, proxy=None):
super(PycurlClient, self).__init__(
verify_ssl_certs=verify_ssl_certs, proxy=proxy)
# Initialize this within the object so that we can reuse connections.
self._curl = pycurl.Curl()
# need to urlparse the proxy, since PyCurl
# consumes the proxy url in small pieces
if self._proxy:
# parse each proxy url into its component pieces
proxy = self._proxy
for scheme in proxy:
proxy[scheme] = urllib.parse.urlparse(proxy[scheme])
def parse_headers(self, data):
if '\r\n' not in data:
return {}
raw_headers = data.split('\r\n', 1)[1]
headers = email.message_from_string(raw_headers)
return dict((k.lower(), v) for k, v in iteritems(dict(headers)))
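# Illustrative example (hypothetical header blob): given pycurl header output
#   'HTTP/1.1 200 OK\r\nContent-Type: application/json\r\nX-Request-Id: abc\r\n'
# parse_headers() drops the status line and returns
#   {'content-type': 'application/json', 'x-request-id': 'abc'}
# with the keys lowercased, as above.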
def request(self, method, url, headers, post_data=None):
b = util.io.BytesIO()
rheaders = util.io.BytesIO()
# Pycurl's design is a little weird: although we set per-request
# options on this object, it's also capable of maintaining established
# connections. Here we call reset() between uses to make sure it's in a
# pristine state, but notably reset() doesn't reset connections, so we
# still get to take advantage of those by virtue of re-using the same
# object.
self._curl.reset()
proxy = self._get_proxy(url)
if proxy:
if proxy.hostname:
self._curl.setopt(pycurl.PROXY, proxy.hostname)
if proxy.port:
self._curl.setopt(pycurl.PROXYPORT, proxy.port)
if proxy.username or proxy.password:
self._curl.setopt(
pycurl.PROXYUSERPWD,
"%s:%s" % (proxy.username, proxy.password))
if method == 'get':
self._curl.setopt(pycurl.HTTPGET, 1)
elif method == 'post':
self._curl.setopt(pycurl.POST, 1)
self._curl.setopt(pycurl.POSTFIELDS, post_data)
else:
self._curl.setopt(pycurl.CUSTOMREQUEST, method.upper())
# pycurl doesn't like unicode URLs
self._curl.setopt(pycurl.URL, util.utf8(url))
self._curl.setopt(pycurl.WRITEFUNCTION, b.write)
self._curl.setopt(pycurl.HEADERFUNCTION, rheaders.write)
self._curl.setopt(pycurl.NOSIGNAL, 1)
self._curl.setopt(pycurl.CONNECTTIMEOUT, 30)
self._curl.setopt(pycurl.TIMEOUT, 80)
self._curl.setopt(pycurl.HTTPHEADER, ['%s: %s' % (k, v)
for k, v in iteritems(headers)])
if self._verify_ssl_certs:
self._curl.setopt(pycurl.CAINFO, os.path.join(
os.path.dirname(__file__), 'data/ca-certificates.crt'))
else:
self._curl.setopt(pycurl.SSL_VERIFYHOST, False)
try:
self._curl.perform()
except pycurl.error as e:
self._handle_request_error(e)
rbody = b.getvalue().decode('utf-8')
rcode = self._curl.getinfo(pycurl.RESPONSE_CODE)
headers = self.parse_headers(rheaders.getvalue().decode('utf-8'))
return rbody, rcode, headers
def _handle_request_error(self, e):
if e.args[0] in [pycurl.E_COULDNT_CONNECT,
pycurl.E_COULDNT_RESOLVE_HOST,
pycurl.E_OPERATION_TIMEOUTED]:
msg = ("Could not connect to SVB. Please check your "
"internet connection and try again. If this problem "
"persists, you should check Svb's service status at "
"https://twitter.com/svbapi, or let us know at "
"support@svb.com.")
elif e.args[0] in [pycurl.E_SSL_CACERT,
pycurl.E_SSL_PEER_CERTIFICATE]:
msg = ("Could not verify SVB's SSL certificate. Please make "
"sure that your network is not intercepting certificates. "
"If this problem persists, let us know at "
"support@svb.com.")
else:
msg = ("Unexpected error communicating with SVB. If this "
"problem persists, let us know at support@svb.com.")
msg = textwrap.fill(msg) + "\n\n(Network error: " + e.args[1] + ")"
raise error.APIConnectionError(msg)
def _get_proxy(self, url):
if self._proxy:
proxy = self._proxy
scheme = url.split(":")[0] if url else None
if scheme:
if scheme in proxy:
return proxy[scheme]
scheme = scheme[0:-1]
if scheme in proxy:
return proxy[scheme]
return None
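# Illustrative note (added): _get_proxy() first looks up the URL scheme
# directly, then retries with the last character stripped, so a proxy dict
# containing only an "http" entry is also used for "https://..." URLs
# ("https" -> "http").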
class Urllib2Client(HTTPClient):
if sys.version_info >= (3, 0):
name = 'urllib.request'
else:
name = 'urllib2'
def __init__(self, verify_ssl_certs=True, proxy=None):
super(Urllib2Client, self).__init__(
verify_ssl_certs=verify_ssl_certs, proxy=proxy)
# prepare and cache proxy tied opener here
self._opener = None
if self._proxy:
proxy = urllib2.ProxyHandler(self._proxy)
self._opener = urllib2.build_opener(proxy)
def request(self, method, url, headers, post_data=None):
if sys.version_info >= (3, 0) and isinstance(post_data, string_types):
post_data = post_data.encode('utf-8')
req = urllib2.Request(url, post_data, headers)
if method not in ('get', 'post'):
req.get_method = lambda: method.upper()
try:
# use the custom proxy-tied opener, if any;
# otherwise, fall back to the default urllib opener.
response = self._opener.open(req) \
if self._opener \
else urllib2.urlopen(req)
rbody = response.read()
rcode = response.code
headers = dict(response.info())
except urllib2.HTTPError as e:
rcode = e.code
rbody = e.read()
headers = dict(e.info())
except (urllib2.URLError, ValueError) as e:
self._handle_request_error(e)
lh = dict((k.lower(), v) for k, v in iteritems(dict(headers)))
return rbody, rcode, lh
def _handle_request_error(self, e):
msg = ("Unexpected error communicating with SVB. "
"If this problem persists, let us know at support@svb.com.")
msg = textwrap.fill(msg) + "\n\n(Network error: " + str(e) + ")"
raise error.APIConnectionError(msg)
|
|
#!/usr/bin/env python
"""
spcorrdecimateall.py
John Swoboda
"""
import os
import numpy as np
import tables
import glob
import matplotlib.pyplot as plt
import pdb
import ioclass
from IQTools import CenteredLagProduct, FormatIQ
if __name__ == "__main__":
## Define input parameters
spcor_dir = '/Volumes/ISRDrive/ISRData/20121207.002/'
h5_files = glob.glob(os.path.join(spcor_dir,'*.dt2.h5'))
#h5_files = ['/Volumes/ISRDrive/ISRData/20121207.002/d0330405.dt0.h5']
h5_files.sort()
print('File being processed '+ h5_files[0])
## Set up original beam patterns
ptlen = 121
rclen = 20*ptlen
pattern1 = np.array([65228, 65288, 65225, 65291])
npats = 10
fullpat = np.array([pattern1[x%4] for x in np.arange(ptlen)])
## Output
nLags = 12
Nranges = 228
lags = np.arange(0,nLags)*20e-6
nrec_orig = 30
mrecs_end = 27  # maximum number of records in the last file to deal with
h5Paths = {'S' : ('/S',''),\
'Data' : ('/S/Data',''),\
'Data_Acf' : ('/S/Data/Acf',''),\
'Data_Power' : ('/S/Data/Power',''),\
}
# set up the output directory structure
outpath = '/Volumes/ISRDrive/ISRDATA/OutData2/'
outpaths = {x:os.path.join(outpath,'Pattern{:02}'.format(x)) for x in np.arange(npats)}
for x in np.arange(npats):
if not os.path.exists(outpaths[x]):
os.makedirs(outpaths[x])
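# Illustrative example (paths depend on the outpath setting above): pattern 3
# is written under '/Volumes/ISRDrive/ISRDATA/OutData2/Pattern03', with one
# output directory created per pattern, Pattern00 through Pattern09.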
file_recs = np.zeros(len(h5_files))
file_count = 0
## Go through all files and get records
for fname in h5_files:
#print('File being Looked at '+ fname)
f_cur = tables.open_file(fname)
file_recs[file_count] = f_cur.root.Raw11.Raw.RadacHeader.BeamCode.shape[0]
f_cur.close()
file_count+=1
# get the sample location
sample_ends = np.cumsum(file_recs)*rclen
## Get start point
fname=h5_files[0]
f_cur = tables.open_file(fname)
all_beams_orig_mat = f_cur.root.Raw11.Raw.RadacHeader.BeamCode.read()
all_beams_orig = all_beams_orig_mat.flatten()
f_cur.close()
# Determine start point
keepgoing = True
stpnt = 0
while keepgoing:
subset_dat =all_beams_orig[stpnt:stpnt+ptlen]
test_arr = subset_dat==fullpat
if test_arr.all():
break
stpnt+=1
## Pull out the beam patterns
# In this script x is used as the pattern iterator and y is used as the record indicator
f_patternsdict = {(x):all_beams_orig[stpnt+x*2*ptlen:stpnt+(x+1)*2*ptlen] for x in np.arange(npats) }
# one repetition of the beams (the unique beam codes in each pattern)
patternsdict = {x:f_patternsdict[x][:(x+2)**2] for x in f_patternsdict.keys()}
des_recs = 30
# determine the pattern
patternlocdic_template = {(x):[(np.arange(x*2*ptlen+stpnt+y*rclen,(x+1)*2*ptlen+stpnt+y*rclen)) for y in np.arange(des_recs)] for x in np.arange(10)}
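# Illustrative example (using the values above, ptlen=121 and rclen=2420):
# for pattern x=0 and record y=0 the entry holds the flat sample indices
# stpnt ... stpnt + 2*ptlen - 1; each successive record y shifts that window
# by rclen samples, and each pattern x shifts it by x*2*ptlen.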
## Start loop for all files
for file_num in np.arange(len(h5_files)):
fname = h5_files[file_num]
# bring in the contents of the full file because this structure will be needed when the new file is
# made
fullfile = ioclass.h5file(fname)
fullfiledict = fullfile.readWholeh5file()
print('Main file being operated on: '+os.path.split(fname)[-1])
# pull content that will be deleted
all_data = fullfiledict['/Raw11/Raw/Samples']['Data']
rng = fullfiledict['/S/Data/Acf']['Range']
all_beams_mat = fullfiledict['/Raw11/Raw/RadacHeader']['BeamCode']
txbaud = fullfiledict['/S/Data']['TxBaud']
ambfunc = fullfiledict['/S/Data']['Ambiguity']
pwidth = fullfiledict['/S/Data']['Pulsewidth']
# Pull in cal and noise material because these will be needed for fitting
beamcodes_cal = fullfiledict['/S/Cal']['Beamcodes']
beamcodes_noise = fullfiledict['/S/Noise']['Beamcodes']
cal_pint = fullfiledict['/S/Cal']['PulsesIntegrated']
caldata = fullfiledict['/S/Cal/Power']['Data']
noise_pint = fullfiledict['/S/Noise']['PulsesIntegrated']
noise_pwer = fullfiledict['/S/Noise/Power']['Data']
noise_data =fullfiledict['/S/Noise/Acf']['Data']
# These keys lead to material that will either conflict in the new file or be unnecessary
dump = ['/S/Data','/S/Data/Acf','/S/Data/Power','/','/S','/Raw11/Raw/Samples']
for key in dump:
del fullfiledict[key]
for key in fullfiledict:
h5Paths[key] = (key,'')
# Case for end file
lastfile = file_num==len(h5_files)-1
# add an extra record to the end of the arrays to deal with possible straggling data
if not lastfile:
fname2 = h5_files[file_num+1]  # the next file supplies the extra record
# bring in the contents of the full file because this structure will be needed when the new file is
# made
fullfile2 = ioclass.h5file(fname2)
fullfiledict2 = fullfile2.readWholeh5file()
all_data = np.concatenate((all_data,np.array([fullfiledict2['/Raw11/Raw/Samples']['Data'][0]])),0)
all_beams_mat = np.concatenate((all_beams_mat,np.array([fullfiledict2['/Raw11/Raw/RadacHeader']['BeamCode'][0]])),0)
#beamcodes_cal = np.concatenate((beamcodes_cal,np.array([fullfiledict2['/S/Cal']['Beamcodes'][0]])),0)
#beamcodes_noise =np.concatenate(( beamcodes_noise,np.array([fullfiledict2['/S/Noise']['Beamcodes'][0]])),0)
#cal_pint = np.concatenate((cal_pint, np.array([fullfiledict2['/S/Cal']['PulsesIntegrated'][0]])),0)
#caldata = np.concatenate((caldata, np.array([fullfiledict2['/S/Cal/Power']['Data'][0]])),0)
#noise_pint = np.concatenate((noise_pint, np.array([fullfiledict2['/S/Noise']['PulsesIntegrated'][0]])),0)
#noise_pwer = np.concatenate((noise_pwer, np.array([fullfiledict2['/S/Noise/Power']['Data'][0]])),0)
#noise_data =np.concatenate((noise_data, np.array([fullfiledict2['/S/Noise/Acf']['Data'][0]])),0)
patternlocdic = patternlocdic_template.copy()
else:
des_recs = mrecs_end
# determine the pattern
patternlocdic = {(x):[(np.arange(x*2*ptlen+stpnt+y*rclen,(x+1)*2*ptlen+stpnt+y*rclen)) for y in np.arange(des_recs)] for x in np.arange(10)}
all_data = all_data[:des_recs+1]
all_beams_mat = all_beams_mat[:des_recs+1]
beamcodes_cal = beamcodes_cal[:des_recs]
beamcodes_noise = beamcodes_noise[:des_recs]
cal_pint = cal_pint[:des_recs]
caldata = caldata[:des_recs]
noise_pint = noise_pint[:des_recs]
noise_pwer = noise_pwer[:des_recs]
noise_data = noise_data[:des_recs]
# first loop goes through patterns
for x in np.arange(npats):
# set up the outputfiles
curoutpath =outpaths[x]
bname = os.path.basename(fname)
spl = bname.split('.')
oname = os.path.join(curoutpath, spl[0]+'.' + spl[1] + '.proc.' + spl[2])
# check the output files
if os.path.exists(oname):
os.remove(oname)
# output file
ofile = ioclass.outputFileClass()
ofile.fname = oname
ofile.openFile()
ofile.h5Paths=h5Paths
ofile.createh5groups()
ofile.closeFile()
# set up records and beams
nrecs = len(patternlocdic[x])
nbeams = len(patternsdict[x])
# Checks to make sure the arrays are set
#set up location arrays
curbeams = patternsdict[x]
cal_beam_loc = np.zeros(curbeams.shape)
noise_beam_loc = np.zeros(curbeams.shape)
cal_beam_loc = np.array([np.where(beamcodes_cal[0,:]==ib)[0][0] for ib in curbeams])
noise_beam_loc = np.array([np.where(beamcodes_noise[0,:]==ib)[0][0] for ib in curbeams])
#pdb.set_trace()
# do all the cal params
fullfiledict['/S/Cal']['Beamcodes'] = beamcodes_cal[:,cal_beam_loc]
fullfiledict['/S/Cal']['PulsesIntegrated'] = cal_pint[:,cal_beam_loc]
fullfiledict['/S/Cal/Power']['Data'] = caldata[:,cal_beam_loc]
# do all the noise params
fullfiledict['/S/Noise']['Beamcodes'] = beamcodes_noise[:,noise_beam_loc]
fullfiledict['/S/Noise']['PulsesIntegrated'] = noise_pint[:,noise_beam_loc]
fullfiledict['/S/Noise/Power']['Data'] = noise_pwer[:,noise_beam_loc]
fullfiledict['/S/Noise/Acf']['Data'] = noise_data[:,noise_beam_loc]
irec = 0
# second loop goes through all of the records
for y in patternlocdic[x]:
# determine the samples
arecs, asamps = np.unravel_index(y, all_beams_mat.shape)
# check if you've gone beyond the recordings
arec_bey = np.any(arecs>=nrec_orig)
# get the IQ data for all of the pulses in a pattern
# this should keep the ordering
fullIQ = FormatIQ(all_data,(arecs,asamps))
# Beam by beam goes through the IQ data
beamnum = 0
# make holding arrays for acfs
acf_rec = np.zeros((nbeams,nLags,Nranges,2))
beams_rec =np.zeros((nbeams))
pulsesintegrated = np.zeros((nbeams))
pwr = np.zeros((nbeams,Nranges))
# fill in temp arrays
for ibeam in patternsdict[x]:
cur_beam_loc = np.where(f_patternsdict[x]==ibeam)[0]
temp_lags = CenteredLagProduct(fullIQ[:,cur_beam_loc],nLags)
acf_rec[beamnum,:,:,0] = temp_lags.real.transpose()
acf_rec[beamnum,:,:,1] = temp_lags.imag.transpose()
beams_rec[beamnum] = ibeam
pulsesintegrated[beamnum] = len(cur_beam_loc)
pwr[beamnum,] = acf_rec[beamnum,0,:,0]
beamnum+=1
# pack the files with data from each record
ofile.openFile()
ofile.createDynamicArray(ofile.h5Paths['Data_Power'][0]+'/Data',pwr)
ofile.createDynamicArray(ofile.h5Paths['Data_Acf'][0]+'/Data',acf_rec)
ofile.createDynamicArray(ofile.h5Paths['Data'][0]+'/PulsesIntegrated', pulsesintegrated)
ofile.createDynamicArray(ofile.h5Paths['Data'][0]+'/Beamcodes',beams_rec)
# pack the stuff that is only needed once
if irec ==0:
ofile.createDynamicArray(ofile.h5Paths['Data_Acf'][0]+'/Range',rng[0,:])
ofile.createStaticArray(ofile.h5Paths['Data_Acf'][0]+'/Lags', lags[np.newaxis])
ofile.createDynamicArray(ofile.h5Paths['Data_Power'][0]+'/Range',rng[0,:][np.newaxis])
ofile.createStaticArray(ofile.h5Paths['Data'][0]+'/TxBaud',txbaud)
ofile.createStaticArray(ofile.h5Paths['Data'][0]+'/Ambiguity',ambfunc)
ofile.createStaticArray(ofile.h5Paths['Data'][0]+'/Pulsewidth',pwidth)
# go through original file and get everything
for g_key in fullfiledict:
cur_group = fullfiledict[g_key]
for n_key in cur_group:
# shift per-record arrays up by one when the pattern spills past the original recording
if arec_bey and isinstance(cur_group[n_key], np.ndarray):
# kluge: guard against arrays without a per-record leading dimension
try:
if cur_group[n_key].shape[0]==nrec_orig:
cur_group[n_key][:-1] = cur_group[n_key][1:]
cur_group[n_key][-1] = fullfiledict2[g_key][n_key][1]
except Exception:
pass
# check if this is the last file
elif lastfile:
try:
if cur_group[n_key].shape[0]==file_recs[-1]:
cur_group[n_key] = cur_group[n_key][:mrecs_end]
except Exception:
pass
ofile.createStaticArray(ofile.h5Paths[g_key][0]+'/'+n_key,cur_group[n_key])
# close the file
ofile.closeFile()
irec+=1
print('\tData for Pattern '+str(x)+' has Finished')
|
|
# orm/dependency.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Relationship dependencies.
"""
from .. import sql, util, exc as sa_exc
from . import attributes, exc, sync, unitofwork, \
util as mapperutil
from .interfaces import ONETOMANY, MANYTOONE, MANYTOMANY
class DependencyProcessor(object):
def __init__(self, prop):
self.prop = prop
self.cascade = prop.cascade
self.mapper = prop.mapper
self.parent = prop.parent
self.secondary = prop.secondary
self.direction = prop.direction
self.post_update = prop.post_update
self.passive_deletes = prop.passive_deletes
self.passive_updates = prop.passive_updates
self.enable_typechecks = prop.enable_typechecks
if self.passive_deletes:
self._passive_delete_flag = attributes.PASSIVE_NO_INITIALIZE
else:
self._passive_delete_flag = attributes.PASSIVE_OFF
if self.passive_updates:
self._passive_update_flag = attributes.PASSIVE_NO_INITIALIZE
else:
self._passive_update_flag = attributes.PASSIVE_OFF
self.key = prop.key
if not self.prop.synchronize_pairs:
raise sa_exc.ArgumentError(
"Can't build a DependencyProcessor for relationship %s. "
"No target attributes to populate between parent and "
"child are present" %
self.prop)
@classmethod
def from_relationship(cls, prop):
return _direction_to_processor[prop.direction](prop)
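# Illustrative note (added): _direction_to_processor, defined later in this
# module, maps ONETOMANY -> OneToManyDP, MANYTOONE -> ManyToOneDP and
# MANYTOMANY -> ManyToManyDP, so from_relationship() picks the processor
# class that matches the relationship's direction.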
def hasparent(self, state):
"""return True if the given object instance has a parent,
according to the ``InstrumentedAttribute`` handled by this
``DependencyProcessor``.
"""
return self.parent.class_manager.get_impl(self.key).hasparent(state)
def per_property_preprocessors(self, uow):
"""establish actions and dependencies related to a flush.
These actions will operate on all relevant states in
the aggregate.
"""
uow.register_preprocessor(self, True)
def per_property_flush_actions(self, uow):
after_save = unitofwork.ProcessAll(uow, self, False, True)
before_delete = unitofwork.ProcessAll(uow, self, True, True)
parent_saves = unitofwork.SaveUpdateAll(
uow,
self.parent.primary_base_mapper
)
child_saves = unitofwork.SaveUpdateAll(
uow,
self.mapper.primary_base_mapper
)
parent_deletes = unitofwork.DeleteAll(
uow,
self.parent.primary_base_mapper
)
child_deletes = unitofwork.DeleteAll(
uow,
self.mapper.primary_base_mapper
)
self.per_property_dependencies(uow,
parent_saves,
child_saves,
parent_deletes,
child_deletes,
after_save,
before_delete
)
def per_state_flush_actions(self, uow, states, isdelete):
"""establish actions and dependencies related to a flush.
These actions will operate on all relevant states
individually. This occurs only if there are cycles
in the 'aggregated' version of events.
"""
parent_base_mapper = self.parent.primary_base_mapper
child_base_mapper = self.mapper.primary_base_mapper
child_saves = unitofwork.SaveUpdateAll(uow, child_base_mapper)
child_deletes = unitofwork.DeleteAll(uow, child_base_mapper)
# locate and disable the aggregate processors
# for this dependency
if isdelete:
before_delete = unitofwork.ProcessAll(uow, self, True, True)
before_delete.disabled = True
else:
after_save = unitofwork.ProcessAll(uow, self, False, True)
after_save.disabled = True
# check if the "child" side is part of the cycle
if child_saves not in uow.cycles:
# based on the current dependencies we use, the saves/
# deletes should always be in the 'cycles' collection
# together. if this changes, we will have to break up
# this method a bit more.
assert child_deletes not in uow.cycles
# child side is not part of the cycle, so we will link per-state
# actions to the aggregate "saves", "deletes" actions
child_actions = [
(child_saves, False), (child_deletes, True)
]
child_in_cycles = False
else:
child_in_cycles = True
# check if the "parent" side is part of the cycle
if not isdelete:
parent_saves = unitofwork.SaveUpdateAll(
uow,
self.parent.base_mapper)
parent_deletes = before_delete = None
if parent_saves in uow.cycles:
parent_in_cycles = True
else:
parent_deletes = unitofwork.DeleteAll(
uow,
self.parent.base_mapper)
parent_saves = after_save = None
if parent_deletes in uow.cycles:
parent_in_cycles = True
# now create actions / dependencies for each state.
for state in states:
# detect if there's anything changed or loaded
# by a preprocessor on this state/attribute. if not,
# we should be able to skip it entirely.
sum_ = state.manager[self.key].impl.get_all_pending(
state, state.dict)
if not sum_:
continue
if isdelete:
before_delete = unitofwork.ProcessState(uow,
self, True, state)
if parent_in_cycles:
parent_deletes = unitofwork.DeleteState(
uow,
state,
parent_base_mapper)
else:
after_save = unitofwork.ProcessState(uow, self, False, state)
if parent_in_cycles:
parent_saves = unitofwork.SaveUpdateState(
uow,
state,
parent_base_mapper)
if child_in_cycles:
child_actions = []
for child_state, child in sum_:
if child_state not in uow.states:
child_action = (None, None)
else:
(deleted, listonly) = uow.states[child_state]
if deleted:
child_action = (
unitofwork.DeleteState(
uow, child_state,
child_base_mapper),
True)
else:
child_action = (
unitofwork.SaveUpdateState(
uow, child_state,
child_base_mapper),
False)
child_actions.append(child_action)
# establish dependencies between our possibly per-state
# parent action and our possibly per-state child action.
for child_action, childisdelete in child_actions:
self.per_state_dependencies(uow, parent_saves,
parent_deletes,
child_action,
after_save, before_delete,
isdelete, childisdelete)
def presort_deletes(self, uowcommit, states):
return False
def presort_saves(self, uowcommit, states):
return False
def process_deletes(self, uowcommit, states):
pass
def process_saves(self, uowcommit, states):
pass
def prop_has_changes(self, uowcommit, states, isdelete):
if not isdelete or self.passive_deletes:
passive = attributes.PASSIVE_NO_INITIALIZE
elif self.direction is MANYTOONE:
passive = attributes.PASSIVE_NO_FETCH_RELATED
else:
passive = attributes.PASSIVE_OFF
for s in states:
# TODO: add a high speed method
# to InstanceState which returns: attribute
# has a non-None value, or had one
history = uowcommit.get_attribute_history(
s,
self.key,
passive)
if history and not history.empty():
return True
else:
return states and \
not self.prop._is_self_referential and \
self.mapper in uowcommit.mappers
def _verify_canload(self, state):
if self.prop.uselist and state is None:
raise exc.FlushError(
"Can't flush None value found in "
"collection %s" % (self.prop, ))
elif state is not None and \
not self.mapper._canload(state,
allow_subtypes=not self.enable_typechecks):
if self.mapper._canload(state, allow_subtypes=True):
raise exc.FlushError('Attempting to flush an item of type '
'%(x)s as a member of collection '
'"%(y)s". Expected an object of type '
'%(z)s or a polymorphic subclass of '
'this type. If %(x)s is a subclass of '
'%(z)s, configure mapper "%(zm)s" to '
'load this subtype polymorphically, or '
'set enable_typechecks=False to allow '
'any subtype to be accepted for flush. '
% {
'x': state.class_,
'y': self.prop,
'z': self.mapper.class_,
'zm': self.mapper,
})
else:
raise exc.FlushError(
'Attempting to flush an item of type '
'%(x)s as a member of collection '
'"%(y)s". Expected an object of type '
'%(z)s or a polymorphic subclass of '
'this type.' % {
'x': state.class_,
'y': self.prop,
'z': self.mapper.class_,
})
def _synchronize(self, state, child, associationrow,
clearkeys, uowcommit):
raise NotImplementedError()
def _get_reversed_processed_set(self, uow):
if not self.prop._reverse_property:
return None
process_key = tuple(sorted(
[self.key] +
[p.key for p in self.prop._reverse_property]
))
return uow.memo(
('reverse_key', process_key),
set
)
def _post_update(self, state, uowcommit, related):
for x in related:
if x is not None:
uowcommit.issue_post_update(
state,
[r for l, r in self.prop.synchronize_pairs]
)
break
def _pks_changed(self, uowcommit, state):
raise NotImplementedError()
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, self.prop)
class OneToManyDP(DependencyProcessor):
def per_property_dependencies(self, uow, parent_saves,
child_saves,
parent_deletes,
child_deletes,
after_save,
before_delete,
):
if self.post_update:
child_post_updates = unitofwork.IssuePostUpdate(
uow,
self.mapper.primary_base_mapper,
False)
child_pre_updates = unitofwork.IssuePostUpdate(
uow,
self.mapper.primary_base_mapper,
True)
uow.dependencies.update([
(child_saves, after_save),
(parent_saves, after_save),
(after_save, child_post_updates),
(before_delete, child_pre_updates),
(child_pre_updates, parent_deletes),
(child_pre_updates, child_deletes),
])
else:
uow.dependencies.update([
(parent_saves, after_save),
(after_save, child_saves),
(after_save, child_deletes),
(child_saves, parent_deletes),
(child_deletes, parent_deletes),
(before_delete, child_saves),
(before_delete, child_deletes),
])
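# Reading note (added, based on the unitofwork dependency convention): each
# (a, b) pair registered above records that action a must be processed
# before action b; e.g. (after_save, child_saves) runs this dependency's
# post-save processing before the child rows' save actions.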
def per_state_dependencies(self, uow,
save_parent,
delete_parent,
child_action,
after_save, before_delete,
isdelete, childisdelete):
if self.post_update:
child_post_updates = unitofwork.IssuePostUpdate(
uow,
self.mapper.primary_base_mapper,
False)
child_pre_updates = unitofwork.IssuePostUpdate(
uow,
self.mapper.primary_base_mapper,
True)
# TODO: this whole block is not covered
# by any tests
if not isdelete:
if childisdelete:
uow.dependencies.update([
(child_action, after_save),
(after_save, child_post_updates),
])
else:
uow.dependencies.update([
(save_parent, after_save),
(child_action, after_save),
(after_save, child_post_updates),
])
else:
if childisdelete:
uow.dependencies.update([
(before_delete, child_pre_updates),
(child_pre_updates, delete_parent),
])
else:
uow.dependencies.update([
(before_delete, child_pre_updates),
(child_pre_updates, delete_parent),
])
elif not isdelete:
uow.dependencies.update([
(save_parent, after_save),
(after_save, child_action),
(save_parent, child_action)
])
else:
uow.dependencies.update([
(before_delete, child_action),
(child_action, delete_parent)
])
def presort_deletes(self, uowcommit, states):
# head object is being deleted, and we manage its list of
# child objects; the child objects have to have their
# foreign key to the parent set to NULL
should_null_fks = not self.cascade.delete and \
not self.passive_deletes == 'all'
for state in states:
history = uowcommit.get_attribute_history(
state,
self.key,
self._passive_delete_flag)
if history:
for child in history.deleted:
if child is not None and self.hasparent(child) is False:
if self.cascade.delete_orphan:
uowcommit.register_object(child, isdelete=True)
else:
uowcommit.register_object(child)
if should_null_fks:
for child in history.unchanged:
if child is not None:
uowcommit.register_object(child,
operation="delete", prop=self.prop)
def presort_saves(self, uowcommit, states):
children_added = uowcommit.memo(('children_added', self), set)
for state in states:
pks_changed = self._pks_changed(uowcommit, state)
if not pks_changed or self.passive_updates:
passive = attributes.PASSIVE_NO_INITIALIZE
else:
passive = attributes.PASSIVE_OFF
history = uowcommit.get_attribute_history(
state,
self.key,
passive)
if history:
for child in history.added:
if child is not None:
uowcommit.register_object(child, cancel_delete=True,
operation="add",
prop=self.prop)
children_added.update(history.added)
for child in history.deleted:
if not self.cascade.delete_orphan:
uowcommit.register_object(child, isdelete=False,
operation='delete',
prop=self.prop)
elif self.hasparent(child) is False:
uowcommit.register_object(child, isdelete=True,
operation="delete", prop=self.prop)
for c, m, st_, dct_ in self.mapper.cascade_iterator(
'delete', child):
uowcommit.register_object(
st_,
isdelete=True)
if pks_changed:
if history:
for child in history.unchanged:
if child is not None:
uowcommit.register_object(
child,
False,
self.passive_updates,
operation="pk change",
prop=self.prop)
def process_deletes(self, uowcommit, states):
# head object is being deleted, and we manage its list of
# child objects; the child objects have to have their foreign
# key to the parent set to NULL. This phase can be called
# safely for any cascade but is unnecessary if delete cascade
# is on.
if self.post_update or not self.passive_deletes == 'all':
children_added = uowcommit.memo(('children_added', self), set)
for state in states:
history = uowcommit.get_attribute_history(
state,
self.key,
self._passive_delete_flag)
if history:
for child in history.deleted:
if child is not None and \
self.hasparent(child) is False:
self._synchronize(
state,
child,
None, True,
uowcommit, False)
if self.post_update and child:
self._post_update(child, uowcommit, [state])
if self.post_update or not self.cascade.delete:
for child in set(history.unchanged).\
difference(children_added):
if child is not None:
self._synchronize(
state,
child,
None, True,
uowcommit, False)
if self.post_update and child:
self._post_update(child,
uowcommit,
[state])
# technically, we can even remove each child from the
# collection here too, but this would be a somewhat
# inconsistent behavior since it wouldn't happen
# if the old parent wasn't deleted but the child was moved.
def process_saves(self, uowcommit, states):
for state in states:
history = uowcommit.get_attribute_history(
state,
self.key,
attributes.PASSIVE_NO_INITIALIZE)
if history:
for child in history.added:
self._synchronize(state, child, None,
False, uowcommit, False)
if child is not None and self.post_update:
self._post_update(child, uowcommit, [state])
for child in history.deleted:
if not self.cascade.delete_orphan and \
not self.hasparent(child):
self._synchronize(state, child, None, True,
uowcommit, False)
if self._pks_changed(uowcommit, state):
for child in history.unchanged:
self._synchronize(state, child, None,
False, uowcommit, True)
def _synchronize(self, state, child,
associationrow, clearkeys, uowcommit,
pks_changed):
source = state
dest = child
self._verify_canload(child)
if dest is None or \
(not self.post_update and uowcommit.is_deleted(dest)):
return
if clearkeys:
sync.clear(dest, self.mapper, self.prop.synchronize_pairs)
else:
sync.populate(source, self.parent, dest, self.mapper,
self.prop.synchronize_pairs, uowcommit,
self.passive_updates and pks_changed)
def _pks_changed(self, uowcommit, state):
return sync.source_modified(
uowcommit,
state,
self.parent,
self.prop.synchronize_pairs)
class ManyToOneDP(DependencyProcessor):
def __init__(self, prop):
DependencyProcessor.__init__(self, prop)
self.mapper._dependency_processors.append(DetectKeySwitch(prop))
def per_property_dependencies(self, uow,
parent_saves,
child_saves,
parent_deletes,
child_deletes,
after_save,
before_delete):
if self.post_update:
parent_post_updates = unitofwork.IssuePostUpdate(
uow,
self.parent.primary_base_mapper,
False)
parent_pre_updates = unitofwork.IssuePostUpdate(
uow,
self.parent.primary_base_mapper,
True)
uow.dependencies.update([
(child_saves, after_save),
(parent_saves, after_save),
(after_save, parent_post_updates),
(after_save, parent_pre_updates),
(before_delete, parent_pre_updates),
(parent_pre_updates, child_deletes),
])
else:
uow.dependencies.update([
(child_saves, after_save),
(after_save, parent_saves),
(parent_saves, child_deletes),
(parent_deletes, child_deletes)
])
def per_state_dependencies(self, uow,
save_parent,
delete_parent,
child_action,
after_save, before_delete,
isdelete, childisdelete):
if self.post_update:
if not isdelete:
parent_post_updates = unitofwork.IssuePostUpdate(
uow,
self.parent.primary_base_mapper,
False)
if childisdelete:
uow.dependencies.update([
(after_save, parent_post_updates),
(parent_post_updates, child_action)
])
else:
uow.dependencies.update([
(save_parent, after_save),
(child_action, after_save),
(after_save, parent_post_updates)
])
else:
parent_pre_updates = unitofwork.IssuePostUpdate(
uow,
self.parent.primary_base_mapper,
True)
uow.dependencies.update([
(before_delete, parent_pre_updates),
(parent_pre_updates, delete_parent),
(parent_pre_updates, child_action)
])
elif not isdelete:
if not childisdelete:
uow.dependencies.update([
(child_action, after_save),
(after_save, save_parent),
])
else:
uow.dependencies.update([
(after_save, save_parent),
])
else:
if childisdelete:
uow.dependencies.update([
(delete_parent, child_action)
])
def presort_deletes(self, uowcommit, states):
if self.cascade.delete or self.cascade.delete_orphan:
for state in states:
history = uowcommit.get_attribute_history(
state,
self.key,
self._passive_delete_flag)
if history:
if self.cascade.delete_orphan:
todelete = history.sum()
else:
todelete = history.non_deleted()
for child in todelete:
if child is None:
continue
uowcommit.register_object(child, isdelete=True,
operation="delete", prop=self.prop)
t = self.mapper.cascade_iterator('delete', child)
for c, m, st_, dct_ in t:
uowcommit.register_object(
st_, isdelete=True)
def presort_saves(self, uowcommit, states):
for state in states:
uowcommit.register_object(state, operation="add", prop=self.prop)
if self.cascade.delete_orphan:
history = uowcommit.get_attribute_history(
state,
self.key,
self._passive_delete_flag)
if history:
for child in history.deleted:
if self.hasparent(child) is False:
uowcommit.register_object(child, isdelete=True,
operation="delete", prop=self.prop)
t = self.mapper.cascade_iterator('delete', child)
for c, m, st_, dct_ in t:
uowcommit.register_object(st_, isdelete=True)
def process_deletes(self, uowcommit, states):
if self.post_update and \
not self.cascade.delete_orphan and \
not self.passive_deletes == 'all':
# post_update means we have to update our
# row to not reference the child object
# before we can DELETE the row
for state in states:
self._synchronize(state, None, None, True, uowcommit)
if state and self.post_update:
history = uowcommit.get_attribute_history(
state,
self.key,
self._passive_delete_flag)
if history:
self._post_update(state, uowcommit, history.sum())
def process_saves(self, uowcommit, states):
for state in states:
history = uowcommit.get_attribute_history(
state,
self.key,
attributes.PASSIVE_NO_INITIALIZE)
if history:
for child in history.added:
self._synchronize(state, child, None, False,
uowcommit, "add")
if self.post_update:
self._post_update(state, uowcommit, history.sum())
def _synchronize(self, state, child, associationrow,
clearkeys, uowcommit, operation=None):
if state is None or \
(not self.post_update and uowcommit.is_deleted(state)):
return
if operation is not None and \
child is not None and \
not uowcommit.session._contains_state(child):
util.warn(
"Object of type %s not in session, %s "
"operation along '%s' won't proceed" %
(mapperutil.state_class_str(child), operation, self.prop))
return
if clearkeys or child is None:
sync.clear(state, self.parent, self.prop.synchronize_pairs)
else:
self._verify_canload(child)
sync.populate(child, self.mapper, state,
self.parent,
self.prop.synchronize_pairs,
uowcommit,
False)
class DetectKeySwitch(DependencyProcessor):
"""For many-to-one relationships with no one-to-many backref,
searches for parents through the unit of work when a primary
key has changed and updates them.
Theoretically, this approach could be expanded to support transparent
deletion of objects referenced via many-to-one as well, although
the current attribute system doesn't do enough bookkeeping for this
to be efficient.
"""
def per_property_preprocessors(self, uow):
if self.prop._reverse_property:
if self.passive_updates:
return
else:
if False in (prop.passive_updates for \
prop in self.prop._reverse_property):
return
uow.register_preprocessor(self, False)
def per_property_flush_actions(self, uow):
parent_saves = unitofwork.SaveUpdateAll(
uow,
self.parent.base_mapper)
after_save = unitofwork.ProcessAll(uow, self, False, False)
uow.dependencies.update([
(parent_saves, after_save)
])
def per_state_flush_actions(self, uow, states, isdelete):
pass
def presort_deletes(self, uowcommit, states):
pass
def presort_saves(self, uow, states):
if not self.passive_updates:
# for non-passive updates, register in the preprocess stage
# so that mapper save_obj() gets a hold of changes
self._process_key_switches(states, uow)
def prop_has_changes(self, uow, states, isdelete):
if not isdelete and self.passive_updates:
d = self._key_switchers(uow, states)
return bool(d)
return False
def process_deletes(self, uowcommit, states):
assert False
def process_saves(self, uowcommit, states):
# for passive updates, register objects in the process stage
# so that we avoid ManyToOneDP's registering the object without
# the listonly flag in its own preprocess stage (results in UPDATE
# statements being emitted)
assert self.passive_updates
self._process_key_switches(states, uowcommit)
def _key_switchers(self, uow, states):
switched, notswitched = uow.memo(
('pk_switchers', self),
lambda: (set(), set())
)
allstates = switched.union(notswitched)
for s in states:
if s not in allstates:
if self._pks_changed(uow, s):
switched.add(s)
else:
notswitched.add(s)
return switched
def _process_key_switches(self, deplist, uowcommit):
switchers = self._key_switchers(uowcommit, deplist)
if switchers:
# if primary key values have actually changed somewhere, perform
# a linear search through the UOW in search of a parent.
for state in uowcommit.session.identity_map.all_states():
if not issubclass(state.class_, self.parent.class_):
continue
dict_ = state.dict
related = state.get_impl(self.key).get(state, dict_,
passive=self._passive_update_flag)
if related is not attributes.PASSIVE_NO_RESULT and \
related is not None:
related_state = attributes.instance_state(dict_[self.key])
if related_state in switchers:
uowcommit.register_object(state,
False,
self.passive_updates)
sync.populate(
related_state,
self.mapper, state,
self.parent, self.prop.synchronize_pairs,
uowcommit, self.passive_updates)
def _pks_changed(self, uowcommit, state):
return bool(state.key) and sync.source_modified(uowcommit,
state,
self.mapper,
self.prop.synchronize_pairs)
class ManyToManyDP(DependencyProcessor):
def per_property_dependencies(self, uow, parent_saves,
child_saves,
parent_deletes,
child_deletes,
after_save,
before_delete
):
uow.dependencies.update([
(parent_saves, after_save),
(child_saves, after_save),
(after_save, child_deletes),
# a rowswitch on the parent from deleted to saved
# can make this one occur, as the "save" may remove
# an element from the
# "deleted" list before we have a chance to
# process its child rows
(before_delete, parent_saves),
(before_delete, parent_deletes),
(before_delete, child_deletes),
(before_delete, child_saves),
])
def per_state_dependencies(self, uow,
save_parent,
delete_parent,
child_action,
after_save, before_delete,
isdelete, childisdelete):
if not isdelete:
if childisdelete:
uow.dependencies.update([
(save_parent, after_save),
(after_save, child_action),
])
else:
uow.dependencies.update([
(save_parent, after_save),
(child_action, after_save),
])
else:
uow.dependencies.update([
(before_delete, child_action),
(before_delete, delete_parent)
])
def presort_deletes(self, uowcommit, states):
# TODO: no tests fail if this whole
# thing is removed !!!!
if not self.passive_deletes:
# if no passive deletes, load history on
# the collection, so that prop_has_changes()
# returns True
for state in states:
uowcommit.get_attribute_history(
state,
self.key,
self._passive_delete_flag)
def presort_saves(self, uowcommit, states):
if not self.passive_updates:
# if no passive updates, load history on
# each collection where parent has changed PK,
# so that prop_has_changes() returns True
for state in states:
if self._pks_changed(uowcommit, state):
history = uowcommit.get_attribute_history(
state,
self.key,
attributes.PASSIVE_OFF)
if not self.cascade.delete_orphan:
return
# check for child items removed from the collection
# if delete_orphan check is turned on.
for state in states:
history = uowcommit.get_attribute_history(
state,
self.key,
attributes.PASSIVE_NO_INITIALIZE)
if history:
for child in history.deleted:
if self.hasparent(child) is False:
uowcommit.register_object(child, isdelete=True,
operation="delete", prop=self.prop)
for c, m, st_, dct_ in self.mapper.cascade_iterator(
'delete',
child):
uowcommit.register_object(
st_, isdelete=True)
def process_deletes(self, uowcommit, states):
secondary_delete = []
secondary_insert = []
secondary_update = []
processed = self._get_reversed_processed_set(uowcommit)
tmp = set()
for state in states:
# this history should be cached already, as
# we loaded it in presort_deletes
history = uowcommit.get_attribute_history(
state,
self.key,
self._passive_delete_flag)
if history:
for child in history.non_added():
if child is None or \
(processed is not None and
(state, child) in processed):
continue
associationrow = {}
if not self._synchronize(
state,
child,
associationrow,
False, uowcommit, "delete"):
continue
secondary_delete.append(associationrow)
tmp.update((c, state) for c in history.non_added())
if processed is not None:
processed.update(tmp)
self._run_crud(uowcommit, secondary_insert,
secondary_update, secondary_delete)
def process_saves(self, uowcommit, states):
secondary_delete = []
secondary_insert = []
secondary_update = []
processed = self._get_reversed_processed_set(uowcommit)
tmp = set()
for state in states:
need_cascade_pks = not self.passive_updates and \
self._pks_changed(uowcommit, state)
if need_cascade_pks:
passive = attributes.PASSIVE_OFF
else:
passive = attributes.PASSIVE_NO_INITIALIZE
history = uowcommit.get_attribute_history(state, self.key,
passive)
if history:
for child in history.added:
if (processed is not None and
(state, child) in processed):
continue
associationrow = {}
if not self._synchronize(state,
child,
associationrow,
False, uowcommit, "add"):
continue
secondary_insert.append(associationrow)
for child in history.deleted:
if (processed is not None and
(state, child) in processed):
continue
associationrow = {}
if not self._synchronize(state,
child,
associationrow,
False, uowcommit, "delete"):
continue
secondary_delete.append(associationrow)
tmp.update((c, state)
for c in history.added + history.deleted)
if need_cascade_pks:
for child in history.unchanged:
associationrow = {}
sync.update(state,
self.parent,
associationrow,
"old_",
self.prop.synchronize_pairs)
sync.update(child,
self.mapper,
associationrow,
"old_",
self.prop.secondary_synchronize_pairs)
secondary_update.append(associationrow)
if processed is not None:
processed.update(tmp)
self._run_crud(uowcommit, secondary_insert,
secondary_update, secondary_delete)
def _run_crud(self, uowcommit, secondary_insert,
secondary_update, secondary_delete):
connection = uowcommit.transaction.connection(self.mapper)
if secondary_delete:
associationrow = secondary_delete[0]
statement = self.secondary.delete(sql.and_(*[
c == sql.bindparam(c.key, type_=c.type)
for c in self.secondary.c
if c.key in associationrow
]))
result = connection.execute(statement, secondary_delete)
if result.supports_sane_multi_rowcount() and \
result.rowcount != len(secondary_delete):
raise exc.StaleDataError(
"DELETE statement on table '%s' expected to delete "
"%d row(s); Only %d were matched." %
(self.secondary.description, len(secondary_delete),
result.rowcount)
)
if secondary_update:
associationrow = secondary_update[0]
statement = self.secondary.update(sql.and_(*[
c == sql.bindparam("old_" + c.key, type_=c.type)
for c in self.secondary.c
if c.key in associationrow
]))
result = connection.execute(statement, secondary_update)
if result.supports_sane_multi_rowcount() and \
result.rowcount != len(secondary_update):
raise exc.StaleDataError(
"UPDATE statement on table '%s' expected to update "
"%d row(s); Only %d were matched." %
(self.secondary.description, len(secondary_update),
result.rowcount)
)
if secondary_insert:
statement = self.secondary.insert()
connection.execute(statement, secondary_insert)
def _synchronize(self, state, child, associationrow,
clearkeys, uowcommit, operation):
# this checks for None if uselist=True
self._verify_canload(child)
# but if uselist=False we get here. If child is None,
# no association row can be generated, so return.
if child is None:
return False
if child is not None and not uowcommit.session._contains_state(child):
if not child.deleted:
util.warn(
"Object of type %s not in session, %s "
"operation along '%s' won't proceed" %
(mapperutil.state_class_str(child), operation, self.prop))
return False
sync.populate_dict(state, self.parent, associationrow,
self.prop.synchronize_pairs)
sync.populate_dict(child, self.mapper, associationrow,
self.prop.secondary_synchronize_pairs)
return True
def _pks_changed(self, uowcommit, state):
return sync.source_modified(
uowcommit,
state,
self.parent,
self.prop.synchronize_pairs)
_direction_to_processor = {
ONETOMANY: OneToManyDP,
MANYTOONE: ManyToOneDP,
MANYTOMANY: ManyToManyDP,
}
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Pipeline options obtained from command line parsing."""
import argparse
from apache_beam.options.value_provider import RuntimeValueProvider
from apache_beam.options.value_provider import StaticValueProvider
from apache_beam.options.value_provider import ValueProvider
from apache_beam.transforms.display import HasDisplayData
__all__ = [
'PipelineOptions',
'StandardOptions',
'TypeOptions',
'DirectOptions',
'GoogleCloudOptions',
'WorkerOptions',
'DebugOptions',
'ProfilingOptions',
'SetupOptions',
'TestOptions',
]
def _static_value_provider_of(value_type):
""""Helper function to plug a ValueProvider into argparse.
Args:
value_type: the type of the value. Since the type param of argparse's
add_argument will always be ValueProvider, we need to
preserve the type of the actual value.
Returns:
A partially constructed StaticValueProvider in the form of a function.
"""
def _f(value):
_f.__name__ = value_type.__name__
return StaticValueProvider(value_type, value)
return _f
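# Illustrative sketch (editorial, not part of the original module): the factory
# above lets argparse keep reporting the underlying type while the parsed value
# is wrapped in a StaticValueProvider.
#
#     >>> to_int = _static_value_provider_of(int)
#     >>> vp = to_int('42')                   # StaticValueProvider(int, '42')
#     >>> isinstance(vp, StaticValueProvider)
#     True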
class _BeamArgumentParser(argparse.ArgumentParser):
"""An ArgumentParser that supports ValueProvider options.
Example Usage::
class TemplateUserOptions(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
parser.add_value_provider_argument('--vp-arg1', default='start')
parser.add_value_provider_argument('--vp-arg2')
parser.add_argument('--non-vp-arg')
"""
def add_value_provider_argument(self, *args, **kwargs):
"""ValueProvider arguments can be either of type keyword or positional.
At runtime, even positional arguments will need to be supplied in the
key/value form.
"""
# Extract the option name from positional argument ['pos_arg']
assert args != () and len(args[0]) >= 1
if args[0][0] != '-':
option_name = args[0]
if kwargs.get('nargs') is None: # make them optionally templated
kwargs['nargs'] = '?'
else:
# or keyword arguments like [--kw_arg, -k, -w] or [--kw-arg]
option_name = [i.replace('--', '') for i in args if i[:2] == '--'][0]
# reassign the type to make room for using
# StaticValueProvider as the type for add_argument
value_type = kwargs.get('type') or str
kwargs['type'] = _static_value_provider_of(value_type)
# reassign default to default_value to make room for using
# RuntimeValueProvider as the default for add_argument
default_value = kwargs.get('default')
kwargs['default'] = RuntimeValueProvider(
option_name=option_name,
value_type=value_type,
default_value=default_value
)
# have add_argument do most of the work
self.add_argument(*args, **kwargs)
class PipelineOptions(HasDisplayData):
"""Pipeline options class used as container for command line options.
The class is essentially a wrapper over the standard argparse Python module
(see https://docs.python.org/3/library/argparse.html). To define one option
or a group of options you subclass from PipelineOptions::
class XyzOptions(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument('--abc', default='start')
parser.add_argument('--xyz', default='end')
The arguments for the add_argument() method are exactly the ones
described in the argparse public documentation.
Pipeline objects require an options object during initialization.
This is obtained simply by initializing an options class as defined above::
p = Pipeline(options=XyzOptions())
if p.options.xyz == 'end':
raise ValueError('Option xyz has an invalid value.')
By default the options classes will use command line arguments to initialize
the options.
"""
def __init__(self, flags=None, **kwargs):
"""Initialize an options class.
The initializer will traverse all subclasses, add all their argparse
arguments and then parse the command line specified by flags or by default
the one obtained from sys.argv.
The subclasses are not expected to require a redefinition of __init__.
Args:
flags: An iterable of command line arguments to be used. If not specified
then sys.argv will be used as input for parsing arguments.
**kwargs: Add overrides for arguments passed in flags.
"""
self._flags = flags
self._all_options = kwargs
parser = _BeamArgumentParser()
for cls in type(self).mro():
if cls == PipelineOptions:
break
elif '_add_argparse_args' in cls.__dict__:
cls._add_argparse_args(parser)
# The _visible_options attribute will contain only those options from the
# flags (i.e., command line) that can be recognized. The _all_options
# field contains additional overrides.
self._visible_options, _ = parser.parse_known_args(flags)
@classmethod
def _add_argparse_args(cls, parser):
# Override this in subclasses to provide options.
pass
@classmethod
def from_dictionary(cls, options):
"""Returns a PipelineOptions from a dictionary of arguments.
Args:
options: Dictionary of argument value pairs.
Returns:
A PipelineOptions object representing the given arguments.
"""
flags = []
for k, v in options.iteritems():
if isinstance(v, bool):
if v:
flags.append('--%s' % k)
else:
flags.append('--%s=%s' % (k, v))
return cls(flags)
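# Illustrative sketch (editorial): boolean True values become bare flags and
# everything else becomes '--key=value' before reaching the normal constructor,
# so
#
#     >>> PipelineOptions.from_dictionary({'streaming': True, 'runner': 'DirectRunner'})
#
# is equivalent to PipelineOptions(['--streaming', '--runner=DirectRunner'])
# (flag order may differ because the dictionary is unordered here).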
def get_all_options(self, drop_default=False):
"""Returns a dictionary of all defined arguments.
Collects all defined arguments (arguments that are defined in
any subclass of PipelineOptions) into a dictionary.
Args:
drop_default: If set to true, options that are equal to their default
values are not returned as part of the result dictionary.
Returns:
Dictionary of all args and values.
"""
# TODO(BEAM-1319): PipelineOption sub-classes in the main session might be
# repeated. Pick last unique instance of each subclass to avoid conflicts.
subset = {}
parser = _BeamArgumentParser()
for cls in PipelineOptions.__subclasses__():
subset[str(cls)] = cls
for cls in subset.values():
cls._add_argparse_args(parser) # pylint: disable=protected-access
known_args, _ = parser.parse_known_args(self._flags)
result = vars(known_args)
# Apply the overrides if any
for k in result.keys():
if k in self._all_options:
result[k] = self._all_options[k]
if (drop_default and
parser.get_default(k) == result[k] and
not isinstance(parser.get_default(k), ValueProvider)):
del result[k]
return result
def display_data(self):
return self.get_all_options(True)
def view_as(self, cls):
view = cls(self._flags)
view._all_options = self._all_options
return view
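# Editorial note with an illustrative sketch: a view created here shares
# self._all_options with the original object, so overrides applied through one
# view are visible through any later view of the same options.
#
#     >>> gcp = options.view_as(GoogleCloudOptions)
#     >>> gcp.project = 'my-project'          # hypothetical project id
#     >>> options.view_as(GoogleCloudOptions).project
#     'my-project'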
def _visible_option_list(self):
return sorted(option
for option in dir(self._visible_options) if option[0] != '_')
def __dir__(self):
return sorted(dir(type(self)) + self.__dict__.keys() +
self._visible_option_list())
def __getattr__(self, name):
# Special methods which may be accessed before the object is
# fully constructed (e.g. in unpickling).
if name[:2] == name[-2:] == '__':
return object.__getattribute__(self, name)
elif name in self._visible_option_list():
return self._all_options.get(name, getattr(self._visible_options, name))
else:
raise AttributeError("'%s' object has no attribute '%s'" %
(type(self).__name__, name))
def __setattr__(self, name, value):
if name in ('_flags', '_all_options', '_visible_options'):
super(PipelineOptions, self).__setattr__(name, value)
elif name in self._visible_option_list():
self._all_options[name] = value
else:
raise AttributeError("'%s' object has no attribute '%s'" %
(type(self).__name__, name))
def __str__(self):
return '%s(%s)' % (type(self).__name__,
', '.join('%s=%s' % (option, getattr(self, option))
for option in self._visible_option_list()))
class StandardOptions(PipelineOptions):
DEFAULT_RUNNER = 'DirectRunner'
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument(
'--runner',
help=('Pipeline runner used to execute the workflow. Valid values are '
'DirectRunner, DataflowRunner.'))
# Whether to enable streaming mode.
parser.add_argument('--streaming',
default=False,
action='store_true',
help='Whether to enable streaming mode.')
class TypeOptions(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
# TODO(laolu): Add a type inferencing option here once implemented.
parser.add_argument('--type_check_strictness',
default='DEFAULT_TO_ANY',
choices=['ALL_REQUIRED', 'DEFAULT_TO_ANY'],
help='The level of exhaustive manual type-hint '
'annotation required')
parser.add_argument('--no_pipeline_type_check',
dest='pipeline_type_check',
action='store_false',
help='Disable type checking at pipeline construction '
'time')
parser.add_argument('--runtime_type_check',
default=False,
action='store_true',
help='Enable type checking at pipeline execution '
'time. NOTE: only supported with the '
'DirectRunner')
class DirectOptions(PipelineOptions):
"""DirectRunner-specific execution options."""
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument(
'--no_direct_runner_use_stacked_bundle',
action='store_false',
dest='direct_runner_use_stacked_bundle',
help='DirectRunner uses stacked WindowedValues within a Bundle for '
'memory optimization. Set --no_direct_runner_use_stacked_bundle to '
'avoid it.')
parser.add_argument(
'--direct_runner_bundle_retry',
action='store_true',
default=False,
help=
('Whether to allow bundle retries. If True, the maximum '
'number of attempts to process a bundle is 4. '))
class GoogleCloudOptions(PipelineOptions):
"""Google Cloud Dataflow service execution options."""
BIGQUERY_API_SERVICE = 'bigquery.googleapis.com'
COMPUTE_API_SERVICE = 'compute.googleapis.com'
STORAGE_API_SERVICE = 'storage.googleapis.com'
DATAFLOW_ENDPOINT = 'https://dataflow.googleapis.com'
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument(
'--dataflow_endpoint',
default=cls.DATAFLOW_ENDPOINT,
help=
('The URL for the Dataflow API. If not set, the default public URL '
'will be used.'))
# Remote execution must check that this option is not None.
parser.add_argument('--project',
default=None,
help='Name of the Cloud project owning the Dataflow '
'job.')
# Remote execution must check that this option is not None.
parser.add_argument('--job_name',
default=None,
help='Name of the Cloud Dataflow job.')
# Remote execution must check that this option is not None.
parser.add_argument('--staging_location',
default=None,
help='GCS path for staging code packages needed by '
'workers.')
# Remote execution must check that this option is not None.
# If staging_location is not set, it defaults to temp_location.
parser.add_argument('--temp_location',
default=None,
help='GCS path for saving temporary workflow jobs.')
# The Cloud Dataflow service does not yet honor this setting. However, once
# service support is added, users of this SDK will be able to control
# the region. Default is up to the Dataflow service. See
# https://cloud.google.com/compute/docs/regions-zones/regions-zones for a
# list of valid options.
parser.add_argument('--region',
default='us-central1',
help='The Google Compute Engine region for creating '
'the Dataflow job.')
parser.add_argument('--service_account_email',
default=None,
help='Identity to run virtual machines as.')
parser.add_argument('--no_auth', dest='no_auth', type=bool, default=False)
# Option to run templated pipelines
parser.add_argument('--template_location',
default=None,
help='Save job to specified local or GCS location.')
parser.add_argument(
'--label', '--labels',
dest='labels',
action='append',
default=None,
help='Labels that will be applied to this Dataflow job. Labels are key '
'value pairs separated by = (e.g. --label key=value).')
def validate(self, validator):
errors = []
if validator.is_service_runner():
errors.extend(validator.validate_cloud_options(self))
errors.extend(validator.validate_gcs_path(self, 'temp_location'))
if getattr(self, 'staging_location',
None) or getattr(self, 'temp_location', None) is None:
errors.extend(validator.validate_gcs_path(self, 'staging_location'))
if self.view_as(DebugOptions).dataflow_job_file:
if self.view_as(GoogleCloudOptions).template_location:
errors.append('--dataflow_job_file and --template_location '
'are mutually exclusive.')
return errors
# Command line options controlling the worker pool configuration.
# TODO(silviuc): Update description when autoscaling options are in.
class WorkerOptions(PipelineOptions):
"""Worker pool configuration options."""
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument(
'--num_workers',
type=int,
default=None,
help=
('Number of workers to use when executing the Dataflow job. If not '
'set, the Dataflow service will use a reasonable default.'))
parser.add_argument(
'--max_num_workers',
type=int,
default=None,
help=
('Maximum number of workers to use when executing the Dataflow job.'))
parser.add_argument(
'--autoscaling_algorithm',
type=str,
choices=['NONE', 'THROUGHPUT_BASED'],
default=None, # Meaning unset, distinct from 'NONE' meaning don't scale
help=
('If and how to autoscale the worker pool.'))
parser.add_argument(
'--worker_machine_type',
dest='machine_type',
default=None,
help=('Machine type to create Dataflow worker VMs as. See '
'https://cloud.google.com/compute/docs/machine-types '
'for a list of valid options. If not set, '
'the Dataflow service will choose a reasonable '
'default.'))
parser.add_argument(
'--disk_size_gb',
type=int,
default=None,
help=
('Remote worker disk size, in gigabytes, or 0 to use the default size. '
'If not set, the Dataflow service will use a reasonable default.'))
parser.add_argument(
'--worker_disk_type',
dest='disk_type',
default=None,
help=('Specifies what type of persistent disk should be used.'))
parser.add_argument(
'--zone',
default=None,
help=(
'GCE availability zone for launching workers. Default is up to the '
'Dataflow service.'))
parser.add_argument(
'--network',
default=None,
help=(
'GCE network for launching workers. Default is up to the Dataflow '
'service.'))
parser.add_argument(
'--subnetwork',
default=None,
help=(
'GCE subnetwork for launching workers. Default is up to the '
'Dataflow service. Expected format is '
'regions/REGION/subnetworks/SUBNETWORK or the fully qualified '
'subnetwork name. For more information, see '
'https://cloud.google.com/compute/docs/vpc/'))
parser.add_argument(
'--worker_harness_container_image',
default=None,
help=('Docker registry location of container image to use for the '
'worker harness. Default is the container for the version of the '
'SDK. Note: currently, only approved Google Cloud Dataflow '
'container images may be used here.'))
parser.add_argument(
'--use_public_ips',
default=None,
action='store_true',
help='Whether to assign public IP addresses to the worker VMs.')
parser.add_argument(
'--no_use_public_ips',
dest='use_public_ips',
default=None,
action='store_false',
help='Whether to assign only private IP addresses to the worker VMs.')
def validate(self, validator):
errors = []
if validator.is_service_runner():
errors.extend(
validator.validate_optional_argument_positive(self, 'num_workers'))
return errors
class DebugOptions(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument('--dataflow_job_file',
default=None,
help='Debug file to write the workflow specification.')
parser.add_argument(
'--experiment', '--experiments',
dest='experiments',
action='append',
default=None,
help=
('Runners may provide a number of experimental features that can be '
'enabled with this flag. Please sync with the owners of the runner '
'before enabling any experiments.'))
class ProfilingOptions(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument('--profile_cpu',
action='store_true',
help='Enable work item CPU profiling.')
parser.add_argument('--profile_memory',
action='store_true',
help='Enable work item heap profiling.')
parser.add_argument('--profile_location',
default=None,
help='GCS path for saving profiler data.')
class SetupOptions(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
# Options for installing dependencies in the worker.
parser.add_argument(
'--requirements_file',
default=None,
help=
('Path to a requirements file containing package dependencies. '
'Typically it is produced by a pip freeze command. More details: '
'https://pip.pypa.io/en/latest/reference/pip_freeze.html. '
'If used, all the packages specified will be downloaded, '
'cached (use --requirements_cache to change default location), '
'and then staged so that they can be automatically installed in '
'workers during startup. The cache is refreshed as needed '
'avoiding extra downloads for existing packages. Typically the '
'file is named requirements.txt.'))
parser.add_argument(
'--requirements_cache',
default=None,
help=
('Path to a folder to cache the packages specified in '
'the requirements file using the --requirements_file option.'))
parser.add_argument(
'--setup_file',
default=None,
help=
('Path to a setup Python file containing package dependencies. If '
'specified, the file\'s containing folder is assumed to have the '
'structure required for a setuptools setup package. The file must be '
'named setup.py. More details: '
'https://pythonhosted.org/an_example_pypi_project/setuptools.html '
'During job submission a source distribution will be built and the '
'worker will install the resulting package before running any custom '
'code.'))
parser.add_argument(
'--beam_plugin', '--beam_plugins',
dest='beam_plugins',
action='append',
default=None,
help=
('Bootstrap the python process before executing any code by importing '
'all the plugins used in the pipeline. Please pass a comma separated '
'list of import paths to be included. This is currently an '
'experimental flag and provides no stability. Multiple '
'--beam_plugin options can be specified if more than one plugin '
'is needed.'))
parser.add_argument(
'--save_main_session',
default=False,
action='store_true',
help=
('Save the main session state so that pickled functions and classes '
'defined in __main__ (e.g. interactive session) can be unpickled. '
'Some workflows do not need the session state if for instance all '
'their functions/classes are defined in proper modules (not __main__)'
' and the modules are importable in the worker. '))
parser.add_argument(
'--sdk_location',
default='default',
help=
('Override the default location from where the Beam SDK is downloaded. '
'It can be a URL, a GCS path, or a local path to an SDK tarball. '
'Workflow submissions will download or copy an SDK tarball from here. '
'If set to the string "default", a standard SDK location is used. If '
'empty, no SDK is copied.'))
parser.add_argument(
'--extra_package', '--extra_packages',
dest='extra_packages',
action='append',
default=None,
help=
('Local path to a Python package file. The file is expected to be (1) '
'a package tarball (".tar") or (2) a compressed package tarball '
'(".tar.gz") which can be installed using the "pip install" command '
'of the standard pip package. Multiple --extra_package options can '
'be specified if more than one package is needed. During job '
'submission, the files will be staged in the staging area '
'(--staging_location option) and the workers will install them in '
'the same order they were specified on the command line.'))
class TestOptions(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
# Options for e2e test pipeline.
parser.add_argument(
'--on_success_matcher',
default=None,
help=('Verify state/output of e2e test pipeline. This is a pickled '
'version of the matcher which should extend '
'hamcrest.core.base_matcher.BaseMatcher.'))
parser.add_argument(
'--dry_run',
default=False,
help=('Used in unit testing runners without submitting the '
'actual job.'))
def validate(self, validator):
errors = []
if self.view_as(TestOptions).on_success_matcher:
errors.extend(validator.validate_test_matcher(self, 'on_success_matcher'))
return errors
# TODO(silviuc): Add --files_to_stage option.
# This could potentially replace the --requirements_file and --setup_file.
# TODO(silviuc): Non-standard options. Keep them? If yes, add help too!
# Remote execution must check that this option is not None.
class OptionsContext(object):
"""Set default pipeline options for pipelines created in this block.
This is particularly useful for pipelines implicitly created with the
[python list] | PTransform
construct.
Can also be used as a decorator.
"""
overrides = []
def __init__(self, **options):
self.options = options
def __enter__(self):
self.overrides.append(self.options)
def __exit__(self, *exn_info):
self.overrides.pop()
def __call__(self, f, *args, **kwargs):
def wrapper(*args, **kwargs):
with self:
f(*args, **kwargs)
return wrapper
@classmethod
def augment_options(cls, options):
for override in cls.overrides:
for name, value in override.items():
setattr(options, name, value)
return options
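# Illustrative usage (editorial sketch): defaults registered inside the block
# are layered onto any options object passed through augment_options.
#
#     >>> with OptionsContext(runner='DirectRunner'):
#     ...     opts = OptionsContext.augment_options(StandardOptions([]))
#     >>> opts.runner
#     'DirectRunner'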
|
|
#!/usr/bin/env python2.6
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2009,2010,2011,2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the update personality command."""
import unittest
if __name__ == "__main__":
from broker import utils
utils.import_depends()
from broker.brokertest import TestBrokerCommand
from broker.grntest import VerifyGrnsMixin
class TestUpdatePersonality(VerifyGrnsMixin, TestBrokerCommand):
def test_200_invalid_function(self):
""" Verify that the list of built-in functions is restricted """
command = ["update_personality", "--personality", "vulcan-1g-desktop-prod",
"--archetype", "esx_cluster",
"--vmhost_capacity_function", "locals()"]
out = self.badrequesttest(command)
self.matchoutput(out, "name 'locals' is not defined", command)
def test_200_invalid_type(self):
command = ["update_personality", "--personality", "vulcan-1g-desktop-prod",
"--archetype", "esx_cluster",
"--vmhost_capacity_function", "memory - 100"]
out = self.badrequesttest(command)
self.matchoutput(out, "The function should return a dictonary.", command)
def test_200_invalid_dict(self):
command = ["update_personality", "--personality", "vulcan-1g-desktop-prod",
"--archetype", "esx_cluster",
"--vmhost_capacity_function", "{'memory': 'bar'}"]
out = self.badrequesttest(command)
self.matchoutput(out,
"The function should return a dictionary with all "
"keys being strings, and all values being numbers.",
command)
def test_200_missing_memory(self):
command = ["update_personality", "--personality", "vulcan-1g-desktop-prod",
"--archetype", "esx_cluster",
"--vmhost_capacity_function", "{'foo': 5}"]
out = self.badrequesttest(command)
self.matchoutput(out,
"The memory constraint is missing from the returned "
"dictionary.", command)
def test_200_not_enough_memory(self):
command = ["update_personality", "--personality", "vulcan-1g-desktop-prod",
"--archetype", "esx_cluster",
"--vmhost_capacity_function", "{'memory': memory / 4}"]
out = self.badrequesttest(command)
self.matchoutput(out,
"Validation failed for the following clusters:",
command)
self.matchoutput(out,
"ESX Cluster utecl1 is over capacity regarding memory",
command)
def test_100_update_capacity(self):
command = ["update_personality", "--personality", "vulcan-1g-desktop-prod",
"--archetype", "esx_cluster",
"--vmhost_capacity_function", "{'memory': (memory - 1500) * 0.94}"]
self.noouttest(command)
def test_110_update_overcommit(self):
command = ["update_personality", "--personality", "vulcan-1g-desktop-prod",
"--archetype", "esx_cluster",
"--vmhost_overcommit_memory", 1.04]
self.noouttest(command)
def test_115_verify_update_capacity(self):
command = ["show_personality", "--personality", "vulcan-1g-desktop-prod",
"--archetype", "esx_cluster"]
out = self.commandtest(command)
self.matchoutput(out,
"VM host capacity function: {'memory': (memory - 1500) * 0.94}",
command)
self.matchoutput(out, "VM host overcommit factor: 1.04", command)
def test_200_update_cluster_inuse(self):
command = ["update_personality", "--personality=vulcan-1g-desktop-prod",
"--archetype=esx_cluster",
"--cluster"]
out = self.badrequesttest(command)
self.matchoutput(out, "The personality vulcan-1g-desktop-prod is in use", command)
def test_120_update_cluster_requirement(self):
command = ["add_personality", "--archetype=aquilon", "--grn=grn:/ms/ei/aquilon/aqd",
"--personality=unused", "--host_environment=infra"]
self.successtest(command)
command = ["update_personality", "--personality", "unused",
"--archetype=aquilon", "--cluster"]
out = self.successtest(command)
command = ["del_personality", "--personality", "unused",
"--archetype=aquilon"]
out = self.successtest(command)
def test_130_add_testovrpersona_dev(self):
command = ["add_personality", "--archetype=aquilon", "--grn=grn:/ms/ei/aquilon/aqd",
"--personality=testovrpersona/dev", "--host_environment=dev"]
self.successtest(command)
command = ["show_personality", "--personality=testovrpersona/dev",
"--archetype=aquilon"]
out = self.commandtest(command)
self.matchclean(out, "override", command)
command = ["cat", "--archetype=aquilon", "--personality=testovrpersona/dev"]
out = self.commandtest(command)
self.matchclean(out, 'override', command)
def test_131_update_config_override(self):
command = ["update_personality", "--personality=testovrpersona/dev",
"--archetype=aquilon", "--config_override"]
self.successtest(command)
command = ["show_personality", "--personality=testovrpersona/dev",
"--archetype=aquilon"]
out = self.commandtest(command)
self.matchoutput(out, "Config override: enabled", command)
command = ["cat", "--archetype=aquilon", "--personality=testovrpersona/dev"]
out = self.commandtest(command)
self.matchoutput(out, 'include { "features/personality/config_override/config" }',
command)
def test_132_remove_config_override(self):
command = ["update_personality", "--personality=testovrpersona/dev",
"--archetype=aquilon", "--noconfig_override"]
self.successtest(command)
command = ["show_personality", "--personality=testovrpersona/dev",
"--archetype=aquilon"]
out = self.commandtest(command)
self.matchclean(out, "override", command)
command = ["cat", "--archetype=aquilon", "--personality=testovrpersona/dev"]
self.matchclean(out, 'override', command)
def test_133_update_hostenv_testovrpersona(self):
command = ["update_personality", "--personality=testovrpersona/dev",
"--archetype=aquilon", "--host_environment=dev"]
out = self.badrequesttest(command)
self.matchoutput(out, "The personality 'testovrpersona/dev' already has env set to 'dev' and cannot be update",
command)
def test_139_delete_testovrpersona_dev(self):
command = ["del_personality", "--personality=testovrpersona/dev",
"--archetype=aquilon"]
out = self.successtest(command)
def test_140_update_owner_grn(self):
command = ["update_personality", "--personality", "compileserver",
"--archetype", "aquilon", "--grn", "grn:/ms/ei/aquilon/ut2"]
# Some hosts may emit warnings if 'aq make' was not run on them
self.successtest(command)
def test_141_verify_owner_grn(self):
command = ["show_personality", "--personality", "compileserver"]
out = self.commandtest(command)
self.matchoutput(out, "Owned by GRN: grn:/ms/ei/aquilon/ut2", command)
command = ["show_host", "--hostname", "unittest20.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchoutput(out, "Personality: compileserver", command)
self.matchoutput(out, "Owned by GRN: grn:/ms/ei/aquilon/ut2", command)
# unittest02 had a different GRN before, so it should not have been
# updated
command = ["show_host", "--hostname", "unittest02.one-nyp.ms.com"]
out = self.commandtest(command)
self.matchoutput(out, "Personality: compileserver", command)
self.matchoutput(out, "Owned by GRN: grn:/ms/ei/aquilon/aqd", command)
def test_142_update_owner_grn_nohosts(self):
command = ["update_personality", "--personality", "compileserver",
"--archetype", "aquilon", "--grn", "grn:/ms/ei/aquilon/unittest",
"--leave_existing"]
self.noouttest(command)
def test_143_verify_update(self):
command = ["show_personality", "--personality", "compileserver"]
out = self.commandtest(command)
self.matchoutput(out, "Owned by GRN: grn:/ms/ei/aquilon/unittest", command)
command = ["show_host", "--hostname", "unittest20.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchoutput(out, "Personality: compileserver", command)
self.matchoutput(out, "Owned by GRN: grn:/ms/ei/aquilon/ut2", command)
command = ["show_host", "--hostname", "unittest02.one-nyp.ms.com"]
out = self.commandtest(command)
self.matchoutput(out, "Personality: compileserver", command)
self.matchoutput(out, "Owned by GRN: grn:/ms/ei/aquilon/aqd", command)
def test_144_verify_cat(self):
command = ["cat", "--personality", "compileserver"]
out = self.commandtest(command)
self.searchoutput(out, r'"/system/personality/owner_eon_id" = %d;' %
self.grns["grn:/ms/ei/aquilon/unittest"], command)
command = ["cat", "--hostname", "unittest02.one-nyp.ms.com", "--data"]
out = self.commandtest(command)
self.searchoutput(out, r'"system/owner_eon_id" = %d;' %
self.grns["grn:/ms/ei/aquilon/aqd"], command)
command = ["cat", "--hostname", "unittest20.aqd-unittest.ms.com", "--data"]
out = self.commandtest(command)
self.searchoutput(out, r'"system/owner_eon_id" = %d;' %
self.grns["grn:/ms/ei/aquilon/ut2"], command)
def test_150_update_hostenv_infra(self):
command = ["add_personality", "--archetype=windows", "--grn=grn:/ms/ei/aquilon/aqd",
"--personality=prod-perim", "--host_environment=legacy"]
self.successtest(command)
command = ["update_personality", "--personality=prod-perim",
"--archetype=windows", "--host_environment=infra"]
out = self.successtest(command)
command = ["show_personality", "--personality=prod-perim",
"--archetype=windows"]
out = self.commandtest(command)
self.matchoutput(out, "Environment: infra", command)
command = ["del_personality", "--archetype=windows", "--personality=prod-perim"]
self.successtest(command)
def test_155_update_hostenv_prod(self):
command = ["update_personality", "--personality=desktop",
"--archetype=windows", "--host_environment=prod"]
out = self.successtest(command)
command = ["show_personality", "--personality=desktop",
"--archetype=windows"]
out = self.commandtest(command)
self.matchoutput(out, "Environment: prod", command)
command = ["cat", "--personality=desktop"]
out = self.commandtest(command)
self.searchoutput(out, r'"/system/personality/host_environment" = "prod";', command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestUpdatePersonality)
unittest.TextTestRunner(verbosity=2).run(suite)
|
|
"""
SQLite3 backend for the sqlite3 module in the standard library.
"""
import decimal
import math
import re
import warnings
from sqlite3 import dbapi2 as Database
import pytz
from django.core.exceptions import ImproperlyConfigured
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.utils import timezone
from django.utils.dateparse import (
parse_date, parse_datetime, parse_duration, parse_time,
)
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
def decoder(conv_func):
"""
Convert bytestrings from Python's sqlite3 interface to a regular string.
"""
return lambda s: conv_func(s.decode())
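# Illustrative example (editorial): the sqlite3 layer hands converters raw
# bytes, so decoder() decodes to text before delegating, e.g.
#
#     >>> decoder(parse_date)(b'2017-03-14')
#     datetime.date(2017, 3, 14)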
Database.register_converter("bool", lambda s: s == b'1')
Database.register_converter("time", decoder(parse_time))
Database.register_converter("date", decoder(parse_date))
Database.register_converter("datetime", decoder(parse_datetime))
Database.register_converter("timestamp", decoder(parse_datetime))
Database.register_converter("TIMESTAMP", decoder(parse_datetime))
Database.register_converter("decimal", decoder(decimal.Decimal))
Database.register_adapter(decimal.Decimal, backend_utils.rev_typecast_decimal)
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'sqlite'
display_name = 'SQLite'
# SQLite doesn't actually support most of these types, but it "does the right
# thing" given more verbose field definitions, so leave them as is so that
# schema inspection is more useful.
data_types = {
'AutoField': 'integer',
'BigAutoField': 'integer',
'BinaryField': 'BLOB',
'BooleanField': 'bool',
'CharField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'datetime',
'DecimalField': 'decimal',
'DurationField': 'bigint',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'real',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'char(15)',
'GenericIPAddressField': 'char(39)',
'NullBooleanField': 'bool',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer unsigned',
'PositiveSmallIntegerField': 'smallint unsigned',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
'UUIDField': 'char(32)',
}
data_types_suffix = {
'AutoField': 'AUTOINCREMENT',
'BigAutoField': 'AUTOINCREMENT',
}
# SQLite requires LIKE statements to include an ESCAPE clause if the value
# being escaped has a percent or underscore in it.
# See http://www.sqlite.org/lang_expr.html for an explanation.
operators = {
'exact': '= %s',
'iexact': "LIKE %s ESCAPE '\\'",
'contains': "LIKE %s ESCAPE '\\'",
'icontains': "LIKE %s ESCAPE '\\'",
'regex': 'REGEXP %s',
'iregex': "REGEXP '(?i)' || %s",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE %s ESCAPE '\\'",
'endswith': "LIKE %s ESCAPE '\\'",
'istartswith': "LIKE %s ESCAPE '\\'",
'iendswith': "LIKE %s ESCAPE '\\'",
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, *, _) should be
# escaped on database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
pattern_ops = {
'contains': r"LIKE '%%' || {} || '%%' ESCAPE '\'",
'icontains': r"LIKE '%%' || UPPER({}) || '%%' ESCAPE '\'",
'startswith': r"LIKE {} || '%%' ESCAPE '\'",
'istartswith': r"LIKE UPPER({}) || '%%' ESCAPE '\'",
'endswith': r"LIKE '%%' || {} ESCAPE '\'",
'iendswith': r"LIKE '%%' || UPPER({}) ESCAPE '\'",
}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
# Classes instantiated in __init__().
client_class = DatabaseClient
creation_class = DatabaseCreation
features_class = DatabaseFeatures
introspection_class = DatabaseIntrospection
ops_class = DatabaseOperations
def get_connection_params(self):
settings_dict = self.settings_dict
if not settings_dict['NAME']:
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value.")
kwargs = {
'database': settings_dict['NAME'],
'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
}
kwargs.update(settings_dict['OPTIONS'])
# Always allow the underlying SQLite connection to be shareable
# between multiple threads. The safe-guarding will be handled at a
# higher level by the `BaseDatabaseWrapper.allow_thread_sharing`
# property. This is necessary as the shareability is disabled by
# default in pysqlite and it cannot be changed once a connection is
# opened.
if 'check_same_thread' in kwargs and kwargs['check_same_thread']:
warnings.warn(
'The `check_same_thread` option was provided and set to '
'True. It will be overridden with False. Use the '
'`DatabaseWrapper.allow_thread_sharing` property instead '
'for controlling thread shareability.',
RuntimeWarning
)
kwargs.update({'check_same_thread': False, 'uri': True})
return kwargs
def get_new_connection(self, conn_params):
conn = Database.connect(**conn_params)
conn.create_function("django_date_extract", 2, _sqlite_date_extract)
conn.create_function("django_date_trunc", 2, _sqlite_date_trunc)
conn.create_function("django_datetime_cast_date", 2, _sqlite_datetime_cast_date)
conn.create_function("django_datetime_cast_time", 2, _sqlite_datetime_cast_time)
conn.create_function("django_datetime_extract", 3, _sqlite_datetime_extract)
conn.create_function("django_datetime_trunc", 3, _sqlite_datetime_trunc)
conn.create_function("django_time_extract", 2, _sqlite_time_extract)
conn.create_function("django_time_trunc", 2, _sqlite_time_trunc)
conn.create_function("django_time_diff", 2, _sqlite_time_diff)
conn.create_function("django_timestamp_diff", 2, _sqlite_timestamp_diff)
conn.create_function("regexp", 2, _sqlite_regexp)
conn.create_function("django_format_dtdelta", 3, _sqlite_format_dtdelta)
conn.create_function("django_power", 2, _sqlite_power)
conn.execute('PRAGMA foreign_keys = ON')
return conn
def init_connection_state(self):
pass
def create_cursor(self, name=None):
return self.connection.cursor(factory=SQLiteCursorWrapper)
def close(self):
self.validate_thread_sharing()
# If database is in memory, closing the connection destroys the
# database. To prevent accidental data loss, ignore close requests on
# an in-memory db.
if not self.is_in_memory_db():
BaseDatabaseWrapper.close(self)
def _savepoint_allowed(self):
# Two conditions are required here:
# - A sufficiently recent version of SQLite to support savepoints,
# - Being in a transaction, which can only happen inside 'atomic'.
# When 'isolation_level' is not None, sqlite3 commits before each
# savepoint; it's a bug. When it is None, savepoints don't make sense
# because autocommit is enabled. The only exception is inside 'atomic'
# blocks. To work around that bug, on SQLite, 'atomic' starts a
# transaction explicitly rather than simply disable autocommit.
return self.features.uses_savepoints and self.in_atomic_block
def _set_autocommit(self, autocommit):
if autocommit:
level = None
else:
# sqlite3's internal default is ''. It's different from None.
# See Modules/_sqlite/connection.c.
level = ''
# 'isolation_level' is a misleading API.
# SQLite always runs at the SERIALIZABLE isolation level.
with self.wrap_database_errors:
self.connection.isolation_level = level
def disable_constraint_checking(self):
if self.in_atomic_block:
# sqlite3 cannot disable constraint checking inside a transaction.
return False
self.cursor().execute('PRAGMA foreign_keys = OFF')
return True
def enable_constraint_checking(self):
self.cursor().execute('PRAGMA foreign_keys = ON')
def check_constraints(self, table_names=None):
"""
Check each table name in `table_names` for rows with invalid foreign
key references. This method is intended to be used in conjunction with
`disable_constraint_checking()` and `enable_constraint_checking()`, to
determine if rows with invalid references were entered while constraint
checks were off.
Raise an IntegrityError on the first invalid foreign key reference
encountered (if any) and provide detailed information about the
invalid reference in the error message.
Backends can override this method if they can more directly apply
constraint checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE")
"""
with self.cursor() as cursor:
if table_names is None:
table_names = self.introspection.table_names(cursor)
for table_name in table_names:
primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
if not primary_key_column_name:
continue
key_columns = self.introspection.get_key_columns(cursor, table_name)
for column_name, referenced_table_name, referenced_column_name in key_columns:
cursor.execute(
"""
SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
LEFT JOIN `%s` as REFERRED
ON (REFERRING.`%s` = REFERRED.`%s`)
WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL
"""
% (
primary_key_column_name, column_name, table_name,
referenced_table_name, column_name, referenced_column_name,
column_name, referenced_column_name,
)
)
for bad_row in cursor.fetchall():
raise utils.IntegrityError(
"The row in table '%s' with primary key '%s' has an "
"invalid foreign key: %s.%s contains a value '%s' that "
"does not have a corresponding value in %s.%s." % (
table_name, bad_row[0], table_name, column_name,
bad_row[1], referenced_table_name, referenced_column_name,
)
)
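# Editorial usage sketch (assumes `connection` is an open DatabaseWrapper): the
# intended pattern pairs the three constraint methods so bulk loads can defer
# foreign key checks and still fail loudly afterwards.
#
#     disabled = connection.disable_constraint_checking()
#     try:
#         ...  # bulk inserts that may temporarily violate foreign keys
#     finally:
#         if disabled:
#             connection.enable_constraint_checking()
#     connection.check_constraints()  # raises IntegrityError on dangling rows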
def is_usable(self):
return True
def _start_transaction_under_autocommit(self):
"""
Start a transaction explicitly in autocommit mode.
Staying in autocommit mode works around a bug of sqlite3 that breaks
savepoints when autocommit is disabled.
"""
self.cursor().execute("BEGIN")
def is_in_memory_db(self):
return self.creation.is_in_memory_db(self.settings_dict['NAME'])
FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s')
class SQLiteCursorWrapper(Database.Cursor):
"""
Django uses "format" style placeholders, but pysqlite2 uses "qmark" style.
This fixes it -- but note that if you want to use a literal "%s" in a query,
you'll need to use "%%s".
"""
def execute(self, query, params=None):
if params is None:
return Database.Cursor.execute(self, query)
query = self.convert_query(query)
return Database.Cursor.execute(self, query, params)
def executemany(self, query, param_list):
query = self.convert_query(query)
return Database.Cursor.executemany(self, query, param_list)
def convert_query(self, query):
return FORMAT_QMARK_REGEX.sub('?', query).replace('%%', '%')
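# Illustrative example (editorial) of the placeholder rewrite above; a literal
# percent sign must be written as '%%' so it survives the rewrite:
#
#     >>> FORMAT_QMARK_REGEX.sub('?', "name = %s AND pct = '90%%'").replace('%%', '%')
#     "name = ? AND pct = '90%'"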
def _sqlite_date_extract(lookup_type, dt):
if dt is None:
return None
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'week_day':
return (dt.isoweekday() % 7) + 1
elif lookup_type == 'week':
return dt.isocalendar()[1]
elif lookup_type == 'quarter':
return math.ceil(dt.month / 3)
else:
return getattr(dt, lookup_type)
def _sqlite_date_trunc(lookup_type, dt):
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'year':
return "%i-01-01" % dt.year
elif lookup_type == 'quarter':
month_in_quarter = dt.month - (dt.month - 1) % 3
return '%i-%02i-01' % (dt.year, month_in_quarter)
elif lookup_type == 'month':
return "%i-%02i-01" % (dt.year, dt.month)
elif lookup_type == 'day':
return "%i-%02i-%02i" % (dt.year, dt.month, dt.day)
def _sqlite_time_trunc(lookup_type, dt):
try:
dt = backend_utils.typecast_time(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'hour':
return "%02i:00:00" % dt.hour
elif lookup_type == 'minute':
return "%02i:%02i:00" % (dt.hour, dt.minute)
elif lookup_type == 'second':
return "%02i:%02i:%02i" % (dt.hour, dt.minute, dt.second)
def _sqlite_datetime_parse(dt, tzname):
if dt is None:
return None
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if tzname is not None:
dt = timezone.localtime(dt, pytz.timezone(tzname))
return dt
def _sqlite_datetime_cast_date(dt, tzname):
dt = _sqlite_datetime_parse(dt, tzname)
if dt is None:
return None
return dt.date().isoformat()
def _sqlite_datetime_cast_time(dt, tzname):
dt = _sqlite_datetime_parse(dt, tzname)
if dt is None:
return None
return dt.time().isoformat()
def _sqlite_datetime_extract(lookup_type, dt, tzname):
dt = _sqlite_datetime_parse(dt, tzname)
if dt is None:
return None
if lookup_type == 'week_day':
return (dt.isoweekday() % 7) + 1
elif lookup_type == 'week':
return dt.isocalendar()[1]
elif lookup_type == 'quarter':
return math.ceil(dt.month / 3)
else:
return getattr(dt, lookup_type)
def _sqlite_datetime_trunc(lookup_type, dt, tzname):
dt = _sqlite_datetime_parse(dt, tzname)
if dt is None:
return None
if lookup_type == 'year':
return "%i-01-01 00:00:00" % dt.year
elif lookup_type == 'quarter':
month_in_quarter = dt.month - (dt.month - 1) % 3
return '%i-%02i-01 00:00:00' % (dt.year, month_in_quarter)
elif lookup_type == 'month':
return "%i-%02i-01 00:00:00" % (dt.year, dt.month)
elif lookup_type == 'day':
return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
elif lookup_type == 'hour':
return "%i-%02i-%02i %02i:00:00" % (dt.year, dt.month, dt.day, dt.hour)
elif lookup_type == 'minute':
return "%i-%02i-%02i %02i:%02i:00" % (dt.year, dt.month, dt.day, dt.hour, dt.minute)
elif lookup_type == 'second':
return "%i-%02i-%02i %02i:%02i:%02i" % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
def _sqlite_time_extract(lookup_type, dt):
if dt is None:
return None
try:
dt = backend_utils.typecast_time(dt)
except (ValueError, TypeError):
return None
return getattr(dt, lookup_type)
def _sqlite_format_dtdelta(conn, lhs, rhs):
"""
LHS and RHS can be either:
- An integer number of microseconds
- A string representing a timedelta object
- A string representing a datetime
"""
try:
if isinstance(lhs, int):
lhs = str(decimal.Decimal(lhs) / decimal.Decimal(1000000))
real_lhs = parse_duration(lhs)
if real_lhs is None:
real_lhs = backend_utils.typecast_timestamp(lhs)
if isinstance(rhs, int):
rhs = str(decimal.Decimal(rhs) / decimal.Decimal(1000000))
real_rhs = parse_duration(rhs)
if real_rhs is None:
real_rhs = backend_utils.typecast_timestamp(rhs)
if conn.strip() == '+':
out = real_lhs + real_rhs
else:
out = real_lhs - real_rhs
except (ValueError, TypeError):
return None
# typecast_timestamp returns a date or a datetime without timezone.
# It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]"
return str(out)
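# Illustrative example (editorial): adding an "HH:MM:SS" duration string to a
# timestamp string yields a plain timestamp string again.
#
#     >>> _sqlite_format_dtdelta('+', '2018-01-01 10:00:00', '1:00:00')
#     '2018-01-01 11:00:00'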
def _sqlite_time_diff(lhs, rhs):
left = backend_utils.typecast_time(lhs)
right = backend_utils.typecast_time(rhs)
return (
(left.hour * 60 * 60 * 1000000) +
(left.minute * 60 * 1000000) +
(left.second * 1000000) +
(left.microsecond) -
(right.hour * 60 * 60 * 1000000) -
(right.minute * 60 * 1000000) -
(right.second * 1000000) -
(right.microsecond)
)
def _sqlite_timestamp_diff(lhs, rhs):
left = backend_utils.typecast_timestamp(lhs)
right = backend_utils.typecast_timestamp(rhs)
return (left - right).total_seconds() * 1000000
def _sqlite_regexp(re_pattern, re_string):
return bool(re.search(re_pattern, str(re_string))) if re_string is not None else False
def _sqlite_power(x, y):
return x ** y
|
|
# coding=utf-8
"""Helper utilities module to compute various statistics for the current AOI.
:copyright: (c) 2013 by Tim Sutton
:license: GPLv3, see LICENSE for more details.
"""
import os
import sys
import getpass
from tempfile import mkstemp
import xml
import time
from datetime import date, timedelta
import zipfile
from reporter import config
from reporter.osm_node_parser import OsmNodeParser
from reporter.osm_way_parser import OsmParser
from reporter.queries import RESOURCES_MAP
from reporter import LOGGER
def overpass_resource_base_path(feature_type):
"""Get the overpass resource base path according to the feature we extract.
:param feature_type: The type of feature :
buildings, building-points, roads, potential-idp, boundary-[1,11]
:type feature_type: str
:return The resource folder.
:rtype str
"""
return os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'resources',
'overpass',
RESOURCES_MAP[feature_type],
RESOURCES_MAP[feature_type]))
def shapefile_resource_base_path(feature_type):
"""Get the shapefile resource base path according to the feature we extract.
:param feature_type: The type of feature :
buildings, building-points, roads, potential-idp, boundary-[1,11]
:type feature_type: str
    :returns: The resource folder.
    :rtype: str
"""
return os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'resources',
'shapefile',
RESOURCES_MAP[feature_type],
RESOURCES_MAP[feature_type]))
def generic_shapefile_base_path():
"""Get the generic shapefile resource base path.
    :returns: The generic resource folder.
    :rtype: str
"""
return os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'resources',
'shapefile',
'generic',
'generic'))
def get_totals(sorted_user_list):
"""Given a sorted user list, get the totals for ways and nodes.
:param sorted_user_list: User dicts sorted by number of ways.
:type sorted_user_list: list
    :returns: Two-tuple (int, int) containing node count, way count.
:rtype: (int, int)
"""
way_count = 0
node_count = 0
for user in sorted_user_list:
way_count += user['ways']
node_count += user['nodes']
return node_count, way_count
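# Illustrative example (added for clarity, not part of the original module).
# Note the return order is (node_count, way_count):
#
#   get_totals([{'ways': 5, 'nodes': 10}, {'ways': 2, 'nodes': 3}])
#   -> (13, 7)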
def split_bbox(bbox):
"""Split a bounding box into its parts.
:param bbox: String describing a bbox e.g. '106.78674459457397,
-6.141301491467023,106.80691480636597,-6.133834354201348'
:type bbox: str
    :returns: A dict with keys: 'SW_lng, SW_lat, NE_lng, NE_lat'
:rtype: dict
"""
values = bbox.split(',')
if not len(values) == 4:
raise ValueError('Invalid bbox')
# pylint: disable=W0141
    # The next line could use a list comprehension instead of map(); see
# http://pylint-messages.wikidot.com/messages:w0141
values = map(float, values)
# pylint: enable=W0141
names = ['SW_lng', 'SW_lat', 'NE_lng', 'NE_lat']
coordinates = dict(zip(names, values))
return coordinates
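# Illustrative example (added for clarity; the coordinates are hypothetical):
#
#   split_bbox('106.78,-6.14,106.80,-6.13')
#   -> {'SW_lng': 106.78, 'SW_lat': -6.14, 'NE_lng': 106.8, 'NE_lat': -6.13}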
def split_polygon(polygon):
"""Split polygon array to string.
:param polygon: list of array describing polygon area e.g.
'[[28.01513671875,-25.77516058680343],[28.855590820312504,-25.567220388070023],
[29.168701171875004,-26.34265280938059]]
:type polygon: list
:returns: A string of polygon e.g. 50.7 7.1 50.7 7.12 50.71 7.11
:rtype: str
"""
if len(polygon) < 3:
raise ValueError(
'At least 3 lat/lon float value pairs must be provided')
polygon_string = ''
for poly in polygon:
polygon_string += ' '.join(map(str, poly))
polygon_string += ' '
return polygon_string.strip()
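# Illustrative example (added for clarity; the coordinates are hypothetical):
#
#   split_polygon([[28.0, -25.7], [28.8, -25.5], [29.1, -26.3]])
#   -> '28.0 -25.7 28.8 -25.5 29.1 -26.3'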
def osm_object_contributions(
osm_file,
tag_name,
date_start=None,
date_end=None):
"""Compile a summary of user contributions for the selected osm data type.
:param osm_file: A file object reading from a .osm file.
:type osm_file: file, FileIO
:param tag_name: The tag name we want to filter on.
:type tag_name: str
:param date_start: The start date we want to filter
:type date_start: float
:param date_end: The end date we want to filter
:type date_end: float
:returns: A list of dicts where items in the list are sorted from highest
contributor (based on number of ways) down to lowest. Each element
in the list is a dict in the form: {
'user': <user>,
'ways': <way count>,
'nodes': <node count>,
'timeline': <timelinedict>,
'best': <most ways in a single day>,
'worst': <least ways in single day>,
'average': <average ways across active days>,
'crew': <bool> }
where crew is used to designate users who are part of an active
data gathering campaign.
The timeline dict will contain a collection of dates and
the total number of ways created on that date e.g.
{
u'2010-12-09': 10,
u'2012-07-10': 14
}
:rtype: list
"""
parser = OsmParser(
start_date=date_start,
end_date=date_end)
try:
xml.sax.parse(osm_file, parser)
except xml.sax.SAXParseException:
LOGGER.exception('Failed to parse OSM xml.')
raise
way_count_dict = parser.wayCountDict
node_count_dict = parser.nodeCountDict
timelines = parser.userDayCountDict
# Convert to a list of dicts so we can sort it.
crew_list = config.CREW
user_list = []
for key, value in way_count_dict.items():
start_date, end_date = date_range(timelines[key])
start_date = time.strftime('%d-%m-%Y', start_date.timetuple())
end_date = time.strftime('%d-%m-%Y', end_date.timetuple())
user_timeline = timelines[key]
node_count = 0
if key in node_count_dict:
node_count = node_count_dict[key]
record = {
'name': key,
'ways': value,
'nodes': node_count,
'timeline': interpolated_timeline(user_timeline),
'start': start_date,
'end': end_date,
'activeDays': len(user_timeline),
'best': best_active_day(user_timeline),
'worst': worst_active_day(user_timeline),
'average': average_for_active_days(user_timeline)
}
user_list.append(record)
for key, value in node_count_dict.items():
start_date, end_date = date_range(timelines[key])
start_date = time.strftime('%d-%m-%Y', start_date.timetuple())
end_date = time.strftime('%d-%m-%Y', end_date.timetuple())
user_timeline = timelines[key]
record = {
'name': key,
'ways': 0,
'nodes': value,
'timeline': interpolated_timeline(user_timeline),
'start': start_date,
'end': end_date,
'activeDays': len(user_timeline),
'best': best_active_day(user_timeline),
'worst': worst_active_day(user_timeline),
'average': average_for_active_days(user_timeline)
}
user_list.append(record)
# Sort it
sorted_user_list = sorted(
user_list, key=lambda d: (
-d['ways'],
d['nodes'],
d['name'],
d['timeline'],
d['start'],
d['end'],
d['activeDays'],
d['best'],
d['worst'],
d['average']))
return sorted_user_list
def date_range(timeline):
"""Given a timeline, determine the start and end dates.
The timeline may be sparse (containing fewer entries than all the dates
between the min and max dates) and since it is a dict,
the dates may be in any order.
:param timeline: A dictionary of non-sequential dates (in YYYY-MM-DD) as
keys and values (representing ways collected on that day).
:type timeline: dict
:returns: A tuple containing two dates:
* start_date - a date object representing the earliest date in the
time line.
* end_date - a date object representing the newest date in the time
line.
:rtype: (date, date)
"""
start_date = None
end_date = None
for next_date in timeline.keys():
year, month, day = next_date.split('-')
message = 'Date: %s' % next_date
LOGGER.info(message)
timeline_date = date(int(year), int(month), int(day))
if start_date is None:
start_date = timeline_date
if end_date is None:
end_date = timeline_date
if timeline_date < start_date:
start_date = timeline_date
if timeline_date > end_date:
end_date = timeline_date
return start_date, end_date
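# Illustrative example (added for clarity): the order of the keys does not matter.
#
#   date_range({'2012-09-22': 5, '2012-09-21': 10})
#   -> (datetime.date(2012, 9, 21), datetime.date(2012, 9, 22))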
def average_for_active_days(timeline):
"""Compute the average activity per active day in a sparse timeline.
:param timeline: A dictionary of non-sequential dates (in YYYY-MM-DD) as
keys and values (representing ways collected on that day).
:type timeline: dict
:returns: Number of entities captured per day rounded to the nearest int.
:rtype: int
"""
count = 0
total = 0
for value in list(timeline.values()):
if value > 0:
count += 1
total += value
    # In Python 3, / performs true division, so cast the result back to int
average = int(total / count)
return average
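# Illustrative example (added for clarity): two active days totalling 15 ways.
#
#   average_for_active_days({'2012-09-21': 10, '2012-09-22': 5})
#   -> 7   (int(15 / 2))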
def best_active_day(timeline):
"""Compute the best activity for a single active day in a sparse timeline.
:param timeline: A dictionary of non-sequential dates (in YYYY-MM-DD) as
keys and values (representing ways collected on that day).
:type timeline: dict
:returns: Number of entities captured for the user's best day.
:rtype: int
"""
best = 0
for value in list(timeline.values()):
if value > best:
best = value
return best
def worst_active_day(timeline):
"""Compute the worst activity for a single active day in a sparse timeline.
:param timeline: A dictionary of non-sequential dates (in YYYY-MM-DD) as
keys and values (representing ways collected on that day).
:type timeline: dict
:returns: Number of entities captured for the user's worst day.
:rtype: int
"""
if len(timeline) < 1:
return 0
worst = list(timeline.values())[0]
for value in list(timeline.values()):
        if value == 0:  # should never happen, but just in case
continue
if value < worst:
worst = value
return worst
def interpolated_timeline(timeline):
"""Interpolate a timeline given a sparse timeline.
A sparse timelines is a sequence of dates containing no days of zero
activity. An interpolated timeline is a sequence of dates where there is
an entry per day in the date range regardless of whether there was any
activity or not.
:param timeline: A dictionary of non-sequential dates (in YYYY-MM-DD) as
keys and values (representing ways collected on that day).
:type timeline: dict
    :returns: A JSON-formatted string of ["YYYY-MM-DD", total] pairs with one
        entry per day in the date range of the input timeline; days with no
        recorded activity are included with a total of 0.
    :rtype: str
    Given an input looking like this::
        {
            u'2012-09-24': 1,
            u'2012-09-21': 10,
            u'2012-09-25': 5,
        }
    The returned string will be in the form::
        [["2012-09-21",10],["2012-09-22",0],["2012-09-23",0],
         ["2012-09-24",1],["2012-09-25",5]]
"""
# Work out the earliest and latest day
start_date, end_date = date_range(timeline)
# Loop through them, adding an entry for each day
time_line = '['
for current_date in date_range_iterator(start_date, end_date):
date_string = time.strftime('%Y-%m-%d', current_date.timetuple())
if date_string in timeline:
value = timeline[date_string]
else:
value = 0
if time_line != '[':
time_line += ','
time_line += '["%s",%i]' % (date_string, value)
time_line += ']'
return time_line
def date_range_iterator(start_date, end_date):
"""Given two dates return a collection of dates between start and end.
:param start_date: Date representing the start date.
:type start_date: date
:param end_date: Date representing the end date.
:type end_date: date
:returns: Iterable collection yielding dates.
:rtype: iterable
"""
for n in range(int((end_date - start_date).days) + 1):
yield start_date + timedelta(n)
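# Illustrative example (added for clarity): the range is inclusive of both ends.
#
#   list(date_range_iterator(date(2012, 9, 21), date(2012, 9, 23)))
#   -> [date(2012, 9, 21), date(2012, 9, 22), date(2012, 9, 23)]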
def osm_nodes_by_user(file_handle, username):
"""Obtain the nodes collected by a single user from an OSM file.
:param file_handle: File handle to an open OSM XML document.
:type file_handle: file
:param username: Name of the user for whom nodes should be collected.
:type username: str
:returns: A list of nodes for the given user.
:rtype: list
"""
parser = OsmNodeParser(username)
xml.sax.parse(file_handle, parser)
return parser.nodes
def temp_dir(sub_dir='work'):
"""Obtain the temporary working directory for the operating system.
An osm-reporter subdirectory will automatically be created under this.
.. note:: You can use this together with unique_filename to create
a file in a temporary directory under the inasafe workspace. e.g.
tmpdir = temp_dir('testing')
tmpfile = unique_filename(dir=tmpdir)
print tmpfile
/tmp/osm-reporter/23-08-2012/timlinux/testing/tmpMRpF_C
If you specify OSM_REPORTER_WORK_DIR as an environment var, it will be
used in preference to the system temp directory.
.. note:: This function was taken from InaSAFE (http://inasafe.org) with
minor adaptions.
:param sub_dir: Optional argument which will cause an additional
        subdirectory to be created e.g. ``/tmp/inasafe/foo/``.
:type sub_dir: str
:returns: Path to the output clipped layer (placed in the system temp dir).
:rtype: str
"""
user = getpass.getuser().replace(' ', '_')
current_date = date.today()
date_string = current_date.isoformat()
if 'OSM_REPORTER_WORK_DIR' in os.environ:
new_directory = os.environ['OSM_REPORTER_WORK_DIR']
else:
        # The following four lines are a workaround for tempfile.tempdir()
        # unreliability
handle, filename = mkstemp()
os.close(handle)
new_directory = os.path.dirname(filename)
os.remove(filename)
path = os.path.join(
new_directory, 'osm-reporter', date_string, user, sub_dir)
if not os.path.exists(path):
# Ensure that the dir is world writable
# Umask sets the new mask and returns the old
old_mask = os.umask(0000)
try:
os.makedirs(path, 0o0777)
except OSError:
            # one of the directories in the path may already exist
pass
# Reinstate the old mask for tmp
os.umask(old_mask)
if not os.path.exists(path):
raise Exception('Could not create working directory', path)
return path
def unique_filename(**kwargs):
"""Create new filename guaranteed not to exist previously
:param kwargs: A dictionary of keyword arguments which are passed on to
``mkstemp(**kwargs)``
.. note:: This function was taken from InaSAFE (http://inasafe.org) with
minor adaptions.
Use mkstemp to create the file, then remove it and return the name
If dir is specified, the tempfile will be created in the path specified
otherwise the file will be created in a directory following this scheme:
:file:`/tmp/osm-reporter/<dd-mm-yyyy>/<user>/impacts'
See http://docs.python.org/library/tempfile.html for details.
Example usage::
tempdir = temp_dir(sub_dir='test')
filename = unique_filename(suffix='.keywords', dir=tempdir)
print filename
/tmp/osm-reporter/23-08-2012/timlinux/test/tmpyeO5VR.keywords
Or with no preferred subdir, a default subdir of 'impacts' is used::
filename = unique_filename(suffix='.shp')
print filename
/tmp/osm-reporter/23-08-2012/timlinux/impacts/tmpoOAmOi.shp
"""
if 'dir' not in kwargs:
path = temp_dir('impacts')
kwargs['dir'] = path
else:
path = temp_dir(kwargs['dir'])
kwargs['dir'] = path
if not os.path.exists(kwargs['dir']):
# Ensure that the dir mask won't conflict with the mode
# Umask sets the new mask and returns the old
umask = os.umask(0000)
        # Ensure that the dir is world writable by explicitly setting mode
os.makedirs(kwargs['dir'], 0o0777)
# Reinstate the old mask for tmp dir
os.umask(umask)
# Now we have the working dir set up go on and return the filename
handle, filename = mkstemp(**kwargs)
# Need to close it using the filehandle first for windows!
os.close(handle)
try:
os.remove(filename)
except OSError:
pass
return filename
def zip_shp(shp_path, extra_ext=None, remove_file=False):
"""Zip shape file and its gang (.shx, .dbf, .prj).
.. note:: This function was taken from InaSAFE (http://inasafe.org) with
minor adaptions.
:param shp_path: Path to the main shape file.
:type shp_path: str
:param extra_ext: List of extra extensions (as strings) related to
shapefile that should be packaged up.
:type extra_ext: list
:param remove_file: bool - whether the original shp files should be
removed after zipping is complete. Defaults to False.
:type remove_file: bool
:returns: Full path to the created shapefile.
:rtype: str
"""
# go to the directory
my_cwd = os.getcwd()
shp_dir, shp_name = os.path.split(shp_path)
os.chdir(shp_dir)
shp_base_name, _ = os.path.splitext(shp_name)
extensions = ['.shp', '.shx', '.dbf', '.prj']
if extra_ext is not None:
extensions.extend(extra_ext)
# zip files
zip_filename = shp_base_name + '.zip'
zip_object = zipfile.ZipFile(zip_filename, 'w')
for ext in extensions:
if os.path.isfile(shp_base_name + ext):
zip_object.write(shp_base_name + ext)
zip_object.close()
if remove_file:
for ext in extensions:
if os.path.isfile(shp_base_name + ext):
os.remove(shp_base_name + ext)
os.chdir(my_cwd)
return os.path.join(shp_dir, zip_filename)
def which(name, flags=os.X_OK):
"""Search PATH for executable files with the given name.
    .. note:: This function was taken verbatim from the twisted framework,
licence available here:
http://twistedmatrix.com/trac/browser/tags/releases/twisted-8.2.0/LICENSE
On newer versions of MS-Windows, the PATHEXT environment variable will be
set to the list of file extensions for files considered executable. This
    will normally include things like ".EXE". This function will also find files
with the given name ending with any of these extensions.
On MS-Windows the only flag that has any meaning is os.F_OK. Any other
flags will be ignored.
:type name: C{str}
:param name: The name for which to search.
:type flags: C{int}
:param flags: Arguments to L{os.access}.
:rtype: C{list}
    :returns: A list of the full paths to files found, in the
order in which they were found.
"""
if os.path.exists('/usr/bin/%s' % name):
return ['/usr/bin/%s' % name]
if os.path.exists('/usr/local/bin/%s' % name):
return ['/usr/local/bin/%s' % name]
result = []
# pylint: disable=W0141
extensions = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
# pylint: enable=W0141
path = os.environ.get('PATH', None)
# In c6c9b26 we removed this hard coding for issue #529 but I am
# adding it back here in case the user's path does not include the
# gdal binary dir on OSX but it is actually there. (TS)
if sys.platform == 'darwin': # Mac OS X
gdal_prefix = (
'/Library/Frameworks/GDAL.framework/'
'Versions/1.10/Programs/')
path = '%s:%s' % (path, gdal_prefix)
message = 'Search path: %s' % path
LOGGER.debug(message)
if path is None:
return []
for p in path.split(os.pathsep):
p = os.path.join(p, name)
if os.access(p, flags):
result.append(p)
for e in extensions:
pext = p + e
if os.access(pext, flags):
result.append(pext)
return result
def short_version(version):
"""Get a shorter version, only with the major and minor version.
:param version: The version.
:type version: str
    :returns: 'major.minor' version number.
    :rtype: float
"""
return float('.'.join(version.split('.')[0:2]))
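# Illustrative example (added for clarity): only major.minor is kept.
#
#   short_version('2.14.3')
#   -> 2.14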
|
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard import policy
from openstack_dashboard.usage import quotas
from openstack_dashboard.utils import filters
POLICY_CHECK = getattr(settings, "POLICY_CHECK_FUNCTION",
lambda policy, request, target: True)
class DeleteGroup(policy.PolicyTargetMixin, tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Security Group",
u"Delete Security Groups",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Security Group",
u"Deleted Security Groups",
count
)
def allowed(self, request, security_group=None):
policy_target = self.get_policy_target(request, security_group)
if api.base.is_service_enabled(request, "network"):
policy = (("network", "delete_security_group"),)
else:
policy = (("compute", "compute_extension:security_groups"),)
if not POLICY_CHECK(policy, request, policy_target):
return False
if not security_group:
return True
return security_group.name != 'default'
def delete(self, request, obj_id):
api.network.security_group_delete(request, obj_id)
class CreateGroup(tables.LinkAction):
name = "create"
verbose_name = _("Create Security Group")
url = "horizon:project:access_and_security:security_groups:create"
classes = ("ajax-modal",)
icon = "plus"
def allowed(self, request, security_group=None):
if api.base.is_service_enabled(request, "network"):
policy = (("network", "create_security_group"),)
else:
policy = (("compute", "compute_extension:security_groups"),)
usages = quotas.tenant_quota_usages(request)
if usages['security_groups']['available'] <= 0:
if "disabled" not in self.classes:
self.classes = [c for c in self.classes] + ["disabled"]
self.verbose_name = _("Create Security Group (Quota exceeded)")
else:
self.verbose_name = _("Create Security Group")
self.classes = [c for c in self.classes if c != "disabled"]
return POLICY_CHECK(policy, request, target={})
class EditGroup(policy.PolicyTargetMixin, tables.LinkAction):
name = "edit"
verbose_name = _("Edit Security Group")
url = "horizon:project:access_and_security:security_groups:update"
classes = ("ajax-modal",)
icon = "pencil"
def allowed(self, request, security_group=None):
policy_target = self.get_policy_target(request, security_group)
if api.base.is_service_enabled(request, "network"):
policy = (("network", "update_security_group"),)
else:
policy = (("compute", "compute_extension:security_groups"),)
if not POLICY_CHECK(policy, request, policy_target):
return False
if not security_group:
return True
return security_group.name != 'default'
class ManageRules(policy.PolicyTargetMixin, tables.LinkAction):
name = "manage_rules"
verbose_name = _("Manage Rules")
url = "horizon:project:access_and_security:security_groups:detail"
icon = "pencil"
def allowed(self, request, security_group=None):
policy_target = self.get_policy_target(request, security_group)
if api.base.is_service_enabled(request, "network"):
policy = (("network", "get_security_group"),)
else:
policy = (("compute", "compute_extension:security_groups"),)
return POLICY_CHECK(policy, request, policy_target)
class SecurityGroupsTable(tables.DataTable):
name = tables.Column("name", verbose_name=_("Name"))
description = tables.Column("description", verbose_name=_("Description"))
def sanitize_id(self, obj_id):
return filters.get_int_or_uuid(obj_id)
class Meta:
name = "security_groups"
verbose_name = _("Security Groups")
table_actions = (CreateGroup, DeleteGroup)
row_actions = (ManageRules, EditGroup, DeleteGroup)
class CreateRule(tables.LinkAction):
name = "add_rule"
verbose_name = _("Add Rule")
url = "horizon:project:access_and_security:security_groups:add_rule"
classes = ("ajax-modal",)
icon = "plus"
def allowed(self, request, security_group_rule=None):
if api.base.is_service_enabled(request, "network"):
policy = (("network", "create_security_group_rule"),)
else:
policy = (("compute", "compute_extension:security_groups"),)
return POLICY_CHECK(policy, request, target={})
def get_link_url(self):
return reverse(self.url, args=[self.table.kwargs['security_group_id']])
class DeleteRule(tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Rule",
u"Delete Rules",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Rule",
u"Deleted Rules",
count
)
def allowed(self, request, security_group_rule=None):
if api.base.is_service_enabled(request, "network"):
policy = (("network", "delete_security_group_rule"),)
else:
policy = (("compute", "compute_extension:security_groups"),)
return POLICY_CHECK(policy, request, target={})
def delete(self, request, obj_id):
api.network.security_group_rule_delete(request, obj_id)
def get_success_url(self, request):
sg_id = self.table.kwargs['security_group_id']
return reverse("horizon:project:access_and_security:"
"security_groups:detail", args=[sg_id])
def get_remote(rule):
if 'cidr' in rule.ip_range:
if rule.ip_range['cidr'] is None:
range = '::/0' if rule.ethertype == 'IPv6' else '0.0.0.0/0'
else:
range = rule.ip_range['cidr']
return range + ' (CIDR)'
elif 'name' in rule.group:
return rule.group['name']
else:
return None
def get_port_range(rule):
ip_proto = rule.ip_protocol
if rule.from_port == rule.to_port:
return check_rule_template(rule.from_port, ip_proto)
else:
return (u"%(from)s - %(to)s" %
{'from': check_rule_template(rule.from_port, ip_proto),
'to': check_rule_template(rule.to_port, ip_proto)})
def filter_direction(direction):
if direction is None or direction.lower() == 'ingress':
return _('Ingress')
else:
return _('Egress')
def filter_protocol(protocol):
if protocol is None:
return _('Any')
return unicode.upper(protocol)
def check_rule_template(port, ip_proto):
rules_dict = getattr(settings, 'SECURITY_GROUP_RULES', {})
if not rules_dict:
return port
templ_rule = filter(lambda rule: str(port) == rule['from_port']
and str(port) == rule['to_port']
and ip_proto == rule['ip_protocol'],
[rule for rule in rules_dict.values()])
if templ_rule:
return u"%(from_port)s (%(name)s)" % templ_rule[0]
return port
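# Illustrative example (added for clarity): with a hypothetical
# SECURITY_GROUP_RULES entry such as
#   {'ssh': {'name': 'SSH', 'ip_protocol': 'tcp',
#            'from_port': '22', 'to_port': '22'}}
# a matching port/protocol pair is rendered with its template name:
#
#   check_rule_template(22, 'tcp')  -> u'22 (SSH)'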
class RulesTable(tables.DataTable):
direction = tables.Column("direction",
verbose_name=_("Direction"),
filters=(filter_direction,))
ethertype = tables.Column("ethertype",
verbose_name=_("Ether Type"))
protocol = tables.Column("ip_protocol",
verbose_name=_("IP Protocol"),
filters=(filter_protocol,))
port_range = tables.Column(get_port_range,
verbose_name=_("Port Range"))
remote = tables.Column(get_remote, verbose_name=_("Remote"))
def sanitize_id(self, obj_id):
return filters.get_int_or_uuid(obj_id)
def get_object_display(self, rule):
return unicode(rule)
class Meta:
name = "rules"
verbose_name = _("Security Group Rules")
table_actions = (CreateRule, DeleteRule)
row_actions = (DeleteRule,)
|
|
from __future__ import unicode_literals
import datetime
import json
from django.contrib.auth.models import User
from django.core.serializers.json import DjangoJSONEncoder
from django.test import Client, testcases
from django.urls import reverse
from django.utils import six, timezone
from alexia.apps.billing.models import (
Authorization, Order, PermanentProduct, PriceGroup, ProductGroup, Purchase,
TemporaryProduct,
)
from alexia.apps.organization.models import (
AuthenticationData, Location, Organization, Profile,
)
from alexia.apps.scheduling.models import Availability, Event
from alexia.auth.backends import RADIUS_BACKEND_NAME
class SimpleTestCase(testcases.SimpleTestCase):
# Use long messages on failure
longMessage = True
# Do not limit diff length on failure
maxDiff = None
def assertJSONEqual(self, raw, expected_data, msg=None):
if not isinstance(expected_data, six.string_types):
            # Encode non-string input as JSON to work around timestamps not comparing equal.
expected_data = json.dumps(expected_data, cls=DjangoJSONEncoder)
super(SimpleTestCase, self).assertJSONEqual(raw, expected_data, msg)
def convertAndAssertJSONEqual(self, data, expected_data, msg=None):
"""
        Converts the data to JSON and asserts that the JSON fragment equals the expected_data.
        Usual JSON non-significant whitespace rules apply, as the heavy lifting
        is delegated to the json library.
"""
super(SimpleTestCase, self).assertJSONEqual(json.dumps(data, cls=DjangoJSONEncoder), expected_data, msg)
class TransactionTestCase(SimpleTestCase, testcases.TransactionTestCase):
pass
class TestCase(TransactionTestCase, testcases.TestCase):
def setUp(self):
super(TestCase, self).setUp()
self.data = dict()
self.data['datetime1'] = timezone.make_aware(datetime.datetime(2014, 9, 21, 14, 16, 6), timezone.utc)
self.data['datetime2'] = self.data['datetime1'] + datetime.timedelta(hours=1)
self.data['datetime3'] = self.data['datetime1'] + datetime.timedelta(hours=2)
self.data['datetime1_string'] = '2014-09-21T14:16:06+00:00'
self.data['datetime2_string'] = '2014-09-21T15:16:06+00:00'
self.data['datetime3_string'] = '2014-09-21T16:16:06+00:00'
def load_organization_data(self):
data = self.data
# User / Profile
username1 = 'testuser'
username2 = 'testuser2'
data['password1'] = 'testuser13475'
data['password2'] = 'testuser23475'
data['user1'] = User(username=username1, first_name='Test', last_name='Client', email='test@example.com',
is_superuser=True)
data['user1'].set_password(data['password1'])
data['user1'].save()
data['user1'].profile = Profile()
data['user1'].profile.save()
data['authenticationdata1'] = AuthenticationData(backend=RADIUS_BACKEND_NAME, username=username1,
user=data['user1'])
data['authenticationdata1'].save()
data['user2'] = User(username=username2, first_name='Test2', last_name='Client', email='test2@example.com')
data['user2'].set_password(data['password2'])
data['user2'].save()
data['user2'].profile = Profile()
data['user2'].profile.save()
data['authenticationdata2'] = AuthenticationData(backend=RADIUS_BACKEND_NAME, username=username2,
user=data['user2'])
data['authenticationdata2'].save()
# Organization
data['organization1'] = Organization(name='Organization 1')
data['organization1'].save()
data['organization2'] = Organization(name='Organization 2')
data['organization2'].save()
# Location
data['location1'] = Location(name='Location 1', prevent_conflicting_events=True)
data['location1'].save()
data['location2'] = Location(name='Location 2', prevent_conflicting_events=False)
data['location2'].save()
def load_billing_data(self):
data = self.data
data['pricegroup1'] = PriceGroup(organization=data['organization1'], name='Price group 1')
data['pricegroup1'].save()
data['productgroup1'] = ProductGroup(organization=data['organization1'], name='Product group 1')
data['productgroup1'].save()
data['permantentproduct1'] = PermanentProduct(productgroup=data['productgroup1'],
organization=data['organization1'],
position=0)
data['permantentproduct1'].save()
data['authorization1'] = Authorization(user=data['user1'],
organization=data['organization1'],
start_date=data['datetime1'])
data['authorization1'].save()
def load_scheduling_data(self):
data = self.data
data['availability1'] = Availability(organization=data['organization1'],
name='Yes',
nature=Availability.ASSIGNED)
data['availability1'].save()
data['availability2'] = Availability(organization=data['organization1'],
name='Maybe',
nature=Availability.MAYBE)
data['availability2'].save()
data['availability3'] = Availability(organization=data['organization1'],
name='No',
nature=Availability.NO)
data['availability3'].save()
data['event1'] = Event(organizer=data['organization1'],
name='Test event 1',
starts_at=data['datetime1'],
ends_at=data['datetime3'],
pricegroup=data['pricegroup1'],
kegs=1)
data['event1'].save()
data['temporaryproduct1'] = TemporaryProduct(event=data['event1'], price=2.33)
data['temporaryproduct1'].save()
def load_billing_order_data(self):
data = self.data
data['order1'] = Order(event=data['event1'], authorization=data['authorization1'], placed_at=data['datetime2'],
added_by=data['user1'])
data['order1'].save()
Purchase(order=data['order1'], product=data['permantentproduct1'], amount=1, price=0.50).save()
Purchase(order=data['order1'], product=data['temporaryproduct1'], amount=2, price=4.66).save()
data['order1'].save()
data['order2'] = Order(event=data['event1'], authorization=data['authorization1'], placed_at=data['datetime2'],
added_by=data['user1'])
data['order2'].save()
Purchase(order=data['order2'], product=data['permantentproduct1'], amount=3, price=1.50).save()
Purchase(order=data['order2'], product=data['temporaryproduct1'], amount=4, price=9.32).save()
data['order2'].save()
class APITestCase(TestCase):
def setUp(self):
super(APITestCase, self).setUp()
self.load_organization_data()
# Every test needs a client.
self.client = Client()
self.login(username=self.data['user1'].username,
password=self.data['password1'],
organization_slug=self.data['organization1'].slug)
def login(self, username, password, organization_slug=None):
"""
Login the test client.
:param username: Username
:param password: Password
:param organization_slug: Slug of organization to set as current organization.
"""
self.client.login(username=username, password=password)
self.send_and_compare_request('organization.current.set', [organization_slug], True)
def send_request(self, method, params):
"""
Send JSON RPC method call.
:param method: Name of method to call.
:param params: Parameters for JSON RPC call.
:rtype : django.http.response.HttpResponse
"""
path = reverse('api_v1_mountpoint')
req = {
'jsonrpc': '1.0',
'id': 'jsonrpc',
'method': method,
'params': params,
}
req_json = json.dumps(req)
return self.client.post(path, req_json, content_type='text/plain; charset=UTF-8')
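    # Illustrative example (added for clarity): a hypothetical call such as
    # self.send_request('organization.current.set', ['org-slug']) posts the body
    # {"jsonrpc": "1.0", "id": "jsonrpc",
    #  "method": "organization.current.set", "params": ["org-slug"]}
    # to the api_v1_mountpoint URL.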
def send_and_compare_request(self, method, params, expected_result):
"""
Send JSON RPC method call and compare actual result with expected result.
:param method: Name of method to call.
:param params: Parameters for JSON RPC call.
:param expected_result: Expected result.
"""
response = self.send_request(method, params)
self.assertEqual(response['Content-Type'], 'application/json-rpc')
content = response.content.decode('utf-8')
expected_data = {
'jsonrpc': '1.0',
'id': 'jsonrpc',
'error': None,
'result': expected_result,
}
self.assertJSONEqual(content, expected_data)
def send_and_compare_request_error(self, method, params, error_code, error_name, error_message, error_data=None,
status_code=200):
"""
Send JSON RPC method call and compare actual error result with expected error result.
:param method: Name of method to call.
:param params: Parameters for JSON RPC call.
:param error_code: Expected error code.
:param error_name: Expected error name.
:param error_message: Expected error message.
:param error_data: Expected error data.
:param status_code: Expected HTTP status code.
"""
response = self.send_request(method, params)
if response.status_code != status_code:
self.fail(response.content)
self.assertEqual(response.status_code, status_code, 'HTTP status code')
self.assertEqual(response['Content-Type'], 'application/json-rpc')
content = response.content.decode('utf-8')
expected_data = {
'jsonrpc': '1.0',
'id': 'jsonrpc',
'error': {
'code': error_code,
'name': error_name,
'message': error_message,
'data': error_data,
},
'result': None,
}
self.assertJSONEqual(content, expected_data, 'JSON RPC result')
|
|
# coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class CreateExtractSubtitleRequest(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, filename=None, name=None, id_lang=None, background=None, font=None, size=None, color=None, text_shadow=None):
"""
CreateExtractSubtitleRequest - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'filename': 'str',
'name': 'str',
'id_lang': 'int',
'background': 'int',
'font': 'int',
'size': 'int',
'color': 'str',
'text_shadow': 'str'
}
self.attribute_map = {
'filename': 'filename',
'name': 'name',
'id_lang': 'id_lang',
'background': 'background',
'font': 'font',
'size': 'size',
'color': 'color',
'text_shadow': 'text_shadow'
}
self._filename = filename
self._name = name
self._id_lang = id_lang
self._background = background
self._font = font
self._size = size
self._color = color
self._text_shadow = text_shadow
@property
def filename(self):
"""
Gets the filename of this CreateExtractSubtitleRequest.
Filename
:return: The filename of this CreateExtractSubtitleRequest.
:rtype: str
"""
return self._filename
@filename.setter
def filename(self, filename):
"""
Sets the filename of this CreateExtractSubtitleRequest.
Filename
:param filename: The filename of this CreateExtractSubtitleRequest.
:type: str
"""
if filename is None:
raise ValueError("Invalid value for `filename`, must not be `None`")
self._filename = filename
@property
def name(self):
"""
Gets the name of this CreateExtractSubtitleRequest.
Name
:return: The name of this CreateExtractSubtitleRequest.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this CreateExtractSubtitleRequest.
Name
:param name: The name of this CreateExtractSubtitleRequest.
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._name = name
@property
def id_lang(self):
"""
Gets the id_lang of this CreateExtractSubtitleRequest.
Language ID
:return: The id_lang of this CreateExtractSubtitleRequest.
:rtype: int
"""
return self._id_lang
@id_lang.setter
def id_lang(self, id_lang):
"""
Sets the id_lang of this CreateExtractSubtitleRequest.
Language ID
:param id_lang: The id_lang of this CreateExtractSubtitleRequest.
:type: int
"""
if id_lang is None:
raise ValueError("Invalid value for `id_lang`, must not be `None`")
self._id_lang = id_lang
@property
def background(self):
"""
Gets the background of this CreateExtractSubtitleRequest.
Background color
:return: The background of this CreateExtractSubtitleRequest.
:rtype: int
"""
return self._background
@background.setter
def background(self, background):
"""
Sets the background of this CreateExtractSubtitleRequest.
Background color
:param background: The background of this CreateExtractSubtitleRequest.
:type: int
"""
self._background = background
@property
def font(self):
"""
Gets the font of this CreateExtractSubtitleRequest.
Font name
:return: The font of this CreateExtractSubtitleRequest.
:rtype: int
"""
return self._font
@font.setter
def font(self, font):
"""
Sets the font of this CreateExtractSubtitleRequest.
Font name
:param font: The font of this CreateExtractSubtitleRequest.
:type: int
"""
self._font = font
@property
def size(self):
"""
Gets the size of this CreateExtractSubtitleRequest.
Font size
:return: The size of this CreateExtractSubtitleRequest.
:rtype: int
"""
return self._size
@size.setter
def size(self, size):
"""
Sets the size of this CreateExtractSubtitleRequest.
Font size
:param size: The size of this CreateExtractSubtitleRequest.
:type: int
"""
self._size = size
@property
def color(self):
"""
Gets the color of this CreateExtractSubtitleRequest.
Font color
:return: The color of this CreateExtractSubtitleRequest.
:rtype: str
"""
return self._color
@color.setter
def color(self, color):
"""
Sets the color of this CreateExtractSubtitleRequest.
Font color
:param color: The color of this CreateExtractSubtitleRequest.
:type: str
"""
self._color = color
@property
def text_shadow(self):
"""
Gets the text_shadow of this CreateExtractSubtitleRequest.
Text shadow
:return: The text_shadow of this CreateExtractSubtitleRequest.
:rtype: str
"""
return self._text_shadow
@text_shadow.setter
def text_shadow(self, text_shadow):
"""
Sets the text_shadow of this CreateExtractSubtitleRequest.
Text shadow
:param text_shadow: The text_shadow of this CreateExtractSubtitleRequest.
:type: str
"""
self._text_shadow = text_shadow
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
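# Illustrative usage (added for clarity, not part of the generated module);
# the field values below are hypothetical:
#
#   req = CreateExtractSubtitleRequest(filename='movie.mp4', name='English',
#                                      id_lang=1, color='#FFFFFF')
#   req.to_dict()['filename']  -> 'movie.mp4'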
|
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
These agents contain a number of "unit test" corpora, or fake corpora that ensure models
can learn simple behavior easily. They are useful as unit tests for the basic models.
The corpora are all randomly but deterministically generated.
"""
from typing import Optional
from parlai.core.params import ParlaiParser
from parlai.core.teachers import (
FixedDialogTeacher,
DialogTeacher,
AbstractImageTeacher,
Teacher,
ChunkTeacher,
)
from parlai.core.opt import Opt
import copy
import random
import itertools
import os
from PIL import Image
import string
import json
from abc import ABC
from typing import Tuple, List
import time
from parlai.core.message import Message
from parlai.utils.data import DatatypeHelper
from parlai.utils.io import PathManager
# default parameters
VOCAB_SIZE = 7
EXAMPLE_SIZE = 4
NUM_CANDIDATES = 10
NUM_TRAIN = 500
NUM_TEST = 100
INFINITE = 1e20
class CandidateBaseTeacher(Teacher, ABC):
"""
Base Teacher.
Contains some functions that are useful for all the subteachers.
"""
def __init__(
self,
opt: Opt,
shared: dict = None,
vocab_size: int = VOCAB_SIZE,
example_size: int = EXAMPLE_SIZE,
num_candidates: int = NUM_CANDIDATES,
num_train: int = NUM_TRAIN,
num_test: int = NUM_TEST,
):
"""
:param int vocab_size:
size of the vocabulary
:param int example_size:
length of each example
:param int num_candidates:
number of label_candidates generated
:param int num_train:
size of the training set
:param int num_test:
size of the valid/test sets
"""
self.opt = opt
opt['datafile'] = opt['datatype'].split(':')[0]
self.datafile = opt['datafile']
self.vocab_size = vocab_size
self.example_size = example_size
self.num_candidates = num_candidates
self.num_train = num_train
self.num_test = num_test
# set up the vocabulary
self.words = list(map(str, range(self.vocab_size)))
super().__init__(opt, shared)
def build_corpus(self):
"""
Build corpus; override for customization.
"""
return [list(x) for x in itertools.permutations(self.words, self.example_size)]
def num_episodes(self) -> int:
if self.datafile == 'train':
return self.num_train
else:
return self.num_test
def num_examples(self) -> int:
return self.num_episodes()
def _setup_data(self, fold: str):
# N words appearing in a random order
self.rng = random.Random(42)
full_corpus = self.build_corpus()
self.rng.shuffle(full_corpus)
it = iter(full_corpus)
self.train = list(itertools.islice(it, self.num_train))
self.val = list(itertools.islice(it, self.num_test))
self.test = list(itertools.islice(it, self.num_test))
# check we have enough data
assert len(self.train) == self.num_train, len(self.train)
assert len(self.val) == self.num_test, len(self.val)
assert len(self.test) == self.num_test, len(self.test)
# check every word appear in the training set
assert len(set(itertools.chain(*self.train)) - set(self.words)) == 0
# select which set we're using
if fold == "train":
self.corpus = self.train
elif fold == "valid":
self.corpus = self.val
elif fold == "test":
self.corpus = self.test
# make sure the corpus is actually text strings
self.corpus = [' '.join(x) for x in self.corpus]
class FixedDialogCandidateTeacher(CandidateBaseTeacher, FixedDialogTeacher):
"""
Base Candidate Teacher.
Useful if you'd like to test the FixedDialogTeacher
"""
def __init__(self, *args, **kwargs):
"""
Override to build candidates.
"""
super().__init__(*args, **kwargs)
opt = args[0]
if 'shared' not in kwargs:
self._setup_data(opt['datatype'].split(':')[0])
self._build_candidates()
else:
shared = kwargs['shared']
self.corpus = shared['corpus']
self.cands = shared['cands']
self.reset()
def share(self):
shared = super().share()
shared['corpus'] = self.corpus
shared['cands'] = self.cands
return shared
def _build_candidates(self):
self.cands = []
for i in range(len(self.corpus)):
cands = []
for j in range(NUM_CANDIDATES):
offset = (i + j) % len(self.corpus)
cands.append(self.corpus[offset])
self.cands.append(cands)
def get(self, episode_idx: int, entry_idx: int = 0):
return {
'text': self.corpus[episode_idx],
'episode_done': True,
'labels': [self.corpus[episode_idx]],
'label_candidates': self.cands[episode_idx],
}
class CandidateTeacher(CandidateBaseTeacher, DialogTeacher):
"""
Candidate teacher produces several candidates, one of which is a repeat of the
input.
A good ranker should easily identify the correct response.
"""
def setup_data(self, fold):
super()._setup_data(fold)
for i, text in enumerate(self.corpus):
cands = []
for j in range(NUM_CANDIDATES):
offset = (i + j) % len(self.corpus)
cands.append(self.corpus[offset])
yield (text, [text], 0, cands), True
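    # Illustrative example (added for clarity): for a corpus entry such as
    # '0 1 2 3', setup_data yields
    #   ('0 1 2 3', ['0 1 2 3'], 0, <NUM_CANDIDATES candidate strings>), True
    # i.e. the label is always an exact repeat of the input text.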
class OverfitTeacher(CandidateTeacher, DialogTeacher):
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
super().add_cmdline_args(parser, partial_opt)
parser.add_argument('--corpus-size', default=4, type=int)
return parser
def __init__(self, opt, shared=None):
self.corpussize = opt.get('corpus_size', 4)
super().__init__(opt, shared)
def setup_data(self, fold):
super()._setup_data('train')
for i, text in enumerate(self.corpus[: self.corpussize]):
cands = []
for j in range(NUM_CANDIDATES):
offset = (i + j) % len(self.corpus)
cands.append(self.corpus[offset])
yield (text, [text], 0, cands), True
def num_examples(self):
return self.corpussize
def num_episodes(self):
return self.corpussize
class OverfitMultiturnTeacher(CandidateTeacher, DialogTeacher):
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
super().add_cmdline_args(parser, partial_opt)
parser.add_argument('--corpus-size', default=4, type=int)
return parser
def __init__(self, opt, shared=None):
self.corpussize = opt.get('corpus_size', 4)
super().__init__(opt, shared)
def setup_data(self, fold):
super()._setup_data('train')
for text in self.corpus[: self.corpussize]:
words = text.split(' ')
for j in range(1, len(words) + 1):
real_text = ' '.join(words[:j])
yield (real_text, text), True
def num_examples(self):
return self.corpussize * EXAMPLE_SIZE
def num_episodes(self):
return self.corpussize * EXAMPLE_SIZE
class VariableLengthTeacher(CandidateTeacher):
def build_corpus(self):
corpus = super().build_corpus()
for i in range(len(corpus)):
length = len(corpus[i]) - i % 3
corpus[i] = corpus[i][:length]
return corpus
class MultiturnCandidateTeacher(CandidateTeacher):
"""
Splits inputs/targets by spaces into multiple turns.
Good for testing models that use the dialog history.
"""
def setup_data(self, fold):
raw = super().setup_data(fold)
for (t, a, r, cs), _e in raw:
split_t = t.split(' ')
split_a = a[0].split(' ')
split_cs = [c.split(' ') for c in cs]
for i in range(len(split_t)):
yield (
(
split_t[i],
[' '.join(split_a[: i + 1])],
r,
[' '.join(c[: i + 1]) for c in split_cs],
),
i == 0,
)
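    # Illustrative example (added for clarity): a single-turn episode with text
    # '0 1 2 3' is split into four turns, where the trailing boolean marks the
    # start of a new episode:
    #   ('0', ['0'], 0, ...), True
    #   ('1', ['0 1'], 0, ...), False
    #   ('2', ['0 1 2'], 0, ...), False
    #   ('3', ['0 1 2 3'], 0, ...), False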
def num_examples(self):
return self.example_size * self.num_episodes()
class MultiturnTeacher(MultiturnCandidateTeacher):
"""
Simple alias.
"""
pass
class NocandidateTeacher(CandidateTeacher):
"""
Strips the candidates so the model can't see any options.
Good for testing simple generative models.
"""
def setup_data(self, fold):
raw = super().setup_data(fold)
for (t, a, _r, _c), e in raw:
yield (t, a), e
class RepeatWordsTeacher(NocandidateTeacher):
"""
Each input/output pair is a word repeated n times.
Useful for testing beam-blocking.
"""
def __init__(self, *args, **kwargs):
# Set sizes so that we have appropriate number of examples (700)
kwargs['vocab_size'] = 70
kwargs['example_size'] = 11
super().__init__(*args, **kwargs)
def build_corpus(self):
"""
Override to repeat words.
"""
return [
[x for _ in range(l)]
for l in range(1, self.example_size)
for x in self.words
]
class MultiturnNocandidateTeacher(MultiturnCandidateTeacher):
"""
Strips the candidates so the model can't see any options.
Good for testing simple generative models.
"""
def setup_data(self, fold):
raw = super().setup_data(fold)
for (t, a, _r, _c), e in raw:
yield (t, a), e
class ClassifierTeacher(CandidateTeacher):
"""
Classifier Teacher.
Good for testing simple classifier models.
"""
def setup_data(self, fold):
raw = super().setup_data(fold)
for (t, _a, _r, _c), e in raw:
letters = t.split(' ')
# everything starts with 0 or 1
letters[0] = str(int(int(t[0]) % 2))
label = 'one' if letters[0] == '1' else 'zero'
text = ' '.join(letters)
yield (text, [label], 0, ['one', 'zero']), e
class ReverseTeacher(CandidateTeacher):
"""
Reverse Teacher.
Label is opposite of text; good for testing more complex generative models.
"""
def setup_data(self, fold):
raw = super().setup_data(fold)
for (t, a, r, c), e in raw:
label = a[0][::-1]
yield (t, [label], r, c + [label]), e
class ImageTeacher(AbstractImageTeacher):
"""
Teacher which provides images and captions.
In __init__, setup some fake images + features
"""
def __init__(self, opt, shared=None):
self._setup_test_data(opt)
super().__init__(opt, shared)
def _setup_test_data(self, opt):
datapath = os.path.join(opt['datapath'], 'ImageTeacher')
imagepath = os.path.join(datapath, 'images')
PathManager.mkdirs(imagepath)
self.image_features_path = os.path.join(
datapath, f'{opt["image_mode"]}_image_features'
)
# Create fake images and features
imgs = [f'img_{i}' for i in range(10)]
for i, img in enumerate(imgs):
image = Image.new('RGB', (16, 16), color=i)
with PathManager.open(os.path.join(imagepath, f'{img}.jpg'), 'wb') as fp:
image.save(fp, 'JPEG')
# write out fake data
for dt in ['train', 'valid', 'test']:
random.seed(42)
data = [
{'image_id': img, 'text': string.ascii_uppercase[i]}
for i, img in enumerate(imgs)
]
with PathManager.open(os.path.join(datapath, f'{dt}.json'), 'w') as f:
json.dump(data, f)
def get_image_features_path(self, task, image_model_name, dt):
"""
Return path dummy image features.
"""
return self.image_features_path
def image_id_to_image_path(self, image_id):
"""
Return path to image on disk.
"""
return os.path.join(
self.opt['datapath'], 'ImageTeacher/images', f'{image_id}.jpg'
)
class RepeatTeacher(DialogTeacher):
def __init__(self, opt, shared=None):
opt = copy.deepcopy(opt)
opt['datafile'] = 'unused_path'
task = opt.get('task', 'integration_tests:RepeatTeacher:50')
try:
self.data_length = int(task.split(':')[-1])
except ValueError:
self.data_length = 10
super().__init__(opt, shared)
def setup_data(self, unused_path):
for i in range(self.data_length):
yield ((str(i), [str(i)]), True)
def num_examples(self):
return self.data_length
def num_episodes(self):
return self.data_length
class ChunkyTeacher(ChunkTeacher):
def _get_data_folder(self):
return None
def get_num_samples(self, opt) -> Tuple[int, int]:
datatype = opt['datatype']
if 'train' in datatype:
return NUM_TRAIN, NUM_TRAIN
elif 'valid' in datatype:
return NUM_TEST, NUM_TEST
elif 'test' in datatype:
return NUM_TEST, NUM_TEST
def get_fold_chunks(self, opt) -> List[int]:
datatype = opt['datatype']
if 'train' in datatype:
return list(range(50))
elif 'valid' in datatype:
return list(range(50, 60))
elif 'test' in datatype:
return list(range(60, 70))
def load_from_chunk(self, chunk_idx: int):
output = []
for i in range(10):
text = " ".join([str(i)] + [str(chunk_idx)] * 5)
resp = " ".join([str(i)])
output.append((text, resp))
return output
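    # Illustrative example (added for clarity): load_from_chunk(3) returns ten
    # (text, response) pairs such as ('0 3 3 3 3 3', '0'), ('1 3 3 3 3 3', '1'), ...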
def create_message(self, sample_item, entry_idx=0):
text, label = sample_item
return Message({'text': text, 'labels': [label], 'episode_done': True})
class WrongExamplesChunkyTeacher(ChunkyTeacher):
"""
Chunk teacher with an incorrect number of examples.
Useful for testing we don't get a deadlock from a common user error.
"""
def num_examples(self):
return 10
class WrongEpisodesChunkyTeacher(ChunkyTeacher):
"""
Chunk teacher with an incorrect number of episodes.
"""
def num_episodes(self):
return 10
class WrongExamplesEpisodesChunkyTeacher(ChunkyTeacher):
"""
Chunk teacher with an incorrect number of episodes and examples.
"""
def num_examples(self):
return 10
def num_episodes(self):
return 10
class ChunkySmallBufferTeacher(ChunkyTeacher):
def get_buffersize(self):
return NUM_TEST // 2
class InfiniteTrainTeacher(FixedDialogTeacher):
"""
Teacher with an effectively infinite number of training examples.
"""
def num_examples(self):
return INFINITE
def num_episodes(self):
return INFINITE
def get(self, episode_idx=0, entry_idx=0):
field = (
'labels'
if DatatypeHelper.is_training(self.opt['datatype'])
else 'eval_labels'
)
return Message({'text': '1 2 3 4', field: ['1 2 3 4'], 'episode_done': True})
class ChunkySlowTeacher(ChunkyTeacher):
"""
Unique examples that load slowly.
"""
def load_from_chunk(self, chunk_idx: int):
time.sleep(0.1)
return super().load_from_chunk(chunk_idx)
class ShortFixedTeacher(FixedDialogCandidateTeacher):
"""
Fixed Dialog Candidate teacher with only 10 training examples.
"""
def __init__(self, opt: Opt, shared: dict = None):
super().__init__(opt, shared, num_train=10, num_test=10)
class DefaultTeacher(CandidateTeacher):
pass
class TinyTeacher(DialogTeacher):
"""
Teacher with a single example, to test data stratification with fewer examples than
GPUs.
"""
def __init__(self, opt, shared=None):
opt['datafile'] = 'tiny_data'
super().__init__(opt, shared)
def setup_data(self, _):
yield {'text': 'hi', 'label': 'there'}, True
|
|
from bson import DBRef, SON
from base import (
BaseDict, BaseList, EmbeddedDocumentList,
TopLevelDocumentMetaclass, get_document
)
from fields import (ReferenceField, ListField, DictField, MapField)
from connection import get_db
from queryset import QuerySet
from document import Document, EmbeddedDocument
class DeReference(object):
def __call__(self, items, max_depth=1, instance=None, name=None):
"""
Cheaply dereferences the items to a set depth.
Also handles the conversion of complex data types.
:param items: The iterable (dict, list, queryset) to be dereferenced.
:param max_depth: The maximum depth to recurse to
:param instance: The owning instance used for tracking changes by
:class:`~mongoengine.base.ComplexBaseField`
:param name: The name of the field, used for tracking changes by
:class:`~mongoengine.base.ComplexBaseField`
"""
if items is None or isinstance(items, basestring):
return items
        # cheapest way to convert a queryset to a list: a list comprehension
        # avoids the count() query that list(queryset) would use to determine length
if isinstance(items, QuerySet):
items = [i for i in items]
self.max_depth = max_depth
doc_type = None
if instance and isinstance(instance, (Document, EmbeddedDocument,
TopLevelDocumentMetaclass)):
doc_type = instance._fields.get(name)
while hasattr(doc_type, 'field'):
doc_type = doc_type.field
if isinstance(doc_type, ReferenceField):
field = doc_type
doc_type = doc_type.document_type
is_list = not hasattr(items, 'items')
if is_list and all([i.__class__ == doc_type for i in items]):
return items
elif not is_list and all([i.__class__ == doc_type
for i in items.values()]):
return items
elif not field.dbref:
if not hasattr(items, 'items'):
def _get_items(items):
new_items = []
for v in items:
if isinstance(v, list):
new_items.append(_get_items(v))
elif not isinstance(v, (DBRef, Document)):
new_items.append(field.to_python(v))
else:
new_items.append(v)
return new_items
items = _get_items(items)
else:
items = dict([
(k, field.to_python(v))
if not isinstance(v, (DBRef, Document)) else (k, v)
for k, v in items.iteritems()]
)
self.reference_map = self._find_references(items)
self.object_map = self._fetch_objects(doc_type=doc_type)
return self._attach_objects(items, 0, instance, name)
def _find_references(self, items, depth=0):
"""
Recursively finds all db references to be dereferenced
:param items: The iterable (dict, list, queryset)
:param depth: The current depth of recursion
"""
reference_map = {}
if not items or depth >= self.max_depth:
return reference_map
# Determine the iterator to use
if not hasattr(items, 'items'):
iterator = enumerate(items)
else:
iterator = items.iteritems()
# Recursively find dbreferences
depth += 1
for k, item in iterator:
if isinstance(item, (Document, EmbeddedDocument)):
for field_name, field in item._fields.iteritems():
v = item._data.get(field_name, None)
if isinstance(v, (DBRef)):
reference_map.setdefault(field.document_type, set()).add(v.id)
elif isinstance(v, (dict, SON)) and '_ref' in v:
reference_map.setdefault(get_document(v['_cls']), set()).add(v['_ref'].id)
elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:
field_cls = getattr(getattr(field, 'field', None), 'document_type', None)
references = self._find_references(v, depth)
for key, refs in references.iteritems():
if isinstance(field_cls, (Document, TopLevelDocumentMetaclass)):
key = field_cls
reference_map.setdefault(key, set()).update(refs)
elif isinstance(item, (DBRef)):
reference_map.setdefault(item.collection, set()).add(item.id)
elif isinstance(item, (dict, SON)) and '_ref' in item:
reference_map.setdefault(get_document(item['_cls']), set()).add(item['_ref'].id)
elif isinstance(item, (dict, list, tuple)) and depth - 1 <= self.max_depth:
references = self._find_references(item, depth - 1)
for key, refs in references.iteritems():
reference_map.setdefault(key, set()).update(refs)
return reference_map
def _fetch_objects(self, doc_type=None):
"""Fetch all references and convert to their document objects
"""
object_map = {}
for collection, dbrefs in self.reference_map.iteritems():
refs = [dbref for dbref in dbrefs
if unicode(dbref).encode('utf-8') not in object_map]
if hasattr(collection, 'objects'): # We have a document class for the refs
references = collection.objects.in_bulk(refs)
for key, doc in references.iteritems():
object_map[key] = doc
else: # Generic reference: use the refs data to convert to document
if isinstance(doc_type, (ListField, DictField, MapField,)):
continue
if doc_type:
references = doc_type._get_db()[collection].find({'_id': {'$in': refs}})
for ref in references:
doc = doc_type._from_son(ref)
object_map[doc.id] = doc
else:
references = get_db()[collection].find({'_id': {'$in': refs}})
for ref in references:
if '_cls' in ref:
doc = get_document(ref["_cls"])._from_son(ref)
elif doc_type is None:
doc = get_document(
''.join(x.capitalize()
for x in collection.split('_')))._from_son(ref)
else:
doc = doc_type._from_son(ref)
object_map[doc.id] = doc
return object_map
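        # Note: object_map is keyed by the referenced documents' ids, so
        # _attach_objects() below can swap a DBRef for its document with a
        # plain self.object_map.get(ref.id, ref) lookup.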
def _attach_objects(self, items, depth=0, instance=None, name=None):
"""
        Recursively attaches the dereferenced objects back onto the items
:param items: The iterable (dict, list, queryset)
:param depth: The current depth of recursion
:param instance: The owning instance used for tracking changes by
:class:`~mongoengine.base.ComplexBaseField`
:param name: The name of the field, used for tracking changes by
:class:`~mongoengine.base.ComplexBaseField`
"""
if not items:
if isinstance(items, (BaseDict, BaseList)):
return items
if instance:
if isinstance(items, dict):
return BaseDict(items, instance, name)
else:
return BaseList(items, instance, name)
if isinstance(items, (dict, SON)):
if '_ref' in items:
return self.object_map.get(items['_ref'].id, items)
elif '_cls' in items:
doc = get_document(items['_cls'])._from_son(items)
_cls = doc._data.pop('_cls', None)
del items['_cls']
doc._data = self._attach_objects(doc._data, depth, doc, None)
if _cls is not None:
doc._data['_cls'] = _cls
return doc
if not hasattr(items, 'items'):
is_list = True
list_type = BaseList
if isinstance(items, EmbeddedDocumentList):
list_type = EmbeddedDocumentList
as_tuple = isinstance(items, tuple)
iterator = enumerate(items)
data = []
else:
is_list = False
iterator = items.iteritems()
data = {}
depth += 1
for k, v in iterator:
if is_list:
data.append(v)
else:
data[k] = v
if k in self.object_map and not is_list:
data[k] = self.object_map[k]
elif isinstance(v, (Document, EmbeddedDocument)):
for field_name, field in v._fields.iteritems():
v = data[k]._data.get(field_name, None)
if isinstance(v, (DBRef)):
data[k]._data[field_name] = self.object_map.get(v.id, v)
elif isinstance(v, (dict, SON)) and '_ref' in v:
data[k]._data[field_name] = self.object_map.get(v['_ref'].id, v)
elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:
item_name = "{0}.{1}.{2}".format(name, k, field_name)
data[k]._data[field_name] = self._attach_objects(v, depth, instance=instance, name=item_name)
elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:
item_name = '%s.%s' % (name, k) if name else name
data[k] = self._attach_objects(v, depth - 1, instance=instance, name=item_name)
elif hasattr(v, 'id'):
data[k] = self.object_map.get(v.id, v)
if instance and name:
if is_list:
return tuple(data) if as_tuple else list_type(data, instance, name)
return BaseDict(data, instance, name)
depth += 1
return data
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
from py_vulcanize import fake_fs
from py_vulcanize import generate
from py_vulcanize import html_generation_controller
from py_vulcanize import html_module
from py_vulcanize import parse_html_deps
from py_vulcanize import project as project_module
from py_vulcanize import resource
from py_vulcanize import resource_loader as resource_loader
import six
class ResourceWithFakeContents(resource.Resource):
def __init__(self, toplevel_dir, absolute_path, fake_contents):
"""A resource with explicitly provided contents.
If the resource does not exist, then pass fake_contents=None. This will
cause accessing the resource contents to raise an exception mimicking the
behavior of regular resources."""
super(ResourceWithFakeContents, self).__init__(toplevel_dir, absolute_path)
self._fake_contents = fake_contents
@property
def contents(self):
if self._fake_contents is None:
raise Exception('File not found')
return self._fake_contents
class FakeLoader(object):
def __init__(self, source_paths, initial_filenames_and_contents=None):
self._source_paths = source_paths
self._file_contents = {}
if initial_filenames_and_contents:
for k, v in six.iteritems(initial_filenames_and_contents):
self._file_contents[k] = v
def FindResourceGivenAbsolutePath(self, absolute_path):
candidate_paths = []
for source_path in self._source_paths:
if absolute_path.startswith(source_path):
candidate_paths.append(source_path)
if len(candidate_paths) == 0:
return None
# Sort by length. Longest match wins.
    candidate_paths.sort(key=len)
longest_candidate = candidate_paths[-1]
return ResourceWithFakeContents(
longest_candidate, absolute_path,
self._file_contents.get(absolute_path, None))
def FindResourceGivenRelativePath(self, relative_path):
absolute_path = None
for script_path in self._source_paths:
absolute_path = os.path.join(script_path, relative_path)
if absolute_path in self._file_contents:
return ResourceWithFakeContents(script_path, absolute_path,
self._file_contents[absolute_path])
return None
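# A minimal usage sketch of FakeLoader (hypothetical paths/contents):
#   loader = FakeLoader(['/tmp'], {os.path.normpath('/tmp/a/foo.js'): '...'})
#   loader.FindResourceGivenRelativePath('a/foo.js')    # -> ResourceWithFakeContents
#   loader.FindResourceGivenAbsolutePath('/other.js')   # -> None: no source path matches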
class ParseTests(unittest.TestCase):
def testValidExternalScriptReferenceToRawScript(self):
parse_results = parse_html_deps.HTMLModuleParserResults("""<!DOCTYPE html>
<script src="../foo.js">
""")
file_contents = {}
file_contents[os.path.normpath('/tmp/a/foo.js')] = """
'i am just some raw script';
"""
metadata = html_module.Parse(
FakeLoader([os.path.normpath('/tmp')], file_contents),
'a.b.start',
'/tmp/a/b/',
is_component=False,
parser_results=parse_results)
self.assertEquals([], metadata.dependent_module_names)
self.assertEquals(
['a/foo.js'], metadata.dependent_raw_script_relative_paths)
def testExternalScriptReferenceToModuleOutsideScriptPath(self):
parse_results = parse_html_deps.HTMLModuleParserResults("""<!DOCTYPE html>
<script src="/foo.js">
""")
file_contents = {}
file_contents[os.path.normpath('/foo.js')] = ''
def DoIt():
html_module.Parse(FakeLoader([os.path.normpath('/tmp')], file_contents),
'a.b.start',
'/tmp/a/b/',
is_component=False,
parser_results=parse_results)
self.assertRaises(Exception, DoIt)
def testExternalScriptReferenceToFileThatDoesntExist(self):
parse_results = parse_html_deps.HTMLModuleParserResults("""<!DOCTYPE html>
<script src="/foo.js">
""")
file_contents = {}
def DoIt():
html_module.Parse(FakeLoader([os.path.normpath('/tmp')], file_contents),
'a.b.start',
'/tmp/a/b/',
is_component=False,
parser_results=parse_results)
self.assertRaises(Exception, DoIt)
def testValidImportOfModule(self):
parse_results = parse_html_deps.HTMLModuleParserResults("""<!DOCTYPE html>
<link rel="import" href="../foo.html">
""")
file_contents = {}
file_contents[os.path.normpath('/tmp/a/foo.html')] = """
"""
metadata = html_module.Parse(
FakeLoader([os.path.normpath('/tmp')], file_contents),
'a.b.start',
'/tmp/a/b/',
is_component=False,
parser_results=parse_results)
self.assertEquals(['a.foo'], metadata.dependent_module_names)
def testStyleSheetImport(self):
parse_results = parse_html_deps.HTMLModuleParserResults("""<!DOCTYPE html>
<link rel="stylesheet" href="../foo.css">
""")
file_contents = {}
file_contents[os.path.normpath('/tmp/a/foo.css')] = """
"""
metadata = html_module.Parse(
FakeLoader([os.path.normpath('/tmp')], file_contents),
'a.b.start',
'/tmp/a/b/',
is_component=False,
parser_results=parse_results)
self.assertEquals([], metadata.dependent_module_names)
self.assertEquals(['a.foo'], metadata.style_sheet_names)
def testUsingAbsoluteHref(self):
parse_results = parse_html_deps.HTMLModuleParserResults("""<!DOCTYPE html>
<script src="/foo.js">
""")
file_contents = {}
file_contents[os.path.normpath('/src/foo.js')] = ''
metadata = html_module.Parse(
FakeLoader([os.path.normpath("/tmp"), os.path.normpath("/src")],
file_contents),
"a.b.start",
"/tmp/a/b/",
is_component=False,
parser_results=parse_results)
self.assertEquals(['foo.js'], metadata.dependent_raw_script_relative_paths)
class HTMLModuleTests(unittest.TestCase):
def testBasicModuleGeneration(self):
file_contents = {}
file_contents[os.path.normpath('/tmp/a/b/start.html')] = """
<!DOCTYPE html>
<link rel="import" href="/widget.html">
<link rel="stylesheet" href="../common.css">
<script src="/raw_script.js"></script>
<script src="/excluded_script.js"></script>
<dom-module id="start">
<template>
</template>
<script>
'use strict';
console.log('inline script for start.html got written');
</script>
</dom-module>
"""
file_contents[os.path.normpath('/py_vulcanize/py_vulcanize.html')] = """<!DOCTYPE html>
"""
file_contents[os.path.normpath('/components/widget.html')] = """
<!DOCTYPE html>
<link rel="import" href="/py_vulcanize.html">
<widget name="widget.html"></widget>
<script>
'use strict';
console.log('inline script for widget.html');
</script>
"""
file_contents[os.path.normpath('/tmp/a/common.css')] = """
/* /tmp/a/common.css was written */
"""
file_contents[os.path.normpath('/raw/raw_script.js')] = """
console.log('/raw/raw_script.js was written');
"""
file_contents[os.path.normpath(
'/raw/components/polymer/polymer.min.js')] = """
"""
with fake_fs.FakeFS(file_contents):
project = project_module.Project(
[os.path.normpath('/py_vulcanize/'),
os.path.normpath('/tmp/'),
os.path.normpath('/components/'),
os.path.normpath('/raw/')])
loader = resource_loader.ResourceLoader(project)
a_b_start_module = loader.LoadModule(
module_name='a.b.start', excluded_scripts=['\/excluded_script.js'])
load_sequence = project.CalcLoadSequenceForModules([a_b_start_module])
# Check load sequence names.
load_sequence_names = [x.name for x in load_sequence]
self.assertEquals(['py_vulcanize',
'widget',
'a.b.start'], load_sequence_names)
# Check module_deps on a_b_start_module
def HasDependentModule(module, name):
return [x for x in module.dependent_modules
if x.name == name]
assert HasDependentModule(a_b_start_module, 'widget')
# Check JS generation.
js = generate.GenerateJS(load_sequence)
assert 'inline script for start.html' in js
assert 'inline script for widget.html' in js
assert '/raw/raw_script.js' in js
assert 'excluded_script.js' not in js
# Check HTML generation.
html = generate.GenerateStandaloneHTMLAsString(
load_sequence, title='', flattened_js_url='/blah.js')
assert '<dom-module id="start">' in html
assert 'inline script for widget.html' not in html
assert 'common.css' in html
def testPolymerConversion(self):
file_contents = {}
file_contents[os.path.normpath('/tmp/a/b/my_component.html')] = """
<!DOCTYPE html>
<dom-module id="my-component">
<template>
</template>
<script>
'use strict';
Polymer ( {
is: "my-component"
});
</script>
</dom-module>
"""
with fake_fs.FakeFS(file_contents):
project = project_module.Project([
os.path.normpath('/py_vulcanize/'), os.path.normpath('/tmp/')])
loader = resource_loader.ResourceLoader(project)
my_component = loader.LoadModule(module_name='a.b.my_component')
f = six.StringIO()
my_component.AppendJSContentsToFile(
f,
use_include_tags_for_scripts=False,
dir_for_include_tag_root=None)
js = f.getvalue().rstrip()
expected_js = """
'use strict';
Polymer ( {
is: "my-component"
});
""".rstrip()
self.assertEquals(expected_js, js)
def testInlineStylesheetURLs(self):
file_contents = {}
file_contents[os.path.normpath('/tmp/a/b/my_component.html')] = """
<!DOCTYPE html>
<style>
.some-rule {
background-image: url('../something.jpg');
}
</style>
"""
file_contents[os.path.normpath('/tmp/a/something.jpg')] = 'jpgdata'
with fake_fs.FakeFS(file_contents):
project = project_module.Project([
os.path.normpath('/py_vulcanize/'), os.path.normpath('/tmp/')])
loader = resource_loader.ResourceLoader(project)
my_component = loader.LoadModule(module_name='a.b.my_component')
computed_deps = []
my_component.AppendDirectlyDependentFilenamesTo(computed_deps)
self.assertEquals(set(computed_deps),
set([os.path.normpath('/tmp/a/b/my_component.html'),
os.path.normpath('/tmp/a/something.jpg')]))
f = six.StringIO()
ctl = html_generation_controller.HTMLGenerationController()
my_component.AppendHTMLContentsToFile(f, ctl)
html = f.getvalue().rstrip()
# FIXME: This is apparently not used.
expected_html = """
.some-rule {
background-image: url(data:image/jpg;base64,anBnZGF0YQ==);
}
""".rstrip()
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from ez_setup import use_setuptools
use_setuptools()
import os
import sys
import imp
import subprocess
from glob import glob
## Python 2.6 subprocess.check_output compatibility. Thanks Greg Hewgill!
if 'check_output' not in dir(subprocess):
def check_output(cmd_args, *args, **kwargs):
proc = subprocess.Popen(
cmd_args, *args,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
out, err = proc.communicate()
if proc.returncode != 0:
            raise subprocess.CalledProcessError(proc.returncode, cmd_args)
return out
subprocess.check_output = check_output
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
from distutils import spawn
try:
import colorama
colorama.init() # Initialize colorama on Windows
except ImportError:
# Don't require colorama just for running paver tasks. This allows us to
# run `paver install' without requiring the user to first have colorama
# installed.
pass
# Add the current directory to the module search path.
sys.path.insert(0, os.path.abspath('.'))
## Constants
CODE_DIRECTORY = 'temposeqcount'
DOCS_DIRECTORY = 'docs'
TESTS_DIRECTORY = 'tests'
PYTEST_FLAGS = ['--doctest-modules']
# Import metadata. Normally this would just be:
#
# from temposeqcount import metadata
#
# However, when we do this, we also import `temposeqcount/__init__.py'. If this
# imports names from some other modules and these modules have third-party
# dependencies that need installing (which happens after this file is run), the
# script will crash. What we do instead is to load the metadata module by path
# instead, effectively side-stepping the dependency problem. Please make sure
# metadata has no dependencies, otherwise they will need to be added to
# the setup_requires keyword.
metadata = imp.load_source(
'metadata', os.path.join(CODE_DIRECTORY, 'metadata.py'))
## Miscellaneous helper functions
def get_project_files():
"""Retrieve a list of project files, ignoring hidden files.
:return: sorted list of project files
:rtype: :class:`list`
"""
if is_git_project() and has_git():
return get_git_project_files()
project_files = []
for top, subdirs, files in os.walk('.'):
        # Prune hidden directories in place so os.walk() skips them; removing
        # items from a list while iterating over it can skip entries.
        subdirs[:] = [d for d in subdirs if not d.startswith('.')]
for f in files:
if f.startswith('.'):
continue
project_files.append(os.path.join(top, f))
return project_files
def is_git_project():
return os.path.isdir('.git')
def has_git():
return bool(spawn.find_executable("git"))
def get_git_project_files():
"""Retrieve a list of all non-ignored files, including untracked files,
excluding deleted files.
:return: sorted list of git project files
:rtype: :class:`list`
"""
cached_and_untracked_files = git_ls_files(
'--cached', # All files cached in the index
'--others', # Untracked files
# Exclude untracked files that would be excluded by .gitignore, etc.
'--exclude-standard')
uncommitted_deleted_files = git_ls_files('--deleted')
# Since sorting of files in a set is arbitrary, return a sorted list to
# provide a well-defined order to tools like flake8, etc.
return sorted(cached_and_untracked_files - uncommitted_deleted_files)
def git_ls_files(*cmd_args):
"""Run ``git ls-files`` in the top-level project directory. Arguments go
directly to execution call.
:return: set of file names
:rtype: :class:`set`
"""
cmd = ['git', 'ls-files']
cmd.extend(cmd_args)
return set(subprocess.check_output(cmd).splitlines())
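# For example, get_git_project_files() above ends up running roughly:
#   git ls-files --cached --others --exclude-standard
#   git ls-files --deleted
# and returns the sorted set difference of the two results.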
def print_success_message(message):
"""Print a message indicating success in green color to STDOUT.
:param message: the message to print
:type message: :class:`str`
"""
try:
import colorama
print(colorama.Fore.GREEN + message + colorama.Fore.RESET)
except ImportError:
print(message)
def print_failure_message(message):
"""Print a message indicating failure in red color to STDERR.
:param message: the message to print
:type message: :class:`str`
"""
try:
import colorama
print(colorama.Fore.RED + message + colorama.Fore.RESET,
file=sys.stderr)
except ImportError:
print(message, file=sys.stderr)
def read(filename):
"""Return the contents of a file.
:param filename: file path
:type filename: :class:`str`
:return: the file's content
:rtype: :class:`str`
"""
with open(os.path.join(os.path.dirname(__file__), filename)) as f:
return f.read()
def _lint():
"""Run lint and return an exit code."""
# Flake8 doesn't have an easy way to run checks using a Python function, so
# just fork off another process to do it.
# Python 3 compat:
    # - Subprocess output is returned as byte strings, so we need to pass a
    #   byte string to endswith.
project_python_files = [filename for filename in get_project_files()
if filename.endswith(b'.py')]
retcode = subprocess.call(
['flake8', '--max-complexity=10'] + project_python_files)
if retcode == 0:
print_success_message('No style errors')
return retcode
def _test():
"""Run the unit tests.
:return: exit code
"""
# Make sure to import pytest in this function. For the reason, see here:
# <http://pytest.org/latest/goodpractises.html#integration-with-setuptools-test-commands> # NOPEP8
import pytest
# This runs the unit tests.
# It also runs doctest, but only on the modules in TESTS_DIRECTORY.
return pytest.main(PYTEST_FLAGS + [TESTS_DIRECTORY])
def _test_all():
"""Run lint and tests.
:return: exit code
"""
return _lint() + _test()
# The following code is to allow tests to be run with `python setup.py test'.
# The main reason to make this possible is to allow tests to be run as part of
# Setuptools' automatic run of 2to3 on the source code. The recommended way to
# run tests is still `paver test_all'.
# See <http://pythonhosted.org/setuptools/python3.html>
# Code based on <http://pytest.org/latest/goodpractises.html#integration-with-setuptools-test-commands> # NOPEP8
class TestAllCommand(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
# These are fake, and just set to appease distutils and setuptools.
self.test_suite = True
self.test_args = []
def run_tests(self):
raise SystemExit(_test_all())
# define install_requires for specific Python versions
python_version_specific_requires = []
# as of Python >= 2.7 and >= 3.2, the argparse module is maintained within
# the Python standard library, otherwise we install it as a separate package
if sys.version_info < (2, 7) or (3, 0) <= sys.version_info < (3, 3):
python_version_specific_requires.append('argparse')
# See here for more options:
# <http://pythonhosted.org/setuptools/setuptools.html>
setup_dict = dict(
name=metadata.package,
version=metadata.version,
author=metadata.authors[0],
author_email=metadata.emails[0],
maintainer=metadata.authors[0],
maintainer_email=metadata.emails[0],
url=metadata.url,
description=metadata.description,
long_description=read('README.rst'),
# Find a list of classifiers here:
# <http://pypi.python.org/pypi?%3Aaction=list_classifiers>
classifiers=[
'Development Status :: 1 - Planning',
'Environment :: Console',
'Intended Audience :: Biologist',
'License :: OSI Approved :: GPL License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Documentation',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Software Distribution',
],
packages=find_packages(exclude=(TESTS_DIRECTORY,)),
install_requires=[
# your module dependencies
] + python_version_specific_requires,
# Allow tests to be run with `python setup.py test'.
tests_require=[
'pytest==2.5.1',
'mock==1.0.1',
'flake8==2.1.0',
],
cmdclass={'test': TestAllCommand},
zip_safe=False, # don't use eggs
entry_points={
'console_scripts': [
'temposeqcount_cli = temposeqcount.main:main'
],
# if you have a gui, use this
# 'gui_scripts': [
# 'temposeqcount_gui = temposeqcount.gui:entry_point'
# ]
},
# These all get copied to our installation's bin folder for us
scripts = [
'temposeqcount/download/seqtk/seqtk',
'temposeqcount/download/samtools/samtools',
'temposeqcount/download/help2man-1.43.3/help2man',
'temposeqcount/download/graphviz-2.41.20170103.1755/bin/dot',
] + glob('temposeqcount/lib/fastx_toolkit/bin/fast*') +
glob('temposeqcount/download/libtool-2.4/bin/lib*') +
glob('temposeqcount/download/texinfo-6.1/bin/*' ) +
glob('temposeqcount/download/graphviz-2.41.20170103.1755/bin/*'),
package_data = {
'temposeqcount': ['files/*'],
}
)
def runTasks():
"""run paver tasks"""
cmd = "paver prepare"
return subprocess.Popen(cmd, shell=True).communicate()
def main():
#setup(**setup_dict)
import os
try:
import paver.tasks
except ImportError:
if os.path.exists("paver-minilib.zip"):
import sys
sys.path.insert(0, "paver-minilib.zip")
else:
raise ValueError("No paver in the path")
import paver.tasks
paver.tasks.main()
runTasks()
if __name__ == '__main__':
main()
|
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import operator
import os
import platform
import sys
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from pyparsing import ( # noqa: N817
Forward,
Group,
Literal as L,
ParseException,
ParseResults,
QuotedString,
ZeroOrMore,
stringEnd,
stringStart,
)
from .specifiers import InvalidSpecifier, Specifier
__all__ = [
"InvalidMarker",
"UndefinedComparison",
"UndefinedEnvironmentName",
"Marker",
"default_environment",
]
Operator = Callable[[str, str], bool]
class InvalidMarker(ValueError):
"""
An invalid marker was found, users should refer to PEP 508.
"""
class UndefinedComparison(ValueError):
"""
An invalid operation was attempted on a value that doesn't support it.
"""
class UndefinedEnvironmentName(ValueError):
"""
A name was attempted to be used that does not exist inside of the
environment.
"""
class Node:
def __init__(self, value: Any) -> None:
self.value = value
def __str__(self) -> str:
return str(self.value)
def __repr__(self) -> str:
return f"<{self.__class__.__name__}('{self}')>"
def serialize(self) -> str:
raise NotImplementedError
class Variable(Node):
def serialize(self) -> str:
return str(self)
class Value(Node):
def serialize(self) -> str:
return f'"{self}"'
class Op(Node):
def serialize(self) -> str:
return str(self)
VARIABLE = (
L("implementation_version")
| L("platform_python_implementation")
| L("implementation_name")
| L("python_full_version")
| L("platform_release")
| L("platform_version")
| L("platform_machine")
| L("platform_system")
| L("python_version")
| L("sys_platform")
| L("os_name")
| L("os.name") # PEP-345
| L("sys.platform") # PEP-345
| L("platform.version") # PEP-345
| L("platform.machine") # PEP-345
| L("platform.python_implementation") # PEP-345
| L("python_implementation") # undocumented setuptools legacy
| L("extra") # PEP-508
)
ALIASES = {
"os.name": "os_name",
"sys.platform": "sys_platform",
"platform.version": "platform_version",
"platform.machine": "platform_machine",
"platform.python_implementation": "platform_python_implementation",
"python_implementation": "platform_python_implementation",
}
VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
VERSION_CMP = (
L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
)
MARKER_OP = VERSION_CMP | L("not in") | L("in")
MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
MARKER_VALUE = QuotedString("'") | QuotedString('"')
MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
BOOLOP = L("and") | L("or")
MARKER_VAR = VARIABLE | MARKER_VALUE
MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
MARKER_EXPR = Forward()
MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
MARKER = stringStart + MARKER_EXPR + stringEnd
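# Rough sketch of how a marker string maps onto this grammar (illustrative):
#   "python_version >= '3.6' and os_name == 'posix'"
# parses, after _coerce_parse_result() below, into roughly
#   [(Variable('python_version'), Op('>='), Value('3.6')),
#    'and',
#    (Variable('os_name'), Op('=='), Value('posix'))]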
def _coerce_parse_result(results: Union[ParseResults, List[Any]]) -> List[Any]:
if isinstance(results, ParseResults):
return [_coerce_parse_result(i) for i in results]
else:
return results
def _format_marker(
marker: Union[List[str], Tuple[Node, ...], str], first: Optional[bool] = True
) -> str:
assert isinstance(marker, (list, tuple, str))
    # Sometimes we have a structure like [[...]] which is a single item list
    # where the single item is itself its own list. In that case we want to
    # skip the rest of this function so that we don't get extraneous () on
    # the outside.
if (
isinstance(marker, list)
and len(marker) == 1
and isinstance(marker[0], (list, tuple))
):
return _format_marker(marker[0])
if isinstance(marker, list):
inner = (_format_marker(m, first=False) for m in marker)
if first:
return " ".join(inner)
else:
return "(" + " ".join(inner) + ")"
elif isinstance(marker, tuple):
return " ".join([m.serialize() for m in marker])
else:
return marker
_operators: Dict[str, Operator] = {
"in": lambda lhs, rhs: lhs in rhs,
"not in": lambda lhs, rhs: lhs not in rhs,
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
try:
spec = Specifier("".join([op.serialize(), rhs]))
except InvalidSpecifier:
pass
else:
return spec.contains(lhs)
oper: Optional[Operator] = _operators.get(op.serialize())
if oper is None:
raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.")
return oper(lhs, rhs)
class Undefined:
pass
_undefined = Undefined()
def _get_env(environment: Dict[str, str], name: str) -> str:
value: Union[str, Undefined] = environment.get(name, _undefined)
if isinstance(value, Undefined):
raise UndefinedEnvironmentName(
f"{name!r} does not exist in evaluation environment."
)
return value
def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool:
groups: List[List[bool]] = [[]]
for marker in markers:
assert isinstance(marker, (list, tuple, str))
if isinstance(marker, list):
groups[-1].append(_evaluate_markers(marker, environment))
elif isinstance(marker, tuple):
lhs, op, rhs = marker
if isinstance(lhs, Variable):
lhs_value = _get_env(environment, lhs.value)
rhs_value = rhs.value
else:
lhs_value = lhs.value
rhs_value = _get_env(environment, rhs.value)
groups[-1].append(_eval_op(lhs_value, op, rhs_value))
else:
assert marker in ["and", "or"]
if marker == "or":
groups.append([])
return any(all(item) for item in groups)
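# Evaluation groups 'and'-ed terms together and starts a new group at every
# 'or', so markers for "A and B or C" become groups [[A, B], [C]] and the
# result any(all(group)) is equivalent to (A and B) or C.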
def format_full_version(info: "sys._version_info") -> str:
version = "{0.major}.{0.minor}.{0.micro}".format(info)
kind = info.releaselevel
if kind != "final":
version += kind[0] + str(info.serial)
return version
def default_environment() -> Dict[str, str]:
iver = format_full_version(sys.implementation.version)
implementation_name = sys.implementation.name
return {
"implementation_name": implementation_name,
"implementation_version": iver,
"os_name": os.name,
"platform_machine": platform.machine(),
"platform_release": platform.release(),
"platform_system": platform.system(),
"platform_version": platform.version(),
"python_full_version": platform.python_version(),
"platform_python_implementation": platform.python_implementation(),
"python_version": ".".join(platform.python_version_tuple()[:2]),
"sys_platform": sys.platform,
}
class Marker:
def __init__(self, marker: str) -> None:
try:
self._markers = _coerce_parse_result(MARKER.parseString(marker))
except ParseException as e:
raise InvalidMarker(
f"Invalid marker: {marker!r}, parse error at "
f"{marker[e.loc : e.loc + 8]!r}"
)
def __str__(self) -> str:
return _format_marker(self._markers)
def __repr__(self) -> str:
return f"<Marker('{self}')>"
def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool:
"""Evaluate a marker.
Return the boolean from evaluating the given marker against the
environment. environment is an optional argument to override all or
part of the determined environment.
The environment is determined from the current Python process.
"""
current_environment = default_environment()
if environment is not None:
current_environment.update(environment)
return _evaluate_markers(self._markers, current_environment)
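# A hedged usage sketch (results depend on the running interpreter):
#   m = Marker("python_version >= '3.6' and sys_platform == 'linux'")
#   m.evaluate()                           # uses default_environment()
#   m.evaluate({"sys_platform": "win32"})  # override part of the environment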
|
|
# Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy as np
from six.moves import range
import tensorflow.compat.v1 as tf
from tensorflow.lite.experimental.examples.lstm import input_data
from tensorflow.lite.experimental.examples.lstm.rnn import bidirectional_dynamic_rnn
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
FLAGS = tf.compat.v1.flags.FLAGS
# Number of steps to train model.
# Setting this to 0 means no training at all; all the weights just keep their
# initial values. This helps keep the test small.
TRAIN_STEPS = 0
class BidirectionalSequenceRnnTest(test_util.TensorFlowTestCase):
def __init__(self, *args, **kwargs):
super(BidirectionalSequenceRnnTest, self).__init__(*args, **kwargs)
# Define constants
# Unrolled through 28 time steps
self.time_steps = 28
# Rows of 28 pixels
self.n_input = 28
# Learning rate for Adam optimizer
self.learning_rate = 0.001
    # MNIST is meant to be classified into 10 classes (0-9).
self.n_classes = 10
# Batch size
self.batch_size = 16
# Rnn Units.
self.num_units = 16
def setUp(self):
super(BidirectionalSequenceRnnTest, self).setUp()
# Import MNIST dataset
data_dir = tempfile.mkdtemp(dir=FLAGS.test_tmpdir)
self.mnist = input_data.read_data_sets(
data_dir, fake_data=True, one_hot=True)
def buildRnnLayer(self):
return tf.keras.layers.StackedRNNCells([
tf.compat.v1.lite.experimental.nn.TfLiteRNNCell(
self.num_units, name="rnn1"),
tf.compat.v1.lite.experimental.nn.TfLiteRNNCell(
self.num_units, name="rnn2")
])
def buildModel(self,
fw_rnn_layer,
bw_rnn_layer,
is_dynamic_rnn,
is_inference,
use_sequence_length=False):
"""Build Mnist recognition model.
Args:
      fw_rnn_layer: The forward rnn layer, either a single rnn cell or a multi
        rnn cell.
      bw_rnn_layer: The backward rnn layer, either a single rnn cell or a multi
        rnn cell.
      is_dynamic_rnn: Use dynamic_rnn or not.
      is_inference: Whether the model is built for inference; if True, the
        batch size is 1 and sequence_length is dropped for the static RNN path.
      use_sequence_length: Whether to use sequence length or not. Defaults to
        False.
Returns:
A tuple containing:
- Input tensor of the model.
- Prediction tensor of the model.
- Output class tensor of the model.
"""
# Weights and biases for output softmax layer.
out_weights = tf.Variable(
tf.random.normal([self.num_units * 2, self.n_classes]))
out_bias = tf.Variable(tf.random.normal([self.n_classes]))
batch_size = self.batch_size
if is_inference:
batch_size = 1
# input image placeholder
x = tf.compat.v1.placeholder(
"float", [batch_size, self.time_steps, self.n_input],
name="INPUT_IMAGE")
sequence_length = None
if use_sequence_length:
sequence_length = [self.time_steps] * batch_size
if is_dynamic_rnn:
rnn_inputs = tf.transpose(x, [1, 0, 2])
outputs, _ = bidirectional_dynamic_rnn(
fw_rnn_layer,
bw_rnn_layer,
rnn_inputs,
sequence_length,
dtype="float32",
time_major=True)
fw_outputs, bw_outputs = outputs
output = tf.concat([fw_outputs, bw_outputs], 2)
output = tf.unstack(output, axis=0)
output = output[-1]
else:
rnn_inputs = tf.unstack(x, self.time_steps, 1)
      # Sequence length is not supported for the static RNN since we don't
      # have a wrapper for it. During training we can still pass
      # sequence_length, but at inference time we change it to None.
if is_inference:
sequence_length = None
outputs, _, _ = tf.compat.v1.nn.static_bidirectional_rnn(
fw_rnn_layer,
bw_rnn_layer,
rnn_inputs,
dtype="float32",
sequence_length=sequence_length)
output = outputs[-1]
# Compute logits by multiplying output of shape [batch_size,num_units*2]
# by the softmax layer's out_weight of shape [num_units*2,n_classes]
# plus out_bias
prediction = tf.matmul(output, out_weights) + out_bias
output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS")
return x, prediction, output_class
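    # Shape sketch for the tensors returned above (batch is self.batch_size,
    # or 1 when is_inference): x is [batch, time_steps, n_input], the
    # concatenated fw/bw RNN output is [batch, num_units * 2], and
    # prediction / output_class are [batch, n_classes].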
def trainModel(self, x, prediction, output_class, sess):
"""Train the model.
Args:
x: The input tensor.
prediction: The prediction class tensor.
output_class: The output tensor.
sess: The graph session.
"""
# input label placeholder
y = tf.placeholder("float", [None, self.n_classes])
# Loss function
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
# Optimization
opt = tf.train.AdamOptimizer(
learning_rate=self.learning_rate).minimize(loss)
# Initialize variables
init = tf.compat.v1.global_variables_initializer()
sess.run(init)
for _ in range(TRAIN_STEPS):
batch_x, batch_y = self.mnist.train.next_batch(
batch_size=self.batch_size, shuffle=False, fake_data=True)
batch_x = np.array(batch_x)
batch_y = np.array(batch_y)
batch_x = batch_x.reshape((self.batch_size, self.time_steps,
self.n_input))
sess.run(opt, feed_dict={x: batch_x, y: batch_y})
def saveAndRestoreModel(self,
fw_rnn_layer,
bw_rnn_layer,
sess,
saver,
is_dynamic_rnn,
use_sequence_length=False):
"""Saves and restores the model to mimic the most common use case.
Args:
fw_rnn_layer: The forward rnn layer either a single rnn cell or a multi
rnn cell.
bw_rnn_layer: The backward rnn layer either a single rnn cell or a multi
rnn cell.
sess: Old session.
saver: Saver created by tf.compat.v1.train.Saver()
is_dynamic_rnn: Use dynamic_rnn or not.
use_sequence_length: Whether to use sequence length or not. Default to
False.
Returns:
A tuple containing:
- Input tensor of the restored model.
- Prediction tensor of the restored model.
      - Output tensor, which is the softmax result of the prediction tensor.
- new session of the restored model.
"""
model_dir = tempfile.mkdtemp(dir=FLAGS.test_tmpdir)
saver.save(sess, model_dir)
# Reset the graph.
tf.compat.v1.reset_default_graph()
x, prediction, output_class = self.buildModel(
fw_rnn_layer, bw_rnn_layer, is_dynamic_rnn, True, use_sequence_length)
new_sess = tf.compat.v1.Session()
saver = tf.train.Saver()
saver.restore(new_sess, model_dir)
return x, prediction, output_class, new_sess
def getInferenceResult(self, x, output_class, sess):
"""Get inference result given input tensor and output tensor.
Args:
x: The input tensor.
output_class: The output tensor.
sess: Current session.
Returns:
A tuple containing:
- Input of the next batch, batch size is 1.
- Expected output.
"""
b1, _ = self.mnist.train.next_batch(batch_size=1, fake_data=True)
b1 = np.array(b1, dtype=np.dtype("float32"))
sample_input = np.reshape(b1, (1, self.time_steps, self.n_input))
expected_output = sess.run(output_class, feed_dict={x: sample_input})
return sample_input, expected_output
def tfliteInvoke(self,
sess,
test_inputs,
input_tensor,
output_tensor,
use_mlir_converter=False):
"""Get tflite inference result.
    This method converts the TensorFlow session to a TFLite model, then runs
    TFLite inference on the given inputs and returns the results.
Args:
sess: Current tensorflow session.
test_inputs: The test inputs for tflite.
input_tensor: The input tensor of tensorflow graph.
output_tensor: The output tensor of tensorflow graph.
use_mlir_converter: Whether or not to use MLIRConverter to convert the
model.
Returns:
The tflite inference result.
"""
converter = tf.compat.v1.lite.TFLiteConverter.from_session(
sess, [input_tensor], [output_tensor])
    # Configure the converter before calling convert() so the flag takes effect.
    converter.experimental_new_converter = use_mlir_converter
    tflite = converter.convert()
interpreter = tf.lite.Interpreter(model_content=tflite)
interpreter.allocate_tensors()
input_index = interpreter.get_input_details()[0]["index"]
interpreter.set_tensor(input_index, test_inputs)
interpreter.invoke()
output_index = interpreter.get_output_details()[0]["index"]
result = interpreter.get_tensor(output_index)
    # Reset all variables so they will not pollute other inferences.
interpreter.reset_all_variables()
return result
def testStaticRnnMultiRnnCell(self):
sess = tf.compat.v1.Session()
x, prediction, output_class = self.buildModel(
self.buildRnnLayer(), self.buildRnnLayer(), False, is_inference=False)
self.trainModel(x, prediction, output_class, sess)
saver = tf.compat.v1.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildRnnLayer(), self.buildRnnLayer(), sess, saver, False)
test_inputs, expected_output = self.getInferenceResult(
x, output_class, new_sess)
# Test Toco-converted model.
result = self.tfliteInvoke(new_sess, test_inputs, x, output_class, False)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
@test_util.enable_control_flow_v2
def testDynamicRnnMultiRnnCell(self):
sess = tf.compat.v1.Session()
x, prediction, output_class = self.buildModel(
self.buildRnnLayer(), self.buildRnnLayer(), True, is_inference=False)
self.trainModel(x, prediction, output_class, sess)
saver = tf.compat.v1.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildRnnLayer(),
self.buildRnnLayer(),
sess,
saver,
is_dynamic_rnn=True)
test_inputs, expected_output = self.getInferenceResult(
x, output_class, new_sess)
# Test Toco-converted model.
result = self.tfliteInvoke(new_sess, test_inputs, x, output_class, False)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
if __name__ == "__main__":
tf.disable_v2_behavior()
test.main()
|
|
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS as TCP
from django import VERSION as DJANGO_VERSION
import socket
import os
import warnings
from urlparse import urlparse
# Django settings for XOS.
from config import Config
from config import set_override
config = Config()
# Override the config from the environment. This is used to leverage the LINK
# capability of docker. It would be far better to use DNS, and that can be
# done in environments like kubernetes. Look for environment variables that
# match the link pattern and set the appropriate overrides. It is expected
# that the set of overrides will be expanded as needed.
def overrideDbSettings(v):
parsed = urlparse(v)
config.db_host = parsed.hostname
config.db_port = parsed.port
env_to_config_dict = {
"XOS_DB_PORT": overrideDbSettings
}
for key, ofunc in env_to_config_dict.items():
if key in os.environ:
ofunc(os.environ[key])
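# For example (hypothetical value), XOS_DB_PORT="tcp://172.17.0.5:5432" would
# set config.db_host to "172.17.0.5" and config.db_port to 5432 via
# overrideDbSettings() above.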
GEOIP_PATH = "/usr/share/GeoIP"
XOS_DIR = "/opt/xos"
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
# LOGIN_REDIRECT_URL = '/admin/core/user'
LOGIN_REDIRECT_URL = '/admin/loggedin/'
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': config.db_name, # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': config.db_user,
'PASSWORD': config.db_password,
# 'HOST': config.db_host, # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'HOST': 'xos_db', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': config.db_port, # Set to empty string for default.
}
}
AUTH_USER_MODEL = 'core.User'
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["*"]
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/New_York'
# Verbose warnings when a naive datetime is used, gives a traceback
# from: https://docs.djangoproject.com/en/1.9/topics/i18n/timezones/#code
warnings.filterwarnings(
'error', r"DateTimeField .* received a naive datetime",
RuntimeWarning, r'django\.db\.models\.fields')
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = '/var/www/html/files/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/files/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = ( XOS_DIR + "/core/static/",
XOS_DIR + "/core/xoslib/static/",
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'i0=a)c7_#2)5m%k_fu#%53xap$tlqc+#&z5as+bl7&)(@be_f9'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'core.middleware.GlobalRequestMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'xos.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'xos.wsgi.application'
# Default: 'csrftoken'
CSRF_COOKIE_NAME = 'xoscsrftoken'
# Default: 'django_language'
LANGUAGE_COOKIE_NAME = 'xos_django_language'
# Default: 'sessionid'
SESSION_COOKIE_NAME = 'xossessionid'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
XOS_DIR + "/templates",
XOS_DIR + "/core/xoslib/templates",
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
# 'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'suit',
'xos.apps.MyDjangoSuitConfig',
'xos.admin_customize',
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
'rest_framework',
'django_extensions',
'core',
'services.hpc',
'services.mcord',
'services.requestrouter',
'services.syndicate_storage',
'geoposition',
# 'rest_framework_swagger',
)
# add services that were configured by xosbuilder to INSTALLED_APPS
if os.path.exists("/opt/xos/xos/xosbuilder_app_list"):
for line in file("/opt/xos/xos/xosbuilder_app_list").readlines():
line = line.strip()
if line:
INSTALLED_APPS = list(INSTALLED_APPS) + [line]
if DJANGO_VERSION[1] >= 7:
# if django >= 1.7, then change the admin module
INSTALLED_APPS = list(INSTALLED_APPS)
INSTALLED_APPS[INSTALLED_APPS.index('django.contrib.admin')] = 'django.contrib.admin.apps.SimpleAdminConfig'
# Added for django-suit form
TEMPLATE_CONTEXT_PROCESSORS = TCP + (
'django.core.context_processors.request',
'core.context_processors.xos',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': '/var/log/django_debug.log',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
},
'loggers': {
'django': {
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True,
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},'django.db.backends': {
'level': 'WARNING',
},
}
}
RESTAPI_HOSTNAME = getattr(config, "server_restapi_hostname", getattr(config, "server_hostname", socket.gethostname()))
RESTAPI_PORT = int(getattr(config, "server_restapi_port", getattr(config, "server_port", "8000")))
BIGQUERY_TABLE = getattr(config, "bigquery_table", "demoevents")
XOS_BRANDING_NAME = getattr(config, "gui_branding_name", "OpenCloud")
XOS_BRANDING_CSS = getattr(config, "gui_branding_css", None)
XOS_BRANDING_ICON = getattr(config, "gui_branding_icon", "/static/logo.png")
XOS_BRANDING_FAVICON = getattr(config, "gui_branding_favicon", "/static/favicon.png")
XOS_BRANDING_BG = getattr(config, "gui_branding_bg", "/static/bg.png")
DISABLE_MINIDASHBOARD = getattr(config, "gui_disable_minidashboard", False)
ENCRYPTED_FIELDS_KEYDIR = XOS_DIR + '/private_keys'
ENCRYPTED_FIELD_MODE = 'ENCRYPT'
STATISTICS_DRIVER = getattr(config, "statistics_driver", "ceilometer")
# prevents warnings on django 1.7
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# API key for Google Maps, created by zdw on 2016-06-29. Testing only, not for production
GEOPOSITION_GOOGLE_MAPS_API_KEY = 'AIzaSyBWAHP9mvLqWLRkVqK8o5wMskaIe9w7DaM'
|
|
# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Module implementing low-level socket communication with MySQL servers.
"""
from collections import deque
import socket
import struct
import sys
import zlib
try:
import ssl
except:
# If import fails, we don't have SSL support.
pass
from . import constants, errors
from .catch23 import PY2, init_bytearray, struct_unpack
def _strioerror(err):
"""Reformat the IOError error message
This function reformats the IOError error message.
"""
if not err.errno:
return str(err)
return '{errno} {strerr}'.format(errno=err.errno, strerr=err.strerror)
def _prepare_packets(buf, pktnr):
"""Prepare a packet for sending to the MySQL server"""
pkts = []
pllen = len(buf)
maxpktlen = constants.MAX_PACKET_LENGTH
while pllen > maxpktlen:
pkts.append(b'\xff\xff\xff' + struct.pack('<B', pktnr)
+ buf[:maxpktlen])
buf = buf[maxpktlen:]
pllen = len(buf)
pktnr = pktnr + 1
pkts.append(struct.pack('<I', pllen)[0:3]
+ struct.pack('<B', pktnr) + buf)
return pkts
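# Each plain MySQL packet produced above is framed as:
#   3 bytes  little-endian payload length
#   1 byte   packet/sequence number
#   N bytes  payload (split into MAX_PACKET_LENGTH-sized chunks when larger)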
class BaseMySQLSocket(object):
"""Base class for MySQL socket communication
    This class should not be used directly but subclassed, overriding at
    least the open_connection() method. Examples of subclasses are
mysql.connector.network.MySQLTCPSocket
mysql.connector.network.MySQLUnixSocket
"""
def __init__(self):
self.sock = None # holds the socket connection
self._connection_timeout = None
self._packet_number = -1
self._packet_queue = deque()
self.recvsize = 8192
@property
def next_packet_number(self):
"""Increments the packet number"""
self._packet_number = self._packet_number + 1
if self._packet_number > 255:
self._packet_number = 0
return self._packet_number
def open_connection(self):
"""Open the socket"""
raise NotImplementedError
def get_address(self):
"""Get the location of the socket"""
raise NotImplementedError
def shutdown(self):
"""Shut down the socket before closing it"""
try:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
del self._packet_queue
except (socket.error, AttributeError):
pass
def close_connection(self):
"""Close the socket"""
try:
self.sock.close()
del self._packet_queue
except (socket.error, AttributeError):
pass
def send_plain(self, buf, packet_number=None):
"""Send packets to the MySQL server"""
if packet_number is None:
self.next_packet_number # pylint: disable=W0104
else:
self._packet_number = packet_number
packets = _prepare_packets(buf, self._packet_number)
for packet in packets:
try:
if PY2:
self.sock.sendall(buffer(packet)) # pylint: disable=E0602
else:
self.sock.sendall(packet)
except IOError as err:
raise errors.OperationalError(
errno=2055, values=(self.get_address(), _strioerror(err)))
except AttributeError:
raise errors.OperationalError(errno=2006)
send = send_plain
def send_compressed(self, buf, packet_number=None):
"""Send compressed packets to the MySQL server"""
if packet_number is None:
self.next_packet_number # pylint: disable=W0104
else:
self._packet_number = packet_number
pktnr = self._packet_number
pllen = len(buf)
zpkts = []
maxpktlen = constants.MAX_PACKET_LENGTH
if pllen > maxpktlen:
pkts = _prepare_packets(buf, pktnr)
if PY2:
tmpbuf = bytearray()
for pkt in pkts:
tmpbuf += pkt
tmpbuf = buffer(tmpbuf) # pylint: disable=E0602
else:
tmpbuf = b''.join(pkts)
del pkts
seqid = 0
zbuf = zlib.compress(tmpbuf[:16384])
header = (struct.pack('<I', len(zbuf))[0:3]
+ struct.pack('<B', seqid)
+ b'\x00\x40\x00')
if PY2:
header = buffer(header) # pylint: disable=E0602
zpkts.append(header + zbuf)
tmpbuf = tmpbuf[16384:]
pllen = len(tmpbuf)
seqid = seqid + 1
while pllen > maxpktlen:
zbuf = zlib.compress(tmpbuf[:maxpktlen])
header = (struct.pack('<I', len(zbuf))[0:3]
+ struct.pack('<B', seqid)
+ b'\xff\xff\xff')
if PY2:
header = buffer(header) # pylint: disable=E0602
zpkts.append(header + zbuf)
tmpbuf = tmpbuf[maxpktlen:]
pllen = len(tmpbuf)
seqid = seqid + 1
if tmpbuf:
zbuf = zlib.compress(tmpbuf)
header = (struct.pack('<I', len(zbuf))[0:3]
+ struct.pack('<B', seqid)
+ struct.pack('<I', pllen)[0:3])
if PY2:
header = buffer(header) # pylint: disable=E0602
zpkts.append(header + zbuf)
del tmpbuf
else:
pkt = (struct.pack('<I', pllen)[0:3] +
struct.pack('<B', pktnr) + buf)
if PY2:
pkt = buffer(pkt) # pylint: disable=E0602
pllen = len(pkt)
if pllen > 50:
zbuf = zlib.compress(pkt)
zpkts.append(struct.pack('<I', len(zbuf))[0:3]
+ struct.pack('<B', 0)
+ struct.pack('<I', pllen)[0:3]
+ zbuf)
else:
header = (struct.pack('<I', pllen)[0:3]
+ struct.pack('<B', 0)
+ struct.pack('<I', 0)[0:3])
if PY2:
header = buffer(header) # pylint: disable=E0602
zpkts.append(header + pkt)
for zip_packet in zpkts:
try:
self.sock.sendall(zip_packet)
except IOError as err:
raise errors.OperationalError(
errno=2055, values=(self.get_address(), _strioerror(err)))
except AttributeError:
raise errors.OperationalError(errno=2006)
def recv_plain(self):
"""Receive packets from the MySQL server"""
try:
# Read the header of the MySQL packet, 4 bytes
packet = bytearray(4)
read = self.sock.recv_into(packet, 4)
if read != 4:
raise errors.InterfaceError(errno=2013)
# Save the packet number and payload length
self._packet_number = packet[3]
if PY2:
payload_len = struct.unpack_from(
"<I",
buffer(packet[0:3] + b'\x00'))[0] # pylint: disable=E0602
else:
payload_len = struct.unpack("<I", packet[0:3] + b'\x00')[0]
# Read the payload
rest = payload_len
packet.extend(bytearray(payload_len))
packet_view = memoryview(packet) # pylint: disable=E0602
packet_view = packet_view[4:]
while rest:
read = self.sock.recv_into(packet_view, rest)
if read == 0 and rest > 0:
raise errors.InterfaceError(errno=2013)
packet_view = packet_view[read:]
rest -= read
return packet
except IOError as err:
raise errors.OperationalError(
errno=2055, values=(self.get_address(), _strioerror(err)))
def recv_py26_plain(self):
"""Receive packets from the MySQL server"""
try:
# Read the header of the MySQL packet, 4 bytes
header = bytearray(b'')
while len(header) < 4:
chunk = self.sock.recv(4)
if not chunk:
raise errors.InterfaceError(errno=2013)
header += chunk
# Save the packet number and payload length
self._packet_number = header[3]
payload_len = struct_unpack("<I", header[0:3] + b'\x00')[0]
# Read the payload
rest = payload_len
payload = init_bytearray(b'')
while rest > 0:
chunk = self.sock.recv(rest)
if not chunk:
raise errors.InterfaceError(errno=2013)
payload += chunk
rest = payload_len - len(payload)
return header + payload
except IOError as err:
raise errors.OperationalError(
errno=2055, values=(self.get_address(), _strioerror(err)))
if sys.version_info[0:2] == (2, 6):
recv = recv_py26_plain
recv_plain = recv_py26_plain
else:
recv = recv_plain
def _split_zipped_payload(self, packet_bunch):
"""Split compressed payload"""
while packet_bunch:
payload_length = struct_unpack("<I",
packet_bunch[0:3] + b'\x00')[0]
self._packet_queue.append(packet_bunch[0:payload_length + 4])
packet_bunch = packet_bunch[payload_length + 4:]
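        # Compressed-protocol framing handled here and in recv_compressed():
        #   3 bytes  little-endian compressed payload length
        #   1 byte   sequence number
        #   3 bytes  little-endian uncompressed payload length (0 when the
        #            payload was sent uncompressed)
        # followed by the (possibly zlib-compressed) payload itself.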
def recv_compressed(self):
"""Receive compressed packets from the MySQL server"""
try:
return self._packet_queue.popleft()
except IndexError:
pass
header = bytearray(b'')
packets = []
try:
abyte = self.sock.recv(1)
while abyte and len(header) < 7:
header += abyte
abyte = self.sock.recv(1)
while header:
if len(header) < 7:
raise errors.InterfaceError(errno=2013)
zip_payload_length = struct_unpack("<I",
header[0:3] + b'\x00')[0]
payload_length = struct_unpack("<I", header[4:7] + b'\x00')[0]
zip_payload = init_bytearray(abyte)
while len(zip_payload) < zip_payload_length:
chunk = self.sock.recv(zip_payload_length
- len(zip_payload))
if len(chunk) == 0:
raise errors.InterfaceError(errno=2013)
zip_payload = zip_payload + chunk
if payload_length == 0:
self._split_zipped_payload(zip_payload)
return self._packet_queue.popleft()
packets.append(header + zip_payload)
if payload_length != 16384:
break
header = init_bytearray(b'')
abyte = self.sock.recv(1)
while abyte and len(header) < 7:
header += abyte
abyte = self.sock.recv(1)
except IOError as err:
raise errors.OperationalError(
errno=2055, values=(self.get_address(), _strioerror(err)))
tmp = init_bytearray(b'')
for packet in packets:
            payload_length = struct_unpack("<I", packet[4:7] + b'\x00')[0]
            if payload_length == 0:
                # Not compressed; append the raw payload bytes directly.
                tmp += packet[7:]
else:
if PY2:
tmp += zlib.decompress(
buffer(packet[7:])) # pylint: disable=E0602
else:
tmp += zlib.decompress(packet[7:])
self._split_zipped_payload(tmp)
del tmp
try:
return self._packet_queue.popleft()
except IndexError:
pass
def set_connection_timeout(self, timeout):
"""Set the connection timeout"""
self._connection_timeout = timeout
# pylint: disable=C0103
def switch_to_ssl(self, ca, cert, key, verify_cert=False):
"""Switch the socket to use SSL"""
if not self.sock:
raise errors.InterfaceError(errno=2048)
try:
if verify_cert:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
self.sock = ssl.wrap_socket(
self.sock, keyfile=key, certfile=cert, ca_certs=ca,
cert_reqs=cert_reqs, do_handshake_on_connect=False,
ssl_version=ssl.PROTOCOL_TLSv1)
self.sock.do_handshake()
except NameError:
raise errors.NotSupportedError(
"Python installation has no SSL support")
except (ssl.SSLError, IOError) as err:
raise errors.InterfaceError(
errno=2055, values=(self.get_address(), _strioerror(err)))
except NotImplementedError as err:
raise errors.InterfaceError(str(err))
# pylint: enable=C0103
class MySQLUnixSocket(BaseMySQLSocket):
"""MySQL socket class using UNIX sockets
Opens a connection through the UNIX socket of the MySQL Server.
"""
def __init__(self, unix_socket='/tmp/mysql.sock'):
super(MySQLUnixSocket, self).__init__()
self.unix_socket = unix_socket
def get_address(self):
return self.unix_socket
def open_connection(self):
try:
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.settimeout(self._connection_timeout)
self.sock.connect(self.unix_socket)
except IOError as err:
raise errors.InterfaceError(
errno=2002, values=(self.get_address(), _strioerror(err)))
except Exception as err:
raise errors.InterfaceError(str(err))
class MySQLTCPSocket(BaseMySQLSocket):
"""MySQL socket class using TCP/IP
Opens a TCP/IP connection to the MySQL Server.
"""
def __init__(self, host='127.0.0.1', port=3306, force_ipv6=False):
super(MySQLTCPSocket, self).__init__()
self.server_host = host
self.server_port = port
self.force_ipv6 = force_ipv6
self._family = 0
def get_address(self):
return "{0}:{1}".format(self.server_host, self.server_port)
def open_connection(self):
"""Open the TCP/IP connection to the MySQL server
"""
# Get address information
addrinfo = None
try:
addrinfos = socket.getaddrinfo(self.server_host,
self.server_port,
0, socket.SOCK_STREAM,
socket.SOL_TCP)
# If multiple results we favor IPv4, unless IPv6 was forced.
for info in addrinfos:
if self.force_ipv6 and info[0] == socket.AF_INET6:
addrinfo = info
break
elif info[0] == socket.AF_INET:
addrinfo = info
break
if self.force_ipv6 and not addrinfo:
raise errors.InterfaceError(
"No IPv6 address found for {0}".format(self.server_host))
if not addrinfo:
addrinfo = addrinfos[0]
except IOError as err:
raise errors.InterfaceError(
errno=2003, values=(self.get_address(), _strioerror(err)))
else:
(self._family, socktype, proto, _, sockaddr) = addrinfo
        # Instantiate the socket and connect
try:
self.sock = socket.socket(self._family, socktype, proto)
self.sock.settimeout(self._connection_timeout)
self.sock.connect(sockaddr)
except IOError as err:
raise errors.InterfaceError(
errno=2003, values=(self.get_address(), _strioerror(err)))
except Exception as err:
raise errors.OperationalError(str(err))
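# Rough usage sketch (illustrative only; host/port are placeholders and these
# classes are normally driven by the higher-level connection object rather
# than used directly):
#
#     sock = MySQLTCPSocket(host='127.0.0.1', port=3306)
#     sock.set_connection_timeout(10)
#     sock.open_connection()
#     greeting = sock.recv()    # first packet sent by the server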
|
|
import os
import re
import shutil
from gppylib.db import dbconn
from test.behave_utils.utils import check_schema_exists, check_table_exists, drop_table_if_exists
from gppylib.operations.backup_utils import get_lines_from_file
CREATE_MULTI_PARTITION_TABLE_SQL = """
CREATE TABLE %s.%s (trans_id int, date date, amount decimal(9,2), region text)
WITH (appendonly=true, orientation=column)
DISTRIBUTED BY (trans_id)
PARTITION BY RANGE (date)
SUBPARTITION BY LIST (region)
SUBPARTITION TEMPLATE
( SUBPARTITION usa VALUES ('usa'),
SUBPARTITION asia VALUES ('asia'),
SUBPARTITION europe VALUES ('europe'),
DEFAULT SUBPARTITION other_regions)
(START (date '2011-01-01') INCLUSIVE
END (date '2012-01-01') EXCLUSIVE
EVERY (INTERVAL '5 month'),
DEFAULT PARTITION outlying_dates)
"""
CREATE_PARTITION_TABLE_SQL = """
CREATE TABLE %s.%s (id int, date date) WITH (appendonly=true, orientation=column)
DISTRIBUTED BY (id)
PARTITION BY RANGE (date)
( START (date '2008-01-01') INCLUSIVE
END (date '2008-01-04') EXCLUSIVE
EVERY (INTERVAL '1 day'),
DEFAULT PARTITION default_dates);
"""
@given('there is a regular "{storage_type}" table "{tablename}" with column name list "{col_name_list}" and column type list "{col_type_list}" in schema "{schemaname}"')
def impl(context, storage_type, tablename, col_name_list, col_type_list, schemaname):
schemaname_no_quote = schemaname
if '"' in schemaname:
schemaname_no_quote = schemaname[1:-1]
if not check_schema_exists(context, schemaname_no_quote, context.dbname):
raise Exception("Schema %s does not exist in database %s" % (schemaname_no_quote, context.dbname))
drop_table_if_exists(context, '.'.join([schemaname, tablename]), context.dbname)
create_table_with_column_list(context.conn, storage_type, schemaname, tablename, col_name_list, col_type_list)
check_table_exists(context, context.dbname, '.'.join([schemaname, tablename]), table_type = storage_type)
@given('there is a hard coded ao partition table "{tablename}" with 4 child partitions in schema "{schemaname}"')
def impl(context, tablename, schemaname):
if not check_schema_exists(context, schemaname, context.dbname):
raise Exception("Schema %s does not exist in database %s" % (schemaname, context.dbname))
drop_table_if_exists(context, '.'.join([schemaname, tablename]), context.dbname)
dbconn.execSQL(context.conn, CREATE_PARTITION_TABLE_SQL % (schemaname, tablename))
context.conn.commit()
check_table_exists(context, context.dbname, '.'.join([schemaname, tablename]), table_type = 'ao')
@given('there is a hard coded multi-level ao partition table "{tablename}" with 4 mid-level and 16 leaf-level partitions in schema "{schemaname}"')
def impl(context, tablename, schemaname):
if not check_schema_exists(context, schemaname, context.dbname):
raise Exception("Schema %s does not exist in database %s" % (schemaname, context.dbname))
drop_table_if_exists(context, '.'.join([schemaname, tablename]), context.dbname)
dbconn.execSQL(context.conn, CREATE_MULTI_PARTITION_TABLE_SQL % (schemaname, tablename))
context.conn.commit()
check_table_exists(context, context.dbname, '.'.join([schemaname, tablename]), table_type = 'ao')
@given('no state files exist for database "{dbname}"')
def impl(context, dbname):
master_data_dir = os.environ.get('MASTER_DATA_DIRECTORY')
analyze_dir = os.path.join(master_data_dir, 'db_analyze', dbname)
if os.path.exists(analyze_dir):
shutil.rmtree(analyze_dir)
@given('a view "{view_name}" exists on table "{table_name}" in schema "{schema_name}"')
def impl(context, view_name, table_name, schema_name):
create_view_on_table(context.conn, schema_name, table_name, view_name)
@given('"{qualified_table}" appears in the latest state files')
@then('"{qualified_table}" should appear in the latest state files')
def impl(context, qualified_table):
found,filename = table_found_in_state_file(context.dbname, qualified_table)
if not found:
if filename == '':
assert False, "no state files found for database %s" % context.dbname
else:
assert False, "table %s not found in state file %s" % (qualified_table, os.path.basename(filename))
@given('columns "{col_name_list}" of table "{qualified_table}" appear in the latest column state file')
@then('columns "{col_name_list}" of table "{qualified_table}" should appear in the latest column state file')
def impl(context, col_name_list, qualified_table):
found,column,filename = column_found_in_state_file(context.dbname, qualified_table, col_name_list)
if not found:
if filename == '':
assert False, "no column state file found for database %s" % context.dbname
else:
assert False, "column(s) %s of table %s not found in state file %s" % (column, qualified_table, os.path.basename(filename))
@given('column "{col_name}" of table "{qualified_table}" does not appear in the latest column state file')
@then('column "{col_name}" of table "{qualified_table}" should not appear in the latest column state file')
def impl(context, col_name, qualified_table):
found,column,filename = column_found_in_state_file(context.dbname, qualified_table, col_name)
if found:
if filename == '':
assert False, "no column state file found for database %s" % context.dbname
else:
assert False, "unexpected column %s of table %s found in state file %s" % (column, qualified_table, os.path.basename(filename))
@then('output should contain either "{output1}" or "{output2}"')
def impl(context, output1, output2):
pat1 = re.compile(output1)
pat2 = re.compile(output2)
if not pat1.search(context.stdout_message) and not pat2.search(context.stdout_message):
err_str = "Expected stdout string '%s' or '%s', but found:\n'%s'" % (output1, output2, context.stdout_message)
raise Exception(err_str)
@then('output should not contain "{output1}"')
def impl(context, output1):
pat1 = re.compile(output1)
if pat1.search(context.stdout_message):
err_str = "Unexpected stdout string '%s', found:\n'%s'" % (output1, context.stdout_message)
raise Exception(err_str)
@then('output should contain both "{output1}" and "{output2}"')
def impl(context, output1, output2):
pat1 = re.compile(output1)
pat2 = re.compile(output2)
if not pat1.search(context.stdout_message) or not pat2.search(context.stdout_message):
err_str = "Expected stdout string '%s' and '%s', but found:\n'%s'" % (output1, output2, context.stdout_message)
raise Exception(err_str)
@given('table "{qualified_table}" does not appear in the latest state files')
def impl(context, qualified_table):
found,filename = table_found_in_state_file(context.dbname, qualified_table)
if found:
delete_table_from_state_files(context.dbname, qualified_table)
@given('some data is inserted into table "{tablename}" in schema "{schemaname}" with column type list "{column_type_list}"')
@when('some data is inserted into table "{tablename}" in schema "{schemaname}" with column type list "{column_type_list}"')
def impl(context, tablename, schemaname, column_type_list):
insert_data_into_table(context.conn, schemaname, tablename, column_type_list)
@given('some ddl is performed on table "{tablename}" in schema "{schemaname}"')
def impl(context, tablename, schemaname):
perform_ddl_on_table(context.conn, schemaname, tablename)
def table_found_in_state_file(dbname, qualified_table):
comma_name = ','.join(qualified_table.split('.'))
files = get_latest_analyze_state_files(dbname)
if len(files) == 0:
return False,""
state_file = ""
for state_file in files:
found = False
for line in get_lines_from_file(state_file):
if comma_name in line:
found = True
continue
if not found:
return False,state_file
return True,state_file
def column_found_in_state_file(dbname, qualified_table, col_name_list):
comma_name = ','.join(qualified_table.split('.'))
files = get_latest_analyze_state_files(dbname)
if len(files) == 0:
return False,"",""
for state_file in files:
if not "col_state_file" in state_file:
continue
for line in get_lines_from_file(state_file):
if comma_name in line:
for column in col_name_list.split(','):
if not column in line.split(',')[2:]:
return False,column,state_file
return True,"",state_file
return False,col_name_list,state_file
def delete_table_from_state_files(dbname, qualified_table):
comma_name = ','.join(qualified_table.split('.'))
files = get_latest_analyze_state_files(dbname)
for filename in files:
lines = get_lines_from_file(filename)
f = open(filename,"w")
for line in lines:
if not comma_name in line:
f.write(line)
f.close()
def get_latest_analyze_state_files(dbname):
"""
return the latest state files (absolute paths)
"""
master_data_dir = os.environ.get('MASTER_DATA_DIRECTORY')
analyze_dir = os.path.join(master_data_dir, 'db_analyze', dbname)
if not os.path.exists(analyze_dir):
return []
folders = sorted(os.listdir(analyze_dir), reverse=True)
if len(folders) == 0:
return []
state_files_dir = os.path.join(analyze_dir, folders[0])
files = os.listdir(state_files_dir)
if len(files) != 4:
raise Exception("Missing or unexpected state files in folder %s" % state_files_dir)
ret = []
for f in files:
if 'report' not in f:
ret.append(os.path.join(state_files_dir, f))
return ret
def create_table_with_column_list(conn, storage_type, schemaname, tablename, col_name_list, col_type_list):
col_name_list = col_name_list.strip().split(',')
col_type_list = col_type_list.strip().split(',')
col_list = ' (' + ','.join(['%s %s' % (x,y) for x,y in zip(col_name_list,col_type_list)]) + ') '
if storage_type.lower() == 'heap':
storage_str = ''
elif storage_type.lower() == 'ao':
storage_str = " with (appendonly=true) "
elif storage_type.lower() == 'co':
storage_str = " with (appendonly=true, orientation=column) "
else:
raise Exception("Invalid storage type")
query = 'CREATE TABLE %s.%s %s %s DISTRIBUTED RANDOMLY' % (schemaname, tablename, col_list, storage_str)
dbconn.execSQL(conn, query)
conn.commit()
def insert_data_into_table(conn, schemaname, tablename, col_type_list):
col_type_list = col_type_list.strip().split(',')
col_str = ','.join(["(random()*i)::%s" % x for x in col_type_list])
query = "INSERT INTO " + schemaname + '.' + tablename + " SELECT " + col_str + " FROM generate_series(1,100) i"
dbconn.execSQL(conn, query)
conn.commit()
def perform_ddl_on_table(conn, schemaname, tablename):
query = "ALTER TABLE " + schemaname + '.' + tablename + " ADD COLUMN tempcol int default 0"
dbconn.execSQL(conn, query)
query = "ALTER TABLE " + schemaname + '.' + tablename + " DROP COLUMN tempcol"
dbconn.execSQL(conn, query)
conn.commit()
def create_view_on_table(conn, schemaname, tablename, viewname):
query = "CREATE OR REPLACE VIEW " + schemaname + "." + viewname + " AS SELECT * FROM " + schemaname + "." + tablename
dbconn.execSQL(conn, query)
conn.commit()
|
|
#!/usr/bin/env python
import os
import pysam
import random
import logging
import argparse
import itertools
logger = logging.getLogger(__name__)
import multiprocessing as mp
from collections import defaultdict as dd
from bx.intervals.intersection import Intersecter, Interval
''' identify clusters of discordant read ends where one end is in BED file '''
class Genome:
def __init__(self, bamfn):
bam = pysam.AlignmentFile(bamfn, 'rb')
self.chrlen = {r: l for r,l in zip(bam.references, bam.lengths)}
self.bp = sum(bam.lengths)
def addpad(self, interval, pad):
''' pad interval such that it doesn't go out of bounds '''
chrom, start, end = interval
start = int(start) - int(pad)
end = int(end) + int(pad)
assert chrom in self.chrlen, "error padding interval %s, %s not a known chromosome" % (str(interval), chrom)
if start < 0: start = 0
if end > self.chrlen[chrom]: end = self.chrlen[chrom]
return (chrom, start, end)
def chunk(self, n, seed=None, sorted=True, pad=0, minlen=1e6):
''' break genome into n evenly-sized chunks, return n lists of (chrom, start, end) '''
chunklen = int(self.bp/n)
chunks = []
intervals = []
chunkleft = chunklen # track how much genome needs to go into each chunk
chromlist = list(self.chrlen.keys())
if sorted:
chromlist.sort()
else:
if seed is not None: random.seed(seed)
random.shuffle(chromlist)
for chrom in chromlist:
length = self.chrlen[chrom]
if length < minlen:
continue
lenleft = length
if length <= chunkleft:
chunkleft -= length
lenleft -= length
intervals.append( self.addpad((chrom, 0, length), pad) )
assert lenleft == 0
if chunkleft == 0:
chunkleft = chunklen
chunks.append(intervals)
intervals = []
else:
while lenleft > 0:
if lenleft >= chunkleft:
intervals.append( self.addpad((chrom, length-lenleft, length-lenleft+chunkleft), pad) )
lenleft -= chunkleft
chunkleft = chunklen
chunks.append(intervals)
intervals = []
else: # lenleft < chunkleft
intervals.append( self.addpad((chrom, length-lenleft, length), pad) )
chunkleft -= lenleft
lenleft -= lenleft
return chunks
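# Illustrative use of Genome.chunk() (not executed; 'sample.bam' is a
# placeholder path) -- this mirrors how main() splits work across processes:
#
#     g = Genome('sample.bam')
#     chunks = g.chunk(4, pad=5000)     # 4 lists of (chrom, start, end) tuples
#     for intervals in chunks:
#         pass                          # hand each list to a worker process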
class DiscoCoord:
def __init__(self, chrom, start, end, strand, mchrom, mstart, mend, mstrand, label, bam_name):
self.chrom = chrom
self.start = int(start)
self.end = int(end)
self.strand = strand
self.mchrom = mchrom
self.mstart = int(mstart)
self.mend = int(mend)
self.mstrand = mstrand
self.label = label
self.bam = bam_name.decode()
# if strand of genome element is '-', flip apparent mate strand
elt_str = self.label.split('|')[-1]
assert elt_str in ('+', '-'), 'malformed input BED: last three cols need to be class, family, orientation (+/-)'
if elt_str == '-': self.mstrand = flip(self.mstrand)
def __gt__(self, other):
if self.chrom == other.chrom:
return self.start > other.start
else:
return self.chrom > other.chrom
def __str__(self):
return '\t'.join(map(str, (self.bam, self.label, self.chrom, self.start, self.end, self.strand, self.mchrom, self.mstart, self.mend, self.mstrand)))
class DiscoInsCall:
def __init__(self, coord_list, chrom, start, end, strand, bamlist, mapscore, nonref):
self.coord_list = coord_list
self.chrom = chrom
self.start = int(start)
self.end = int(end)
self.strand = strand
self.bamlist = bamlist
self.length = len(coord_list)
self.mapscore = mapscore
self.nonref = nonref
def out(self, verbose=True):
output = ['#BEGIN']
output.append('%s\t%d\t%d\t%s\t%s\t%d\t%0.3f\t%s' % (self.chrom, self.start, self.end, self.strand, self.bamlist, self.length, self.mapscore, self.nonref))
if verbose:
for c in self.coord_list: output.append(str(c))
output.append('#END')
return '\n'.join(output)
def overlaps(self, other):
''' return true if overlap > 0 '''
return min(self.end, other.end) - max(self.start, other.start) > 0
def __gt__(self, other):
if self.chrom == other.chrom:
return self.start > other.start
else:
return self.chrom > other.chrom
def __str__(self):
return self.out(verbose=False)
def avgmap(maptabix, chrom, start, end):
''' return average mappability across chrom:start-end region; maptabix = pysam.Tabixfile '''
scores = []
if None in (start, end): return None
if chrom in maptabix.contigs:
for rec in maptabix.fetch(chrom, int(start), int(end)):
mchrom, mstart, mend, mscore = rec.strip().split()
mstart, mend = int(mstart), int(mend)
mscore = float(mscore)
while mstart < mend and mstart:
mstart += 1
if mstart >= int(start) and mstart <= int(end):
scores.append(mscore)
if len(scores) > 0:
return sum(scores) / float(len(scores))
else:
return 0.0
else:
return 0.0
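# Note: avgmap() effectively weights each mappability record by the number of
# its bases that fall inside the query window, so the result is a
# length-weighted average; e.g. a record (chrom, 100, 110, 0.5) overlapping a
# query of 105-120 contributes its score of 0.5 roughly once per overlapping
# base.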
def flip(strand):
if strand == '+': return '-'
if strand == '-': return '+'
def interval_forest(bed_file):
''' build dictionary of interval trees '''
forest = dd(Intersecter)
with open(bed_file, 'r') as bed:
for line in bed:
chrom, start, end = line.strip().split()[:3]
label = '|'.join(line.strip().split())
forest[chrom].add_interval(Interval(int(start), int(end), value=label))
return forest
def read_gen(bam, chrom=None, start=None, end=None):
if None in (chrom, start, end):
for read in bam.fetch():
yield read
else:
for read in bam.fetch(chrom, start, end):
yield read
def disco_get_coords(forest, bams, chrom=None, start=None, end=None, min_mapq=1, min_dist=10000):
coords = []
for bam in bams:
tick = 10000000
try:
tick = int((bam.mapped + bam.unmapped) * 0.01)
if tick == 0: tick = 1
logger.debug('outputting status every %d reads (1 pct)' % tick)
except ValueError as e:
logger.debug('no index found, outputting status every %d reads' % tick)
#for i, read in enumerate(bam.fetch()):
for i, read in enumerate(read_gen(bam, chrom=chrom, start=start, end=end)):
if not read.is_unmapped and not read.mate_is_unmapped and not read.is_duplicate:
rchrom = bam.getrname(read.reference_id)
rstart = read.reference_start
rend = read.reference_end
rstr = '+'
if read.is_reverse: rstr = '-'
mdist = abs(read.reference_start-read.next_reference_start)
if read.reference_id != read.next_reference_id: mdist=3e9
if read.mapq >= min_mapq and mdist >= min_dist:
mchrom = bam.getrname(read.next_reference_id)
mstart = read.next_reference_start
mend = mstart + len(read.seq)
mstr = '+'
if read.mate_is_reverse: mstr = '-'
if mchrom in forest:
for rec in forest[mchrom].find(mstart, mend):
coords.append(DiscoCoord(rchrom, rstart, rend, rstr, mchrom, mstart, mend, mstr, rec.value, os.path.basename(bam.filename)))
break
if i % tick == 0:
if read.is_unmapped:
logger.debug('parsed %d reads, last position unmapped' % i)
else:
logger.debug('parsed %d reads, last position: %s:%d' % (i, bam.getrname(read.tid), read.pos))
return coords
def disco_subcluster_by_label(cluster):
subclusters = dd(list)
for c in cluster:
subclusters[c.label.split('|')[3]].append(c)
return subclusters.values()
def disco_eval_strands(s):
left = 0
for i in range(1,len(s)):
if s[i] != s[0]:
left = i
break
right = 0
for i in range(len(s)-1, 0, -1):
if s[i] != s[-1]:
right = i+1
break
if left == right: return left
return 0
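# Worked examples: disco_eval_strands(['+', '+', '-', '-']) returns 2 -- the
# strands flip exactly once, at index 2, whether scanned from the left or the
# right; disco_eval_strands(['+', '-', '+']) returns 0 because the first flip
# from the left (index 1) and from the right (index 2) disagree.
# disco_filter_cluster() below keeps a cluster only when read strands and mate
# strands flip at the same index.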
def disco_filter_cluster(cluster):
s1 = disco_eval_strands([c.strand for c in cluster])
s2 = disco_eval_strands([c.mstrand for c in cluster])
if s1 == s2 and s1 > 0: return False
return True
def disco_infer_strand(cluster):
c1 = [c.strand for c in cluster]
c2 = [c.mstrand for c in cluster]
if c1[0] == c2[0] and c1[-1] == c2[-1]: return '-'
if c1[0] != c2[0] and c1[-1] != c2[-1]: return '+'
return 'NA'
def disco_output_cluster(cluster, forest, mapping, nonref, min_size=4, min_map=0.5):
if len(cluster) >= min_size:
cluster_chrom = cluster[0].chrom
cluster_start = cluster[0].start
if cluster_start < 0: cluster_start = 0
cluster_end = cluster[-1].end
bamlist = ','.join(list(set([c.bam for c in cluster])))
if cluster_chrom not in forest or len(list(forest[cluster_chrom].find(cluster_start, cluster_end))) == 0:
map_score = 0.0
if mapping is not None:
map_score = avgmap(mapping, cluster_chrom, cluster_start, cluster_end)
if not disco_filter_cluster(cluster) and (map_score >= float(min_map) or mapping is None):
nr = ['NA']
if nonref is not None:
if cluster_chrom in nonref.contigs:
nr = ['|'.join(te.split()) for te in nonref.fetch(cluster_chrom, cluster_start, cluster_end)]
else:
nr = ['NA']
if not nr: nr = ['NA']
nr = ','.join(nr)
return DiscoInsCall(cluster, cluster_chrom, cluster_start, cluster_end, disco_infer_strand(cluster), bamlist, map_score, nr)
def disco_cluster(forest, coords, mapping, nonref, min_size=4, min_map=0.5, max_spacing=250):
logger.debug('sorting coordinates')
coords.sort()
cluster = []
insertion_list = []
for c in coords:
if len(cluster) == 0:
cluster = [c]
else:
if c.chrom == cluster[-1].chrom and c.start - cluster[-1].end <= max_spacing:
cluster.append(c)
else:
for cluster in disco_subcluster_by_label(cluster):
                    i = disco_output_cluster(cluster, forest, mapping, nonref, min_size=min_size, min_map=min_map)
if i is not None: insertion_list.append(i)
cluster = [c]
for cluster in disco_subcluster_by_label(cluster):
i = disco_output_cluster(cluster, forest, mapping, nonref, min_size=min_size, min_map=min_map)
if i is not None: insertion_list.append(i)
return insertion_list
def disco_run_chunk(args, chunk):
''' chunk is a list of (chrom, start, end) tuples '''
bams = [pysam.AlignmentFile(bam, 'rb') for bam in args.bam.split(',')]
mapping = None
if args.mapping is not None:
mapping = pysam.Tabixfile(args.mapping)
nonref = None
if args.nonref is not None:
nonref = pysam.Tabixfile(args.nonref)
logger.debug('building interval trees for %s' % args.bed)
forest = interval_forest(args.bed)
coords = []
# operate over intervals in chunk
for interval in chunk:
chrom, start, end = interval
logger.debug('%s:%d-%d: fetching coordinates from %s' % (chrom, start, end, args.bam))
coords += disco_get_coords(forest, bams, chrom=chrom, start=start, end=end)
logger.debug('%s:%d-%d: found %d anchored reads' % (chrom, start, end, len(coords)))
return disco_cluster(forest, coords, mapping, nonref, min_size=int(args.minsize), min_map=float(args.minmap), max_spacing=int(args.maxspacing))
def disco_resolve_dups(ins_list):
''' resolve cases where the same insertion has been called in multiple chunks '''
ins_list.sort()
new_list = []
last = None
for ins in ins_list:
if last is None:
last = ins
elif last.overlaps(ins):
if ins.length > last.length:
last = ins
else:
new_list.append(last)
last = ins
if last is not None:
new_list.append(last)
return new_list
def main(args):
logger.setLevel(logging.DEBUG)
g = Genome(args.bam.split(',')[0])
chunks = g.chunk(int(args.procs), pad=5000)
pool = mp.Pool(processes=int(args.procs))
reslist = []
for chunk in chunks:
        res = pool.apply_async(disco_run_chunk, [args, chunk])
reslist.append(res)
ins_list = []
for res in reslist:
ins_list += res.get()
ins_list = disco_resolve_dups(ins_list)
for i in ins_list:
print(i.out())
if __name__ == '__main__':
FORMAT = '%(asctime)s %(message)s'
logging.basicConfig(format=FORMAT)
parser = argparse.ArgumentParser(description='identify clusters of discordant read ends where one end is in BED file')
parser.add_argument('--bam', required=True, help='can be comma-delimited for multiple BAMs')
parser.add_argument('--bed', required=True, help='locations of source locations (e.g. reference TEs) in genome')
parser.add_argument('--mapping', default=None, help='mappability track tabix')
parser.add_argument('--minmap', default=0.5, help='minimum region mappability score (default = 0.5)')
parser.add_argument('--nonref', default=None, help='known nonreference element annotation')
parser.add_argument('--minsize', default=4, help='minimum cluster size to output (default = 4)')
parser.add_argument('--maxspacing', default=250, help='maximum spacing between support reads (default=250)')
parser.add_argument('-p', '--procs', default=1, help='split work over multiple processes')
args = parser.parse_args()
main(args)
|
|
"""
Evaluation of Python code in |jedi| is based on three assumptions:
* The code uses as few side effects as possible. Jedi understands certain
list/tuple/set modifications, but there's no guarantee that Jedi detects
everything (list.append in different modules for example).
* No magic is being used:
- metaclasses
- ``setattr()`` / ``__import__()``
- writing to ``globals()``, ``locals()``, ``object.__dict__``
* The programmer is not a total dick, e.g. like `this
<https://github.com/davidhalter/jedi/issues/24>`_ :-)
The actual algorithm is based on a principle called lazy evaluation. If you
don't know about it, google it. That said, the typical entry point for static
analysis is calling ``eval_statement``. There's separate logic for
autocompletion in the API; the evaluator is all about evaluating an expression.
Now you need to understand what follows after ``eval_statement``. Let's
make an example::
import datetime
datetime.date.toda# <-- cursor here
First of all, this module doesn't care about completion. It really just cares
about ``datetime.date``. At the end of the procedure ``eval_statement`` will
return the ``date`` class.
To *visualize* this (simplified):
- ``Evaluator.eval_statement`` doesn't do much, because there's no assignment.
- ``Evaluator.eval_element`` cares for resolving the dotted path
- ``Evaluator.find_types`` searches for global definitions of datetime, which
it finds in the definition of an import, by scanning the syntax tree.
- Using the import logic, the datetime module is found.
- Now ``find_types`` is called again by ``eval_element`` to find ``date``
inside the datetime module.
Now what would happen if we wanted ``datetime.date.foo.bar``? Two more
calls to ``find_types``. However the second call would be ignored, because the
first one would return nothing (there's no foo attribute in ``date``).
What if the import contained another ``ExprStmt`` like this::
from foo import bar
Date = bar.baz
Well... You get it. Just another ``eval_statement`` recursion. It's really
easy. Python can obviously get way more complicated than this. To understand
tuple assignments, list comprehensions and everything else, a lot more code had
to be written.
Jedi has been tested very well, so you can just start modifying code. It's best
to write your own test first for your "new" feature. Don't be scared of
breaking stuff. As long as the tests pass, you're most likely to be fine.
I need to mention now that lazy evaluation is really good because it
only *evaluates* what needs to be *evaluated*. All the statements and modules
that are not used are just being ignored.
"""
import copy
import sys
from jedi.parser import tree
from jedi import debug
from jedi.common import unite
from jedi.evaluate import representation as er
from jedi.evaluate import imports
from jedi.evaluate import recursion
from jedi.evaluate import iterable
from jedi.evaluate.cache import memoize_default
from jedi.evaluate import stdlib
from jedi.evaluate import finder
from jedi.evaluate import compiled
from jedi.evaluate import precedence
from jedi.evaluate import param
from jedi.evaluate import helpers
from jedi.evaluate import pep0484
from jedi.evaluate.filters import TreeNameDefinition, ParamName
from jedi.evaluate.instance import AnonymousInstance, BoundMethod
class Evaluator(object):
def __init__(self, grammar, sys_path=None):
self.grammar = grammar
self.memoize_cache = {} # for memoize decorators
        self.modules = {}  # cache of already-imported modules, like `sys.modules`
self.compiled_cache = {} # see `evaluate.compiled.create()`
self.mixed_cache = {} # see `evaluate.compiled.mixed.create()`
self.analysis = []
self.dynamic_params_depth = 0
self.is_analysis = False
self.python_version = sys.version_info[:2]
if sys_path is None:
sys_path = sys.path
self.sys_path = copy.copy(sys_path)
try:
self.sys_path.remove('')
except ValueError:
pass
self.reset_recursion_limitations()
# Constants
self.BUILTINS = compiled.get_special_object(self, 'BUILTINS')
def reset_recursion_limitations(self):
self.recursion_detector = recursion.RecursionDetector()
self.execution_recursion_detector = recursion.ExecutionRecursionDetector(self)
def find_types(self, context, name_or_str, name_context, position=None,
search_global=False, is_goto=False):
"""
This is the search function. The most important part to debug.
`remove_statements` and `filter_statements` really are the core part of
this completion.
:param position: Position of the last statement -> tuple of line, column
:return: List of Names. Their parents are the types.
"""
f = finder.NameFinder(self, context, name_context, name_or_str, position)
filters = f.get_filters(search_global)
if is_goto:
return f.filter_name(filters)
return f.find(filters, attribute_lookup=not search_global)
def eval_statement(self, context, stmt, seek_name=None):
with recursion.execution_allowed(self, stmt) as allowed:
if allowed or context.get_root_context() == self.BUILTINS:
return self._eval_stmt(context, stmt, seek_name)
return set()
#@memoize_default(default=[], evaluator_is_first_arg=True)
@debug.increase_indent
def _eval_stmt(self, context, stmt, seek_name=None):
"""
The starting point of the completion. A statement always owns a call
list, which are the calls, that a statement does. In case multiple
names are defined in the statement, `seek_name` returns the result for
this name.
:param stmt: A `tree.ExprStmt`.
"""
debug.dbg('eval_statement %s (%s)', stmt, seek_name)
rhs = stmt.get_rhs()
types = self.eval_element(context, rhs)
if seek_name:
types = finder.check_tuple_assignments(self, types, seek_name)
first_operation = stmt.first_operation()
if first_operation not in ('=', None) and first_operation.type == 'operator':
# `=` is always the last character in aug assignments -> -1
operator = copy.copy(first_operation)
operator.value = operator.value[:-1]
name = str(stmt.get_defined_names()[0])
left = context.py__getattribute__(
name, position=stmt.start_pos, search_global=True)
for_stmt = stmt.get_parent_until(tree.ForStmt)
if isinstance(for_stmt, tree.ForStmt) and types \
and for_stmt.defines_one_name():
# Iterate through result and add the values, that's possible
# only in for loops without clutter, because they are
# predictable. Also only do it, if the variable is not a tuple.
node = for_stmt.get_input_node()
for_iterables = self.eval_element(context, node)
ordered = list(iterable.py__iter__(self, for_iterables, node))
for lazy_context in ordered:
dct = {str(for_stmt.children[1]): lazy_context.infer()}
with helpers.predefine_names(context, for_stmt, dct):
t = self.eval_element(context, rhs)
left = precedence.calculate(self, context, left, operator, t)
types = left
else:
types = precedence.calculate(self, context, left, operator, types)
debug.dbg('eval_statement result %s', types)
return types
def eval_element(self, context, element):
if isinstance(context, iterable.CompForContext):
return self._eval_element_not_cached(context, element)
if_stmt = element
while if_stmt is not None:
if_stmt = if_stmt.parent
if if_stmt.type in ('if_stmt', 'for_stmt'):
break
if if_stmt.is_scope():
if_stmt = None
break
predefined_if_name_dict = context.predefined_names.get(if_stmt)
if predefined_if_name_dict is None and if_stmt and if_stmt.type == 'if_stmt':
if_stmt_test = if_stmt.children[1]
name_dicts = [{}]
# If we already did a check, we don't want to do it again -> If
# context.predefined_names is filled, we stop.
# We don't want to check the if stmt itself, it's just about
# the content.
if element.start_pos > if_stmt_test.end_pos:
# Now we need to check if the names in the if_stmt match the
# names in the suite.
if_names = helpers.get_names_of_node(if_stmt_test)
element_names = helpers.get_names_of_node(element)
str_element_names = [str(e) for e in element_names]
if any(str(i) in str_element_names for i in if_names):
for if_name in if_names:
definitions = self.goto_definitions(context, if_name)
# Every name that has multiple different definitions
# causes the complexity to rise. The complexity should
# never fall below 1.
if len(definitions) > 1:
if len(name_dicts) * len(definitions) > 16:
debug.dbg('Too many options for if branch evaluation %s.', if_stmt)
                                # There's only a certain number of branches
                                # Jedi can evaluate, otherwise it would take
                                # too long.
name_dicts = [{}]
break
original_name_dicts = list(name_dicts)
name_dicts = []
for definition in definitions:
new_name_dicts = list(original_name_dicts)
for i, name_dict in enumerate(new_name_dicts):
new_name_dicts[i] = name_dict.copy()
new_name_dicts[i][str(if_name)] = set([definition])
name_dicts += new_name_dicts
else:
for name_dict in name_dicts:
name_dict[str(if_name)] = definitions
if len(name_dicts) > 1:
result = set()
for name_dict in name_dicts:
with helpers.predefine_names(context, if_stmt, name_dict):
result |= self._eval_element_not_cached(context, element)
return result
else:
return self._eval_element_if_evaluated(context, element)
else:
if predefined_if_name_dict:
return self._eval_element_not_cached(context, element)
else:
return self._eval_element_if_evaluated(context, element)
def _eval_element_if_evaluated(self, context, element):
"""
TODO This function is temporary: Merge with eval_element.
"""
parent = element
while parent is not None:
parent = parent.parent
predefined_if_name_dict = context.predefined_names.get(parent)
if predefined_if_name_dict is not None:
return self._eval_element_not_cached(context, element)
return self._eval_element_cached(context, element)
@memoize_default(default=set(), evaluator_is_first_arg=True)
def _eval_element_cached(self, context, element):
return self._eval_element_not_cached(context, element)
@debug.increase_indent
def _eval_element_not_cached(self, context, element):
debug.dbg('eval_element %s@%s', element, element.start_pos)
types = set()
if isinstance(element, (tree.Name, tree.Literal)) or element.type == 'atom':
types = self.eval_atom(context, element)
elif isinstance(element, tree.Keyword):
# For False/True/None
if element.value in ('False', 'True', 'None'):
types.add(compiled.builtin_from_name(self, element.value))
# else: print e.g. could be evaluated like this in Python 2.7
elif isinstance(element, tree.Lambda):
types = set([er.FunctionContext(self, context, element)])
elif element.type == 'expr_stmt':
types = self.eval_statement(context, element)
elif element.type in ('power', 'atom_expr'):
first_child = element.children[0]
if not (first_child.type == 'keyword' and first_child.value == 'await'):
types = self.eval_atom(context, first_child)
for trailer in element.children[1:]:
if trailer == '**': # has a power operation.
right = self.eval_element(context, element.children[2])
types = set(precedence.calculate(self, context, types, trailer, right))
break
types = self.eval_trailer(context, types, trailer)
elif element.type in ('testlist_star_expr', 'testlist',):
# The implicit tuple in statements.
types = set([iterable.SequenceLiteralContext(self, context, element)])
elif element.type in ('not_test', 'factor'):
types = self.eval_element(context, element.children[-1])
for operator in element.children[:-1]:
types = set(precedence.factor_calculate(self, types, operator))
elif element.type == 'test':
# `x if foo else y` case.
types = (self.eval_element(context, element.children[0]) |
self.eval_element(context, element.children[-1]))
elif element.type == 'operator':
# Must be an ellipsis, other operators are not evaluated.
assert element.value == '...'
types = set([compiled.create(self, Ellipsis)])
elif element.type == 'dotted_name':
types = self.eval_atom(context, element.children[0])
for next_name in element.children[2::2]:
# TODO add search_global=True?
types = unite(
typ.py__getattribute__(next_name, name_context=context)
for typ in types
)
elif element.type == 'eval_input':
types = self._eval_element_not_cached(context, element.children[0])
elif element.type == 'annassign':
            types = pep0484._evaluate_for_annotation(context, element.children[1])
else:
types = precedence.calculate_children(self, context, element.children)
debug.dbg('eval_element result %s', types)
return types
def eval_atom(self, context, atom):
"""
Basically to process ``atom`` nodes. The parser sometimes doesn't
generate the node (because it has just one child). In that case an atom
might be a name or a literal as well.
"""
if isinstance(atom, tree.Name):
# This is the first global lookup.
stmt = atom.get_definition()
if isinstance(stmt, tree.CompFor):
stmt = stmt.get_parent_until((tree.ClassOrFunc, tree.ExprStmt))
if stmt.type != 'expr_stmt':
# We only need to adjust the start_pos for statements, because
# there the name cannot be used.
stmt = atom
return context.py__getattribute__(
name_or_str=atom,
position=stmt.start_pos,
search_global=True
)
elif isinstance(atom, tree.Literal):
return set([compiled.create(self, atom.eval())])
else:
c = atom.children
if c[0].type == 'string':
# Will be one string.
types = self.eval_atom(context, c[0])
for string in c[1:]:
right = self.eval_atom(context, string)
types = precedence.calculate(self, context, types, '+', right)
return types
# Parentheses without commas are not tuples.
elif c[0] == '(' and not len(c) == 2 \
and not(c[1].type == 'testlist_comp' and
len(c[1].children) > 1):
return self.eval_element(context, c[1])
try:
comp_for = c[1].children[1]
except (IndexError, AttributeError):
pass
else:
if comp_for == ':':
# Dict comprehensions have a colon at the 3rd index.
try:
comp_for = c[1].children[3]
except IndexError:
pass
if comp_for.type == 'comp_for':
return set([iterable.Comprehension.from_atom(self, context, atom)])
# It's a dict/list/tuple literal.
array_node = c[1]
try:
array_node_c = array_node.children
except AttributeError:
array_node_c = []
if c[0] == '{' and (array_node == '}' or ':' in array_node_c):
context = iterable.DictLiteralContext(self, context, atom)
else:
context = iterable.SequenceLiteralContext(self, context, atom)
return set([context])
def eval_trailer(self, context, types, trailer):
trailer_op, node = trailer.children[:2]
if node == ')': # `arglist` is optional.
node = ()
new_types = set()
if trailer_op == '[':
new_types |= iterable.py__getitem__(self, context, types, trailer)
else:
for typ in types:
debug.dbg('eval_trailer: %s in scope %s', trailer, typ)
if trailer_op == '.':
new_types |= typ.py__getattribute__(
name_context=context,
name_or_str=node
)
elif trailer_op == '(':
arguments = param.TreeArguments(self, context, node, trailer)
new_types |= self.execute(typ, arguments)
return new_types
@debug.increase_indent
def execute(self, obj, arguments):
if not isinstance(arguments, param.AbstractArguments):
raise NotImplementedError
arguments = param.Arguments(self, arguments)
if self.is_analysis:
arguments.eval_all()
debug.dbg('execute: %s %s', obj, arguments)
try:
# Some stdlib functions like super(), namedtuple(), etc. have been
# hard-coded in Jedi to support them.
return stdlib.execute(self, obj, arguments)
except stdlib.NotInStdLib:
pass
try:
func = obj.py__call__
except AttributeError:
debug.warning("no execution possible %s", obj)
return set()
else:
types = func(arguments)
debug.dbg('execute result: %s in %s', types, obj)
return types
def goto_definitions(self, context, name):
def_ = name.get_definition()
is_simple_name = name.parent.type not in ('power', 'trailer')
if is_simple_name:
if name.parent.type == 'classdef' and name.parent.name == name:
return [er.ClassContext(self, name.parent, context)]
elif name.parent.type == 'funcdef':
return [er.FunctionContext(self, context, name.parent)]
elif name.parent.type == 'file_input':
raise NotImplementedError
if def_.type == 'expr_stmt' and name in def_.get_defined_names():
return self.eval_statement(context, def_, name)
elif def_.type == 'for_stmt':
container_types = self.eval_element(context, def_.children[3])
for_types = iterable.py__iter__types(self, container_types, def_.children[3])
return finder.check_tuple_assignments(self, for_types, name)
elif def_.type in ('import_from', 'import_name'):
return imports.infer_import(context, name)
return helpers.evaluate_call_of_leaf(context, name)
def goto(self, context, name):
stmt = name.get_definition()
par = name.parent
if par.type == 'argument' and par.children[1] == '=' and par.children[0] == name:
# Named param goto.
trailer = par.parent
if trailer.type == 'arglist':
trailer = trailer.parent
if trailer.type != 'classdef':
if trailer.type == 'decorator':
types = self.eval_element(context, trailer.children[1])
else:
i = trailer.parent.children.index(trailer)
to_evaluate = trailer.parent.children[:i]
types = self.eval_element(context, to_evaluate[0])
for trailer in to_evaluate[1:]:
types = self.eval_trailer(context, types, trailer)
param_names = []
for typ in types:
try:
get_param_names = typ.get_param_names
except AttributeError:
pass
else:
for param_name in get_param_names():
if param_name.string_name == name.value:
param_names.append(param_name)
return param_names
elif isinstance(par, tree.ExprStmt) and name in par.get_defined_names():
# Only take the parent, because if it's more complicated than just
# a name it's something you can "goto" again.
return [TreeNameDefinition(context, name)]
elif par.type == 'param' and par.name:
return [ParamName(context, name)]
elif isinstance(par, (tree.Param, tree.Function, tree.Class)) and par.name is name:
return [TreeNameDefinition(context, name)]
elif isinstance(stmt, tree.Import):
module_names = imports.infer_import(context, name, is_goto=True)
return module_names
elif par.type == 'dotted_name': # Is a decorator.
index = par.children.index(name)
if index > 0:
new_dotted = helpers.deep_ast_copy(par)
new_dotted.children[index - 1:] = []
values = self.eval_element(context, new_dotted)
return unite(
value.py__getattribute__(name, name_context=context, is_goto=True)
for value in values
)
if par.type == 'trailer' and par.children[0] == '.':
values = helpers.evaluate_call_of_leaf(context, name, cut_own_trailer=True)
return unite(
value.py__getattribute__(name, name_context=context, is_goto=True)
for value in values
)
else:
if stmt.type != 'expr_stmt':
# We only need to adjust the start_pos for statements, because
# there the name cannot be used.
stmt = name
return context.py__getattribute__(
name,
position=stmt.start_pos,
search_global=True, is_goto=True
)
def create_context(self, base_context, node, node_is_context=False, node_is_object=False):
def parent_scope(node):
while True:
node = node.parent
if node.is_scope():
return node
elif node.type in ('argument', 'testlist_comp'):
if node.children[1].type == 'comp_for':
return node.children[1]
elif node.type == 'dictorsetmaker':
for n in node.children[1:4]:
# In dictionaries it can be pretty much anything.
if n.type == 'comp_for':
return n
def from_scope_node(scope_node, child_is_funcdef=None, is_nested=True, node_is_object=False):
if scope_node == base_node:
return base_context
is_funcdef = scope_node.type in ('funcdef', 'lambda')
parent_scope = scope_node.get_parent_scope()
parent_context = from_scope_node(parent_scope, child_is_funcdef=is_funcdef)
if is_funcdef:
if isinstance(parent_context, AnonymousInstance):
func = BoundMethod(
self, parent_context, parent_context.class_context,
parent_context.parent_context, scope_node
)
else:
func = er.FunctionContext(
self,
parent_context,
scope_node
)
if is_nested and not node_is_object:
return func.get_function_execution()
return func
elif scope_node.type == 'classdef':
class_context = er.ClassContext(self, scope_node, parent_context)
if child_is_funcdef:
# anonymous instance
return AnonymousInstance(self, parent_context, class_context)
else:
return class_context
elif scope_node.type == 'comp_for':
if node.start_pos >= scope_node.children[-1].start_pos:
return parent_context
return iterable.CompForContext.from_comp_for(parent_context, scope_node)
raise Exception("There's a scope that was not managed.")
base_node = base_context.tree_node
if node_is_context and node.is_scope():
scope_node = node
else:
if node.parent.type in ('funcdef', 'classdef'):
# When we're on class/function names/leafs that define the
# object itself and not its contents.
node = node.parent
scope_node = parent_scope(node)
return from_scope_node(scope_node, is_nested=True, node_is_object=node_is_object)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
| Plural subsystem is created by Vladyslav Kozlovskyy (Ukraine) <dbdevelop@gmail.com>
Translation system
--------------------------------------------
"""
import os
import re
import sys
import pkgutil
import logging
from cgi import escape
from threading import RLock
from gluon.utils import local_html_escape
from gluon._compat import copyreg, PY2, maketrans, iterkeys, unicodeT, to_unicode, to_bytes, iteritems, to_native, pjoin
from pydal.contrib.portalocker import read_locked, LockedFile
from gluon.fileutils import listdir
from gluon.cfs import getcfs
from gluon.html import XML, xmlescape
from gluon.contrib.markmin.markmin2html import render, markmin_escape
__all__ = ['translator', 'findT', 'update_all_languages']
ostat = os.stat
oslistdir = os.listdir
pdirname = os.path.dirname
isdir = os.path.isdir
DEFAULT_LANGUAGE = 'en'
DEFAULT_LANGUAGE_NAME = 'English'
# DEFAULT PLURAL-FORMS RULES:
# language doesn't use plural forms
DEFAULT_NPLURALS = 1
# only one singular/plural form is used
DEFAULT_GET_PLURAL_ID = lambda n: 0
# word is unchangeable
DEFAULT_CONSTRUCT_PLURAL_FORM = lambda word, plural_id: word
if PY2:
NUMBERS = (int, long, float)
from gluon.utf8 import Utf8
else:
NUMBERS = (int, float)
Utf8 = str
# pattern to find T(blah blah blah) expressions
PY_STRING_LITERAL_RE = r'(?<=[^\w]T\()(?P<name>'\
+ r"[uU]?[rR]?(?:'''(?:[^']|'{1,2}(?!'))*''')|"\
+ r"(?:'(?:[^'\\]|\\.)*')|" + r'(?:"""(?:[^"]|"{1,2}(?!"))*""")|'\
+ r'(?:"(?:[^"\\]|\\.)*"))'
PY_M_STRING_LITERAL_RE = r'(?<=[^\w]T\.M\()(?P<name>'\
+ r"[uU]?[rR]?(?:'''(?:[^']|'{1,2}(?!'))*''')|"\
+ r"(?:'(?:[^'\\]|\\.)*')|" + r'(?:"""(?:[^"]|"{1,2}(?!"))*""")|'\
+ r'(?:"(?:[^"\\]|\\.)*"))'
regex_translate = re.compile(PY_STRING_LITERAL_RE, re.DOTALL)
regex_translate_m = re.compile(PY_M_STRING_LITERAL_RE, re.DOTALL)
regex_param = re.compile(r'{(?P<s>.+?)}')
# pattern for a valid accept_language
regex_language = \
    re.compile(r'([a-z]{2,3}(?:\-[a-z]{2})?(?:\-[a-z]{2})?)(?:[,;]|$)')
regex_langfile = re.compile(r'^[a-z]{2,3}(-[a-z]{2})?\.py$')
regex_backslash = re.compile(r"\\([\\{}%])")
regex_plural = re.compile(r'%({.+?})')
regex_plural_dict = re.compile(r'^{(?P<w>[^()[\]][^()[\]]*?)\((?P<n>[^()\[\]]+)\)}$') # %%{word(varname or number)}
regex_plural_tuple = re.compile(
    r'^{(?P<w>[^[\]()]+)(?:\[(?P<i>\d+)\])?}$') # %%{word[index]} or %%{word}
regex_plural_file = re.compile(r'^plural-[a-zA-Z]{2}(-[a-zA-Z]{2})?\.py$')
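# Illustrative matches: regex_translate captures the quoted literal inside
# T(...) calls, e.g. '"Hello world"' from the line ``x = T("Hello world")``;
# regex_plural_dict and regex_plural_tuple parse the inner part of %%{...}
# plural markers, e.g. '{word(n)}' and '{word[0]}' respectively.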
def is_writable():
""" returns True if and only if the filesystem is writable """
from gluon.settings import global_settings
return not global_settings.web2py_runtime_gae
def safe_eval(text):
if text.strip():
try:
import ast
return ast.literal_eval(text)
except ImportError:
return eval(text, {}, {})
return None
# used as default filter in translator.M()
def markmin(s):
def markmin_aux(m):
return '{%s}' % markmin_escape(m.group('s'))
return render(regex_param.sub(markmin_aux, s),
sep='br', autolinks=None, id_prefix='')
# UTF8 helper functions
def upper_fun(s):
return to_unicode(s).upper()
def title_fun(s):
return to_unicode(s).title()
def cap_fun(s):
return to_unicode(s).capitalize()
ttab_in = maketrans("\\%{}", '\x1c\x1d\x1e\x1f')
ttab_out = maketrans('\x1c\x1d\x1e\x1f', "\\%{}")
# cache of translated messages:
# global_language_cache:
# { 'languages/xx.py':
# ( {"def-message": "xx-message",
# ...
# "def-message": "xx-message"}, lock_object )
# 'languages/yy.py': ( {dict}, lock_object )
# ...
# }
global_language_cache = {}
def get_from_cache(cache, val, fun):
lang_dict, lock = cache
lock.acquire()
try:
result = lang_dict.get(val)
finally:
lock.release()
if result:
return result
lock.acquire()
try:
result = lang_dict.setdefault(val, fun())
finally:
lock.release()
return result
def clear_cache(filename):
cache = global_language_cache.setdefault(
filename, ({}, RLock()))
lang_dict, lock = cache
lock.acquire()
try:
lang_dict.clear()
finally:
lock.release()
def read_dict_aux(filename):
lang_text = read_locked(filename).replace(b'\r\n', b'\n')
clear_cache(filename)
try:
return safe_eval(to_native(lang_text)) or {}
except Exception:
e = sys.exc_info()[1]
status = 'Syntax error in %s (%s)' % (filename, e)
logging.error(status)
return {'__corrupted__': status}
def read_dict(filename):
""" Returns dictionary with translation messages
"""
return getcfs('lang:' + filename, filename,
lambda: read_dict_aux(filename))
def read_possible_plural_rules():
"""
Creates list of all possible plural rules files
The result is cached in PLURAL_RULES dictionary to increase speed
"""
plurals = {}
try:
import gluon.contrib.plural_rules as package
for importer, modname, ispkg in pkgutil.iter_modules(package.__path__):
if len(modname) == 2:
module = __import__(package.__name__ + '.' + modname,
fromlist=[modname])
lang = modname
pname = modname + '.py'
nplurals = getattr(module, 'nplurals', DEFAULT_NPLURALS)
get_plural_id = getattr(
module, 'get_plural_id',
DEFAULT_GET_PLURAL_ID)
construct_plural_form = getattr(
module, 'construct_plural_form',
DEFAULT_CONSTRUCT_PLURAL_FORM)
plurals[lang] = (lang, nplurals, get_plural_id,
construct_plural_form)
except ImportError:
e = sys.exc_info()[1]
        logging.warning('Unable to import plural rules: %s' % e)
return plurals
PLURAL_RULES = read_possible_plural_rules()
def read_possible_languages_aux(langdir):
def get_lang_struct(lang, langcode, langname, langfile_mtime):
if lang == 'default':
real_lang = langcode.lower()
else:
real_lang = lang
(prules_langcode,
nplurals,
get_plural_id,
construct_plural_form
) = PLURAL_RULES.get(real_lang[:2], ('default',
DEFAULT_NPLURALS,
DEFAULT_GET_PLURAL_ID,
DEFAULT_CONSTRUCT_PLURAL_FORM))
if prules_langcode != 'default':
(pluraldict_fname,
pluraldict_mtime) = plurals.get(real_lang,
plurals.get(real_lang[:2],
('plural-%s.py' % real_lang, 0)))
else:
pluraldict_fname = None
pluraldict_mtime = 0
return (langcode, # language code from !langcode!
langname,
# language name in national spelling from !langname!
langfile_mtime, # m_time of language file
                pluraldict_fname, # name of plural dictionary file, or None when the language uses the default plural rules
                pluraldict_mtime, # m_time of plural dictionary file, or 0 if the file does not exist
prules_langcode, # code of plural rules language or 'default'
nplurals, # nplurals for current language
get_plural_id, # get_plural_id() for current language
construct_plural_form) # construct_plural_form() for current language
plurals = {}
flist = oslistdir(langdir) if isdir(langdir) else []
# scan languages directory for plural dict files:
for pname in flist:
if regex_plural_file.match(pname):
plurals[pname[7:-3]] = (pname,
ostat(pjoin(langdir, pname)).st_mtime)
langs = {}
# scan languages directory for langfiles:
for fname in flist:
if regex_langfile.match(fname) or fname == 'default.py':
fname_with_path = pjoin(langdir, fname)
d = read_dict(fname_with_path)
lang = fname[:-3]
langcode = d.get('!langcode!', lang if lang != 'default'
else DEFAULT_LANGUAGE)
langname = d.get('!langname!', langcode)
langfile_mtime = ostat(fname_with_path).st_mtime
langs[lang] = get_lang_struct(lang, langcode,
langname, langfile_mtime)
if 'default' not in langs:
# if default.py is not found,
# add DEFAULT_LANGUAGE as default language:
langs['default'] = get_lang_struct('default', DEFAULT_LANGUAGE,
DEFAULT_LANGUAGE_NAME, 0)
deflang = langs['default']
deflangcode = deflang[0]
if deflangcode not in langs:
# create language from default.py:
langs[deflangcode] = deflang[:2] + (0,) + deflang[3:]
return langs
def read_possible_languages(langpath):
return getcfs('langs:' + langpath, langpath,
lambda: read_possible_languages_aux(langpath))
def read_plural_dict_aux(filename):
lang_text = read_locked(filename).replace(b'\r\n', b'\n')
try:
return eval(lang_text) or {}
except Exception:
e = sys.exc_info()[1]
status = 'Syntax error in %s (%s)' % (filename, e)
logging.error(status)
return {'__corrupted__': status}
def read_plural_dict(filename):
return getcfs('plurals:' + filename, filename,
lambda: read_plural_dict_aux(filename))
def write_plural_dict(filename, contents):
if '__corrupted__' in contents:
return
fp = None
try:
fp = LockedFile(filename, 'w')
fp.write('#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n{\n# "singular form (0)": ["first plural form (1)", "second plural form (2)", ...],\n')
        for key in sorted(contents, key=lambda x: to_unicode(x, 'utf-8').lower()):
forms = '[' + ','.join([repr(Utf8(form))
for form in contents[key]]) + ']'
fp.write('%s: %s,\n' % (repr(Utf8(key)), forms))
fp.write('}\n')
except (IOError, OSError):
if is_writable():
logging.warning('Unable to write to file %s' % filename)
return
finally:
if fp:
fp.close()
def sort_function(x, y):
    # Python 2/3 compatible comparison (``cmp`` and ``unicode`` are gone on
    # Python 3); kept for backward compatibility with external callers.
    xl, yl = to_unicode(x, 'utf-8').lower(), to_unicode(y, 'utf-8').lower()
    return (xl > yl) - (xl < yl)
def write_dict(filename, contents):
if '__corrupted__' in contents:
return
fp = None
try:
fp = LockedFile(filename, 'w')
fp.write('# -*- coding: utf-8 -*-\n{\n')
for key in sorted(contents, key=lambda x: to_unicode(x, 'utf-8').lower()):
fp.write('%s: %s,\n' % (repr(Utf8(key)),
repr(Utf8(contents[key]))))
fp.write('}\n')
except (IOError, OSError):
if is_writable():
logging.warning('Unable to write to file %s' % filename)
return
finally:
if fp:
fp.close()
class lazyT(object):
"""
Never to be called explicitly, returned by
translator.__call__() or translator.M()
"""
m = s = T = f = t = None
M = is_copy = False
def __init__(
self,
message,
symbols={},
T=None,
filter=None,
ftag=None,
M=False
):
if isinstance(message, lazyT):
self.m = message.m
self.s = message.s
self.T = message.T
self.f = message.f
self.t = message.t
self.M = message.M
self.is_copy = True
else:
self.m = message
self.s = symbols
self.T = T
self.f = filter
self.t = ftag
self.M = M
self.is_copy = False
def __repr__(self):
return "<lazyT %s>" % (repr(Utf8(self.m)), )
def __str__(self):
return str(self.T.apply_filter(self.m, self.s, self.f, self.t) if self.M else
self.T.translate(self.m, self.s))
def __eq__(self, other):
return str(self) == str(other)
def __ne__(self, other):
return str(self) != str(other)
def __add__(self, other):
return '%s%s' % (self, other)
def __radd__(self, other):
return '%s%s' % (other, self)
def __mul__(self, other):
return str(self) * other
def __cmp__(self, other):
return cmp(str(self), str(other))
def __hash__(self):
return hash(str(self))
def __getattr__(self, name):
return getattr(str(self), name)
def __getitem__(self, i):
return str(self)[i]
def __getslice__(self, i, j):
return str(self)[i:j]
def __iter__(self):
for c in str(self):
yield c
def __len__(self):
return len(str(self))
def xml(self):
return str(self) if self.M else local_html_escape(str(self), quote=False)
def encode(self, *a, **b):
return str(self).encode(*a, **b)
def decode(self, *a, **b):
return str(self).decode(*a, **b)
def read(self):
return str(self)
def __mod__(self, symbols):
if self.is_copy:
return lazyT(self)
return lazyT(self.m, symbols, self.T, self.f, self.t, self.M)
def pickle_lazyT(c):
return str, (c.xml(),)
copyreg.pickle(lazyT, pickle_lazyT)
class translator(object):
"""
This class is instantiated by gluon.compileapp.build_environment
as the T object
Example:
T.force(None) # turns off translation
T.force('fr, it') # forces web2py to translate using fr.py or it.py
T("Hello World") # translates "Hello World" using the selected file
Note:
- there is no need to force since, by default, T uses
http_accept_language to determine a translation file.
- en and en-en are considered different languages!
        - if language xx-yy is not found, force() probes other similar languages
          using the following algorithm: `xx-yy.py -> xx.py -> xx-yy*.py -> xx*.py`
"""
def __init__(self, langpath, http_accept_language):
self.langpath = langpath
self.http_accept_language = http_accept_language
# filled in self.force():
# ------------------------
# self.cache
# self.accepted_language
# self.language_file
# self.plural_language
# self.nplurals
# self.get_plural_id
# self.construct_plural_form
# self.plural_file
# self.plural_dict
# self.requested_languages
# ----------------------------------------
# filled in self.set_current_languages():
# ----------------------------------------
# self.default_language_file
# self.default_t
# self.current_languages
self.set_current_languages()
self.lazy = True
self.otherTs = {}
self.filter = markmin
self.ftag = 'markmin'
self.ns = None
self.is_writable = True
def get_possible_languages_info(self, lang=None):
"""
        Returns info for the selected language, or a dictionary with info for
        all possible languages from `APP/languages/*.py`
        It returns:
- a tuple containing::
langcode, langname, langfile_mtime,
pluraldict_fname, pluraldict_mtime,
prules_langcode, nplurals,
get_plural_id, construct_plural_form
or None
- if *lang* is NOT defined a dictionary with all possible
languages::
{ langcode(from filename):
( langcode, # language code from !langcode!
langname,
# language name in national spelling from !langname!
langfile_mtime, # m_time of language file
                  pluraldict_fname,# name of plural dictionary file or None (when default.py does not exist)
                  pluraldict_mtime,# m_time of plural dictionary file or 0 if the file does not exist
prules_langcode, # code of plural rules language or 'default'
nplurals, # nplurals for current language
get_plural_id, # get_plural_id() for current language
construct_plural_form) # construct_plural_form() for current language
}
Args:
lang (str): language
"""
info = read_possible_languages(self.langpath)
if lang:
info = info.get(lang)
return info
def get_possible_languages(self):
""" Gets list of all possible languages for current application """
return list(set(self.current_languages +
[lang for lang in read_possible_languages(self.langpath)
if lang != 'default']))
def set_current_languages(self, *languages):
"""
        Sets the current (AKA "default") languages
        Setting one of these languages makes the force() function turn
        translation off
"""
if len(languages) == 1 and isinstance(languages[0], (tuple, list)):
languages = languages[0]
if not languages or languages[0] is None:
# set default language from default.py/DEFAULT_LANGUAGE
pl_info = self.get_possible_languages_info('default')
if pl_info[2] == 0: # langfile_mtime
# if languages/default.py is not found
self.default_language_file = self.langpath
self.default_t = {}
self.current_languages = [DEFAULT_LANGUAGE]
else:
self.default_language_file = pjoin(self.langpath,
'default.py')
self.default_t = read_dict(self.default_language_file)
self.current_languages = [pl_info[0]] # !langcode!
else:
self.current_languages = list(languages)
self.force(self.http_accept_language)
def plural(self, word, n):
"""
Gets plural form of word for number *n*
invoked from T()/T.M() in `%%{}` tag
Note:
"word" MUST be defined in current language (T.accepted_language)
Args:
word (str): word in singular
n (numeric): number plural form created for
Returns:
word (str): word in appropriate singular/plural form
"""
if int(n) == 1:
return word
elif word:
id = self.get_plural_id(abs(int(n)))
# id = 0 singular form
# id = 1 first plural form
# id = 2 second plural form
# etc.
if id != 0:
forms = self.plural_dict.get(word, [])
if len(forms) >= id:
# have this plural form:
return forms[id - 1]
else:
# guessing this plural form
forms += [''] * (self.nplurals - len(forms) - 1)
form = self.construct_plural_form(word, id)
forms[id - 1] = form
self.plural_dict[word] = forms
if self.is_writable and is_writable() and self.plural_file:
write_plural_dict(self.plural_file,
self.plural_dict)
return form
return word
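    # Illustrative sketch of plural(): the `%%{}` markers handled in
    # params_substitution() below end up here. Assuming a translator
    # instance `T` with English-style plural rules loaded:
    #
    #   T.plural('book', 1)  ->  'book'
    #   T.plural('book', 3)  ->  'books'  (looked up in plural_dict, or built
    #                                      by construct_plural_form and
    #                                      cached back to the plural file)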
def force(self, *languages):
"""
Selects language(s) for translation
if a list of languages is passed as a parameter,
the first language from this list that matches the ones
from the possible_languages dictionary will be
selected
default language will be selected if none
of them matches possible_languages.
"""
pl_info = read_possible_languages(self.langpath)
def set_plural(language):
"""
initialize plural forms subsystem
"""
lang_info = pl_info.get(language)
if lang_info:
(pname,
pmtime,
self.plural_language,
self.nplurals,
self.get_plural_id,
self.construct_plural_form
) = lang_info[3:]
pdict = {}
if pname:
pname = pjoin(self.langpath, pname)
if pmtime != 0:
pdict = read_plural_dict(pname)
self.plural_file = pname
self.plural_dict = pdict
else:
self.plural_language = 'default'
self.nplurals = DEFAULT_NPLURALS
self.get_plural_id = DEFAULT_GET_PLURAL_ID
self.construct_plural_form = DEFAULT_CONSTRUCT_PLURAL_FORM
self.plural_file = None
self.plural_dict = {}
language = ''
if len(languages) == 1 and isinstance(languages[0], str):
languages = regex_language.findall(languages[0].lower())
elif not languages or languages[0] is None:
languages = []
self.requested_languages = languages = tuple(languages)
if languages:
all_languages = set(lang for lang in pl_info
if lang != 'default') \
| set(self.current_languages)
for lang in languages:
# compare "aa-bb" | "aa" from *language* parameter
                # with strings from langlist using the following algorithm:
# xx-yy.py -> xx.py -> xx*.py
lang5 = lang[:5]
if lang5 in all_languages:
language = lang5
else:
lang2 = lang[:2]
if len(lang5) > 2 and lang2 in all_languages:
language = lang2
else:
for l in all_languages:
if l[:2] == lang2:
language = l
if language:
if language in self.current_languages:
break
self.language_file = pjoin(self.langpath, language + '.py')
self.t = read_dict(self.language_file)
self.cache = global_language_cache.setdefault(
self.language_file,
({}, RLock()))
set_plural(language)
self.accepted_language = language
return languages
self.accepted_language = language
if not language:
if self.current_languages:
self.accepted_language = self.current_languages[0]
else:
self.accepted_language = DEFAULT_LANGUAGE
self.language_file = self.default_language_file
self.cache = global_language_cache.setdefault(self.language_file,
({}, RLock()))
self.t = self.default_t
set_plural(self.accepted_language)
return languages
def __call__(self, message, symbols={}, language=None, lazy=None, ns=None):
"""
        Gets a cached translated plain text message with inserted parameters (symbols);
        if lazy==True a lazyT object is returned
"""
if lazy is None:
lazy = self.lazy
if not language and not ns:
if lazy:
return lazyT(message, symbols, self)
else:
return self.translate(message, symbols)
else:
if ns:
if ns != self.ns:
self.langpath = os.path.join(self.langpath, ns)
if self.ns is None:
self.ns = ns
otherT = self.__get_otherT__(language, ns)
return otherT(message, symbols, lazy=lazy)
def __get_otherT__(self, language=None, namespace=None):
if not language and not namespace:
raise Exception('Incorrect parameters')
if namespace:
if language:
index = '%s/%s' % (namespace, language)
else:
index = namespace
else:
index = language
try:
otherT = self.otherTs[index]
except KeyError:
otherT = self.otherTs[index] = translator(self.langpath,
self.http_accept_language)
if language:
otherT.force(language)
return otherT
def apply_filter(self, message, symbols={}, filter=None, ftag=None):
def get_tr(message, prefix, filter):
s = self.get_t(message, prefix)
return filter(s) if filter else self.filter(s)
if filter:
prefix = '@' + (ftag or 'userdef') + '\x01'
else:
prefix = '@' + self.ftag + '\x01'
message = get_from_cache(
self.cache, prefix + message,
lambda: get_tr(message, prefix, filter))
if symbols or symbols == 0 or symbols == "":
if isinstance(symbols, dict):
symbols.update(
(key, xmlescape(value).translate(ttab_in))
for key, value in iteritems(symbols)
if not isinstance(value, NUMBERS))
else:
if not isinstance(symbols, tuple):
symbols = (symbols,)
symbols = tuple(
value if isinstance(value, NUMBERS)
else to_native(xmlescape(value)).translate(ttab_in)
for value in symbols)
message = self.params_substitution(message, symbols)
return to_native(XML(message.translate(ttab_out)).xml())
def M(self, message, symbols={}, language=None,
lazy=None, filter=None, ftag=None, ns=None):
"""
        Gets a cached translated markmin-message with inserted parameters;
        if lazy==True a lazyT object is returned
"""
if lazy is None:
lazy = self.lazy
if not language and not ns:
if lazy:
return lazyT(message, symbols, self, filter, ftag, True)
else:
return self.apply_filter(message, symbols, filter, ftag)
else:
if ns:
self.langpath = os.path.join(self.langpath, ns)
otherT = self.__get_otherT__(language, ns)
return otherT.M(message, symbols, lazy=lazy)
def get_t(self, message, prefix=''):
"""
Use ## to add a comment into a translation string
        the comment can be useful to discriminate different possible
translations for the same string (for example different locations)::
T(' hello world ') -> ' hello world '
T(' hello world ## token') -> ' hello world '
T('hello ## world## token') -> 'hello ## world'
the ## notation is ignored in multiline strings and strings that
start with ##. This is needed to allow markmin syntax to be translated
"""
message = to_native(message, 'utf8')
prefix = to_native(prefix, 'utf8')
key = prefix + message
mt = self.t.get(key, None)
if mt is not None:
return mt
# we did not find a translation
        if message.find('##') > 0 and '\n' not in message:
# remove comments
message = message.rsplit('##', 1)[0]
# guess translation same as original
self.t[key] = mt = self.default_t.get(key, message)
        # update language file for later translation
if self.is_writable and is_writable() and \
self.language_file != self.default_language_file:
write_dict(self.language_file, self.t)
return regex_backslash.sub(
lambda m: m.group(1).translate(ttab_in), to_native(mt))
def params_substitution(self, message, symbols):
"""
        Substitutes parameters from symbols into message using %;
        also parses `%%{}` placeholders for plural-forms processing.
Returns:
string with parameters
Note:
            *symbols* MUST be either a tuple or a dict of parameters!
"""
        def sub_plural(m):
            """String in `%{}` is transformed by these rules:
               If the string starts with `\\`, `!` or `?` the following
               transformations take place::
"!string of words" -> "String of word" (Capitalize)
"!!string of words" -> "String Of Word" (Title)
"!!!string of words" -> "STRING OF WORD" (Upper)
"\\!string of words" -> "!string of word"
(remove \\ and disable transformations)
"?word?number" -> "word" (return word, if number == 1)
"?number" or "??number" -> "" (remove number,
if number == 1)
"?word?number" -> "number" (if number != 1)
"""
def sub_tuple(m):
""" word[number], !word[number], !!word[number], !!!word[number]
word, !word, !!word, !!!word, ?word?number, ??number, ?number
?word?word[number], ?word?[number], ??word[number]
"""
w, i = m.group('w', 'i')
c = w[0]
if c not in '!?':
return self.plural(w, symbols[int(i or 0)])
elif c == '?':
(p1, sep, p2) = w[1:].partition("?")
part1 = p1 if sep else ""
(part2, sep, part3) = (p2 if sep else p1).partition("?")
if not sep:
part3 = part2
if i is None:
# ?[word]?number[?number] or ?number
if not part2:
return m.group(0)
num = int(part2)
else:
# ?[word]?word2[?word3][number]
num = int(symbols[int(i or 0)])
return part1 if num == 1 else part3 if num == 0 else part2
elif w.startswith('!!!'):
word = w[3:]
fun = upper_fun
elif w.startswith('!!'):
word = w[2:]
fun = title_fun
else:
word = w[1:]
fun = cap_fun
if i is not None:
return fun(self.plural(word, symbols[int(i)]))
return fun(word)
def sub_dict(m):
""" word(var), !word(var), !!word(var), !!!word(var)
word(num), !word(num), !!word(num), !!!word(num)
?word2(var), ?word1?word2(var), ?word1?word2?word0(var)
?word2(num), ?word1?word2(num), ?word1?word2?word0(num)
"""
w, n = m.group('w', 'n')
c = w[0]
n = int(n) if n.isdigit() else symbols[n]
if c not in '!?':
return self.plural(w, n)
elif c == '?':
# ?[word1]?word2[?word0](var or num), ?[word1]?word2(var or num) or ?word2(var or num)
(p1, sep, p2) = w[1:].partition("?")
part1 = p1 if sep else ""
(part2, sep, part3) = (p2 if sep else p1).partition("?")
if not sep:
part3 = part2
num = int(n)
return part1 if num == 1 else part3 if num == 0 else part2
elif w.startswith('!!!'):
word = w[3:]
fun = upper_fun
elif w.startswith('!!'):
word = w[2:]
fun = title_fun
else:
word = w[1:]
fun = cap_fun
return fun(self.plural(word, n))
s = m.group(1)
part = regex_plural_tuple.sub(sub_tuple, s)
if part == s:
part = regex_plural_dict.sub(sub_dict, s)
if part == s:
return m.group(0)
return part
message = message % symbols
message = regex_plural.sub(sub_plural, message)
return message
def translate(self, message, symbols):
"""
        Gets a cached translated message with inserted parameters (symbols)
"""
message = get_from_cache(self.cache, message,
lambda: self.get_t(message))
if symbols or symbols == 0 or symbols == "":
if isinstance(symbols, dict):
symbols.update(
(key, str(value).translate(ttab_in))
for key, value in iteritems(symbols)
if not isinstance(value, NUMBERS))
else:
if not isinstance(symbols, tuple):
symbols = (symbols,)
symbols = tuple(
value if isinstance(value, NUMBERS)
else str(value).translate(ttab_in)
for value in symbols)
message = self.params_substitution(message, symbols)
return message.translate(ttab_out)
def findT(path, language=DEFAULT_LANGUAGE):
"""
Note:
Must be run by the admin app
"""
from gluon.tools import Auth, Crud
lang_file = pjoin(path, 'languages', language + '.py')
sentences = read_dict(lang_file)
mp = pjoin(path, 'models')
cp = pjoin(path, 'controllers')
vp = pjoin(path, 'views')
mop = pjoin(path, 'modules')
def add_message(message):
if not message.startswith('#') and not '\n' in message:
tokens = message.rsplit('##', 1)
else:
# this allows markmin syntax in translations
tokens = [message]
if len(tokens) == 2:
message = tokens[0].strip() + '##' + tokens[1].strip()
if message and not message in sentences:
sentences[message] = message.replace("@markmin\x01", "")
for filename in \
listdir(mp, '^.+\.py$', 0) + listdir(cp, '^.+\.py$', 0)\
+ listdir(vp, '^.+\.html$', 0) + listdir(mop, '^.+\.py$', 0):
data = to_native(read_locked(filename))
items = regex_translate.findall(data)
for x in regex_translate_m.findall(data):
if x[0:3] in ["'''", '"""']: items.append("%s@markmin\x01%s" %(x[0:3], x[3:]))
else: items.append("%s@markmin\x01%s" %(x[0], x[1:]))
for item in items:
try:
message = safe_eval(item)
except:
                continue  # silently ignore improperly formatted strings
add_message(message)
gluon_msg = [Auth.default_messages, Crud.default_messages]
for item in [x for m in gluon_msg for x in m.values() if x is not None]:
add_message(item)
if not '!langcode!' in sentences:
sentences['!langcode!'] = (
DEFAULT_LANGUAGE if language in ('default', DEFAULT_LANGUAGE) else language)
if not '!langname!' in sentences:
sentences['!langname!'] = (
DEFAULT_LANGUAGE_NAME if language in ('default', DEFAULT_LANGUAGE)
else sentences['!langcode!'])
write_dict(lang_file, sentences)
def update_all_languages(application_path):
"""
Note:
Must be run by the admin app
"""
path = pjoin(application_path, 'languages/')
for language in oslistdir(path):
if regex_langfile.match(language):
findT(application_path, language[:-3])
def update_from_langfile(target, source, force_update=False):
"""this will update untranslated messages in target from source (where both are language files)
    this can be used as a first step when creating a language file for a new but very similar language,
    or if you want to update your app from the welcome app of a newer web2py version,
    or in non-standard scenarios when you work on target and for some reason have a partial translation in source
    Args:
        force_update: if False existing translations remain unchanged; if True existing translations are updated from source
"""
src = read_dict(source)
sentences = read_dict(target)
for key in sentences:
val = sentences[key]
if not val or val == key or force_update:
new_val = src.get(key)
if new_val and new_val != val:
sentences[key] = new_val
write_dict(target, sentences)
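# Illustrative usage of update_from_langfile() (the paths below are
# hypothetical): pull missing Italian translations from the welcome app
# into an application's language file without touching existing ones:
#
#   update_from_langfile('applications/myapp/languages/it.py',
#                        'applications/welcome/languages/it.py',
#                        force_update=False)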
if __name__ == '__main__':
import doctest
doctest.testmod()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Nova logging handler.
This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.
It also allows setting of formatting information through flags.
"""
import cStringIO
import inspect
import itertools
import json
import logging
import logging.config
import logging.handlers
import os
import stat
import sys
import traceback
import nova
from nova import flags
from nova import local
from nova.openstack.common import cfg
from nova import version
log_opts = [
cfg.StrOpt('logging_context_format_string',
default='%(asctime)s %(levelname)s %(name)s [%(request_id)s '
'%(user_id)s %(project_id)s] %(instance)s'
'%(message)s',
help='format string to use for log messages with context'),
cfg.StrOpt('logging_default_format_string',
default='%(asctime)s %(levelname)s %(name)s [-] %(instance)s'
'%(message)s',
help='format string to use for log messages without context'),
cfg.StrOpt('logging_debug_format_suffix',
default='from (pid=%(process)d) %(funcName)s '
'%(pathname)s:%(lineno)d',
help='data to append to log format when level is DEBUG'),
cfg.StrOpt('logging_exception_prefix',
default='%(asctime)s TRACE %(name)s %(instance)s',
help='prefix each line of exception output with this format'),
cfg.StrOpt('instance_format',
default='[instance: %(uuid)s] ',
help='If an instance is passed with the log message, format '
'it like this'),
cfg.ListOpt('default_log_levels',
default=[
'amqplib=WARN',
'sqlalchemy=WARN',
'boto=WARN',
'suds=INFO',
'eventlet.wsgi.server=WARN'
],
help='list of logger=LEVEL pairs'),
cfg.BoolOpt('publish_errors',
default=False,
help='publish error events'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(log_opts)
# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
try:
NullHandler = logging.NullHandler
except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7
class NullHandler(logging.Handler):
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
logfile = FLAGS.log_file or FLAGS.logfile
logdir = FLAGS.log_dir or FLAGS.logdir
if logfile and not logdir:
return logfile
if logfile and logdir:
return os.path.join(logdir, logfile)
if logdir:
binary = binary or _get_binary_name()
return '%s.log' % (os.path.join(logdir, binary),)
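# Sketch of the path resolution above (flag values are hypothetical):
#   log_file='api.log', log_dir unset       -> 'api.log'
#   log_file='api.log', log_dir='/var/log'  -> '/var/log/api.log'
#   log_dir='/var/log' only                 -> '/var/log/<binary>.log'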
class NovaContextAdapter(logging.LoggerAdapter):
warn = logging.LoggerAdapter.warning
def __init__(self, logger):
self.logger = logger
def audit(self, msg, *args, **kwargs):
self.log(logging.AUDIT, msg, *args, **kwargs)
def process(self, msg, kwargs):
if 'extra' not in kwargs:
kwargs['extra'] = {}
extra = kwargs['extra']
context = kwargs.pop('context', None)
if not context:
context = getattr(local.store, 'context', None)
if context:
extra.update(_dictify_context(context))
instance = kwargs.pop('instance', None)
instance_extra = ''
if instance:
instance_extra = FLAGS.instance_format % instance
extra.update({'instance': instance_extra})
extra.update({"nova_version": version.version_string_with_vcs()})
extra['extra'] = extra.copy()
return msg, kwargs
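# Illustrative usage sketch for the adapter above (request_context and
# instance_uuid are assumptions, not defined in this module):
#
#   LOG = getLogger(__name__)
#   LOG.audit('instance spawned', context=request_context,
#             instance={'uuid': instance_uuid})
#
# process() folds `context` and `instance` into the record's `extra` dict,
# so the context format string can reference %(request_id)s, %(user_id)s,
# %(project_id)s and %(instance)s.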
class JSONFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
        # NOTE(jkoelker) we ignore the fmt argument, but it's still there
# since logging.config.fileConfig passes it.
self.datefmt = datefmt
def formatException(self, ei, strip_newlines=True):
lines = traceback.format_exception(*ei)
if strip_newlines:
lines = [itertools.ifilter(lambda x: x,
line.rstrip().splitlines())
for line in lines]
lines = list(itertools.chain(*lines))
return lines
def format(self, record):
message = {'message': record.getMessage(),
'asctime': self.formatTime(record, self.datefmt),
'name': record.name,
'msg': record.msg,
'args': record.args,
'levelname': record.levelname,
'levelno': record.levelno,
'pathname': record.pathname,
'filename': record.filename,
'module': record.module,
'lineno': record.lineno,
'funcname': record.funcName,
'created': record.created,
'msecs': record.msecs,
'relative_created': record.relativeCreated,
'thread': record.thread,
'thread_name': record.threadName,
'process_name': record.processName,
'process': record.process,
'traceback': None}
if hasattr(record, 'extra'):
message['extra'] = record.extra
if record.exc_info:
message['traceback'] = self.formatException(record.exc_info)
return json.dumps(message)
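# Illustrative sketch: emitting one JSON document per log record by attaching
# the formatter above to a handler (the handler choice is an assumption):
#
#   handler = logging.StreamHandler()
#   handler.setFormatter(JSONFormatter())
#   logging.getLogger('nova').addHandler(handler)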
class LegacyNovaFormatter(logging.Formatter):
"""A nova.context.RequestContext aware formatter configured through flags.
The flags used to set format strings are: logging_context_format_string
and logging_default_format_string. You can also specify
logging_debug_format_suffix to append extra formatting if the log level is
debug.
For information about what variables are available for the formatter see:
http://docs.python.org/library/logging.html#formatter
"""
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default."""
if 'instance' not in record.__dict__:
record.__dict__['instance'] = ''
if record.__dict__.get('request_id', None):
self._fmt = FLAGS.logging_context_format_string
else:
self._fmt = FLAGS.logging_default_format_string
if (record.levelno == logging.DEBUG and
FLAGS.logging_debug_format_suffix):
self._fmt += " " + FLAGS.logging_debug_format_suffix
        # Cache this on the record, Logger will respect our formatted copy
if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record)
return logging.Formatter.format(self, record)
def formatException(self, exc_info, record=None):
"""Format exception output with FLAGS.logging_exception_prefix."""
if not record:
return logging.Formatter.formatException(self, exc_info)
stringbuffer = cStringIO.StringIO()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
None, stringbuffer)
lines = stringbuffer.getvalue().split('\n')
stringbuffer.close()
if FLAGS.logging_exception_prefix.find('%(asctime)') != -1:
record.asctime = self.formatTime(record, self.datefmt)
formatted_lines = []
for line in lines:
pl = FLAGS.logging_exception_prefix % record.__dict__
fl = '%s%s' % (pl, line)
formatted_lines.append(fl)
return '\n'.join(formatted_lines)
class PublishErrorsHandler(logging.Handler):
def emit(self, record):
if 'list_notifier_drivers' in FLAGS:
if 'nova.notifier.log_notifier' in FLAGS.list_notifier_drivers:
return
nova.notifier.api.notify('nova.error.publisher', 'error_notification',
nova.notifier.api.ERROR, dict(error=record.msg))
def handle_exception(type, value, tb):
extra = {}
if FLAGS.verbose:
extra['exc_info'] = (type, value, tb)
getLogger().critical(str(value), **extra)
def setup():
"""Setup nova logging."""
sys.excepthook = handle_exception
if FLAGS.log_config:
try:
logging.config.fileConfig(FLAGS.log_config)
except Exception:
traceback.print_exc()
raise
else:
_setup_logging_from_flags()
def _find_facility_from_flags():
facility_names = logging.handlers.SysLogHandler.facility_names
facility = getattr(logging.handlers.SysLogHandler,
FLAGS.syslog_log_facility,
None)
if facility is None and FLAGS.syslog_log_facility in facility_names:
facility = facility_names.get(FLAGS.syslog_log_facility)
if facility is None:
valid_facilities = facility_names.keys()
consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
valid_facilities.extend(consts)
raise TypeError(_('syslog facility must be one of: %s') %
', '.join("'%s'" % fac
for fac in valid_facilities))
return facility
def _setup_logging_from_flags():
nova_root = getLogger().logger
for handler in nova_root.handlers:
nova_root.removeHandler(handler)
if FLAGS.use_syslog:
facility = _find_facility_from_flags()
syslog = logging.handlers.SysLogHandler(address='/dev/log',
facility=facility)
nova_root.addHandler(syslog)
logpath = _get_log_file_path()
if logpath:
filelog = logging.handlers.WatchedFileHandler(logpath)
nova_root.addHandler(filelog)
mode = int(FLAGS.logfile_mode, 8)
st = os.stat(logpath)
if st.st_mode != (stat.S_IFREG | mode):
os.chmod(logpath, mode)
if FLAGS.use_stderr:
streamlog = logging.StreamHandler()
nova_root.addHandler(streamlog)
elif not FLAGS.log_file:
streamlog = logging.StreamHandler(stream=sys.stdout)
nova_root.addHandler(streamlog)
if FLAGS.publish_errors:
nova_root.addHandler(PublishErrorsHandler(logging.ERROR))
for handler in nova_root.handlers:
datefmt = FLAGS.log_date_format
        if FLAGS.log_format:
            handler.setFormatter(logging.Formatter(fmt=FLAGS.log_format,
                                                   datefmt=datefmt))
        else:
            handler.setFormatter(LegacyNovaFormatter(datefmt=datefmt))
if FLAGS.verbose or FLAGS.debug:
nova_root.setLevel(logging.DEBUG)
else:
nova_root.setLevel(logging.INFO)
level = logging.NOTSET
for pair in FLAGS.default_log_levels:
mod, _sep, level_name = pair.partition('=')
level = logging.getLevelName(level_name)
logger = logging.getLogger(mod)
logger.setLevel(level)
# NOTE(jkoelker) Clear the handlers for the root logger that was setup
# by basicConfig in nova/__init__.py and install the
# NullHandler.
root = logging.getLogger()
for handler in root.handlers:
root.removeHandler(handler)
handler = NullHandler()
handler.setFormatter(logging.Formatter())
root.addHandler(handler)
_loggers = {}
def getLogger(name='nova'):
if name not in _loggers:
_loggers[name] = NovaContextAdapter(logging.getLogger(name))
return _loggers[name]
class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs."""
def __init__(self, logger, level=logging.INFO):
self.logger = logger
self.level = level
def write(self, msg):
self.logger.log(self.level, msg)
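# Illustrative sketch: WritableLogger adapts a logger to APIs that expect a
# file-like object with write(), e.g. an eventlet wsgi server that accepts a
# `log` argument (the wsgi call itself is an assumption, not part of this
# module):
#
#   eventlet.wsgi.server(sock, app, log=WritableLogger(getLogger()))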
|
|
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 04 13:19:01 2013
Author: Josef Perktold
"""
from __future__ import print_function
from statsmodels.compat.python import lrange, lmap
import numpy as np
from numpy.testing import assert_allclose, assert_almost_equal, assert_equal
from statsmodels import iolib
from statsmodels.tools.tools import add_constant
from statsmodels.regression.linear_model import OLS
import statsmodels.sandbox.regression.gmm as gmm
def get_griliches76_data():
import os
curdir = os.path.split(__file__)[0]
path = os.path.join(curdir, 'griliches76.dta')
griliches76_data = iolib.genfromdta(path, missing_flt=np.NaN, pandas=True)
# create year dummies
years = griliches76_data['year'].unique()
N = griliches76_data.shape[0]
for yr in years:
griliches76_data['D_%i' %yr] = np.zeros(N)
for i in range(N):
if griliches76_data.ix[i, 'year'] == yr:
griliches76_data.ix[i, 'D_%i' %yr] = 1
else:
pass
griliches76_data['const'] = 1
X = add_constant(griliches76_data[['s', 'iq', 'expr', 'tenure', 'rns',
'smsa', 'D_67', 'D_68', 'D_69', 'D_70',
'D_71', 'D_73']],
prepend=True) # for R comparison
#prepend=False) # for Stata comparison
Z = add_constant(griliches76_data[['expr', 'tenure', 'rns', 'smsa', \
'D_67', 'D_68', 'D_69', 'D_70', 'D_71',
'D_73', 'med', 'kww', 'age', 'mrt']])
Y = griliches76_data['lw']
return Y, X, Z
# use module global to load only once
yg_df, xg_df, zg_df = get_griliches76_data()
endog = np.asarray(yg_df, dtype=float) # TODO: why is yg_df float32
exog, instrument = lmap(np.asarray, [xg_df, zg_df])
assert exog.dtype == np.float64
assert instrument.dtype == np.float64
# from R
#-----------------
varnames = np.array(["(Intercept)", "s", "iq", "expr", "tenure", "rns", "smsa", "D_67", "D_68", "D_69", "D_70",
"D_71", "D_73"])
params = np.array([ 4.03350989, 0.17242531, -0.00909883, 0.04928949, 0.04221709,
-0.10179345, 0.12611095, -0.05961711, 0.04867956, 0.15281763,
0.17443605, 0.09166597, 0.09323977])
bse = np.array([ 0.31816162, 0.02091823, 0.00474527, 0.00822543, 0.00891969,
0.03447337, 0.03119615, 0.05577582, 0.05246796, 0.05201092,
0.06027671, 0.05461436, 0.05767865])
tvalues = np.array([ 12.6775501, 8.2428242, -1.9174531, 5.9923305, 4.7330205,
-2.9528144, 4.0425165, -1.0688701, 0.9277959, 2.9381834,
2.8939212, 1.6784225, 1.6165385])
pvalues = np.array([ 1.72360000e-33, 7.57025400e-16, 5.55625000e-02,
3.21996700e-09, 2.64739100e-06, 3.24794100e-03,
5.83809900e-05, 2.85474400e-01, 3.53813900e-01,
3.40336100e-03, 3.91575100e-03, 9.36840200e-02,
1.06401300e-01])
#-----------------
def test_iv2sls_r():
mod = gmm.IV2SLS(endog, exog, instrument)
res = mod.fit()
# print(res.params)
# print(res.params - params)
n, k = exog.shape
assert_allclose(res.params, params, rtol=1e-7, atol=1e-9)
# TODO: check df correction
#assert_allclose(res.bse * np.sqrt((n - k) / (n - k - 1.)), bse,
assert_allclose(res.bse, bse, rtol=0, atol=3e-7)
def test_ivgmm0_r():
n, k = exog.shape
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
w0 = np.linalg.inv(w0inv)
mod = gmm.IVGMM(endog, exog, instrument)
res = mod.fit(np.ones(exog.shape[1], float), maxiter=0, inv_weights=w0inv,
optim_method='bfgs',
optim_args={'gtol':1e-8, 'disp': 0})
assert_allclose(res.params, params, rtol=1e-4, atol=1e-4)
# TODO : res.bse and bse are not the same, rtol=0.09 is large in this case
#res.bse is still robust?, bse is not a sandwich ?
assert_allclose(res.bse, bse, rtol=0.09, atol=0)
score = res.model.score(res.params, w0)
assert_allclose(score, np.zeros(score.shape), rtol=0, atol=5e-6) # atol=1e-8) ??
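# Note on the weighting used above: with instruments Z (nobs x k_instr),
# `w0inv` holds Z'Z / nobs and the implied starting weight matrix is
# W0 = inv(Z'Z / nobs), passed to IVGMM.fit() via `inv_weights`.  With
# maxiter=0 the weights are never updated, which is what makes this a
# one-step GMM estimate.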
def test_ivgmm1_stata():
# copied constant to the beginning
params_stata = np.array(
[ 4.0335099 , 0.17242531, -0.00909883, 0.04928949, 0.04221709,
-0.10179345, 0.12611095, -0.05961711, 0.04867956, 0.15281763,
0.17443605, 0.09166597, 0.09323976])
# robust bse with gmm onestep
bse_stata = np.array(
[ 0.33503289, 0.02073947, 0.00488624, 0.0080498 , 0.00946363,
0.03371053, 0.03081138, 0.05171372, 0.04981322, 0.0479285 ,
0.06112515, 0.0554618 , 0.06084901])
n, k = exog.shape
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
w0 = np.linalg.inv(w0inv)
start = OLS(endog, exog).fit().params
mod = gmm.IVGMM(endog, exog, instrument)
res = mod.fit(start, maxiter=1, inv_weights=w0inv, optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0})
# move constant to end for Stata
idx = lrange(len(params))
idx = idx[1:] + idx[:1]
exog_st = exog[:, idx]
class TestGMMOLS(object):
@classmethod
def setup_class(self):
exog = exog_st # with const at end
res_ols = OLS(endog, exog).fit()
# use exog as instrument
nobs, k_instr = exog.shape
w0inv = np.dot(exog.T, exog) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.IVGMM(endog, exog, exog)
res = mod.fit(np.ones(exog.shape[1], float), maxiter=0, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0})
self.res1 = res
self.res2 = res_ols
def test_basic(self):
res1, res2 = self.res1, self.res2
# test both absolute and relative difference
assert_allclose(res1.params, res2.params, rtol=5e-4, atol=0)
assert_allclose(res1.params, res2.params, rtol=0, atol=1e-5)
n = res1.model.exog.shape[0]
dffac = 1#np.sqrt((n - 1.) / n) # currently different df in cov calculation
assert_allclose(res1.bse * dffac, res2.HC0_se, rtol=5e-6, atol=0)
assert_allclose(res1.bse * dffac, res2.HC0_se, rtol=0, atol=1e-7)
def test_other(self):
res1, res2 = self.res1, self.res2
class CheckGMM(object):
params_tol = [5e-6, 5e-6]
bse_tol = [5e-7, 5e-7]
def test_basic(self):
res1, res2 = self.res1, self.res2
# test both absolute and relative difference
rtol, atol = self.params_tol
assert_allclose(res1.params, res2.params, rtol=rtol, atol=0)
assert_allclose(res1.params, res2.params, rtol=0, atol=atol)
n = res1.model.exog.shape[0]
dffac = 1 #np.sqrt((n - 1.) / n) # currently different df in cov calculation
rtol, atol = self.bse_tol
assert_allclose(res1.bse * dffac, res2.bse, rtol=rtol, atol=0)
assert_allclose(res1.bse * dffac, res2.bse, rtol=0, atol=atol)
#skip temporarily
def _est_other(self):
res1, res2 = self.res1, self.res2
assert_allclose(res1.q, res2.Q, rtol=5e-6, atol=0)
assert_allclose(res1.jval, res2.J, rtol=5e-5, atol=0)
class TestGMMSt1(CheckGMM):
@classmethod
def setup_class(self):
#self.bse_tol = [5e-7, 5e-7]
# compare to Stata default options, iterative GMM
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.IVGMM(endog, exog, instrument)
res10 = mod.fit(start, maxiter=10, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0},
wargs={'centered':False})
self.res1 = res10
from .results_gmm_griliches_iter import results
self.res2 = results
class TestGMMStTwostep(CheckGMM):
#compares has_optimal_weights=True with Stata's has_optimal_weights=False
@classmethod
def setup_class(self):
# compare to Stata default options, twostep GMM
self.params_tol = [5e-5, 5e-6]
self.bse_tol = [5e-6, 5e-7]
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.IVGMM(endog, exog, instrument)
res10 = mod.fit(start, maxiter=2, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0},
wargs={'centered':False})
self.res1 = res10
from .results_gmm_griliches import results_twostep as results
self.res2 = results
class TestGMMStTwostepNO(CheckGMM):
#with Stata default `has_optimal_weights=False`
@classmethod
def setup_class(self):
# compare to Stata default options, twostep GMM
self.params_tol = [5e-5, 5e-6]
self.bse_tol = [1e-6, 5e-5]
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.IVGMM(endog, exog, instrument)
res10 = mod.fit(start, maxiter=2, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res1 = res10
from .results_gmm_griliches import results_twostep as results
self.res2 = results
class TestGMMStOnestep(CheckGMM):
@classmethod
def setup_class(self):
# compare to Stata default options, onestep GMM
self.params_tol = [5e-4, 5e-5]
self.bse_tol = [7e-3, 5e-4]
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.IVGMM(endog, exog, instrument)
res = mod.fit(start, maxiter=0, inv_weights=w0inv,
optim_method='bfgs',
optim_args={'gtol':1e-6, 'disp': 0})
self.res1 = res
from .results_gmm_griliches import results_onestep as results
self.res2 = results
def test_bse_other(self):
res1, res2 = self.res1, self.res2
# try other versions for bse,
# TODO: next two produce the same as before (looks like)
bse = np.sqrt(np.diag((res1.cov_params(has_optimal_weights=False))))
#weights=res1.weights))))
# TODO: doesn't look different
#assert_allclose(res1.bse, res2.bse, rtol=5e-06, atol=0)
#nobs = instrument.shape[0]
#w0inv = np.dot(instrument.T, instrument) / nobs
q = self.res1.model.gmmobjective(self.res1.params, np.linalg.inv(self.res1.weights))
#assert_allclose(q, res2.Q, rtol=5e-6, atol=0)
class TestGMMStOnestepNO(CheckGMM):
    # matches Stata's defaults: wargs={'centered':False}, has_optimal_weights=False
@classmethod
def setup_class(self):
# compare to Stata default options, onestep GMM
self.params_tol = [1e-5, 1e-6]
self.bse_tol = [5e-6, 5e-7]
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.IVGMM(endog, exog, instrument)
res = mod.fit(start, maxiter=0, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res1 = res
from .results_gmm_griliches import results_onestep as results
self.res2 = results
class TestGMMStOneiter(CheckGMM):
@classmethod
def setup_class(self):
# compare to Stata default options, onestep GMM
# this uses maxiter=1, one iteration in loop
self.params_tol = [5e-4, 5e-5]
self.bse_tol = [7e-3, 5e-4]
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.IVGMM(endog, exog, instrument)
res = mod.fit(start, maxiter=1, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0})
self.res1 = res
from .results_gmm_griliches import results_onestep as results
self.res2 = results
def test_bse_other(self):
res1, res2 = self.res1, self.res2
moms = res1.model.momcond(res1.params)
w = res1.model.calc_weightmatrix(moms)
# try other versions for bse,
# TODO: next two produce the same as before (looks like)
bse = np.sqrt(np.diag((res1.cov_params(has_optimal_weights=False,
weights=res1.weights))))
# TODO: doesn't look different
#assert_allclose(res1.bse, res2.bse, rtol=5e-06, atol=0)
bse = np.sqrt(np.diag((res1.cov_params(has_optimal_weights=False,
#use_weights=True #weights=w
))))
#assert_allclose(res1.bse, res2.bse, rtol=5e-06, atol=0)
#This doesn't replicate Stata oneway either
nobs = instrument.shape[0]
w0inv = np.dot(instrument.T, instrument) / nobs
q = self.res1.model.gmmobjective(self.res1.params, w)#self.res1.weights)
#assert_allclose(q, res2.Q, rtol=5e-6, atol=0)
class TestGMMStOneiterNO(CheckGMM):
@classmethod
def setup_class(self):
# compare to Stata default options, onestep GMM
# this uses maxiter=1, one iteration in loop
self.params_tol = [1e-5, 1e-6]
self.bse_tol = [5e-6, 5e-7]
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.IVGMM(endog, exog, instrument)
res = mod.fit(start, maxiter=1, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res1 = res
from .results_gmm_griliches import results_onestep as results
self.res2 = results
#------------ Crosscheck subclasses
class TestGMMStOneiterNO_Linear(CheckGMM):
@classmethod
def setup_class(self):
# compare to Stata default options, onestep GMM
# this uses maxiter=1, one iteration in loop
self.params_tol = [5e-9, 1e-9]
self.bse_tol = [5e-10, 1e-10]
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.LinearIVGMM(endog, exog, instrument)
res = mod.fit(start, maxiter=1, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res1 = res
mod = gmm.IVGMM(endog, exog, instrument)
res = mod.fit(start, maxiter=1, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res3 = res
from .results_gmm_griliches import results_onestep as results
self.res2 = results
class TestGMMStOneiterNO_Nonlinear(CheckGMM):
@classmethod
def setup_class(self):
# compare to Stata default options, onestep GMM
# this uses maxiter=1, one iteration in loop
self.params_tol = [5e-5, 5e-6]
self.bse_tol = [5e-6, 1e-1]
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
#w0 = np.linalg.inv(w0inv)
def func(params, exog):
return np.dot(exog, params)
mod = gmm.NonlinearIVGMM(endog, exog, instrument, func)
res = mod.fit(start, maxiter=1, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res1 = res
mod = gmm.IVGMM(endog, exog, instrument)
res = mod.fit(start, maxiter=1, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res3 = res
from .results_gmm_griliches import results_onestep as results
self.res2 = results
def test_score(self):
params = self.res1.params * 1.1
weights = self.res1.weights
sc1 = self.res1.model.score(params, weights)
sc2 = super(self.res1.model.__class__, self.res1.model).score(params,
weights)
assert_allclose(sc1, sc2, rtol=1e-6, atol=0)
assert_allclose(sc1, sc2, rtol=0, atol=1e-7)
# score at optimum
sc1 = self.res1.model.score(self.res1.params, weights)
assert_allclose(sc1, np.zeros(len(params)), rtol=0, atol=1e-8)
class TestGMMStOneiterOLS_Linear(CheckGMM):
@classmethod
def setup_class(self):
# replicating OLS by GMM - high agreement
self.params_tol = [1e-11, 1e-12]
self.bse_tol = [1e-13, 1e-13]
exog = exog_st # with const at end
res_ols = OLS(endog, exog).fit()
#Note: start is irrelevant but required
start = np.ones(len(res_ols.params))
nobs, k_instr = instrument.shape
w0inv = np.dot(exog.T, exog) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.LinearIVGMM(endog, exog, exog)
res = mod.fit(start, maxiter=0, inv_weights=w0inv,
#optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0},
optim_args={'disp': 0},
weights_method='iid',
wargs={'centered':False, 'ddof':'k_params'},
has_optimal_weights=True)
self.res1 = res
#from .results_gmm_griliches import results_onestep as results
#self.res2 = results
self.res2 = res_ols
#------------------
class TestGMMSt2(object):
# this looks like an old version, trying out different comparisons
    # of options with Stata
@classmethod
def setup_class(self):
# compare to Stata default options, iterative GMM
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.IVGMM(endog, exog, instrument)
res = mod.fit(start, maxiter=2, inv_weights=w0inv,
wargs={'ddof':0, 'centered':False},
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0})
self.res1 = res
from .results_ivreg2_griliches import results_gmm2s_robust as results
self.res2 = results
# TODO: remove after testing, compare bse from 1 iteration
# see test_basic
mod = gmm.IVGMM(endog, exog, instrument)
res = mod.fit(start, maxiter=1, inv_weights=w0inv,
wargs={'ddof':0, 'centered':False},
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0})
self.res3 = res
def test_basic(self):
res1, res2 = self.res1, self.res2
# test both absolute and relative difference
assert_allclose(res1.params, res2.params, rtol=5e-05, atol=0)
assert_allclose(res1.params, res2.params, rtol=0, atol=5e-06)
n = res1.model.exog.shape[0]
# TODO: check df correction np.sqrt(745./758 )*res1.bse matches better
dffact = np.sqrt(745. / 758 )
assert_allclose(res1.bse * dffact, res2.bse, rtol=5e-03, atol=0)
assert_allclose(res1.bse * dffact, res2.bse, rtol=0, atol=5e-03)
# try other versions for bse,
# TODO: next two produce the same as before (looks like)
bse = np.sqrt(np.diag((res1.cov_params(has_optimal_weights=True,
weights=res1.weights))))
assert_allclose(res1.bse, res2.bse, rtol=5e-01, atol=0)
bse = np.sqrt(np.diag((res1.cov_params(has_optimal_weights=True,
weights=res1.weights,
use_weights=True))))
assert_allclose(res1.bse, res2.bse, rtol=5e-02, atol=0)
# TODO: resolve this
# try bse from previous step, is closer to Stata
        # guess: Stata ivreg2 doesn't recalculate bse after the final iteration
# need better test case, bse difference is close to numerical optimization precision
assert_allclose(self.res3.bse, res2.bse, rtol=5e-05, atol=0)
assert_allclose(self.res3.bse, res2.bse, rtol=0, atol=5e-06)
# TODO; tvalues are not available yet, no inheritance
#assert_allclose(res1.tvalues, res2.tvalues, rtol=5e-10, atol=0)
class CheckIV2SLS(object):
def test_basic(self):
res1, res2 = self.res1, self.res2
# test both absolute and relative difference
assert_allclose(res1.params, res2.params, rtol=1e-9, atol=0)
assert_allclose(res1.params, res2.params, rtol=0, atol=1e-10)
n = res1.model.exog.shape[0]
assert_allclose(res1.bse, res2.bse, rtol=1e-10, atol=0)
assert_allclose(res1.bse, res2.bse, rtol=0, atol=1e-11)
assert_allclose(res1.tvalues, res2.tvalues, rtol=5e-10, atol=0)
def test_other(self):
res1, res2 = self.res1, self.res2
assert_allclose(res1.rsquared, res2.r2, rtol=1e-7, atol=0)
assert_allclose(res1.rsquared_adj, res2.r2_a, rtol=1e-7, atol=0)
# TODO: why is fvalue different, IV2SLS uses inherited linear
assert_allclose(res1.fvalue, res2.F, rtol=1e-10, atol=0)
assert_allclose(res1.f_pvalue, res2.Fp, rtol=1e-8, atol=0)
assert_allclose(np.sqrt(res1.mse_resid), res2.rmse, rtol=1e-10, atol=0)
assert_allclose(res1.ssr, res2.rss, rtol=1e-10, atol=0)
assert_allclose(res1.uncentered_tss, res2.yy, rtol=1e-10, atol=0)
assert_allclose(res1.centered_tss, res2.yyc, rtol=1e-10, atol=0)
assert_allclose(res1.ess, res2.mss, rtol=1e-9, atol=0)
assert_equal(res1.df_model, res2.df_m)
assert_equal(res1.df_resid, res2.df_r)
# TODO: llf raise NotImplementedError
#assert_allclose(res1.llf, res2.ll, rtol=1e-10, atol=0)
def test_hypothesis(self):
res1, res2 = self.res1, self.res2
restriction = np.eye(len(res1.params))
res_t = res1.t_test(restriction)
assert_allclose(res_t.tvalue, res1.tvalues, rtol=1e-12, atol=0)
assert_allclose(res_t.pvalue, res1.pvalues, rtol=1e-12, atol=0)
res_f = res1.f_test(restriction[:-1]) # without constant
# TODO res1.fvalue problem, see issue #1104
assert_allclose(res_f.fvalue, res1.fvalue, rtol=1e-12, atol=0)
assert_allclose(res_f.pvalue, res1.f_pvalue, rtol=1e-12, atol=0)
assert_allclose(res_f.fvalue, res2.F, rtol=1e-10, atol=0)
assert_allclose(res_f.pvalue, res2.Fp, rtol=1e-08, atol=0)
def test_hausman(self):
res1, res2 = self.res1, self.res2
hausm = res1.spec_hausman()
# hausman uses se2 = ssr / nobs, no df correction
assert_allclose(hausm[0], res2.hausman['DWH'], rtol=1e-11, atol=0)
assert_allclose(hausm[1], res2.hausman['DWHp'], rtol=1e-10, atol=1e-25)
def test_smoke(self):
res1 = self.res1
res1.summary()
class TestIV2SLSSt1(CheckIV2SLS):
@classmethod
def setup_class(self):
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
mod = gmm.IV2SLS(endog, exog, instrument)
res = mod.fit()
self.res1 = res
from .results_ivreg2_griliches import results_small as results
self.res2 = results
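# Illustrative note: the classes above follow the setup_class + test_* layout
# used by the test runner, so a subset can be run with, e.g.:
#   pytest -k "IV2SLS or GMMSt" path/to/test_gmm.py
# (the file path is an assumption based on the relative result imports above)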
|
|
# Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import shutil
import logging
import xmltodict
import copy
import re
from codecs import open
from os import getcwd
from os.path import basename, join, normpath
from collections import OrderedDict
from project_generator_definitions.definitions import ProGenDef
from .tool import Tool, Builder, Exporter
from ..util import SOURCE_KEYS
logger = logging.getLogger('progen.tools.uvision')
class uVisionDefinitions():
debuggers = {
'ulink2-me': {
'uvproj': {
'TargetDlls': {
'Driver': 'BIN\\UL2CM3.dll',
},
'Utilities': {
'Flash2': 'BIN\\UL2CM3.DLL',
},
},
'uvoptx' : {
'DebugOpt' : {
'nTsel' : '1',
'pMon': 'BIN\\UL2CM3.DLL',
},
'SetRegEntry' : {
'Key' : 'UL2CM3',
},
},
},
'cmsis-dap': {
'uvproj': {
'TargetDlls': {
'Driver': 'BIN\\CMSIS_AGDI.dll',
},
'Utilities': {
'Flash2': 'BIN\\CMSIS_AGDI.dll',
},
},
'uvoptx' : {
'DebugOpt' : {
'nTsel' : '12',
'pMon': 'BIN\\CMSIS_AGDI.dll',
},
'SetRegEntry' : {
'Key' : 'CMSIS_AGDI',
},
},
},
'j-link': {
'uvproj': {
'TargetDlls': {
'Driver': 'Segger\\JL2CM3.dll',
},
'Utilities': {
'Flash2': 'Segger\\JL2CM3.dll',
},
},
'uvoptx' : {
'DebugOpt' : {
'nTsel' : '6',
'pMon': 'Segger\\JL2CM3.dll',
},
'SetRegEntry' : {
'Key' : 'JL2CM3',
},
},
},
'ulink-pro': {
'uvproj': {
'TargetDlls': {
'Driver': 'BIN\\ULP2CM3.dll',
},
'Utilities': {
'Flash2': 'BIN\\ULP2CM3.dll',
},
},
'uvoptx' : {
'DebugOpt' : {
'nTsel' : '7',
'pMon': 'BIN\\ULP2CM3.DLL',
},
'SetRegEntry' : {
'Key' : 'ULP2CM3',
},
},
},
'st-link': {
'uvproj': {
'TargetDlls': {
'Driver': 'STLink\\ST-LINKIII-KEIL_SWO.dll',
},
'Utilities': {
'Flash2': 'STLink\\ST-LINKIII-KEIL_SWO.dll',
},
},
'uvoptx' : {
'DebugOpt' : {
'nTsel' : '11',
'pMon': 'STLink\\ST-LINKIII-KEIL_SWO.dll',
},
'SetRegEntry' : {
'Key' : 'ST-LINKIII-KEIL_SWO',
},
},
},
'nu-link': {
'uvproj': {
'TargetDlls': {
'Driver': 'BIN\\Nu_Link.dll',
},
'Utilities': {
'Flash2': 'BIN\\Nu_Link.dll',
},
},
'uvoptx' : {
'DebugOpt' : {
'nTsel' : '9',
'pMon': 'NULink\\Nu_Link.dll',
},
'SetRegEntry' : {
'Key' : 'Nu_Link',
},
},
},
}
# use cmsis-dap debugger as default
debuggers_default = 'cmsis-dap'
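# Illustrative sketch of how the table above is consumed by the exporter
# below (_set_target / _uvoptx_set_debugger):
#
#   defs = uVisionDefinitions()
#   defs.debuggers['cmsis-dap']['uvproj']['TargetDlls']['Driver']
#   # -> 'BIN\\CMSIS_AGDI.dll'
#   defs.debuggers['j-link']['uvoptx']['DebugOpt']['pMon']
#   # -> 'Segger\\JL2CM3.dll'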
class Uvision(Tool, Builder, Exporter):
optimization_options = ['O0', 'O1', 'O2', 'O3']
    file_types = {'cpp': 8, 'c': 1, 's': 2, 'obj': 3, 'o': 3, 'lib': 4, 'ar': 4, 'h': 5}
# flags mapping to uvision uvproj dics
# for available flags, check armcc/armasm/armlink command line guide
    # this does not cover all options within a project; the most useful options are
    # exposed via the command line, the rest is covered via template project files
FLAGS_TO_UVISION = {
'asm_flags': 'Aads',
'c_flags': 'Cads',
'cxx_flags': 'Cads',
'ld_flags': 'LDads',
}
ERRORLEVEL = {
0: 'success (0 warnings, 0 errors)',
1: 'warnings',
2: 'errors',
3: 'fatal errors',
        11: "can't write to project file",
12: 'device error',
13: 'error writing',
15: 'error reading xml file',
}
SUCCESSVALUE = 0
WARNVALUE = 1
generated_project = {
'path': '',
'files': {
'uvproj': '',
}
}
def __init__(self, workspace, env_settings):
self.definitions = uVisionDefinitions()
# workspace or project
self.workspace = workspace
self.env_settings = env_settings
self.uvproj_file = join(self.TEMPLATE_DIR, "uvision.uvproj")
self.uvmpw_file = join(self.TEMPLATE_DIR, "uvision.uvmpw")
self.uvoptx_file = join(self.TEMPLATE_DIR, "uvision.uvoptx")
@staticmethod
def get_toolnames():
return ['uvision']
@staticmethod
def get_toolchain():
return 'uvision'
def _expand_one_file(self, source, new_data, extension):
return {"FilePath": source, "FileName": basename(source),
"FileType": self.file_types[extension]}
def _normalize_mcu_def(self, mcu_def):
for k, v in mcu_def['TargetOption'].items():
mcu_def['TargetOption'][k] = v[0]
def _uvproj_clean_xmldict(self, uvproj_dic):
for k, v in uvproj_dic.items():
if v is None:
uvproj_dic[k] = ''
def _uvproj_set_CommonProperty(self, uvproj_dic, project_dic):
self._uvproj_clean_xmldict(uvproj_dic)
def _uvproj_set_DebugOption(self, uvproj_dic, project_dic):
self._uvproj_clean_xmldict(uvproj_dic)
self._uvproj_clean_xmldict(uvproj_dic['SimDlls'])
self._uvproj_clean_xmldict(uvproj_dic['Simulator'])
self._uvproj_clean_xmldict(uvproj_dic['Target'])
self._uvproj_clean_xmldict(uvproj_dic['TargetDlls'])
def _uvproj_set_DllOption(self, uvproj_dic, project_dic):
self._uvproj_clean_xmldict(uvproj_dic)
def _uvproj_set_TargetArmAds(self, uvproj_dic, project_dic):
self._uvproj_clean_xmldict(uvproj_dic['Aads'])
self._uvproj_clean_xmldict(uvproj_dic['Aads']['VariousControls'])
self._uvproj_clean_xmldict(uvproj_dic['ArmAdsMisc'])
self._uvproj_clean_xmldict(uvproj_dic['Cads'])
self._uvproj_clean_xmldict(uvproj_dic['Cads']['VariousControls'])
self._uvproj_clean_xmldict(uvproj_dic['LDads'])
uvproj_dic['LDads']['ScatterFile'] = project_dic['linker_file']
uvproj_dic['Cads']['VariousControls']['IncludePath'] = '; '.join(project_dic['include_paths']).encode('utf-8')
uvproj_dic['Cads']['VariousControls']['Define'] = ', '.join(project_dic['macros']).encode('utf-8')
if project_dic['macros']:
uvproj_dic['Aads']['VariousControls']['MiscControls'] = '--cpreproc --cpreproc_opts=-D' + ',-D'.join(project_dic['macros'])
for misc_keys in project_dic['misc'].keys():
            # ld_flags don't follow the same structure as the asm/c flags (KEIL quirk)
if misc_keys == 'ld_flags':
for item in project_dic['misc'][misc_keys]:
uvproj_dic[self.FLAGS_TO_UVISION[misc_keys]]['Misc'] += ' ' + item
else:
for item in project_dic['misc'][misc_keys]:
uvproj_dic[self.FLAGS_TO_UVISION[misc_keys]]['VariousControls']['MiscControls'] += ' ' + item
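    # Sketch of the FLAGS_TO_UVISION mapping applied above (the project
    # values are hypothetical):
    #
    #   project_dic['misc'] = {'c_flags': ['--gnu'],
    #                          'ld_flags': ['--info=totals']}
    #
    # appends ' --gnu' to uvproj_dic['Cads']['VariousControls']['MiscControls']
    # and ' --info=totals' to uvproj_dic['LDads']['Misc'].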
def _uvproj_set_TargetCommonOption(self, uvproj_dic, project_dic):
self._uvproj_clean_xmldict(uvproj_dic)
self._uvproj_clean_xmldict(uvproj_dic['AfterMake'])
self._uvproj_clean_xmldict(uvproj_dic['BeforeCompile'])
self._uvproj_clean_xmldict(uvproj_dic['BeforeMake'])
self._uvproj_clean_xmldict(uvproj_dic['TargetStatus'])
uvproj_dic['OutputDirectory'] = project_dic['build_dir']
uvproj_dic['OutputName'] = project_dic['name']
uvproj_dic['CreateExecutable'] = 1 if project_dic['output_type'] == 'exe' else 0
uvproj_dic['CreateLib'] = 1 if project_dic['output_type'] == 'lib' else 0
def _uvproj_set_Utilities(self, uvproj_dic, project_dic):
self._uvproj_clean_xmldict(uvproj_dic)
def _uvproj_files_set(self, uvproj_dic, project_dic):
uvproj_dic['Project']['Targets']['Target']['Groups'] = OrderedDict()
uvproj_dic['Project']['Targets']['Target']['Groups']['Group'] = []
i = 0
for group_name, files in project_dic['groups'].items():
            # Why OrderedDict() - the uvision project format requires an order: GroupName must come before Files,
            # otherwise uVision does not recognize any file. The same applies to other attributes, like VariousControls.
            # Therefore be aware that order matters in this exporter
group = OrderedDict()
group['GroupName'] = group_name
# group['Files'] = {}
group['Files'] = {'File': []}
uvproj_dic['Project']['Targets']['Target']['Groups']['Group'].append(group)
for file in files:
uvproj_dic['Project']['Targets']['Target']['Groups']['Group'][i]['Files']['File'].append(file)
files = uvproj_dic['Project']['Targets']['Target']['Groups']['Group'][i]['Files']['File']
uvproj_dic['Project']['Targets']['Target']['Groups']['Group'][i]['Files']['File'] = sorted(files, key=lambda x: x['FileName'].lower())
i += 1
def _generate_uvmpw_file(self):
uvmpw_dic = xmltodict.parse(open(self.uvmpw_file))
uvmpw_dic['ProjectWorkspace']['project'] = []
for project in self.workspace['projects']:
            # Check how far the project is from the root and the workspace. If they don't match,
            # get the relative path for the project and inject it into the workspace
path_project = os.path.dirname(project['files']['uvproj'])
path_workspace = os.path.dirname(self.workspace['settings']['path'] + '\\')
destination = os.path.join(os.path.relpath(self.env_settings.root, path_project), project['files']['uvproj'])
if path_project != path_workspace:
destination = os.path.join(os.path.relpath(self.env_settings.root, path_workspace), project['files']['uvproj'])
uvmpw_dic['ProjectWorkspace']['project'].append({'PathAndName': destination})
# generate the file
uvmpw_xml = xmltodict.unparse(uvmpw_dic, pretty=True)
project_path, uvmpw = self.gen_file_raw(uvmpw_xml, '%s.uvmpw' % self.workspace['settings']['name'], self.workspace['settings']['path'])
return project_path, uvmpw
def _set_target(self, expanded_dic, uvproj_dic, tool_name):
pro_def = ProGenDef(tool_name)
if not pro_def.is_supported(expanded_dic['target'].lower()):
raise RuntimeError("Target %s is not supported. Please add them to https://github.com/project-generator/project_generator_definitions" % expanded_dic['target'].lower())
mcu_def_dic = pro_def.get_tool_definition(expanded_dic['target'].lower())
if not mcu_def_dic:
raise RuntimeError(
"Target definitions were not found for %s. Please add them to https://github.com/project-generator/project_generator_definitions" % expanded_dic['target'].lower())
logger.debug("Mcu definitions: %s" % mcu_def_dic)
uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['Device'] = mcu_def_dic['TargetOption']['Device'][0]
uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['DeviceId'] = mcu_def_dic['TargetOption']['DeviceId'][0]
try:
uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['Vendor'] = mcu_def_dic['TargetOption']['Vendor'][0]
uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['Cpu'] = mcu_def_dic['TargetOption']['Cpu'][0].encode('utf-8')
uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['FlashDriverDll'] = str(mcu_def_dic['TargetOption']['FlashDriverDll'][0]).encode('utf-8')
uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['SFDFile'] = mcu_def_dic['TargetOption']['SFDFile'][0]
uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['RegisterFile'] = mcu_def_dic['TargetOption']['RegisterFile'][0]
except KeyError:
pass
        # overwrite the template if the target has a debugger defined;
        # progen can later override this if a debugger is set in the project data
try:
debugger_name = pro_def.get_debugger(expanded_dic['target'])['name']
uvproj_dic['Project']['Targets']['Target']['TargetOption']['DebugOption']['TargetDlls']['Driver'] = self.definitions.debuggers[debugger_name]['uvproj']['TargetDlls']['Driver']
uvproj_dic['Project']['Targets']['Target']['TargetOption']['Utilities']['Flash2'] = self.definitions.debuggers[debugger_name]['uvproj']['Utilities']['Flash2']
except (TypeError, KeyError) as err:
pass
# Support new device packs
if 'PackID' in mcu_def_dic['TargetOption']:
if tool_name != 'uvision5':
                # using software packs requires uVision 5
                logger.info("The target might not be supported in %s; it requires uvision5" % tool_name)
uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['PackID'] = mcu_def_dic['TargetOption']['PackID'][0]
def _uvoptx_set_debugger(self, expanded_dic, uvoptx_dic, tool_name):
pro_def = ProGenDef(tool_name)
if not pro_def.is_supported(expanded_dic['target'].lower()):
raise RuntimeError("Target %s is not supported. Please add them to https://github.com/project-generator/project_generator_definitions" % expanded_dic['target'].lower())
mcu_def_dic = pro_def.get_tool_definition(expanded_dic['target'].lower())
if not mcu_def_dic:
raise RuntimeError(
"Target definitions were not found for %s. Please add them to https://github.com/project-generator/project_generator_definitions" % expanded_dic['target'].lower())
logger.debug("Mcu definitions: %s" % mcu_def_dic)
        # set the same target name and FlashDriverDll config as in the uvprojx file
try:
uvoptx_dic['ProjectOpt']['Target']['TargetName'] = expanded_dic['name']
uvoptx_dic['ProjectOpt']['Target']['TargetOption']['TargetDriverDllRegistry']['SetRegEntry']['Name'] = str(mcu_def_dic['TargetOption']['FlashDriverDll'][0]).encode('utf-8')
except KeyError:
return
# load debugger from target dictionary or use default debugger
try:
debugger_dic = pro_def.get_debugger(expanded_dic['target'])
if debugger_dic is None:
debugger_name = self.definitions.debuggers_default
else:
debugger_name = debugger_dic['name']
uvoptx_dic['ProjectOpt']['Target']['TargetOption']['DebugOpt']['nTsel'] = self.definitions.debuggers[debugger_name]['uvoptx']['DebugOpt']['nTsel']
uvoptx_dic['ProjectOpt']['Target']['TargetOption']['DebugOpt']['pMon'] = self.definitions.debuggers[debugger_name]['uvoptx']['DebugOpt']['pMon']
uvoptx_dic['ProjectOpt']['Target']['TargetOption']['TargetDriverDllRegistry']['SetRegEntry']['Key'] = self.definitions.debuggers[debugger_name]['uvoptx']['SetRegEntry']['Key']
except KeyError:
raise RuntimeError("Debugger %s is not supported" % expanded_dic['debugger'])
def _export_single_project(self, tool_name):
expanded_dic = self.workspace.copy()
groups = self._get_groups(self.workspace)
expanded_dic['groups'] = {}
for group in groups:
expanded_dic['groups'][group] = []
# get relative path and fix all paths within a project
self._iterate(self.workspace, expanded_dic)
expanded_dic['build_dir'] = '.\\' + expanded_dic['build_dir'] + '\\'
        # use the project-specified template, the generic tool template, or the default one
if expanded_dic['template']:
for template in expanded_dic['template']:
template = join(getcwd(), template)
if os.path.splitext(template)[1] == '.uvproj' or os.path.splitext(template)[1] == '.uvprojx' or \
re.match('.*\.uvproj.tmpl$', template) or re.match('.*\.uvprojx.tmpl$', template):
try:
uvproj_dic = xmltodict.parse(open(template, encoding="utf8").read())
except IOError:
logger.info("Template file %s not found" % template)
return None, None
else:
logger.info("Template file %s contains unknown template extension (.uvproj/x are valid). Using default one" % template)
uvproj_dic = xmltodict.parse(open(self.uvproj_file))
elif 'uvision' in self.env_settings.templates.keys():
# template overrides what is set in the yaml files
for template in self.env_settings.templates['uvision']:
template = join(getcwd(), template)
if os.path.splitext(template)[1] == '.uvproj' or os.path.splitext(template)[1] == '.uvprojx' or \
re.match('.*\.uvproj.tmpl$', template) or re.match('.*\.uvprojx.tmpl$', template):
try:
uvproj_dic = xmltodict.parse(open(template, encoding="utf8").read())
except IOError:
logger.info("Template file %s not found. Using default template" % template)
uvproj_dic = xmltodict.parse(open(self.uvproj_file))
else:
logger.info("Template file %s contains unknown template extension (.uvproj/x are valid). Using default one" % template)
uvproj_dic = xmltodict.parse(open(self.uvproj_file))
else:
uvproj_dic = xmltodict.parse(open(self.uvproj_file))
try:
uvproj_dic['Project']['Targets']['Target']['TargetName'] = expanded_dic['name']
except KeyError:
raise RuntimeError("The uvision template is not valid .uvproj file")
self._uvproj_files_set(uvproj_dic, expanded_dic)
self._uvproj_set_CommonProperty(
uvproj_dic['Project']['Targets']['Target']['TargetOption']['CommonProperty'], expanded_dic)
self._uvproj_set_DebugOption(
uvproj_dic['Project']['Targets']['Target']['TargetOption']['DebugOption'], expanded_dic)
self._uvproj_set_DllOption(
uvproj_dic['Project']['Targets']['Target']['TargetOption']['DllOption'], expanded_dic)
self._uvproj_set_TargetArmAds(
uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetArmAds'], expanded_dic)
self._uvproj_set_TargetCommonOption(
uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption'], expanded_dic)
self._uvproj_set_Utilities(
uvproj_dic['Project']['Targets']['Target']['TargetOption']['Utilities'], expanded_dic)
        # set the target only if defined, otherwise use the one from the template/default
if tool_name == 'uvision5':
extension = 'uvprojx'
uvproj_dic['Project']['SchemaVersion'] = '2.1'
else:
extension = 'uvproj'
uvproj_dic['Project']['SchemaVersion'] = '1.1'
if expanded_dic['target']:
self._set_target(expanded_dic, uvproj_dic, tool_name)
# load debugger
if expanded_dic['debugger']:
try:
uvproj_dic['Project']['Targets']['Target']['TargetOption']['DebugOption']['TargetDlls']['Driver'] = self.definitions.debuggers[expanded_dic['debugger']]['uvproj']['TargetDlls']['Driver']
uvproj_dic['Project']['Targets']['Target']['TargetOption']['Utilities']['Flash2'] = self.definitions.debuggers[expanded_dic['debugger']]['uvproj']['Utilities']['Flash2']
except KeyError:
raise RuntimeError("Debugger %s is not supported" % expanded_dic['debugger'])
# Project file
uvproj_xml = xmltodict.unparse(uvproj_dic, pretty=True)
project_path, uvproj = self.gen_file_raw(uvproj_xml, '%s.%s' % (expanded_dic['name'], extension), expanded_dic['output_dir']['path'])
uvoptx = None
# generic tool template specified
uvoptx_dic = xmltodict.parse(open(self.uvoptx_file))
self._uvoptx_set_debugger(expanded_dic, uvoptx_dic, tool_name)
# set target only if defined, otherwise use from template/default one
if tool_name == 'uvision5':
extension = 'uvoptx'
else:
extension = 'uvopt'
# Project file
uvoptx_xml = xmltodict.unparse(uvoptx_dic, pretty=True)
project_path, uvoptx = self.gen_file_raw(uvoptx_xml, '%s.%s' % (expanded_dic['name'], extension), expanded_dic['output_dir']['path'])
return project_path, [uvproj, uvoptx]
def export_workspace(self):
path, workspace = self._generate_uvmpw_file()
return path, [workspace]
def export_project(self):
path, files = self._export_single_project('uvision') #todo: uvision will switch to uv4
generated_projects = copy.deepcopy(self.generated_project)
generated_projects['path'] = path
generated_projects['files']['uvproj'] = files[0]
return generated_projects
def get_generated_project_files(self):
return {'path': self.workspace['path'], 'files': [self.workspace['files']['uvproj']]}
def _build_project(self, tool_name, extension):
# > UV4 -b [project_path]
path = join(self.env_settings.root, self.workspace['files'][extension])
        if path.split('.')[-1] != extension:
            path = path + '.' + extension
if not os.path.exists(path):
logger.debug("The file: %s does not exists, exported prior building?" % path)
return -1
logger.debug("Building uVision project: %s" % path)
build_log_path = join(os.path.dirname(path),'build','build_log.txt')
args = [self.env_settings.get_env_settings(tool_name), '-r', '-j0', '-o', build_log_path, path]
logger.debug(args)
try:
ret_code = None
ret_code = subprocess.call(args)
except:
logger.error(
"Error whilst calling UV4: '%s'. Please set uvision path in the projects.yaml file." % self.env_settings.get_env_settings('uvision'))
return -1
else:
if ret_code != self.SUCCESSVALUE and ret_code != self.WARNVALUE:
# Seems like something went wrong.
logger.error("Project: %s build failed with the status: %s" % (self.workspace['files'][extension], self.ERRORLEVEL.get(ret_code, "Unknown")))
return -1
else:
logger.info("Project: %s build succeeded with the status: %s" % (self.workspace['files'][extension], self.ERRORLEVEL.get(ret_code, "Unknown")))
return 0
def build_project(self):
return self._build_project('uvision', 'uvproj')
class Uvision5(Uvision):
generated_project = {
'path': '',
'files': {
'uvprojx': '',
'uvoptx': '',
}
}
def __init__(self, workspace, env_settings):
super(Uvision5, self).__init__(workspace, env_settings)
@staticmethod
def get_toolnames():
return ['uvision5']
def export_project(self):
path, files = self._export_single_project('uvision5')
generated_projects = copy.deepcopy(self.generated_project)
generated_projects['path'] = path
generated_projects['files']['uvprojx'] = files[0]
generated_projects['files']['uvoptx'] = files[1]
return generated_projects
def get_generated_project_files(self):
return {'path': self.workspace['path'], 'files': [self.workspace['files']['uvprojx'], self.workspace['files']['uvoptx']]}
def build_project(self):
        # tool_name stays 'uvision' because UV4 is still used to build uVision 5 projects
return self._build_project('uvision', 'uvprojx')
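# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original exporter): how the classes above
# might be driven. The `workspace` dictionary and `env_settings` object are
# assumptions here; in project_generator they are assembled by the tool front end.
def _example_export_uvision5(workspace, env_settings):
    """Hypothetical helper: export a uVision 5 project and return its record."""
    exporter = Uvision5(workspace, env_settings)
    generated = exporter.export_project()   # writes <name>.uvprojx and <name>.uvoptx
    # generated['path'] is the output directory, generated['files'] the project files.
    # A subsequent exporter.build_project() call would run UV4 -b on the .uvprojx,
    # provided workspace['files']['uvprojx'] points at the exported project.
    return generated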
|
|
#!/usr/bin/python
import unittest
import os
import random
import numpy as np
from pymatgen.core.structure import Structure
from pymatgen.core.lattice import Lattice
from pymatgen.core.surface import Slab, SlabGenerator, generate_all_slabs, \
get_symmetrically_distinct_miller_indices
from pymatgen.symmetry.groups import SpaceGroup
from pymatgen.util.testing import PymatgenTest
def get_path(path_str):
cwd = os.path.abspath(os.path.dirname(__file__))
path = os.path.join(cwd, "..", "..", "..", "test_files", "surface_tests",
path_str)
return path
class SlabTest(PymatgenTest):
def setUp(self):
zno1 = Structure.from_file(get_path("ZnO-wz.cif"), primitive=False)
zno55 = SlabGenerator(zno1, [1, 0, 0], 5, 5, lll_reduce=False,
center_slab=False).get_slab()
self.zno1 = zno1
self.zno55 = zno55
self.h = Structure(Lattice.cubic(3), ["H"],
[[0, 0, 0]])
self.libcc = Structure(Lattice.cubic(3.51004), ["Li", "Li"],
[[0, 0, 0], [0.5, 0.5, 0.5]])
def test_init(self):
zno_slab = Slab(self.zno55.lattice, self.zno55.species,
self.zno55.frac_coords,
self.zno55.miller_index,
self.zno55.oriented_unit_cell,
0, self.zno55.scale_factor)
        m = self.zno55.lattice.matrix
area = np.linalg.norm(np.cross(m[0], m[1]))
self.assertAlmostEqual(zno_slab.surface_area, area)
self.assertEqual(zno_slab.lattice.lengths_and_angles,
self.zno55.lattice.lengths_and_angles)
self.assertEqual(zno_slab.oriented_unit_cell.composition,
self.zno1.composition)
self.assertEqual(len(zno_slab), 8)
def test_add_adsorbate_atom(self):
zno_slab = Slab(self.zno55.lattice, self.zno55.species,
self.zno55.frac_coords,
self.zno55.miller_index,
self.zno55.oriented_unit_cell,
0, self.zno55.scale_factor)
zno_slab.add_adsorbate_atom([1], 'H', 1)
self.assertEqual(len(zno_slab), 9)
self.assertEqual(str(zno_slab[8].specie), 'H')
self.assertAlmostEqual(zno_slab.get_distance(1, 8), 1.0)
self.assertTrue(zno_slab[8].c > zno_slab[0].c)
m = self.zno55.lattice.matrix
area = np.linalg.norm(np.cross(m[0], m[1]))
self.assertAlmostEqual(zno_slab.surface_area, area)
self.assertEqual(zno_slab.lattice.lengths_and_angles,
self.zno55.lattice.lengths_and_angles)
def test_get_sorted_structure(self):
species = [str(site.specie) for site in
self.zno55.get_sorted_structure()]
self.assertEqual(species, ["Zn2+"] * 4 + ["O2-"] * 4)
def test_methods(self):
#Test various structure methods
self.zno55.get_primitive_structure()
def test_as_from_dict(self):
d = self.zno55.as_dict()
obj = Slab.from_dict(d)
self.assertEqual(obj.miller_index, (1, 0, 0))
class SlabGeneratorTest(PymatgenTest):
def test_get_slab(self):
s = self.get_structure("LiFePO4")
gen = SlabGenerator(s, [0, 0, 1], 10, 10)
s = gen.get_slab(0.25)
self.assertAlmostEqual(s.lattice.abc[2], 20.820740000000001)
fcc = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3), ["Fe"],
[[0, 0, 0]])
gen = SlabGenerator(fcc, [1, 1, 1], 10, 10)
slab = gen.get_slab()
gen = SlabGenerator(fcc, [1, 1, 1], 10, 10, primitive=False)
slab_non_prim = gen.get_slab()
self.assertEqual(len(slab), 6)
self.assertEqual(len(slab_non_prim), len(slab) * 4)
#Some randomized testing of cell vectors
for i in range(1, 231):
i = random.randint(1, 230)
sg = SpaceGroup.from_int_number(i)
if sg.crystal_system == "hexagonal" or (sg.crystal_system == \
"trigonal" and sg.symbol.endswith("H")):
latt = Lattice.hexagonal(5, 10)
else:
#Cubic lattice is compatible with all other space groups.
latt = Lattice.cubic(5)
s = Structure.from_spacegroup(i, latt, ["H"], [[0, 0, 0]])
miller = (0, 0, 0)
while miller == (0, 0, 0):
miller = (random.randint(0, 6), random.randint(0, 6),
random.randint(0, 6))
gen = SlabGenerator(s, miller, 10, 10)
a, b, c = gen.oriented_unit_cell.lattice.matrix
self.assertAlmostEqual(np.dot(a, gen._normal), 0)
self.assertAlmostEqual(np.dot(b, gen._normal), 0)
def test_get_slabs(self):
gen = SlabGenerator(self.get_structure("CsCl"), [0, 0, 1], 10, 10)
#Test orthogonality of some internal variables.
a, b, c = gen.oriented_unit_cell.lattice.matrix
self.assertAlmostEqual(np.dot(a, gen._normal), 0)
self.assertAlmostEqual(np.dot(b, gen._normal), 0)
self.assertEqual(len(gen.get_slabs()), 1)
s = self.get_structure("LiFePO4")
gen = SlabGenerator(s, [0, 0, 1], 10, 10)
self.assertEqual(len(gen.get_slabs()), 5)
self.assertEqual(len(gen.get_slabs(bonds={("P", "O"): 3})), 2)
        # There are no slabs in LFP that do not break either P-O or Fe-O
        # bonds for a miller index of [0, 0, 1].
self.assertEqual(len(gen.get_slabs(
bonds={("P", "O"): 3, ("Fe", "O"): 3})), 0)
#If we allow some broken bonds, there are a few slabs.
self.assertEqual(len(gen.get_slabs(
bonds={("P", "O"): 3, ("Fe", "O"): 3},
max_broken_bonds=2)), 2)
        # At this threshold, only the origin and center Li result in
        # clustering. All other sites are non-clustered, so the number of
        # slabs is the number of sites in the LiFePO4 unit cell - 2 + 1.
self.assertEqual(len(gen.get_slabs(tol=1e-4)), 15)
LiCoO2 = Structure.from_file(get_path("icsd_LiCoO2.cif"),
primitive=False)
gen = SlabGenerator(LiCoO2, [0, 0, 1], 10, 10)
lco = gen.get_slabs(bonds={("Co", "O"): 3})
self.assertEqual(len(lco), 1)
a, b, c = gen.oriented_unit_cell.lattice.matrix
self.assertAlmostEqual(np.dot(a, gen._normal), 0)
self.assertAlmostEqual(np.dot(b, gen._normal), 0)
def test_triclinic_TeI(self):
# Test case for a triclinic structure of TeI. Only these three
# Miller indices are used because it is easier to identify which
# atoms should be in a surface together. The closeness of the sites
# in other Miller indices can cause some ambiguity when choosing a
# higher tolerance.
numb_slabs = {(0, 0, 1): 5, (0, 1, 0): 3, (1, 0, 0): 7}
TeI = Structure.from_file(get_path("icsd_TeI.cif"),
primitive=False)
for k, v in numb_slabs.items():
trclnc_TeI = SlabGenerator(TeI, k, 10, 10)
TeI_slabs = trclnc_TeI.get_slabs()
self.assertEqual(v, len(TeI_slabs))
class FuncTest(PymatgenTest):
def setUp(self):
self.cscl = self.get_structure("CsCl")
self.lifepo4 = self.get_structure("LiFePO4")
self.tei = Structure.from_file(get_path("icsd_TeI.cif"),
primitive=False)
self.LiCoO2 = Structure.from_file(get_path("icsd_LiCoO2.cif"),
primitive=False)
self.p1 = Structure(Lattice.from_parameters(3, 4, 5, 31, 43, 50),
["H", "He"], [[0, 0, 0], [0.1, 0.2, 0.3]])
def test_get_symmetrically_distinct_miller_indices(self):
indices = get_symmetrically_distinct_miller_indices(self.cscl, 1)
self.assertEqual(len(indices), 3)
indices = get_symmetrically_distinct_miller_indices(self.cscl, 2)
self.assertEqual(len(indices), 6)
self.assertEqual(len(get_symmetrically_distinct_miller_indices(
self.lifepo4, 1)), 7)
# The TeI P-1 structure should have 13 unique millers (only inversion
# symmetry eliminates pairs)
indices = get_symmetrically_distinct_miller_indices(self.tei, 1)
self.assertEqual(len(indices), 13)
# P1 and P-1 should have the same # of miller indices since surfaces
# always have inversion symmetry.
indices = get_symmetrically_distinct_miller_indices(self.p1, 1)
self.assertEqual(len(indices), 13)
def test_generate_all_slabs(self):
slabs = generate_all_slabs(self.cscl, 1, 10, 10)
# Only three possible slabs, one each in (100), (110) and (111).
self.assertEqual(len(slabs), 3)
slabs = generate_all_slabs(self.cscl, 1, 10, 10,
bonds={("Cs", "Cl"): 4})
# No slabs if we don't allow broken Cs-Cl
self.assertEqual(len(slabs), 0)
slabs = generate_all_slabs(self.cscl, 1, 10, 10,
bonds={("Cs", "Cl"): 4},
max_broken_bonds=100)
self.assertEqual(len(slabs), 3)
slabs1 = generate_all_slabs(self.lifepo4, 1, 10, 10, tol=0.1,
bonds={("P", "O"): 3})
self.assertEqual(len(slabs1), 4)
slabs2 = generate_all_slabs(self.lifepo4, 1, 10, 10,
bonds={("P", "O"): 3, ("Fe", "O"): 3})
self.assertEqual(len(slabs2), 0)
        # There should be only one possible stable surface, and it lies in
        # the (001) oriented unit cell.
slabs3 = generate_all_slabs(self.LiCoO2, 1, 10, 10,
bonds={("Co", "O"): 3})
self.assertEqual(len(slabs3), 1)
mill = (0, 0, 1)
for s in slabs3:
self.assertEqual(s.miller_index, mill)
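# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original tests): the basic SlabGenerator
# flow the cases above exercise, written as a plain module-level helper. The
# argument defaults are placeholders chosen to match the tests, not recommended
# production settings.
def _example_make_slabs(structure, miller_index=(0, 0, 1),
                        min_slab_size=10, min_vacuum_size=10):
    """Hypothetical helper: all symmetrically distinct slabs for one Miller index."""
    gen = SlabGenerator(structure, miller_index, min_slab_size, min_vacuum_size)
    return gen.get_slabs()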
if __name__ == "__main__":
unittest.main()
|
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from code import Code
from model import PropertyType, Type
import cpp_util
import model
import schema_util
import sys
import util_cc_helper
class CCGenerator(object):
def __init__(self, type_generator, cpp_namespace):
self._type_generator = type_generator
self._cpp_namespace = cpp_namespace
def Generate(self, namespace):
return _Generator(namespace,
self._type_generator,
self._cpp_namespace).Generate()
class _Generator(object):
"""A .cc generator for a namespace.
"""
def __init__(self, namespace, cpp_type_generator, cpp_namespace):
self._namespace = namespace
self._type_helper = cpp_type_generator
self._cpp_namespace = cpp_namespace
self._target_namespace = (
self._type_helper.GetCppNamespaceName(self._namespace))
self._util_cc_helper = (
util_cc_helper.UtilCCHelper(self._type_helper))
def Generate(self):
"""Generates a Code object with the .cc for a single namespace.
"""
c = Code()
(c.Append(cpp_util.CHROMIUM_LICENSE)
.Append()
.Append(cpp_util.GENERATED_FILE_MESSAGE % self._namespace.source_file)
.Append()
.Append(self._util_cc_helper.GetIncludePath())
.Append('#include "base/json/json_writer.h"')
.Append('#include "base/logging.h"')
.Append('#include "base/string_number_conversions.h"')
.Append('#include "%s/%s.h"' %
(self._namespace.source_file_dir, self._namespace.unix_name))
.Cblock(self._type_helper.GenerateIncludes(include_soft=True))
.Concat(cpp_util.OpenNamespace(self._cpp_namespace))
.Cblock(self._type_helper.GetNamespaceStart())
)
if self._namespace.properties:
(c.Append('//')
.Append('// Properties')
.Append('//')
.Append()
)
for property in self._namespace.properties.values():
property_code = self._type_helper.GeneratePropertyValues(
property,
'const %(type)s %(name)s = %(value)s;',
nodoc=True)
if property_code:
c.Cblock(property_code)
if self._namespace.types:
(c.Append('//')
.Append('// Types')
.Append('//')
.Append()
.Cblock(self._GenerateTypes(None, self._namespace.types.values()))
)
if self._namespace.functions:
(c.Append('//')
.Append('// Functions')
.Append('//')
.Append()
)
for function in self._namespace.functions.values():
c.Cblock(self._GenerateFunction(function))
if self._namespace.events:
(c.Append('//')
.Append('// Events')
.Append('//')
.Append()
)
for event in self._namespace.events.values():
c.Cblock(self._GenerateEvent(event))
(c.Concat(self._type_helper.GetNamespaceEnd())
.Cblock(cpp_util.CloseNamespace(self._cpp_namespace))
)
return c
def _GenerateType(self, cpp_namespace, type_):
"""Generates the function definitions for a type.
"""
classname = cpp_util.Classname(schema_util.StripNamespace(type_.name))
c = Code()
if type_.functions:
# Wrap functions within types in the type's namespace.
(c.Append('namespace %s {' % classname)
.Append())
for function in type_.functions.values():
c.Cblock(self._GenerateFunction(function))
c.Append('} // namespace %s' % classname)
elif type_.property_type == PropertyType.ARRAY:
c.Cblock(self._GenerateType(cpp_namespace, type_.item_type))
elif (type_.property_type == PropertyType.OBJECT or
type_.property_type == PropertyType.CHOICES):
if cpp_namespace is None:
classname_in_namespace = classname
else:
classname_in_namespace = '%s::%s' % (cpp_namespace, classname)
if type_.property_type == PropertyType.OBJECT:
c.Cblock(self._GeneratePropertyFunctions(classname_in_namespace,
type_.properties.values()))
else:
c.Cblock(self._GenerateTypes(classname_in_namespace, type_.choices))
(c.Append('%s::%s()' % (classname_in_namespace, classname))
.Cblock(self._GenerateInitializersAndBody(type_))
.Append('%s::~%s() {}' % (classname_in_namespace, classname))
.Append()
)
if type_.origin.from_json:
c.Cblock(self._GenerateTypePopulate(classname_in_namespace, type_))
if type_.origin.from_client:
c.Cblock(self._GenerateTypeToValue(classname_in_namespace, type_))
elif type_.property_type == PropertyType.ENUM:
(c.Cblock(self._GenerateEnumToString(cpp_namespace, type_))
.Cblock(self._GenerateEnumFromString(cpp_namespace, type_))
)
return c
def _GenerateInitializersAndBody(self, type_):
items = []
for prop in type_.properties.values():
if prop.optional:
continue
t = prop.type_
if t.property_type == PropertyType.INTEGER:
items.append('%s(0)' % prop.unix_name)
elif t.property_type == PropertyType.DOUBLE:
items.append('%s(0.0)' % prop.unix_name)
elif t.property_type == PropertyType.BOOLEAN:
items.append('%s(false)' % prop.unix_name)
elif t.property_type == PropertyType.BINARY:
items.append('%s(NULL)' % prop.unix_name)
elif (t.property_type == PropertyType.ANY or
t.property_type == PropertyType.ARRAY or
t.property_type == PropertyType.CHOICES or
t.property_type == PropertyType.ENUM or
t.property_type == PropertyType.OBJECT or
t.property_type == PropertyType.FUNCTION or
t.property_type == PropertyType.REF or
t.property_type == PropertyType.STRING):
# TODO(miket): It would be nice to initialize CHOICES and ENUM, but we
# don't presently have the semantics to indicate which one of a set
# should be the default.
continue
else:
raise TypeError(t)
if items:
s = ': %s' % (', '.join(items))
else:
s = ''
s = s + ' {}'
return Code().Append(s)
def _GenerateTypePopulate(self, cpp_namespace, type_):
"""Generates the function for populating a type given a pointer to it.
E.g for type "Foo", generates Foo::Populate()
"""
classname = cpp_util.Classname(schema_util.StripNamespace(type_.name))
c = Code()
(c.Append('// static')
.Append('bool %(namespace)s::Populate(')
.Sblock(' const base::Value& value, %(name)s* out) {')
)
if type_.property_type == PropertyType.CHOICES:
for choice in type_.choices:
value_type = cpp_util.GetValueType(self._type_helper.FollowRef(choice))
(c.Sblock('if (value.IsType(%s)) {' % value_type)
.Concat(self._GeneratePopulateVariableFromValue(
choice,
'(&value)',
'out->as_%s' % choice.unix_name,
'false',
is_ptr=True))
.Append('return true;')
.Eblock('}')
)
c.Append('return false;')
elif type_.property_type == PropertyType.OBJECT:
(c.Append('if (!value.IsType(base::Value::TYPE_DICTIONARY))')
.Append(' return false;')
)
if type_.properties or type_.additional_properties is not None:
c.Append('const base::DictionaryValue* dict = '
'static_cast<const base::DictionaryValue*>(&value);')
for prop in type_.properties.values():
c.Concat(self._InitializePropertyToDefault(prop, 'out'))
for prop in type_.properties.values():
c.Concat(self._GenerateTypePopulateProperty(prop, 'dict', 'out'))
if type_.additional_properties is not None:
if type_.additional_properties.property_type == PropertyType.ANY:
c.Append('out->additional_properties.MergeDictionary(dict);')
else:
cpp_type = self._type_helper.GetCppType(type_.additional_properties,
is_in_container=True)
(c.Append('for (base::DictionaryValue::Iterator it(*dict);')
.Sblock(' it.HasNext(); it.Advance()) {')
.Append('%s tmp;' % cpp_type)
.Concat(self._GeneratePopulateVariableFromValue(
type_.additional_properties,
'(&it.value())',
'tmp',
'false'))
.Append('out->additional_properties[it.key()] = tmp;')
.Eblock('}')
)
c.Append('return true;')
(c.Eblock('}')
.Substitute({'namespace': cpp_namespace, 'name': classname}))
return c
def _GenerateTypePopulateProperty(self, prop, src, dst):
"""Generate the code to populate a single property in a type.
src: base::DictionaryValue*
dst: Type*
"""
c = Code()
value_var = prop.unix_name + '_value'
c.Append('const base::Value* %(value_var)s = NULL;')
if prop.optional:
(c.Sblock(
'if (%(src)s->GetWithoutPathExpansion("%(key)s", &%(value_var)s)) {')
.Concat(self._GeneratePopulatePropertyFromValue(
prop, value_var, dst, 'false')))
underlying_type = self._type_helper.FollowRef(prop.type_)
if underlying_type.property_type == PropertyType.ENUM:
(c.Append('} else {')
.Append('%%(dst)s->%%(name)s = %s;' %
self._type_helper.GetEnumNoneValue(prop.type_)))
c.Eblock('}')
else:
(c.Append(
'if (!%(src)s->GetWithoutPathExpansion("%(key)s", &%(value_var)s))')
.Append(' return false;')
.Concat(self._GeneratePopulatePropertyFromValue(
prop, value_var, dst, 'false'))
)
c.Append()
c.Substitute({
'value_var': value_var,
'key': prop.name,
'src': src,
'dst': dst,
'name': prop.unix_name
})
return c
def _GenerateTypeToValue(self, cpp_namespace, type_):
"""Generates a function that serializes the type into a base::Value.
E.g. for type "Foo" generates Foo::ToValue()
"""
if type_.property_type == PropertyType.OBJECT:
return self._GenerateObjectTypeToValue(cpp_namespace, type_)
elif type_.property_type == PropertyType.CHOICES:
return self._GenerateChoiceTypeToValue(cpp_namespace, type_)
else:
raise ValueError("Unsupported property type %s" % type_.type_)
def _GenerateObjectTypeToValue(self, cpp_namespace, type_):
"""Generates a function that serializes an object-representing type
into a base::DictionaryValue.
"""
c = Code()
(c.Sblock('scoped_ptr<base::DictionaryValue> %s::ToValue() const {' %
cpp_namespace)
.Append('scoped_ptr<base::DictionaryValue> value('
'new base::DictionaryValue());')
.Append()
)
for prop in type_.properties.values():
if prop.optional:
# Optional enum values are generated with a NONE enum value.
underlying_type = self._type_helper.FollowRef(prop.type_)
if underlying_type.property_type == PropertyType.ENUM:
c.Sblock('if (%s != %s) {' %
(prop.unix_name,
self._type_helper.GetEnumNoneValue(prop.type_)))
else:
c.Sblock('if (%s.get()) {' % prop.unix_name)
# ANY is a base::Value which is abstract and cannot be a direct member, so
# it will always be a pointer.
is_ptr = prop.optional or prop.type_.property_type == PropertyType.ANY
c.Append('value->SetWithoutPathExpansion("%s", %s);' % (
prop.name,
self._CreateValueFromType(prop.type_,
'this->%s' % prop.unix_name,
is_ptr=is_ptr)))
if prop.optional:
        c.Eblock('}')
if type_.additional_properties is not None:
if type_.additional_properties.property_type == PropertyType.ANY:
c.Append('value->MergeDictionary(&additional_properties);')
else:
# Non-copyable types will be wrapped in a linked_ptr for inclusion in
# maps, so we need to unwrap them.
needs_unwrap = (
not self._type_helper.IsCopyable(type_.additional_properties))
cpp_type = self._type_helper.GetCppType(type_.additional_properties,
is_in_container=True)
(c.Sblock('for (std::map<std::string, %s>::const_iterator it =' %
cpp_util.PadForGenerics(cpp_type))
.Append(' additional_properties.begin();')
.Append(' it != additional_properties.end(); ++it) {')
.Append('value->SetWithoutPathExpansion(it->first, %s);' %
self._CreateValueFromType(
type_.additional_properties,
'%sit->second' % ('*' if needs_unwrap else '')))
.Eblock('}')
)
return (c.Append()
.Append('return value.Pass();')
.Eblock('}'))
def _GenerateChoiceTypeToValue(self, cpp_namespace, type_):
"""Generates a function that serializes a choice-representing type
into a base::Value.
"""
c = Code()
c.Sblock('scoped_ptr<base::Value> %s::ToValue() const {' % cpp_namespace)
    c.Append('scoped_ptr<base::Value> result;')
for choice in type_.choices:
choice_var = 'as_%s' % choice.unix_name
(c.Sblock('if (%s) {' % choice_var)
.Append('DCHECK(!result) << "Cannot set multiple choices for %s";' %
type_.unix_name)
.Append('result.reset(%s);' %
self._CreateValueFromType(choice, '*%s' % choice_var))
.Eblock('}')
)
(c.Append('DCHECK(result) << "Must set at least one choice for %s";' %
type_.unix_name)
.Append('return result.Pass();')
.Eblock('}')
)
return c
def _GenerateFunction(self, function):
"""Generates the definitions for function structs.
"""
c = Code()
# TODO(kalman): use function.unix_name not Classname.
function_namespace = cpp_util.Classname(function.name)
(c.Append('namespace %s {' % function_namespace)
.Append()
)
# Params::Populate function
if function.params:
c.Concat(self._GeneratePropertyFunctions('Params', function.params))
(c.Append('Params::Params() {}')
.Append('Params::~Params() {}')
.Append()
.Cblock(self._GenerateFunctionParamsCreate(function))
)
# Results::Create function
if function.callback:
c.Concat(self._GenerateCreateCallbackArguments('Results',
function.callback))
c.Append('} // namespace %s' % function_namespace)
return c
def _GenerateEvent(self, event):
# TODO(kalman): use event.unix_name not Classname.
c = Code()
event_namespace = cpp_util.Classname(event.name)
(c.Append('namespace %s {' % event_namespace)
.Append()
.Cblock(self._GenerateCreateCallbackArguments(None, event))
.Append('} // namespace %s' % event_namespace)
)
return c
def _CreateValueFromType(self, type_, var, is_ptr=False):
"""Creates a base::Value given a type. Generated code passes ownership
to caller.
var: variable or variable*
E.g for std::string, generate base::Value::CreateStringValue(var)
"""
underlying_type = self._type_helper.FollowRef(type_)
if (underlying_type.property_type == PropertyType.CHOICES or
underlying_type.property_type == PropertyType.OBJECT):
if is_ptr:
return '(%s)->ToValue().release()' % var
else:
return '(%s).ToValue().release()' % var
elif (underlying_type.property_type == PropertyType.ANY or
underlying_type.property_type == PropertyType.FUNCTION):
if is_ptr:
vardot = '(%s)->' % var
else:
vardot = '(%s).' % var
return '%sDeepCopy()' % vardot
elif underlying_type.property_type == PropertyType.ENUM:
return 'base::Value::CreateStringValue(ToString(%s))' % var
elif underlying_type.property_type == PropertyType.BINARY:
if is_ptr:
vardot = var + '->'
else:
vardot = var + '.'
return ('base::BinaryValue::CreateWithCopiedBuffer(%sdata(), %ssize())' %
(vardot, vardot))
elif underlying_type.property_type == PropertyType.ARRAY:
return '%s.release()' % self._util_cc_helper.CreateValueFromArray(
underlying_type,
var,
is_ptr)
elif underlying_type.property_type.is_fundamental:
if is_ptr:
var = '*%s' % var
if underlying_type.property_type == PropertyType.STRING:
return 'new base::StringValue(%s)' % var
else:
return 'new base::FundamentalValue(%s)' % var
else:
raise NotImplementedError('Conversion of %s to base::Value not '
'implemented' % repr(type_.type_))
def _GenerateParamsCheck(self, function, var):
"""Generates a check for the correct number of arguments when creating
Params.
"""
c = Code()
num_required = 0
for param in function.params:
if not param.optional:
num_required += 1
if num_required == len(function.params):
c.Append('if (%(var)s.GetSize() != %(total)d)')
elif not num_required:
c.Append('if (%(var)s.GetSize() > %(total)d)')
else:
c.Append('if (%(var)s.GetSize() < %(required)d'
' || %(var)s.GetSize() > %(total)d)')
c.Append(' return scoped_ptr<Params>();')
c.Substitute({
'var': var,
'required': num_required,
'total': len(function.params),
})
return c
def _GenerateFunctionParamsCreate(self, function):
"""Generate function to create an instance of Params. The generated
function takes a base::ListValue of arguments.
E.g for function "Bar", generate Bar::Params::Create()
"""
c = Code()
(c.Append('// static')
.Sblock('scoped_ptr<Params> '
'Params::Create(const base::ListValue& args) {')
.Concat(self._GenerateParamsCheck(function, 'args'))
.Append('scoped_ptr<Params> params(new Params());')
)
for param in function.params:
c.Concat(self._InitializePropertyToDefault(param, 'params'))
for i, param in enumerate(function.params):
# Any failure will cause this function to return. If any argument is
# incorrect or missing, those following it are not processed. Note that
# for optional arguments, we allow missing arguments and proceed because
# there may be other arguments following it.
failure_value = 'scoped_ptr<Params>()'
c.Append()
value_var = param.unix_name + '_value'
(c.Append('const base::Value* %(value_var)s = NULL;')
.Append('if (args.Get(%(i)s, &%(value_var)s) &&')
.Sblock(' !%(value_var)s->IsType(base::Value::TYPE_NULL)) {')
.Concat(self._GeneratePopulatePropertyFromValue(
param, value_var, 'params', failure_value))
.Eblock('}')
)
if not param.optional:
(c.Sblock('else {')
.Append('return %s;' % failure_value)
.Eblock('}')
)
c.Substitute({'value_var': value_var, 'i': i})
(c.Append()
.Append('return params.Pass();')
.Eblock('}')
.Append()
)
return c
def _GeneratePopulatePropertyFromValue(self,
prop,
src_var,
dst_class_var,
failure_value):
"""Generates code to populate property |prop| of |dst_class_var| (a
pointer) from a Value*. See |_GeneratePopulateVariableFromValue| for
semantics.
"""
return self._GeneratePopulateVariableFromValue(prop.type_,
src_var,
'%s->%s' % (dst_class_var,
prop.unix_name),
failure_value,
is_ptr=prop.optional)
def _GeneratePopulateVariableFromValue(self,
type_,
src_var,
dst_var,
failure_value,
is_ptr=False):
"""Generates code to populate a variable |dst_var| of type |type_| from a
Value* at |src_var|. The Value* is assumed to be non-NULL. In the generated
code, if |dst_var| fails to be populated then Populate will return
|failure_value|.
"""
c = Code()
c.Sblock('{')
underlying_type = self._type_helper.FollowRef(type_)
if underlying_type.property_type.is_fundamental:
if is_ptr:
(c.Append('%(cpp_type)s temp;')
.Append('if (!%s)' % cpp_util.GetAsFundamentalValue(
self._type_helper.FollowRef(type_), src_var, '&temp'))
.Append(' return %(failure_value)s;')
.Append('%(dst_var)s.reset(new %(cpp_type)s(temp));')
)
else:
(c.Append('if (!%s)' % cpp_util.GetAsFundamentalValue(
self._type_helper.FollowRef(type_),
src_var,
'&%s' % dst_var))
.Append(' return %(failure_value)s;')
)
elif underlying_type.property_type == PropertyType.OBJECT:
if is_ptr:
(c.Append('const base::DictionaryValue* dictionary = NULL;')
.Append('if (!%(src_var)s->GetAsDictionary(&dictionary))')
.Append(' return %(failure_value)s;')
.Append('scoped_ptr<%(cpp_type)s> temp(new %(cpp_type)s());')
.Append('if (!%(cpp_type)s::Populate(*dictionary, temp.get()))')
.Append(' return %(failure_value)s;')
.Append('%(dst_var)s = temp.Pass();')
)
else:
(c.Append('const base::DictionaryValue* dictionary = NULL;')
.Append('if (!%(src_var)s->GetAsDictionary(&dictionary))')
.Append(' return %(failure_value)s;')
.Append('if (!%(cpp_type)s::Populate(*dictionary, &%(dst_var)s))')
.Append(' return %(failure_value)s;')
)
elif underlying_type.property_type == PropertyType.FUNCTION:
if is_ptr:
c.Append('%(dst_var)s.reset(new base::DictionaryValue());')
elif underlying_type.property_type == PropertyType.ANY:
c.Append('%(dst_var)s.reset(%(src_var)s->DeepCopy());')
elif underlying_type.property_type == PropertyType.ARRAY:
# util_cc_helper deals with optional and required arrays
(c.Append('const base::ListValue* list = NULL;')
.Append('if (!%(src_var)s->GetAsList(&list))')
.Append(' return %(failure_value)s;'))
item_type = underlying_type.item_type
if item_type.property_type == PropertyType.ENUM:
c.Concat(self._GenerateListValueToEnumArrayConversion(
item_type,
'list',
dst_var,
failure_value,
is_ptr=is_ptr))
else:
(c.Append('if (!%s)' % self._util_cc_helper.PopulateArrayFromList(
underlying_type,
'list',
dst_var,
is_ptr))
.Append(' return %(failure_value)s;')
)
elif underlying_type.property_type == PropertyType.CHOICES:
if is_ptr:
(c.Append('scoped_ptr<%(cpp_type)s> temp(new %(cpp_type)s());')
.Append('if (!%(cpp_type)s::Populate(*%(src_var)s, temp.get()))')
.Append(' return %(failure_value)s;')
.Append('%(dst_var)s = temp.Pass();')
)
else:
(c.Append('if (!%(cpp_type)s::Populate(*%(src_var)s, &%(dst_var)s))')
.Append(' return %(failure_value)s;')
)
elif underlying_type.property_type == PropertyType.ENUM:
c.Concat(self._GenerateStringToEnumConversion(type_,
src_var,
dst_var,
failure_value))
elif underlying_type.property_type == PropertyType.BINARY:
(c.Append('if (!%(src_var)s->IsType(%(value_type)s))')
.Append(' return %(failure_value)s;')
.Append('const base::BinaryValue* binary_value =')
.Append(' static_cast<const base::BinaryValue*>(%(src_var)s);')
)
if is_ptr:
(c.Append('%(dst_var)s.reset(')
.Append(' new std::string(binary_value->GetBuffer(),')
.Append(' binary_value->GetSize()));')
)
else:
(c.Append('%(dst_var)s.assign(binary_value->GetBuffer(),')
.Append(' binary_value->GetSize());')
)
else:
raise NotImplementedError(type_)
sub = {
'cpp_type': self._type_helper.GetCppType(type_),
'src_var': src_var,
'dst_var': dst_var,
'failure_value': failure_value,
}
if underlying_type.property_type not in (PropertyType.ANY,
PropertyType.CHOICES):
sub['value_type'] = cpp_util.GetValueType(underlying_type)
return c.Eblock('}').Substitute(sub)
def _GenerateListValueToEnumArrayConversion(self,
item_type,
src_var,
dst_var,
failure_value,
is_ptr=False):
"""Returns Code that converts a ListValue of string constants from
|src_var| into an array of enums of |type_| in |dst_var|. On failure,
returns |failure_value|.
"""
c = Code()
accessor = '.'
if is_ptr:
accessor = '->'
cpp_type = self._type_helper.GetCppType(item_type, is_in_container=True)
c.Append('%s.reset(new std::vector<%s>);' %
(dst_var, cpp_util.PadForGenerics(cpp_type)))
(c.Sblock('for (base::ListValue::const_iterator it = %s->begin(); '
'it != %s->end(); ++it) {' % (src_var, src_var))
.Append('%s tmp;' % self._type_helper.GetCppType(item_type))
.Concat(self._GenerateStringToEnumConversion(item_type,
'(*it)',
'tmp',
failure_value))
.Append('%s%spush_back(tmp);' % (dst_var, accessor))
.Eblock('}')
)
return c
def _GenerateStringToEnumConversion(self,
type_,
src_var,
dst_var,
failure_value):
"""Returns Code that converts a string type in |src_var| to an enum with
type |type_| in |dst_var|. In the generated code, if |src_var| is not
a valid enum name then the function will return |failure_value|.
"""
c = Code()
enum_as_string = '%s_as_string' % type_.unix_name
(c.Append('std::string %s;' % enum_as_string)
.Append('if (!%s->GetAsString(&%s))' % (src_var, enum_as_string))
.Append(' return %s;' % failure_value)
.Append('%s = Parse%s(%s);' % (dst_var,
self._type_helper.GetCppType(type_),
enum_as_string))
.Append('if (%s == %s)' % (dst_var,
self._type_helper.GetEnumNoneValue(type_)))
.Append(' return %s;' % failure_value)
)
return c
def _GeneratePropertyFunctions(self, namespace, params):
"""Generates the member functions for a list of parameters.
"""
return self._GenerateTypes(namespace, (param.type_ for param in params))
def _GenerateTypes(self, namespace, types):
"""Generates the member functions for a list of types.
"""
c = Code()
for type_ in types:
c.Cblock(self._GenerateType(namespace, type_))
return c
def _GenerateEnumToString(self, cpp_namespace, type_):
"""Generates ToString() which gets the string representation of an enum.
"""
c = Code()
classname = cpp_util.Classname(schema_util.StripNamespace(type_.name))
if cpp_namespace is not None:
c.Append('// static')
maybe_namespace = '' if cpp_namespace is None else '%s::' % cpp_namespace
c.Sblock('std::string %sToString(%s enum_param) {' %
(maybe_namespace, classname))
c.Sblock('switch (enum_param) {')
for enum_value in self._type_helper.FollowRef(type_).enum_values:
(c.Append('case %s: ' % self._type_helper.GetEnumValue(type_, enum_value))
.Append(' return "%s";' % enum_value))
(c.Append('case %s:' % self._type_helper.GetEnumNoneValue(type_))
.Append(' return "";')
.Eblock('}')
.Append('NOTREACHED();')
.Append('return "";')
.Eblock('}')
)
return c
def _GenerateEnumFromString(self, cpp_namespace, type_):
"""Generates FromClassNameString() which gets an enum from its string
representation.
"""
c = Code()
classname = cpp_util.Classname(schema_util.StripNamespace(type_.name))
if cpp_namespace is not None:
c.Append('// static')
maybe_namespace = '' if cpp_namespace is None else '%s::' % cpp_namespace
c.Sblock('%s%s %sParse%s(const std::string& enum_string) {' %
(maybe_namespace, classname, maybe_namespace, classname))
for i, enum_value in enumerate(
self._type_helper.FollowRef(type_).enum_values):
# This is broken up into all ifs with no else ifs because we get
# "fatal error C1061: compiler limit : blocks nested too deeply"
# on Windows.
(c.Append('if (enum_string == "%s")' % enum_value)
.Append(' return %s;' %
self._type_helper.GetEnumValue(type_, enum_value)))
(c.Append('return %s;' % self._type_helper.GetEnumNoneValue(type_))
.Eblock('}')
)
return c
def _GenerateCreateCallbackArguments(self, function_scope, callback):
"""Generate all functions to create Value parameters for a callback.
E.g for function "Bar", generate Bar::Results::Create
E.g for event "Baz", generate Baz::Create
function_scope: the function scope path, e.g. Foo::Bar for the function
Foo::Bar::Baz(). May be None if there is no function scope.
callback: the Function object we are creating callback arguments for.
"""
c = Code()
params = callback.params
c.Concat(self._GeneratePropertyFunctions(function_scope, params))
(c.Sblock('scoped_ptr<base::ListValue> %(function_scope)s'
'Create(%(declaration_list)s) {')
.Append('scoped_ptr<base::ListValue> create_results('
'new base::ListValue());')
)
declaration_list = []
for param in params:
declaration_list.append(cpp_util.GetParameterDeclaration(
param, self._type_helper.GetCppType(param.type_)))
c.Append('create_results->Append(%s);' %
self._CreateValueFromType(param.type_, param.unix_name))
c.Append('return create_results.Pass();')
c.Eblock('}')
c.Substitute({
'function_scope': ('%s::' % function_scope) if function_scope else '',
'declaration_list': ', '.join(declaration_list),
'param_names': ', '.join(param.unix_name for param in params)
})
return c
def _InitializePropertyToDefault(self, prop, dst):
"""Initialize a model.Property to its default value inside an object.
E.g for optional enum "state", generate dst->state = STATE_NONE;
dst: Type*
"""
c = Code()
underlying_type = self._type_helper.FollowRef(prop.type_)
if (underlying_type.property_type == PropertyType.ENUM and
prop.optional):
c.Append('%s->%s = %s;' % (
dst,
prop.unix_name,
self._type_helper.GetEnumNoneValue(prop.type_)))
return c
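# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original generator): how CCGenerator is
# typically driven by the schema compiler front end. The namespace model and the
# cpp type generator are built elsewhere (model.py / cpp_type_generator.py); the
# argument names here are assumptions for the sake of the example.
def _example_generate_cc(namespace, type_generator, cpp_namespace):
  """Hypothetical helper: build the Code object for one namespace's .cc file."""
  generator = CCGenerator(type_generator, cpp_namespace)
  # _Generator assembles includes, types, functions and events into a Code
  # object; the front end is responsible for rendering it to disk.
  return generator.Generate(namespace)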
|
|
# Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2005 The Regents of The University of Michigan
# Copyright (c) 2010 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Steve Reinhardt
import atexit
import os
import sys
# import the SWIG-wrapped main C++ functions
import internal
import core
import stats
import SimObject
import ticks
import objects
from m5.util.dot_writer import do_dot
from m5.internal.stats import updateEvents as updateStatEvents
from util import fatal
from util import attrdict
# define a MaxTick parameter, unsigned 64 bit
MaxTick = 2**64 - 1
_memory_modes = {
"atomic" : objects.params.atomic,
"timing" : objects.params.timing,
"atomic_noncaching" : objects.params.atomic_noncaching,
}
# The final hook to generate .ini files. Called from the user script
# once the config is built.
def instantiate(ckpt_dir=None):
from m5 import options
root = objects.Root.getInstance()
if not root:
fatal("Need to instantiate Root() before calling instantiate()")
# we need to fix the global frequency
ticks.fixGlobalFrequency()
# Make sure SimObject-valued params are in the configuration
# hierarchy so we catch them with future descendants() walks
for obj in root.descendants(): obj.adoptOrphanParams()
# Unproxy in sorted order for determinism
for obj in root.descendants(): obj.unproxyParams()
if options.dump_config:
ini_file = file(os.path.join(options.outdir, options.dump_config), 'w')
# Print ini sections in sorted order for easier diffing
for obj in sorted(root.descendants(), key=lambda o: o.path()):
obj.print_ini(ini_file)
ini_file.close()
if options.json_config:
try:
import json
json_file = file(os.path.join(options.outdir, options.json_config), 'w')
d = root.get_config_as_dict()
json.dump(d, json_file, indent=4)
json_file.close()
except ImportError:
pass
do_dot(root, options.outdir, options.dot_config)
# Initialize the global statistics
stats.initSimStats()
# Create the C++ sim objects and connect ports
for obj in root.descendants(): obj.createCCObject()
for obj in root.descendants(): obj.connectPorts()
# Do a second pass to finish initializing the sim objects
for obj in root.descendants(): obj.init()
# Do a third pass to initialize statistics
for obj in root.descendants(): obj.regStats()
# We're done registering statistics. Enable the stats package now.
stats.enable()
# Restore checkpoint (if any)
if ckpt_dir:
ckpt = internal.core.getCheckpoint(ckpt_dir)
internal.core.unserializeGlobals(ckpt);
for obj in root.descendants(): obj.loadState(ckpt)
need_resume.append(root)
else:
for obj in root.descendants(): obj.initState()
    # Check to see if any of the stat events are in the past after resuming from
    # a checkpoint. If so, this call will shift them to be at a valid time.
updateStatEvents()
# Reset to put the stats in a consistent state.
stats.reset()
need_resume = []
need_startup = True
def simulate(*args, **kwargs):
global need_resume, need_startup
if need_startup:
root = objects.Root.getInstance()
for obj in root.descendants(): obj.startup()
need_startup = False
for root in need_resume:
resume(root)
need_resume = []
return internal.event.simulate(*args, **kwargs)
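# Illustrative sketch (not part of the original module): the usual flow of a user
# configuration script, which builds a Root/System hierarchy, calls instantiate()
# and then simulate(). Building the hierarchy itself is left as a comment because
# it depends entirely on the user's objects.
def _example_run(ckpt_dir=None):
    # root = MyConfig()          # hypothetical: construct Root() and the system here
    instantiate(ckpt_dir)        # create C++ SimObjects, connect ports, init stats
    exit_event = simulate()      # run until an exit event (a tick limit may be passed)
    return exit_event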
# Export curTick to user script.
def curTick():
return internal.core.curTick()
# Python exit handlers happen in reverse order. We want to dump stats last.
atexit.register(stats.dump)
# register our C++ exit callback function with Python
atexit.register(internal.core.doExitCleanup)
# Drain the system in preparation of a checkpoint or memory mode
# switch.
def drain(root):
# Try to drain all objects. Draining might not be completed unless
# all objects return that they are drained on the first call. This
# is because as objects drain they may cause other objects to no
# longer be drained.
def _drain():
all_drained = False
dm = internal.drain.createDrainManager()
unready_objs = sum(obj.drain(dm) for obj in root.descendants())
# If we've got some objects that can't drain immediately, then simulate
if unready_objs > 0:
dm.setCount(unready_objs)
simulate()
else:
all_drained = True
internal.drain.cleanupDrainManager(dm)
return all_drained
all_drained = _drain()
while (not all_drained):
all_drained = _drain()
def memWriteback(root):
for obj in root.descendants():
obj.memWriteback()
def memInvalidate(root):
for obj in root.descendants():
obj.memInvalidate()
def resume(root):
for obj in root.descendants(): obj.drainResume()
def checkpoint(dir):
root = objects.Root.getInstance()
if not isinstance(root, objects.Root):
raise TypeError, "Checkpoint must be called on a root object."
drain(root)
memWriteback(root)
print "Writing checkpoint"
internal.core.serializeAll(dir)
resume(root)
def _changeMemoryMode(system, mode):
if not isinstance(system, (objects.Root, objects.System)):
raise TypeError, "Parameter of type '%s'. Must be type %s or %s." % \
(type(system), objects.Root, objects.System)
if system.getMemoryMode() != mode:
drain(system)
system.setMemoryMode(mode)
else:
print "System already in target mode. Memory mode unchanged."
def switchCpus(system, cpuList, do_drain=True):
"""Switch CPUs in a system.
By default, this method drains and resumes the system. This
behavior can be disabled by setting the keyword argument
'do_drain' to false, which might be desirable if multiple
operations requiring a drained system are going to be performed in
sequence.
Note: This method may switch the memory mode of the system if that
is required by the CPUs. It may also flush all caches in the
system.
Arguments:
system -- Simulated system.
cpuList -- (old_cpu, new_cpu) tuples
Keyword Arguments:
do_drain -- Perform a drain/resume of the system when switching.
"""
print "switching cpus"
if not isinstance(cpuList, list):
raise RuntimeError, "Must pass a list to this function"
for item in cpuList:
if not isinstance(item, tuple) or len(item) != 2:
raise RuntimeError, "List must have tuples of (oldCPU,newCPU)"
old_cpus = [old_cpu for old_cpu, new_cpu in cpuList]
new_cpus = [new_cpu for old_cpu, new_cpu in cpuList]
old_cpu_set = set(old_cpus)
memory_mode_name = new_cpus[0].memory_mode()
for old_cpu, new_cpu in cpuList:
if not isinstance(old_cpu, objects.BaseCPU):
raise TypeError, "%s is not of type BaseCPU" % old_cpu
if not isinstance(new_cpu, objects.BaseCPU):
raise TypeError, "%s is not of type BaseCPU" % new_cpu
if new_cpu in old_cpu_set:
raise RuntimeError, \
"New CPU (%s) is in the list of old CPUs." % (old_cpu,)
if not new_cpu.switchedOut():
raise RuntimeError, \
"New CPU (%s) is already active." % (new_cpu,)
if not new_cpu.support_take_over():
raise RuntimeError, \
"New CPU (%s) does not support CPU handover." % (old_cpu,)
if new_cpu.memory_mode() != memory_mode_name:
raise RuntimeError, \
"%s and %s require different memory modes." % (new_cpu,
new_cpus[0])
if old_cpu.switchedOut():
raise RuntimeError, \
"Old CPU (%s) is inactive." % (new_cpu,)
if not old_cpu.support_take_over():
raise RuntimeError, \
"Old CPU (%s) does not support CPU handover." % (old_cpu,)
try:
memory_mode = _memory_modes[memory_mode_name]
except KeyError:
raise RuntimeError, "Invalid memory mode (%s)" % memory_mode_name
if do_drain:
drain(system)
# Now all of the CPUs are ready to be switched out
for old_cpu, new_cpu in cpuList:
old_cpu.switchOut()
# Change the memory mode if required. We check if this is needed
# to avoid printing a warning if no switch was performed.
if system.getMemoryMode() != memory_mode:
# Flush the memory system if we are switching to a memory mode
# that disables caches. This typically happens when switching to a
# hardware virtualized CPU.
if memory_mode == objects.params.atomic_noncaching:
memWriteback(system)
memInvalidate(system)
_changeMemoryMode(system, memory_mode)
for old_cpu, new_cpu in cpuList:
new_cpu.takeOverFrom(old_cpu)
if do_drain:
resume(system)
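# Illustrative sketch (not part of the original module): how switchCpus() is meant
# to be called from a configuration script. `system`, `old_cpus` and `new_cpus`
# are placeholders supplied by the user script; the pairing below follows the
# (old_cpu, new_cpu) tuple convention documented in switchCpus().
def _example_switch(system, old_cpus, new_cpus):
    cpu_pairs = [(old, new) for old, new in zip(old_cpus, new_cpus)]
    switchCpus(system, cpu_pairs, do_drain=True)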
from internal.core import disableAllListeners
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for checking stats accumulator related ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.boosted_trees.python.ops import stats_accumulator_ops
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class StatsAccumulatorScalarTest(test_util.TensorFlowTestCase):
"""Tests for scalar gradients and hessians accumulator."""
  def testSimpleAccumulator(self):
with self.test_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.scalar(),
hessian_shape=tensor_shape.scalar())
with ops.control_dependencies([accumulator._create_op]):
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[2, 3],
gradients=[0.1, 0.3],
hessians=[0.2, 0.4])
op2 = accumulator.add(0, [1], [2], [0.1], [0.2])
with ops.control_dependencies([op1, op2]):
num_updates, partition, feature, grads, hessians = accumulator.flush(
stamp_token=0, next_stamp_token=1)
num_updates, partition, feature, grads, hessians = sess.run(
[num_updates, partition, feature, grads, hessians])
result = _AccumulatorResultToDict(partition, feature, grads, hessians)
self.assertEqual(num_updates, 2)
self.assertEqual(len(result), 2)
self.assertAllClose(result[(1, 2)], [0.2, 0.4])
self.assertAllClose(result[(2, 3)], [0.3, 0.4])
def testDropStaleUpdate(self):
with self.test_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.scalar(),
hessian_shape=tensor_shape.scalar())
with ops.control_dependencies([accumulator._create_op]):
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[2, 3],
gradients=[0.1, 0.3],
hessians=[0.2, 0.4])
op2 = accumulator.add(
stamp_token=-1,
partition_ids=[1],
feature_ids=[2],
gradients=[0.1],
hessians=[0.2])
with ops.control_dependencies([op1, op2]):
num_updates, partition, feature, grads, hessians = accumulator.flush(
stamp_token=0, next_stamp_token=1)
num_updates, partition, feature, grads, hessians = sess.run(
[num_updates, partition, feature, grads, hessians])
result = _AccumulatorResultToDict(partition, feature, grads, hessians)
self.assertEqual(num_updates, 1)
self.assertEqual(len(result), 2)
self.assertAllClose(result[(1, 2)], [0.1, 0.2])
self.assertAllClose(result[(2, 3)], [0.3, 0.4])
def testSerialize(self):
with self.test_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.scalar(),
hessian_shape=tensor_shape.scalar())
with ops.control_dependencies([accumulator._create_op]):
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[2, 3],
gradients=[0.1, 0.3],
hessians=[0.2, 0.4])
with ops.control_dependencies([op1]):
(stamp_token, num_updates, partition_1, feature_1, grads_1,
hessians_1) = accumulator.serialize()
# Make sure that the accumulator hasn't changed during serialization.
with ops.control_dependencies([stamp_token]):
num_updates_2, partition_2, feature_2, grads_2, hessians_2 = (
accumulator.flush(stamp_token=0, next_stamp_token=1))
(stamp_token, num_updates, partition_1, feature_1, grads_1, hessians_1,
num_updates_2, partition_2, feature_2, grads_2, hessians_2) = sess.run(
[
stamp_token, num_updates, partition_1, feature_1, grads_1,
hessians_1, num_updates_2, partition_2, feature_2, grads_2,
hessians_2
])
result_1 = _AccumulatorResultToDict(partition_1, feature_1, grads_1,
hessians_1)
result_2 = _AccumulatorResultToDict(partition_2, feature_2, grads_2,
hessians_2)
self.assertEqual(num_updates, 1)
self.assertEqual(num_updates_2, 1)
self.assertEqual(len(result_1), 2)
self.assertAllClose(result_1[(1, 2)], [0.1, 0.2])
self.assertAllClose(result_1[(2, 3)], [0.3, 0.4])
self.assertAllEqual(result_1, result_2)
self.assertEqual(0, stamp_token)
def testDeserialize(self):
with self.test_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.scalar(),
hessian_shape=tensor_shape.scalar())
with ops.control_dependencies([accumulator._create_op]):
# These will be deleted due to deserialize call.
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[2, 3],
gradients=[0.1, 0.3],
hessians=[0.2, 0.4])
with ops.control_dependencies([op1]):
deserialize = (accumulator.deserialize(
stamp_token=2,
num_updates=3,
partition_ids=[3, 4],
feature_ids=[5, 6],
gradients=[0.4, 0.5],
hessians=[0.6, 0.7]))
with ops.control_dependencies([deserialize]):
num_updates, partition, feature, grads, hessians = accumulator.flush(
stamp_token=2, next_stamp_token=3)
num_updates, partition, feature, grads, hessians = sess.run(
[num_updates, partition, feature, grads, hessians])
result = _AccumulatorResultToDict(partition, feature, grads,
hessians)
self.assertEqual(num_updates, 3)
self.assertEqual(len(result), 2)
self.assertAllClose(result[(3, 5)], [0.4, 0.6])
self.assertAllClose(result[(4, 6)], [0.5, 0.7])
def testMakeSummary(self):
with self.test_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.scalar(),
hessian_shape=tensor_shape.scalar())
partition, feature, grads, hessians = accumulator._make_summary(
partition_ids=[1, 2, 1],
feature_ids=[2, 3, 2],
gradients=[0.1, 0.3, 0.1],
hessians=[0.2, 0.4, 0.2])
partition, feature, grads, hessians = sess.run(
[partition, feature, grads, hessians])
result = _AccumulatorResultToDict(partition, feature, grads, hessians)
self.assertEqual(len(result), 2)
self.assertAllClose(result[(1, 2)], [0.2, 0.4])
self.assertAllClose(result[(2, 3)], [0.3, 0.4])
class StatsAccumulatorTensorTest(test_util.TensorFlowTestCase):
"""Tests for tensor gradients and hessians accumulator."""
  def testSimpleAccumulator(self):
with self.test_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.TensorShape([2]),
hessian_shape=tensor_shape.TensorShape([2, 2]))
with ops.control_dependencies([accumulator._create_op]):
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[2, 3],
# Two values for gradients,
gradients=[[0.1, 0.1], [0.2, 0.2]],
# A 2x2 matrix for each hessian.
hessians=[[[0.01, 0.02], [0.03, 0.04]],
[[0.05, 0.06], [0.07, 0.08]]])
op2 = accumulator.add(
stamp_token=0,
partition_ids=[1],
feature_ids=[2],
gradients=[[0.10, 0.11]],
hessians=[[[0.011, 0.022], [0.033, 0.044]]])
with ops.control_dependencies([op1, op2]):
num_updates, partition, feature, grads, hessians = accumulator.flush(
stamp_token=0, next_stamp_token=1)
num_updates, partition, feature, grads, hessians = sess.run(
[num_updates, partition, feature, grads, hessians])
result = _AccumulatorResultToDict(partition, feature, grads, hessians)
self.assertEqual(num_updates, 2)
self.assertEqual(len(result), 2)
self.assertAllClose(result[(1, 2)][0], [0.20, 0.21])
self.assertAllClose(result[(1, 2)][1], [[0.021, 0.042], [0.063, 0.084]])
self.assertAllClose(result[(2, 3)][0], [0.2, 0.2])
self.assertAllClose(result[(2, 3)][1], [[0.05, 0.06], [0.07, 0.08]])
def testDropStaleUpdate(self):
with self.test_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.TensorShape([2]),
hessian_shape=tensor_shape.TensorShape([2, 2]))
with ops.control_dependencies([accumulator._create_op]):
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[2, 3],
# Two values for gradients,
gradients=[[0.1, 0.1], [0.2, 0.2]],
# A 2x2 matrix for each hessian.
hessians=[[[0.01, 0.02], [0.03, 0.04]],
[[0.05, 0.06], [0.07, 0.08]]])
op2 = accumulator.add(
stamp_token=-1,
partition_ids=[1],
feature_ids=[2],
gradients=[[0.10, 0.11]],
hessians=[[[0.011, 0.022], [0.033, 0.044]]])
with ops.control_dependencies([op1, op2]):
num_updates, partition, feature, grads, hessians = accumulator.flush(
stamp_token=0, next_stamp_token=1)
num_updates, partition, feature, grads, hessians = sess.run(
[num_updates, partition, feature, grads, hessians])
result = _AccumulatorResultToDict(partition, feature, grads, hessians)
self.assertEqual(num_updates, 1)
self.assertEqual(len(result), 2)
self.assertAllClose(result[(1, 2)][0], [0.1, 0.1])
self.assertAllClose(result[(1, 2)][1], [[0.01, 0.02], [0.03, 0.04]])
self.assertAllClose(result[(2, 3)][0], [0.2, 0.2])
self.assertAllClose(result[(2, 3)][1], [[0.05, 0.06], [0.07, 0.08]])
def testSerialize(self):
with self.test_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.TensorShape([2]),
hessian_shape=tensor_shape.TensorShape([2, 2]))
with ops.control_dependencies([accumulator._create_op]):
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[2, 3],
# Two values for gradients,
gradients=[[0.1, 0.1], [0.2, 0.2]],
# A 2x2 matrix for each hessian.
hessians=[[[0.01, 0.02], [0.03, 0.04]],
[[0.05, 0.06], [0.07, 0.08]]])
with ops.control_dependencies([op1]):
(stamp_token, num_updates_1, partition_1, feature_1, grads_1,
hessians_1) = accumulator.serialize()
# Make sure that the accumulator hasn't changed during serialization.
with ops.control_dependencies([stamp_token]):
num_updates_2, partition_2, feature_2, grads_2, hessians_2 = (
accumulator.flush(stamp_token=0, next_stamp_token=1))
(stamp_token, num_updates_1, partition_1, feature_1, grads_1,
hessians_1, num_updates_2, partition_2, feature_2, grads_2,
hessians_2) = sess.run([
stamp_token, num_updates_1, partition_1, feature_1, grads_1,
hessians_1, num_updates_2, partition_2, feature_2, grads_2,
hessians_2
])
result_1 = _AccumulatorResultToDict(partition_1, feature_1, grads_1,
hessians_1)
result_2 = _AccumulatorResultToDict(partition_2, feature_2, grads_2,
hessians_2)
self.assertEqual(num_updates_1, 1)
self.assertEqual(num_updates_2, 1)
self.assertEqual(len(result_1), 2)
self.assertAllClose(result_1[(1, 2)][0], [0.1, 0.1])
self.assertAllClose(result_1[(1, 2)][1], [[0.01, 0.02], [0.03, 0.04]])
self.assertAllClose(result_1[(2, 3)][0], [0.2, 0.2])
self.assertAllClose(result_1[(2, 3)][1], [[0.05, 0.06], [0.07, 0.08]])
self.assertAllEqual(result_1[1, 2][0], result_2[1, 2][0])
self.assertAllEqual(result_1[1, 2][1], result_2[1, 2][1])
self.assertAllEqual(result_1[2, 3][0], result_2[2, 3][0])
self.assertAllEqual(result_1[2, 3][1], result_2[2, 3][1])
def testDeserialize(self):
with self.test_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.TensorShape([2]),
hessian_shape=tensor_shape.TensorShape([2, 2]))
with ops.control_dependencies([accumulator._create_op]):
# These will be deleted due to deserialize call.
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[2, 3],
# Two values for gradients,
gradients=[[0.1, 0.1], [0.2, 0.2]],
# A 2x2 matrix for each hessian.
hessians=[[[0.01, 0.02], [0.03, 0.04]],
[[0.05, 0.06], [0.07, 0.08]]])
with ops.control_dependencies([op1]):
deserialize = accumulator.deserialize(
stamp_token=2,
num_updates=3,
partition_ids=[3, 4],
feature_ids=[4, 5],
# Two values for gradients,
gradients=[[0.3, 0.3], [0.5, 0.5]],
# A 2x2 matrix for each hessian.
hessians=[[[0.03, 0.04], [0.05, 0.06]], [[0.07, 0.08], [0.09,
0.10]]])
with ops.control_dependencies([deserialize]):
num_updates, partition, feature, grads, hessians = accumulator.flush(
stamp_token=2, next_stamp_token=3)
num_updates, partition, feature, grads, hessians = sess.run(
[num_updates, partition, feature, grads, hessians])
result = _AccumulatorResultToDict(partition, feature, grads,
hessians)
self.assertEqual(num_updates, 3)
self.assertEqual(len(result), 2)
self.assertAllClose(result[(3, 4)][0], [0.3, 0.3])
self.assertAllClose(result[(3, 4)][1], [[0.03, 0.04], [0.05, 0.06]])
self.assertAllClose(result[(4, 5)][0], [0.5, 0.5])
self.assertAllClose(result[(4, 5)][1], [[0.07, 0.08], [0.09, 0.10]])
def testMakeSummary(self):
with self.test_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.TensorShape([2]),
hessian_shape=tensor_shape.TensorShape([2, 2]))
partition, feature, grads, hessians = accumulator._make_summary(
partition_ids=[1, 2, 1],
feature_ids=[2, 3, 2],
# Two values for gradients,
gradients=[[0.1, 0.1], [0.2, 0.2], [0.10, 0.11]],
# A 2x2 matrix for each hessian.
hessians=[[[0.01, 0.02], [0.03, 0.04]], [[0.05, 0.06], [0.07, 0.08]],
[[0.011, 0.022], [0.033, 0.044]]])
partition, feature, grads, hessians = sess.run(
[partition, feature, grads, hessians])
result = _AccumulatorResultToDict(partition, feature, grads, hessians)
self.assertEqual(len(result), 2)
self.assertAllClose(result[(1, 2)][0], [0.20, 0.21])
self.assertAllClose(result[(1, 2)][1], [[0.021, 0.042], [0.063, 0.084]])
self.assertAllClose(result[(2, 3)][0], [0.2, 0.2])
self.assertAllClose(result[(2, 3)][1], [[0.05, 0.06], [0.07, 0.08]])
def _AccumulatorResultToDict(partition, feature, grads, hessians):
"""Converts the inputs to a dictionary since the ordering changes."""
return {(partition[i], feature[i]): (grads[i], hessians[i])
for i in range(len(partition))}
if __name__ == "__main__":
googletest.main()
|
|
"""
This code receives and maps points
Authors: Niharika Jayanthi, Dheeraj Kamath
Project: Marker-based Localization
Mentor: Sanam Shakya
Main functions: draw_arena(), draw_marker(), draw_robot()
getCoordinates(), get_socket()
Global variables: arena_length, arena_breadth, s, host, port, room_width
"""
import socket
import cv2
from matplotlib import pyplot as plt
import numpy as np
import math
#Define Globals
arena_length=600
arena_breadth=600
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = socket.gethostname()
port = 7000
s.bind((host, port))
s.listen(2)
room_width = 1160
#Helper functions
def draw_arena(img1):
"""
* Function Name: draw_arena
* Input: Image to be drawn in
* Output: Modified image with arena drawn
* Logic: The arena lines are drawn one at a time, with a distance
of 50 units separating them.
* Example Call: draw_arena(image)
"""
    # Draw the grid lines, spaced 50 units apart, with a thickness of 1 px
row_width = 50
col_width = 50
#print arena_length/50
for i in range(0, arena_length/row_width):
cv2.line(img1,(row_width,0),(row_width,arena_length),(0,0,0),1)
cv2.line(img1,(0,col_width),(arena_breadth,col_width),(0,0,0),1)
row_width=row_width+50
col_width=col_width+50
def draw_marker(id, img1):
"""
* Function Name: draw_marker
* Input: Marker ID, image to be drawn in
* Output: Returns 1 if valid marker is drawn. Else, it returns -1.
* Logic: The marker id is checked for validity. If marker is
valid, it draws in fixed position and returns 1. Else,
it returns -1.
* Example Call: draw_marker(65, image)
"""
marker_width = 50
marker_length = 50
font = cv2.FONT_HERSHEY_SIMPLEX
if id == 65:
x = 0
y = 0
elif id == 250:
x = 550
y = 0
elif id == 796:
x = 0
y = 550
elif id == 500:
x = 550
y = 550
else:
return '-1'
cv2.rectangle(img1,(x,y),(x+marker_width,y+marker_length),(255,0,0,10),-1)
cv2.putText(img1,'#'+str(id),(x+10,y+30), font, 0.5,(0,0,0),1)
return 1
def draw_robot(x, y, theta_radian, img1):
"""
* Function Name: draw_robot()
* Input: x,y coordinates of the robot's position in map, angle
of inclination(theta) and image to be drawn in.
* Output: Modified image with the robot drawn in it and result is
displayed.
* Logic: The end point of the line is found by calculating a
rotation matrix and translating it. The robot is then
drawn as a circle and the line on the robot depicts
its orientation.
* Example Call: draw_robot(250, 250, 45, image)
"""
radius = 20
rotation_matrix= [[np.cos(theta_radian), -np.sin(theta_radian)],
[np.sin(theta_radian), np.cos(theta_radian)]]
R = np.array(rotation_matrix)
xy = [[radius],[0]]
xy = np.array(xy)
rotated_xy = np.dot(R,xy)
translation = [[x],[y]]
translation = np.array(translation)
trans_xy = rotated_xy + translation
#Convert from floating point to integers
x = int(x)
y = int(y)
cv2.circle(img1,(x,y), radius, (0,0,255), -1)
    cv2.line(img1,(x,y),(int(trans_xy[0]),int(trans_xy[1])),(0,0,0),2)
cv2.imshow("Position", img1)
cv2.waitKey(1000)
def getCoordinates(x, y, t, mID):
"""
* Function Name: getCoordinates
* Input: x, y coordinates of point, angle t, marker ID
* Output: Returns new values of x, y and t.
* Logic: It compares the marker ID to a valid list of markers,
whose position in a room is already known to us and
returns the values of x,y and t according to the ID.
* Example Call: getCoordinates(125, 235, 45, 500)
"""
if mID == 65:
return x, y, 3*math.pi/2 - abs(t)
elif mID == 796:
return x, 550 - y, math.pi/2 + abs(t)
elif mID == 500:
return 550 - x, 550 - y, math.pi/2 - abs(t)
elif mID == 250:
return 550 - x, y, 3 * math.pi/2 + abs(t)
else:
        print "Marker doesn't match!"
return 0, 0, 0
def get_socket():
"""
* Function Name: get_socket
* Input: -
* Output: -
* Logic: This function creates a TCP socket and receives
information from the client. This information includes
x, y coordinates of a marker, the angle t(calculated
with sine inverse), angle t1 (calculated as cosine
inverse) and the marker ID detected. These values are
passed to getCoordinates, which returns the x,y
values scaled to virtual map. Then, the marker/arena and
robot is drawn.
* Example Call: get_socket()
"""
global s, first_msg, arena_M, cur_x, cur_y
c, addr = s.accept()
while True:
msg = c.recv(100)
print "Message received is", msg
try:
if msg == 'q':
print "End of messages.\n"
break
x, y, t, t1, m_id = msg.split()
x = float(x)
y = float(y)
t = float(t)
t1 = float(t1)
m_id = int(m_id)
x = 600 * (x/room_width)
y = 600 * (y/room_width)
Rx = abs(y * math.cos(math.pi/2 - t))
Ry = abs(y * math.sin(math.pi/2 - t))
img_arena = arena_M.copy()
ret = draw_marker(m_id, arena_M)
if ret == '-1':
print "Marker not recognised."
print "X", x, "Y", y, "T", t, "T1", t1, "mID", m_id
print "Rx", Rx, "Ry", Ry
mx, my, t = getCoordinates(Rx, Ry, t, m_id)
if (mx,my,t) == (0,0,0):
print "Invalid coordinates"
continue
arena_copy = arena_M.copy()
draw_robot(mx, my, t, arena_copy)
except ValueError:
print "Bad message!\n"
break
# Create a black image
img = np.ones((arena_length,arena_breadth,4), np.uint8)*245
#img2 = np.ones((arena_length,arena_breadth,4), np.uint8)*255
draw_arena(img)
arena_M = img
get_socket()
s.close()
#cv2.imshow("Map", arena_M)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 21 17:08:59 2016
@author: bitzer
"""
from __future__ import print_function, division
import math
import numpy as np
import scipy
import scipy.optimize
import scipy.stats
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from warnings import warn
from abc import ABCMeta, abstractmethod
#%% define transformations
class transform(object):
"""A parameter transformation."""
__metaclass__ = ABCMeta
def __init__(self, **params):
self.transformed_range = None
@abstractmethod
def transform(self, x):
return None
@abstractmethod
def transformed_pdf(self, y, mu, sigma2):
return None
@abstractmethod
def transformed_mode(self, mu, sigma2):
return None
def find_transformed_mode(self, mu, sigma2, **kwargs):
fun = lambda x: -self.transformed_pdf(x, mu, sigma2)
res = scipy.optimize.minimize_scalar(fun, **kwargs)
return res.x
@abstractmethod
def transformed_ppf(self, q, mu, sigma2):
return None
def approximate_transformed_ppf(self, q, mu, sigma2):
# converts to numpy array, if necessary
q = np.array(q)
x = np.random.normal(mu, math.sqrt(sigma2), 1000)
return np.percentile(self.transform(x), q * 100)
class identity(transform):
def __init__(self):
self.transformed_range = np.r_[-np.inf, np.inf]
def transform(self, x):
return x
def transformed_pdf(self, y, mu, sigma2):
return scipy.stats.norm.pdf(y, loc=mu, scale=math.sqrt(sigma2))
def transformed_mode(self, mu, sigma2):
return mu
def transformed_ppf(self, q, mu, sigma2):
return scipy.stats.norm.ppf(q, loc=mu, scale=math.sqrt(sigma2))
class absolute(transform):
    """Takes absolute value of Gaussian, creates folded normal.
scipy's standardised formulation uses c = mu / sigma, scale = sigma
"""
def __init__(self):
self.transformed_range = np.r_[0, np.inf]
        warn('The absolute transform is not suited for inference in pyEPABC, '
             'because it most likely results in misleading posteriors!')
def transform(self, x):
return np.abs(x)
def transformed_pdf(self, y, mu, sigma2):
sigma = math.sqrt(sigma2)
return scipy.stats.foldnorm.pdf(y, mu/sigma, scale=sigma)
def transformed_mode(self, mu, sigma2):
sigma = math.sqrt(sigma2)
return scipy.optimize.minimize_scalar(
lambda x: -scipy.stats.foldnorm.pdf(
x, mu / sigma, scale=sigma)).x
def transformed_ppf(self, q, mu, sigma2):
sigma = math.sqrt(sigma2)
return scipy.stats.foldnorm.ppf(q, mu/sigma, scale=sigma)
class zero(transform):
"""Maps negative values to 0.
This creates a complicated transformed distribution for which actual
probability mass is collected at 0. I don't currently know whether this
corresponds to a named distribution.
The probability density function is simply that of the underlying Gaussian
distribution with negative values set to 0, but it's unclear what a
pdf-value at 0 should be, if I want this to reflect the probability mass
collected at 0, as this is not differentiable. To be at least a bit
informative the actual probability mass (not the density) is returned for
0.
    The transformed mode is defined to be 0 for mu <= 0 and equal to mu otherwise.
"""
def __init__(self):
self.transformed_range = np.r_[0, np.inf]
def transform(self, x):
return np.fmax(x, 0)
def transformed_pdf(self, y, mu, sigma2):
y = np.atleast_1d(y)
ind0 = np.flatnonzero(y == 0)
pdf = scipy.stats.norm.pdf(y, loc=mu, scale=math.sqrt(sigma2))
pdf[ind0] = scipy.stats.norm.cdf(0, loc=mu, scale=math.sqrt(sigma2))
return pdf
def transformed_mode(self, mu, sigma2):
return max(0, mu)
def transformed_ppf(self, q, mu, sigma2):
return np.fmax(scipy.stats.norm.ppf(
q, loc=mu, scale=math.sqrt(sigma2)), 0)
class exponential(transform):
def __init__(self):
self.transformed_range = np.r_[0, np.inf]
def transform(self, x):
return np.exp(x)
def transformed_pdf(self, y, mu, sigma2):
return scipy.stats.lognorm.pdf(y, math.sqrt(sigma2), scale=math.exp(mu))
def transformed_mode(self, mu, sigma2):
return math.exp(mu - sigma2)
def transformed_ppf(self, q, mu, sigma2):
return scipy.stats.lognorm.ppf(q, math.sqrt(sigma2), scale=math.exp(mu))
class gaussprob(transform):
@property
def width(self):
return self._width
@property
def shift(self):
return self._shift
def __init__(self, width=1.0, shift=0.0):
self._width = width
self._shift = shift
self.transformed_range = np.r_[self.shift, self.width + self.shift]
def transform(self, x):
return gaussprob_trans(x, self.width, self.shift)
def transformed_pdf(self, y, mu, sigma2):
if sigma2 == 1.0:
sigma2 -= 1e-15
warn('subtracted 1e-15 from sigma2=1.0 to avoid division by 0')
return np.exp((sigma2 - 1) / 2 / sigma2 * (
scipy.stats.norm.ppf((y - self.shift) / self.width) -
mu / (1 - sigma2)) ** 2 +
mu**2 / (1 - sigma2) / 2
) / np.sqrt(sigma2) / self.width
def transformed_mode(self, mu, sigma2):
return self.find_transformed_mode(mu, sigma2, method='Bounded',
bounds=self.transformed_range)
def transformed_ppf(self, q, mu, sigma2):
if np.isscalar(q):
q = np.array(q)
warn('gaussprob transform: No analytic ppf implemented. '
'Using numeric approximation.')
return self.approximate_transformed_ppf(q, mu, sigma2)
try:
from numba import vectorize, float64
@vectorize([float64(float64, float64, float64)], nopython=True)
def gaussprob_trans(x, width, shift):
cdf = (1 + math.erf(x / math.sqrt(2))) / 2
return cdf * width + shift
except ImportError:
def gaussprob_trans(x, width, shift):
return scipy.stats.norm.cdf(x) * width + shift
#%% define parameter container
class parameter_container:
@property
def names(self):
return self.params.name
def __init__(self):
self.params = pd.DataFrame(columns=('name', 'transform'))
self.P = 0
self.mu = np.array([])
self.cov = np.array([[]])
def __getstate__(self):
state = self.__dict__.copy()
del state['transformfun']
return state
def __setstate__(self, state):
self.__dict__.update(state)
self.generate_transformfun()
def add_param(self, name, mu, sigma, transform=identity(), multiples=1):
if multiples > 1:
name += '_{:d}'
for i in range(multiples):
self.params.loc[self.P] = [name.format(i), transform]
self.P += 1
self.generate_transformfun()
self.mu = np.r_[self.mu, mu]
cov = np.zeros((self.P, self.P))
cov[:self.P-1, :self.P-1] = self.cov
self.cov = cov
self.cov[-1, -1] = sigma ** 2
def drop_params(self, names):
for name in names:
if name in self.names.values:
ind = self.names[self.names == name].index[0]
self.params.drop(ind, inplace=True)
self.mu = np.delete(self.mu, ind)
                self.cov = np.delete(np.delete(self.cov, ind, axis=0), ind, axis=1)
self.P -= 1
self.params.index = np.arange(self.P)
self.generate_transformfun()
def generate_transformfun(self):
trstr = ""
for i in range(self.P):
trstr += ("self.params.loc[%s, 'transform']." % i +
"transform(values[:, %s])," % i)
trstr = trstr[:-1]
self.transformfun = eval("lambda self, values: np.c_[%s]" % trstr)
def transform(self, values):
return self.transformfun(self, values)
def sample(self, S, mu=None, cov=None):
if mu is None:
mu = self.mu
if cov is None:
cov = self.cov
return np.random.multivariate_normal(mu, cov, S)
def sample_transformed(self, S, mu=None, cov=None):
if mu is None:
mu = self.mu
if cov is None:
cov = self.cov
return self.transform(self.sample(S, mu, cov))
def get_transformed_mode(self, mu=None, cov=None):
if mu is None:
mu = self.mu
if cov is None:
cov = self.cov
mode = np.full(self.P, np.nan)
for par in self.params.itertuples():
i = par.Index
mode[i] = par.transform.transformed_mode(mu[i], cov[i, i])
return mode
    def compare_pdfs(self, mu, cov, q_lower=0.025, q_upper=0.975,
                     label_self='prior', label_arg='posterior', **subplots_kw):
        """Compare pdfs of the internal and an external parameter distribution.
Generates a figure with one subplot per parameter showing the
(marginal) pdf of the parameter distribution defined by self.mu and
self.cov together with another parameter distribution defined by the
mu and cov arguments. Especially useful for comparing the change in
parameter distribution from prior to posterior.
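
        A minimal usage sketch (assuming pars is a parameter_container and
        post_mu, post_cov hold a posterior mean and covariance of matching
        dimension):

            fig, axes = pars.compare_pdfs(post_mu, post_cov)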
"""
P = self.params.shape[0]
fig, axes = plt.subplots((P - 1) // 4 + 1, min(P, 4), squeeze=False,
**subplots_kw);
for par, ax in zip(self.params.itertuples(), axes.flatten()[:P]):
ind = par.Index
name = par.name
par = par.transform
lim1 = par.transformed_ppf(np.r_[q_lower, q_upper], self.mu[ind],
self.cov[ind, ind])
lim2 = par.transformed_ppf(np.r_[q_lower, q_upper], mu[ind],
cov[ind, ind])
xx = np.linspace(min(lim1[0], lim2[0]), max(lim1[1], lim2[1]), 500)
ax.plot(xx, par.transformed_pdf(
xx, self.mu[ind], self.cov[ind, ind]), label=label_self);
ax.plot(xx, par.transformed_pdf(
xx, mu[ind], cov[ind, ind]), label=label_arg);
ax.set_xlabel(name)
for row in axes:
row[0].set_ylabel('density value')
axes[0, 0].legend()
for ax in axes.flatten()[P:]:
ax.set_visible(False)
fig.tight_layout()
return fig, axes
def plot_param_hist(self, samples, transformed=True, only_marginals=False,
**distplot_kws):
if transformed:
samples = self.transform(samples)
samples = pd.DataFrame(samples, columns=self.params.name)
if only_marginals:
fig, axes = plt.subplots(1, self.P)
for par in self.params.itertuples():
i = par.Index
sns.distplot(samples[par.name], ax=axes[i], **distplot_kws)
axes[i].set_xlabel(par.name)
if i == 0:
axes[i].set_ylabel('pdf')
return fig, axes
else:
pg = sns.PairGrid(samples, diag_sharey=False)
# scatter plot in upper diagonal
pg = pg.map_upper(plt.scatter, alpha=0.3)
            # correlation of samples in lower diagonal
pg = pg.map_lower(plot_corrcoef)
# fill diagonal with empty axes
pg = pg.map_diag(lambda x, **kwargs: None)
# plot marginal histograms in diagonal
for par in self.params.itertuples():
i = par.Index
sns.distplot(samples[par.name], ax=pg.diag_axes[i],
**distplot_kws)
return pg
def plot_param_dist(self, mu=None, cov=None, S=500, q_lower=0.005,
q_upper=0.995, only_marginals=False, dist_names=['']):
if mu is None:
mu = self.mu
if cov is None:
cov = self.cov
if only_marginals:
fig, axes = plt.subplots(1, self.P)
for par in self.params.itertuples():
i = par.Index
xlim = par.transform.transformed_ppf(np.r_[q_lower, q_upper],
mu[i], cov[i, i])
x = np.linspace(xlim[0], xlim[1], 1000)
axes[i].plot(x, par.transform.transformed_pdf(x, mu[i], cov[i, i]))
axes[i].set_xlim(xlim)
axes[i].set_xlabel(par.name)
if i == 0:
axes[i].set_ylabel('pdf')
return fig, axes
else:
samples = np.random.multivariate_normal(mu, cov, S)
samples = self.transform(samples)
samples = pd.DataFrame(samples, columns=self.params.name)
samples['distribution'] = dist_names[0]
pg = sns.PairGrid(samples, hue='distribution', diag_sharey=False)
# scatter plot in upper diagonal
pg = pg.map_upper(plt.scatter, alpha=0.3)
            # correlation of samples in lower diagonal
pg = pg.map_lower(plot_corrcoef)
# fill diagonal with empty axes
pg = pg.map_diag(lambda x, **kwargs: None)
# plot analytical pdfs in diagonal
for par in self.params.itertuples():
i = par.Index
xlim = par.transform.transformed_ppf(np.r_[q_lower, q_upper],
mu[i], cov[i, i])
x = np.linspace(xlim[0], xlim[1], 1000)
pg.diag_axes[i].plot(x, par.transform.transformed_pdf(x, mu[i], cov[i, i]))
pg.diag_axes[i].set_xlim(xlim)
# also set y-limits of off-diagonal
if self.P > 1:
if i==0:
pg.axes[0, 1].set_ylim(xlim)
else:
pg.axes[i, 0].set_ylim(xlim)
# pg.add_legend(frameon=True)
return pg
def plot_corrcoef(x, y, **kwargs):
corrcoefs = np.corrcoef(x, y)
ax = plt.gca()
ax.text(0.5, 0.5, 'R = %4.2f' % corrcoefs[0, 1],
horizontalalignment='center', verticalalignment='center',
transform=ax.transAxes, **kwargs)
# ax.set_axis_off()
#%% some tests
if __name__ == "__main__":
# test parameter container
pars = parameter_container()
pars.add_param('noisestd', 0, 1, transform=exponential())
pars.add_param('prior', 0.8, 0.5, transform=gaussprob())
pars.add_param('ndtmean', -5, 2)
pg = pars.plot_param_dist()
for i, par in pars.params.iterrows():
mu = pars.mu[i]
sigma2 = pars.cov[i, i]
x = par.transform.transformed_mode(mu, sigma2)
pg.diag_axes[i].plot(x, par.transform.transformed_pdf(x, mu, sigma2), '*r')
# function for checking the implemented gaussprobpdf
def check_gaussprobpdf(mu=0.0, sigma=1.0):
g_samples = scipy.stats.norm.rvs(loc=mu, scale=sigma, size=10000)
p_samples = scipy.stats.norm.cdf(g_samples)
gtr = gaussprob()
plt.figure()
ax = sns.distplot(p_samples)
lower, upper = ax.get_xlim()
yy = np.linspace(lower, upper, 1000)
ax.plot(yy, gtr.transformed_pdf(yy, mu, sigma**2))
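    # analogous check for the zero transform above (a hedged sketch, not part
    # of the original tests): mass below 0 is mapped onto the point 0, where
    # transformed_pdf reports the Gaussian CDF at 0 instead of a density value
    def check_zerotransformpdf(mu=0.0, sigma=1.0):
        g_samples = scipy.stats.norm.rvs(loc=mu, scale=sigma, size=10000)
        ztr = zero()
        z_samples = ztr.transform(g_samples)
        plt.figure()
        # histogram of the strictly positive part of the transformed samples
        ax = sns.distplot(z_samples[z_samples > 0])
        lower, upper = ax.get_xlim()
        yy = np.linspace(max(lower, 1e-6), upper, 1000)
        ax.plot(yy, ztr.transformed_pdf(yy, mu, sigma**2))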
|
|
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2019, Arm Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Various utilities for interactive notebooks, plus some generic plot-related
functions.
"""
import functools
import collections
import warnings
import importlib
import contextlib
import inspect
from uuid import uuid4
from itertools import starmap
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backend_bases import MouseButton
import holoviews as hv
import bokeh.models
import panel as pn
from cycler import cycler as make_cycler
from ipywidgets import widgets, Layout, interact
from IPython.display import display
from lisa.utils import is_running_ipython, order_as
# Enable all backends, finishing with matplotlib so it becomes the default (for
# backward compat). Since this also corresponds to holoviews default, we are
# not losing anything, as the user will need hv.extension('bokeh') anyway.
_backends = ['plotly', 'bokeh', 'matplotlib']
# If the user selected a backend already, use it as the default by activating it
# last
_curr_backend = hv.Store.current_backend
if _curr_backend:
_backends.remove(_curr_backend)
_backends.append(_curr_backend)
for backend in _backends:
try:
importlib.import_module(backend)
except Exception:
pass
else:
hv.extension(backend)
COLOR_CYCLE = [
'#377eb8', '#ff7f00', '#4daf4a',
'#f781bf', '#a65628', '#984ea3',
'#999999', '#e41a1c', '#dede00'
]
"""
Colorblind-friendly cycle, see https://gist.github.com/thriveth/8560036
"""
plt.rcParams['axes.prop_cycle'] = make_cycler(color=COLOR_CYCLE)
class WrappingHBox(widgets.HBox):
"""
HBox that will overflow on multiple lines if the content is too large to
fit on one line.
"""
def __init__(self, *args, **kwargs):
layout = Layout(
# Overflow items to the next line rather than hiding them
flex_flow='row wrap',
            # Evenly spread items on one line
justify_content='space-around',
)
super().__init__(*args, layout=layout, **kwargs)
# Make a subclass so we can integrate better with mplcursors
class _DataframeLinkMarker(mpl.lines.Line2D):
pass
# mplcursors is not a dependency anymore as interactive plots are now done with
# bokeh, but keep this around for compatibility in case someone needs
# matplotlib to get a better fixed output and wants a bit of interactivity for
# development as well.
try:
import mplcursors
except ImportError:
pass
else:
# Tell mplcursors that we are never selecting the marker line, so that it
# will still show the coordinates of the data that were plotted, rather
# than useless coordinates of the marker
@mplcursors.compute_pick.register(_DataframeLinkMarker)
def _(artist, event):
return None
def _make_vline(axis, *args, **kwargs):
vline = axis.axvline(*args, **kwargs)
assert type(vline) is mpl.lines.Line2D # pylint: disable=unidiomatic-typecheck
vline.__class__ = _DataframeLinkMarker
vline.set_visible(False)
return vline
def axis_link_dataframes(axis, df_list, before=1, after=5, cursor_color='red', follow_cursor=False):
"""
Link some dataframes to an axis displayed in the interactive matplotlib widget.
:param axis: Axis to link to.
:type axis: matplotlib.axes.Axes
:param df_list: List of pandas dataframe to link.
:type df_list: list(pandas.DataFrame)
:param before: Number of dataframe rows to display before the selected
location.
:type before: int
:param after: Number of dataframe rows to display after the selected
location.
:type after: int
:param cursor_color: Color of the vertical line added at the clicked
location.
:type cursor_color: str
:param follow_cursor: If ``True``, the cursor will be followed without the
need to click.
:type follow_cursor: bool
When the user clicks on the graph, a vertical marker will appear and the
dataframe slice will update to show the relevant row.
.. note:: This requires the matplotlib widget enabled using ``%matplotlib
widget`` magic.
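
    **Example** (a minimal sketch; ``df_sched`` stands for any time-indexed
    dataframe of your own)::

        fig, axis = make_figure(6, 4, 1, 1, interactive=True)
        axis.plot(df_sched.index, df_sched['util'])
        axis_link_dataframes(axis, [df_sched], before=1, after=3)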
"""
df_list = [df for df in df_list if not df.empty]
output_list = [widgets.Output() for df in df_list]
layout = Layout(
# Overflow items to the next line rather than hiding them
flex_flow='row wrap',
        # Evenly spread items on one line when there is more than one item,
# align left otherwise
justify_content='space-around' if len(df_list) > 1 else 'flex-start',
)
hbox = widgets.HBox(output_list, layout=layout)
cursor_vline = _make_vline(axis, color=cursor_color)
def show_loc(loc):
cursor_vline.set_xdata(loc)
cursor_vline.set_visible(True)
for df, output in zip(df_list, output_list):
if loc < df.index[0]:
iloc = 0
elif loc > df.index[-1]:
iloc = -1
else:
iloc = df.index.get_loc(loc, method='ffill')
index_loc = df.index[iloc]
begin = max(iloc - before, 0)
end = min(iloc + after, len(df))
sliced_df = df.iloc[begin:end]
def highlight_row(row):
if row.name == index_loc: # pylint: disable=cell-var-from-loop
return ['background: lightblue'] * len(row)
else:
return [''] * len(row)
styler = sliced_df.style.apply(highlight_row, axis=1)
styler = styler.set_properties(**{
'text-align': 'left',
                # preserve multiple consecutive spaces
'white-space': 'pre',
# Make sure all chars have the same width to preserve column
# alignments in preformatted strings
'font-family': 'monospace',
})
# wait=True avoids flicker by waiting for new content to be ready
# to display before clearing the previous one
output.clear_output(wait=True)
with output:
display(styler)
init_loc = min((df.index[0] for df in df_list), default=0)
show_loc(init_loc)
def handler(event):
loc = event.xdata
return show_loc(loc)
event = 'motion_notify_event' if follow_cursor else 'button_press_event'
axis.get_figure().canvas.mpl_connect(event, handler)
display(hbox)
def axis_cursor_delta(axis, colors=('blue', 'green'), buttons=(MouseButton.LEFT, MouseButton.RIGHT)):
"""
Display the time delta between two vertical lines drawn on clicks.
:param axis: Axis to link to.
:type axis: matplotlib.axes.Axes
:param colors: List of colors to use for vertical lines.
:type colors: list(str)
:param buttons: Mouse buttons to use for each vertical line.
:type buttons: list(matplotlib.backend_bases.MouseButton)
.. note:: This requires the matplotlib widget enabled using
``%matplotlib widget`` magic.
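
    **Example** (a minimal sketch)::

        fig, axis = make_figure(6, 4, 1, 1, interactive=True)
        axis_cursor_delta(axis)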
"""
delta_widget = widgets.Text(
value='0',
placeholder='0',
description='Cursors delta',
disabled=False,
)
vlines = [
_make_vline(axis, color=color)
for color in colors
]
assert len(vlines) == 2
vlines_map = dict(zip(buttons, vlines))
vlines_loc = collections.defaultdict(
lambda: min(axis.get_xbound())
)
def handler(event):
loc = event.xdata
button = event.button
vline = vlines_map[button]
vlines_loc[button] = loc
vline.set_xdata(loc)
vline.set_visible(True)
locs = [
vlines_loc[button]
for button in buttons
]
delta = locs[1] - locs[0]
delta_widget.value = str(delta)
axis.get_figure().canvas.mpl_connect('button_press_event', handler)
display(delta_widget)
def interact_tasks(trace, tasks=None, kind=None):
"""
Decorator to make a block of code parametrized on a task that can be
selected from a dropdown.
:param trace: Trace object in use
:type trace: lisa.trace.Trace
:param tasks: List of tasks that are available. See ``kind`` for
alternative way of specifying tasks.
:type tasks: list(int or str or lisa.trace.TaskID) or None
:param kind: Alternatively to ``tasks``, a kind can be provided and the
tasks will be selected from the trace for you. It can be:
* ``rtapp`` to select all rt-app tasks
* ``all`` to select all tasks.
:type kind: str or None
**Example**::
trace = Trace('trace.dat')
# Allow selecting any rtapp task
@interact_tasks(trace, kind='rtapp')
def do_plot(task):
trace.ana.load_tracking.plot_task_signals(task)
"""
if tasks is not None:
tasks = [
trace.get_task_id(task, update=False)
for task in tasks
]
else:
kind = kind or 'all'
if kind == 'all':
tasks = trace.task_ids
elif kind == 'rtapp':
tasks = trace.ana.rta.rtapp_tasks
else:
raise ValueError(f'Unknown task kind: {kind}')
# Map of friendly names to actual objects
task_map = {
str(task): task
for task in tasks
}
def decorator(f):
@functools.wraps(f)
@interact
def wrapper(task=sorted(task_map.keys())):
task = task_map[task]
return f(task)
return wrapper
return decorator
def make_figure(width, height, nrows, ncols, interactive=None, **kwargs):
"""
Make a :class:`matplotlib.figure.Figure` and its axes.
:param width: Width of the figure.
:type width: int
:param height: Height of the figure.
:type height: int
:param interactive: If ``True``, create an interactive figure. Defaults to
``True`` when running under IPython, ``False`` otherwise.
:type interactive: bool or None
:Variable keyword arguments: Forwarded to :class:`matplotlib.figure.Figure`
:returns: A tuple of:
* :class:`matplotlib.figure.Figure`
* :class:`matplotlib.axes.Axes` as a scalar, an iterable (1D) or iterable of iterable matrix (2D)
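
    **Example** (a minimal sketch)::

        fig, axes = make_figure(4, 3, nrows=1, ncols=2, interactive=False)
        for ax in axes:
            ax.plot([0, 1], [0, 1])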
"""
if interactive is None:
interactive = is_running_ipython()
if not interactive and tuple(map(int, mpl.__version__.split('.'))) <= (3, 0, 3):
warnings.warn('This version of matplotlib does not allow saving figures from axis created using Figure(), forcing interactive=True')
interactive = True
width *= ncols
height *= nrows
if interactive:
figure, axes = plt.subplots(
figsize=(width, height),
nrows=nrows,
ncols=ncols,
**kwargs,
)
else:
figure = Figure(figsize=(width, height))
axes = figure.subplots(ncols=ncols, nrows=nrows, **kwargs)
return (figure, axes)
def plot_signal(series, name=None, interpolation=None, add_markers=True):
"""
Plot a signal using ``holoviews`` library.
:param series: Series of values to plot.
:type series: pandas.Series
:param name: Name of the signal. Defaults to the series name.
:type name: str or None
:param interpolation: Interpolate type for the signal. Defaults to
``steps-post`` which is the correct value for signals encoded as a
series of updates.
:type interpolation: str or None
:param add_markers: Add markers to the plot.
:type add_markers: bool
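
    **Example** (a minimal sketch)::

        series = pd.Series([0, 1, 0], index=[0.0, 1.5, 3.0], name='util')
        fig = plot_signal(series)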
"""
if isinstance(series, pd.DataFrame):
try:
col, = series.columns
except ValueError:
raise ValueError('Can only pass Series or DataFrame with one column')
else:
series = series[col]
label = name or series.name
interpolation = interpolation or 'steps-post'
kdims = [
# Ensure shared_axes works well across plots.
        # We don't set the unit as this would prevent shared_axes from working if
# the other plots do not set the unit, which is what usually
# happens, since only name/label is taken from pandas index names.
hv.Dimension('Time'),
]
fig = hv.Curve(
series,
label=label,
kdims=kdims,
).opts(
interpolation=interpolation,
title=label,
)
if add_markers:
# The "marker" group for Scatter is used to provide marker-specific
        # styling in generic code.
# TODO: use mute_legend=True once this bug is fixed:
# https://github.com/holoviz/holoviews/issues/3936
fig *= hv.Scatter(
series,
label=label,
group='marker',
kdims=kdims,
)
return fig
# TODO: revisit when this discussion is solved:
# https://github.com/holoviz/holoviews/issues/4988
def _hv_neutral():
"""
Neutral element of holoviews operations such that
``x <op> holoviews_neutral() == x``.
.. note:: Holoviews currently does not have a perfectly neutral element.
"""
return hv.Curve([])
def _hv_backend_twinx(backend, display, y_range):
def hook(plot, element):
p = plot.state
if backend == 'bokeh':
glyph = p.renderers[-1]
vals = glyph.data_source.data['y']
if y_range is None:
_y_range = (vals.min(), vals.max())
else:
_y_range = y_range
name = uuid4().hex
p.extra_y_ranges.update({
name: bokeh.models.Range1d(start=_y_range[0], end=_y_range[1])
})
glyph.y_range_name = name
if display:
p.add_layout(
bokeh.models.LinearAxis(y_range_name=name),
'right'
)
elif backend == 'matplotlib':
ax = plot.handles['axis']
twin = ax.twinx()
plot.handles['axis'] = twin
if not display:
twin.get_yaxis().set_ticks([])
if y_range is not None:
twin.set_ylim(y_range)
else:
raise ValueError(f'Unsupported backend={backend}')
return hook
def _hv_twinx(fig, display=True, y_range=None):
"""
Similar to matplotlib's twinx feature where the element's Y axis is
separated from the default one and drawn on the right of the plot.
:param display: If ``True``, the ticks will be displayed on the right of
the plot. Otherwise, it will be hidden.
:type display: bool
.. note:: This uses a custom hook for each backend, so it will be disabled
if the user also set their own hook.
"""
kwargs = dict(
display=display,
y_range=y_range,
)
return fig.options(
backend='bokeh',
hooks=[_hv_backend_twinx('bokeh', **kwargs)],
).options(
backend='matplotlib',
hooks=[_hv_backend_twinx('matplotlib', **kwargs)],
)
def _hv_multi_line_title_hook(plot, element):
p = plot.state
# Add in reverse since titles will pile upwards
lines = list(reversed(plot.title.splitlines()))
if len(lines) > 1:
for line in lines:
title = bokeh.models.Title(
text=line,
standoff=1,
)
p.add_layout(title, 'above')
# Add an empty line at the top to provide visual separation
# with other plots
p.add_layout(bokeh.models.Title(text=' '), 'above')
del p.title
# Adjust the width of the plot so that the title is not truncated
max_len = max(map(len, lines))
# Empirical, should probably inspect the title font size instead
px_per_char = 12
p.width = max(p.width, max_len * px_per_char)
def _hv_multi_line_title(fig):
"""
Holoviews hook to allow multiline titles.
    Also enlarges the plot if it's too small for its title.
"""
return fig.options(hooks=[_hv_multi_line_title_hook])
@contextlib.contextmanager
def _hv_set_backend(backend):
"""
Context manager to work around this issue:
https://github.com/holoviz/holoviews/issues/4962
"""
old_backend = hv.Store.current_backend
try:
# This is safe to do as long as the backend has been
# loaded with hv.extension() beforehand, which happens
# at import time
hv.Store.set_current_backend(backend)
yield
finally:
if old_backend:
hv.Store.set_current_backend(old_backend)
def _hv_link_dataframes(fig, dfs):
"""
Link the provided dataframes to the holoviews figure.
:returns: A panel displaying the dataframes and the figure.
"""
def make_table(i, df):
event_header = [
col for col in df.columns
if (
col.startswith('__') or
col == 'event'
)
]
df = df[order_as(df.columns, event_header)]
if df.index.name in df.columns:
df.index = df.index.copy(deep=False)
df.index.name = ''
df_widget = pn.widgets.DataFrame(
df,
name=df.attrs.get('name', f'dataframe #{i}'),
formatters={
'bool': {'type': 'tickCross'}
},
            # Disable editing of the dataframe
disabled=True,
sortable=False,
# Ensure some columns are always displayed
# Note: Tabulator requires a list of column names instead.
frozen_columns=len(event_header) + 1,
height=400,
autosize_mode='fit_viewport',
row_height=25,
# Only relevant for pn.widgets.Tabulator
#theme='simple',
#selectable='checkbox',
# Avoid transferring too much data at once to the browser
#pagination='remote',
#page_size=100,
)
return df_widget
def mark_table_selection(tables):
def plot(*args):
xs = [
table.value.index[x]
for xs, table in zip(args, tables)
for x in xs
]
return hv.Overlay(
[
hv.VLine(x).opts(
backend='bokeh',
line_dash='dashed',
)
for x in xs
]
)
tables = list(tables)
streams = [
table.param.selection
for table in tables
]
bound = pn.bind(plot, *streams)
dmap = hv.DynamicMap(bound).opts(framewise=True)
return dmap
def scroll_table(tables):
def record_taps(x, y):
for table in tables:
if x is not None:
df = table.value
i = df.index.get_loc(x, method='ffill')
# On the pn.widgets.DataFrame, this will automatically scroll in the table.
                    # On pn.widgets.Tabulator, this currently will not, unfortunately.
table.selection = [i]
return hv.Points([])
tap = hv.streams.SingleTap(transient=True)
dmap = hv.DynamicMap(record_taps, streams=[tap])
return dmap
tables = list(starmap(make_table, enumerate(dfs)))
markers = mark_table_selection(tables)
scroll = scroll_table(tables)
# Workaround issue:
# https://github.com/holoviz/holoviews/issues/5003
if isinstance(fig, hv.Layout):
ncols = fig._max_cols
else:
ncols = None
fig = fig * (scroll * markers)
if ncols is not None:
fig = fig.cols(ncols)
if len(tables) > 1:
tables_widget = pn.Tabs(
*(
(table.name, table)
for table in tables
),
align='center',
)
else:
tables_widget = tables[0]
tables_widget.align = 'center'
return pn.Column(
fig,
tables_widget,
sizing_mode='stretch_both',
align='center',
)
class _HoloviewsPanelWrapper:
"""
Dummy base class used to identify classes created by
:func:`_hv_wrap_fig_cls`.
"""
@functools.lru_cache(maxsize=None)
def _hv_wrap_fig_cls(cls):
"""
Wrap a holoviews element class so that it is displayed inside a panel but
still exhibits the holoviews API.
.. note:: Due to https://github.com/holoviz/holoviews/issues/3577, ``x <op>
    y`` will not work if ``x`` is a holoviews object, but the opposite will
work.
"""
def wrap_fig(self, x):
if x.__class__.__module__.startswith('holoviews'):
return _hv_fig_to_pane(
fig=x,
make_pane=self._make_pane,
)
else:
return x
def make_wrapper(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
x = f(self._fig, *args, **kwargs)
return wrap_fig(self, x)
return wrapper
def make_op(name):
def op(self, other):
f = getattr(self._fig, name)
# Unwrap the holoviews figure to avoid exceptions
if isinstance(other, _HoloviewsPanelWrapper):
other = other._fig
x = f(other)
return wrap_fig(self, x)
return op
class NewCls(_HoloviewsPanelWrapper):
def __init__(self, fig, make_pane):
self._fig = fig
self._make_pane = make_pane
def _repr_mimebundle_(self, *args, **kwargs):
pane = self._make_pane(self._fig)
return pane._repr_mimebundle_(*args, **kwargs)
def opts(self, *args, **kwargs):
return wrap_fig(
self,
self._fig.opts(*args, **kwargs),
)
for attr, x in inspect.getmembers(cls):
if (not attr.startswith('_')) and inspect.isfunction(x):
setattr(NewCls, attr, make_wrapper(x))
for name in (
'__add__',
'__radd__',
'__sub__',
'__rsub__',
'__mul__',
'__rmul__',
'__matmul__',
'__rmatmul__',
'__truediv__',
'__rtruediv__',
'__floordiv__',
'__rfloordiv__',
'__mod__',
'__rmod__',
'__divmod__',
'__rdivmod__',
'__pow__',
'__rpow__',
'__and__',
'__rand__',
'__xor__',
'__rxor__',
'__or__',
'__ror__',
'__rshift__',
'__rrshift__',
'__lshift__',
'__rlshift__',
):
if hasattr(cls, name):
setattr(NewCls, name, make_op(name))
return NewCls
def _hv_fig_to_pane(fig, make_pane):
"""
Stop-gap measure until there is a proper solution for:
https://discourse.holoviz.org/t/replace-holoviews-notebook-rendering-with-a-panel/2519/12
"""
cls = _hv_wrap_fig_cls(fig.__class__)
return cls(fig=fig, make_pane=make_pane)
# vim: set tabstop=4 shiftwidth=4 textwidth=80 expandtab :
|
|
"""Common functions for RFLink component tests and generic platform tests."""
from unittest.mock import Mock
from homeassistant.bootstrap import async_setup_component
from homeassistant.components.rflink import (
CONF_RECONNECT_INTERVAL,
DATA_ENTITY_LOOKUP,
EVENT_KEY_COMMAND,
EVENT_KEY_SENSOR,
SERVICE_SEND_COMMAND,
TMP_ENTITY,
RflinkCommand,
)
from homeassistant.const import ATTR_ENTITY_ID, SERVICE_STOP_COVER, SERVICE_TURN_OFF
async def mock_rflink(
hass, config, domain, monkeypatch, failures=None, failcommand=False
):
"""Create mock RFLink asyncio protocol, test component setup."""
transport, protocol = (Mock(), Mock())
async def send_command_ack(*command):
return not failcommand
protocol.send_command_ack = Mock(wraps=send_command_ack)
def send_command(*command):
return not failcommand
protocol.send_command = Mock(wraps=send_command)
async def create_rflink_connection(*args, **kwargs):
"""Return mocked transport and protocol."""
# failures can be a list of booleans indicating in which sequence
        # creating a connection should succeed or fail
if failures:
fail = failures.pop()
else:
fail = False
if fail:
raise ConnectionRefusedError
else:
return transport, protocol
mock_create = Mock(wraps=create_rflink_connection)
monkeypatch.setattr(
"homeassistant.components.rflink.create_rflink_connection", mock_create
)
await async_setup_component(hass, "rflink", config)
await async_setup_component(hass, domain, config)
await hass.async_block_till_done()
# hook into mock config for injecting events
event_callback = mock_create.call_args_list[0][1]["event_callback"]
assert event_callback
disconnect_callback = mock_create.call_args_list[0][1]["disconnect_callback"]
return event_callback, mock_create, protocol, disconnect_callback
async def test_version_banner(hass, monkeypatch):
    """Test that receiving a version banner doesn't cause issues."""
# use sensor domain during testing main platform
domain = "sensor"
config = {
"rflink": {"port": "/dev/ttyABC0"},
domain: {
"platform": "rflink",
"devices": {"test": {"name": "test", "sensor_type": "temperature"}},
},
}
# setup mocking rflink module
event_callback, _, _, _ = await mock_rflink(hass, config, domain, monkeypatch)
event_callback(
{
"hardware": "Nodo RadioFrequencyLink",
"firmware": "RFLink Gateway",
"version": "1.1",
"revision": "45",
}
)
async def test_send_no_wait(hass, monkeypatch):
"""Test command sending without ack."""
domain = "switch"
config = {
"rflink": {"port": "/dev/ttyABC0", "wait_for_ack": False},
domain: {
"platform": "rflink",
"devices": {
"protocol_0_0": {"name": "test", "aliases": ["test_alias_0_0"]}
},
},
}
# setup mocking rflink module
_, _, protocol, _ = await mock_rflink(hass, config, domain, monkeypatch)
hass.async_create_task(
hass.services.async_call(
domain, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: "switch.test"}
)
)
await hass.async_block_till_done()
assert protocol.send_command.call_args_list[0][0][0] == "protocol_0_0"
assert protocol.send_command.call_args_list[0][0][1] == "off"
async def test_cover_send_no_wait(hass, monkeypatch):
"""Test command sending to a cover device without ack."""
domain = "cover"
config = {
"rflink": {"port": "/dev/ttyABC0", "wait_for_ack": False},
domain: {
"platform": "rflink",
"devices": {
"RTS_0100F2_0": {"name": "test", "aliases": ["test_alias_0_0"]}
},
},
}
# setup mocking rflink module
_, _, protocol, _ = await mock_rflink(hass, config, domain, monkeypatch)
hass.async_create_task(
hass.services.async_call(
domain, SERVICE_STOP_COVER, {ATTR_ENTITY_ID: "cover.test"}
)
)
await hass.async_block_till_done()
assert protocol.send_command.call_args_list[0][0][0] == "RTS_0100F2_0"
assert protocol.send_command.call_args_list[0][0][1] == "STOP"
async def test_send_command(hass, monkeypatch):
"""Test send_command service."""
domain = "rflink"
config = {"rflink": {"port": "/dev/ttyABC0"}}
# setup mocking rflink module
_, _, protocol, _ = await mock_rflink(hass, config, domain, monkeypatch)
hass.async_create_task(
hass.services.async_call(
domain,
SERVICE_SEND_COMMAND,
{"device_id": "newkaku_0000c6c2_1", "command": "on"},
)
)
await hass.async_block_till_done()
assert protocol.send_command_ack.call_args_list[0][0][0] == "newkaku_0000c6c2_1"
assert protocol.send_command_ack.call_args_list[0][0][1] == "on"
async def test_send_command_invalid_arguments(hass, monkeypatch):
    """Test send_command service with invalid arguments."""
domain = "rflink"
config = {"rflink": {"port": "/dev/ttyABC0"}}
# setup mocking rflink module
_, _, protocol, _ = await mock_rflink(hass, config, domain, monkeypatch)
# one argument missing
hass.async_create_task(
hass.services.async_call(domain, SERVICE_SEND_COMMAND, {"command": "on"})
)
hass.async_create_task(
hass.services.async_call(
domain, SERVICE_SEND_COMMAND, {"device_id": "newkaku_0000c6c2_1"}
)
)
# no arguments
hass.async_create_task(hass.services.async_call(domain, SERVICE_SEND_COMMAND, {}))
await hass.async_block_till_done()
assert protocol.send_command_ack.call_args_list == []
# bad command (no_command)
success = await hass.services.async_call(
domain,
SERVICE_SEND_COMMAND,
{"device_id": "newkaku_0000c6c2_1", "command": "no_command"},
)
assert not success, "send command should not succeed for unknown command"
async def test_reconnecting_after_disconnect(hass, monkeypatch):
"""An unexpected disconnect should cause a reconnect."""
domain = "sensor"
config = {
"rflink": {"port": "/dev/ttyABC0", CONF_RECONNECT_INTERVAL: 0},
domain: {"platform": "rflink"},
}
# setup mocking rflink module
_, mock_create, _, disconnect_callback = await mock_rflink(
hass, config, domain, monkeypatch
)
assert disconnect_callback, "disconnect callback not passed to rflink"
# rflink initiated disconnect
disconnect_callback(None)
await hass.async_block_till_done()
    # we expect 2 calls, the initial connect and the reconnect
assert mock_create.call_count == 2
async def test_reconnecting_after_failure(hass, monkeypatch):
"""A failure to reconnect should be retried."""
domain = "sensor"
config = {
"rflink": {"port": "/dev/ttyABC0", CONF_RECONNECT_INTERVAL: 0},
domain: {"platform": "rflink"},
}
# success first time but fail second
failures = [False, True, False]
# setup mocking rflink module
_, mock_create, _, disconnect_callback = await mock_rflink(
hass, config, domain, monkeypatch, failures=failures
)
# rflink initiated disconnect
disconnect_callback(None)
# wait for reconnects to have happened
await hass.async_block_till_done()
await hass.async_block_till_done()
# we expect 3 calls, the initial and 2 reconnects
assert mock_create.call_count == 3
async def test_error_when_not_connected(hass, monkeypatch):
"""Sending command should error when not connected."""
domain = "switch"
config = {
"rflink": {"port": "/dev/ttyABC0", CONF_RECONNECT_INTERVAL: 0},
domain: {
"platform": "rflink",
"devices": {
"protocol_0_0": {"name": "test", "aliases": ["test_alias_0_0"]}
},
},
}
# success first time but fail second
failures = [False, True, False]
# setup mocking rflink module
_, _, _, disconnect_callback = await mock_rflink(
hass, config, domain, monkeypatch, failures=failures
)
# rflink initiated disconnect
disconnect_callback(None)
success = await hass.services.async_call(
domain, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: "switch.test"}
)
assert not success, "changing state should not succeed when disconnected"
async def test_async_send_command_error(hass, monkeypatch):
"""Sending command should error when protocol fails."""
domain = "rflink"
config = {"rflink": {"port": "/dev/ttyABC0"}}
# setup mocking rflink module
_, _, protocol, _ = await mock_rflink(
hass, config, domain, monkeypatch, failcommand=True
)
success = await hass.services.async_call(
domain,
SERVICE_SEND_COMMAND,
{"device_id": "newkaku_0000c6c2_1", "command": SERVICE_TURN_OFF},
)
await hass.async_block_till_done()
assert not success, "send command should not succeed if failcommand=True"
assert protocol.send_command_ack.call_args_list[0][0][0] == "newkaku_0000c6c2_1"
assert protocol.send_command_ack.call_args_list[0][0][1] == SERVICE_TURN_OFF
async def test_race_condition(hass, monkeypatch):
"""Test race condition for unknown components."""
domain = "light"
config = {"rflink": {"port": "/dev/ttyABC0"}, domain: {"platform": "rflink"}}
tmp_entity = TMP_ENTITY.format("test3")
# setup mocking rflink module
event_callback, _, _, _ = await mock_rflink(hass, config, domain, monkeypatch)
# test event for new unconfigured sensor
event_callback({"id": "test3", "command": "off"})
event_callback({"id": "test3", "command": "on"})
# tmp_entity added to EVENT_KEY_COMMAND
assert tmp_entity in hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_COMMAND]["test3"]
    # tmp_entity must not be added to EVENT_KEY_SENSOR
assert tmp_entity not in hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_SENSOR]["test3"]
await hass.async_block_till_done()
# test state of new sensor
new_sensor = hass.states.get(domain + ".test3")
assert new_sensor
assert new_sensor.state == "off"
event_callback({"id": "test3", "command": "on"})
await hass.async_block_till_done()
# tmp_entity must be deleted from EVENT_KEY_COMMAND
assert tmp_entity not in hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_COMMAND]["test3"]
# test state of new sensor
new_sensor = hass.states.get(domain + ".test3")
assert new_sensor
assert new_sensor.state == "on"
async def test_not_connected(hass, monkeypatch):
"""Test Error when sending commands to a disconnected device."""
import pytest
from homeassistant.core import HomeAssistantError
test_device = RflinkCommand("DUMMY_DEVICE")
RflinkCommand.set_rflink_protocol(None)
with pytest.raises(HomeAssistantError):
await test_device._async_handle_command("turn_on")
|
|
"""Support for the Abode Security System."""
from asyncio import gather
from copy import deepcopy
from functools import partial
from abodepy import Abode
from abodepy.exceptions import AbodeAuthenticationException, AbodeException
import abodepy.helpers.timeline as TIMELINE
from requests.exceptions import ConnectTimeout, HTTPError
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, SOURCE_REAUTH
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_DATE,
ATTR_DEVICE_ID,
ATTR_ENTITY_ID,
ATTR_TIME,
CONF_PASSWORD,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.dispatcher import dispatcher_send
from homeassistant.helpers.entity import Entity
from .const import ATTRIBUTION, DEFAULT_CACHEDB, DOMAIN, LOGGER
CONF_POLLING = "polling"
SERVICE_SETTINGS = "change_setting"
SERVICE_CAPTURE_IMAGE = "capture_image"
SERVICE_TRIGGER_AUTOMATION = "trigger_automation"
ATTR_DEVICE_NAME = "device_name"
ATTR_DEVICE_TYPE = "device_type"
ATTR_EVENT_CODE = "event_code"
ATTR_EVENT_NAME = "event_name"
ATTR_EVENT_TYPE = "event_type"
ATTR_EVENT_UTC = "event_utc"
ATTR_SETTING = "setting"
ATTR_USER_NAME = "user_name"
ATTR_APP_TYPE = "app_type"
ATTR_EVENT_BY = "event_by"
ATTR_VALUE = "value"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_POLLING, default=False): cv.boolean,
}
)
},
extra=vol.ALLOW_EXTRA,
)
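# Illustrative configuration.yaml snippet accepted by the schema above
# (values are placeholders, not from the original source):
#
#     abode:
#       username: user@example.com
#       password: YOUR_PASSWORD
#       polling: false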
CHANGE_SETTING_SCHEMA = vol.Schema(
{vol.Required(ATTR_SETTING): cv.string, vol.Required(ATTR_VALUE): cv.string}
)
CAPTURE_IMAGE_SCHEMA = vol.Schema({ATTR_ENTITY_ID: cv.entity_ids})
AUTOMATION_SCHEMA = vol.Schema({ATTR_ENTITY_ID: cv.entity_ids})
PLATFORMS = [
"alarm_control_panel",
"binary_sensor",
"lock",
"switch",
"cover",
"camera",
"light",
"sensor",
]
class AbodeSystem:
"""Abode System class."""
def __init__(self, abode, polling):
"""Initialize the system."""
self.abode = abode
self.polling = polling
self.entity_ids = set()
self.logout_listener = None
async def async_setup(hass, config):
"""Set up Abode integration."""
if DOMAIN not in config:
return True
conf = config[DOMAIN]
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=deepcopy(conf)
)
)
return True
async def async_setup_entry(hass, config_entry):
"""Set up Abode integration from a config entry."""
username = config_entry.data.get(CONF_USERNAME)
password = config_entry.data.get(CONF_PASSWORD)
polling = config_entry.data.get(CONF_POLLING)
cache = hass.config.path(DEFAULT_CACHEDB)
# For previous config entries where unique_id is None
if config_entry.unique_id is None:
hass.config_entries.async_update_entry(
config_entry, unique_id=config_entry.data[CONF_USERNAME]
)
try:
abode = await hass.async_add_executor_job(
Abode, username, password, True, True, True, cache
)
except AbodeAuthenticationException as ex:
LOGGER.error("Invalid credentials: %s", ex)
await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_REAUTH},
data=config_entry.data,
)
return False
except (AbodeException, ConnectTimeout, HTTPError) as ex:
LOGGER.error("Unable to connect to Abode: %s", ex)
raise ConfigEntryNotReady from ex
hass.data[DOMAIN] = AbodeSystem(abode, polling)
for platform in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, platform)
)
await setup_hass_events(hass)
await hass.async_add_executor_job(setup_hass_services, hass)
await hass.async_add_executor_job(setup_abode_events, hass)
return True
async def async_unload_entry(hass, config_entry):
"""Unload a config entry."""
hass.services.async_remove(DOMAIN, SERVICE_SETTINGS)
hass.services.async_remove(DOMAIN, SERVICE_CAPTURE_IMAGE)
hass.services.async_remove(DOMAIN, SERVICE_TRIGGER_AUTOMATION)
tasks = []
for platform in PLATFORMS:
tasks.append(
hass.config_entries.async_forward_entry_unload(config_entry, platform)
)
await gather(*tasks)
await hass.async_add_executor_job(hass.data[DOMAIN].abode.events.stop)
await hass.async_add_executor_job(hass.data[DOMAIN].abode.logout)
hass.data[DOMAIN].logout_listener()
hass.data.pop(DOMAIN)
return True
def setup_hass_services(hass):
"""Home Assistant services."""
def change_setting(call):
"""Change an Abode system setting."""
setting = call.data.get(ATTR_SETTING)
value = call.data.get(ATTR_VALUE)
try:
hass.data[DOMAIN].abode.set_setting(setting, value)
except AbodeException as ex:
LOGGER.warning(ex)
def capture_image(call):
"""Capture a new image."""
entity_ids = call.data.get(ATTR_ENTITY_ID)
target_entities = [
entity_id
for entity_id in hass.data[DOMAIN].entity_ids
if entity_id in entity_ids
]
for entity_id in target_entities:
signal = f"abode_camera_capture_{entity_id}"
dispatcher_send(hass, signal)
def trigger_automation(call):
"""Trigger an Abode automation."""
entity_ids = call.data.get(ATTR_ENTITY_ID)
target_entities = [
entity_id
for entity_id in hass.data[DOMAIN].entity_ids
if entity_id in entity_ids
]
for entity_id in target_entities:
signal = f"abode_trigger_automation_{entity_id}"
dispatcher_send(hass, signal)
hass.services.register(
DOMAIN, SERVICE_SETTINGS, change_setting, schema=CHANGE_SETTING_SCHEMA
)
hass.services.register(
DOMAIN, SERVICE_CAPTURE_IMAGE, capture_image, schema=CAPTURE_IMAGE_SCHEMA
)
hass.services.register(
DOMAIN, SERVICE_TRIGGER_AUTOMATION, trigger_automation, schema=AUTOMATION_SCHEMA
)
async def setup_hass_events(hass):
"""Home Assistant start and stop callbacks."""
def logout(event):
"""Logout of Abode."""
if not hass.data[DOMAIN].polling:
hass.data[DOMAIN].abode.events.stop()
hass.data[DOMAIN].abode.logout()
LOGGER.info("Logged out of Abode")
if not hass.data[DOMAIN].polling:
await hass.async_add_executor_job(hass.data[DOMAIN].abode.events.start)
hass.data[DOMAIN].logout_listener = hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, logout
)
def setup_abode_events(hass):
"""Event callbacks."""
def event_callback(event, event_json):
"""Handle an event callback from Abode."""
data = {
ATTR_DEVICE_ID: event_json.get(ATTR_DEVICE_ID, ""),
ATTR_DEVICE_NAME: event_json.get(ATTR_DEVICE_NAME, ""),
ATTR_DEVICE_TYPE: event_json.get(ATTR_DEVICE_TYPE, ""),
ATTR_EVENT_CODE: event_json.get(ATTR_EVENT_CODE, ""),
ATTR_EVENT_NAME: event_json.get(ATTR_EVENT_NAME, ""),
ATTR_EVENT_TYPE: event_json.get(ATTR_EVENT_TYPE, ""),
ATTR_EVENT_UTC: event_json.get(ATTR_EVENT_UTC, ""),
ATTR_USER_NAME: event_json.get(ATTR_USER_NAME, ""),
ATTR_APP_TYPE: event_json.get(ATTR_APP_TYPE, ""),
ATTR_EVENT_BY: event_json.get(ATTR_EVENT_BY, ""),
ATTR_DATE: event_json.get(ATTR_DATE, ""),
ATTR_TIME: event_json.get(ATTR_TIME, ""),
}
hass.bus.fire(event, data)
events = [
TIMELINE.ALARM_GROUP,
TIMELINE.ALARM_END_GROUP,
TIMELINE.PANEL_FAULT_GROUP,
TIMELINE.PANEL_RESTORE_GROUP,
TIMELINE.AUTOMATION_GROUP,
TIMELINE.DISARM_GROUP,
TIMELINE.ARM_GROUP,
TIMELINE.ARM_FAULT_GROUP,
TIMELINE.TEST_GROUP,
TIMELINE.CAPTURE_GROUP,
TIMELINE.DEVICE_GROUP,
]
for event in events:
hass.data[DOMAIN].abode.events.add_event_callback(
event, partial(event_callback, event)
)
class AbodeEntity(Entity):
"""Representation of an Abode entity."""
def __init__(self, data):
"""Initialize Abode entity."""
self._data = data
self._available = True
@property
def available(self):
"""Return the available state."""
return self._available
@property
def should_poll(self):
"""Return the polling state."""
return self._data.polling
async def async_added_to_hass(self):
"""Subscribe to Abode connection status updates."""
await self.hass.async_add_executor_job(
self._data.abode.events.add_connection_status_callback,
self.unique_id,
self._update_connection_status,
)
self.hass.data[DOMAIN].entity_ids.add(self.entity_id)
async def async_will_remove_from_hass(self):
"""Unsubscribe from Abode connection status updates."""
await self.hass.async_add_executor_job(
self._data.abode.events.remove_connection_status_callback, self.unique_id
)
def _update_connection_status(self):
"""Update the entity available property."""
self._available = self._data.abode.events.connected
self.schedule_update_ha_state()
class AbodeDevice(AbodeEntity):
"""Representation of an Abode device."""
def __init__(self, data, device):
"""Initialize Abode device."""
super().__init__(data)
self._device = device
async def async_added_to_hass(self):
"""Subscribe to device events."""
await super().async_added_to_hass()
await self.hass.async_add_executor_job(
self._data.abode.events.add_device_callback,
self._device.device_id,
self._update_callback,
)
async def async_will_remove_from_hass(self):
"""Unsubscribe from device events."""
await super().async_will_remove_from_hass()
await self.hass.async_add_executor_job(
self._data.abode.events.remove_all_device_callbacks, self._device.device_id
)
def update(self):
"""Update device state."""
self._device.refresh()
@property
def name(self):
"""Return the name of the device."""
return self._device.name
@property
def extra_state_attributes(self):
"""Return the state attributes."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
"device_id": self._device.device_id,
"battery_low": self._device.battery_low,
"no_response": self._device.no_response,
"device_type": self._device.type,
}
@property
def unique_id(self):
"""Return a unique ID to use for this device."""
return self._device.device_uuid
@property
def device_info(self):
"""Return device registry information for this entity."""
return {
"identifiers": {(DOMAIN, self._device.device_id)},
"manufacturer": "Abode",
"name": self._device.name,
"device_type": self._device.type,
}
def _update_callback(self, device):
"""Update the device state."""
self.schedule_update_ha_state()
class AbodeAutomation(AbodeEntity):
"""Representation of an Abode automation."""
def __init__(self, data, automation):
"""Initialize for Abode automation."""
super().__init__(data)
self._automation = automation
def update(self):
"""Update automation state."""
self._automation.refresh()
@property
def name(self):
"""Return the name of the automation."""
return self._automation.name
@property
def extra_state_attributes(self):
"""Return the state attributes."""
return {ATTR_ATTRIBUTION: ATTRIBUTION, "type": "CUE automation"}
@property
def unique_id(self):
"""Return a unique ID to use for this automation."""
return self._automation.automation_id
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test class for DRAC client wrapper.
"""
import time
from xml.etree import ElementTree
import mock
from ironic.common import exception
from ironic.drivers.modules.drac import client as drac_client
from ironic.tests import base
from ironic.tests.db import utils as db_utils
from ironic.tests.drivers.drac import utils as test_utils
INFO_DICT = db_utils.get_test_drac_info()
@mock.patch.object(drac_client, 'pywsman')
class DracClientTestCase(base.TestCase):
def setUp(self):
super(DracClientTestCase, self).setUp()
self.resource_uri = 'http://foo/wsman'
def test_wsman_enumerate(self, mock_client_pywsman):
mock_xml = test_utils.mock_wsman_root('<test></test>')
mock_pywsman_client = mock_client_pywsman.Client.return_value
mock_pywsman_client.enumerate.return_value = mock_xml
client = drac_client.Client(**INFO_DICT)
client.wsman_enumerate(self.resource_uri)
mock_options = mock_client_pywsman.ClientOptions.return_value
mock_options.set_flags.assert_called_once_with(
mock_client_pywsman.FLAG_ENUMERATION_OPTIMIZATION)
mock_options.set_max_elements.assert_called_once_with(100)
mock_pywsman_client.enumerate.assert_called_once_with(mock_options,
None, self.resource_uri)
mock_xml.context.assert_called_once_with()
@mock.patch.object(time, 'sleep', lambda seconds: None)
def test_wsman_enumerate_retry(self, mock_client_pywsman):
mock_xml = test_utils.mock_wsman_root('<test></test>')
mock_pywsman_client = mock_client_pywsman.Client.return_value
mock_pywsman_client.enumerate.side_effect = [None, mock_xml]
client = drac_client.Client(**INFO_DICT)
client.wsman_enumerate(self.resource_uri)
mock_options = mock_client_pywsman.ClientOptions.return_value
mock_options.set_flags.assert_called_once_with(
mock_client_pywsman.FLAG_ENUMERATION_OPTIMIZATION)
mock_options.set_max_elements.assert_called_once_with(100)
mock_pywsman_client.enumerate.assert_has_calls([
mock.call(mock_options, None, self.resource_uri),
mock.call(mock_options, None, self.resource_uri)
])
mock_xml.context.assert_called_once_with()
def test_wsman_enumerate_with_additional_pull(self, mock_client_pywsman):
mock_root = mock.Mock()
mock_root.string.side_effect = [test_utils.build_soap_xml(
[{'item1': 'test1'}]),
test_utils.build_soap_xml(
[{'item2': 'test2'}])]
mock_xml = mock.Mock()
mock_xml.root.return_value = mock_root
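        # Each non-None value returned by context() signals that more data is
        # available, triggering an additional pull(); the final None ends the
        # enumeration. The 42s are arbitrary placeholder context handles.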
mock_xml.context.side_effect = [42, 42, None]
mock_pywsman_client = mock_client_pywsman.Client.return_value
mock_pywsman_client.enumerate.return_value = mock_xml
mock_pywsman_client.pull.return_value = mock_xml
client = drac_client.Client(**INFO_DICT)
result = client.wsman_enumerate(self.resource_uri)
# assert the XML was merged
result_string = ElementTree.tostring(result)
self.assertIn('<item1>test1</item1>', result_string)
self.assertIn('<item2>test2</item2>', result_string)
mock_options = mock_client_pywsman.ClientOptions.return_value
mock_options.set_flags.assert_called_once_with(
mock_client_pywsman.FLAG_ENUMERATION_OPTIMIZATION)
mock_options.set_max_elements.assert_called_once_with(100)
mock_pywsman_client.enumerate.assert_called_once_with(mock_options,
None, self.resource_uri)
def test_wsman_enumerate_filter_query(self, mock_client_pywsman):
mock_xml = test_utils.mock_wsman_root('<test></test>')
mock_pywsman_client = mock_client_pywsman.Client.return_value
mock_pywsman_client.enumerate.return_value = mock_xml
client = drac_client.Client(**INFO_DICT)
filter_query = 'SELECT * FROM foo'
client.wsman_enumerate(self.resource_uri, filter_query=filter_query)
mock_options = mock_client_pywsman.ClientOptions.return_value
mock_filter = mock_client_pywsman.Filter.return_value
mock_filter.simple.assert_called_once_with(mock.ANY, filter_query)
mock_pywsman_client.enumerate.assert_called_once_with(mock_options,
mock_filter, self.resource_uri)
mock_xml.context.assert_called_once_with()
def test_wsman_enumerate_invalid_filter_dialect(self, mock_client_pywsman):
client = drac_client.Client(**INFO_DICT)
self.assertRaises(exception.DracInvalidFilterDialect,
client.wsman_enumerate, self.resource_uri,
filter_query='foo',
filter_dialect='invalid')
def test_wsman_invoke(self, mock_client_pywsman):
result_xml = test_utils.build_soap_xml(
[{'ReturnValue': drac_client.RET_SUCCESS}], self.resource_uri)
mock_xml = test_utils.mock_wsman_root(result_xml)
mock_pywsman_client = mock_client_pywsman.Client.return_value
mock_pywsman_client.invoke.return_value = mock_xml
method_name = 'method'
client = drac_client.Client(**INFO_DICT)
client.wsman_invoke(self.resource_uri, method_name)
mock_options = mock_client_pywsman.ClientOptions.return_value
mock_pywsman_client.invoke.assert_called_once_with(mock_options,
self.resource_uri, method_name, None)
@mock.patch.object(time, 'sleep', lambda seconds: None)
def test_wsman_invoke_retry(self, mock_client_pywsman):
result_xml = test_utils.build_soap_xml(
[{'ReturnValue': drac_client.RET_SUCCESS}], self.resource_uri)
mock_xml = test_utils.mock_wsman_root(result_xml)
mock_pywsman_client = mock_client_pywsman.Client.return_value
mock_pywsman_client.invoke.side_effect = [None, mock_xml]
method_name = 'method'
client = drac_client.Client(**INFO_DICT)
client.wsman_invoke(self.resource_uri, method_name)
mock_options = mock_client_pywsman.ClientOptions.return_value
mock_pywsman_client.invoke.assert_has_calls([
mock.call(mock_options, self.resource_uri, method_name, None),
mock.call(mock_options, self.resource_uri, method_name, None)
])
def test_wsman_invoke_with_selectors(self, mock_client_pywsman):
result_xml = test_utils.build_soap_xml(
[{'ReturnValue': drac_client.RET_SUCCESS}], self.resource_uri)
mock_xml = test_utils.mock_wsman_root(result_xml)
mock_pywsman_client = mock_client_pywsman.Client.return_value
mock_pywsman_client.invoke.return_value = mock_xml
method_name = 'method'
selectors = {'foo': 'bar'}
client = drac_client.Client(**INFO_DICT)
client.wsman_invoke(self.resource_uri, method_name,
selectors=selectors)
mock_options = mock_client_pywsman.ClientOptions.return_value
mock_pywsman_client.invoke.assert_called_once_with(mock_options,
self.resource_uri, method_name, None)
mock_options.add_selector.assert_called_once_with('foo', 'bar')
def test_wsman_invoke_with_properties(self, mock_client_pywsman):
result_xml = test_utils.build_soap_xml(
[{'ReturnValue': drac_client.RET_SUCCESS}], self.resource_uri)
mock_xml = test_utils.mock_wsman_root(result_xml)
mock_pywsman_client = mock_client_pywsman.Client.return_value
mock_pywsman_client.invoke.return_value = mock_xml
method_name = 'method'
properties = {'foo': 'bar'}
client = drac_client.Client(**INFO_DICT)
client.wsman_invoke(self.resource_uri, method_name,
properties=properties)
mock_options = mock_client_pywsman.ClientOptions.return_value
mock_pywsman_client.invoke.assert_called_once_with(mock_options,
self.resource_uri, method_name, None)
mock_options.add_property.assert_called_once_with('foo', 'bar')
def test_wsman_invoke_with_properties_including_a_list(self,
mock_client_pywsman):
result_xml = test_utils.build_soap_xml(
[{'ReturnValue': drac_client.RET_SUCCESS}], self.resource_uri)
mock_xml = test_utils.mock_wsman_root(result_xml)
mock_pywsman_client = mock_client_pywsman.Client.return_value
mock_pywsman_client.invoke.return_value = mock_xml
mock_request_xml = mock_client_pywsman.XmlDoc.return_value
method_name = 'method'
properties = {'foo': ['bar', 'baz']}
client = drac_client.Client(**INFO_DICT)
client.wsman_invoke(self.resource_uri, method_name,
properties=properties)
mock_options = mock_client_pywsman.ClientOptions.return_value
mock_pywsman_client.invoke.assert_called_once_with(mock_options,
self.resource_uri, method_name, mock_request_xml)
mock_request_xml.root().add.assert_has_calls([
mock.call(self.resource_uri, 'foo', 'bar'),
mock.call(self.resource_uri, 'foo', 'baz')
])
self.assertEqual(2, mock_request_xml.root().add.call_count)
def test_wsman_invoke_receives_error_return_value(self,
mock_client_pywsman):
result_xml = test_utils.build_soap_xml(
[{'ReturnValue': drac_client.RET_ERROR,
'Message': 'error message'}],
self.resource_uri)
mock_xml = test_utils.mock_wsman_root(result_xml)
mock_pywsman_client = mock_client_pywsman.Client.return_value
mock_pywsman_client.invoke.return_value = mock_xml
method_name = 'method'
client = drac_client.Client(**INFO_DICT)
self.assertRaises(exception.DracOperationFailed,
client.wsman_invoke, self.resource_uri, method_name)
mock_options = mock_client_pywsman.ClientOptions.return_value
mock_pywsman_client.invoke.assert_called_once_with(mock_options,
self.resource_uri, method_name, None)
def test_wsman_invoke_receives_unexpected_return_value(self,
mock_client_pywsman):
result_xml = test_utils.build_soap_xml(
[{'ReturnValue': '42'}], self.resource_uri)
mock_xml = test_utils.mock_wsman_root(result_xml)
mock_pywsman_client = mock_client_pywsman.Client.return_value
mock_pywsman_client.invoke.return_value = mock_xml
method_name = 'method'
client = drac_client.Client(**INFO_DICT)
self.assertRaises(exception.DracUnexpectedReturnValue,
client.wsman_invoke, self.resource_uri, method_name)
mock_options = mock_client_pywsman.ClientOptions.return_value
mock_pywsman_client.invoke.assert_called_once_with(mock_options,
self.resource_uri, method_name, None)
|
|
"""Status codes for the Sensicam
These were taken by conversion using h2py and then writing to a
dictionary from PCO_err.h. Additionally, this implements the
incredibly messy way of looking up error texts found in
PCO_errt.h. Why they chose to make things this complicated, I really
don't know.
"""
# Error code definitions
# ======================
SENSICAM_CODES = {
0: "PCO_NOERROR",
4095: "PCO_ERROR_CODE_MASK",
61440: "PCO_ERROR_LAYER_MASK",
16711680: "PCO_ERROR_DEVICE_MASK",
520093696: "PCO_ERROR_RESERVED_MASK",
536870912: "PCO_ERROR_IS_COMMON",
1073741824: "PCO_ERROR_IS_WARNING",
2147483648: "PCO_ERROR_IS_ERROR",
4096: "PCO_ERROR_FIRMWARE",
8192: "PCO_ERROR_DRIVER",
12288: "PCO_ERROR_SDKDLL",
16384: "PCO_ERROR_APPLICATION",
65536: "SC2_ERROR_POWER_CPLD",
131072: "SC2_ERROR_HEAD_UP",
196608: "SC2_ERROR_MAIN_UP",
262144: "SC2_ERROR_FWIRE_UP",
327680: "SC2_ERROR_MAIN_FPGA",
393216: "SC2_ERROR_HEAD_FPGA",
458752: "SC2_ERROR_MAIN_BOARD",
524288: "SC2_ERROR_HEAD_CPLD",
589824: "SC2_ERROR_SENSOR",
851968: "SC2_ERROR_POWER",
917504: "SC2_ERROR_GIGE",
983040: "SC2_ERROR_USB",
1048576: "SC2_ERROR_BOOT_FPGA",
1114112: "SC2_ERROR_BOOT_UP",
655360: "SC2_ERROR_SDKDLL",
2097152: "PCI540_ERROR_DRIVER",
2162688: "PCI525_ERROR_DRIVER",
3145728: "PCO_ERROR_DRIVER_FIREWIRE",
3211264: "PCO_ERROR_DRIVER_USB",
3276800: "PCO_ERROR_DRIVER_GIGE",
3342336: "PCO_ERROR_DRIVER_CAMERALINK",
720896: "SC2_ERROR_DRIVER",
655360: "PCO_ERROR_PCO_SDKDLL",
1114112: "PCO_ERROR_CONVERTDLL",
1179648: "PCO_ERROR_FILEDLL",
1245184: "PCO_ERROR_JAVANATIVEDLL",
1048576: "PCO_ERROR_CAMWARE",
2684354561: "PCO_ERROR_WRONGVALUE",
2684354562: "PCO_ERROR_INVALIDHANDLE",
2684354563: "PCO_ERROR_NOMEMORY",
2684354564: "PCO_ERROR_NOFILE",
2684354565: "PCO_ERROR_TIMEOUT",
2684354566: "PCO_ERROR_BUFFERSIZE",
2684354567: "PCO_ERROR_NOTINIT",
2684354568: "PCO_ERROR_DISKFULL",
2147491841: "PCO_ERROR_DRIVER_NOTINIT",
2147491845: "PCO_ERROR_DRIVER_WRONGOS",
2147491846: "PCO_ERROR_DRIVER_NODRIVER",
2147491847: "PCO_ERROR_DRIVER_IOFAILURE",
2147491848: "PCO_ERROR_DRIVER_CHECKSUMERROR",
2147491849: "PCO_ERROR_DRIVER_INVMODE",
2147491851: "PCO_ERROR_DRIVER_DEVICEBUSY",
2147491852: "PCO_ERROR_DRIVER_DATAERROR",
2147491853: "PCO_ERROR_DRIVER_NOFUNCTION",
2147491854: "PCO_ERROR_DRIVER_KERNELMEMALLOCFAILED",
2147491856: "PCO_ERROR_DRIVER_BUFFER_CANCELLED",
2147491857: "PCO_ERROR_DRIVER_INBUFFER_SIZE",
2147491858: "PCO_ERROR_DRIVER_OUTBUFFER_SIZE",
2147491859: "PCO_ERROR_DRIVER_FUNCTION_NOT_SUPPORTED",
2147491860: "PCO_ERROR_DRIVER_BUFFER_SYSTEMOFF",
2147491872: "PCO_ERROR_DRIVER_SYSERR",
2147491874: "PCO_ERROR_DRIVER_REGERR",
2147491875: "PCO_ERROR_DRIVER_WRONGVERS",
2147491876: "PCO_ERROR_DRIVER_FILE_READ_ERR",
2147491877: "PCO_ERROR_DRIVER_FILE_WRITE_ERR",
2147491878: "PCO_ERROR_DRIVER_LUT_MISMATCH",
2147491879: "PCO_ERROR_DRIVER_FORMAT_NOT_SUPPORTED",
2147491880: "PCO_ERROR_DRIVER_BUFFER_DMASIZE",
2147491881: "PCO_ERROR_DRIVER_WRONG_ATMEL_FOUND",
2147491882: "PCO_ERROR_DRIVER_WRONG_ATMEL_SIZE",
2147491883: "PCO_ERROR_DRIVER_WRONG_ATMEL_DEVICE",
2147491884: "PCO_ERROR_DRIVER_WRONG_BOARD",
2147491885: "PCO_ERROR_DRIVER_READ_FLASH_FAILED",
2147491886: "PCO_ERROR_DRIVER_HEAD_VERIFY_FAILED",
2147491887: "PCO_ERROR_DRIVER_HEAD_BOARD_MISMATCH",
2147491888: "PCO_ERROR_DRIVER_HEAD_LOST",
2147491889: "PCO_ERROR_DRIVER_HEAD_POWER_DOWN",
2147491890: "PCO_ERROR_DRIVER_CAMERA_BUSY",
2147491891: "PCO_ERROR_DRIVER_BUFFERS_PENDING",
2147495937: "PCO_ERROR_SDKDLL_NESTEDBUFFERSIZE",
2147495938: "PCO_ERROR_SDKDLL_BUFFERSIZE",
2147495939: "PCO_ERROR_SDKDLL_DIALOGNOTAVAILABLE",
2147495940: "PCO_ERROR_SDKDLL_NOTAVAILABLE",
2147495941: "PCO_ERROR_SDKDLL_SYSERR",
2147495942: "PCO_ERROR_SDKDLL_BADMEMORY",
2147495944: "PCO_ERROR_SDKDLL_BUFCNTEXHAUSTED",
2147495945: "PCO_ERROR_SDKDLL_ALREADYOPENED",
2147495946: "PCO_ERROR_SDKDLL_ERRORDESTROYWND",
2147495947: "PCO_ERROR_SDKDLL_BUFFERNOTVALID",
2147495948: "PCO_ERROR_SDKDLL_WRONGBUFFERNR",
2147495949: "PCO_ERROR_SDKDLL_DLLNOTFOUND",
2147495950: "PCO_ERROR_SDKDLL_BUFALREADYASSIGNED",
2147495951: "PCO_ERROR_SDKDLL_EVENTALREADYASSIGNED",
2147495952: "PCO_ERROR_SDKDLL_RECORDINGMUSTBEON",
2147495953: "PCO_ERROR_SDKDLL_DLLNOTFOUND_DIVZERO",
2147495954: "PCO_ERROR_SDKDLL_BUFFERALREADYQUEUED",
2147495955: "PCO_ERROR_SDKDLL_BUFFERNOTQUEUED",
3221237761: "PCO_WARNING_SDKDLL_BUFFER_STILL_ALLOKATED",
3221237762: "PCO_WARNING_SDKDLL_NO_IMAGE_BOARD",
3221237763: "PCO_WARNING_SDKDLL_COC_VALCHANGE",
3221237764: "PCO_WARNING_SDKDLL_COC_STR_SHORT",
2147500033: "PCO_ERROR_APPLICATION_PICTURETIMEOUT",
2147500034: "PCO_ERROR_APPLICATION_SAVEFILE",
2147500035: "PCO_ERROR_APPLICATION_FUNCTIONNOTFOUND",
2147500036: "PCO_ERROR_APPLICATION_DLLNOTFOUND",
2147500037: "PCO_ERROR_APPLICATION_WRONGBOARDNR",
2147500038: "PCO_ERROR_APPLICATION_FUNCTIONNOTSUPPORTED",
2147500039: "PCO_ERROR_APPLICATION_WRONGRES",
2147500040: "PCO_ERROR_APPLICATION_DISKFULL",
2147500041: "PCO_ERROR_APPLICATION_SET_VALUES",
3221241857: "PCO_WARNING_APPLICATION_RECORDERFULL",
2147487745: "PCO_ERROR_FIRMWARE_TELETIMEOUT",
2147487746: "PCO_ERROR_FIRMWARE_WRONGCHECKSUM",
2147487747: "PCO_ERROR_FIRMWARE_NOACK",
2147487748: "PCO_ERROR_FIRMWARE_WRONGSIZEARR",
2147487749: "PCO_ERROR_FIRMWARE_DATAINKONSISTENT",
2147487750: "PCO_ERROR_FIRMWARE_UNKNOWN_COMMAND",
2147487751: "PCO_ERROR_FIRMWARE_0x80001007",
2147487752: "PCO_ERROR_FIRMWARE_INITFAILED",
2147487753: "PCO_ERROR_FIRMWARE_CONFIGFAILED",
2147487754: "PCO_ERROR_FIRMWARE_HIGH_TEMPERATURE",
2147487755: "PCO_ERROR_FIRMWARE_VOLTAGEOUTOFRANGE",
2147487756: "PCO_ERROR_FIRMWARE_I2CNORESPONSE",
2147487757: "PCO_ERROR_FIRMWARE_CHECKSUMCODEFAILED",
2147487758: "PCO_ERROR_FIRMWARE_ADDRESSOUTOFRANGE",
2147487759: "PCO_ERROR_FIRMWARE_NODEVICEOPENED",
2147487760: "PCO_ERROR_FIRMWARE_BUFFERTOSMALL",
2147487761: "PCO_ERROR_FIRMWARE_TOMUCHDATA",
2147487762: "PCO_ERROR_FIRMWARE_WRITEERROR",
2147487763: "PCO_ERROR_FIRMWARE_READERROR",
2147487764: "PCO_ERROR_FIRMWARE_NOTRENDERED",
2147487765: "PCO_ERROR_FIRMWARE_NOHANDLEAVAILABLE",
2147487766: "PCO_ERROR_FIRMWARE_DATAOUTOFRANGE",
2147487767: "PCO_ERROR_FIRMWARE_NOTPOSSIBLE",
2147487768: "PCO_ERROR_FIRMWARE_UNSUPPORTED_SDRAM",
2147487769: "PCO_ERROR_FIRMWARE_DIFFERENT_SDRAMS",
2147487770: "PCO_ERROR_FIRMWARE_ONLY_ONE_SDRAM",
2147487771: "PCO_ERROR_FIRMWARE_NO_SDRAM_MOUNTED",
2147487772: "PCO_ERROR_FIRMWARE_SEGMENTS_TOO_LARGE",
2147487773: "PCO_ERROR_FIRMWARE_SEGMENT_OUT_OF_RANGE",
2147487774: "PCO_ERROR_FIRMWARE_VALUE_OUT_OF_RANGE",
2147487775: "PCO_ERROR_FIRMWARE_IMAGE_READ_NOT_POSSIBLE",
2147487776: "PCO_ERROR_FIRMWARE_NOT_SUPPORTED",
2147487777: "PCO_ERROR_FIRMWARE_ARM_NOT_SUCCESSFUL",
2147487778: "PCO_ERROR_FIRMWARE_RECORD_MUST_BE_OFF",
2147487781: "PCO_ERROR_FIRMWARE_SEGMENT_TOO_SMALL",
2147487782: "PCO_ERROR_FIRMWARE_COC_BUFFER_TO_SMALL",
2147487783: "PCO_ERROR_FIRMWARE_COC_DATAINKONSISTENT",
2147487784: "PCO_ERROR_FIRMWARE_CORRECTION_DATA_INVALID",
2147487785: "PCO_ERROR_FIRMWARE_CCDCAL_NOT_FINISHED",
2147487792: "PCO_ERROR_FIRMWARE_COC_TRIGGER_INVALID",
2147487793: "PCO_ERROR_FIRMWARE_COC_PIXELRATE_INVALID",
2147487794: "PCO_ERROR_FIRMWARE_COC_POWERDOWN_INVALID",
2147487795: "PCO_ERROR_FIRMWARE_COC_SENSORFORMAT_INVALID",
2147487796: "PCO_ERROR_FIRMWARE_COC_ROI_BINNING_INVALID",
2147487797: "PCO_ERROR_FIRMWARE_COC_ROI_DOUBLE_INVALID",
2147487798: "PCO_ERROR_FIRMWARE_COC_MODE_INVALID",
2147487799: "PCO_ERROR_FIRMWARE_COC_DELAY_INVALID",
2147487800: "PCO_ERROR_FIRMWARE_COC_EXPOS_INVALID",
2147487801: "PCO_ERROR_FIRMWARE_COC_TIMEBASE_INVALID",
2147487808: "PCO_ERROR_FIRMWARE_COC_PERIOD_INVALID",
2147487809: "PCO_ERROR_FIRMWARE_COC_MONITOR_INVALID",
2147487824: "PCO_ERROR_FIRMWARE_UNKNOWN_DEVICE",
2147487825: "PCO_ERROR_FIRMWARE_DEVICE_NOT_AVAIL",
2147487826: "PCO_ERROR_FIRMWARE_DEVICE_IS_OPEN",
2147487827: "PCO_ERROR_FIRMWARE_DEVICE_NOT_OPEN",
2147487828: "PCO_ERROR_FIRMWARE_NO_DEVICE_RESPONSE",
2147487829: "PCO_ERROR_FIRMWARE_WRONG_DEVICE_TYPE",
2147487830: "PCO_ERROR_FIRMWARE_ERASE_FLASH_FAILED",
2147487831: "PCO_ERROR_FIRMWARE_DEVICE_NOT_BLANK",
2147487832: "PCO_ERROR_FIRMWARE_ADDRESS_OUT_OF_RANGE",
2147487833: "PCO_ERROR_FIRMWARE_PROG_FLASH_FAILED",
2147487834: "PCO_ERROR_FIRMWARE_PROG_EEPROM_FAILED",
2147487835: "PCO_ERROR_FIRMWARE_READ_FLASH_FAILED",
2147487836: "PCO_ERROR_FIRMWARE_READ_EEPROM_FAILED",
2147487872: "PCO_ERROR_FIRMWARE_GIGE_COMMAND_IS_INVALID",
2147487873: "PCO_ERROR_FIRMWARE_GIGE_UART_NOT_OPERATIONAL",
2147487874: "PCO_ERROR_FIRMWARE_GIGE_ACCESS_DENIED",
2147487875: "PCO_ERROR_FIRMWARE_GIGE_COMMAND_UNKNOWN",
2147487876: "PCO_ERROR_FIRMWARE_GIGE_COMMAND_GROUP_UNKNOWN",
2147487877: "PCO_ERROR_FIRMWARE_GIGE_INVALID_COMMAND_PARAMETERS",
2147487878: "PCO_ERROR_FIRMWARE_GIGE_INTERNAL_ERROR",
2147487879: "PCO_ERROR_FIRMWARE_GIGE_INTERFACE_BLOCKED",
2147487880: "PCO_ERROR_FIRMWARE_GIGE_INVALID_SESSION",
2147487881: "PCO_ERROR_FIRMWARE_GIGE_BAD_OFFSET",
2147487882: "PCO_ERROR_FIRMWARE_GIGE_NV_WRITE_IN_PROGRESS",
2147487883: "PCO_ERROR_FIRMWARE_GIGE_DOWNLOAD_BLOCK_LOST",
2147487884: "PCO_ERROR_FIRMWARE_GIGE_DOWNLOAD_INVALID_LDR",
2147487888: "PCO_ERROR_FIRMWARE_GIGE_DRIVER_IMG_PKT_LOST",
2147487889: "PCO_ERROR_FIRMWARE_GIGE_BANDWIDTH_CONFLICT",
2147488000: "PCO_ERROR_FIRMWARE_FLICAM_EXT_MOD_OUT_OF_RANGE",
2147488001: "PCO_ERROR_FIRMWARE_FLICAM_SYNC_PLL_NOT_LOCKED",
3221229569: "PCO_WARNING_FIRMWARE_FUNC_ALREADY_ON",
3221229570: "PCO_WARNING_FIRMWARE_FUNC_ALREADY_OFF",
3221229571: "PCO_WARNING_FIRMWARE_HIGH_TEMPERATURE",
3221229572: "PCO_WARNING_FIRMWARE_OFFSET_NOT_LOCKED"
}
# Allow for using the text instead of the numeric codes as the
# dictionary keys.
_inverse = {v:k for k, v in SENSICAM_CODES.items()}
SENSICAM_CODES.update(_inverse)
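# Illustrative usage (not part of the generated table): after the update above
# the mapping can be indexed either way, for example
#     SENSICAM_CODES[0]                    -> "PCO_NOERROR"
#     SENSICAM_CODES["PCO_ERROR_TIMEOUT"]  -> 2684354565 (0xA0000005)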
# Error texts
# ===========
PCO_ERROR_COMMON_TXT = (
"No error.", # 0x00000000 PCO_NOERROR
"Function call with wrong parameter.", # 0xA0000001 PCO_ERROR_WRONGVALUE
"Handle is invalid.", # 0xA0000002 PCO_ERROR_INVALIDHANDLE
"No memory available.", # 0xA0000003 PCO_ERROR_NOMEMORY
"A file handle could not be opened.", # 0xA0000004 PCO_ERROR_NOFILE
"Timeout in function.", # 0xA0000005 PCO_ERROR_TIMEOUT
"A buffer is to small.", # 0xA0000006 PCO_ERROR_BUFFERSIZE
"The called module is not initialized.", # 0xA0000007 PCO_ERROR_NOTINIT
"Disk full.", # 0xA0000008 PCO_ERROR_DISKFULL
"", # 0xA0000009
)
PCO_ERROR_DRIVER_TXT = (
"No error.", # 0x00002000 PCO_NOERROR
"Initialization failed; no camera connected.", # 0x80002001 PCO_ERROR_DRIVER_NOTINIT
"", # 0x80002002
"", # 0x80002003
"", # 0x80002004
"Wrong driver for this OS.", # 0x80002005 PCO_ERROR_DRIVER_WRONGOS
"Open driver or driver class failed.", # 0x80002006 PCO_ERROR_DRIVER_NODRIVER
"I/O operation failed.", # 0x80002007 PCO_ERROR_DRIVER_IOFAILURE
"Error in telegram checksum.", # 0x80002008 PCO_ERROR_DRIVER_CHECKSUMERROR
"Invalid Camera mode.", # 0x80002009 PCO_ERROR_DRIVER_INVMODE
"", # 0x8000200A
"Device is hold by an other process.", # 0x8000200B PCO_ERROR_DRIVER_DEVICEBUSY
"Error in reading or writing data to board.", # 0x8000200C PCO_ERROR_DRIVER_DATAERROR
"No function specified.", # 0x8000200D PCO_ERROR_DRIVER_NOFUNCTION
"Kernel Memory allocation in driver failed.", # 0x8000200E PCO_ERROR_DRIVER_KERNELMEMALLOCFAILED
"", # 0x8000200F
"Buffer was cancelled.", # 0x80002010 PCO_ERROR_DRIVER_BUFFER_CANCELLED
"Input buffer too small for this IO-call.", # 0x80002011 PCO_ERROR_DRIVER_INBUFFER_TO_SMALL
"Output buffer too small for this IO-call.", # 0x80002012 PCO_ERROR_DRIVER_OUTBUFFER_TO_SMALL
"Driver IO-Function not supported.", # 0x80002013 PCO_ERROR_DRIVER_FUNCTION_NOT_SUPPORTED
"Buffer failed because device power off.", # 0x80002014 PCO_ERROR_DRIVER_BUFFER_SYSTEMOFF
"", "", "", # 0x80002015 - 0x80002017
"", "", "", "", "", "", "", "", # 0x80002018 - 0x8000201F
"A call to a windows-function fails.", # 0x80002020 PCO_ERROR_DRIVER_SYSERR
"", # 0x80002021
"Error in reading/writing to registry.", # 0x80002022 PCO_ERROR_DRIVER_REGERR
"Need newer called vxd or dll.", # 0x80002023 PCO_ERROR_DRIVER_WRONGVERS
"Error while reading from file.", # 0x80002024 PCO_ERROR_DRIVER_FILE_READ_ERR
"Error while writing to file.", # 0x80002025 PCO_ERROR_DRIVER_FILE_WRITE_ERR
"Camera and dll lut do not match.", # 0x80002026 PCO_ERROR_DRIVER_LUT_MISMATCH
"Grabber does not support transfer format.", # 0x80002027 PCO_ERROR_DRIVER_FORMAT_NOT_SUPPORTED
"DMA Error not enough data transferred.", # 0x80002028 PCO_ERROR_DRIVER_BUFFER_DMASIZE
"version verify failed wrong typ id.", # 0x80002029 PCO_ERROR_DRIVER_WRONG_ATMEL_FOUND
"version verify failed wrong size.", # 0x8000202A PCO_ERROR_DRIVER_WRONG_ATMEL_SIZE
"version verify failed wrong device id.", # 0x8000202B PCO_ERROR_DRIVER_WRONG_ATMEL_DEVICE
"firmware is not supported from this driver.", # 0x8000202C PCO_ERROR_DRIVER_WRONG_BOARD
"board firmware verify failed.", # 0x8000202D PCO_ERROR_DRIVER_READ_FLASH_FAILED
"camera head is not recognized correctly.", # 0x8000202E PCO_ERROR_DRIVER_HEAD_VERIFY_FAILED
"firmware does not support camera head.", # 0x8000202F PCO_ERROR_DRIVER_HEAD_BOARD_MISMATCH
"camera head is not connected.", # 0x80002030 PCO_ERROR_DRIVER_HEAD_LOST
"camera head power down.", # 0x80002031 PCO_ERROR_DRIVER_HEAD_POWER_DOWN
"camera started." # 0x80002032 PCO_ERROR_DRIVER_CAMERA_BUSY
"camera busy." # 0x80002033 PCO_ERROR_DRIVER_BUFFERS_PENDING
)
PCO_ERROR_SDKDLL_TXT = (
"No error.", # 0x00000000 PCO_NOERROR
"wSize of an embedded buffer is to small.", # 0x80003001 PCO_ERROR_SDKDLL_NESTEDBUFFERSIZE
"wSize of a buffer is to small.", # 0x80003002 PCO_ERROR_SDKDLL_BUFFERSIZE
"A dialog is not available.", # 0x80003003 PCO_ERROR_SDKDLL_DIALOGNOTAVAILABLE
"Option is not available.", # 0x80003004 PCO_ERROR_SDKDLL_NOTAVAILABLE
"A call to a windows-function fails.", # 0x80003005 PCO_ERROR_SDKDLL_SYSERR
"Memory area is invalid.", # 0x80003006 PCO_ERROR_SDKDLL_BADMEMORY
"", # 0x80003007
"Number of available buffers is exhausted.", # 0x80003008 PCO_ERROR_SDKDLL_BUFCNTEXHAUSTED
"Dialog is already open.", # 0x80003009 PCO_ERROR_SDKDLL_ALREADYOPENED
"Error while destroying dialog.", # 0x8000300A PCO_ERROR_SDKDLL_ERRORDESTROYWND
"A requested buffer is not available.", # 0x8000300B PCO_ERROR_SDKDLL_BUFFERNOTVALID
"The buffer nr is out of range.", # 0x8000300C PCO_ERROR_SDKDLL_WRONGBUFFERNR
"A DLL could not be found.", # 0x8000300D PCO_ERROR_SDKDLL_DLLNOTFOUND
"Buffer already assigned to another buffernr.", # 0x8000300E PCO_ERROR_SDKDLL_BUFALREADYASSIGNED
"Event already assigned to another buffernr.", # 0x8000300F PCO_ERROR_SDKDLL_EVENTALREADYASSIGNED
"Recording must be active.", # 0x80003010 PCO_ERROR_SDKDLL_RECORDINGMUSTBEON
"A DLL could not be found, due to div by zero.", # 0x80003011 PCO_ERROR_SDKDLL_DLLNOTFOUND_DIVZERO
"Buffer is already queued.", # 0x80003012 PCO_ERROR_SDKDLL_BUFFERALREADYQUEUED
"Buffer is not queued.", # 0x80003013 PCO_ERROR_SDKDLL_BUFFERNOTQUEUED
)
PCO_ERROR_APPLICATION_TXT = (
"No error.", # 0x00000000 PCO_NOERROR
"Error while waiting for a picture.", # 0x80004001 PCO_ERROR_APPLICATION_PICTURETIMEOUT
"Error while saving file.", # 0x80004002 PCO_ERROR_APPLICATION_SAVEFILE
"A function inside a DLL could not be found.", # 0x80004003 PCO_ERROR_APPLICATION_FUNCTIONNOTFOUND
"A DLL could not be found.", # 0x80004004 PCO_ERROR_APPLICATION_DLLNOTFOUND
"The board number is out of range.", # 0x80004005 PCO_ERROR_APPLICATION_WRONGBOARDNR
"The decive does not support this function.", # 0x80004006 PCO_ERROR_APPLICATION_FUNCTIONNOTSUPPORTED
"Started Math with different resolution than reference.",# 0x80004007 PCO_ERROR_APPLICATION_WRONGRES
"Disk full.", # 0x80004008 PCO_ERROR_APPLICATION_DISKFULL
"Error setting values to camera.", # 0x80004009 PCO_ERROR_APPLICATION_SET_VALUES
)
PCO_ERROR_FIRMWARE_TXT = (
"No error.", # 0x00000000 PCO_NOERROR
"Timeout in telegram.", # 0x80001001 PCO_ERROR_FIRMWARE_TELETIMEOUT
"Wrong checksum in telegram.", # 0x80001002 PCO_ERROR_FIRMWARE_WRONGCHECKSUM
"No acknowledge.", # 0x80001003 PCO_ERROR_FIRMWARE_NOACK
"Wrong size in array.", # 0x80001004 PCO_ERROR_FIRMWARE_WRONGSIZEARR
"Data is inkonsistent.", # 0x80001005 PCO_ERROR_FIRMWARE_DATAINKONSISTENT
"Unknown command telegram.", # 0x80001006 PCO_ERROR_FIRMWARE_UNKNOWN_COMMAND
"", # 0x80001007
"FPGA init failed.", # 0x80001008 PCO_ERROR_FIRMWARE_INITFAILED
"FPGA configuration failed.", # 0x80001009 PCO_ERROR_FIRMWARE_CONFIGFAILED
"High temperature.", # 0x8000100A PCO_ERROR_FIRMWARE_HIGH_TEMPERATURE
"Supply voltage out of range.", # 0x8000100B PCO_ERROR_FIRMWARE_VOLTAGEOUTOFRANGE
"No response from I2C Device.", # 0x8000100C PCO_ERROR_FIRMWARE_I2CNORESPONSE
"Checksum in code area is wrong.", # 0x8000100D PCO_ERROR_FIRMWARE_CHECKSUMCODEFAILED
"An address is out of range.", # 0x8000100E PCO_ERROR_FIRMWARE_ADDRESSOUTOFRANGE
"No device is open for update.", # 0x8000100F PCO_ERROR_FIRMWARE_NODEVICEOPENED
"The delivered buffer is to small.", # 0x80001010 PCO_ERROR_FIRMWARE_BUFFERTOSMALL
"To much data delivered to function.", # 0x80001011 PCO_ERROR_FIRMWARE_TOMUCHDATA
"Error while writing to camera.", # 0x80001012 PCO_ERROR_FIRMWARE_WRITEERROR
"Error while reading from camera.", # 0x80001013 PCO_ERROR_FIRMWARE_READERROR
"Was not able to render graph.", # 0x80001014 PCO_ERROR_FIRMWARE_NOTRENDERED
"The handle is not known.", # 0x80001015 PCO_ERROR_FIRMWARE_NOHANDLEAVAILABLE
"Value is out of allowed range.", # 0x80001016 PCO_ERROR_FIRMWARE_DATAOUTOFRANGE
"Desired function not possible.", # 0x80001017 PCO_ERROR_FIRMWARE_NOTPOSSIBLE
"SDRAM type read from SPD unknown.", # 0x80001018 PCO_ERROR_FIRMWARE_UNSUPPORTED_SDRAM
"Different SDRAM modules mounted.", # 0x80001019 PCO_ERROR_FIRMWARE_DIFFERENT_SDRAMS
"For CMOS sensor two modules needed.", # 0x8000101A PCO_ERROR_FIRMWARE_ONLY_ONE_SDRAM
"No SDRAM mounted.", # 0x8000101B PCO_ERROR_FIRMWARE_NO_SDRAM_MOUNTED
"Segment size is too large.", # 0x8000101C PCO_ERROR_FIRMWARE_SEGMENTS_TOO_LARGE
"Segment is out of range.", # 0x8000101D PCO_ERROR_FIRMWARE_SEGMENT_OUT_OF_RANGE
"Value is out of range.", # 0x8000101E PCO_ERROR_FIRMWARE_VALUE_OUT_OF_RANGE
"Image read not possible.", # 0x8000101F PCO_ERROR_FIRMWARE_IMAGE_READ_NOT_POSSIBLE
"Command/data not supported by this hardware.", # 0x80001020 PCO_ERROR_FIRMWARE_NOT_SUPPORTED
"Starting record failed due not armed.", # 0x80001021 PCO_ERROR_FIRMWARE_ARM_NOT_SUCCESSFUL
"Arm is not possible while record active.", # 0x80001022 PCO_ERROR_FIRMWARE_RECORD_MUST_BE_OFF
"", # 0x80001023
"", # 0x80001024
"Segment too small for image.", # 0x80001025 PCO_ERROR_FIRMWARE_SEGMENT_TOO_SMALL
"COC built is too large for internal memory.", # 0x80001026 PCO_ERROR_FIRMWARE_COC_BUFFER_TO_SMALL
"COC has invalid data at fix position.", # 0x80001027 PCO_ERROR_FIRMWARE_COC_DATAINKONSISTENT
"Correction data not valid.", # 0x80001028 PCO_ERROR_FIRMWARE_CORRECTION_DATA_INVALID
"CCD calibration not finished.", # 0x80001029 PCO_ERROR_FIRMWARE_CCDCAL_NOT_FINISHED
"", # 0x8000102A
"", # 0x8000102B
"", # 0x8000102C
"", # 0x8000102D
"", # 0x8000102E
"", # 0x8000102F
"COC Trigger setting invalid.", # 0x80001030 PCO_ERROR_FIRMWARE_COC_TRIGGER_INVALID
"COC PixelRate setting invalid.", # 0x80001031 PCO_ERROR_FIRMWARE_COC_PIXELRATE_INVALID
"COC Powerdown setting invalid.", # 0x80001032 PCO_ERROR_FIRMWARE_COC_POWERDOWN_INVALID
"COC Sensorformat setting invalid.", # 0x80001033 PCO_ERROR_FIRMWARE_COC_SENSORFORMAT_INVALID
"COC ROI to Binning setting invalid.", # 0x80001034 PCO_ERROR_FIRMWARE_COC_ROI_BINNING_INVALID
"COC ROI to Double setting invalid.", # 0x80001035 PCO_ERROR_FIRMWARE_COC_ROI_DOUBLE_INVALID
"COC Mode setting invalid.", # 0x80001036 PCO_ERROR_FIRMWARE_COC_MODE_INVALID
"COC Delay setting invalid.", # 0x80001037 PCO_ERROR_FIRMWARE_COC_DELAY_INVALID
"COC Exposure setting invalid.", # 0x80001038 PCO_ERROR_FIRMWARE_COC_EXPOS_INVALID
"COC Timebase setting invalid.", # 0x80001039 PCO_ERROR_FIRMWARE_COC_TIMEBASE_INVALID
"", "", "", "", "", "", # 0x8000103A - 0x8000103F
"COC modulate period time invalid.", # 0x80001040 PCO_ERROR_FIRMWARE_COC_PERIOD_INVALID
"COC modulate monitor time invalid", # 0x80001041 PCO_ERROR_FIRMWARE_COC_MONITOR_INVALID
"", "", "", "", "", "", # 0x80001042 - 0x80001047
"", "", "", "", "", "", "", "", # 0x80001048 - 0x8000104F
"Attempt to open unknown device for update.", # 0x80001050 PCO_ERROR_FIRMWARE_UNKNOWN_DEVICE
"Attempt to open device not available.", # 0x80001051 PCO_ERROR_FIRMWARE_DEVICE_NOT_AVAIL
"This or other device is already open.", # 0x80001052 PCO_ERROR_FIRMWARE_DEVICE_IS_OPEN
"No device opened for update command.", # 0x80001053 PCO_ERROR_FIRMWARE_DEVICE_NOT_OPEN
"Device to open does not respond.", # 0x80001054 PCO_ERROR_FIRMWARE_NO_DEVICE_RESPONSE
"Device to open is wrong device type.", # 0x80001055 PCO_ERROR_FIRMWARE_WRONG_DEVICE_TYPE
"Erasing device flash/firmware failed.", # 0x80001056 PCO_ERROR_FIRMWARE_ERASE_FLASH_FAILED
"Device to program is not blank.", # 0x80001057 PCO_ERROR_FIRMWARE_DEVICE_NOT_BLANK
"Device address is out of range.", # 0x80001058 PCO_ERROR_FIRMWARE_ADDRESS_OUT_OF_RANGE
"Programming device flash/firmware failed.", # 0x80001059 PCO_ERROR_FIRMWARE_PROG_FLASH_FAILED
"Programming device EEPROM failed.", # 0x8000105A PCO_ERROR_FIRMWARE_PROG_EEPROM_FAILED
"Reading device flash/firmware failed.", # 0x8000105B PCO_ERROR_FIRMWARE_READ_FLASH_FAILED
"Reading device EEPROM failed.", # 0x8000105C PCO_ERROR_FIRMWARE_READ_EEPROM_FAILED
"", "", "", # 0x8000105D - 0x8000105F
"", "", "", "", "", "", "", "", # 0x80001060 - 0x80001067
"", "", "", "", "", "", "", "", # 0x80001068 - 0x8000106F
"", "", "", "", "", "", "", "", # 0x80001070 - 0x80001077
"", "", "", "", "", "", "", "", # 0x80001078 - 0x8000107F
"Command is invalid.", # 0x80001080 PCO_ERROR_FIRMWARE_GIGE_COMMAND_IS_INVALID
"Camera UART not operational.", # 0x80001081 PCO_ERROR_FIRMWARE_GIGE_UART_NOT_OPERATIONAL
"Access denied. Debugging? See pco_err.h!", # 0x80001082 PCO_ERROR_FIRMWARE_GIGE_ACCESS_DENIED
"Command unknown.", # 0x80001083 PCO_ERROR_FIRMWARE_GIGE_COMMAND_UNKNOWN
"Command group unknown.", # 0x80001084 PCO_ERROR_FIRMWARE_GIGE_COMMAND_GROUP_UNKNOWN
"Invalid command parameters.", # 0x80001085 PCO_ERROR_FIRMWARE_GIGE_INVALID_COMMAND_PARAMETERS
"Internal error.", # 0x80001086 PCO_ERROR_FIRMWARE_GIGE_INTERNAL_ERROR
"Interface blocked.", # 0x80001087 PCO_ERROR_FIRMWARE_GIGE_INTERFACE_BLOCKED
"Invalid session.", # 0x80001088 PCO_ERROR_FIRMWARE_GIGE_INVALID_SESSION
"Bad offset.", # 0x80001089 PCO_ERROR_FIRMWARE_GIGE_BAD_OFFSET
"NV write in progress.", # 0x8000108a PCO_ERROR_FIRMWARE_GIGE_NV_WRITE_IN_PROGRESS
"Download block lost.", # 0x8000108b PCO_ERROR_FIRMWARE_GIGE_DOWNLOAD_BLOCK_LOST
"Flash loader block invalid.", # 0x8000108c PCO_ERROR_FIRMWARE_GIGE_DOWNLOAD_INVALID_LDR
"", "", "", # 0x8000108d - 0x8000108F
"Image packet lost.", # 0x80001090 PCO_ERROR_FIRMWARE_GIGE_DRIVER_IMG_PKT_LOST
"GiGE Data bandwidth conflict.", # 0x80001091 PCO_ERROR_FIRMWARE_GIGE_BANDWIDTH_CONFLICT
"", "", "", "", "", # 0x80001092 - 0x80001096
"", "", "", "", "", # 0x80001097 - 0x8000109B
"", "", "", "", # 0x8000109C - 0x8000109F
"External modulation frequency out of range.", # 0x80001100 PCO_ERROR_FIRMWARE_FLICAM_EXT_MOD_OUT_OF_RANGE
"Sync PLL not locked." # 0x80001101 PCO_ERROR_FIRMWARE_FLICAM_SYNC_PLL_NOT_LOCKED
)
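# Illustrative lookup helper
# ==========================
# A minimal sketch (not part of the original PCO headers) of how the masks and
# text tuples above could be combined to resolve an error text, roughly
# mirroring the PCO_errt.h lookup described in the module docstring. It is a
# simplification: warning codes and device-specific texts are not
# distinguished, and unknown codes fall back to the symbolic name or a hex
# string.
_LAYER_TEXTS = {
    4096: PCO_ERROR_FIRMWARE_TXT,       # PCO_ERROR_FIRMWARE
    8192: PCO_ERROR_DRIVER_TXT,         # PCO_ERROR_DRIVER
    12288: PCO_ERROR_SDKDLL_TXT,        # PCO_ERROR_SDKDLL
    16384: PCO_ERROR_APPLICATION_TXT,   # PCO_ERROR_APPLICATION
}

def lookup_error_text(code):
    """Return a human-readable text for a numeric Sensicam status code."""
    if code == 0:
        return PCO_ERROR_COMMON_TXT[0]
    index = code & SENSICAM_CODES["PCO_ERROR_CODE_MASK"]
    if code & SENSICAM_CODES["PCO_ERROR_IS_COMMON"]:
        texts = PCO_ERROR_COMMON_TXT
    else:
        texts = _LAYER_TEXTS.get(code & SENSICAM_CODES["PCO_ERROR_LAYER_MASK"], ())
    if index < len(texts) and texts[index]:
        return texts[index]
    return SENSICAM_CODES.get(code, "Unknown status code 0x{0:08X}".format(code))

# Example (illustrative):
#     lookup_error_text(SENSICAM_CODES["PCO_ERROR_DRIVER_NOTINIT"])
#     -> "Initialization failed; no camera connected."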
|
|
from __future__ import print_function
import functools
import itertools
import re
import sys
import warnings
import numpy as np
import numba.unittest_support as unittest
from numba import types, typing, utils, typeof, numpy_support, njit
from numba.compiler import compile_isolated, Flags, DEFAULT_FLAGS
from numba.numpy_support import from_dtype
from numba import vectorize
from numba.config import PYVERSION
from numba.errors import LoweringError, TypingError
from .support import (TestCase, CompilationCache, skip_on_numpy_16,
is_on_numpy_16, MemoryLeakMixin)
from numba.typing.npydecl import supported_ufuncs, all_ufuncs
is32bits = tuple.__itemsize__ == 4
iswindows = sys.platform.startswith('win32')
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
no_pyobj_flags = Flags()
enable_nrt_flags = Flags()
enable_nrt_flags.set("nrt")
def _unimplemented(func):
"""An 'expectedFailure' like decorator that only expects compilation errors
caused by unimplemented functions that fail in no-python mode"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except TypingError:
raise unittest._ExpectedFailure(sys.exc_info())
        raise unittest._UnexpectedSuccess
    return wrapper
def _make_ufunc_usecase(ufunc):
ldict = {}
arg_str = ','.join(['a{0}'.format(i) for i in range(ufunc.nargs)])
func_str = 'def fn({0}):\n np.{1}({0})'.format(arg_str, ufunc.__name__)
exec(func_str, globals(), ldict)
fn = ldict['fn']
fn.__name__ = '{0}_usecase'.format(ufunc.__name__)
return fn
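# For example (illustrative only), _make_ufunc_usecase(np.add) builds a
# function equivalent to:
#
#     def fn(a0, a1, a2):
#         np.add(a0, a1, a2)
#
# i.e. ufunc.nargs covers both the inputs and the explicit output argument.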
def _make_unary_ufunc_usecase(ufunc):
ufunc_name = ufunc.__name__
ldict = {}
exec("def fn(x,out):\n np.{0}(x,out)".format(ufunc_name), globals(), ldict)
fn = ldict["fn"]
fn.__name__ = "{0}_usecase".format(ufunc_name)
return fn
def _make_unary_ufunc_op_usecase(ufunc_op):
ldict = {}
exec("def fn(x):\n return {0}(x)".format(ufunc_op), globals(), ldict)
fn = ldict["fn"]
fn.__name__ = "usecase_{0}".format(hash(ufunc_op))
return fn
def _make_binary_ufunc_usecase(ufunc):
ufunc_name = ufunc.__name__
ldict = {}
exec("def fn(x,y,out):\n np.{0}(x,y,out)".format(ufunc_name), globals(), ldict);
fn = ldict['fn']
fn.__name__ = "{0}_usecase".format(ufunc_name)
return fn
def _make_binary_ufunc_op_usecase(ufunc_op):
ldict = {}
exec("def fn(x,y):\n return x{0}y".format(ufunc_op), globals(), ldict)
fn = ldict["fn"]
fn.__name__ = "usecase_{0}".format(hash(ufunc_op))
return fn
def _as_dtype_value(tyargs, args):
"""Convert python values into numpy scalar objects.
"""
return [np.dtype(str(ty)).type(val) for ty, val in zip(tyargs, args)]
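# For example (illustrative): _as_dtype_value((types.int32, types.float64), (1, 0.5))
# returns [np.int32(1), np.float64(0.5)].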
class TestUFuncs(MemoryLeakMixin, TestCase):
def setUp(self):
super(TestUFuncs, self).setUp()
self.inputs = [
(np.uint32(0), types.uint32),
(np.uint32(1), types.uint32),
(np.int32(-1), types.int32),
(np.int32(0), types.int32),
(np.int32(1), types.int32),
(np.uint64(0), types.uint64),
(np.uint64(1), types.uint64),
(np.int64(-1), types.int64),
(np.int64(0), types.int64),
(np.int64(1), types.int64),
(np.float32(-0.5), types.float32),
(np.float32(0.0), types.float32),
(np.float32(0.5), types.float32),
(np.float64(-0.5), types.float64),
(np.float64(0.0), types.float64),
(np.float64(0.5), types.float64),
(np.array([0,1], dtype='u4'), types.Array(types.uint32, 1, 'C')),
(np.array([0,1], dtype='u8'), types.Array(types.uint64, 1, 'C')),
(np.array([-1,0,1], dtype='i4'), types.Array(types.int32, 1, 'C')),
(np.array([-1,0,1], dtype='i8'), types.Array(types.int64, 1, 'C')),
(np.array([-0.5, 0.0, 0.5], dtype='f4'), types.Array(types.float32, 1, 'C')),
(np.array([-0.5, 0.0, 0.5], dtype='f8'), types.Array(types.float64, 1, 'C')),
]
self.cache = CompilationCache()
def _determine_output_type(self, input_type, int_output_type=None,
float_output_type=None):
ty = input_type
if isinstance(ty, types.Array):
ty = ty.dtype
if ty in types.signed_domain:
if int_output_type:
output_type = types.Array(int_output_type, 1, 'C')
else:
output_type = types.Array(ty, 1, 'C')
elif ty in types.unsigned_domain:
if int_output_type:
output_type = types.Array(int_output_type, 1, 'C')
else:
output_type = types.Array(ty, 1, 'C')
else:
if float_output_type:
output_type = types.Array(float_output_type, 1, 'C')
else:
output_type = types.Array(ty, 1, 'C')
return output_type
def unary_ufunc_test(self, ufunc, flags=enable_pyobj_flags,
skip_inputs=[], additional_inputs=[],
int_output_type=None, float_output_type=None):
# Necessary to avoid some Numpy warnings being silenced, despite
# the simplefilter() call below.
self.reset_module_warnings(__name__)
ufunc = _make_unary_ufunc_usecase(ufunc)
inputs = list(self.inputs)
inputs.extend(additional_inputs)
pyfunc = ufunc
for input_tuple in inputs:
input_operand = input_tuple[0]
input_type = input_tuple[1]
if input_type in skip_inputs:
continue
output_type = self._determine_output_type(
input_type, int_output_type, float_output_type)
cr = self.cache.compile(pyfunc, (input_type, output_type),
flags=flags)
cfunc = cr.entry_point
if isinstance(input_operand, np.ndarray):
result = np.zeros(input_operand.size,
dtype=output_type.dtype.name)
expected = np.zeros(input_operand.size,
dtype=output_type.dtype.name)
else:
result = np.zeros(1, dtype=output_type.dtype.name)
expected = np.zeros(1, dtype=output_type.dtype.name)
invalid_flag = False
with warnings.catch_warnings(record=True) as warnlist:
warnings.simplefilter('always')
pyfunc(input_operand, expected)
warnmsg = "invalid value encountered"
for thiswarn in warnlist:
if (issubclass(thiswarn.category, RuntimeWarning)
and str(thiswarn.message).startswith(warnmsg)):
invalid_flag = True
cfunc(input_operand, result)
# Need special checks if NaNs are in results
if np.isnan(expected).any() or np.isnan(result).any():
self.assertTrue(np.allclose(np.isnan(result), np.isnan(expected)))
if not np.isnan(expected).all() and not np.isnan(result).all():
self.assertTrue(np.allclose(result[np.invert(np.isnan(result))],
expected[np.invert(np.isnan(expected))]))
else:
match = np.all(result == expected) or np.allclose(result,
expected)
if not match:
if invalid_flag:
# Allow output to mismatch for invalid input
print("Output mismatch for invalid input",
input_tuple, result, expected)
else:
msg = '\n'.join(["ufunc '{0}' failed",
"inputs ({1}):", "{2}",
"got({3})", "{4}",
"expected ({5}):", "{6}"
]).format(ufunc.__name__,
input_type, input_operand,
output_type, result,
expected.dtype, expected)
self.fail(msg)
def unary_op_test(self, operator, flags=enable_nrt_flags,
skip_inputs=[], additional_inputs=[],
int_output_type=None, float_output_type=None):
operator_func = _make_unary_ufunc_op_usecase(operator)
inputs = list(self.inputs)
inputs.extend(additional_inputs)
pyfunc = operator_func
for input_tuple in inputs:
input_operand, input_type = input_tuple
if ((input_type in skip_inputs) or
(not isinstance(input_type, types.Array))):
continue
cr = self.cache.compile(pyfunc, (input_type,),
flags=flags)
cfunc = cr.entry_point
expected = pyfunc(input_operand)
result = cfunc(input_operand)
np.testing.assert_array_almost_equal(expected, result)
def binary_ufunc_test(self, ufunc, flags=enable_pyobj_flags,
skip_inputs=[], additional_inputs=[],
int_output_type=None, float_output_type=None):
ufunc = _make_binary_ufunc_usecase(ufunc)
inputs = list(self.inputs) + additional_inputs
pyfunc = ufunc
for input_tuple in inputs:
input_operand = input_tuple[0]
input_type = input_tuple[1]
if input_type in skip_inputs:
continue
output_type = self._determine_output_type(
input_type, int_output_type, float_output_type)
cr = self.cache.compile(pyfunc, (input_type, input_type, output_type),
flags=flags)
cfunc = cr.entry_point
if isinstance(input_operand, np.ndarray):
result = np.zeros(input_operand.size,
dtype=output_type.dtype.name)
expected = np.zeros(input_operand.size,
dtype=output_type.dtype.name)
else:
result = np.zeros(1, dtype=output_type.dtype.name)
expected = np.zeros(1, dtype=output_type.dtype.name)
cfunc(input_operand, input_operand, result)
pyfunc(input_operand, input_operand, expected)
np.testing.assert_array_almost_equal(expected, result)
def binary_op_test(self, operator, flags=enable_nrt_flags,
skip_inputs=[], additional_inputs=[],
int_output_type=None, float_output_type=None,
positive_rhs=False):
operator_func = _make_binary_ufunc_op_usecase(operator)
inputs = list(self.inputs)
inputs.extend(additional_inputs)
pyfunc = operator_func
for input_tuple in inputs:
input_operand1, input_type = input_tuple
input_dtype = numpy_support.as_dtype(
getattr(input_type, "dtype", input_type))
input_type1 = input_type
if input_type in skip_inputs:
continue
if positive_rhs:
zero = np.zeros(1, dtype=input_dtype)[0]
# If we only use two scalars, the code generator will not
# select the ufunctionalized operator, so we mix it up.
if isinstance(input_type, types.Array):
input_operand0 = input_operand1
input_type0 = input_type
if positive_rhs and np.any(input_operand1 < zero):
continue
else:
input_operand0 = (np.random.random(10) * 100).astype(
input_dtype)
input_type0 = typeof(input_operand0)
if positive_rhs and input_operand1 < zero:
continue
cr = self.cache.compile(pyfunc, (input_type0, input_type1),
flags=flags)
cfunc = cr.entry_point
expected = pyfunc(input_operand0, input_operand1)
result = cfunc(input_operand0, input_operand1)
np.testing.assert_array_almost_equal(expected, result)
def binary_int_op_test(self, *args, **kws):
if 'skip_inputs' not in kws:
kws['skip_inputs'] = []
kws['skip_inputs'].extend([
types.float32, types.float64,
types.Array(types.float32, 1, 'C'),
types.Array(types.float64, 1, 'C')])
return self.binary_op_test(*args, **kws)
def unary_int_ufunc_test(self, name=None, flags=enable_pyobj_flags):
self.unary_ufunc_test(name, flags=flags,
skip_inputs=[types.float32, types.float64,
types.Array(types.float32, 1, 'C'),
types.Array(types.float64, 1, 'C')])
def binary_int_ufunc_test(self, name=None, flags=enable_pyobj_flags):
self.binary_ufunc_test(name, flags=flags,
skip_inputs=[types.float32, types.float64,
types.Array(types.float32, 1, 'C'),
types.Array(types.float64, 1, 'C')])
############################################################################
# Math operations
def test_add_ufunc(self, flags=enable_pyobj_flags):
self.binary_ufunc_test(np.add, flags=flags)
def test_add_ufunc_npm(self):
self.test_add_ufunc(flags=no_pyobj_flags)
def test_subtract_ufunc(self, flags=enable_pyobj_flags):
self.binary_ufunc_test(np.subtract, flags=flags)
def test_subtract_ufunc_npm(self):
self.test_subtract_ufunc(flags=no_pyobj_flags)
def test_multiply_ufunc(self, flags=enable_pyobj_flags):
self.binary_ufunc_test(np.multiply, flags=flags)
def test_multiply_ufunc_npm(self):
self.test_multiply_ufunc(flags=no_pyobj_flags)
def test_divide_ufunc(self, flags=enable_pyobj_flags):
# Bear in mind that in python3 divide IS true_divide
# so the out type for int types will be a double
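        # (Illustrative: on Python 3, np.divide(np.int64(1), np.int64(2)) == 0.5,
        # which is why integer inputs get a float64 output type here.)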
int_out_type = None
if PYVERSION >= (3, 0):
int_out_type = types.float64
self.binary_ufunc_test(np.divide, flags=flags, int_output_type=int_out_type)
def test_divide_ufunc_npm(self):
self.test_divide_ufunc(flags=no_pyobj_flags)
def test_logaddexp_ufunc(self):
self.binary_ufunc_test(np.logaddexp)
def test_logaddexp_ufunc_npm(self):
self.binary_ufunc_test(np.logaddexp, flags=no_pyobj_flags)
def test_logaddexp2_ufunc(self):
self.binary_ufunc_test(np.logaddexp2)
def test_logaddexp2_ufunc_npm(self):
self.binary_ufunc_test(np.logaddexp2, flags=no_pyobj_flags)
def test_true_divide_ufunc(self, flags=enable_pyobj_flags):
self.binary_ufunc_test(np.true_divide, flags=flags, int_output_type=types.float64)
def test_true_divide_ufunc_npm(self):
self.test_true_divide_ufunc(flags=no_pyobj_flags)
def test_floor_divide_ufunc(self):
self.binary_ufunc_test(np.floor_divide)
def test_floor_divide_ufunc_npm(self):
self.binary_ufunc_test(np.floor_divide, flags=no_pyobj_flags)
def test_negative_ufunc(self, flags=enable_pyobj_flags):
# NumPy ufunc has bug with uint32 as input and int64 as output,
# so skip uint32 input.
self.unary_ufunc_test(np.negative, int_output_type=types.int64,
skip_inputs=[types.Array(types.uint32, 1, 'C'), types.uint32],
flags=flags)
def test_negative_ufunc_npm(self):
self.test_negative_ufunc(flags=no_pyobj_flags)
def test_power_ufunc(self, flags=enable_pyobj_flags):
self.binary_ufunc_test(np.power, flags=flags)
def test_power_ufunc_npm(self):
self.test_power_ufunc(flags=no_pyobj_flags)
def test_remainder_ufunc(self, flags=enable_pyobj_flags):
self.binary_ufunc_test(np.remainder, flags=flags)
def test_remainder_ufunc_npm(self):
self.test_remainder_ufunc(flags=no_pyobj_flags)
def test_mod_ufunc(self, flags=enable_pyobj_flags):
self.binary_ufunc_test(np.mod, flags=flags)
def test_mod_ufunc_npm(self):
self.test_mod_ufunc(flags=no_pyobj_flags)
def test_fmod_ufunc(self, flags=enable_pyobj_flags):
self.binary_ufunc_test(np.fmod, flags=flags)
def test_fmod_ufunc_npm(self):
self.test_fmod_ufunc(flags=no_pyobj_flags)
def test_abs_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.abs, flags=flags,
additional_inputs = [(np.iinfo(np.uint32).max, types.uint32),
(np.iinfo(np.uint64).max, types.uint64),
(np.finfo(np.float32).min, types.float32),
(np.finfo(np.float64).min, types.float64)
])
def test_abs_ufunc_npm(self):
self.test_abs_ufunc(flags=no_pyobj_flags)
def test_absolute_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.absolute, flags=flags,
additional_inputs = [(np.iinfo(np.uint32).max, types.uint32),
(np.iinfo(np.uint64).max, types.uint64),
(np.finfo(np.float32).min, types.float32),
(np.finfo(np.float64).min, types.float64)
])
def test_absolute_ufunc_npm(self):
self.test_absolute_ufunc(flags=no_pyobj_flags)
def test_fabs_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.fabs, flags=flags)
def test_fabs_ufunc_npm(self):
self.test_fabs_ufunc(flags=no_pyobj_flags)
def test_rint_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.rint, flags=flags)
def test_rint_ufunc_npm(self):
self.test_rint_ufunc(flags=no_pyobj_flags)
def test_sign_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.sign, flags=flags)
def test_sign_ufunc_npm(self):
self.test_sign_ufunc(flags=no_pyobj_flags)
def test_conj_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.conj, flags=flags)
def test_conj_ufunc_npm(self):
self.test_conj_ufunc(flags=no_pyobj_flags)
def test_exp_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.exp, flags=flags)
def test_exp_ufunc_npm(self):
self.test_exp_ufunc(flags=no_pyobj_flags)
def test_exp2_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.exp2, flags=flags)
def test_exp2_ufunc_npm(self):
self.test_exp2_ufunc(flags=no_pyobj_flags)
def test_log_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.log, flags=flags)
def test_log_ufunc_npm(self):
self.test_log_ufunc(flags=no_pyobj_flags)
def test_log2_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.log2, flags=flags)
def test_log2_ufunc_npm(self):
self.test_log2_ufunc(flags=no_pyobj_flags)
def test_log10_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.log10, flags=flags)
def test_log10_ufunc_npm(self):
self.test_log10_ufunc(flags=no_pyobj_flags)
def test_expm1_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.expm1, flags=flags)
def test_expm1_ufunc_npm(self):
self.test_expm1_ufunc(flags=no_pyobj_flags)
def test_log1p_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.log1p, flags=flags)
def test_log1p_ufunc_npm(self):
self.test_log1p_ufunc(flags=no_pyobj_flags)
def test_sqrt_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.sqrt, flags=flags)
def test_sqrt_ufunc_npm(self):
self.test_sqrt_ufunc(flags=no_pyobj_flags)
def test_square_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.square, flags=flags)
def test_square_ufunc_npm(self):
self.test_square_ufunc(flags=no_pyobj_flags)
def test_reciprocal_ufunc(self, flags=enable_pyobj_flags):
        # reciprocal for integers doesn't make much sense and is problematic
        # in the case of division by zero: the resulting inf overflows the
        # float-to-int conversion, which is undefined behavior.
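        # (A concrete illustration, not exercised here: np.reciprocal(0.0)
        #  is inf, and converting float('inf') to a C integer type is
        #  undefined, so the compiler that built NumPy and LLVM may disagree.)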
to_skip = [types.Array(types.uint32, 1, 'C'), types.uint32,
types.Array(types.int32, 1, 'C'), types.int32,
types.Array(types.uint64, 1, 'C'), types.uint64,
types.Array(types.int64, 1, 'C'), types.int64]
self.unary_ufunc_test(np.reciprocal, skip_inputs=to_skip, flags=flags)
def test_reciprocal_ufunc_npm(self):
self.test_reciprocal_ufunc(flags=no_pyobj_flags)
def test_conjugate_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.conjugate, flags=flags)
def test_conjugate_ufunc_npm(self):
self.test_conjugate_ufunc(flags=no_pyobj_flags)
############################################################################
# Trigonometric Functions
def test_sin_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.sin, flags=flags)
def test_sin_ufunc_npm(self):
self.test_sin_ufunc(flags=no_pyobj_flags)
def test_cos_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.cos, flags=flags)
def test_cos_ufunc_npm(self):
self.test_cos_ufunc(flags=no_pyobj_flags)
def test_tan_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.tan, flags=flags)
def test_tan_ufunc_npm(self):
self.test_tan_ufunc(flags=no_pyobj_flags)
def test_arcsin_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.arcsin, flags=flags)
def test_arcsin_ufunc_npm(self):
self.test_arcsin_ufunc(flags=no_pyobj_flags)
def test_arccos_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.arccos, flags=flags)
def test_arccos_ufunc_npm(self):
self.test_arccos_ufunc(flags=no_pyobj_flags)
def test_arctan_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.arctan, flags=flags)
def test_arctan_ufunc_npm(self):
self.test_arctan_ufunc(flags=no_pyobj_flags)
def test_arctan2_ufunc(self, flags=enable_pyobj_flags):
self.binary_ufunc_test(np.arctan2, flags=flags)
def test_arctan2_ufunc_npm(self):
self.test_arctan2_ufunc(flags=no_pyobj_flags)
def test_hypot_ufunc(self, flags=enable_pyobj_flags):
        self.binary_ufunc_test(np.hypot, flags=flags)
def test_hypot_ufunc_npm(self):
self.test_hypot_ufunc(flags=no_pyobj_flags)
def test_sinh_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.sinh, flags=flags)
def test_sinh_ufunc_npm(self):
self.test_sinh_ufunc(flags=no_pyobj_flags)
def test_cosh_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.cosh, flags=flags)
def test_cosh_ufunc_npm(self):
self.test_cosh_ufunc(flags=no_pyobj_flags)
def test_tanh_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.tanh, flags=flags)
def test_tanh_ufunc_npm(self):
self.test_tanh_ufunc(flags=no_pyobj_flags)
def test_arcsinh_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.arcsinh, flags=flags)
def test_arcsinh_ufunc_npm(self):
self.test_arcsinh_ufunc(flags=no_pyobj_flags)
def test_arccosh_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.arccosh, flags=flags)
def test_arccosh_ufunc_npm(self):
self.test_arccosh_ufunc(flags=no_pyobj_flags)
def test_arctanh_ufunc(self, flags=enable_pyobj_flags):
        # arctanh is only finite in the open interval ]-1, 1[.
# This means that for any of the integer types it will produce
# conversion from infinity/-infinity to integer. That's undefined
# behavior in C, so the results may vary from implementation to
# implementation. This means that the result from the compiler
# used to compile NumPy may differ from the result generated by
# llvm. Skipping the integer types in this test avoids failed
# tests because of this.
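        # (For example, np.arctanh(1.0) is +inf and np.arctanh(2.0) is nan;
        #  neither value has a defined conversion to a C integer type.)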
to_skip = [types.Array(types.uint32, 1, 'C'), types.uint32,
types.Array(types.int32, 1, 'C'), types.int32,
types.Array(types.uint64, 1, 'C'), types.uint64,
types.Array(types.int64, 1, 'C'), types.int64]
self.unary_ufunc_test(np.arctanh, skip_inputs=to_skip, flags=flags)
def test_arctanh_ufunc_npm(self):
self.test_arctanh_ufunc(flags=no_pyobj_flags)
def test_deg2rad_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.deg2rad, flags=flags)
def test_deg2rad_ufunc_npm(self):
self.test_deg2rad_ufunc(flags=no_pyobj_flags)
def test_rad2deg_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.rad2deg, flags=flags)
def test_rad2deg_ufunc_npm(self):
self.test_rad2deg_ufunc(flags=no_pyobj_flags)
def test_degrees_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.degrees, flags=flags)
def test_degrees_ufunc_npm(self):
self.test_degrees_ufunc(flags=no_pyobj_flags)
def test_radians_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.radians, flags=flags)
def test_radians_ufunc_npm(self):
self.test_radians_ufunc(flags=no_pyobj_flags)
############################################################################
# Bit-twiddling Functions
def test_bitwise_and_ufunc(self, flags=enable_pyobj_flags):
self.binary_int_ufunc_test(np.bitwise_and, flags=flags)
def test_bitwise_and_ufunc_npm(self):
self.test_bitwise_and_ufunc(flags=no_pyobj_flags)
def test_bitwise_or_ufunc(self, flags=enable_pyobj_flags):
self.binary_int_ufunc_test(np.bitwise_or, flags=flags)
def test_bitwise_or_ufunc_npm(self):
self.test_bitwise_or_ufunc(flags=no_pyobj_flags)
def test_bitwise_xor_ufunc(self, flags=enable_pyobj_flags):
self.binary_int_ufunc_test(np.bitwise_xor, flags=flags)
def test_bitwise_xor_ufunc_npm(self):
self.test_bitwise_xor_ufunc(flags=no_pyobj_flags)
def test_invert_ufunc(self, flags=enable_pyobj_flags):
self.unary_int_ufunc_test(np.invert, flags=flags)
def test_invert_ufunc_npm(self):
self.test_invert_ufunc(flags=no_pyobj_flags)
def test_bitwise_not_ufunc(self, flags=enable_pyobj_flags):
self.unary_int_ufunc_test(np.bitwise_not, flags=flags)
def test_bitwise_not_ufunc_npm(self):
self.test_bitwise_not_ufunc(flags=no_pyobj_flags)
    # Note: there is no entry for left_shift and right_shift as this harness
    # is not valid for them. This is because the left_shift and right_shift
    # implementations in NumPy have undefined behavior (in C parlance) when
    # the second argument is negative (or bigger than the number of bits in
    # the shifted value).
    # Also, right_shift for a negative first argument relies on
    # implementation-defined behavior, although numba guarantees "sane"
    # behavior (arithmetic shifts on signed integers, logical shifts on
    # unsigned integers).
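    # For reference, the dedicated shift test classes further below make the
    # comparison well defined by clamping the shift amount, roughly:
    #     shift = np.clip(shift, 0, shifted.dtype.itemsize * 8 - 1)
    # (an illustrative sketch of the same idea, not executed here).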
############################################################################
# Comparison functions
def test_greater_ufunc(self, flags=enable_pyobj_flags):
self.binary_ufunc_test(np.greater, flags=flags)
def test_greater_ufunc_npm(self):
self.test_greater_ufunc(flags=no_pyobj_flags)
def test_greater_equal_ufunc(self, flags=enable_pyobj_flags):
self.binary_ufunc_test(np.greater_equal, flags=flags)
def test_greater_equal_ufunc_npm(self):
self.test_greater_equal_ufunc(flags=no_pyobj_flags)
def test_less_ufunc(self, flags=enable_pyobj_flags):
self.binary_ufunc_test(np.less, flags=flags)
def test_less_ufunc_npm(self):
self.test_less_ufunc(flags=no_pyobj_flags)
def test_less_equal_ufunc(self, flags=enable_pyobj_flags):
self.binary_ufunc_test(np.less_equal, flags=flags)
def test_less_equal_ufunc_npm(self):
self.test_less_equal_ufunc(flags=no_pyobj_flags)
def test_not_equal_ufunc(self, flags=enable_pyobj_flags):
self.binary_ufunc_test(np.not_equal, flags=flags)
def test_not_equal_ufunc_npm(self):
self.test_not_equal_ufunc(flags=no_pyobj_flags)
def test_equal_ufunc(self, flags=enable_pyobj_flags):
self.binary_ufunc_test(np.equal, flags=flags)
def test_equal_ufunc_npm(self):
self.test_equal_ufunc(flags=no_pyobj_flags)
def test_logical_and_ufunc(self, flags=enable_pyobj_flags):
self.binary_ufunc_test(np.logical_and, flags=flags)
def test_logical_and_ufunc_npm(self):
self.test_logical_and_ufunc(flags=no_pyobj_flags)
def test_logical_or_ufunc(self, flags=enable_pyobj_flags):
self.binary_ufunc_test(np.logical_or, flags=flags)
def test_logical_or_ufunc_npm(self):
self.test_logical_or_ufunc(flags=no_pyobj_flags)
def test_logical_xor_ufunc(self, flags=enable_pyobj_flags):
self.binary_ufunc_test(np.logical_xor, flags=flags)
def test_logical_xor_ufunc_npm(self):
self.test_logical_xor_ufunc(flags=no_pyobj_flags)
def test_logical_not_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.logical_not, flags=flags)
def test_logical_not_ufunc_npm(self):
self.test_logical_not_ufunc(flags=no_pyobj_flags)
def test_maximum_ufunc(self, flags=enable_pyobj_flags):
self.binary_ufunc_test(np.maximum, flags=flags)
def test_maximum_ufunc_npm(self):
self.test_maximum_ufunc(flags=no_pyobj_flags)
def test_minimum_ufunc(self, flags=enable_pyobj_flags):
self.binary_ufunc_test(np.minimum, flags=flags)
def test_minimum_ufunc_npm(self):
self.test_minimum_ufunc(flags=no_pyobj_flags)
def test_fmax_ufunc(self, flags=enable_pyobj_flags):
self.binary_ufunc_test(np.fmax, flags=flags)
def test_fmax_ufunc_npm(self):
self.test_fmax_ufunc(flags=no_pyobj_flags)
def test_fmin_ufunc(self, flags=enable_pyobj_flags):
self.binary_ufunc_test(np.fmin, flags=flags)
def test_fmin_ufunc_npm(self):
self.test_fmin_ufunc(flags=no_pyobj_flags)
############################################################################
# Floating functions
def test_isfinite_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.isfinite, flags=flags)
def test_isfinite_ufunc_npm(self):
self.test_isfinite_ufunc(flags=no_pyobj_flags)
def test_isinf_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.isinf, flags=flags)
def test_isinf_ufunc_npm(self):
self.test_isinf_ufunc(flags=no_pyobj_flags)
def test_isnan_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.isnan, flags=flags)
def test_isnan_ufunc_npm(self):
self.test_isnan_ufunc(flags=no_pyobj_flags)
def test_signbit_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.signbit, flags=flags)
def test_signbit_ufunc_npm(self):
self.test_signbit_ufunc(flags=no_pyobj_flags)
def test_copysign_ufunc(self, flags=enable_pyobj_flags):
self.binary_ufunc_test(np.copysign, flags=flags)
def test_copysign_ufunc_npm(self):
self.test_copysign_ufunc(flags=no_pyobj_flags)
def test_nextafter_ufunc(self, flags=enable_pyobj_flags):
self.binary_ufunc_test(np.nextafter, flags=flags)
def test_nextafter_ufunc_npm(self):
self.test_nextafter_ufunc(flags=no_pyobj_flags)
def test_modf_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.modf, flags=flags)
@_unimplemented
def test_modf_ufunc_npm(self):
self.test_modf_ufunc(flags=no_pyobj_flags)
    # Note: there is no entry for ldexp as this harness isn't valid for this
    # ufunc. This is because ldexp requires heterogeneous inputs.
    # However, this ufunc is tested by the TestLoopTypes test classes.
def test_frexp_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.frexp, flags=flags)
@_unimplemented
def test_frexp_ufunc_npm(self):
self.test_frexp_ufunc(flags=no_pyobj_flags)
def test_floor_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.floor, flags=flags)
def test_floor_ufunc_npm(self):
self.test_floor_ufunc(flags=no_pyobj_flags)
def test_ceil_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.ceil, flags=flags)
def test_ceil_ufunc_npm(self):
self.test_ceil_ufunc(flags=no_pyobj_flags)
def test_trunc_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.trunc, flags=flags)
def test_trunc_ufunc_npm(self):
self.test_trunc_ufunc(flags=no_pyobj_flags)
def test_spacing_ufunc(self, flags=enable_pyobj_flags):
self.unary_ufunc_test(np.spacing, flags=flags)
def test_spacing_ufunc_npm(self):
self.test_spacing_ufunc(flags=no_pyobj_flags)
############################################################################
# Other tests
def test_binary_ufunc_performance(self):
pyfunc = _make_binary_ufunc_usecase(np.add)
arraytype = types.Array(types.float32, 1, 'C')
cr = compile_isolated(pyfunc, (arraytype, arraytype, arraytype))
cfunc = cr.entry_point
nelem = 5000
x_operand = np.arange(nelem, dtype=np.float32)
y_operand = np.arange(nelem, dtype=np.float32)
control = np.empty_like(x_operand)
result = np.empty_like(x_operand)
def bm_python():
pyfunc(x_operand, y_operand, control)
def bm_numba():
cfunc(x_operand, y_operand, result)
print(utils.benchmark(bm_python, maxsec=.1))
print(utils.benchmark(bm_numba, maxsec=.1))
assert np.allclose(control, result)
def binary_ufunc_mixed_types_test(self, ufunc, flags=enable_pyobj_flags):
ufunc_name = ufunc.__name__
ufunc = _make_binary_ufunc_usecase(ufunc)
inputs1 = [
(1, types.uint64),
(-1, types.int64),
(0.5, types.float64),
(np.array([0, 1], dtype='u8'), types.Array(types.uint64, 1, 'C')),
(np.array([-1, 1], dtype='i8'), types.Array(types.int64, 1, 'C')),
(np.array([-0.5, 0.5], dtype='f8'), types.Array(types.float64, 1, 'C'))]
inputs2 = inputs1
output_types = [types.Array(types.int64, 1, 'C'),
types.Array(types.float64, 1, 'C')]
pyfunc = ufunc
for input1, input2, output_type in itertools.product(inputs1, inputs2, output_types):
input1_operand = input1[0]
input1_type = input1[1]
input2_operand = input2[0]
input2_type = input2[1]
# Skip division by unsigned int because of NumPy bugs
if ufunc_name == 'divide' and (input2_type == types.Array(types.uint32, 1, 'C') or
input2_type == types.Array(types.uint64, 1, 'C')):
continue
# Skip some subtraction tests because of NumPy bugs
            if ufunc_name == 'subtract' and input1_type == types.Array(types.uint32, 1, 'C') and \
                    input2_type == types.uint32 and output_type == types.Array(types.int64, 1, 'C'):
                continue
            if ufunc_name == 'subtract' and input1_type == types.Array(types.uint32, 1, 'C') and \
                    input2_type == types.uint64 and output_type == types.Array(types.int64, 1, 'C'):
                continue
if ((isinstance(input1_type, types.Array) or
isinstance(input2_type, types.Array)) and
not isinstance(output_type, types.Array)):
continue
cr = self.cache.compile(pyfunc,
(input1_type, input2_type, output_type),
flags=flags)
cfunc = cr.entry_point
if isinstance(input1_operand, np.ndarray):
result = np.zeros(input1_operand.size,
dtype=output_type.dtype.name)
expected = np.zeros(input1_operand.size,
dtype=output_type.dtype.name)
elif isinstance(input2_operand, np.ndarray):
result = np.zeros(input2_operand.size,
dtype=output_type.dtype.name)
expected = np.zeros(input2_operand.size,
dtype=output_type.dtype.name)
else:
result = np.zeros(1, dtype=output_type.dtype.name)
expected = np.zeros(1, dtype=output_type.dtype.name)
cfunc(input1_operand, input2_operand, result)
pyfunc(input1_operand, input2_operand, expected)
# Need special checks if NaNs are in results
if np.isnan(expected).any() or np.isnan(result).any():
self.assertTrue(np.allclose(np.isnan(result), np.isnan(expected)))
if not np.isnan(expected).all() and not np.isnan(result).all():
self.assertTrue(np.allclose(result[np.invert(np.isnan(result))],
expected[np.invert(np.isnan(expected))]))
else:
self.assertTrue(np.all(result == expected) or
np.allclose(result, expected))
def test_mixed_types(self):
self.binary_ufunc_mixed_types_test(np.divide, flags=no_pyobj_flags)
def test_broadcasting(self):
# Test unary ufunc
pyfunc = _make_unary_ufunc_usecase(np.negative)
input_operands = [
np.arange(3, dtype='i8'),
np.arange(3, dtype='i8').reshape(3,1),
np.arange(3, dtype='i8').reshape(1,3),
np.arange(3, dtype='i8').reshape(3,1),
np.arange(3, dtype='i8').reshape(1,3),
np.arange(3*3, dtype='i8').reshape(3,3)]
output_operands = [
np.zeros(3*3, dtype='i8').reshape(3,3),
np.zeros(3*3, dtype='i8').reshape(3,3),
np.zeros(3*3, dtype='i8').reshape(3,3),
np.zeros(3*3*3, dtype='i8').reshape(3,3,3),
np.zeros(3*3*3, dtype='i8').reshape(3,3,3),
np.zeros(3*3*3, dtype='i8').reshape(3,3,3)]
for x, result in zip(input_operands, output_operands):
input_type = types.Array(types.uint64, x.ndim, 'C')
output_type = types.Array(types.int64, result.ndim, 'C')
cr = self.cache.compile(pyfunc, (input_type, output_type),
flags=no_pyobj_flags)
cfunc = cr.entry_point
expected = np.zeros(result.shape, dtype=result.dtype)
np.negative(x, expected)
cfunc(x, result)
self.assertTrue(np.all(result == expected))
# Test binary ufunc
pyfunc = _make_binary_ufunc_usecase(np.add)
input1_operands = [
np.arange(3, dtype='u8'),
np.arange(3*3, dtype='u8').reshape(3,3),
np.arange(3*3*3, dtype='u8').reshape(3,3,3),
np.arange(3, dtype='u8').reshape(3,1),
np.arange(3, dtype='u8').reshape(1,3),
np.arange(3, dtype='u8').reshape(3,1,1),
np.arange(3*3, dtype='u8').reshape(3,3,1),
np.arange(3*3, dtype='u8').reshape(3,1,3),
np.arange(3*3, dtype='u8').reshape(1,3,3)]
input2_operands = input1_operands
for x, y in itertools.product(input1_operands, input2_operands):
input1_type = types.Array(types.uint64, x.ndim, 'C')
input2_type = types.Array(types.uint64, y.ndim, 'C')
output_type = types.Array(types.uint64, max(x.ndim, y.ndim), 'C')
cr = self.cache.compile(pyfunc, (input1_type, input2_type, output_type),
flags=no_pyobj_flags)
cfunc = cr.entry_point
expected = np.add(x, y)
result = np.zeros(expected.shape, dtype='u8')
cfunc(x, y, result)
self.assertTrue(np.all(result == expected))
def test_implicit_output_npm(self):
with self.assertRaises(TypeError):
def myadd(a0, a1):
return np.add(a0, a1)
arr_ty = types.Array(types.uint64, 1, 'C')
cr = compile_isolated(myadd, (arr_ty, arr_ty),
flags=no_pyobj_flags)
def test_broadcast_implicit_output_npm_nrt(self):
def pyfunc(a0, a1):
return np.add(a0, a1)
input1_operands = [
np.arange(3, dtype='u8'),
np.arange(3*3, dtype='u8').reshape(3,3),
np.arange(3*3*3, dtype='u8').reshape(3,3,3),
np.arange(3, dtype='u8').reshape(3,1),
np.arange(3, dtype='u8').reshape(1,3),
np.arange(3, dtype='u8').reshape(3,1,1),
np.arange(3*3, dtype='u8').reshape(3,3,1),
np.arange(3*3, dtype='u8').reshape(3,1,3),
np.arange(3*3, dtype='u8').reshape(1,3,3)]
input2_operands = input1_operands
for x, y in itertools.product(input1_operands, input2_operands):
input1_type = types.Array(types.uint64, x.ndim, 'C')
input2_type = types.Array(types.uint64, y.ndim, 'C')
cr = self.cache.compile(pyfunc, (input1_type, input2_type),
flags=enable_nrt_flags)
cfunc = cr.entry_point
expected = np.add(x, y)
result = cfunc(x, y)
np.testing.assert_array_equal(expected, result)
def test_implicit_output_layout(self):
def pyfunc(a0, a1):
return np.add(a0, a1)
X = np.linspace(0, 1, 20).reshape(4, 5)
Y = np.array(X, order='F')
Xty = typeof(X)
assert X.flags.c_contiguous and Xty.layout == 'C'
Yty = typeof(Y)
assert Y.flags.f_contiguous and Yty.layout == 'F'
cr0 = self.cache.compile(pyfunc, (Xty, Yty), flags=enable_nrt_flags)
expected0 = np.add(X, Y)
result0 = cr0.entry_point(X, Y)
self.assertEqual(expected0.flags.c_contiguous,
result0.flags.c_contiguous)
self.assertEqual(expected0.flags.f_contiguous,
result0.flags.f_contiguous)
np.testing.assert_array_equal(expected0, result0)
cr1 = self.cache.compile(pyfunc, (Yty, Yty), flags=enable_nrt_flags)
expected1 = np.add(Y, Y)
result1 = cr1.entry_point(Y, Y)
self.assertEqual(expected1.flags.c_contiguous,
result1.flags.c_contiguous)
self.assertEqual(expected1.flags.f_contiguous,
result1.flags.f_contiguous)
np.testing.assert_array_equal(expected1, result1)
# ____________________________________________________________
# Array operators
def test_unary_positive_array_op(self):
self.unary_op_test('+')
def test_unary_negative_array_op(self):
self.unary_op_test('-')
def test_unary_invert_array_op(self):
self.unary_op_test('~', skip_inputs=[
types.float32, types.float64,
types.Array(types.float32, 1, 'C'),
types.Array(types.float64, 1, 'C')])
def test_add_array_op(self):
self.binary_op_test('+')
def test_subtract_array_op(self):
self.binary_op_test('-')
def test_multiply_array_op(self):
self.binary_op_test('*')
def test_divide_array_op(self):
int_out_type = None
if PYVERSION >= (3, 0):
int_out_type = types.float64
self.binary_op_test('/', int_output_type=int_out_type)
def test_floor_divide_array_op(self):
self.binary_op_test('//')
def test_remainder_array_op(self):
self.binary_op_test('%')
def test_power_array_op(self):
self.binary_op_test('**')
def test_left_shift_array_op(self):
self.binary_int_op_test('<<', positive_rhs=True)
def test_right_shift_array_op(self):
self.binary_int_op_test('>>', positive_rhs=True)
def test_bitwise_and_array_op(self):
self.binary_int_op_test('&')
def test_bitwise_or_array_op(self):
self.binary_int_op_test('|')
def test_bitwise_xor_array_op(self):
self.binary_int_op_test('^')
def test_equal_array_op(self):
self.binary_op_test('==')
def test_greater_array_op(self):
self.binary_op_test('>')
def test_greater_equal_array_op(self):
self.binary_op_test('>=')
def test_less_array_op(self):
self.binary_op_test('<')
def test_less_equal_array_op(self):
self.binary_op_test('<=')
def test_not_equal_array_op(self):
self.binary_op_test('!=')
    def test_unary_positive_array_op_copy(self):
'''
Verify that the unary positive operator copies values, and doesn't
just alias to the input array (mirrors normal Numpy/Python
interaction behavior).
'''
# Test originally from @gmarkall
def f(a1):
a2 = +a1
a1[0] = 3
a2[1] = 4
return a2
a1 = np.zeros(10)
a2 = f(a1)
self.assertTrue(a1[0] != a2[0] and a1[1] != a2[1])
a3 = np.zeros(10)
a4 = njit(f)(a3)
self.assertTrue(a3[0] != a4[0] and a3[1] != a4[1])
np.testing.assert_array_equal(a1, a3)
np.testing.assert_array_equal(a2, a4)
class TestScalarUFuncs(TestCase):
"""check the machinery of ufuncs works when the result is an scalar.
These are not exhaustive because:
- the machinery to support this case is the same for all the functions of a
given arity.
- the result of the inner function itself is already tested in TestUFuncs
This class tests regular uses. A subclass tests the no python backend.
"""
_compile_flags = enable_pyobj_flags
def run_ufunc(self, pyfunc, arg_types, arg_values):
for tyargs, args in zip(arg_types, arg_values):
cr = compile_isolated(pyfunc, tyargs, flags=self._compile_flags)
cfunc = cr.entry_point
got = cfunc(*args)
expected = pyfunc(*_as_dtype_value(tyargs, args))
msg = 'for args {0} typed {1}'.format(args, tyargs)
            # note: due to ufunc type promotion, operations like adding an
            # int32 to a uint64 result in doubles (neither type can be safely
            # cast to the other, so the floating-point loop is used). In those
            # cases the expected value is adjusted, because the NumPy reference
            # call does not use typed integers as inputs and returns an integer.
special = set([(types.int32, types.uint64), (types.uint64, types.int32),
(types.int64, types.uint64), (types.uint64, types.int64)])
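            # (For context, NumPy itself promotes these pairings to float64,
            #  e.g. np.promote_types(np.int32, np.uint64) is float64, since no
            #  integer dtype can hold both value ranges.)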
if tyargs in special:
expected = float(expected)
else:
                # The numba version of scalar ufuncs returns a plain value that
                # gets converted to a Python type, instead of using NumPy
                # scalars. Although in Python 2 NumPy scalars are considered
                # instances of the appropriate Python type, in Python 3 that is
                # no longer the case. This is why the expected result is cast
                # to the appropriate Python type (which is the expected
                # behavior of the ufunc translation).
if np.issubdtype(expected.dtype, np.inexact):
expected = float(expected)
elif np.issubdtype(expected.dtype, np.integer):
expected = int(expected)
elif np.issubdtype(expected.dtype, np.bool):
expected = bool(expected)
alltypes = cr.signature.args + (cr.signature.return_type,)
# select the appropriate precision for comparison: note that an argument
# typed at a lower precision can introduce precision problems. For this
# reason the argument types must be taken into account.
if any([t==types.float32 for t in alltypes]):
prec='single'
elif any([t==types.float64 for t in alltypes]):
prec='double'
else:
prec='exact'
self.assertPreciseEqual(got, expected, msg=msg, prec=prec)
def test_scalar_unary_ufunc(self):
def _func(x):
return np.sqrt(x)
vals = [(2,), (2,), (1,), (2,), (.1,), (.2,)]
tys = [(types.int32,), (types.uint32,),
(types.int64,), (types.uint64,),
(types.float32,), (types.float64,)]
self.run_ufunc(_func, tys, vals)
def test_scalar_binary_uniform_ufunc(self):
def _func(x,y):
return np.add(x,y)
vals = [2, 2, 1, 2, .1, .2]
tys = [types.int32, types.uint32,
types.int64, types.uint64, types.float32, types.float64]
self.run_ufunc(_func, zip(tys, tys), zip(vals, vals))
def test_scalar_binary_mixed_ufunc(self, flags=enable_pyobj_flags):
def _func(x,y):
return np.add(x,y)
vals = [2, 2, 1, 2, .1, .2]
tys = [types.int32, types.uint32,
types.int64, types.uint64,
types.float32, types.float64]
self.run_ufunc(_func, itertools.product(tys, tys),
itertools.product(vals, vals))
class TestScalarUFuncsNoPython(TestScalarUFuncs):
"""Same tests as TestScalarUFuncs, but forcing no python mode"""
_compile_flags = no_pyobj_flags
class TestUfuncIssues(TestCase):
def test_issue_651(self):
# Exercise the code path to make sure this does not fail
@vectorize(["(float64,float64)"])
def foo(x1, x2):
return np.add(x1, x2) + np.add(x1, x2)
a = np.arange(10, dtype='f8')
b = np.arange(10, dtype='f8')
self.assertTrue(np.all(foo(a, b) == (a + b) + (a + b)))
def test_issue_713(self):
def foo(x,y):
return np.floor_divide(x,y)
cr = compile_isolated(foo, [types.complex128, types.complex128])
self.assertEqual(foo(1j, 1j), cr.entry_point(1j, 1j))
class _TestLoopTypes(TestCase):
"""Test code generation for the different loop types defined by ufunc.
This class tests the ufuncs without forcing no-python mode. Subclasses
of this class tweak it so they tests no-python mode support for the
different ufuncs.
This test relies on class variables to configure the test. Subclasses
of this class can just override some of these variables to check other
ufuncs in a different compilation context. The variables supported are:
_funcs: the ufuncs to test
_compile_flags: compilation flags to use (to force nopython mode)
_skip_types: letter types that force skipping the loop when testing
if present in the NumPy ufunc signature.
_supported_types: only test loops where all the types in the loop
signature are in this collection. If unset, all.
    Note that both _skip_types and _supported_types must be met for a loop
to be tested.
The NumPy ufunc signature has a form like 'ff->f' (for a binary ufunc
loop taking 2 floats and resulting in a float). In a NumPy ufunc object
you can get a list of supported signatures by accessing the attribute
'types'.
"""
_ufuncs = all_ufuncs[:]
# Have their own test classes
_ufuncs.remove(np.left_shift)
_ufuncs.remove(np.right_shift)
_ufuncs.remove(np.reciprocal)
_ufuncs.remove(np.power)
_compile_flags = enable_pyobj_flags
_skip_types = 'OegG'
# Skip datetime64 'M' and timedelta64 'm' on numpy 1.6
if is_on_numpy_16:
_skip_types += 'Mm'
def _arg_for_type(self, a_letter_type, index=0):
"""return a suitable array argument for testing the letter type"""
if a_letter_type in 'bhilq':
# an integral
return np.array([1, 4, 0, -2], dtype=a_letter_type)
if a_letter_type in 'BHILQ':
return np.array([1, 2, 4, 0], dtype=a_letter_type)
elif a_letter_type in '?':
# a boolean
return np.array([True, False, False, True], dtype=a_letter_type)
elif a_letter_type[0] == 'm':
# timedelta64
if len(a_letter_type) == 1:
a_letter_type = 'm8[D]'
return np.array([2, -3, 'NaT', 0], dtype=a_letter_type)
elif a_letter_type[0] == 'M':
# datetime64
if len(a_letter_type) == 1:
a_letter_type = 'M8[D]'
return np.array(['Nat', 1, 25, 0], dtype=a_letter_type)
elif a_letter_type in 'fd':
# floating point
return np.array([1.5, -3.5, 0.0, float('nan')], dtype=a_letter_type)
elif a_letter_type in 'FD':
# complex
return np.array([-1.0j, 1.5 + 1.5j, 1j * float('nan'), 0j],
dtype=a_letter_type)
else:
raise RuntimeError("type %r not understood" % (a_letter_type,))
def _check_loop(self, fn, ufunc, loop):
# the letter types for the args
letter_types = loop[:ufunc.nin] + loop[-ufunc.nout:]
# ignore the loops containing an object argument. They will always
# fail in no python mode. Usually the last loop in ufuncs is an all
# object fallback
supported_types = getattr(self, '_supported_types', [])
if (supported_types and
any(l not in supported_types for l in letter_types)):
return
skip_types = getattr(self, '_skip_types', [])
if any(l in skip_types for l in letter_types):
return
# if the test case requires some types to be present, skip loops
# not involving any of those types.
required_types = getattr(self, '_required_types', [])
if required_types and not any(l in letter_types
for l in required_types):
return
self._check_ufunc_with_dtypes(fn, ufunc, letter_types)
def _check_ufunc_with_dtypes(self, fn, ufunc, dtypes):
arg_dty = [np.dtype(t) for t in dtypes]
arg_nbty = [types.Array(from_dtype(t), 1, 'C') for t in arg_dty]
cr = compile_isolated(fn, arg_nbty, flags=self._compile_flags)
# Ensure a good mix of input values
c_args = [self._arg_for_type(t, index=index).repeat(2)
for index, t in enumerate(dtypes)]
for arr in c_args:
self.random.shuffle(arr)
py_args = [a.copy() for a in c_args]
cr.entry_point(*c_args)
fn(*py_args)
# Check each array (including inputs, to ensure they weren't
# mutated).
for c_arg, py_arg in zip(c_args, py_args):
prec = 'single' if c_arg.dtype.char in 'fF' else 'exact'
prec = 'double' if c_arg.dtype.char in 'dD' else prec
msg = '\n'.join(["ufunc '{0}' arrays differ ({1}):",
"args: {2}", "expected {3}", "got {4}"])
            msg = msg.format(ufunc.__name__, prec, c_args, py_arg, c_arg)
self.assertPreciseEqual(py_arg, c_arg, prec=prec, msg=msg)
@classmethod
def _check_ufunc_loops(cls, ufunc):
for loop in ufunc.types:
cls._inject_test(ufunc, loop)
@classmethod
def _inject_test(cls, ufunc, loop):
def test_template(self):
fn = _make_ufunc_usecase(ufunc)
self._check_loop(fn, ufunc, loop)
setattr(cls, "test_{0}_{1}".format(ufunc.__name__,
loop.replace('->', '_')),
test_template)
@classmethod
def autogenerate(cls):
for ufunc in cls._ufuncs:
cls._check_ufunc_loops(ufunc)
class TestLoopTypes(_TestLoopTypes):
pass
TestLoopTypes.autogenerate()
class TestLoopTypesIntNoPython(_TestLoopTypes):
_compile_flags = no_pyobj_flags
_ufuncs = supported_ufuncs[:]
# reciprocal and power need a special test due to issue #757
_ufuncs.remove(np.power)
_ufuncs.remove(np.reciprocal)
_ufuncs.remove(np.left_shift) # has its own test class
_ufuncs.remove(np.right_shift) # has its own test class
_required_types = '?bBhHiIlLqQ'
_skip_types = 'fdFDmMO' + _TestLoopTypes._skip_types
TestLoopTypesIntNoPython.autogenerate()
class TestLoopTypesReciprocalNoPython(_TestLoopTypes):
_compile_flags = no_pyobj_flags
_ufuncs = [np.reciprocal] # issue #757
_required_types = 'bBhHiIlLqQfdFD'
_skip_types = 'mMO' + _TestLoopTypes._skip_types
def _arg_for_type(self, a_letter_type, index=0):
res = super(self.__class__, self)._arg_for_type(a_letter_type,
index=index)
if a_letter_type in 'bBhHiIlLqQ':
# For integer reciprocal, avoid 0 as argument, as it triggers
# undefined behavior that may differ in results from Numba
# to the compiler used to compile NumPy.
res[res == 0] = 42
return res
TestLoopTypesReciprocalNoPython.autogenerate()
class TestLoopTypesPowerNoPython(_TestLoopTypes):
_compile_flags = no_pyobj_flags
_ufuncs = [np.power] # issue #757
_required_types = 'bBhHiIlLqQfdFD'
_skip_types = 'mMO' + _TestLoopTypes._skip_types
def _arg_for_type(self, a_letter_type, index=0):
res = super(self.__class__, self)._arg_for_type(a_letter_type,
index=index)
if a_letter_type in 'bBhHiIlLqQ' and index == 1:
# For integer power, avoid a negative exponent, as it triggers
# undefined behavior that may differ in results from Numba
# to the compiler used to compile NumPy
res[res < 0] = 3
return res
TestLoopTypesPowerNoPython.autogenerate()
class TestLoopTypesIntLeftShiftNoPython(_TestLoopTypes):
_compile_flags = no_pyobj_flags
_ufuncs = [np.left_shift]
_required_types = 'bBhHiIlLqQ'
_skip_types = 'fdFDmMO' + _TestLoopTypes._skip_types
def _arg_for_type(self, a_letter_type, index=0):
res = super(self.__class__, self)._arg_for_type(a_letter_type,
index=index)
# Shifting by a negative amount (argument with index 1) is undefined
# behavior in C. It is also undefined behavior in numba. In the same
# sense, it is also undefined behavior when the shift amount is larger
# than the number of bits in the shifted integer.
# To avoid problems in the test, the values are clamped (clipped) so
# that 0 <= shift_amount < bitcount(shifted_integer)
if index == 1:
bit_count = res.dtype.itemsize * 8
res = np.clip(res, 0, bit_count-1)
return res
TestLoopTypesIntLeftShiftNoPython.autogenerate()
class TestLoopTypesIntRightShiftNoPython(_TestLoopTypes):
_compile_flags = no_pyobj_flags
_ufuncs = [np.right_shift]
_required_types = 'bBhHiIlLqQ'
_skip_types = 'fdFDmMO' + _TestLoopTypes._skip_types
def _arg_for_type(self, a_letter_type, index=0):
res = super(self.__class__, self)._arg_for_type(a_letter_type,
index=index)
# Shifting by a negative amount (argument with index 1) is undefined
# behavior in C. It is also undefined behavior in numba. In the same
# sense, it is also undefined behavior when the shift amount is larger
# than the number of bits in the shifted integer.
# To avoid problems in the test, the values are clamped (clipped) so
# that 0 <= shift_amount < bitcount(shifted_integer)
if index == 1:
bit_count = res.dtype.itemsize * 8
res = np.clip(res, 0, bit_count-1)
# Right shift has "implementation defined behavior" when the number
# shifted is negative (in C). In numba, right shift for signed integers
# is "arithmetic" while for unsigned integers is "logical".
# This test compares against the NumPy implementation, that relies
# on "implementation defined behavior", so the test could be a false
# failure if the compiler used to compile NumPy doesn't follow the same
# policy.
# Hint: do not rely on right shifting negative numbers in NumPy.
if index == 0:
res = np.abs(res)
return res
TestLoopTypesIntRightShiftNoPython.autogenerate()
class TestLoopTypesFloatNoPython(_TestLoopTypes):
_compile_flags = no_pyobj_flags
_ufuncs = supported_ufuncs[:]
if iswindows:
_ufuncs.remove(np.signbit) # TODO: fix issue #758
_required_types = 'fd'
_skip_types = 'FDmMO' + _TestLoopTypes._skip_types
TestLoopTypesFloatNoPython.autogenerate()
class TestLoopTypesComplexNoPython(_TestLoopTypes):
_compile_flags = no_pyobj_flags
_ufuncs = supported_ufuncs[:]
# Test complex types
# Every loop containing a complex argument must be tested
_required_types = 'FD'
_skip_types = 'mMO' + _TestLoopTypes._skip_types
TestLoopTypesComplexNoPython.autogenerate()
@skip_on_numpy_16
class TestLoopTypesDatetimeNoPython(_TestLoopTypes):
_compile_flags = no_pyobj_flags
_ufuncs = supported_ufuncs[:]
# NOTE: the full list of ufuncs supporting datetime64 and timedelta64
# types in Numpy is:
# ['absolute', 'add', 'divide', 'equal', 'floor_divide', 'fmax', 'fmin',
# 'greater', 'greater_equal', 'less', 'less_equal', 'maximum',
# 'minimum', 'multiply', 'negative', 'not_equal', 'sign', 'subtract',
# 'true_divide']
# Test datetime64 and timedelta64 types.
_required_types = 'mM'
# Test various units combinations (TestLoopTypes is only able to test
# homogeneous units).
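    # Reminder of the dtype strings used below: 'm8[s]' is timedelta64 in
    # seconds, 'M8[m]' is datetime64 in minutes, 'q' is int64 and 'd' is
    # float64; e.g. ['m8[s]', 'm8[m]', 'm8[s]'] means seconds + minutes with
    # the result stored in seconds.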
def test_add(self):
ufunc = np.add
fn = _make_ufunc_usecase(ufunc)
        # heterogeneous inputs
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[s]', 'm8[m]', 'm8[s]'])
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'm8[s]', 'm8[s]'])
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'm8', 'm8[m]'])
self._check_ufunc_with_dtypes(fn, ufunc, ['m8', 'm8[m]', 'm8[m]'])
        # heterogeneous inputs, scaled output
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[s]', 'm8[m]', 'm8[ms]'])
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'm8[s]', 'm8[ms]'])
# Cannot upscale result (Numpy would accept this)
with self.assertRaises(LoweringError):
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'm8[s]', 'm8[m]'])
def test_subtract(self):
ufunc = np.subtract
fn = _make_ufunc_usecase(ufunc)
        # heterogeneous inputs
self._check_ufunc_with_dtypes(fn, ufunc, ['M8[s]', 'M8[m]', 'm8[s]'])
self._check_ufunc_with_dtypes(fn, ufunc, ['M8[m]', 'M8[s]', 'm8[s]'])
        # heterogeneous inputs, scaled output
self._check_ufunc_with_dtypes(fn, ufunc, ['M8[s]', 'M8[m]', 'm8[ms]'])
self._check_ufunc_with_dtypes(fn, ufunc, ['M8[m]', 'M8[s]', 'm8[ms]'])
# Cannot upscale result (Numpy would accept this)
with self.assertRaises(LoweringError):
self._check_ufunc_with_dtypes(fn, ufunc, ['M8[m]', 'M8[s]', 'm8[m]'])
def test_multiply(self):
ufunc = np.multiply
fn = _make_ufunc_usecase(ufunc)
# scaled output
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[s]', 'q', 'm8[us]'])
self._check_ufunc_with_dtypes(fn, ufunc, ['q', 'm8[s]', 'm8[us]'])
# Cannot upscale result (Numpy would accept this)
with self.assertRaises(LoweringError):
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[s]', 'q', 'm8[m]'])
def test_true_divide(self):
ufunc = np.true_divide
fn = _make_ufunc_usecase(ufunc)
        # heterogeneous inputs
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'm8[s]', 'd'])
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[s]', 'm8[m]', 'd'])
# scaled output
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'q', 'm8[s]'])
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'd', 'm8[s]'])
# Cannot upscale result (Numpy would accept this)
with self.assertRaises(LoweringError):
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[s]', 'q', 'm8[m]'])
def test_floor_divide(self):
ufunc = np.floor_divide
fn = _make_ufunc_usecase(ufunc)
# scaled output
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'q', 'm8[s]'])
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'd', 'm8[s]'])
# Cannot upscale result (Numpy would accept this)
with self.assertRaises(LoweringError):
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[s]', 'q', 'm8[m]'])
def _check_comparison(self, ufunc):
fn = _make_ufunc_usecase(ufunc)
# timedelta
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'm8[s]', '?'])
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[s]', 'm8[m]', '?'])
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'm8', '?'])
self._check_ufunc_with_dtypes(fn, ufunc, ['m8', 'm8[m]', '?'])
# datetime
self._check_ufunc_with_dtypes(fn, ufunc, ['M8[m]', 'M8[s]', '?'])
self._check_ufunc_with_dtypes(fn, ufunc, ['M8[s]', 'M8[m]', '?'])
def test_comparisons(self):
for ufunc in [np.equal, np.not_equal, np.less, np.less_equal,
np.greater, np.greater_equal]:
self._check_comparison(ufunc)
TestLoopTypesDatetimeNoPython.autogenerate()
class TestUFuncBadArgsNoPython(TestCase):
_compile_flags = no_pyobj_flags
def test_missing_args(self):
def func(x):
"""error: np.add requires two args"""
result = np.add(x)
return result
self.assertRaises(TypingError, compile_isolated, func, [types.float64],
return_type=types.float64, flags=self._compile_flags)
def test_too_many_args(self):
def func(x, out, out2):
"""error: too many args"""
result = np.add(x, x, out, out2)
return result
array_type = types.Array(types.float64, 1, 'C')
self.assertRaises(TypingError, compile_isolated, func, [array_type] *3,
return_type=array_type, flags=self._compile_flags)
def test_no_scalar_result_by_reference(self):
def func(x):
"""error: scalar as a return value is not supported"""
y = 0
np.add(x, x, y)
self.assertRaises(TypingError, compile_isolated, func, [types.float64],
return_type=types.float64, flags=self._compile_flags)
if __name__ == '__main__':
unittest.main()
|
|
"""
This module holds device-related components.
A 'Device' is any computer running the clientapp or the serverapp.
"""
import logging
from ConfigParser import SafeConfigParser
import config as config
import time
from player import LocalPlayer, RemotePlayer
from socket import *
import select
import os
import threading
class Capability:
ProvideContent="ProvideContent"
PlayMusic="PlayMusic"
PlayVideo="PlayVideo"
SyncToStream="SyncToStream"
class DeviceManager:
"""
Keeps a registry of known devices.
"""
def __init__(self, startWatcher = False, watcher = None):
self.parser = SafeConfigParser()
self.activeDevice = None
self.bindEvents(startWatcher, watcher)
#TODO : Load hostname and port from config
self.deleteRegistry()
self.registerLocalDevice()
def registerLocalDevice(self):
self.registerDevice(Device("local","My player", "localhost"))
def bindEvents(self, startWatcher, watcher = None):
self.deviceWatcher = None
if startWatcher:
if watcher:
self.deviceWatcher = watcher
else:
self.deviceWatcher = DeviceWatcher(callback=self.handleDeviceNotificationReceived)
self.deviceWatcher.start()
def handleDeviceNotificationReceived(self, msg):
"""
TODO : Move this.
This method is triggered each time another device
        on the network broadcasts its presence.
If the device is already present in the devices registry,
updates the device-last-seen field in the registry.
If the device is not yet in the registry,
add it and set device-last-seen to now.
"""
device = Device.fromEncodedString(msg)
self.registerDevice(device)
def getDevices(self):
"""
Read all configured devices from the registry.
If the registry could not be read, return None
If no devices were found in the registry, return an empty array
otherwise return an array of Devices.
"""
devices = []
filesRead = self.parser.read(config.getFullFileName("devices.ini"))
if len(filesRead) == 0:
if not self.createRegistry():
return
for device in self.parser.sections():
url = self.parser.get(device, 'url').encode("utf-8")
lastSeen = self.parser.get(device, 'lastSeen')
visibleName = self.parser.get(device, 'visibleName').encode("utf-8")
type = self.parser.get(device, 'type').encode("utf-8")
device = Device(type, visibleName, url, lastSeen)
devices.append(device)
return devices
def getLikelyActiveDevices(self):
return self.getDevices()
def registerDevice(self, device):
"""
Register or update the specified device. Devices are stored into the file devices.ini
from the config folder.
"""
if not config.workspaceIsSet():
print "Cannot register a device when the workspace is not set."
return False
if not isinstance(device, Device):
error = "The specified device argument must inherit from the type devices.Device."
logging.info(error)
raise TypeError(error)
filesRead = self.parser.read(config.getFullFileName("devices.ini"))
if len(filesRead) == 0:
print "The DeviceManager is creating the registry..."
if not self.createRegistry():
print "The DeviceManager could not create the registry."
return False
currentDevices = self.getDevices()
        if currentDevices is not None and device in currentDevices:
self.updateDeviceLastSeenTime(device)
return True
sectionName = device.visibleName
self.parser.add_section(sectionName)
self.parser.set(sectionName, 'visibleName', device.visibleName)
self.parser.set(sectionName, 'url', device.url)
self.parser.set(sectionName, 'type', device.type)
self.parser.set(sectionName, 'lastSeen', str(device.lastSeen))
with open(config.getFullFileName("devices.ini"),'w') as f:
self.parser.write(f)
print "Added device to the registry: {0} {1}".format(device.visibleName, device.url)
return True
def printRegisteredDevices(self):
for device in self.getDevices():
print device.visibleName
    def getActivePlayer(self):
        activeDevice = self.getActiveDevice()
        if activeDevice is None:
            print "There is no active player to select."
            return None
        # TODO: build and return a LocalPlayer or RemotePlayer for activeDevice.
def getActiveDevice(self):
        if self.activeDevice is None:
            devices = self.getDevices()
            if not devices:
                return None
            for device in devices:
                if device.type == "local":
                    print "No device was selected. Using local device '{0}' as default.".format(device.visibleName)
self.activeDevice = device
break
return self.activeDevice
def getActiveDeviceType(self):
activeDev = self.getActiveDevice()
if activeDev:
return activeDev.type
else :
return None
def setActiveDevice(self, device):
print "Set '{0}' as active device.".format(device.visibleName)
self.activeDevice = device
def setActiveDeviceCapabilities(self, capabilities = []):
activeDevice = self.getActiveDevice()
if activeDevice:
return activeDevice.setCapabilities(capabilities)
return False
def updateDeviceLastSeenTime(self, device):
filesRead = self.parser.read(config.getFullFileName("devices.ini"))
if len(filesRead) == 0:
error = "The DeviceManager could not load it's configuration file: {0}".format(config.getFullFileName("devices.ini"))
logging.error(error)
raise Exception(error)
else:
sectionName = device.visibleName
lastSeen = device.lastSeen
self.parser.set(sectionName, 'lastSeen', str(lastSeen))
with open(config.getFullFileName("devices.ini"),'w') as f:
self.parser.write(f)
#print "Updated device lastSeen time: {0}".format(lastSeen)
def createRegistry(self):
try:
print "Creating device registry: {0}".format(config.getFullFileName("devices.ini") or 'Undefined')
with open(config.getFullFileName("devices.ini"), 'w+') as f:
print f
return True
except Exception as e:
print e
return False
def isWatching(self):
if self.deviceWatcher:
return self.deviceWatcher.isRunning()
else:
return False
def deleteRegistry(self):
try:
self.parser = SafeConfigParser()
with open(config.getFullFileName("devices.ini"),'w') as f:
self.parser.write(f)
return True
except Exception as e:
print e
return False
def dispose(self):
print "Disposing DeviceManager..."
if self.deviceWatcher:
self.deviceWatcher.stop()
class DeviceWatcher():
"""
Watch for other devices presence broadcasts.
"""
def __init__(self, portToWatch = 5555, callback = None):
self.portToWatch = portToWatch or config.getProperty("presence_watcher_watched_port")
self.running = False
self.bufferSize = 1024
self.callback = callback
self.sock = socket(AF_INET, SOCK_DGRAM)
self.sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
self.sock.bind(('', self.portToWatch))
self.thread = threading.Thread(target=self._run, name="watcher")
def setCallbackFunc(self, callback):
self.callback = callback
def start(self):
print "Starting to watch for devices UDP broadcasts on port: {0}...".format(self.portToWatch)
self.running = True
self.thread.start()
def isRunning(self):
return self.running
def stop(self):
print "Stopping DeviceWatcher..."
self.running = False
self.sock.close()
print "Stopped DeviceWatcher."
def _run(self):
print "Started DeviceWatcher."
try:
while self.running:
data, addr = self.sock.recvfrom(self.bufferSize)
if self.callback:
self.callback(data)
finally:
self.sock.close()
def getProcName(self):
return self.thread.name
class DevicePresenceBroadcaster():
"""
Notify other devices the presence of this device.
"""
def __init__(self, thisDevice, portToTarget = 5555, delayBetweenBroadcastsInSec = 5):
self.port = portToTarget or config.getProperty("presence_broadcaster_target_port")
self.delay = delayBetweenBroadcastsInSec or config.getProperty("presence_broadcaster_call_delay_seconds")
self.thisDevice = thisDevice
self.running = False
self.sock = socket(AF_INET, SOCK_DGRAM)
self.sock.bind(('', 0))
self.sock.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
self.thread = threading.Thread(target=self._run, name="broadcaster")
def start(self):
print "Starting PresenceBroadcaster with delay =", self.delay, "seconds"
self.running = True
self.thread.start()
def isRunning(self):
return self.running
def stop(self):
print "Stopping DevicePresenceBroadcaster..."
self.running = False
self.sock.close()
print "Stopped PresenceBroadcaster."
def _run(self):
print "Started PresenceBroadcaster."
try:
while self.running:
try:
data = self.thisDevice.encodeForTransport()
self.sock.sendto(data, ('<broadcast>', int(self.port)))
print "Broadcasting {0} presence on UDP port: {1}".format(self.thisDevice.visibleName, self.port)
except Exception as e:
print e
#Wait if broadcaster is running
if self.running:
time.sleep(self.delay)
finally:
self.stop()
def getProcName(self):
return self.thread.name
class Device:
"""
A 'Device' is any computer running the clientapp or the serverapp
"""
def __init__(self, type="local", visibleName = None, url = None, lastSeen = None, capabilities = None):
self.visibleName = visibleName
self.url = url or "0:0"
self.lastSeen = lastSeen or time.localtime()
self.type = type
self.capabilities = capabilities or []
        if ':' in self.url:
            self.host, self.port = self.url.split(':')
        else:
            self.host = self.url
    def isLikelyActive(self):
        # TODO: compare self.lastSeen (a time.struct_time, or the string read
        # back from devices.ini) against a freshness threshold; until then,
        # every known device is reported via getLikelyActiveDevices().
        return False
@staticmethod
def fromEncodedString(encodedString):
"""
        Factory for a Device object from a string encoded with encodeForTransport.
"""
visibleName, url, capabilities = Device.decode(encodedString)
return Device("remote", visibleName=visibleName, url=url, capabilities=capabilities)
def __eq__(self, other):
return (isinstance(other, self.__class__)
and self.visibleName == other.visibleName)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return self.visibleName + " [{0}]".format(self.type)
    def __str__(self):
        return "{0} ({1})".format(self.visibleName, self.url)
def encodeForTransport(self):
"""
        Encode this device as a string for transport over the network.
        @return 'deviceName;192.168.1.1:80;capability1|capability2'
"""
capabilityString = ""
for cap in self.capabilities:
capabilityString = capabilityString + cap + "|"
#Remove trailing '|'
capabilityString = capabilityString[:-1]
encodedDevice = "{0};{1};{2}".format(self.visibleName, self.url, capabilityString)
print encodedDevice
return encodedDevice
@staticmethod
def decode(encodedString):
print encodedString
args = encodedString.split(';')
name = args[0]
url = args[1]
capabilities = args[2].split('|')
return name, url, capabilities
def setCapabilities(self, capabilities):
self.capabilities = capabilities
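# Minimal round-trip sketch (not used by the app) of the transport format
# 'visibleName;host:port;capability1|capability2' handled by
# Device.encodeForTransport and Device.decode. The device name, address and
# capabilities below are purely illustrative.
def _encodeDecodeRoundTripExample():
    dev = Device("remote", visibleName="example-device", url="192.168.1.10:5000",
                 capabilities=[Capability.PlayMusic, Capability.SyncToStream])
    name, url, caps = Device.decode(dev.encodeForTransport())
    assert name == "example-device"
    assert url == "192.168.1.10:5000"
    assert caps == [Capability.PlayMusic, Capability.SyncToStream]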
def testPresenceBroadcaster():
thisDevice = Device(url="localhost:5000", visibleName="test-device")
bc = DevicePresenceBroadcaster(thisDevice, delayBetweenBroadcastsInSec=1)
watcher = DeviceWatcher()
watcher.start()
bc.start()
time.sleep(5)
bc.stop()
watcher.stop()
if __name__ == '__main__':
config.setConfigFolder('../config/')
testPresenceBroadcaster()
#man = DeviceManager()
# man.handleDeviceNotificationReceived("rpi-yam","192.168.1.127:5005")
#print man.printRegisteredDevices()
PRESENCE_BROADCASTER = None
def startPresenceBroadcaster():
    global PRESENCE_BROADCASTER
    thisDevice = Device("rpi")
    PRESENCE_BROADCASTER = DevicePresenceBroadcaster(thisDevice)
    PRESENCE_BROADCASTER.start()
def stopPresenceBroadcaster():
    if PRESENCE_BROADCASTER:
        PRESENCE_BROADCASTER.stop()
|
|
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cld
import unittest
import sys
from testData import *
VERBOSE = False
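# cld.detect returns a 5-tuple; a sketch of a direct call (mirroring the
# unpacking done in runOne below):
#   name, code, isReliable, textBytesFound, details = cld.detect(someUtf8Text)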
# MKM: ported from FullTests in compact_lang_det_unittest_small.cc
class TestCLD(unittest.TestCase):
langsSeen = set()
def runOne(self, expectedLangName, s):
if VERBOSE:
print
print 'Test: %s [%d bytes]' % (expectedLangName, len(s))
detectedLangName, detectedLangCode, isReliable, textBytesFound, details = cld.detect(s)
if VERBOSE:
print ' detected: %s' % detectedLangName
print ' reliable: %s' % (isReliable != 0)
print ' textBytes: %s' % textBytesFound
print ' details: %s' % str(details)
self.langsSeen.add(expectedLangName)
print ' %d langs' % len(self.langsSeen)
self.assertEquals(expectedLangName, detectedLangName)
self.assertTrue(isReliable)
# def testAFAR(self):
# self.runOne('AFAR', kTeststr_aa_Latn)
# def testABKHAZIAN(self):
# self.runOne('ABKHAZIAN', kTeststr_ab_Cyrl)
def testAFRIKAANS(self):
self.runOne('AFRIKAANS', kTeststr_af_Latn)
# def testAMHARIC(self):
# self.runOne('AMHARIC', kTeststr_am_Ethi)
def testARABIC(self):
self.runOne('ARABIC', kTeststr_ar_Arab)
# def testASSAMESE(self):
# self.runOne('ASSAMESE', kTeststr_as_Beng)
# def testAYMARA(self):
# self.runOne('AYMARA', kTeststr_ay_Latn)
# AZERBAIJANI Arab & Cyrl removed 2008.05.27. Just AZERBAIJANI Latn left
# def testAZERBAIJANI(self):
# self.runOne('AZERBAIJANI', kTeststr_az_Arab)
# Missing data: az-Cyrl
# def testAZERBAIJANI(self):
# self.runOne('AZERBAIJANI', kTeststr_az_Latn)
# def testBASHKIR(self):
# self.runOne('BASHKIR', kTeststr_ba_Cyrl)
def testBELARUSIAN(self):
self.runOne('BELARUSIAN', kTeststr_be_Cyrl)
def testBULGARIAN(self):
self.runOne('BULGARIAN', kTeststr_bg_Cyrl)
# def testBIHARI(self):
# self.runOne('BIHARI', kTeststr_bh_Deva)
# def testBISLAMA(self):
# self.runOne('BISLAMA', kTeststr_bi_Latn)
# def testBENGALI(self):
# self.runOne('BENGALI', kTeststr_bn_Beng)
# def testTIBETAN(self):
# self.runOne('TIBETAN', kTeststr_bo_Tibt)
# def testBRETON(self):
# self.runOne('BRETON', kTeststr_br_Latn)
def testSERBIAN(self):
self.runOne('SERBIAN', kTeststr_bs_Cyrl) # NOTE: Not BOSNIAN
# def testCROATIAN(self):
# self.runOne('CROATIAN', kTeststr_bs_Latn) # NOTE: Not BOSNIAN
def testCATALAN(self):
self.runOne('CATALAN', kTeststr_ca_Latn)
def testCHEROKEE(self):
self.runOne('CHEROKEE', kTeststr_chr_Cher)
# def testCORSICAN(self):
# self.runOne('CORSICAN', kTeststr_co_Latn)
# No CREOLES_AND_PIDGINS_ENGLISH_BASED
# No CREOLES_AND_PIDGINS_FRENCH_BASED
# No CREOLES_AND_PIDGINS_OTHER
# No CREOLES_AND_PIDGINS_PORTUGUESE_BASED
def testCZECH(self):
self.runOne('CZECH', kTeststr_cs_Latn)
def testWELSH(self):
self.runOne('WELSH', kTeststr_cy_Latn)
def testDANISH(self):
self.runOne('DANISH', kTeststr_da_Latn)
def testGERMAN(self):
self.runOne('GERMAN', kTeststr_de_Latn)
def testDHIVEHI(self):
self.runOne('DHIVEHI', kTeststr_dv_Thaa)
# def testDZONGKHA(self):
# self.runOne('DZONGKHA', kTeststr_dz_Tibt)
def testGREEK(self):
self.runOne('GREEK', kTeststr_el_Grek)
def testENGLISH(self):
self.runOne('ENGLISH', kTeststr_en_Latn)
def testENGLISH(self):
self.runOne('ENGLISH', kTeststr_en)
# def testESPERANTO(self):
# self.runOne('ESPERANTO', kTeststr_eo_Latn)
def testSPANISH(self):
self.runOne('SPANISH', kTeststr_es_Latn)
def testESTONIAN(self):
self.runOne('ESTONIAN', kTeststr_et_Latn)
# def testBASQUE(self):
# self.runOne('BASQUE', kTeststr_eu_Latn)
def testPERSIAN(self):
self.runOne('PERSIAN', kTeststr_fa_Arab)
def testFINNISH(self):
self.runOne('FINNISH', kTeststr_fi_Latn)
# def testFIJIAN(self):
# self.runOne('FIJIAN', kTeststr_fj_Latn)
# def testFAROESE(self):
# self.runOne('FAROESE', kTeststr_fo_Latn)
def testFRENCH(self):
self.runOne('FRENCH', kTeststr_fr_Latn)
# def testFRISIAN(self):
# self.runOne('FRISIAN', kTeststr_fy_Latn)
def testIRISH(self):
self.runOne('IRISH', kTeststr_ga_Latn)
# def testSCOTS_GAELIC(self):
# self.runOne('SCOTS_GAELIC', kTeststr_gd_Latn)
# def testGALICIAN(self):
# self.runOne('GALICIAN', kTeststr_gl_Latn)
# def testGUARANI(self):
# self.runOne('GUARANI', kTeststr_gn_Latn)
def testGUJARATI(self):
self.runOne('GUJARATI', kTeststr_gu_Gujr)
# def testMANX(self):
# self.runOne('MANX', kTeststr_gv_Latn)
# def testHAUSA(self):
# self.runOne('HAUSA', kTeststr_ha_Latn)
def testHINDI(self):
self.runOne('HINDI', kTeststr_hi_Deva)
def testHINDI2(self):
self.runOne('HINDI', kTeststr_ks)
def testCROATIAN(self):
self.runOne('CROATIAN', kTeststr_hr_Latn) # NOTE: now CROATIAN
# def testHAITIAN_CREOLE(self):
# self.runOne('HAITIAN_CREOLE', kTeststr_ht_Latn)
def testHUNGARIAN(self):
self.runOne('HUNGARIAN', kTeststr_hu_Latn)
def testARMENIAN(self):
self.runOne('ARMENIAN', kTeststr_hy_Armn)
# def testINTERLINGUA(self):
# self.runOne('INTERLINGUA', kTeststr_ia_Latn)
def testMALAY(self):
self.runOne('MALAY', kTeststr_id_Latn)
# def testINTERLINGUE(self):
# self.runOne('INTERLINGUE', kTeststr_ie_Latn)
# def testINUPIAK(self):
# self.runOne('INUPIAK', kTeststr_ik_Latn)
def testICELANDIC(self):
self.runOne('ICELANDIC', kTeststr_is_Latn)
def testITALIAN(self):
self.runOne('ITALIAN', kTeststr_it_Latn)
def testINUKTITUT(self):
self.runOne('INUKTITUT', kTeststr_iu_Cans)
def testHEBREW(self):
self.runOne('HEBREW', kTeststr_iw_Hebr)
def testJAPANESE(self):
self.runOne('Japanese', kTeststr_ja_Hani)
# def testJAVANESE(self):
# self.runOne('JAVANESE', kTeststr_jw_Latn)
def testGEORGIAN(self):
self.runOne('GEORGIAN', kTeststr_ka_Geor)
# def testKHASI(self):
# self.runOne('KHASI', kTeststr_kha_Latn)
# def testKAZAKH(self):
# self.runOne('KAZAKH', kTeststr_kk_Arab)
# def testKAZAKH(self):
# self.runOne('KAZAKH', kTeststr_kk_Cyrl)
# def testKAZAKH(self):
# self.runOne('KAZAKH', kTeststr_kk_Latn)
# def testGREENLANDIC(self):
# self.runOne('GREENLANDIC', kTeststr_kl_Latn)
def testKHMER(self):
self.runOne('KHMER', kTeststr_km_Khmr)
def testKANNADA(self):
self.runOne('KANNADA', kTeststr_kn_Knda)
def testKOREAN(self):
self.runOne('Korean', kTeststr_ko_Hani)
# def testKASHMIRI(self):
# self.runOne('KASHMIRI', kTeststr_ks_Deva)
# KURDISH Latn removed 2008.05.27. Just KURDISH Arab left
# def testKURDISH(self):
# self.runOne('KURDISH', kTeststr_ku_Arab)
# def testKURDISH(self):
# self.runOne('KURDISH', kTeststr_ku_Latn)
# def testKYRGYZ(self):
# self.runOne('KYRGYZ', kTeststr_ky_Arab)
# def testKYRGYZ(self):
# self.runOne('KYRGYZ', kTeststr_ky_Cyrl)
# def testLATIN(self):
# self.runOne('LATIN', kTeststr_la_Latn)
# def testLUXEMBOURGISH(self):
# self.runOne('LUXEMBOURGISH', kTeststr_lb_Latn)
# def testGANDA(self):
# self.runOne('GANDA', kTeststr_lg_Latn)
# def testLINGALA(self):
# self.runOne('LINGALA', kTeststr_ln_Latn)
def testLAOTHIAN(self):
self.runOne('LAOTHIAN', kTeststr_lo_Laoo)
def testLITHUANIAN(self):
self.runOne('LITHUANIAN', kTeststr_lt_Latn)
def testLATVIAN(self):
self.runOne('LATVIAN', kTeststr_lv_Latn)
# def testMALAGASY(self):
# self.runOne('MALAGASY', kTeststr_mg_Latn)
# def testMAORI(self):
# self.runOne('MAORI', kTeststr_mi_Latn)
def testMACEDONIAN(self):
self.runOne('MACEDONIAN', kTeststr_mk_Cyrl)
def testMALAYALAM(self):
self.runOne('MALAYALAM', kTeststr_ml_Mlym)
# def testMONGOLIAN(self):
# self.runOne('MONGOLIAN', kTeststr_mn_Cyrl)
# def testMOLDAVIAN(self):
# self.runOne('MOLDAVIAN', kTeststr_mo_Cyrl)
# def testMARATHI(self):
# self.runOne('MARATHI', kTeststr_mr_Deva)
  def testMALAY2(self):
self.runOne('MALAY', kTeststr_ms_Latn)
# def testMALAY(self):
# self.runOne('MALAY', kTeststr_ms_Latn2)
  def testMALAY3(self):
self.runOne('MALAY', kTeststr_ms_Latn3)
# def testMALTESE(self):
# self.runOne('MALTESE', kTeststr_mt_Latn)
# def testBURMESE(self):
# self.runOne('BURMESE', kTeststr_my_Latn)
# def testBURMESE(self):
# self.runOne('BURMESE', kTeststr_my_Mymr)
# def testNAURU(self):
# self.runOne('NAURU', kTeststr_na_Latn)
# def testNEPALI(self):
# self.runOne('NEPALI', kTeststr_ne_Deva)
def testDUTCH(self):
self.runOne('DUTCH', kTeststr_nl_Latn)
# def testNORWEGIAN_N(self):
# self.runOne('NORWEGIAN_N', kTeststr_nn_Latn)
def testNORWEGIAN(self):
self.runOne('NORWEGIAN', kTeststr_no_Latn)
# def testOCCITAN(self):
# self.runOne('OCCITAN', kTeststr_oc_Latn)
# def testOROMO(self):
# self.runOne('OROMO', kTeststr_om_Latn)
def testORIYA(self):
self.runOne('ORIYA', kTeststr_or_Orya)
def testPUNJABI(self):
self.runOne('PUNJABI', kTeststr_pa_Guru)
def testPOLISH(self):
self.runOne('POLISH', kTeststr_pl_Latn)
# def testPASHTO(self):
# self.runOne('PASHTO', kTeststr_ps_Arab)
def testPORTUGUESE(self):
self.runOne('PORTUGUESE', kTeststr_pt_BR) # NOTE: not PORTUGUESE_B
# nor PORTUGUESE_P
# def testQUECHUA(self):
# self.runOne('QUECHUA', kTeststr_qu_Latn)
# def testRHAETO_ROMANCE(self):
# self.runOne('RHAETO_ROMANCE', kTeststr_rm_Latn)
# def testRUNDI(self):
# self.runOne('RUNDI', kTeststr_rn_Latn)
def testROMANIAN(self):
self.runOne('ROMANIAN', kTeststr_ro_Latn)
def testRUSSIAN(self):
self.runOne('RUSSIAN', kTeststr_ru_Cyrl)
# def testKINYARWANDA(self):
# self.runOne('KINYARWANDA', kTeststr_rw_Latn)
# def testSANSKRIT(self):
# self.runOne('SANSKRIT', kTeststr_sa_Deva)
# def testSANSKRIT(self):
# self.runOne('SANSKRIT', kTeststr_sa_Latn)
# def testSCOTS(self):
# self.runOne('SCOTS', kTeststr_sco_Latn)
# def testSINDHI(self):
# self.runOne('SINDHI', kTeststr_sd_Arab)
# def testSANGO(self):
# self.runOne('SANGO', kTeststr_sg_Latn)
# No SERBO_CROATIAN (sh)
def testSINHALESE(self):
self.runOne('SINHALESE', kTeststr_si_Sinh)
# def testLIMBU(self):
# self.runOne('LIMBU', kTeststr_sit_NP)
def testSLOVAK(self):
self.runOne('SLOVAK', kTeststr_sk_Latn)
def testSLOVENIAN(self):
self.runOne('SLOVENIAN', kTeststr_sl_Latn)
# def testSAMOAN(self):
# self.runOne('SAMOAN', kTeststr_sm_Latn)
# def testSHONA(self):
# self.runOne('SHONA', kTeststr_sn_Latn)
# def testSOMALI(self):
# self.runOne('SOMALI', kTeststr_so_Latn)
# def testALBANIAN(self):
# self.runOne('ALBANIAN', kTeststr_sq_Latn)
  def testSERBIAN2(self):
self.runOne('SERBIAN', kTeststr_sr_Cyrl) # NOTE: now SERBIAN
  def testCROATIAN2(self):
self.runOne('CROATIAN', kTeststr_sr_Latn) # NOTE: Not SERBIAN
  def testCROATIAN3(self):
self.runOne('CROATIAN', kTeststr_sr_ME_Latn) # NOTE: not SERBIAN nor MONTENEGRIN
# def testSISWANT(self):
# self.runOne('SISWANT', kTeststr_ss_Latn)
# def testSESOTHO(self):
# self.runOne('SESOTHO', kTeststr_st_Latn)
# def testSUNDANESE(self):
# self.runOne('SUNDANESE', kTeststr_su_Latn)
def testSWEDISH(self):
self.runOne('SWEDISH', kTeststr_sv_Latn)
def testSWAHILI(self):
self.runOne('SWAHILI', kTeststr_sw_Latn)
def testSYRIAC(self):
self.runOne('SYRIAC', kTeststr_syr_Syrc)
def testTAMIL(self):
self.runOne('TAMIL', kTeststr_ta_Taml)
def testTELUGU(self):
self.runOne('TELUGU', kTeststr_te_Telu)
# Tajik Arab removed 2008.05.27. Just Tajik Cyrl left
# def testTAJIK(self):
# self.runOne('TAJIK', kTeststr_tg_Arab)
# def testTAJIK(self):
# self.runOne('TAJIK', kTeststr_tg_Cyrl)
def testTHAI(self):
self.runOne('THAI', kTeststr_th_Thai)
# def testTIGRINYA(self):
# self.runOne('TIGRINYA', kTeststr_ti_Ethi)
# def testTURKMEN(self):
# self.runOne('TURKMEN', kTeststr_tk_Cyrl)
# def testTURKMEN(self):
# self.runOne('TURKMEN', kTeststr_tk_Latn)
def testTAGALOG(self):
self.runOne('TAGALOG', kTeststr_tl_Latn)
# def testTSWANA(self):
# self.runOne('TSWANA', kTeststr_tn_Latn)
# def testTONGA(self):
# self.runOne('TONGA', kTeststr_to_Latn)
def testTURKISH(self):
self.runOne('TURKISH', kTeststr_tr_Latn)
# def testTSONGA(self):
# self.runOne('TSONGA', kTeststr_ts_Latn)
# def testTATAR(self):
# self.runOne('TATAR', kTeststr_tt_Cyrl)
# def testTATAR(self):
# self.runOne('TATAR', kTeststr_tt_Latn)
# def testTWI(self):
# self.runOne('TWI', kTeststr_tw_Latn)
# def testUIGHUR(self):
# self.runOne('UIGHUR', kTeststr_ug_Arab)
# def testUIGHUR(self):
# self.runOne('UIGHUR', kTeststr_ug_Cyrl)
# def testUIGHUR(self):
# self.runOne('UIGHUR', kTeststr_ug_Latn)
def testUKRAINIAN(self):
self.runOne('UKRAINIAN', kTeststr_uk_Cyrl)
# def testURDU(self):
# self.runOne('URDU', kTeststr_ur_Arab)
# def testUZBEK(self):
# self.runOne('UZBEK', kTeststr_uz_Arab)
# def testUZBEK(self):
# self.runOne('UZBEK', kTeststr_uz_Cyrl)
# def testUZBEK(self):
# self.runOne('UZBEK', kTeststr_uz_Latn)
def testVIETNAMESE(self):
self.runOne('VIETNAMESE', kTeststr_vi_Latn)
# def testVOLAPUK(self):
# self.runOne('VOLAPUK', kTeststr_vo_Latn)
# def testWOLOF(self):
# self.runOne('WOLOF', kTeststr_wo_Latn)
# def testXHOSA(self):
# self.runOne('XHOSA', kTeststr_xh_Latn)
def testYIDDISH(self):
self.runOne('YIDDISH', kTeststr_yi_Hebr)
# def testYORUBA(self):
# self.runOne('YORUBA', kTeststr_yo_Latn)
# Zhuang Hani removed 2008.05.13. Just Zhuang Latn left
# def testZHUANG(self):
# self.runOne('ZHUANG', kTeststr_za_Hani)
# def testZHUANG(self):
# self.runOne('ZHUANG', kTeststr_za_Latn)
def testCHINESE(self):
self.runOne('Chinese', kTeststr_zh_Hani)
def testCHINESE_T(self):
self.runOne('ChineseT', kTeststr_zh_TW)
# def testZULU(self):
# self.runOne('ZULU', kTeststr_zu_Latn)
# No TG_UNKNOWN_LANGUAGE
# No UNKNOWN_LANGUAGE
if __name__ == '__main__':
unittest.main()
|
|
"""Support for Modbus."""
from __future__ import annotations
from typing import cast
import voluptuous as vol
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES_SCHEMA as BINARY_SENSOR_DEVICE_CLASSES_SCHEMA,
)
from homeassistant.components.cover import (
DEVICE_CLASSES_SCHEMA as COVER_DEVICE_CLASSES_SCHEMA,
)
from homeassistant.components.sensor import (
CONF_STATE_CLASS,
DEVICE_CLASSES_SCHEMA as SENSOR_DEVICE_CLASSES_SCHEMA,
STATE_CLASSES_SCHEMA as SENSOR_STATE_CLASSES_SCHEMA,
)
from homeassistant.components.switch import (
DEVICE_CLASSES_SCHEMA as SWITCH_DEVICE_CLASSES_SCHEMA,
)
from homeassistant.const import (
CONF_ADDRESS,
CONF_BINARY_SENSORS,
CONF_COMMAND_OFF,
CONF_COMMAND_ON,
CONF_COUNT,
CONF_COVERS,
CONF_DELAY,
CONF_DEVICE_CLASS,
CONF_HOST,
CONF_LIGHTS,
CONF_METHOD,
CONF_NAME,
CONF_OFFSET,
CONF_PORT,
CONF_SCAN_INTERVAL,
CONF_SENSORS,
CONF_SLAVE,
CONF_STRUCTURE,
CONF_SWITCHES,
CONF_TEMPERATURE_UNIT,
CONF_TIMEOUT,
CONF_TYPE,
CONF_UNIQUE_ID,
CONF_UNIT_OF_MEASUREMENT,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType
from .const import (
CALL_TYPE_COIL,
CALL_TYPE_DISCRETE,
CALL_TYPE_REGISTER_HOLDING,
CALL_TYPE_REGISTER_INPUT,
CALL_TYPE_X_COILS,
CALL_TYPE_X_REGISTER_HOLDINGS,
CONF_BAUDRATE,
CONF_BYTESIZE,
CONF_CLIMATES,
CONF_CLOSE_COMM_ON_ERROR,
CONF_DATA_TYPE,
CONF_FANS,
CONF_INPUT_TYPE,
CONF_LAZY_ERROR,
CONF_MAX_TEMP,
CONF_MIN_TEMP,
CONF_MSG_WAIT,
CONF_PARITY,
CONF_PRECISION,
CONF_RETRIES,
CONF_RETRY_ON_EMPTY,
CONF_SCALE,
CONF_SLAVE_COUNT,
CONF_STATE_CLOSED,
CONF_STATE_CLOSING,
CONF_STATE_OFF,
CONF_STATE_ON,
CONF_STATE_OPEN,
CONF_STATE_OPENING,
CONF_STATUS_REGISTER,
CONF_STATUS_REGISTER_TYPE,
CONF_STEP,
CONF_STOPBITS,
CONF_SWAP,
CONF_SWAP_BYTE,
CONF_SWAP_NONE,
CONF_SWAP_WORD,
CONF_SWAP_WORD_BYTE,
CONF_TARGET_TEMP,
CONF_VERIFY,
CONF_WRITE_TYPE,
DEFAULT_HUB,
DEFAULT_SCAN_INTERVAL,
DEFAULT_TEMP_UNIT,
MODBUS_DOMAIN as DOMAIN,
RTUOVERTCP,
SERIAL,
TCP,
UDP,
DataType,
)
from .modbus import ModbusHub, async_modbus_setup
from .validators import (
duplicate_entity_validator,
duplicate_modbus_validator,
number_validator,
scan_interval_validator,
struct_validator,
)
BASE_SCHEMA = vol.Schema({vol.Optional(CONF_NAME, default=DEFAULT_HUB): cv.string})
BASE_COMPONENT_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_ADDRESS): cv.positive_int,
vol.Optional(CONF_SLAVE, default=0): cv.positive_int,
vol.Optional(
CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL
): cv.positive_int,
vol.Optional(CONF_LAZY_ERROR, default=0): cv.positive_int,
vol.Optional(CONF_UNIQUE_ID): cv.string,
}
)
BASE_STRUCT_SCHEMA = BASE_COMPONENT_SCHEMA.extend(
{
vol.Optional(CONF_INPUT_TYPE, default=CALL_TYPE_REGISTER_HOLDING): vol.In(
[
CALL_TYPE_REGISTER_HOLDING,
CALL_TYPE_REGISTER_INPUT,
]
),
vol.Optional(CONF_COUNT): cv.positive_int,
vol.Optional(CONF_DATA_TYPE, default=DataType.INT16): vol.In(
[
DataType.INT8,
DataType.INT16,
DataType.INT32,
DataType.INT64,
DataType.UINT8,
DataType.UINT16,
DataType.UINT32,
DataType.UINT64,
DataType.FLOAT16,
DataType.FLOAT32,
DataType.FLOAT64,
                DataType.STRING,
DataType.CUSTOM,
]
),
vol.Optional(CONF_STRUCTURE): cv.string,
vol.Optional(CONF_SCALE, default=1): number_validator,
vol.Optional(CONF_OFFSET, default=0): number_validator,
vol.Optional(CONF_PRECISION, default=0): cv.positive_int,
vol.Optional(CONF_SWAP, default=CONF_SWAP_NONE): vol.In(
[
CONF_SWAP_NONE,
CONF_SWAP_BYTE,
CONF_SWAP_WORD,
CONF_SWAP_WORD_BYTE,
]
),
}
)
BASE_SWITCH_SCHEMA = BASE_COMPONENT_SCHEMA.extend(
{
vol.Optional(CONF_WRITE_TYPE, default=CALL_TYPE_REGISTER_HOLDING): vol.In(
[
CALL_TYPE_REGISTER_HOLDING,
CALL_TYPE_COIL,
CALL_TYPE_X_COILS,
CALL_TYPE_X_REGISTER_HOLDINGS,
]
),
vol.Optional(CONF_COMMAND_OFF, default=0x00): cv.positive_int,
vol.Optional(CONF_COMMAND_ON, default=0x01): cv.positive_int,
vol.Optional(CONF_VERIFY): vol.Maybe(
{
vol.Optional(CONF_ADDRESS): cv.positive_int,
vol.Optional(CONF_INPUT_TYPE): vol.In(
[
CALL_TYPE_REGISTER_HOLDING,
CALL_TYPE_DISCRETE,
CALL_TYPE_REGISTER_INPUT,
CALL_TYPE_COIL,
CALL_TYPE_X_COILS,
CALL_TYPE_X_REGISTER_HOLDINGS,
]
),
vol.Optional(CONF_STATE_OFF): cv.positive_int,
vol.Optional(CONF_STATE_ON): cv.positive_int,
vol.Optional(CONF_DELAY, default=0): cv.positive_int,
}
),
}
)
CLIMATE_SCHEMA = vol.All(
BASE_STRUCT_SCHEMA.extend(
{
vol.Required(CONF_TARGET_TEMP): cv.positive_int,
vol.Optional(CONF_MAX_TEMP, default=35): cv.positive_int,
vol.Optional(CONF_MIN_TEMP, default=5): cv.positive_int,
vol.Optional(CONF_STEP, default=0.5): vol.Coerce(float),
vol.Optional(CONF_TEMPERATURE_UNIT, default=DEFAULT_TEMP_UNIT): cv.string,
}
),
)
COVERS_SCHEMA = BASE_COMPONENT_SCHEMA.extend(
{
        vol.Optional(CONF_INPUT_TYPE, default=CALL_TYPE_REGISTER_HOLDING): vol.In(
[
CALL_TYPE_REGISTER_HOLDING,
CALL_TYPE_COIL,
]
),
vol.Optional(CONF_DEVICE_CLASS): COVER_DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_STATE_CLOSED, default=0): cv.positive_int,
vol.Optional(CONF_STATE_CLOSING, default=3): cv.positive_int,
vol.Optional(CONF_STATE_OPEN, default=1): cv.positive_int,
vol.Optional(CONF_STATE_OPENING, default=2): cv.positive_int,
vol.Optional(CONF_STATUS_REGISTER): cv.positive_int,
vol.Optional(
CONF_STATUS_REGISTER_TYPE,
default=CALL_TYPE_REGISTER_HOLDING,
): vol.In([CALL_TYPE_REGISTER_HOLDING, CALL_TYPE_REGISTER_INPUT]),
}
)
SWITCH_SCHEMA = BASE_SWITCH_SCHEMA.extend(
{
vol.Optional(CONF_DEVICE_CLASS): SWITCH_DEVICE_CLASSES_SCHEMA,
}
)
LIGHT_SCHEMA = BASE_SWITCH_SCHEMA.extend({})
FAN_SCHEMA = BASE_SWITCH_SCHEMA.extend({})
SENSOR_SCHEMA = vol.All(
BASE_STRUCT_SCHEMA.extend(
{
vol.Optional(CONF_DEVICE_CLASS): SENSOR_DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_STATE_CLASS): SENSOR_STATE_CLASSES_SCHEMA,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
}
),
)
BINARY_SENSOR_SCHEMA = BASE_COMPONENT_SCHEMA.extend(
{
vol.Optional(CONF_DEVICE_CLASS): BINARY_SENSOR_DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_INPUT_TYPE, default=CALL_TYPE_COIL): vol.In(
[CALL_TYPE_COIL, CALL_TYPE_DISCRETE]
),
vol.Optional(CONF_SLAVE_COUNT, default=0): cv.positive_int,
}
)
MODBUS_SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_HUB): cv.string,
vol.Optional(CONF_TIMEOUT, default=3): cv.socket_timeout,
vol.Optional(CONF_CLOSE_COMM_ON_ERROR, default=True): cv.boolean,
vol.Optional(CONF_DELAY, default=0): cv.positive_int,
vol.Optional(CONF_RETRIES, default=3): cv.positive_int,
vol.Optional(CONF_RETRY_ON_EMPTY, default=False): cv.boolean,
vol.Optional(CONF_MSG_WAIT): cv.positive_int,
vol.Optional(CONF_BINARY_SENSORS): vol.All(
cv.ensure_list, [BINARY_SENSOR_SCHEMA]
),
vol.Optional(CONF_CLIMATES): vol.All(
cv.ensure_list, [vol.All(CLIMATE_SCHEMA, struct_validator)]
),
vol.Optional(CONF_COVERS): vol.All(cv.ensure_list, [COVERS_SCHEMA]),
vol.Optional(CONF_LIGHTS): vol.All(cv.ensure_list, [LIGHT_SCHEMA]),
vol.Optional(CONF_SENSORS): vol.All(
cv.ensure_list, [vol.All(SENSOR_SCHEMA, struct_validator)]
),
vol.Optional(CONF_SWITCHES): vol.All(cv.ensure_list, [SWITCH_SCHEMA]),
vol.Optional(CONF_FANS): vol.All(cv.ensure_list, [FAN_SCHEMA]),
}
)
SERIAL_SCHEMA = MODBUS_SCHEMA.extend(
{
vol.Required(CONF_TYPE): SERIAL,
vol.Required(CONF_BAUDRATE): cv.positive_int,
vol.Required(CONF_BYTESIZE): vol.Any(5, 6, 7, 8),
vol.Required(CONF_METHOD): vol.Any("rtu", "ascii"),
vol.Required(CONF_PORT): cv.string,
vol.Required(CONF_PARITY): vol.Any("E", "O", "N"),
vol.Required(CONF_STOPBITS): vol.Any(1, 2),
}
)
ETHERNET_SCHEMA = MODBUS_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PORT): cv.port,
vol.Required(CONF_TYPE): vol.Any(TCP, UDP, RTUOVERTCP),
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.ensure_list,
scan_interval_validator,
duplicate_entity_validator,
duplicate_modbus_validator,
[
vol.Any(SERIAL_SCHEMA, ETHERNET_SCHEMA),
],
),
},
extra=vol.ALLOW_EXTRA,
)
def get_hub(hass: HomeAssistant, name: str) -> ModbusHub:
"""Return modbus hub with name."""
return cast(ModbusHub, hass.data[DOMAIN][name])
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up Modbus component."""
return await async_modbus_setup(
hass,
config,
)
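# A minimal sketch of a hub definition this schema accepts (hypothetical host,
# port and sensor values, shown only to illustrate the expected YAML shape):
#
#   modbus:
#     - name: hub1
#       type: tcp
#       host: 192.168.1.10
#       port: 502
#       sensors:
#         - name: demo_sensor
#           address: 1
#
# The same structure can be checked in code, e.g.
#   CONFIG_SCHEMA({DOMAIN: [{CONF_TYPE: TCP, CONF_HOST: "192.168.1.10",
#                            CONF_PORT: 502, CONF_NAME: "hub1"}]})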
|
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 22nd 2015 - Martin Vejmelka
Copyright UC Denver 2015
Performs data assimilation for cycling forecasts using WRF.
The algorithm is as follows:
1) check if a previous cycle is available
a) yes?
- run DA using RAWS observations from start of previous cycle up to now (use wrfoutput)
- store the current state in wrfinput
b) no?
    - initialize from equilibrium (compute from wrfinput atmospheric state), perform one DA step using P0
- store fuel moisture state/parameters/covariance
"""
import sys
import os
import numpy as np
from datetime import datetime
import pytz
import netCDF4
from observation import Observation
from wrf_model_data import WRFModelData
from trend_surface_model import fit_tsm
from spatial_model_utilities import great_circle_distance, find_closest_grid_point
from grid_moisture_model import GridMoistureModel
from diagnostics import init_diagnostics, diagnostics
def total_seconds(tdelta):
"""
Utility function for python < 2.7, 2.7 and above have total_seconds()
as a function of timedelta.
"""
return tdelta.microseconds / 1e6 + (tdelta.seconds + tdelta.days * 24 * 3600)
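# For example, total_seconds(datetime(2015, 4, 22, 1, 0, 0) - datetime(2015, 4, 22, 0, 0, 0))
# evaluates to 3600.0.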
def load_raws_observations(obs_file,glat,glon,grid_dist_km):
"""
Loads all of the RAWS observations valid at the time in question
and converts them to Observation objects.
"""
# load observations & register them to grid
orig_obs = []
if os.path.exists(obs_file):
orig_obs = np.loadtxt(obs_file,dtype=np.object,delimiter=',')
else:
print('WARN: no observation file found.')
obss = []
omin, omax = 0.6, 0.0
# format of file
# 0 1 2 3 4 5 6 7 8 9 10 11
# yyyy,mm,dd,hh,MM,ss,lat,lon,elevation,var_id,value,variance
for oo in orig_obs:
ts = datetime(int(oo[0]),int(oo[1]),int(oo[2]),int(oo[3]),int(oo[4]),int(oo[5]),tzinfo=pytz.timezone('GMT'))
lat, lon, elev = float(oo[6]), float(oo[7]), float(oo[8])
obs, ovar = float(oo[10]), float(oo[11])
i, j = find_closest_grid_point(lat,lon,glat,glon)
# compute distance to grid points
dist_grid_pt = great_circle_distance(lon,lat,glon[i,j],glat[i,j])
# check & remove nonsense zero-variance (or negative variance) observations
if ovar > 0 and dist_grid_pt < grid_dist_km / 2.0:
obss.append(Observation(ts,lat,lon,elev,oo[9],obs,ovar,(i,j)))
omin = min(omin, obs)
omax = max(omax, obs)
    print('INFO: loaded %d observations in range %g to %g [%d available]' % (len(obss),omin,omax,len(orig_obs)))
return obss
def check_overlap(wrf_path,ts_now):
"""
    Check if the WRF file <wrf_path> timestamps contain <ts_now>.
"""
wrfout = WRFModelData(wrf_path)
outts = wrfout['GMT']
if ts_now in outts:
return True
else:
print("INFO: previous forecast [%s - %s] exists, running DA till %s" % (str(outts[0]),str(outts[-1]),str(ts_now)))
return False
def build_observation_data(obss):
"""
Repackage the matched time series into a time-indexed structure
which gives details on the observed data and active observation stations.
synopsis: obs_data = build_observation_data(obss)
"""
# repackage all the observations into a time-indexed structure which groups
# observations at the same time together
obs_data = {}
for obs in obss:
t = obs.get_time()
        o = obs_data[t] if t in obs_data else []
o.append(obs)
obs_data[t] = o
return obs_data
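# For example, two observations sharing the same timestamp end up grouped under
# one key (illustrative only):
#
#   obs_data = build_observation_data([obs_a, obs_b])
#   # -> {datetime(2015, 4, 22, 10, 0, tzinfo=GMT): [obs_a, obs_b]}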
def compute_equilibria(T, P, Q):
"""
Computes atmospheric drying/wetting moisture equilibria from the pressure P [Pa],
water vapor mixing ratio Q [-] and the surface temperature [K].
"""
# saturated vapor pressure (at each location, size n x 1)
Pws = np.exp(54.842763 - 6763.22/T - 4.210 * np.log(T) + 0.000367*T + np.tanh(0.0415*(T - 218.8))
* (53.878 - 1331.22/T - 9.44523 * np.log(T) + 0.014025*T))
# water vapor pressure (at each location, size n x 1)
Pw = P * Q / (0.622 + (1 - 0.622) * Q)
# relative humidity (percent, at each location, size n x 1)
H = 100 * Pw / Pws
mxpos = np.unravel_index(np.argmax(H),H.shape)
H = np.minimum(H, 100.)
d = 0.924*H**0.679 + 0.000499*np.exp(0.1*H) + 0.18*(21.1 + 273.15 - T)*(1 - np.exp(-0.115*H))
w = 0.618*H**0.753 + 0.000454*np.exp(0.1*H) + 0.18*(21.1 + 273.15 - T)*(1 - np.exp(-0.115*H))
d *= 0.01
w *= 0.01
# this is here to _ensure_ that drying equilibrium is always higher than (or equal to) wetting equilibrium
Ed = np.maximum(d, w)
Ew = np.minimum(d, w)
return Ed, Ew
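# A rough worked example with scalar inputs T = 300 K, P = 1e5 Pa, Q = 0.01:
# Pws is roughly 3.5e3 Pa, Pw roughly 1.6e3 Pa, so H is about 45 % and the
# equilibria come out near Ed ~ 0.11 and Ew ~ 0.10 (fractions of dry weight).
# Typical usage with gridded model fields:
#
#   Ed, Ew = compute_equilibria(wrf_model['T2'], wrf_model['PSFC'], wrf_model['Q2'])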
def store_covariance_matrix(P, path):
"""
Store the grid of covariance matrices P in the netCDF file path.
"""
dom_shape = (P.shape[0], P.shape[1])
d = netCDF4.Dataset(path, 'w')
d.createDimension('fuel_moisture_classes_stag', 5)
d.createDimension('south_north', dom_shape[0])
d.createDimension('west_east', dom_shape[1])
Ps = d.createVariable('P', 'f4', ('fuel_moisture_classes_stag', 'fuel_moisture_classes_stag', 'south_north', 'west_east'))
Ps[:,:,:,:] = P.transpose((2,3,0,1))
d.close()
def execute_da_step(model, model_time, covariates, fm10):
"""
Execute a single DA step from the current state/extended parameters and covariance matrix using
the <covariates> and observations <fm10>. Assimilation time window is fixed at 60 mins.
"""
valid_times = [z for z in fm10.keys() if abs(total_seconds(z - model_time)) < 1800]
print('INFO: there are %d valid times at model time %s' % (len(valid_times), str(model_time)))
if len(valid_times) > 0:
# retrieve all observations for current time
obs_valid_now = []
for z in valid_times:
obs_valid_now.extend(fm10[z])
for o in obs_valid_now:
            print(o)
fmc_gc = model.get_state()
dom_shape = fmc_gc.shape[:2]
# construct covariate storage
Xd3 = len(covariates) + 1
X = np.zeros((dom_shape[0], dom_shape[1], Xd3))
X[:,:,0] = fmc_gc[:,:,1]
        for i, c in enumerate(covariates):
            X[:,:,i+1] = c
# run the trend surface model (clamp output to [0.0 - 2.5] to be safe)
Kf_fn, Vf_fn = fit_tsm(obs_valid_now, X)
Kf_fn[Kf_fn < 0.0] = 0.0
Kf_fn[Kf_fn > 2.5] = 2.5
# preallocate Kalman gain variable [not really used]
Kg = np.zeros((dom_shape[0], dom_shape[1], 5))
# run the data assimilation step now
print("Mean Kf: %g Vf: %g state[0]: %g state[1]: %g state[2]: %g\n" %
(np.mean(Kf_fn), np.mean(Vf_fn), np.mean(fmc_gc[:,:,0]), np.mean(fmc_gc[:,:,1]), np.mean(fmc_gc[:,:,2])))
model.kalman_update_single2(Kf_fn[:,:,np.newaxis], Vf_fn[:,:,np.newaxis,np.newaxis], 1, Kg)
print("Mean Kf: %g Vf: %g state[0]: %g state[1]: %g state[2]: %g\n" %
(np.mean(Kf_fn), np.mean(Vf_fn), np.mean(fmc_gc[:,:,0]), np.mean(fmc_gc[:,:,1]), np.mean(fmc_gc[:,:,2])))
def init_from_equilibrium(wrf_model, fm10, ts_now, cfg):
"""
Initialize from the wrf_model equilibrium.
"""
lat, lon = wrf_model.get_lats(), wrf_model.get_lons()
dom_shape = lat.shape
T2 = wrf_model['T2']
Q2 = wrf_model['Q2']
PSFC = wrf_model['PSFC']
hgt = wrf_model['HGT']
rain = wrf_model['RAIN']
rain = np.log(rain + 1.0)
constant = np.ones_like(T2)
Ed,Ew = compute_equilibria(T2,PSFC,Q2)
E = 0.5 * (Ed[0,:,:] + Ew[0,:,:])
P0 = np.diag(cfg['P0'])
Tk = np.array([1.0, 10.0, 100.0]) * 3600
model = GridMoistureModel(E[:,:,np.newaxis][:,:,np.zeros((3,),dtype=np.int)], Tk, P0)
# execute single DA step on the equilibrium with background covariance
covariates = [T2,PSFC,lon - np.mean(lon),lat - np.mean(lat),hgt - np.mean(hgt),
np.ones(dom_shape),rain]
execute_da_step(model, ts_now, covariates, fm10)
return model
def run_data_assimilation(wrf_model, fm10, ts_now, cfg):
lat, lon = wrf_model.get_lats(), wrf_model.get_lons()
dom_shape = lat.shape
T2 = wrf_model['T2']
Q2 = wrf_model['Q2']
PSFC = wrf_model['PSFC']
hgt = wrf_model['HGT']
rain = wrf_model['RAIN']
rain = np.log(rain + 1.0)
constant = np.ones_like(T2)
Ed,Ew = compute_equilibria(T2,PSFC,Q2)
E = 0.5 * (Ed[0,:,:] + Ew[0,:,:])
P0 = np.diag(cfg['P0'])
Tk = np.array([1.0, 10.0, 100.0]) * 3600
model = GridMoistureModel(E[:,:,np.newaxis][:,:,np.zeros((3,),dtype=np.int)], Tk, P0)
# try to find a stored covariance matrix
    cov_path = os.path.join(os.path.dirname(cfg['wrf_output_prev']), 'P.nc')
if os.path.exists(cov_path):
print('INFO: found stored covariance matrix, loading for init (also FMC_GC)...')
model.get_state()[:,:,:3] = wrf_model['FMC_GC'][0,:3,:,:].transpose((1,2,0))
model.get_state()[:,:,3:5] = wrf_model['FMEP'][0,:,:,:].transpose((1,2,0))
d = netCDF4.Dataset(cov_path)
model.get_state_covar()[:,:,:,:] = d.variables['P'][:,:,:,:]
else:
        print('INFO: no covariance matrix found, initializing with background covariance.')
    return model
def run_module():
# read in configuration file to execute run
print("Reading configuration from [%s]" % sys.argv[1])
with open(sys.argv[1]) as f:
cfg = eval(f.read())
# init diagnostics
init_diagnostics(os.path.join(cfg['output_dir'], 'moisture_model_v1_diagnostics.txt'))
diagnostics().configure_tag("s2_eta_hat", True, True, True)
diagnostics().configure_tag("kriging_rmse", True, True, True)
diagnostics().configure_tag("kriging_beta", True, True, True)
diagnostics().configure_tag("kriging_iters", False, True, True)
diagnostics().configure_tag("kriging_subzero_s2_estimates", False, True, True)
# load the wrfinput file
wrfin = WRFModelData(cfg['wrf_input'], ['T2', 'Q2', 'PSFC', 'HGT', 'FMC_GC', 'FMEP'])
lat, lon = wrfin.get_lats(), wrfin.get_lons()
ts_now = wrfin['GMT'][0]
dom_shape = lat.shape
print('INFO: domain size is %d x %d grid points, wrfinput timestamp %s' % (dom_shape[0], dom_shape[1], str(ts_now)))
print('INFO: domain extent is lats (%g to %g) lons (%g to %g).' % (np.amin(lat),np.amax(lat),np.amin(lon),np.amax(lon)))
# compute the diagonal distance between grid points
grid_dist_km = great_circle_distance(lon[0,0], lat[0,0], lon[1,1], lat[1,1])
print('INFO: diagonal distance in grid is %g' % grid_dist_km)
# load observations but discard those too far away from the grid nodes
obss = load_raws_observations(cfg['observations'], lat, lon, grid_dist_km)
fm10 = build_observation_data(obss)
print('INFO: %d different time instances found in observations' % len(fm10))
# if a previous cycle is available (i.e. the wrfoutput is a valid file)
if os.path.exists(cfg['wrf_output_prev']) and check_overlap(cfg['wrf_output_prev'],ts_now):
# load the model as a wrfout with all default variables
wrfout = WRFModelData(cfg['wrf_output_prev'])
outts = wrfout['GMT']
print("INFO: previous forecast [%s - %s] exists, running DA till %s" % (str(outts[0]),str(outts[-1]),str(ts_now)))
# run from the start until now (retrieve fuel moisture, extended parameters, covariance matrix)
model = run_data_assimilation(wrfout, fm10, ts_now, cfg)
        # store the analysis for the current time instance (fuel moisture and extended params in the wrfinput, P next to it)
        d = netCDF4.Dataset(cfg['wrf_input'], 'r+')
        fmcep = model.get_state()
        d.variables['FMC_GC'][0,:3,:,:] = fmcep[:,:,:3].transpose((2,0,1))
        d.variables['FMEP'][0,:,:,:] = fmcep[:,:,3:5].transpose((2,0,1))
        d.close()
        # store the covariance matrix alongside the wrfinput file
        store_covariance_matrix(model.get_state_covar(), os.path.join(os.path.dirname(cfg['wrf_input']), 'P.nc'))
else:
print("INFO: no previous forecast found, running DA from equilibrium at %s" % (str(ts_now)))
# initialize from weather equilibrium and perform one DA step
model = init_from_equilibrium(wrfin, fm10, ts_now, cfg)
# store result in wrfinput dataset
d = netCDF4.Dataset(cfg['wrf_input'], 'r+')
fmcep = model.get_state()
d.variables['FMC_GC'][0,:3,:,:] = fmcep[:,:,:3].transpose((2,0,1))
d.variables['FMEP'][0,:,:,:] = fmcep[:,:,3:5].transpose((2,0,1))
d.close()
store_covariance_matrix(model.get_state_covar(), os.path.join(os.path.dirname(cfg['wrf_input']), 'P.nc'))
return 0
if __name__ == '__main__':
# import profile
# import pstats
# profile.run('run_module(); print', 'spatial_model.stats')
# stats = pstats.Stats('spatial_model.stats')
# stats.strip_dirs()
# stats.sort_stats('cumulative')
# stats.print_stats()
if len(sys.argv) != 2:
print('USAGE: fmda_cycle.py <cfg-file>')
sys.exit(1)
run_module()
sys.exit(0)
|
|
#!/usr/bin/python
import common
#-----
import re
import os
import string
import sys
import time
import traceback
import ConfigParser
import StringIO
import inspect
#### Class Configuration ############################
class Configuration:
def __init__ (self, fileName):
self.cp = None
self.inifile = fileName
#-- check for duplicate sections in config file --
self.check_for_duplicate_sections()
try:
#-- check version of parser to use ---
      if sys.version_info >= (2, 3):
self.cp = ConfigParser.SafeConfigParser()
else:
self.cp = ConfigParser.ConfigParser()
#-- read the ini file ---
real_fp = open(self.inifile, 'r')
string_fp = StringIO.StringIO(real_fp.read())
self.cp.readfp(string_fp,self.inifile)
except Exception, e:
common.logerr("%s" % e)
#-- additional check for syntax errors --
self.syntax_check()
#----------------
def syntax_check(self):
""" Checks for some syntax errors in ini config file. """
for section in self.sections():
for option in self.options(section):
value = self.option_value(section,option)
if "\n" in value:
line = string.split(value,"\n")
common.logerr("Section [%s]: this line starts with whitespace ( %s)\n Please remove the leading space or comment (;) the line." % (section,line[1]))
#----------------
def __str__ (self):
result = []
result.append('<Configuration from %s>' % self.inifile)
for section in self.sections():
result.append('[%s]' % section)
for option in self.options(section):
value = self.option_value(section, option)
result.append(' %-25s %s' % (option, value))
return '\n'.join(result)
#----------------
def check_for_duplicate_sections(self):
""" Check for duplicate sections in a config file ignoring commented ones.
In addition, checks to verify there is no whitespace preceding or
appending the section name in brackets as the ini parser does not
validate for this.
"""
if (self.inifile == "") or (self.inifile is None):
common.logerr("System error: config file name is empty")
try:
fp = open(self.inifile, 'r')
except:
common.logerr("Problem reading ini file: %s" % sys.exc_info()[1])
sections = {} # used to keep track of sections
duplicates = [] # used to identify duplicates
for line in fp.readlines():
newline = line.strip()
if len(newline) == 0:
continue
if newline[0] != "[":
continue
#--- see if it is a section ---
match = re.search('\[(.*)\]', newline)
if match:
section = match.group(1).lower().strip()
if section in sections:
duplicates.append(section)
continue
sections[section] = True
    if (len(duplicates) != 0 ):
common.logerr("Duplicate sections in %s - %s" % (self.inifile,duplicates))
#----------------
def validate_section(self,section,valid_option_list):
if not self.has_section(section):
common.logerr("Section (%s) does not exist in ini file (%s)" % (section,self.inifile))
errors = []
for option in valid_option_list:
if self.has_option(section,option):
continue
errors.append(option)
if len(errors) > 0:
common.logerr("These options are not defined in the %s section of the ini file: %s" % (section,errors))
#----------------
def section_options(self):
result = []
for section in self.sections():
result.append('[%s]' % section)
for option in self.options(section):
result.append(' %-25s' % (option))
return '\n'.join(result)
#----------------
def filename(self):
return self.inifile
#----------------
def sections(self):
sections = list(self.cp.sections())
sections.sort()
return sections
#----------------
def options(self,section):
options = self.cp.options(section)
options.sort()
return options
#----------------
def option_value(self,section,option):
""" Due they way python os.path.basename/dirname work, we cannot let a
pathname end in a '/' or we may see inconsistent results. So we
are stripping all option values of trailing '/'s.
"""
value = ""
if self.has_option(section,option):
try:
value = self.cp.get(section,option)
except Exception,e:
common.logerr("ini file error: %s" % e.__str__())
#-- cannot let paths end in a '/' --
while len(value) > 0 and value[len(value)-1] == "/":
value = value[0:len(value)-1].strip()
return value
#----------------
def has_section(self, section):
return self.cp.has_section(section)
#----------------
def has_option(self, section,option):
return self.cp.has_option(section,option)
#----------------
def delete_section(self, section):
self.cp.remove_section(section)
return
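# Example usage of the Configuration class (hypothetical file name):
#   ini = Configuration("/path/to/config.ini")
#   if ini.has_section("Local Settings"):
#     for option in ini.options("Local Settings"):
#       print option, ini.option_value("Local Settings", option)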
#### Exceptions #####################################
class ConfigurationError(Exception):
pass
class UsageError(Exception):
pass
#####################################################
#---------------------
#---- Functions ------
#---------------------
def compare_ini_files (file_1, file_2, no_local_settings):
try:
print "Comparing ini files: %s / %s" % (file_1,file_2)
ini_1 = Configuration(file_1)
ini_2 = Configuration(file_2)
rtn = 0
#--- remove the Local Settings section conditionally ---
if ( no_local_settings ):
ini_1.delete_section("Local Settings")
ini_2.delete_section("Local Settings")
print "... Checking section information:"
if ( ini_1.sections() == ini_2.sections() ):
print "... sections are identical"
else:
print "... WARNING: section information differs"
compare_sections(ini_1,ini_2)
compare_sections(ini_2,ini_1)
rtn = 1
print
print "... Checking section/object information:"
if ( ini_1.section_options() == ini_2.section_options() ):
print "... all section/objects are identical"
else:
print "... WARNING: section/object information differs"
compare_options(ini_1,ini_2)
compare_options(ini_2,ini_1)
rtn = 1
except:
raise
return rtn
#--------------------------------
def compare_sections(ini1,ini2):
print """
... Sections in %s
NOT FOUND in %s""" % (ini1.filename(),ini2.filename())
for section in ini1.sections():
if ( ini2.has_section(section) ):
continue
else:
print " %s" % (section)
#--------------------------------
def compare_options(ini1,ini2):
print """
... Section/objects in %s
NOT FOUND in %s""" % (ini1.filename(),ini2.filename())
for section in ini1.sections():
for option in ini1.options(section):
## if (section == "SE CHANGEME"):
## if (option == "enable"):
## print section,option
if ( ini2.has_option(section,option) == False):
print " %-20s/%s" % (section,option)
#--------------------------------
def usage(pgm):
print
print "Usage: " + pgm + " --compare file1 file2"
print " " + pgm + " --show-options file"
print " " + pgm + " --show-values file"
print " " + pgm + " --validate file"
print " " + pgm + " --help | -h "
print """
compare .......... Shows the differences between the
section/objects (not values) of the 2 ini files
returns 0 if identical
returns 1 if any differences
show-options ...... Shows the section/objects for the ini file
show-values ....... Shows the section/objects/values for the ini file
validate .......... Verifies the ini file has no syntax errors
Full path to the files must be specified unless this is executed
in the directory in which they reside.
"""
#----------------------------------
def run_unit_tests(pgm):
try:
dir="./testdata/"
tests = {
"no arguments" : [1,pgm],
"not enough arguments": [1,pgm,"--validate"],
"not enough arguments": [1,pgm,"--compare"],
"invalid argument" : [1,pgm,"--bad-arg",dir+"non-existent-file"],
## validate ###
"validate: good ini" : [0,pgm,"--validate",dir+"config-good.ini"],
"validate: bad ini" : [1,pgm,"--validate",dir+"config-bad.ini"],
"validate: no ini" : [1,pgm,"--validate",dir+"non-existent-file"],
"duplicates" : [1,pgm,"--validate",dir+"config-w-dup-sections.ini"],
## compare ###
"compare: no difference" : [0,pgm,"--compare",dir+"config-good.ini",dir+"config-good.ini"],
"compare: differences" : [1,pgm,"--compare",dir+"config-good.ini",dir+"config-good-different.ini"],
"compare: no ini" : [1,pgm,"--validate",dir+"non-existent-file"],
"duplicates" : [1,pgm,"--compare",dir+"config-good.ini config-w-dup-sections.ini"],
## show-options ###
"show-options" : [0,pgm,"--show-options",dir+"config-good.ini"],
"show-options: bad ini" : [1,pgm,"--show-options",dir+"config-bad.ini"],
"show-options: no ini" : [1,pgm,"--show-options",dir+"non-existent-file"],
"duplicates" : [1,pgm,"--show-options",dir+"config-w-dup-sections.ini"],
## show-values ###
"show-values" : [0,pgm,"--show-values",dir+"config-good.ini"],
"show-values: bad ini" : [1,pgm,"--show-values",dir+"config-bad.ini"],
"show-values: no ini" : [1,pgm,"--show-values",dir+"non-existent-file"],
"duplicates" : [1,pgm,"--show-values",dir+"config-w-dup-sections.ini"],
}
#---- run tests -----
n=0
for test in tests.keys():
n = n + 1
args = tests[test]
expected_rtn = args[0]
print "-----------------------------------------------------------------"
print "-- Test %d: %s" % (n,test)
print "-- ",args[1:]
print "-- Expected return code: %s" % expected_rtn
rtn = main(args[1:])
print "-- Return code: %s" % rtn
if ( rtn == expected_rtn ):
print "-- Test %d: %s - SUCCESSFUL" % (n,test)
print "-----------------------------------------------------------------"
else:
raise ConfigurationError("-- Test %d: %s - FAILED" % (n,test))
except:
raise
print "**********## All %d tests passed ***************" % n
#--------------------
def validate_args(opts,expected_args):
if ( len(opts) < expected_args ):
raise UsageError("Insufficient number of arguments for option selected")
#---------------------
def show_line():
x = traceback.extract_tb(sys.exc_info()[2])
z = x[len(x)-1]
return "%s line %s" % (z[2],z[1])
############################# Main Program ##############################
def main(opts=None):
try:
#--- process command line arguments ---
##print len(opts)
validate_args(opts,2)
opt = opts[1]
if (opt == "-h") or (opt == "--help"):
usage(opts[0]);return 1
elif (opt == "--compare") or (opt == "-compare"):
validate_args(opts,4)
return compare_ini_files(opts[2],opts[3],False)
elif (opt == "--compare-no-local") or (opt == "-compare-no-local"):
validate_args(opts,4)
return compare_ini_files(opts[2],opts[3],True)
elif (opt == "--show-options") or (opt == "-show-options"):
validate_args(opts,3)
ini = Configuration(opts[2])
print ini.section_options()
elif (opt == "--show-values") or (opt == "-show-values"):
validate_args(opts,3)
ini = Configuration(opts[2])
print ini
    elif (opt == "--validate") or (opt == "-validate"):
      validate_args(opts,3)
      ini = Configuration(opts[2])
print "... configuration ini file has no syntax errors"
elif (opt == "--test") or (opt == "-test"):
run_unit_tests(opts[0])
else:
raise UsageError("Invalid command line option")
except ConfigurationError, e:
print;print "Configuration ERROR: %s" % e;return 1
except UsageError, e:
usage(opts[0])
print "Usage ERROR: %s" % e;return 1
except Exception, e:
print;print "Exception ERROR: %s - %s" % (show_line(),e);return 1
return 0
#--------------------------
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for core.storage.base_model.gae_models."""
from __future__ import annotations
import datetime
import re
import types
from core import feconf
from core import python_utils
from core.constants import constants
from core.platform import models
from core.tests import test_utils
from typing import Dict, List, Set, Union, cast
MYPY = False
if MYPY: # pragma: no cover
from mypy_imports import base_models
(base_models,) = models.Registry.import_models([models.NAMES.base_model])
class BaseModelUnitTests(test_utils.GenericTestBase):
"""Test the generic base model."""
def tearDown(self) -> None:
"""Deletes all model entities."""
for entity in base_models.BaseModel.get_all():
entity.delete()
super(BaseModelUnitTests, self).tearDown()
def test_get_deletion_policy(self) -> None:
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
NotImplementedError,
re.escape(
'The get_deletion_policy() method is missing from the '
'derived class. It should be implemented in the '
'derived class.')):
base_models.BaseModel.get_deletion_policy()
def test_has_reference_to_user_id(self) -> None:
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
NotImplementedError,
re.escape(
'The has_reference_to_user_id() method is missing from the '
'derived class. It should be implemented in the '
'derived class.')):
base_models.BaseModel.has_reference_to_user_id('user_id')
def test_error_cases_for_get_method(self) -> None:
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
base_models.BaseModel.EntityNotFoundError,
'Entity for class BaseModel with id Invalid id not found'):
base_models.BaseModel.get('Invalid id')
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
base_models.BaseModel.EntityNotFoundError,
'Entity for class BaseModel with id Invalid id not found'):
base_models.BaseModel.get('Invalid id', strict=True)
self.assertIsNone(
base_models.BaseModel.get('Invalid id', strict=False))
def test_base_model_export_data_raises_not_implemented_error(self) -> None:
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
NotImplementedError,
re.escape(
'The export_data() method is missing from the '
'derived class. It should be implemented in the '
'derived class.')):
base_models.BaseModel.export_data('')
def test_get_model_association_to_user_raises_not_implemented_error(
self
) -> None:
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
NotImplementedError,
re.escape(
'The get_model_association_to_user() method is missing from '
'the derived class. It should be implemented in the '
'derived class.')):
base_models.BaseModel.get_model_association_to_user()
def test_export_data(self) -> None:
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
NotImplementedError,
re.escape(
'The export_data() method is missing from the derived '
'class. It should be implemented in the derived class.')):
base_models.BaseModel.export_data('user_id')
def test_generic_query_put_get_and_delete_operations(self) -> None:
model = base_models.BaseModel()
all_models = base_models.BaseModel.get_all()
self.assertEqual(all_models.count(), 0)
model.update_timestamps()
model.put()
all_models = base_models.BaseModel.get_all()
self.assertEqual(all_models.count(), 1)
base_model = all_models.get()
# Ruling out the possibility of None for mypy type checking.
assert base_model is not None
self.assertEqual(base_model, model)
model_id = base_model.id
self.assertEqual(model, base_models.BaseModel.get(model_id))
model.delete()
all_models = base_models.BaseModel.get_all()
self.assertEqual(all_models.count(), 0)
self.assertEqual(model_id, 4)
with self.assertRaisesRegexp(
base_models.BaseModel.EntityNotFoundError,
'Entity for class BaseModel with id 4 not found'
):
model.get(model_id)
def test_put(self) -> None:
model = base_models.BaseModel()
self.assertIsNone(model.created_on)
self.assertIsNone(model.last_updated)
# Field last_updated will get updated anyway because it is None.
model.update_timestamps(update_last_updated_time=False)
model.put()
model_id = model.id
self.assertIsNotNone(
base_models.BaseModel.get_by_id(model_id).created_on)
self.assertIsNotNone(
base_models.BaseModel.get_by_id(model_id).last_updated)
last_updated = model.last_updated
# Field last_updated won't get updated because update_last_updated_time
# is set to False and last_updated already has some value.
model.update_timestamps(update_last_updated_time=False)
model.put()
self.assertEqual(
base_models.BaseModel.get_by_id(model_id).last_updated,
last_updated)
# Field last_updated will get updated because update_last_updated_time
# is set to True (by default).
model.update_timestamps()
model.put()
self.assertNotEqual(
base_models.BaseModel.get_by_id(model_id).last_updated,
last_updated)
def test_put_without_update_timestamps(self) -> None:
model = base_models.BaseModel()
self.assertIsNone(model.created_on)
self.assertIsNone(model.last_updated)
# First `put` does not raise an Exception because it sets last_updated
# automatically since it is None.
model.put()
# Immediately calling `put` again fails, because update_timestamps needs
# to be called first.
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
Exception, re.escape('did not call update_timestamps()')
):
model.put()
model = base_models.BaseModel.get_by_id(model.id)
# Getting a fresh model requires update_timestamps too.
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
Exception, re.escape('did not call update_timestamps()')
):
model.put()
model.update_timestamps()
# OK, update_timestamps called before put.
model.put()
def test_put_multi(self) -> None:
models_1 = [base_models.BaseModel() for _ in range(3)]
for model in models_1:
self.assertIsNone(model.created_on)
self.assertIsNone(model.last_updated)
# Field last_updated will get updated anyway because it is None.
base_models.BaseModel.update_timestamps_multi(
models_1, update_last_updated_time=False)
base_models.BaseModel.put_multi(models_1)
model_ids = [model.id for model in models_1]
last_updated_values = []
for model_id in model_ids:
model = base_models.BaseModel.get_by_id(model_id)
self.assertIsNotNone(model.created_on)
self.assertIsNotNone(model.last_updated)
last_updated_values.append(model.last_updated)
# Field last_updated won't get updated because update_last_updated_time
# is set to False and last_updated already has some value.
models_2_without_none = cast(
List[base_models.BaseModel],
base_models.BaseModel.get_multi(model_ids)
)
base_models.BaseModel.update_timestamps_multi(
models_2_without_none, update_last_updated_time=False)
base_models.BaseModel.put_multi(models_2_without_none)
for model_id, last_updated in python_utils.ZIP(
model_ids, last_updated_values):
model = base_models.BaseModel.get_by_id(model_id)
self.assertEqual(model.last_updated, last_updated)
# Field last_updated will get updated because update_last_updated_time
# is set to True (by default).
models_3_without_none = cast(
List[base_models.BaseModel],
base_models.BaseModel.get_multi(model_ids)
)
base_models.BaseModel.update_timestamps_multi(models_3_without_none)
base_models.BaseModel.put_multi(models_3_without_none)
for model_id, last_updated in python_utils.ZIP(
model_ids, last_updated_values):
model = base_models.BaseModel.get_by_id(model_id)
self.assertNotEqual(model.last_updated, last_updated)
def test_get_multi(self) -> None:
model1 = base_models.BaseModel()
model2 = base_models.BaseModel()
model3 = base_models.BaseModel()
model2.deleted = True
model1.update_timestamps()
model1.put()
model2.update_timestamps()
model2.put()
model3.update_timestamps()
model3.put()
model1_id = model1.id
model2_id = model2.id
model3_id = model3.id
# For all the None ids, get_multi should return None at the appropriate
# position.
result = base_models.BaseModel.get_multi(
[model1_id, model2_id, None, model3_id, 'none', None])
self.assertEqual(result, [model1, None, None, model3, None, None])
def test_delete_multi(self) -> None:
model1 = base_models.BaseModel()
model2 = base_models.BaseModel()
model3 = base_models.BaseModel()
model2.deleted = True
model1.update_timestamps()
model1.put()
model2.update_timestamps()
model2.put()
model3.update_timestamps()
model3.put()
model1_id = model1.id
model2_id = model2.id
model3_id = model3.id
base_models.BaseModel.delete_multi([model1, model2, model3])
result = base_models.BaseModel.get_multi([
model1_id, model2_id, model3_id])
self.assertEqual(result, [None, None, None])
def test_get_new_id_method_returns_unique_ids(self) -> None:
ids: Set[str] = set([])
for _ in range(100):
new_id = base_models.BaseModel.get_new_id('')
self.assertNotIn(new_id, ids)
base_models.BaseModel(id=new_id).put()
ids.add(new_id)
class TestBaseHumanMaintainedModel(base_models.BaseHumanMaintainedModel):
"""Model that inherits the BaseHumanMaintainedModel for testing."""
pass
class BaseHumanMaintainedModelTests(test_utils.GenericTestBase):
"""Test the generic base human maintained model."""
MODEL_ID = 'model1'
def setUp(self) -> None:
super(BaseHumanMaintainedModelTests, self).setUp()
self.model_instance = TestBaseHumanMaintainedModel(id=self.MODEL_ID)
def mock_put(self: base_models.BaseHumanMaintainedModel) -> None:
"""Function to modify and save the entities used for testing
to the datastore.
"""
self._last_updated_timestamp_is_fresh = True
self.last_updated_by_human = datetime.datetime.utcnow()
# These if conditions can be removed once the auto_now property
# is set True to these attributes.
if self.created_on is None:
self.created_on = datetime.datetime.utcnow()
if self.last_updated is None:
self.last_updated = datetime.datetime.utcnow()
# We are using BaseModel.put() to save the changes to the datastore
# since the put() method which TestBaseHumanMaintainedModel class
# inherits from BaseHumanMaintainedModel raises NotImplementedError,
# and we do actually want to save the changes in this case.
base_models.BaseModel.put(self)
with self.swap(TestBaseHumanMaintainedModel, 'put', mock_put):
self.model_instance.put()
def test_put(self) -> None:
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
NotImplementedError, 'Use put_for_human or put_for_bot instead'):
self.model_instance.put()
def test_put_for_human(self) -> None:
previous_last_updated_by_human = (
self.model_instance.last_updated_by_human)
self.model_instance.update_timestamps()
self.model_instance.put_for_human()
self.assertNotEqual(
previous_last_updated_by_human,
self.model_instance.last_updated_by_human)
def test_put_for_bot(self) -> None:
previous_last_updated_by_human = (
self.model_instance.last_updated_by_human)
self.model_instance.update_timestamps()
self.model_instance.put_for_bot()
self.assertEqual(
previous_last_updated_by_human,
self.model_instance.last_updated_by_human)
def test_put_multi(self) -> None:
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
NotImplementedError,
'Use put_multi_for_human or put_multi_for_bot instead'):
TestBaseHumanMaintainedModel.put_multi([])
def test_put_multi_for_human(self) -> None:
previous_last_updated_by_human = (
self.model_instance.last_updated_by_human)
self.model_instance.update_timestamps()
TestBaseHumanMaintainedModel.put_multi_for_human(
[self.model_instance])
self.assertNotEqual(
previous_last_updated_by_human,
self.model_instance.last_updated_by_human)
def test_put_multi_for_bot(self) -> None:
previous_last_updated_by_human = (
self.model_instance.last_updated_by_human)
self.model_instance.update_timestamps()
TestBaseHumanMaintainedModel.put_multi_for_bot(
[self.model_instance])
self.assertEqual(
previous_last_updated_by_human,
self.model_instance.last_updated_by_human)
class TestSnapshotMetadataModel(base_models.BaseSnapshotMetadataModel):
"""Model that inherits the BaseSnapshotMetadataModel for testing."""
pass
class TestSnapshotContentModel(base_models.BaseSnapshotContentModel):
"""Model that inherits the BaseSnapshotContentModel for testing."""
pass
class TestCommitLogEntryModel(base_models.BaseCommitLogEntryModel):
"""Model that inherits the BaseCommitLogEntryModel for testing."""
@classmethod
def get_instance_id(
cls,
target_entity_id: str,
version: Union[int, str]
) -> str:
"""A function that returns the id of the log in BaseCommitLogEntryModel.
Args:
target_entity_id: str. The id of the mock entity used.
version: int. The version of the model after the commit.
Returns:
str. The commit id with the target entity id and version number.
"""
return 'entity-%s-%s' % (target_entity_id, version)
class TestVersionedModel(base_models.VersionedModel):
"""Model that inherits the VersionedModel for testing."""
SNAPSHOT_METADATA_CLASS = TestSnapshotMetadataModel
SNAPSHOT_CONTENT_CLASS = TestSnapshotContentModel
COMMIT_LOG_ENTRY_CLASS = TestCommitLogEntryModel
class BaseCommitLogEntryModelTests(test_utils.GenericTestBase):
def test_get_deletion_policy_is_locally_pseudonymize(self) -> None:
self.assertEqual(
base_models.BaseCommitLogEntryModel.get_deletion_policy(),
base_models.DELETION_POLICY.LOCALLY_PSEUDONYMIZE)
def test_get_model_association_to_user_is_not_corresponding_to_user(
self
) -> None:
self.assertEqual(
base_models.BaseCommitLogEntryModel.get_model_association_to_user(),
base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER)
def test_base_class_get_instance_id_raises_not_implemented_error(
self
) -> None:
# Raise NotImplementedError as _get_instance_id is to be overwritten
# in child classes of BaseCommitLogEntryModel.
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
NotImplementedError,
re.escape(
'The get_instance_id() method is missing from the derived '
'class. It should be implemented in the derived class.')):
base_models.BaseCommitLogEntryModel.get_commit('id', 1)
class BaseSnapshotMetadataModelTests(test_utils.GenericTestBase):
def test_has_reference_to_user_id(self) -> None:
model1 = base_models.BaseSnapshotMetadataModel(
id='model_id-1',
committer_id='committer_id',
commit_type='create',
commit_cmds_user_ids=[
'commit_cmds_user_1_id', 'commit_cmds_user_2_id'],
content_user_ids=['content_user_1_id', 'content_user_2_id'])
model1.update_timestamps()
model1.put()
self.assertTrue(
base_models.BaseSnapshotMetadataModel
.has_reference_to_user_id('committer_id'))
self.assertTrue(
base_models.BaseSnapshotMetadataModel
.has_reference_to_user_id('commit_cmds_user_1_id'))
self.assertTrue(
base_models.BaseSnapshotMetadataModel
.has_reference_to_user_id('commit_cmds_user_2_id'))
self.assertTrue(
base_models.BaseSnapshotMetadataModel
.has_reference_to_user_id('content_user_1_id'))
self.assertTrue(
base_models.BaseSnapshotMetadataModel
.has_reference_to_user_id('content_user_2_id'))
self.assertFalse(
base_models.BaseSnapshotMetadataModel
.has_reference_to_user_id('x_id'))
def test_get_version_string(self) -> None:
model1 = base_models.BaseSnapshotMetadataModel(
id='model_id-1', committer_id='committer_id', commit_type='create')
model1.update_timestamps()
model1.put()
self.assertEqual(model1.get_version_string(), '1')
def test_get_unversioned_instance_id(self) -> None:
model1 = base_models.BaseSnapshotMetadataModel(
id='model_id-1', committer_id='committer_id', commit_type='create')
model1.update_timestamps()
model1.put()
self.assertEqual(model1.get_unversioned_instance_id(), 'model_id')
def test_export_data_trivial(self) -> None:
user_data = (
base_models.BaseSnapshotMetadataModel.export_data('trivial_user'))
expected_data: Dict[str, str] = {}
self.assertEqual(user_data, expected_data)
def test_export_data_nontrivial(self) -> None:
version_model = TestVersionedModel(id='version_model')
model1 = version_model.SNAPSHOT_METADATA_CLASS.create(
'model_id-1', 'committer_id', 'create', None, None)
model1.update_timestamps()
model1.put()
model2 = version_model.SNAPSHOT_METADATA_CLASS.create(
'model_id-2', 'committer_id', 'create', 'Hi this is a commit.',
[{'cmd': 'some_command'}, {'cmd2': 'another_command'}])
model2.update_timestamps()
model2.put()
user_data = (
version_model.SNAPSHOT_METADATA_CLASS.export_data('committer_id'))
expected_data = {
'model_id-1': {
'commit_type': 'create',
'commit_message': None,
},
'model_id-2': {
'commit_type': 'create',
'commit_message': 'Hi this is a commit.',
}
}
self.assertEqual(user_data, expected_data)
class BaseSnapshotContentModelTests(test_utils.GenericTestBase):
def test_get_version_string(self) -> None:
model1 = base_models.BaseSnapshotContentModel(id='model_id-1')
model1.update_timestamps()
model1.put()
self.assertEqual(model1.get_version_string(), '1')
def test_get_unversioned_instance_id(self) -> None:
model1 = base_models.BaseSnapshotContentModel(id='model_id-1')
model1.update_timestamps()
model1.put()
self.assertEqual(model1.get_unversioned_instance_id(), 'model_id')
class CommitLogEntryModelTests(test_utils.GenericTestBase):
"""Test methods for CommitLogEntryModel."""
def test_get_commit(self) -> None:
model1 = TestCommitLogEntryModel.create(
entity_id='id', committer_id='user',
commit_cmds={}, commit_type='create',
commit_message='New commit created.', version=1,
status=constants.ACTIVITY_STATUS_PUBLIC, community_owned=False
)
model1.update_timestamps()
model1.put()
test_model = TestCommitLogEntryModel.get_commit('id', 1)
# Ruling out the possibility of None for mypy type checking.
assert test_model is not None
self.assertEqual(test_model.version, 1)
self.assertEqual(test_model.user_id, 'user')
self.assertEqual(test_model.commit_type, 'create')
self.assertEqual(
test_model.post_commit_status, constants.ACTIVITY_STATUS_PUBLIC)
self.assertEqual(test_model.post_commit_community_owned, False)
self.assertEqual(test_model.post_commit_is_private, False)
def test_get_all_commits(self) -> None:
model1 = TestCommitLogEntryModel.create(
entity_id='id', committer_id='user',
commit_cmds={}, commit_type='create',
commit_message='New commit created.', version=1,
status=constants.ACTIVITY_STATUS_PUBLIC, community_owned=False
)
model2 = TestCommitLogEntryModel.create(
entity_id='id', committer_id='user',
commit_cmds={}, commit_type='edit',
commit_message='New commit created.', version=2,
status=constants.ACTIVITY_STATUS_PUBLIC, community_owned=False
)
model1.update_timestamps()
model1.put()
model2.update_timestamps()
model2.put()
test_models = TestCommitLogEntryModel.get_all_commits(2, None)
self.assertEqual(test_models[0][0].version, 2)
self.assertEqual(test_models[0][1].version, 1)
self.assertEqual(test_models[0][0].commit_type, 'edit')
self.assertEqual(test_models[0][1].commit_type, 'create')
class VersionedModelTests(test_utils.GenericTestBase):
"""Test methods for VersionedModel."""
def test_retrieval_of_multiple_version_models_for_fake_id(self) -> None:
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
ValueError, 'The given entity_id fake_id is invalid'):
TestVersionedModel.get_multi_versions(
'fake_id', [1, 2, 3])
def test_commit_with_model_instance_deleted_raises_error(self) -> None:
model1 = TestVersionedModel(id='model_id1')
model1.commit(feconf.SYSTEM_COMMITTER_ID, '', [])
model1.delete(feconf.SYSTEM_COMMITTER_ID, 'delete')
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
Exception, 'This model instance has been deleted.'):
model1.commit(feconf.SYSTEM_COMMITTER_ID, '', [])
def test_trusted_commit_with_no_snapshot_metadata_raises_error(
self
) -> None:
model1 = TestVersionedModel(id='model_id1')
# TODO(#13528): Remove this test after the backend is fully
# type-annotated. Here ignore[assignment] is used to test method
# commit() for invalid SNAPSHOT_METADATA_CLASS.
model1.SNAPSHOT_METADATA_CLASS = None # type: ignore[assignment]
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
Exception, 'No snapshot metadata class defined.'):
model1.commit(feconf.SYSTEM_COMMITTER_ID, '', [])
model1 = TestVersionedModel(id='model_id1')
# TODO(#13528): Remove this test after the backend is fully
# type-annotated. Here ignore[assignment] is used to test method
# commit() for invalid SNAPSHOT_CONTENT_CLASS.
model1.SNAPSHOT_CONTENT_CLASS = None # type: ignore[assignment]
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
Exception, 'No snapshot content class defined.'):
model1.commit(feconf.SYSTEM_COMMITTER_ID, '', [])
model1 = TestVersionedModel(id='model_id1')
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
Exception, 'Expected commit_cmds to be a list of dicts, received'):
# TODO(#13528): Remove this test after the backend is fully
# type-annotated. Here ignore[arg-type] is used to test method
# commit() for invalid input type.
model1.commit(feconf.SYSTEM_COMMITTER_ID, '', {}) # type: ignore[arg-type]
model1 = TestVersionedModel(id='model_id1')
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
Exception, 'Expected commit_cmds to be a list of dicts, received'):
# TODO(#13528): Remove this test after the backend is fully
# type-annotated. Here ignore[list-item] is used to test method
# commit() for invalid input type.
model1.commit(feconf.SYSTEM_COMMITTER_ID, '', [[]]) # type: ignore[list-item]
def test_put_raises_not_implemented_error_for_versioned_models(
self
) -> None:
model1 = TestVersionedModel(id='model_id1')
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
NotImplementedError,
re.escape(
'The put() method is missing from the derived '
'class. It should be implemented in the derived class.')):
model1.update_timestamps()
model1.put()
def test_force_deletion(self) -> None:
model_id = 'model_id'
model = TestVersionedModel(id=model_id)
model.commit(feconf.SYSTEM_COMMITTER_ID, 'commit_msg', [])
model.commit(feconf.SYSTEM_COMMITTER_ID, 'commit_msg', [])
model.commit(feconf.SYSTEM_COMMITTER_ID, 'commit_msg', [])
model_version_numbers = range(1, model.version + 1)
model_snapshot_ids = [
model.get_snapshot_id(model.id, version_number)
for version_number in model_version_numbers]
model.delete(
feconf.SYSTEM_COMMITTER_ID, 'commit_msg', force_deletion=True)
self.assertIsNone(TestVersionedModel.get_by_id(model_id))
for model_snapshot_id in model_snapshot_ids:
self.assertIsNone(
TestSnapshotContentModel.get_by_id(model_snapshot_id))
self.assertIsNone(
TestSnapshotMetadataModel.get_by_id(model_snapshot_id))
def test_delete_multi(self) -> None:
model_1_id = 'model_1_id'
model_1 = TestVersionedModel(id=model_1_id)
model_1.commit(feconf.SYSTEM_COMMITTER_ID, 'commit_msg', [])
model_1.commit(feconf.SYSTEM_COMMITTER_ID, 'commit_msg', [])
model_1.commit(feconf.SYSTEM_COMMITTER_ID, 'commit_msg', [])
model_1_version_numbers = range(1, model_1.version + 1)
model_1_snapshot_ids = [
model_1.get_snapshot_id(model_1.id, version_number)
for version_number in model_1_version_numbers]
model_2_id = 'model_2_id'
model_2 = TestVersionedModel(id=model_2_id)
model_2.commit(feconf.SYSTEM_COMMITTER_ID, 'commit_msg', [])
model_2.commit(feconf.SYSTEM_COMMITTER_ID, 'commit_msg', [])
model_2_version_numbers = range(1, model_2.version + 1)
model_2_snapshot_ids = [
model_2.get_snapshot_id(model_2.id, version_number)
for version_number in model_2_version_numbers]
with self.swap(feconf, 'MAX_NUMBER_OF_OPS_IN_TRANSACTION', 2):
TestVersionedModel.delete_multi(
[model_1_id, model_2_id],
feconf.SYSTEM_COMMITTER_ID,
'commit_msg',
force_deletion=True)
self.assertIsNone(TestVersionedModel.get_by_id(model_1_id))
for model_snapshot_id in model_1_snapshot_ids:
self.assertIsNone(
TestSnapshotContentModel.get_by_id(model_snapshot_id))
self.assertIsNone(
TestSnapshotMetadataModel.get_by_id(model_snapshot_id))
self.assertIsNone(TestVersionedModel.get_by_id(model_2_id))
for model_snapshot_id in model_2_snapshot_ids:
self.assertIsNone(
TestSnapshotContentModel.get_by_id(model_snapshot_id))
self.assertIsNone(
TestSnapshotMetadataModel.get_by_id(model_snapshot_id))
def test_commit_with_invalid_change_list_raises_error(self) -> None:
model1 = TestVersionedModel(id='model_id1')
# Test for invalid commit command.
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
Exception, 'Invalid commit_cmd:'):
model1.commit(
feconf.SYSTEM_COMMITTER_ID, '', [{'invalid_cmd': 'value'}])
# Test for invalid change list command.
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
Exception, 'Invalid change list command:'):
model1.commit(feconf.SYSTEM_COMMITTER_ID, '', [{'cmd': 'AUTO'}])
def test_revert_raises_error_when_not_allowed(self) -> None:
model1 = TestVersionedModel(id='model_id1')
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
Exception,
'Reverting objects of type TestVersionedModel is not allowed.'):
model1.revert(model1, feconf.SYSTEM_COMMITTER_ID, '', 1)
def test_get_snapshots_metadata_with_invalid_model_raises_error(
self
) -> None:
model1 = TestVersionedModel(id='model_id1')
model1.commit(feconf.SYSTEM_COMMITTER_ID, '', [])
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
Exception,
'Invalid version number 10 for model TestVersionedModel with id '
'model_id1'):
model1.get_snapshots_metadata('model_id1', [10])
def test_get_version(self) -> None:
model1 = TestVersionedModel(id='model_id1')
model1.commit(feconf.SYSTEM_COMMITTER_ID, '', [])
model1.commit(feconf.SYSTEM_COMMITTER_ID, '', [])
version_model = TestVersionedModel.get_version('model_id1', 2)
# Ruling out the possibility of None for mypy type checking.
assert version_model is not None
self.assertEqual(version_model.version, 2)
version_model = (
TestVersionedModel.get_version('nonexistent_id1', 4, strict=False))
self.assertIsNone(version_model)
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
base_models.BaseModel.EntityNotFoundError,
'Entity for class TestVersionedModel with id nonexistent_id1 '
'not found'):
TestVersionedModel.get_version('nonexistent_id1', 4, strict=True)
version_model = (
TestVersionedModel.get_version('model_id1', 4, strict=False))
self.assertIsNone(version_model)
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
base_models.BaseModel.EntityNotFoundError,
'Entity for class TestSnapshotContentModel with id model_id1-4 '
'not found'):
TestVersionedModel.get_version('model_id1', 4, strict=True)
def test_get_multi_versions(self) -> None:
model1 = TestVersionedModel(id='model_id1')
model1.commit(feconf.SYSTEM_COMMITTER_ID, '', [])
model1.commit(feconf.SYSTEM_COMMITTER_ID, '', [])
models_by_version = TestVersionedModel.get_multi_versions(
'model_id1', [1, 2])
self.assertEqual(len(models_by_version), 2)
self.assertEqual(models_by_version[0].version, 1)
self.assertEqual(models_by_version[1].version, 2)
def test_get_multi_versions_errors(self) -> None:
model1 = TestVersionedModel(id='model_id1')
model1.commit(feconf.SYSTEM_COMMITTER_ID, '', [])
model1.commit(feconf.SYSTEM_COMMITTER_ID, '', [])
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
ValueError,
'Requested version number 3 cannot be higher than the current '
'version number 2.'):
TestVersionedModel.get_multi_versions('model_id1', [1, 2, 3])
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
ValueError,
'At least one version number is invalid'):
# TODO(#13528): Remove this test after the backend is fully
# type-annotated. Here ignore[list-item] is used to test method
# get_multi_versions() for invalid input type.
TestVersionedModel.get_multi_versions('model_id1', [1, 1.5, 2]) # type: ignore[list-item]
class TestBaseModel(base_models.BaseModel):
"""Model that inherits BaseModel for testing. This is required as BaseModel
gets subclassed a lot in other tests and that can create unexpected errors.
"""
pass
class BaseModelTests(test_utils.GenericTestBase):
def test_create_raises_error_when_many_id_collisions_occur(self) -> None:
# Swap dependent method get_by_id to simulate collision every time.
get_by_id_swap = self.swap(
TestBaseModel, 'get_by_id', types.MethodType(
lambda _, __: True, TestBaseModel))
assert_raises_regexp_context_manager = self.assertRaisesRegexp( # type: ignore[no-untyped-call]
Exception, 'New id generator is producing too many collisions.')
with assert_raises_regexp_context_manager, get_by_id_swap:
TestBaseModel.get_new_id('exploration')
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_log import log as logging
import six
import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import exception
from nova.i18n import _LE
ALIAS = 'extensions'
LOG = logging.getLogger(__name__)
authorize = extensions.os_compute_authorizer(ALIAS)
# NOTE(cyeoh): The following mappings are currently incomplete.
# Having a v2.1 extension loaded can imply that several v2 extensions
# should also be reported as loaded, even though those extensions no
# longer exist separately in v2.1. An illustrative expansion sketch
# follows the mapping tables below.
v21_to_v2_extension_list_mapping = {
'os-quota-sets': [{'name': 'UserQuotas', 'alias': 'os-user-quotas'},
{'name': 'ExtendedQuotas',
'alias': 'os-extended-quotas'}],
'os-cells': [{'name': 'CellCapacities', 'alias': 'os-cell-capacities'}],
'os-baremetal-nodes': [{'name': 'BareMetalExtStatus',
'alias': 'os-baremetal-ext-status'}],
'os-block-device-mapping': [{'name': 'BlockDeviceMappingV2Boot',
'alias': 'os-block-device-mapping-v2-boot'}],
'os-cloudpipe': [{'name': 'CloudpipeUpdate',
'alias': 'os-cloudpipe-update'}],
'servers': [{'name': 'Createserverext', 'alias': 'os-create-server-ext'},
{'name': 'ExtendedIpsMac', 'alias': 'OS-EXT-IPS-MAC'},
{'name': 'ExtendedIps', 'alias': 'OS-EXT-IPS'},
{'name': 'ServerListMultiStatus',
'alias': 'os-server-list-multi-status'},
{'name': 'ServerSortKeys', 'alias': 'os-server-sort-keys'},
{'name': 'ServerStartStop', 'alias': 'os-server-start-stop'}],
'flavors': [{'name': 'FlavorDisabled', 'alias': 'OS-FLV-DISABLED'},
{'name': 'FlavorExtraData', 'alias': 'OS-FLV-EXT-DATA'},
{'name': 'FlavorSwap', 'alias': 'os-flavor-swap'}],
'os-services': [{'name': 'ExtendedServicesDelete',
'alias': 'os-extended-services-delete'},
{'name': 'ExtendedServices', 'alias':
'os-extended-services'}],
'os-evacuate': [{'name': 'ExtendedEvacuateFindHost',
'alias': 'os-extended-evacuate-find-host'}],
'os-floating-ips': [{'name': 'ExtendedFloatingIps',
'alias': 'os-extended-floating-ips'}],
'os-hypervisors': [{'name': 'ExtendedHypervisors',
'alias': 'os-extended-hypervisors'},
{'name': 'HypervisorStatus',
'alias': 'os-hypervisor-status'}],
'os-networks': [{'name': 'ExtendedNetworks',
'alias': 'os-extended-networks'}],
'os-rescue': [{'name': 'ExtendedRescueWithImage',
'alias': 'os-extended-rescue-with-image'}],
'os-extended-status': [{'name': 'ExtendedStatus',
'alias': 'OS-EXT-STS'}],
'os-used-limits': [{'name': 'UsedLimitsForAdmin',
'alias': 'os-used-limits-for-admin'}],
'os-volumes': [{'name': 'VolumeAttachmentUpdate',
'alias': 'os-volume-attachment-update'}],
'os-server-groups': [{'name': 'ServerGroupQuotas',
'alias': 'os-server-group-quotas'}],
}
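# Illustrative sketch (not part of the original module): given the mapping
# above, each discoverable v2.1 alias implies extra fake v2 entries, e.g.
# 'os-quota-sets' also surfaces 'os-user-quotas' and 'os-extended-quotas'.
# The helper name below is hypothetical; ExtensionInfoController performs
# the same expansion internally in _get_extensions().
def _example_expand_v2_aliases(discoverable_aliases):
    """Return the extra fake v2 aliases implied by discoverable v2.1 aliases."""
    implied = []
    for alias in discoverable_aliases:
        for extra in v21_to_v2_extension_list_mapping.get(alias, []):
            implied.append(extra['alias'])
    return implied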
# v2.1 plugins which should never appear in the v2 extension list
# This should be the v2.1 alias, not the V2.0 alias
v2_extension_suppress_list = ['servers', 'images', 'versions', 'flavors',
'os-block-device-mapping-v1', 'os-consoles',
'extensions', 'image-metadata', 'ips', 'limits',
'server-metadata'
]
# v2.1 plugins which should appear under a different name in v2
v21_to_v2_alias_mapping = {
'image-size': 'OS-EXT-IMG-SIZE',
'os-remote-consoles': 'os-consoles',
'os-disk-config': 'OS-DCF',
'os-extended-availability-zone': 'OS-EXT-AZ',
'os-extended-server-attributes': 'OS-EXT-SRV-ATTR',
'os-multinic': 'NMN',
'os-scheduler-hints': 'OS-SCH-HNT',
'os-server-usage': 'OS-SRV-USG',
'os-instance-usage-audit-log': 'os-instance_usage_audit_log',
}
# V2.1 does not support XML but we need to keep an entry in the
# /extensions information returned to the user for backwards
# compatibility
FAKE_XML_URL = "http://docs.openstack.org/compute/ext/fake_xml"
FAKE_UPDATED_DATE = "2014-12-03T00:00:00Z"
class FakeExtension(object):
def __init__(self, name, alias):
self.name = name
self.alias = alias
self.__doc__ = ""
self.version = -1
class ExtensionInfoController(wsgi.Controller):
def __init__(self, extension_info):
self.extension_info = extension_info
def _translate(self, ext):
ext_data = {}
ext_data["name"] = ext.name
ext_data["alias"] = ext.alias
ext_data["description"] = ext.__doc__
ext_data["namespace"] = FAKE_XML_URL
ext_data["updated"] = FAKE_UPDATED_DATE
ext_data["links"] = []
return ext_data
    def _create_fake_ext(self, name, alias):
        return FakeExtension(name, alias)
def _get_extensions(self, context):
"""Filter extensions list based on policy."""
discoverable_extensions = dict()
for alias, ext in six.iteritems(self.extension_info.get_extensions()):
authorize = extensions.os_compute_soft_authorizer(alias)
if authorize(context, action='discoverable'):
discoverable_extensions[alias] = ext
else:
LOG.debug("Filter out extension %s from discover list",
alias)
# Add fake v2 extensions to list
extra_exts = {}
for alias in discoverable_extensions:
if alias in v21_to_v2_extension_list_mapping:
for extra_ext in v21_to_v2_extension_list_mapping[alias]:
extra_exts[extra_ext["alias"]] = self._create_fake_ext(
extra_ext["name"], extra_ext["alias"])
discoverable_extensions.update(extra_exts)
# Suppress extensions which we don't want to see in v2
for suppress_ext in v2_extension_suppress_list:
try:
del discoverable_extensions[suppress_ext]
except KeyError:
pass
# v2.1 to v2 extension name mapping
for rename_ext in v21_to_v2_alias_mapping:
if rename_ext in discoverable_extensions:
new_name = v21_to_v2_alias_mapping[rename_ext]
mod_ext = copy.deepcopy(
discoverable_extensions.pop(rename_ext))
mod_ext.alias = new_name
discoverable_extensions[new_name] = mod_ext
return discoverable_extensions
@extensions.expected_errors(())
def index(self, req):
context = req.environ['nova.context']
authorize(context)
sorted_ext_list = sorted(
six.iteritems(self._get_extensions(context)))
        # Use a local list name to avoid shadowing the imported extensions module.
        ext_list = []
        for _alias, ext in sorted_ext_list:
            ext_list.append(self._translate(ext))
        return dict(extensions=ext_list)
@extensions.expected_errors(404)
def show(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
# NOTE(dprince): the extensions alias is used as the 'id' for show
ext = self._get_extensions(context)[id]
except KeyError:
raise webob.exc.HTTPNotFound()
return dict(extension=self._translate(ext))
class ExtensionInfo(extensions.V21APIExtensionBase):
"""Extension information."""
name = "Extensions"
alias = ALIAS
version = 1
def get_resources(self):
resources = [
extensions.ResourceExtension(
ALIAS, ExtensionInfoController(self.extension_info),
member_name='extension')]
return resources
def get_controller_extensions(self):
return []
class LoadedExtensionInfo(object):
"""Keep track of all loaded API extensions."""
def __init__(self):
self.extensions = {}
def register_extension(self, ext):
if not self._check_extension(ext):
return False
alias = ext.alias
if alias in self.extensions:
raise exception.NovaException("Found duplicate extension: %s"
% alias)
self.extensions[alias] = ext
return True
def _check_extension(self, extension):
"""Checks for required methods in extension objects."""
try:
extension.is_valid()
except AttributeError:
LOG.exception(_LE("Exception loading extension"))
return False
return True
def get_extensions(self):
return self.extensions
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions that read and write gzipped streams.
The user of the file doesn't have to worry about the compression,
but random access is not allowed.
Based on the standard gzip module. That module requires a seekable file.
This module buffers data and is useful for decompressing streams.
This module can also handle multiple segments.
Useful for sockets. supports reading only.
TODO: writing has not been tested.
"""
import os, time
import struct
import zlib
from pycopia import UserFile
__all__ = ["GzipFile", "open"]
FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16
READ, WRITE = 1, 2
class GzipError(RuntimeError):
pass
_OS_TYPES = {
0: "FAT filesystem (MS-DOS, OS/2, NT/Win32)",
1: "Amiga",
2: "VMS (or OpenVMS)",
3: "Unix",
4: "VM/CMS",
5: "Atari TOS",
6: "HPFS filesystem (OS/2, NT)",
7: "Macintosh",
8: "Z-System",
9: "CP/M",
10: "TOPS-20",
11: "NTFS filesystem (NT)",
12: "QDOS",
13: "Acorn RISCOS",
255: "unknown",
}
class GzipHeader(object):
MAGIC = '\x1f\x8b'
def __init__(self):
# self.magic = None # two bytes
self.method = 8 # one byte
self.flags = 0 # one byte
self.text = 0 # represents FTEXT flag
self.mtime = long(time.time()) # four bytes
self.extraflag = 0 # one byte
self.os = 255 # one byte, default unknown
self.extra = "" # variable (not implemented)
self.name = "" # variable
self.comment = "" # variable
self.hcrc = None # two bytes
def __str__(self):
s = []
s.append(" name: %s" % (self.name))
s.append(" os: %s (%s)" % (self.os, _OS_TYPES.get(self.os, "bogusvalue")))
s.append(" comment: %s" % (self.comment))
s.append(" method: %s" % (self.method))
s.append(" is text: %s" % (self.text))
s.append(" mtime: %s" % (time.ctime(self.mtime)))
s.append(" flags: %s" % (self.flags))
s.append("extraflag: %s" % (self.extraflag))
s.append(" extra: %s" % (self.extra))
s.append(" hcrc: %s" % (self.hcrc))
return "\n".join(s)
def read(self, gzf):
buf = gzf.read_raw(10) # optimize reads for fixed header part
magic = buf[0:2]
if magic != self.MAGIC:
raise GzipError('Not a gzipped file')
method = ord( buf[2] )
if method != 8:
raise GzipError('Unknown compression method')
self.method = method
self.flags = ord( buf[3] )
self.text = self.flags & FTEXT
self.mtime = struct.unpack("<L", buf[4:8])[0]
self.extraflag = ord(buf[8])
self.os = ord(buf[9])
flag = self.flags
if flag & FEXTRA:
xlen = struct.unpack("<H", gzf.read_raw(2))[0]
self.extra = gzf.read_raw(xlen)
if flag & FNAME:
fn = []
while (1):
s=gzf.read_raw(1)
if not s or s=='\000':
break
fn.append(s)
self.name = "".join(fn)
if flag & FCOMMENT:
fc = []
while (1):
s=gzf.read_raw(1)
if not s or s=='\000':
break
fc.append(s)
self.comment = "".join(fc)
if flag & FHCRC:
self.hcrc = struct.unpack("<H", gzf.read_raw(2))[0]
def write(self, fo):
flags = 0
if self.extra:
flags = flags | FEXTRA
if self.name:
flags = flags | FNAME
if self.comment:
flags = flags | FCOMMENT
if self.hcrc: # XXX compute this
flags = flags | FHCRC
fixed = struct.pack("<2sBBLBB", self.MAGIC, self.method, flags,
self.mtime, self.extraflag, self.os)
fo.write_raw(fixed)
if self.extra:
fo.write_raw(struct.pack("<H", len(self.extra)))
fo.write_raw(self.extra)
if self.name:
fo.write_raw("%s\0" % (self.name))
if self.comment:
fo.write_raw("%s\0" % (self.comment))
if self.hcrc:
fo.write_raw(struct.pack("<H", len(self.hcrc)))
def set_comment(self, text):
self.comment = str(text)
def set_name(self, text):
self.name = str(text)
# a Gzip stream reader that does not require a seekable file.
class GzipFile(UserFile.FileWrapper):
def __init__(self, fo, mode="r", compresslevel=6, header=None):
super(GzipFile, self).__init__(fo)
self.compresslevel = compresslevel
self.eof = 0
self._buf = ""
self._bufsize = 0
self._rawq = ""
self._rawqsize = 0
self.segments = 0
# check mode and initialize
if mode[0] in "wa":
self._mode = WRITE
self._init_write(header)
elif mode[0] == "r":
self._mode = READ
self._init_read()
else:
raise ValueError("GzipFile: unknown file mode.")
def new_segment(self, header):
if self._mode == WRITE:
rest = self.compress.flush()
if rest:
self._write(rest)
self.write32(self.crc)
self.write32(self.segsize)
            # _init_write() writes the new header and resets CRC/size state.
            self._init_write(header)
self.segments += 1
def __repr__(self):
return '<GzipFile open on fd %r id:%x>' % (self._fd, id(self))
def close(self):
if self._mode == WRITE:
rest = self.compress.flush()
if rest:
self._write(rest)
self.write32(self.crc)
self.write32(self.segsize)
super(GzipFile, self).close()
def _init_write(self, header):
self.crc = zlib.crc32("")
self.segsize = 0
if not header:
header = GzipHeader() # take default values
header.set_name("<unknown>")
header.write(self)
self.header = header
self.compress = zlib.compressobj(self.compresslevel)
def _init_read(self):
self.crc = zlib.crc32("")
self.segsize = 0
self.decompress = zlib.decompressobj(-zlib.MAX_WBITS)
self.header = GzipHeader()
self.header.read(self)
def _process_rawq(self):
data = self.decompress.decompress(self._rawq)
self._add_read_data(data)
self._rawq = ""
self._rawqsize = 0
if self.decompress.unused_data:
# Ending case: we've come to the end of a member in the file.
self.segments += 1
self._check_end()
def _check_end(self):
        # Check the CRC and decompressed size from the 8-byte member trailer,
        # then start reading a new member if more raw data follows.
left = self.decompress.unused_data
if len(left) < 8:
left += self.read_raw(8-len(left)) # read fell on trailer boundary
crc32 = struct.unpack("<l", left[0:4])[0]
isize = struct.unpack("<l", left[4:8])[0]
self._rawq = left[8:]
self._rawqsize = len(self._rawq)
# verify crc check and size
if crc32 % 0x100000000 != self.crc % 0x100000000:
raise GzipError("CRC check failed")
elif isize != self.segsize:
raise GzipError("Incorrect length of data produced")
# if there is more raw data left, there must be another segment
if self._rawq:
self._init_read()
def _add_read_data(self, data):
self.crc = zlib.crc32(data, self.crc)
self.segsize += len(data)
self._buf += data
self._bufsize += len(data)
def _add2raw(self, data):
self._rawq += data
self._rawqsize += len(data)
def read(self, amt=2147483646):
if self._rawq:
self._process_rawq()
return self._read_uncompressed(amt)
while not self.eof and self._bufsize <= amt:
buf = super(GzipFile, self).read(min(4096, amt)) # read compressed data, may block
if not buf:
self.eof = 1
else:
self._add2raw(buf)
self._process_rawq()
return self._read_uncompressed(amt)
def _read_uncompressed(self, amt):
if amt >= self._bufsize:
buf = self._buf
self._buf = ""
self._bufsize = 0
return buf
else:
buf = self._buf[:amt]
self._buf = self._buf[amt:]
self._bufsize -= amt
return buf
# read from the rawq or file
def read_raw(self, amt=2147483646):
while not self.eof and self._rawqsize < amt:
buf = super(GzipFile, self).read(min(4096, amt)) # read compressed data, may block
if not buf:
self.eof = 1
else:
self._add2raw(buf)
return self._read_rawq(amt)
def _read_rawq(self, amt=2147483646):
if amt >= self._rawqsize:
buf = self._rawq
self._rawq = ""
self._rawqsize = 0
return buf
else:
buf = self._rawq[:amt]
self._rawq = self._rawq[amt:]
self._rawqsize -= amt
return buf
#write methods
def write32(self, value):
self._write(struct.pack("<l", value))
def write32u(self, value):
if value < 0:
value = value + 0x100000000
self._write(struct.pack("<L", value))
# writes data out compressed
def write(self, data):
if self._mode == WRITE:
self.segsize += len(data)
self.crc = zlib.crc32(data, self.crc)
data = self.compress.compress(data)
self._write(data)
else:
raise GzipError("trying to write to stream in READ mode.")
# writes data out uncompressed
def write_raw(self, data):
if self._mode == WRITE:
self._write(data)
else:
raise GzipError("trying to write to stream in READ mode.")
### open factory function
def open(name, mode="r", compresslevel=9, header=None):
#flags = UserFile.mode2flags(mode)
#fd = os.open(name, flags)
fo = open(name, mode)
return GzipFile(fo, mode, compresslevel, header)
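# Illustrative usage sketch referenced by the module docstring (not part of
# the original module). Assumes "fileobj" is a file-like object whose read()
# returns raw gzip bytes, e.g. a socket wrapped with makefile(); the
# UserFile.FileWrapper base class may impose additional requirements.
def _example_read_stream(fileobj, chunksize=4096):
    """Decompress an entire gzipped stream without seeking, chunk by chunk."""
    gz = GzipFile(fileobj, "r")
    chunks = []
    while True:
        chunk = gz.read(chunksize)
        if not chunk:
            break
        chunks.append(chunk)
    return "".join(chunks)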