gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, 2017, 2018, 2019 Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from __future__ import division
__all__ = ['has_CoolProp', 'coolprop_dict', 'CP_fluid', 'coolprop_fluids',
'CoolProp_T_dependent_property', 'CoolProp_failing_PT_flashes',
'PropsSI', 'PhaseSI','HAPropsSI', 'AbstractState']
import os
from fluids.numerics import assert_close1d, numpy as np
from thermo.base import data_dir, source_path
from chemicals.utils import mark_numba_incompatible
# Legacy import-time CoolProp detection, kept for reference only; availability
# is now probed lazily by has_CoolProp() below so importing this module never
# pays the CoolProp import cost.
#try:
#    import CoolProp
#    has_CoolProp = True
##    CPiP_min = CP.iP_min
#except: # pragma: no cover
#    # Don't just except
#    has_CoolProp = False
#has_CoolProp = False # For testing

# Value of CoolProp's iP_min input-pair constant (see commented block above),
# hardcoded so CoolProp need not be imported at module load time.
CPiP_min = 17
global _PropsSI
global _has_CoolProp
# Tri-state cache used by has_CoolProp(): None = not probed yet, else bool.
_has_CoolProp = None
@mark_numba_incompatible
def has_CoolProp():
    """Return True if the optional CoolProp package is importable and its
    cached fluid constants could be loaded, else False.

    The probe runs once; the result is cached in the module-level
    ``_has_CoolProp`` flag so subsequent calls are cheap.
    """
    global _has_CoolProp
    if _has_CoolProp is None:
        try:
            import CoolProp
            load_coolprop_fluids()
            _has_CoolProp = True
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are not swallowed during the availability probe.
            _has_CoolProp = False
    return _has_CoolProp
_PropsSI = None
@mark_numba_incompatible
def PropsSI(*args, **kwargs):
    """Lazily-importing proxy for ``CoolProp.CoolProp.PropsSI``.

    The real function is imported on first call and cached in the module
    global ``_PropsSI``; later calls dispatch straight to the cached callable.
    """
    global _PropsSI
    if _PropsSI is None:
        import CoolProp.CoolProp as _CP
        _PropsSI = _CP.PropsSI
    return _PropsSI(*args, **kwargs)
global _HAPropsSI
_HAPropsSI = None
@mark_numba_incompatible
def HAPropsSI(*args, **kwargs):
    """Lazily-importing proxy for ``CoolProp.CoolProp.HAPropsSI`` (humid-air
    property interface); imports and caches the real function on first use."""
    global _HAPropsSI
    if _HAPropsSI is None:
        from CoolProp.CoolProp import HAPropsSI as _loaded
        _HAPropsSI = _loaded
    return _HAPropsSI(*args, **kwargs)
global _PhaseSI
_PhaseSI = None
@mark_numba_incompatible
def PhaseSI(*args, **kwargs):
    """Lazily-importing proxy for ``CoolProp.CoolProp.PhaseSI``; the import
    cost is paid only on the first call and the function is cached after."""
    global _PhaseSI
    if _PhaseSI is None:
        import CoolProp.CoolProp as _CP
        _PhaseSI = _CP.PhaseSI
    return _PhaseSI(*args, **kwargs)
global _AbstractState
_AbstractState = None
@mark_numba_incompatible
def AbstractState(*args, **kwargs):
    """Lazily-importing proxy for ``CoolProp.CoolProp.AbstractState``; the
    class object is fetched from CoolProp on first use and cached."""
    global _AbstractState
    if _AbstractState is None:
        from CoolProp.CoolProp import AbstractState as _loaded
        _AbstractState = _loaded
    return _AbstractState(*args, **kwargs)
# Load the constants, store
# CoolProp.FluidsList() indicates some new fluids have been added
# All of these can be inputs to the PropsSI function!
# CAS registry numbers of every pure fluid supported by this interface.
coolprop_dict = ['100-41-4', '10024-97-2', '102687-65-0', '106-42-3',
    '106-97-8', '106-98-9', '107-46-0', '107-51-7', '107-52-8', '107-83-5',
    '108-38-3', '108-88-3', '109-66-0', '110-54-3', '110-82-7', '111-65-9',
    '111-84-2', '112-39-0', '112-40-3', '112-61-8', '112-62-9', '112-63-0',
    '1120-21-4', '115-07-1', '115-10-6', '115-11-7', '115-25-3', '124-18-5',
    '124-38-9', '1333-74-0', '141-62-8', '141-63-9', '142-82-5', '1717-00-6',
    '2551-62-4', '2837-89-0', '287-92-3', '29118-24-9', '29118-25-0', '301-00-8',
    '306-83-2', '353-36-6', '354-33-6', '406-58-6', '420-46-2', '421-14-7',
    '431-63-0', '431-89-0', '460-73-1', '463-58-1', '463-82-1', '540-97-6',
    '556-67-2', '590-18-1', '593-53-3', '616-38-6', '624-64-6', '630-08-0',
    '64-17-5', '67-56-1', '67-64-1', '690-39-1', '71-43-2', '74-82-8', '74-84-0',
    '74-85-1', '74-87-3', '74-98-6', '74-99-7', '7439-90-9', '7440-01-9',
    '7440-37-1', '7440-59-7', '7440-63-3', '7446-09-5', '75-10-5', '75-19-4',
    '75-28-5', '75-37-6', '75-43-4', '75-45-6', '75-46-7', '75-68-3', '75-69-4',
    '75-71-8', '75-72-9', '75-73-0', '754-12-1', '756-13-8', '76-13-1', '76-14-2',
    '76-15-3', '76-16-4', '76-19-7', '7664-41-7', '7727-37-9', '7732-18-5',
    '7782-39-0', '7782-41-4', '7782-44-7', '7783-06-4', '7789-20-0', '78-78-4',
    '811-97-2', '95-47-6']
# Fluids for which CoolProp provides a viscosity model. Entries ending in
# '.PPF' are pseudo-pure refrigerant blends; suffixed CAS forms (e.g.
# '1333-74-0p') presumably denote spin isomers such as parahydrogen — TODO
# confirm against CoolProp's fluid list.
CoolProp_has_mu_CASs = set(['74-82-8', '109-66-0', '67-56-1', '115-07-1', '76-16-4', '75-72-9', '811-97-2', '75-73-0', '1717-00-6', '75-68-3', '76-19-7', '431-89-0', '431-63-0', '690-39-1', '115-25-3', '75-69-4', '75-71-8', '420-46-2', '306-83-2', '102687-65-0', '754-12-1', '29118-24-9', '2837-89-0', '75-37-6', '75-45-6', '460-73-1', '75-10-5', '354-33-6', '75-46-7', 'R404A.PPF', 'R407C.PPF', 'R410A.PPF', 'R507A.PPF', '2551-62-4', '108-88-3', '7732-18-5', '108-38-3', '106-97-8', '124-18-5', '111-84-2', '111-65-9', '112-40-3', '142-82-5', '110-54-3', '74-98-6', '95-47-6', '106-42-3', 'AIR.PPF', '7440-37-1', '7727-37-9', '7782-44-7', '7664-41-7', '71-43-2', '124-38-9', '110-82-7', '287-92-3', '78-78-4', '115-10-6', '74-84-0', '64-17-5', '7789-20-0', '7440-59-7', '1333-74-0', '7783-06-4', '75-28-5'])
# Fluids for which CoolProp provides a thermal conductivity model.
CoolProp_has_k_CASs = set(['74-82-8', '67-56-1', '115-07-1', '76-16-4', '75-72-9', '75-73-0', '1717-00-6', '75-68-3', '76-19-7', '431-89-0', '431-63-0', '690-39-1', '115-25-3', '2837-89-0', '460-73-1', '75-10-5', '811-97-2', '75-69-4', '75-71-8', '420-46-2', '75-45-6', '306-83-2', '754-12-1', '29118-24-9', '354-33-6', '75-37-6', '75-46-7', 'R404A.PPF', 'R407C.PPF', 'R410A.PPF', 'R507A.PPF', '2551-62-4', '108-88-3', '7732-18-5', '106-97-8', '124-18-5', '111-84-2', '111-65-9', '112-40-3', '142-82-5', '110-54-3', '74-98-6', 'AIR.PPF', '7440-37-1', '7727-37-9', '7782-44-7', '7664-41-7', '71-43-2', '124-38-9', '109-66-0', '287-92-3', '78-78-4', '74-84-0', '64-17-5', '100-41-4', '108-38-3', '95-47-6', '106-42-3', '7789-20-0', '7440-59-7', '1333-74-0p', '1333-74-0', '75-28-5'])
# Fluids whose conductivity model is present but raises/fails in practice;
# CP_fluid.has_k excludes these.
CoolProp_k_failing_CASs = set(['100-41-4', '2837-89-0', '460-73-1', '75-10-5', '75-45-6'])
# Fluids for which a PT flash is known to fail in CoolProp.
CoolProp_failing_PT_flashes = set(['115-07-1', '115-25-3', '1717-00-6', '420-46-2',
    '431-63-0', '431-89-0', '690-39-1', '75-68-3', '75-69-4', '75-71-8', '75-72-9', '75-73-0', '76-19-7',
    '110-82-7', '7782-44-7'])
# Minimum temperatures [K] raised above CoolProp's own Tmin for fluids whose
# low-T behavior is unreliable; applied via max(Tmin, override).
CoolProp_Tmin_overrides = {
    '106-97-8': 135,
    '106-98-9': 87.9,
    '109-66-0': 144,
    '110-82-7': 279.52,
    '67-56-1': 175.7,
    '74-82-8': 90.8,
    '74-84-0': 90.4,
    '74-85-1': 104.1,
    '75-28-5': 114,
    '7727-37-9': 63.2,
    '100-41-4': 263.5,
}
# Maximum temperature [K] overrides, same idea as the Tmin overrides above.
CoolProp_Tmax_overrides = {
    '107-51-7': 563,
}
class CP_fluid(object):
    """Lightweight container for the constant properties of one CoolProp
    fluid. Reading these cached constants is much faster than querying
    CoolProp each time they are needed."""
    __slots__ = ['Tmin', 'Tmax', 'Pmax', 'has_melting_line', 'Tc', 'Pc', 'Tt',
                 'omega', 'HEOS', 'CAS']

    def __init__(self, Tmin, Tmax, Pmax, has_melting_line, Tc, Pc, Tt, omega,
                 HEOS, CAS):
        (self.Tmin, self.Tmax, self.Pmax, self.has_melting_line, self.Tc,
         self.Pc, self.Tt, self.omega, self.HEOS, self.CAS) = (
            Tmin, Tmax, Pmax, has_melting_line, Tc, Pc, Tt, omega, HEOS, CAS)

    @property
    def has_k(self):
        # True when CoolProp offers a thermal conductivity model for this
        # fluid and that model is not in the known-failing set.
        CAS = self.CAS
        return CAS in CoolProp_has_k_CASs and CAS not in CoolProp_k_failing_CASs

    @property
    def has_mu(self):
        # True when CoolProp offers a viscosity model for this fluid.
        return self.CAS in CoolProp_has_mu_CASs

    def as_json(self):
        """Serialize every stored constant into a plain dict."""
        return {name: getattr(self, name) for name in self.__slots__}

    def __deepcopy__(self, memo):
        # An AbstractState("HEOS", CAS) handle cannot be deep-copied;
        # fortunately no copy is needed, so the same instance is returned.
        return self
# Store the properties in a dict of CP_fluid instances, keyed by CAS number.
# Populated lazily by load_coolprop_fluids()/store_coolprop_fluids().
coolprop_fluids = {}
# Legacy eager loader, superseded by store_coolprop_fluids() below:
# if has_CoolProp:
#     #for fluid in CP.FluidsList():
#     #    CASRN = CP.get_fluid_param_string(fluid, 'CAS')
#     for CASRN in coolprop_dict:
#         # TODO find other way of getting the data faster - there is no way
#         # TODO use appdirs, store this data as a cache
#         HEOS = AbstractState("HEOS", CASRN)
#         coolprop_fluids[CASRN] = CP_fluid(Tmin=HEOS.Tmin(), Tmax=HEOS.Tmax(), Pmax=HEOS.pmax(),
#                       has_melting_line=HEOS.has_melting_line(), Tc=HEOS.T_critical(), Pc=HEOS.p_critical(),
#                       Tt=HEOS.Ttriple(), omega=HEOS.acentric_factor(), HEOS=None)
@mark_numba_incompatible
def store_coolprop_fluids():
    """Query CoolProp for the constants of every fluid in ``coolprop_dict``,
    fill the module-level ``coolprop_fluids`` dict, and cache the data as a
    JSON file (named after the installed CoolProp version) in ``data_dir``.
    """
    import CoolProp
    import json
    for CASRN in coolprop_dict:
        HEOS = AbstractState("HEOS", CASRN)
        coolprop_fluids[CASRN] = CP_fluid(
            Tmin=HEOS.Tmin(), Tmax=HEOS.Tmax(), Pmax=HEOS.pmax(),
            has_melting_line=HEOS.has_melting_line(), Tc=HEOS.T_critical(),
            Pc=HEOS.p_critical(), Tt=HEOS.Ttriple(),
            omega=HEOS.acentric_factor(), HEOS=None, CAS=CASRN)

    data = {CASRN: coolprop_fluids[CASRN].as_json() for CASRN in coolprop_dict}
    ver = CoolProp.__version__
    # Context manager guarantees the handle is closed even if json.dump
    # raises; the previous open()/close() pair leaked it on error.
    with open(os.path.join(data_dir, 'CoolPropFluids%s.json' % ver), 'w') as f:
        json.dump(data, f)
@mark_numba_incompatible
def load_coolprop_fluids():
    """Populate ``coolprop_fluids`` from the JSON cache matching the
    installed CoolProp version, generating the cache first when it is
    missing or unreadable.
    """
    import json
    import CoolProp
    ver = CoolProp.__version__
    pth = os.path.join(data_dir, 'CoolPropFluids%s.json' % ver)
    try:
        # The original opened the file and never closed it; `with` fixes the
        # handle leak.
        with open(pth, 'r') as f:
            data = json.load(f)
    except (OSError, ValueError):
        # Cache missing (OSError) or corrupt (ValueError from json): rebuild
        # it from CoolProp, then read it back. Narrowed from a bare except.
        store_coolprop_fluids()
        with open(pth, 'r') as f:
            data = json.load(f)
    for CASRN in coolprop_dict:
        d = data[CASRN]
        coolprop_fluids[CASRN] = CP_fluid(
            Tmin=d['Tmin'], Tmax=d['Tmax'], Pmax=d['Pmax'],
            has_melting_line=d['has_melting_line'], Tc=d['Tc'], Pc=d['Pc'],
            Tt=d['Tt'], omega=d['omega'], HEOS=None, CAS=CASRN)
class MultiCheb1D(object):
    '''Piecewise Chebyshev approximant: holds one coefficient set for each
    interval between consecutive breakpoints and evaluates whichever piece a
    query temperature falls into.
    '''
    def __init__(self, points, coeffs):
        self.points = points
        self.coeffs = coeffs
        self.N = len(points) - 1

    def __call__(self, x):
        from bisect import bisect_left
        idx = bisect_left(self.points, x)
        if idx == 0:
            if x != self.points[0]:
                raise Exception('Requested value is under the limits')
            # Exactly on the lower bound: evaluate the first interval.
            idx = 1
        if idx > self.N:
            raise Exception('Requested value is above the limits')
        lo = self.points[idx - 1]
        hi = self.points[idx]
        # Map x onto the canonical Chebyshev domain [-1, 1].
        scaled = (2.0*x - lo - hi)/(hi - lo)
        return self.chebval(scaled, self.coeffs[idx - 1])

    @staticmethod
    def chebval(x, c):
        # Clenshaw recurrence, adapted from numpy's polynomial.chebyshev:
        # https://github.com/numpy/numpy/blob/v1.13.0/numpy/polynomial/chebyshev.py#L1093-L1177
        # Requires at least two coefficients.
        two_x = 2.*x
        b0, b1 = c[-2], c[-1]
        for coeff in c[-3::-1]:
            b0, b1 = coeff - b1, b0 + b1*two_x
        return b0 + b1*x
class CP_fluid_approximator(object):
    '''A class to hold (and calculate) approximations for certain aspects of
    CoolProp chemical's properties. This could apply equally well to REFPROP.
    '''
    # Constant metadata slots plus one MultiCheb1D approximator per
    # property/phase combination ('_g' gas, '_l' liquid) and the ideal-gas
    # heat capacity fit CP0MOLAR.
    __slots__ = ['CAS', 'Tmin', 'Tmax', 'Pmax', 'has_melting_line', 'Tc', 'Pc', 'Tt',
                 'omega', 'HEOS', 'DMOLAR_g', 'HMOLAR_g', 'SMOLAR_g',
                 'SPEED_OF_SOUND_g', 'CONDUCTIVITY_g', 'VISCOSITY_g',
                 'CPMOLAR_g', 'CVMOLAR_g', 'DMOLAR_l', 'HMOLAR_l', 'SMOLAR_l',
                 'SPEED_OF_SOUND_l', 'CONDUCTIVITY_l', 'VISCOSITY_l',
                 'CPMOLAR_l', 'CVMOLAR_l', 'CP0MOLAR']

    def calculate(self, T, prop, phase):
        # Evaluate the fitted approximator for `prop` in the requested phase
        # at temperature T [K]. The fit attribute is named e.g. 'DMOLAR_g'.
        assert phase in ['l', 'g']
        phase_key = '_g' if phase == 'g' else '_l'
        name = prop + phase_key
        try:
            return getattr(self, name)(T)
        except AttributeError:
            # No MultiCheb1D fit was attached for this property/phase pair.
            raise Exception('Given chemical does not have a fit available for '
                            'that property and phase')

    def validate_prop(self, prop, phase, evaluated_points=30):
        # Compare the stored approximator against direct CoolProp evaluation
        # on `evaluated_points` temperatures per fit interval; prints a
        # summary line instead of raising when the tolerance is exceeded.
        phase_key = '_g' if phase == 'g' else '_l'
        name = prop + phase_key
        if prop in ['CP0MOLAR']:
            # Ideal-gas Cp fits are phase-independent and stored unsuffixed.
            name = prop
        pts = getattr(self, name).points
        predictor = getattr(self, name)
        for i in range(len(pts)-1):
            Ts = np.linspace(pts[i], pts[i+1], evaluated_points)
#            print(Ts[0], Ts[-1])
            prop_approx = [predictor(T) for T in Ts]
            prop_calc = [CoolProp_T_dependent_property(T, self.CAS, prop, phase) for T in Ts]
#            print(prop_approx)
#            print(prop_calc)
            # The approximators do give differences at the very low value side
            # so we need atol=1E-9
#            print(prop, self.CAS, prop_approx[0], prop_calc[0])
            try:
                assert_close1d(prop_approx, prop_calc, rtol=1E-7, atol=1E-9)
            except:
                '''There are several cases that assert_allclose doesn't deal
                with well for some reason. We could increase rtol, but instead
                the relative errors are used here to check everything is as desidred.
                Some example errors this won't trip on but assert_allclose does
                are:
                1.00014278827e-08
                1.62767956613e-06
                -0.0
                -1.63895899641e-16
                -4.93284549625e-15
                '''
                # Fall back to an element-wise relative-error check.
                prop_calc = np.array(prop_calc)
                prop_approx = np.array(prop_approx)
                errs = abs((prop_calc-prop_approx)/prop_calc)
                try:
                    assert max(errs) < 2E-6
                except:
                    print('%s %s failed with mean relative error %s and maximum relative error %s' %(self.CAS, prop, str(np.mean(errs)), str(max(errs))))
#
@mark_numba_incompatible
def CoolProp_T_dependent_property(T, CASRN, prop, phase):
    r'''Calculates a property of a chemical in either the liquid or gas phase
    as a function of temperature only. This means that the property is
    either at 1 atm or along the saturation curve.

    Parameters
    ----------
    T : float
        Temperature of the fluid [K]
    CASRN : str
        CAS number of the fluid
    prop : str
        CoolProp string shortcut for desired property
    phase : str
        Either 'l' or 'g' for liquid or gas properties respectively

    Returns
    -------
    prop : float
        Desired chemical property, [units]

    Notes
    -----
    For liquids above their boiling point, the liquid property is found on the
    saturation line (at higher pressures). Under their boiling point, the
    property is calculated at 1 atm.

    No liquid calculations are permitted above the critical temperature.

    For gases under the chemical's boiling point, the gas property is found
    on the saturation line (at sub-atmospheric pressures). Above the boiling
    point, the property is calculated at 1 atm.

    An exception is raised if the desired CAS is not supported, or if CoolProp
    is not available.

    The list of strings acceptable as an input for property types is:
    http://www.coolprop.org/coolprop/HighLevelAPI.html#table-of-string-inputs-to-propssi-function

    Examples
    --------
    Water at STP according to IAPWS-95

    >>> CoolProp_T_dependent_property(298.15, '7732-18-5', 'D', 'l') # doctest:+SKIP
    997.047636760347

    References
    ----------
    .. [1] Bell, Ian H., Jorrit Wronski, Sylvain Quoilin, and Vincent Lemort.
       "Pure and Pseudo-Pure Fluid Thermophysical Property Evaluation and the
       Open-Source Thermophysical Property Library CoolProp." Industrial &
       Engineering Chemistry Research 53, no. 6 (February 12, 2014):
       2498-2508. doi:10.1021/ie4033999. http://www.coolprop.org/
    '''
    # BUG FIX: has_CoolProp is a function; the old test `not has_CoolProp`
    # evaluated the always-truthy function object, so a missing CoolProp
    # install was never detected here.
    if not has_CoolProp(): # pragma: no cover
        raise Exception('CoolProp library is not installed')
    if CASRN not in coolprop_dict:
        raise Exception('CASRN not in list of supported fluids')
    Tc = coolprop_fluids[CASRN].Tc
    T = float(T) # Do not allow custom objects here
    if phase == 'l':
        if T > Tc:
            raise Exception('For liquid properties, must be under the critical temperature.')
        # Subcooled at 1 atm -> evaluate there; otherwise on the saturation
        # curve at Q=0 (saturated liquid).
        if PhaseSI('T', T, 'P', 101325, CASRN) in [u'liquid', u'supercritical_liquid']:
            return PropsSI(prop, 'T', T, 'P', 101325, CASRN)
        else:
            return PropsSI(prop, 'T', T, 'Q', 0, CASRN)
    elif phase == 'g':
        if PhaseSI('T', T, 'P', 101325, CASRN) == 'gas':
            return PropsSI(prop, 'T', T, 'P', 101325, CASRN)
        else:
            if T < Tc:
                # Below the boiling point: saturated vapor at Q=1.
                return PropsSI(prop, 'T', T, 'Q', 1, CASRN)
            else:
                # catch supercritical_gas and friends
                return PropsSI(prop, 'T', T, 'P', 101325, CASRN)
    else:
        raise Exception('Error in CoolProp property function')
# Optional construction of Chebyshev approximators for every fluid from the
# cached fit files. Deliberately disabled by the leading `0 and` guard; the
# code is retained for future use.
# BUG FIXES while disabled: the guard used to read `has_CoolProp and 0`,
# which tested the always-truthy function object (and evaluating
# has_CoolProp() first would trigger an import probe at module load, so the
# constant is short-circuited first); `json` was never imported; and
# `obj.Pc = HEOS.p_critical(),` had a trailing comma that stored a 1-tuple.
if 0 and has_CoolProp():
    import json
    folder = os.path.join(os.path.dirname(__file__), 'Misc')
    with open(os.path.join(folder, 'CoolProp vapor properties fits.json'), 'r') as f:
        vapor_properties = json.load(f)
    with open(os.path.join(folder, 'CoolProp CP0MOLAR fits.json'), 'r') as f:
        idea_gas_heat_capacity = json.load(f)

    CP_approximators = {}
    for CAS in coolprop_dict:
        obj = CP_fluid_approximator()
        CP_approximators[CAS] = obj
        obj.CAS = CAS
        HEOS = AbstractState("HEOS", CAS)
        obj.Tmin = HEOS.Tmin()
        if CAS in CoolProp_Tmin_overrides:
            obj.Tmin = max(obj.Tmin, CoolProp_Tmin_overrides[CAS])
        obj.Tmax = HEOS.Tmax()
        if CAS in CoolProp_Tmax_overrides:
            obj.Tmax = max(obj.Tmax, CoolProp_Tmax_overrides[CAS])
        obj.Pmax = HEOS.pmax()
        obj.has_melting_line = HEOS.has_melting_line()
        obj.Tc = HEOS.T_critical()
        # Trailing comma removed: Pc is a float, not a tuple.
        obj.Pc = HEOS.p_critical()
        obj.Tt = HEOS.Ttriple()
        obj.omega = HEOS.acentric_factor()
        if CAS in vapor_properties:
            for key, value in vapor_properties[CAS].items():
                chebcoeffs, limits = value
                # Flatten [(a0, b0), (a1, b1), ...] into [a0, b0, b1, ...].
                limits = [limits[0][0]] + [i[1] for i in limits]
                approximator = MultiCheb1D(limits, chebcoeffs)
                setattr(obj, key + '_g', approximator)
        if CAS in idea_gas_heat_capacity:
            chebcoeffs, Tmin, Tmax = idea_gas_heat_capacity[CAS]['CP0MOLAR']
            approximator = MultiCheb1D([Tmin, Tmax], chebcoeffs)
            setattr(obj, 'CP0MOLAR', approximator)
#            obj.validate_prop('CP0MOLAR', 'g')
def CoolProp_T_dependent_property_approximation(T, CASRN, prop, phase):
    # Placeholder: intended to mirror CoolProp_T_dependent_property using the
    # fitted CP_approximators instead of live CoolProp calls; not implemented.
    pass
| |
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import unittest
from availability_finder import AvailabilityFinder
from api_schema_graph import LookupResult
from branch_utility import BranchUtility, ChannelInfo
from compiled_file_system import CompiledFileSystem
from fake_url_fetcher import FakeUrlFetcher
from host_file_system_iterator import HostFileSystemIterator
from object_store_creator import ObjectStoreCreator
from test_file_system import TestFileSystem
from test_data.canned_data import (CANNED_API_FILE_SYSTEM_DATA, CANNED_BRANCHES)
from test_data.object_level_availability.tabs import TABS_SCHEMA_BRANCHES
class FakeHostFileSystemProvider(object):
  """Test double for a host file system provider, serving TestFileSystem
  instances built from a dict of canned per-branch data."""

  def __init__(self, file_system_data):
    self._file_system_data = file_system_data

  def GetTrunk(self):
    # Trunk is simply the branch named 'trunk'.
    return self.GetBranch('trunk')

  def GetBranch(self, branch):
    branch_data = self._file_system_data[str(branch)]
    return TestFileSystem(branch_data)
class AvailabilityFinderTest(unittest.TestCase):
  """Exercises AvailabilityFinder against canned file-system data.

  FIX: the deprecated unittest alias assertEquals is replaced by assertEqual
  throughout, matching the assertEqual calls already used at the end of
  testGetApiNodeAvailability.
  """

  def setUp(self):
    self._branch_utility = BranchUtility(
        os.path.join('branch_utility', 'first.json'),
        os.path.join('branch_utility', 'second.json'),
        FakeUrlFetcher(os.path.join(sys.path[0], 'test_data')),
        ObjectStoreCreator.ForTest())

    def create_availability_finder(file_system_data):
      # Each finder gets its own fake host file system and object store.
      fake_host_fs_creator = FakeHostFileSystemProvider(file_system_data)
      test_object_store = ObjectStoreCreator.ForTest()
      return AvailabilityFinder(self._branch_utility,
                                CompiledFileSystem.Factory(test_object_store),
                                HostFileSystemIterator(fake_host_fs_creator,
                                                       self._branch_utility),
                                fake_host_fs_creator.GetTrunk(),
                                test_object_store)

    self._avail_finder = create_availability_finder(CANNED_API_FILE_SYSTEM_DATA)
    self._node_avail_finder = create_availability_finder(TABS_SCHEMA_BRANCHES)

  def testGetApiAvailability(self):
    # Key: Using 'channel' (i.e. 'beta') to represent an availability listing
    # for an API in a _features.json file, and using |channel| (i.e. |dev|) to
    # represent the development channel, or phase of development, where an
    # API's availability is being checked.

    # Testing a whitelisted API.
    self.assertEqual(
        ChannelInfo('beta', CANNED_BRANCHES[27], 27),
        self._avail_finder.GetApiAvailability('declarativeWebRequest'))

    # Testing APIs found only by checking file system existence.
    self.assertEqual(
        ChannelInfo('stable', CANNED_BRANCHES[23], 23),
        self._avail_finder.GetApiAvailability('windows'))
    self.assertEqual(
        ChannelInfo('stable', CANNED_BRANCHES[18], 18),
        self._avail_finder.GetApiAvailability('tabs'))
    self.assertEqual(
        ChannelInfo('stable', CANNED_BRANCHES[18], 18),
        self._avail_finder.GetApiAvailability('input.ime'))

    # Testing API channel existence for _api_features.json.
    # Listed as 'dev' on |beta|, 'dev' on |dev|.
    self.assertEqual(
        ChannelInfo('dev', CANNED_BRANCHES[28], 28),
        self._avail_finder.GetApiAvailability('systemInfo.stuff'))
    # Listed as 'stable' on |beta|.
    self.assertEqual(
        ChannelInfo('beta', CANNED_BRANCHES[27], 27),
        self._avail_finder.GetApiAvailability('systemInfo.cpu'))

    # Testing API channel existence for _manifest_features.json.
    # Listed as 'trunk' on all channels.
    self.assertEqual(
        ChannelInfo('trunk', 'trunk', 'trunk'),
        self._avail_finder.GetApiAvailability('sync'))
    # No records of API until |trunk|.
    self.assertEqual(
        ChannelInfo('trunk', 'trunk', 'trunk'),
        self._avail_finder.GetApiAvailability('history'))
    # Listed as 'dev' on |dev|.
    self.assertEqual(
        ChannelInfo('dev', CANNED_BRANCHES[28], 28),
        self._avail_finder.GetApiAvailability('storage'))
    # Stable in _manifest_features and into pre-18 versions.
    self.assertEqual(
        ChannelInfo('stable', CANNED_BRANCHES[8], 8),
        self._avail_finder.GetApiAvailability('pageAction'))

    # Testing API channel existence for _permission_features.json.
    # Listed as 'beta' on |trunk|.
    self.assertEqual(
        ChannelInfo('trunk', 'trunk', 'trunk'),
        self._avail_finder.GetApiAvailability('falseBetaAPI'))
    # Listed as 'trunk' on |trunk|.
    self.assertEqual(
        ChannelInfo('trunk', 'trunk', 'trunk'),
        self._avail_finder.GetApiAvailability('trunkAPI'))
    # Listed as 'trunk' on all development channels.
    self.assertEqual(
        ChannelInfo('trunk', 'trunk', 'trunk'),
        self._avail_finder.GetApiAvailability('declarativeContent'))
    # Listed as 'dev' on all development channels.
    self.assertEqual(
        ChannelInfo('dev', CANNED_BRANCHES[28], 28),
        self._avail_finder.GetApiAvailability('bluetooth'))
    # Listed as 'dev' on |dev|.
    self.assertEqual(
        ChannelInfo('dev', CANNED_BRANCHES[28], 28),
        self._avail_finder.GetApiAvailability('cookies'))
    # Treated as 'stable' APIs.
    self.assertEqual(
        ChannelInfo('stable', CANNED_BRANCHES[24], 24),
        self._avail_finder.GetApiAvailability('alarms'))
    self.assertEqual(
        ChannelInfo('stable', CANNED_BRANCHES[21], 21),
        self._avail_finder.GetApiAvailability('bookmarks'))

    # Testing older API existence using extension_api.json.
    self.assertEqual(
        ChannelInfo('stable', CANNED_BRANCHES[6], 6),
        self._avail_finder.GetApiAvailability('menus'))
    self.assertEqual(
        ChannelInfo('stable', CANNED_BRANCHES[5], 5),
        self._avail_finder.GetApiAvailability('idle'))

    # Switches between _features.json files across branches.
    # Listed as 'trunk' on all channels, in _api, _permission, or _manifest.
    self.assertEqual(
        ChannelInfo('trunk', 'trunk', 'trunk'),
        self._avail_finder.GetApiAvailability('contextMenus'))
    # Moves between _permission and _manifest as file system is traversed.
    self.assertEqual(
        ChannelInfo('stable', CANNED_BRANCHES[23], 23),
        self._avail_finder.GetApiAvailability('systemInfo.display'))
    self.assertEqual(
        ChannelInfo('stable', CANNED_BRANCHES[17], 17),
        self._avail_finder.GetApiAvailability('webRequest'))

    # Mid-upgrade cases:
    # Listed as 'dev' on |beta| and 'beta' on |dev|.
    self.assertEqual(
        ChannelInfo('dev', CANNED_BRANCHES[28], 28),
        self._avail_finder.GetApiAvailability('notifications'))
    # Listed as 'beta' on |stable|, 'dev' on |beta| ... until |stable| on
    # trunk.
    self.assertEqual(
        ChannelInfo('trunk', 'trunk', 'trunk'),
        self._avail_finder.GetApiAvailability('events'))

  def testGetApiNodeAvailability(self):
    availability_graph = self._node_avail_finder.GetApiNodeAvailability('tabs')

    self.assertEqual(
        LookupResult(True, self._branch_utility.GetChannelInfo('trunk')),
        availability_graph.Lookup('tabs', 'properties',
                                  'fakeTabsProperty3'))
    self.assertEqual(
        LookupResult(True, self._branch_utility.GetChannelInfo('dev')),
        availability_graph.Lookup('tabs', 'events', 'onActivated',
                                  'parameters', 'activeInfo', 'properties',
                                  'windowId'))
    self.assertEqual(
        LookupResult(True, self._branch_utility.GetChannelInfo('dev')),
        availability_graph.Lookup('tabs', 'events', 'onUpdated', 'parameters',
                                  'tab'))
    self.assertEqual(
        LookupResult(True, self._branch_utility.GetChannelInfo('beta')),
        availability_graph.Lookup('tabs', 'events','onActivated'))
    self.assertEqual(
        LookupResult(True, self._branch_utility.GetChannelInfo('beta')),
        availability_graph.Lookup('tabs', 'functions', 'get', 'parameters',
                                  'tabId'))
    self.assertEqual(
        LookupResult(True, self._branch_utility.GetChannelInfo('stable')),
        availability_graph.Lookup('tabs', 'types', 'InjectDetails',
                                  'properties', 'code'))
    self.assertEqual(
        LookupResult(True, self._branch_utility.GetChannelInfo('stable')),
        availability_graph.Lookup('tabs', 'types', 'InjectDetails',
                                  'properties', 'file'))
    self.assertEqual(
        LookupResult(True, self._branch_utility.GetStableChannelInfo(25)),
        availability_graph.Lookup('tabs', 'types', 'InjectDetails'))

    # Nothing new in version 24 or 23.

    self.assertEqual(
        LookupResult(True, self._branch_utility.GetStableChannelInfo(22)),
        availability_graph.Lookup('tabs', 'types', 'Tab', 'properties',
                                  'windowId'))
    self.assertEqual(
        LookupResult(True, self._branch_utility.GetStableChannelInfo(21)),
        availability_graph.Lookup('tabs', 'types', 'Tab', 'properties',
                                  'selected'))

    # Nothing new in version 20.

    self.assertEqual(
        LookupResult(True, self._branch_utility.GetStableChannelInfo(19)),
        availability_graph.Lookup('tabs', 'functions', 'getCurrent'))
    self.assertEqual(
        LookupResult(True, self._branch_utility.GetStableChannelInfo(18)),
        availability_graph.Lookup('tabs', 'types', 'Tab', 'properties',
                                  'index'))
    self.assertEqual(
        LookupResult(True, self._branch_utility.GetStableChannelInfo(17)),
        availability_graph.Lookup('tabs', 'events', 'onUpdated', 'parameters',
                                  'changeInfo'))

    # Nothing new in version 16.

    self.assertEqual(
        LookupResult(True, self._branch_utility.GetStableChannelInfo(15)),
        availability_graph.Lookup('tabs', 'properties',
                                  'fakeTabsProperty2'))

    # Everything else is available at the API's release, version 14 here.
    self.assertEqual(
        LookupResult(True, self._branch_utility.GetStableChannelInfo(14)),
        availability_graph.Lookup('tabs', 'types', 'Tab'))
    self.assertEqual(
        LookupResult(True, self._branch_utility.GetStableChannelInfo(14)),
        availability_graph.Lookup('tabs', 'types', 'Tab',
                                  'properties', 'url'))
    self.assertEqual(
        LookupResult(True, self._branch_utility.GetStableChannelInfo(14)),
        availability_graph.Lookup('tabs', 'properties',
                                  'fakeTabsProperty1'))
    self.assertEqual(
        LookupResult(True, self._branch_utility.GetStableChannelInfo(14)),
        availability_graph.Lookup('tabs', 'functions', 'get', 'parameters',
                                  'callback'))
    self.assertEqual(
        LookupResult(True, self._branch_utility.GetStableChannelInfo(14)),
        availability_graph.Lookup('tabs', 'events', 'onUpdated'))

    # Test things that aren't available.
    self.assertEqual(LookupResult(False, None),
                     availability_graph.Lookup('tabs', 'types',
                                               'UpdateInfo'))
    self.assertEqual(LookupResult(False, None),
                     availability_graph.Lookup('tabs', 'functions', 'get',
                                               'parameters', 'callback',
                                               'parameters', 'tab', 'id'))
    self.assertEqual(LookupResult(False, None),
                     availability_graph.Lookup('functions'))
    self.assertEqual(LookupResult(False, None),
                     availability_graph.Lookup('events', 'onActivated',
                                               'parameters', 'activeInfo',
                                               'tabId'))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
# GENERATED BY devbox==0.2.0
"""
Summary
========
Helper file to generate package version numbers for you
Notes
=====
You're probably already using git tags to tag releases of your project. If you
aren't, you really should. Wouldn't it be great if your python package
automatically updated its version number using ``git describe``? You know, so
you don't have to do it manually all the time? It's almost like that's a
feature that should be available without stupid hacks.
But it's not. So here's how to do it with stupid hacks.
There are two modes of operation for version helper: static versioning and
dynamic versioning.
Static Versioning
-----------------
This is the simplest option. In this mode, you specify the version number in
you setup.py and ``__init__.py`` files by hand. Version helper provides a
script that will automatically update those for you. Your ``setup.py`` file
should look like this::
from mypackage_version import UpdateVersion
setup(
name='mypackage',
version='unknown',
cmdclass={'update_version': UpdateVersion},
...
)
No, really, the formatting is important. Your ``__init__.py`` file should have
a line in it that declares the version number::
__version__ = 'unknown'
The command to update these values will be exposed as the value specified in
``cmdclass``::
python setup.py update_version
This makes it easy to tag and upload your package to pypi::
python setup.py update_version
python setup.py test sdist upload
Note that you should not combine the 'update' and 'upload' commands because
setuptools will upload it with the old version number.
Dynamic Versioning
------------------
This option will auto-generate unique per-commit version numbers and stick them
in your project.
When you run ``python setup.py``, if you are running it from inside of a git
repository this script will generate a unique version number and embed it in an
auto-generated file in your package. By default the file is named
'_version.py', and you should add it to your ``.gitignore``. Since this is a
python file and it's in your package, it will get bundled up and distributed
with your package. During the installation process, this script will recognize
that it is not inside a git repository and will parse the version from the
``_version.py`` file.
Your setup.py file should look like this::
from version_helper import git_version
setup(
name='mypackage',
version=git_version(),
...
)
You're done! To view the auto-generated version number of your package, run::
python setup.py -V
If you want to embed the version as __version__ (PEP 396), put the following
lines into your package's __init__.py file::
try:
from ._version import __version__
except ImportError:
__version__ = 'unknown'
This method, while hacked, is useful if you need a CI server to continuously
build and upload your package to an internal pypi.
Hybrid Versioning
-----------------
You *can* use the two methods together. If you combine the two formats for the
``setup.py`` file::
from version_helper import git_version, UpdateVersion
setup(
name='mypackage',
version=git_version(),
cmdclass={'update_version': UpdateVersion},
...
)
This will auto-tag your builds. Then, when you want to strip out all the
dynamic-versioning machinery
and just use static version strings you can run the update_version command::
python setup.py update_version
"""
# pylint: disable=E0611,F0401,C0111
from __future__ import print_function
import locale
import os
import re
from distutils.core import Command
from distutils.errors import DistutilsOptionError, DistutilsError
import fileinput
import subprocess
from setuptools import find_packages
# Base `git describe` command and the extra arguments used to obtain a
# long-form, dirty-aware description with a 40-character abbreviated hash.
GIT_DESCRIBE = ('git', 'describe')
GIT_DESCRIBE_ARGS = ('--tags', '--dirty', '--abbrev=40', '--long')
class UpdateVersion(Command):
    """ Setup command that updates hardcoded versions from git tags """
    description = "Update the version number inside _version.py and setup.py"
    user_options = [
        ('package=', 'p', "Name of the package (if ambiguous)"),
        ('tag-prefix=', 't', "Strip this prefix off the git tag"),
        ('match=', 'm', "--match argument passed to 'git describe' "
                        "(default [0-9]*)"),
        ('pre', None, "Don't fail on prerelease versions"),
        ('dev', None, "Don't fail on development versions"),
        ('strict', None, "Convert development version strings to follow "
                         "PEP440"),
        ('no-purge', None, "Don't attempt to remove all references to "
                           "version helper"),
        # BUG FIX: the trailing '=' was missing, so distutils treated this as
        # a boolean flag and '--version-mod=<file>' was rejected.
        ('version-mod=', None, "The file to write version constants to "
                               "(default _version.py) (hybrid mode only)"),
    ]
    boolean_options = ['strict', 'pre', 'dev', 'no-purge']

    def initialize_options(self):
        # BUG FIX: the 'match=' option maps onto ``self.match``, but the old
        # code only ever initialized/read ``self.tag_match``, so a
        # user-supplied --match value was silently ignored. The attribute is
        # renamed to ``match`` everywhere so option parsing actually works.
        self.match = None
        self.tag_prefix = ''
        self.strict = 0
        self.pre = 0
        self.dev = 0
        self.no_purge = 0
        self.package = None
        self.version_mod = '_version.py'

    def finalize_options(self):
        if self.match is None:
            # Default: match tags that start with the prefix and a digit.
            self.match = self.tag_prefix + '[0-9]*'
        if self.package is None:
            self.package = find_package()

    def strip_tag(self, version_data):
        """ Strip a prefix off the git tag """
        version_data['tag'] = version_data['tag'][len(self.tag_prefix):]

    def run(self):
        version_data = git_version_data(self.match, self.strip_tag,
                                        self.strict)
        # Refuse to publish dev/prerelease versions unless explicitly allowed.
        if version_data['is_dev']:
            if not self.dev:
                raise DistutilsError("Development version '%(version)s' "
                                     "blocked! Use --dev to override." %
                                     version_data)
        elif not self.pre and version_data['is_prerelease']:
            raise DistutilsError("Prerelease version '%(version)s' blocked! "
                                 "Use --pre to override." % version_data)
        data = {
            'version': version_data['version']
        }
        is_hybrid = replace_dynamic_with_static(version_data['version'])
        if not self.no_purge:
            print("Removing %s from setup.py and MANIFEST.in" % __name__)
            remove_all_references()
        if is_hybrid:
            mod_file = os.path.join(os.path.curdir, self.package,
                                    self.version_mod)
            write_constants_to_mod(mod_file, data)
            print("Set version: %(version)s" % version_data)
        else:
            write_constants_to_setup(data)
            write_constants_to_init(self.package, data)
def find_package():
    """Locate the single top-level package directory of this project.

    Returns
    -------
    package_dir : str
        The name of the directory that contains the python package

    Raises
    ------
    error : :class:`distutils.errors.DistutilsOptionError`
        If zero or more than one candidate package is found
    """
    # Exclude dotted names so only top-level packages are considered.
    candidates = find_packages(exclude=['*.*'])
    if not candidates:
        raise DistutilsOptionError("No package found")
    if len(candidates) > 1:
        raise DistutilsOptionError("Multiple possible packages found! "
                                   "Please specify one: %s" % (candidates,))
    return candidates[0]
def parse_constants_from_mod(filename):
    """Read ``__key__ = 'value'`` constants back out of a version module.

    Returns a dict mapping stripped key names (``version`` for
    ``__version__``) to their string values, or ``None`` when *filename*
    does not exist.  The ``__all__`` entry is skipped.
    """
    if not os.path.exists(filename):
        return None
    constants = {}
    with open(filename, 'r') as infile:
        for line in infile:
            # Split on the first '=' only; lines without one are ignored.
            name, sep, rhs = line.partition('=')
            if not sep:
                continue
            key = name.strip(' _')
            if key == 'all':
                continue
            constants[key] = rhs.strip().strip('\'\"')
    return constants
def write_constants_to_mod(filename, constants):
    """Emit *constants* as dunder assignments into a generated module file."""
    header = ('""" This file is auto-generated during the '
              'package-building process """%s' % os.linesep)
    body = [header]
    for key, value in constants.items():
        body.append("__%s__ = '%s'%s" % (key, value, os.linesep))
    # Advertise every written constant through __all__.
    exported = ['__%s__' % key for key in constants]
    body.append('__all__ = %s%s' % (exported, os.linesep))
    with open(filename, 'w') as outfile:
        outfile.write(''.join(body))
def write_constants_to_setup(constants):
    """Rewrite matching keyword values inside the project's ``setup.py``."""
    setup_py = os.path.join(os.path.curdir, 'setup.py')
    # Matches e.g. ``    version='...',`` preserving the leading indent.
    replace_in_file(setup_py, constants,
                    r'^(\s*)%s\s*=\s*[\'"].*?[\'"]\s*,?\s*$',
                    r"\1%s='%s',")
def write_constants_to_init(package, constants):
    """Replace ``__key__`` constant assignments in the package ``__init__.py``.

    Parameters
    ----------
    package : str
        Name of the package directory that holds ``__init__.py``.
    constants : dict
        Mapping of constant names (without the dunder underscores) to their
        new string values.
    """
    filename = os.path.join(os.path.curdir, package, '__init__.py')
    # BUG FIX: the replacement template used to end with a trailing comma
    # (copied from the setup.py variant), which rewrote the module-level
    # assignment as ``__version__ = '1.0',`` -- a one-element tuple.
    replace_in_file(filename, constants,
                    r'^__%s__\s*=\s*["\'].*?["\']\s*$',
                    r"__%s__ = '%s'")
def replace_dynamic_with_static(version):
    """Substitute the literal *version* for any ``git_version(...)`` call
    found in ``setup.py``.

    Returns True when a substitution was made (i.e. the project was in
    "hybrid" mode), False otherwise.
    """
    filename = os.path.join(os.path.curdir, 'setup.py')
    with open(filename, 'r') as ifile:
        original = ifile.read()
    # Replace the whole call, including its argument list, with a quoted
    # version string.
    updated = re.sub(r'git_version\s*\([^\)]*\)', "'%s'" % version, original)
    if updated == original:
        return False
    with open(filename, 'w') as ofile:
        ofile.write(updated)
    return True
def remove_all_references():
    """Strip every reference to this version-helper module from setup.py
    and MANIFEST.in (used when baking a static version into a release)."""
    setup_py = os.path.join(os.path.curdir, 'setup.py')
    import_line = re.compile(r'^(from {0} import|import {0})'.format(__name__))
    # Drop import lines entirely; neutralize remaining class references.
    for line in fileinput.FileInput(setup_py, inplace=True):
        if import_line.match(line):
            continue
        print(line.replace('UpdateVersion', 'None'), end='')
    # Flip the helper's 'include' line to 'exclude' in MANIFEST.in.
    manifest_file = os.path.join(os.path.curdir, 'MANIFEST.in')
    for line in fileinput.FileInput(manifest_file, inplace=True):
        print(re.sub(r'^include (%s.py)' %
                     __name__, r'exclude \1', line), end='')
def replace_in_file(filename, constants, pattern, replace_pattern):
    """Replace constant assignments in *filename* in place using regexes.

    Parameters
    ----------
    filename : str
        Path of the file to rewrite in place.
    constants : dict
        Mapping of constant names to their replacement values.
    pattern : str
        Regex template with one ``%s`` slot for the constant name.
    replace_pattern : str
        Replacement template with ``%s`` slots for the name and value.
    """
    sub_args = []
    # BUG FIX: dict.iteritems() is Python 2 only; this module already relies
    # on Python 3 print(..., end='') semantics, so iteritems() raised
    # AttributeError on every call.  Use items() instead.
    for key, val in constants.items():
        sub_args.append((
            pattern % key,
            replace_pattern % (key, val),
        ))
    for line in fileinput.FileInput(filename, inplace=True):
        modified = False
        for regex, replacement in sub_args:
            # The supplied patterns end in '\s*$', which swallows the
            # trailing newline, so print() re-adds exactly one.
            new_line = re.sub(regex, replacement, line)
            if new_line != line:
                print(new_line)
                modified = True
                break
        if not modified:
            print(line, end='')
def git_describe(describe_args):
    """
    Pull the version information from git via ``git describe``.

    Parameters
    ----------
    describe_args : list
        Arguments appended to the ``GIT_DESCRIBE`` command before it is
        passed to subprocess

    Returns
    -------
    data : dict
        Dictionary of repo data. The fields are listed below

        tag : str
            The git tag for this version
        description : str
            The output of ``git describe``
        is_dev : bool
            True if is_dirty or if addl_commits > 0
        is_dirty : bool
            True if the git repo is dirty
        addl_commits : int
            The number of additional commits on top of the tag ref
        ref : str
            The ref for the current commit
        dirty_suffix : str
            The string that would denote that the working copy is dirty

    Notes
    -----
    On a nonzero ``git describe`` exit code this does NOT raise; it prints
    a warning and returns a placeholder dict (tag/description 'unknown').
    The failure dict additionally carries ``is_prerelease``; on success that
    key is filled in later by ``git_version_data``.
    """
    # Decode git's output with the locale's encoding, falling back to utf-8.
    encoding = locale.getdefaultlocale()[1] or 'utf-8'
    proc = subprocess.Popen(GIT_DESCRIBE + describe_args,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    output = proc.communicate()[0]
    description = output.decode(encoding).strip()
    if proc.returncode != 0:
        print("Error parsing git revision! Make sure that you have tagged a "
              "commit, and that the tag matches the 'tag_match' argument")
        print("Git output: " + description)
        # Placeholder marks the build as dev/prerelease so it cannot be
        # mistaken for a releasable version.
        return {
            'tag': 'unknown',
            'description': 'unknown',
            'is_dirty': False,
            'is_dev': True,
            'is_prerelease': True,
            'addl_commits': 0,
            'ref': 'unknown',
            'dirty_suffix': '-dirty',
        }
    # 'git describe' output looks like: <tag>-<N>-g<sha>[<dirty_suffix>]
    components = description.split('-')
    # trim off the dirty suffix
    dirty_suffix = '-dirty'
    is_dirty = False
    # Honor a custom suffix if the caller passed --dirty=<suffix>.
    for arg in describe_args:
        if arg.startswith('--dirty='):
            dirty_suffix = arg.split('=')[1]
            break
    if dirty_suffix.startswith('-') and components[-1] == dirty_suffix[1:]:
        # Suffix began with '-', so the split() isolated it as its own piece.
        components = components[:-1]
        is_dirty = True
    elif components[-1].endswith(dirty_suffix):
        # Suffix is glued onto the last component; slice it off.
        components[-1] = components[-1][:-len(dirty_suffix)]
        is_dirty = True
    # components[-1] is 'g<sha>'; drop the leading 'g' to get the ref.
    ref = components[-1][1:]
    addl_commits = int(components[-2])
    # Everything before <N>-g<sha> is the tag (tags may contain '-').
    tag = '-'.join(components[:-2])
    return {
        'tag': tag,
        'description': description,
        'is_dirty': is_dirty,
        'is_dev': is_dirty or addl_commits > 0,
        'addl_commits': addl_commits,
        'ref': ref,
        'dirty_suffix': dirty_suffix,
    }
def git_version_data(tag_match='[0-9]*', post_process=None, strict=False):
    """Turn the raw ``git describe`` data into finalized version info.

    Parameters
    ----------
    tag_match : str
        Match only tags with this format (default '[0-9]*'). Note that this
        uses glob matching, not PCRE.
    post_process : callable or None
        A function that accepts the output of :meth:`.git_describe` and
        optionally mutates it. This can be used to convert custom tags into
        version numbers (ex. 'v0.1' => '0.1') (default None)
    strict : bool
        If true, create a PEP 440 compatible version number for development
        versions (default False)

    Returns
    -------
    version_data : dict
        All the values from :meth:`~.git_describe` plus ``version`` (the
        finalized version string) and ``is_prerelease`` (True if the version
        is considered 'prerelease').
    """
    describe_args = GIT_DESCRIBE_ARGS
    if tag_match is not None:
        describe_args += ('--match=%s' % tag_match,)
    version_data = git_describe(describe_args)
    if post_process is not None:
        post_process(version_data)
    if not version_data['is_dev']:
        version = version_data['tag']
    elif strict:
        # PEP 440 development form: <tag>.post0.devN
        version = (version_data['tag'] +
                   ".post0.dev%(addl_commits)d" % version_data)
    else:
        # Descriptive form: <tag>-N-g<sha7>, plus the dirty marker if any.
        version = "{tag}-{addl_commits}-g{ref:<.7}".format(**version_data)
        if version_data['is_dirty']:
            version += version_data['dirty_suffix']
    version_data['version'] = version
    # Anything that is not plain dotted digits counts as a prerelease.
    version_data['is_prerelease'] = re.match(r'^\d+(\.\d+)*$', version) is None
    return version_data
def git_version(package=None,
                tag_match='[0-9]*',
                version_mod='_version.py',
                post_process=None,
                strict=False):
    """Generate the version from the git revision, or retrieve it from the
    auto-generated module when no git checkout is present.

    Parameters
    ----------
    package : str, optional
        The name of the directory that contains the package's code. If not
        specified, it will be inferred.
    tag_match : str, optional
        Match only tags with this format (default '[0-9]*'). Note that this
        uses glob matching, not PCRE.
    version_mod : str, optional
        The name of the file to write the version into (default '_version.py')
    post_process : callable, optional
        A function that accepts the output of :meth:`.git_describe` and
        optionally mutates it. This can be used to convert custom tags into
        version numbers (ex. 'v0.1' => '0.1') (default None)
    strict : bool, optional
        If true, create a PEP 440 compatible version number for development
        versions (default False)

    Returns
    -------
    version : str
    """
    here = os.path.abspath(os.path.dirname(__file__))
    if package is None:
        package = find_package()
    mod_file = os.path.join(here, package, version_mod)
    if os.path.isdir(os.path.join(here, '.git')):
        # Live git checkout: compute the version and cache it in the module.
        version_data = git_version_data(tag_match, post_process, strict)
        data = {'version': version_data['version']}
        write_constants_to_mod(mod_file, data)
        return data['version']
    # No .git directory: fall back to the previously generated module.
    data = parse_constants_from_mod(mod_file)
    if data is not None:
        return data['version']
    # We might be inside a github archive or something; try to recover the
    # version from a '<package>-<version>' directory name.
    dirname = os.path.basename(here)
    if dirname.lower().startswith(package.lower() + '-'):
        return dirname.split('-', 1)[1]
    return 'unknown'
| |
import re
from lxml import html
try:
from pyokc import helpers
from pyokc import magicnumbers
from pyokc.objects import MessageThread, Question, Session
from pyokc.settings import USERNAME, PASSWORD
except ImportError:
import helpers
import magicnumbers
from objects import MessageThread, Question, Session
from settings import USERNAME, PASSWORD
class User:
    """
    Represent an OKCupid user. Username and password are only optional
    if you have already filled in your username and password in
    settings.py.

    Parameters
    ----------
    username : str, optional
        The username for your OKCupid account.
    password : str, optional
        The password for your OKCupid account.

    Raises
    ------
    AuthenticationError
        If you are unable to login with the username and password
        provided.
    """
    def __init__(self, username=USERNAME, password=PASSWORD):
        self.username = username
        self.inbox = []
        self.outbox = []
        self.drafts = []
        self.questions = []
        self.visitors = []
        self._session = Session()
        # A real browser UA string; OKCupid serves different markup to
        # unrecognized clients.
        headers = {
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.62 Safari/537.36'
        }
        credentials = {'username': username, 'password': password}
        helpers.login(self._session, credentials, headers)
        profile_response = self._session.get('https://www.okcupid.com/profile')
        profile_tree = html.fromstring(profile_response.content.decode('utf8'))
        self.age, self.gender, self.orientation, self.status = helpers.get_additional_info(profile_tree)
        # Prime the first page of the inbox and the visitors list up front.
        self.update_mailbox(pages=1)
        self.update_visitors()

    def update_mailbox(self, box='inbox', pages=10):
        """
        Update either `self.inbox`, `self.outbox`, or `self.drafts` with
        MessageThread objects that represent a conversation with another
        user.

        Parameters
        ----------
        box : str, optional
            Specifies which box to update. Valid choices are inbox, outbox,
            drafts and all. Update the inbox by default.
        pages : int, optional
            Maximum number of mailbox pages (30 threads per page) to fetch.
        """
        for i in ('inbox', 'outbox', 'drafts'):
            # When a single box is requested, overwrite the loop variable so
            # only that box is processed; the loop exits after one pass.
            if box.lower() != 'all':
                i = box.lower()
            if i.lower() == 'inbox':
                folder_number = 1
                update_box = self.inbox
                direction = 'from'
            elif i.lower() == 'outbox':
                folder_number = 2
                update_box = self.outbox
                direction = 'to'
            elif i.lower() == 'drafts':
                # What happened to folder 3? Who knows.
                folder_number = 4
                update_box = self.drafts
                direction = 'to'
            for page in range(pages):
                inbox_data = {
                    'low': 30*page + 1,
                    'folder': folder_number,
                }
                get_messages = self._session.post('http://www.okcupid.com/messages', data=inbox_data)
                inbox_tree = html.fromstring(get_messages.content.decode('utf8'))
                messages_container = inbox_tree.xpath("//ul[@id = 'messages']")[0]
                for li in messages_container.iterchildren('li'):
                    # The page number is appended to the thread id so threads
                    # from different pages never collide; read() strips it.
                    threadid = li.attrib['data-threadid'] + str(page)
                    if threadid not in [thread.threadid for thread in update_box]:
                        sender = li.xpath(".//span[@class = 'subject']")[0].text_content()
                        if len(sender) > 3 and sender[:3] == 'To ':
                            sender = sender[3:]
                        unread = 'unreadMessage' in li.attrib['class']
                        update_box.append(MessageThread(sender, threadid, unread, self._session, direction))
                # Stop paging once the "next" button is disabled.
                next_disabled = inbox_tree.xpath("//li[@class = 'next disabled']")
                if len(next_disabled):
                    break
            if box.lower() != 'all':
                break

    def message(self, username, message_text):
        """
        Send a message to the username specified.

        Parameters
        ----------
        username : str or Profile
            Username of the profile that is being messaged.
        message_text : str
            Text body of the message.
        """
        threadid = ''
        if isinstance(username, Profile):
            # BUG FIX: this used to read ``username == username.name`` (a
            # no-op comparison), so passing a Profile object never worked.
            username = username.name
        for thread in self.inbox[::-1]:  # reverse, find most recent messages first
            if thread.sender.lower() == username.lower():
                threadid = thread.threadid
                break
        get_messages = self._session.get('http://www.okcupid.com/messages')
        inbox_tree = html.fromstring(get_messages.content.decode('utf8'))
        authcode = helpers.get_authcode(inbox_tree)
        msg_data = {
            'ajax': '1',
            'sendmsg': '1',
            'r1': username,
            'body': message_text,
            'threadid': threadid,
            'authcode': authcode,
            'reply': '1',
        }
        self._session.post('http://www.okcupid.com/mailbox', data=msg_data)

    def search(self, location='', radius=25, number=18, age_min=18, age_max=99,
               order_by='match', last_online='week', status='single',
               height_min=None, height_max=None, looking_for='', **kwargs):
        """
        Search OKCupid profiles, return a list of matching profiles.
        See the search page on OKCupid for a better idea of the
        arguments expected.

        Parameters
        ----------
        location : string, optional
            Location of profiles returned. Accept ZIP codes, city
            names, city & state combinations, and city & country
            combinations. Default to user location if unable to
            understand the string or if no value is given.
        radius : int, optional
            Radius in miles searched, centered on the location.
        number : int, optional
            Number of profiles returned. Default to 18, which is the
            same number that OKCupid returns by default.
        age_min : int, optional
            Minimum age of profiles returned. Cannot be lower than 18.
        age_max : int, optional
            Maximum age of profiles returned. Cannot be higher than 99.
        order_by : str, optional
            Order in which profiles are returned.
        last_online : str, optional
            How recently online the profiles returned are. Can also be
            an int that represents seconds.
        status : str, optional
            Dating status of profiles returned. Default to 'single'
            unless the argument is either 'not single', 'married', or
            'any'.
        height_min : int, optional
            Minimum height in inches of profiles returned.
        height_max : int, optional
            Maximum height in inches of profiles returned.
        looking_for : str, optional
            Describe the gender and orientation of profiles returned.
            If left blank, return some variation of "guys/girls who
            like guys/girls" or "both who like bi girls/guys",
            depending on the user's gender and orientation.
        smokes : str or list of str, optional
            Smoking habits of profiles returned.
        drinks : str or list of str, optional
            Drinking habits of profiles returned.
        drugs : str or list of str, optional
            Drug habits of profiles returned.
        education : str or list of str, optional
            Highest level of education attained by profiles returned.
        job : str or list of str, optional
            Industry in which the profile users work.
        income : str or list of str, optional
            Income range of profiles returned.
        religion : str or list of str, optional
            Religion of profiles returned.
        monogamy : str or list of str, optional
            Whether the profiles returned are monogamous or non-monogamous.
        offspring : str or list of str, optional
            Whether the profiles returned have or want children.
        pets : str or list of str, optional
            Dog/cat ownership of profiles returned.
        languages : str, optional
            Language spoken for profiles returned.
        diet : str or list of str, optional
            Dietary restrictions of profiles returned.
        sign : str or list of str, optional
            Astrological sign of profiles returned.
        ethnicity : str or list of str, optional
            Ethnicity of profiles returned.
        join_date : int or str, optional
            Either a string describing the profile join dates ('last
            week', 'last year' etc.) or an int indicating the number
            of maximum seconds from the moment of joining OKCupid.
        keywords : str, optional
            Keywords that the profiles returned must contain. Note that
            spaces separate keywords, ie. `keywords="love cats"` will
            return profiles that contain both "love" and "cats" rather
            than the exact string "love cats".
        """
        if not len(looking_for):
            looking_for = helpers.get_looking_for(self.gender, self.orientation)
        looking_for_number = magicnumbers.seeking[looking_for.lower()]
        # Clamp ages to OKCupid's supported range and keep them ordered.
        if age_min < 18:
            age_min = 18
        if age_max > 99:
            age_max = 99
        if age_min > age_max:
            age_min, age_max = age_max, age_min
        locid = helpers.get_locid(self._session, location)
        last_online_int = helpers.format_last_online(last_online)
        status_parameter = helpers.format_status(status)
        search_parameters = {
            'filter1': '0,{0}'.format(looking_for_number),
            'filter2': '2,{0},{1}'.format(age_min, age_max),
            'filter3': '5,{0}'.format(last_online_int),
            'filter4': '35,{0}'.format(status_parameter),
            'locid': locid,
            'lquery': location,
            'timekey': 1,
            'matchOrderBy': order_by.upper(),
            'custom_search': 0,
            'fromWhoOnline': 0,
            'mygender': self.gender[0],
            'update_prefs': 1,
            'sort_type': 0,
            'sa': 1,
            'using_saved_search': '',
            'limit': number,
        }
        # Filters 1-4 are fixed; optional filters are numbered from 5 up.
        filter_no = '5'
        if location.lower() != 'anywhere':
            search_parameters['filter5'] = '3,{0}'.format(radius)
            filter_no = str(int(filter_no) + 1)
        if height_min is not None or height_max is not None:
            height_query = magicnumbers.get_height_query(height_min, height_max)
            search_parameters['filter{0}'.format(filter_no)] = height_query
            filter_no = str(int(filter_no) + 1)
        for key, value in kwargs.items():
            # BUG FIX: 'languages' was previously listified here too, which
            # made the ``value.title()`` lookup below fail on a list.
            if isinstance(value, str) and key.lower() not in ('join_date', 'keywords', 'languages'):
                value = [value]
            if key in ['smokes', 'drinks', 'drugs', 'education', 'job',
                       'income', 'religion', 'monogamy', 'diet', 'sign',
                       'ethnicity'] and len(value):
                search_parameters['filter{0}'.format(filter_no)] = magicnumbers.get_options_query(key, value)
                filter_no = str(int(filter_no) + 1)
            elif key == 'pets':
                dog_query, cat_query = magicnumbers.get_pet_queries(value)
                search_parameters['filter{0}'.format(filter_no)] = dog_query
                filter_no = str(int(filter_no) + 1)
                search_parameters['filter{0}'.format(filter_no)] = cat_query
                filter_no = str(int(filter_no) + 1)
            elif key == 'offspring':
                kids_query = magicnumbers.get_kids_query(value)
                search_parameters['filter{0}'.format(filter_no)] = kids_query
                filter_no = str(int(filter_no) + 1)
            elif key == 'languages':
                language_query = magicnumbers.language_map[value.title()]
                search_parameters['filter{0}'.format(filter_no)] = '22,{0}'.format(language_query)
                filter_no = str(int(filter_no) + 1)
            elif key == 'join_date':
                join_date_query = magicnumbers.get_join_date_query(value)
                search_parameters['filter{0}'.format(filter_no)] = join_date_query
                filter_no = str(int(filter_no) + 1)
            elif key == 'keywords':
                search_parameters['keywords'] = value
        # BUG FIX: two leftover debug ``return`` statements here made the
        # profile-parsing code below unreachable (and ``profiles_request``
        # undefined), so search() never returned the documented list.
        profiles_request = self._session.post('http://www.okcupid.com/match', data=search_parameters)
        profiles_tree = html.fromstring(profiles_request.content.decode('utf8'))
        profiles = []
        for div in profiles_tree.iter('div'):
            info = helpers.get_profile_basics(div, profiles)
            if len(info):
                profiles.append(Profile(self._session, info['name'], info['age'],
                                info['location'], info['match'], enemy=info['enemy'],
                                id=info['id'], rating=info['rating'], contacted=info['contacted']))
        return profiles

    def visit(self, username, update_pics=False):
        """Visit another user's profile. Automatically update the
        `essays`, `details`, and `looking_for` attributes of the
        visited profile. Accept either a string or a Profile object as
        an argument. Note that unless your profile is set to browse
        anonymously on OKCupid, you are likely to show up on this
        user's visitors list.

        Parameters
        ----------
        username : str, Profile
            Username of the profile to visit. Can be either a string or a
            Profile object.
        update_pics : bool
            Determines whether or not update_pics() is automatically
            called for this profile.

        Returns
        -------
        Profile
            An instance of Profile containing the visited user's
            information.
        """
        if isinstance(username, Profile):
            prfl = username
        else:  # string
            prfl = Profile(self._session, username)
        params = {
            'cf': 'leftbar_match',
            'leftbar_match': 1,
        }
        profile_request = self._session.post('http://www.okcupid.com/profile/{0}'.format(prfl.name), data=params)
        profile_tree = html.fromstring(profile_request.content.decode('utf8'))
        prfl.match, prfl.enemy = helpers.get_percentages(profile_tree)
        prfl.age, prfl.gender, prfl.orientation, prfl.status = helpers.get_additional_info(profile_tree)
        if len(profile_tree.xpath("//div[@id = 'rating']")):
            prfl.rating = helpers.get_rating(profile_tree.xpath("//div[@id = 'rating']")[0])
        elif len(profile_tree.xpath("//button[@class = 'flatbutton white binary_rating_button like liked']")):
            prfl.rating = 5
        helpers.update_essays(profile_tree, prfl.essays)
        helpers.update_looking_for(profile_tree, prfl.looking_for)
        helpers.update_details(profile_tree, prfl.details)
        # If update_pics is False, you will need to call Profile.update_pics()
        # manually if you wish to access urls in this profile's pics attribute,
        # however this method will be approximately 3 seconds quicker because
        # it makes only 1 request instead of 2.
        if update_pics:
            prfl.update_pics()
        if prfl._id is None:
            prfl._id = helpers.get_profile_id(profile_tree)
        return prfl

    def update_questions(self):
        """
        Update `self.questions` with a sequence of question objects,
        whose properties can be found in objects.py. Note that this
        can take a while due to OKCupid displaying only ten questions
        on each page, potentially requiring a large number of requests.
        """
        keep_going = True
        question_number = 0
        while keep_going:
            questions_data = {
                'low': 1 + question_number,
            }
            get_questions = self._session.post(
                'http://www.okcupid.com/profile/{0}/questions'.format(self.username),
                data=questions_data)
            tree = html.fromstring(get_questions.content.decode('utf8'))
            next_wrapper = tree.xpath("//li[@class = 'next']")
            # Get a list of each question div wrapper, ignore the first because it's an unanswered question
            question_wrappers = tree.xpath("//div[contains(@id, 'question_')]")[1:]
            # BUG FIX: the previous code removed items from the list while
            # iterating over it, which skips the element after each removal.
            # Build a filtered list instead ('question_' is 9 characters, so
            # id[9:] is the numeric part).
            question_wrappers = [div for div in question_wrappers
                                 if div.attrib['id'][9:].isdigit()]
            for div in question_wrappers:
                question_number += 1
                explanation = ''
                text = helpers.replace_chars(div.xpath(".//div[@class = 'qtext']/p/text()")[0])
                user_answer = div.xpath(".//li[contains(@class, 'mine')]/text()")[0]
                explanation_p = div.xpath(".//p[@class = 'value']")
                if explanation_p[0].text is not None:
                    explanation = explanation_p[0].text
                self.questions.append(Question(text, user_answer, explanation))
            if not len(next_wrapper):
                keep_going = False

    def read(self, thread):
        """
        Update messages attribute of a thread object with a list of
        messages to and from the main User and another profile.

        Parameters
        ----------
        thread : MessageThread
            Instance of MessageThread whose `messages` attribute you
            wish to update.
        """
        # threadid[:-1] strips the page number appended by update_mailbox.
        # NOTE(review): this assumes the page number is a single digit —
        # threads found on page 10+ would be truncated incorrectly; confirm.
        thread_data = {'readmsg': 'true', 'threadid': thread.threadid[:-1], 'folder': 1}
        get_thread = self._session.get('http://www.okcupid.com/messages', params=thread_data)
        thread_tree = html.fromstring(get_thread.content.decode('utf8'))
        helpers.add_newlines(thread_tree)
        for li in thread_tree.iter('li'):
            if 'class' in li.attrib and li.attrib['class'] in ('to_me', 'from_me', 'from_me preview'):
                message_string = helpers.get_message_string(li, thread.sender)
                thread.messages.append(message_string)

    def update_visitors(self):
        """
        Update self.visitors with a Profile instance for each
        visitor on your visitors list.
        """
        get_visitors = self._session.get('http://www.okcupid.com/visitors')
        tree = html.fromstring(get_visitors.content.decode('utf8'))
        divs = tree.xpath("//div[@class = 'user_row_item clearfix ']")
        for div in divs:
            name = div.xpath(".//a[@class = 'name']/text()")[0]
            age = int(div.xpath(".//div[@class = 'userinfo']/span[@class = 'age']/text()")[0])
            location = div.xpath(".//div[@class = 'userinfo']/span[@class = 'location']/text()")[0]
            match = int(div.xpath(".//p[@class = 'match_percentages']/span[@class = 'match']/text()")[0].replace('%', ''))
            enemy = int(div.xpath(".//p[@class = 'match_percentages']/span[@class = 'enemy']/text()")[0].replace('%', ''))
            self.visitors.append(Profile(self._session, name, age, location, match, enemy))

    def rate(self, profile, rating):
        """
        Rate a profile 1 through 5 stars. Profile argument may be
        either a Profile object or a string. However, if it is a
        string we must first visit the profile to get its id number.

        Parameters
        ----------
        profile : str or Profile
            The profile that you wish to rate.
        rating : str or int
            1 through 5 star rating that you wish to bestow.
        """
        if isinstance(profile, str):
            # Visiting fills in profile._id, which the vote handler needs.
            profile = self.visit(profile)
        parameters = {
            'target_userid': profile._id,
            'type': 'vote',
            'target_objectid': '0',
            'vote_type': 'personality',
            'score': rating,
        }
        self._session.post('http://www.okcupid.com/vote_handler',
                           data=parameters)

    def quickmatch(self):
        '''
        Return an instance of a Profile representing the profile on
        your Quickmatch page.

        Returns
        -------
        Profile
        '''
        get_quickmatch = self._session.get('http://www.okcupid.com/quickmatch')
        tree = html.fromstring(get_quickmatch.content.decode('utf8'))
        # all of the profile information on the quickmatch page is hidden in
        # a <script> element, meaning that regex is unfortunately necessary
        for script in tree.iter('script'):
            if script.text is not None:
                search_result = re.search(r'[^{]"tuid" : "(\d+)', script.text)
                if search_result is not None:
                    profile_id = search_result.group(1)
                # I'm sorry.
                broad_result = re.search(r'''"location"\s:\s"(.+?)".+
                                         "epercentage"\s:\s(\d{1,2}),\s
                                         "fpercentage"\s:\s(\d{1,2}),\s
                                         "tracking_age"\s:\s(\d{2}).+
                                         "sn"\s:\s"(.+?)",\s
                                         "percentage"\s:\s(\d{1,2})''',
                                         script.text, re.VERBOSE)
                if broad_result is not None:
                    location = broad_result.group(1)
                    enemy = int(broad_result.group(2))
                    friend = int(broad_result.group(3))
                    age = int(broad_result.group(4))
                    username = broad_result.group(5)
                    match = int(broad_result.group(6))
                    return Profile(self._session, username, age=age, location=location,
                                   match=match, enemy=enemy, id=profile_id)

    def __str__(self):
        return '<User {0}>'.format(self.username)
class Profile:
    """
    Represent another user on OKCupid. You should not initialize these
    on their own. Instead, User.search() returns a list of Profile
    objects, and User.visit() returns a single Profile object. You can
    also find a list of Profile objects in User.visitors. Most of the
    attributes will be empty until User.visit() is called.
    self.questions, self.traits, and self.pics will remain empty until
    self.update_questions(), self.update_traits(), and
    self.update_pics() are called, respectively.

    Parameters
    ----------
    name : str
        The username of this profile.
    age : int
        The age of this profile's user.
    location : str
        The geographical location of this profile's user.
    match : int
        The match percentage that you have with this profile.
    enemy : int
        The enemy percentage that you have with this profile.
    rating : int
        The rating you gave this profile.
    contacted : bool
        Whether you've contacted this user or not.
    """
    def __init__(self, _session, name, age=None, location='', match=None,
                 enemy=None, id=None, rating=0, contacted=False):
        self._session = _session
        self._id = id
        self.name = name
        self.age = age
        self.location = location
        self.match = match
        self.enemy = enemy
        self.rating = rating
        self.contacted = contacted
        # The fields below stay empty/None until User.visit() or the
        # update_*() methods fill them in.
        self.gender = None
        self.orientation = None
        self.status = None
        self.pics = []
        self.questions = []
        self.traits = []
        self.essays = {
            'self summary': '',
            'life': '',
            'good at': '',
            'first things': '',
            'favorites': '',
            'six things': '',
            'thinking': '',
            'friday night': '',
            'private thing': '',
            'message me if': '',
        }
        self.looking_for = {
            'gentation': '',
            'ages': '',
            'near': '',
            'single': '',
            'seeking': '',
        }
        self.details = {
            'last online': '',
            'orientation': '',
            'ethnicity': '',
            'height': '',
            'body type': '',
            'diet': '',
            'smokes': '',
            'drinks': '',
            'drugs': '',
            'religion': '',
            'sign': '',
            'education': '',
            'job': '',
            'income': '',
            'relationship type': '',
            'offspring': '',
            'pets': '',
            'speaks': '',
        }

    def update_questions(self):
        """
        Update self.questions with Question instances, which contain
        text, user_answer, and explanation attributes. See
        the Question class in objects.py for more details. Like
        User.update_questions(), note that this can take a while due to
        OKCupid displaying only ten questions on each page, potentially
        requiring a large number of requests to the server.
        """
        keep_going = True
        question_number = 0
        while keep_going:
            questions_data = {
                'low': 1 + question_number,
            }
            get_questions = self._session.post(
                'http://www.okcupid.com/profile/{0}/questions'.format(self.name),
                data=questions_data)
            tree = html.fromstring(get_questions.content.decode('utf8'))
            next_wrapper = tree.xpath("//li[@class = 'next']")
            question_wrappers = tree.xpath("//div[contains(@id, 'question_')]")
            # BUG FIX: the previous code removed items from the list while
            # iterating over it, which skips the element after each removal.
            # Build a filtered list instead ('question_' is 9 characters, so
            # id[9:] is the numeric part).
            question_wrappers = [div for div in question_wrappers
                                 if div.attrib['id'][9:].isdigit()]
            for div in question_wrappers:
                question_number += 1
                explanation = ''
                text = helpers.replace_chars(div.xpath(".//div[@class = 'qtext']/p/text()")[0])
                user_answer = div.xpath(".//span[contains(@id, 'answer_target_')]/text()")[0].strip()
                explanation_span = div.xpath(".//span[@class = 'note']")
                if explanation_span[0].text is not None:
                    explanation = explanation_span[0].text.strip()
                self.questions.append(Question(text, user_answer, explanation))
            if not len(next_wrapper):
                keep_going = False

    def update_traits(self):
        """
        Fill `self.traits` with the personality traits of this profile.
        """
        get_traits = self._session.get('http://www.okcupid.com/profile/{0}/personality'.format(self.name))
        tree = html.fromstring(get_traits.content.decode('utf8'))
        self.traits = tree.xpath("//div[@class = 'pt_row']//label/text()")

    def update_pics(self):
        """
        Fill `self.pics` with url strings of pictures for this profile.
        """
        pics_request = self._session.get('http://www.okcupid.com/profile/{0}/photos?cf=profile'.format(self.name))
        pics_tree = html.fromstring(pics_request.content.decode('utf8'))
        self.pics = pics_tree.xpath("//div[@id = 'album_0']//img/@src")

    def __repr__(self):
        return '<Profile of {0}>'.format(self.name)
| |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the Policy class for mixed precision training."""
import tensorflow.compat.v2 as tf
import contextlib
from keras import backend
from keras.engine import base_layer_utils
from keras.mixed_precision import device_compatibility_check
from keras.utils import generic_utils
from tensorflow.python.util.tf_export import keras_export
# pylint: disable=g-classes-have-attributes
@keras_export('keras.mixed_precision.Policy', v1=[])
class Policy:
  """A dtype policy for a Keras layer.

  A dtype policy determines a layer's computation and variable dtypes. Each
  layer has a policy. Policies can be passed to the `dtype` argument of layer
  constructors, or a global policy can be set with
  `tf.keras.mixed_precision.set_global_policy`.

  Args:
    name: The policy name, which determines the compute and variable dtypes. Can
      be any dtype name, such as `'float32'` or `'float64'`, which causes both
      the compute and variable dtypes will be that dtype. Can also be the string
      `'mixed_float16'` or `'mixed_bfloat16'`, which causes the compute dtype to
      be float16 or bfloat16 and the variable dtype to be float32.

  Typically you only need to interact with dtype policies when using mixed
  precision, which is the use of float16 or bfloat16 for computations and
  float32 for variables. This is why the term `mixed_precision` appears in the
  API name. Mixed precision can be enabled by passing `'mixed_float16'` or
  `'mixed_bfloat16'` to `tf.keras.mixed_precision.set_global_policy`. See [the
  mixed precision guide](https://www.tensorflow.org/guide/keras/mixed_precision)
  for more information on how to use mixed precision.

  >>> tf.keras.mixed_precision.set_global_policy('mixed_float16')
  >>> layer1 = tf.keras.layers.Dense(10)
  >>> layer1.dtype_policy  # `layer1` will automatically use mixed precision
  <Policy "mixed_float16">
  >>> # Can optionally override layer to use float32 instead of mixed precision.
  >>> layer2 = tf.keras.layers.Dense(10, dtype='float32')
  >>> layer2.dtype_policy
  <Policy "float32">
  >>> # Set policy back to initial float32 for future examples.
  >>> tf.keras.mixed_precision.set_global_policy('float32')

  In the example above, passing `dtype='float32'` to the layer is equivalent to
  passing `dtype=tf.keras.mixed_precision.Policy('float32')`. In general,
  passing a dtype policy name to a layer is equivalent to passing the
  corresponding policy, so it is never necessary to explicitly construct a
  `Policy` object.

  Note: `Model.compile` will automatically wrap an optimizer with a
  `tf.keras.mixed_precision.LossScaleOptimizer` if you use the `'mixed_float16'`
  policy. If you use a custom training loop instead of calling `Model.compile`,
  you should explicitly use a `tf.keras.mixed_precision.LossScaleOptimizer` to
  avoid numeric underflow with float16.

  ### How a layer uses its policy's compute dtype

  A layer casts its inputs to its compute dtype. This causes the layer's
  computations and output to also be in the compute dtype. For example:

  >>> x = tf.ones((4, 4, 4, 4), dtype='float64')
  >>> # `layer`'s policy defaults to float32.
  >>> layer = tf.keras.layers.Conv2D(filters=4, kernel_size=2)
  >>> layer.compute_dtype  # Equivalent to layer.dtype_policy.compute_dtype
  'float32'
  >>> # `layer` casts its inputs to its compute dtype and does computations in
  >>> # that dtype.
  >>> y = layer(x)
  >>> y.dtype
  tf.float32

  Note that the base `tf.keras.layers.Layer` class inserts the casts. If
  subclassing your own layer, you do not have to insert any casts.

  Currently, only tensors in the first argument to the layer's `call` method are
  casted (although this will likely be changed in a future minor release). For
  example:

  >>> class MyLayer(tf.keras.layers.Layer):
  ...   # Bug! `b` will not be casted.
  ...   def call(self, a, b):
  ...     return a + 1., b + 1.
  >>> a = tf.constant(1., dtype="float32")
  >>> b = tf.constant(1., dtype="float32")
  >>> layer = MyLayer(dtype="float64")
  >>> x, y = layer(a, b)
  >>> x.dtype
  tf.float64
  >>> y.dtype
  tf.float32

  If writing your own layer with multiple inputs, you should either explicitly
  cast other tensors to `self.compute_dtype` in `call` or accept all tensors in
  the first argument as a list.

  The casting only occurs in TensorFlow 2. If
  `tf.compat.v1.disable_v2_behavior()` has been called, you can enable the
  casting behavior with `tf.compat.v1.keras.layers.enable_v2_dtype_behavior()`.

  ### How a layer uses its policy's variable dtype

  The default dtype of variables created by `tf.keras.layers.Layer.add_weight`
  is the layer's policy's variable dtype.

  If a layer's compute and variable dtypes differ, `add_weight` will wrap
  floating-point variables with a special wrapper called an `AutoCastVariable`.
  `AutoCastVariable` is identical to the original variable except it casts
  itself to the layer's compute dtype when used within `Layer.call`. This means
  if you are writing a layer, you do not have to explicitly cast the variables
  to the layer's compute dtype. For example:

  >>> class SimpleDense(tf.keras.layers.Layer):
  ...
  ...   def build(self, input_shape):
  ...     # With mixed precision, self.kernel is a float32 AutoCastVariable
  ...     self.kernel = self.add_weight('kernel', (input_shape[-1], 10))
  ...
  ...   def call(self, inputs):
  ...     # With mixed precision, self.kernel will be casted to float16
  ...     return tf.linalg.matmul(inputs, self.kernel)
  ...
  >>> layer = SimpleDense(dtype='mixed_float16')
  >>> y = layer(tf.ones((10, 10)))
  >>> y.dtype
  tf.float16
  >>> layer.kernel.dtype
  tf.float32

  A layer author can prevent a variable from being wrapped with an
  `AutoCastVariable` by passing `experimental_autocast=False` to `add_weight`,
  which is useful if the float32 value of the variable must be accessed within
  the layer.

  ### How to write a layer that supports mixed precision and float64.

  For the most part, layers will automatically support mixed precision and
  float64 without any additional work, due to the fact the base layer
  automatically casts inputs, creates variables of the correct type, and in the
  case of mixed precision, wraps variables with `AutoCastVariables`.

  The primary case where you need extra work to support mixed precision or
  float64 is when you create a new tensor, such as with `tf.ones` or
  `tf.random.normal`, In such cases, you must create the tensor of the correct
  dtype. For example, if you call `tf.random.normal`, you must pass the compute
  dtype, which is the dtype the inputs have been casted to:

  >>> class AddRandom(tf.keras.layers.Layer):
  ...
  ...   def call(self, inputs):
  ...     # We must pass `dtype=inputs.dtype`, otherwise a TypeError may
  ...     # occur when adding `inputs` to `rand`.
  ...     rand = tf.random.normal(shape=inputs.shape, dtype=inputs.dtype)
  ...     return inputs + rand
  >>> layer = AddRandom(dtype='mixed_float16')
  >>> y = layer(x)
  >>> y.dtype
  tf.float16

  If you did not pass `dtype=inputs.dtype` to `tf.random.normal`, a
  `TypeError` would have occurred. This is because the `tf.random.normal`'s
  dtype defaults to `"float32"`, but the input dtype is float16. You cannot add
  a float32 tensor with a float16 tensor.
  """

  def __init__(self, name):
    if isinstance(name, tf.DType):
      raise TypeError("'name' must be a string, not a DType. "
                      "Instead, pass DType.name. Got: %s" % (name.name,))
    elif not isinstance(name, str):
      raise TypeError("'name' must be a string, but got: %s" % (name,))
    self._name = name
    self._compute_dtype, self._variable_dtype = self._parse_name(name)
    # Bug fix: this previously tested for the misspelled 'mixed_bloat16', so
    # the device compatibility check silently never ran for mixed_bfloat16.
    if name in ('mixed_float16', 'mixed_bfloat16'):
      device_compatibility_check.log_device_compatibility_check(name)

  def _parse_name(self, name):
    """Parses a Policy name into a compute and variable dtype.

    Args:
      name: The name of the policy:

    Returns:
      The (compute_dtype, variable_dtype) pair.
    """
    if name.endswith('_float32_vars'):
      error_msg = ('Policies ending in \'_float32_vars\' have been removed '
                   'from TensorFlow.')
      if name in ('infer_float32_vars', 'infer_with_float32_vars'):
        error_msg += (' Please use the \'mixed_float16\' or \'mixed_bfloat16\' '
                      'policy instead.')
      elif name == 'float16_with_float32_vars':
        error_msg += (' Please use the \'mixed_float16\' policy instead.')
      elif name == 'bfloat16_with_float32_vars':
        error_msg += (' Please use the \'mixed_bfloat16\' policy instead.')
      error_msg += ' Got policy name: \'%s\'' % name
      raise ValueError(error_msg)
    if name == 'mixed_float16':
      return 'float16', 'float32'
    elif name == 'mixed_bfloat16':
      return 'bfloat16', 'float32'
    elif name == '_infer':
      # The "_infer" policy exists only for compatibility with TF 1, where
      # "_infer" is the default. The behavior matches the behavior of TF 1's
      # behavior before policies were introduced. With "_infer", the computation
      # and variable dtype are inferred from the first input the first time the
      # layer is called. Once the layer is called for the first time, the
      # layer's policy will change to the dtype of the first input, and it will
      # no longer have the "_infer" policy.
      #
      # The infer policy should be considered an implementation detail and may
      # be removed in the future.
      return None, None
    try:
      dtype = tf.as_dtype(name).name
    except TypeError:
      error = ("Cannot convert value %s to a mixed precision Policy. "
               "Valid policies include 'mixed_float16', 'mixed_bfloat16', "
               "and the name of any dtype such as 'float32'." % (name,))
      raise ValueError(error)
    return dtype, dtype

  @property
  def variable_dtype(self):
    """The variable dtype of this policy.

    This is the dtype layers will create their variables in, unless a layer
    explicitly chooses a different dtype. If this is different than
    `Policy.compute_dtype`, Layers will cast variables to the compute dtype to
    avoid type errors.

    Variable regularizers are run in the variable dtype, not the compute dtype.

    Returns:
      The variable dtype of this policy, as a string.
    """
    return self._variable_dtype

  @property
  def compute_dtype(self):
    """The compute dtype of this policy.

    This is the dtype layers will do their computations in. Typically layers
    output tensors with the compute dtype as well.

    Note that even if the compute dtype is float16 or bfloat16, hardware devices
    may not do individual adds, multiplies, and other fundamental operations in
    float16 or bfloat16, but instead may do some of them in float32 for numeric
    stability. The compute dtype is the dtype of the inputs and outputs of the
    TensorFlow ops that the layer executes. Internally, many TensorFlow ops will
    do certain internal calculations in float32 or some other device-internal
    intermediate format with higher precision than float16/bfloat16, to increase
    numeric stability.

    For example, a `tf.keras.layers.Dense` layer, when run on a GPU with a
    float16 compute dtype, will pass float16 inputs to `tf.linalg.matmul`. But,
    `tf.linalg.matmul` will do use float32 intermediate math. The performance
    benefit of float16 is still apparent, due to increased memory bandwidth and
    the fact modern GPUs have specialized hardware for computing matmuls on
    float16 inputs while still keeping intermediate computations in float32.

    Returns:
      The compute dtype of this policy, as a string.
    """
    return self._compute_dtype

  @property
  def name(self):
    """Returns the name of this policy."""
    return self._name

  def __repr__(self):
    return '<Policy "%s">' % self._name

  def get_config(self):
    """Returns the config of this policy for serialization."""
    return {'name': self.name}

  @classmethod
  def from_config(cls, config, custom_objects=None):
    """Creates a Policy from its config, as returned by `get_config`."""
    del custom_objects
    if 'loss_scale' in config:
      config = config.copy()
      # Policy.get_config in TensorFlow 2.3 and below had a loss_scale. We
      # silently drop it.
      del config['loss_scale']
    return cls(**config)
# The current global policy in effect. If None, it means the current value of
# floatx should be used as the policy if the V2 dtype behavior is enabled,
# or "_infer" otherwise. Read via `global_policy()` and mutated only by
# `set_global_policy()`.
# TODO(reedwm): Make this thread local?
_global_policy = None
@keras_export('keras.mixed_precision.global_policy', v1=[])
def global_policy():
  """Returns the global dtype policy.

  The global policy is the default `tf.keras.mixed_precision.Policy` used for
  layers, if no policy is passed to the layer constructor. When no policy has
  been set with `keras.mixed_precision.set_global_policy`, the returned policy
  is built from `tf.keras.backend.floatx()` (which defaults to float32) if the
  V2 dtype behavior is enabled; otherwise the special "_infer" policy is
  returned, matching TF 1's behavior of inferring the dtype from the first
  input the first time the layer is called.

  See `tf.keras.mixed_precision.Policy` for more information on policies.

  Returns:
    The global Policy.
  """
  if _global_policy is not None:
    return _global_policy
  if base_layer_utils.v2_dtype_behavior_enabled():
    return Policy(backend.floatx())
  return Policy('_infer')
def _check_if_mixed_precision_graph_rewrite_is_enabled(policy):
  """Raises if the TF1 mixed precision graph rewrite is already enabled.

  The graph rewrite and mixed dtype policies are mutually exclusive ways of
  enabling mixed precision; attempting to combine them is an error.

  Args:
    policy: The mixed Policy the caller is trying to set globally.

  Raises:
    ValueError: if the graph rewrite has already been enabled.
  """
  if not tf.__internal__.train.is_mixed_precision_graph_rewrite_enabled():
    return
  raise ValueError(
      'The global dtype policy cannot be set to "{policy.name}", because the '
      'mixed precision graph rewrite has already been enabled.\n'
      'At most, one of the following can be called:\n\n'
      '  1. tf.compat.v1.train.enable_mixed_precision_graph_rewrite() '
      '(You called this first)\n'
      '  2. tf.keras.mixed_precision.set_global_policy() with a mixed '
      'precision policy (You called this second)\n\n'
      'You called both functions, which is an error, because both functions '
      'enable you to use mixed precision. If in doubt which function to use, '
      'use the second, as it supports Eager execution and is more '
      'customizable.'.format(policy=policy))
@keras_export('keras.mixed_precision.set_global_policy', v1=[])
def set_global_policy(policy):
  """Sets the global dtype policy.

  The global policy is the default `tf.keras.mixed_precision.Policy` used for
  layers, if no policy is passed to the layer constructor. To use mixed
  precision, set the global policy to `'mixed_float16'` or `'mixed_bfloat16'`,
  so every layer defaults to a 16-bit compute dtype with float32 variables.
  Only floating-point policies (e.g. `'float32'`, `'mixed_float16'`) may be
  set globally; most layers do not support policies such as `'int32'`.

  See `tf.keras.mixed_precision.Policy` for more information.

  Args:
    policy: A Policy, or a string that will be converted to a Policy. Can also
      be None, in which case the global policy will be constructed from
      `tf.keras.backend.floatx()`

  Raises:
    ValueError: if V2 dtype behavior is disabled, the policy is non-floating,
      or the mixed precision graph rewrite is already enabled.
  """
  global _global_policy
  if not base_layer_utils.v2_dtype_behavior_enabled():
    raise ValueError('The global policy can only be set in TensorFlow 2 or if '
                     'V2 dtype behavior has been set. To enable V2 dtype '
                     'behavior, call '
                     '"tf.compat.v1.keras.layers.enable_v2_dtype_behavior()"')
  # Accept plain strings by converting them to a Policy up front.
  if policy is not None and not isinstance(policy, Policy):
    policy = Policy(policy)
  mixed = (policy is not None
           and policy.compute_dtype != policy.variable_dtype)
  if mixed:
    _check_if_mixed_precision_graph_rewrite_is_enabled(policy)
  if (policy is not None and policy.compute_dtype is not None and
      not tf.as_dtype(policy.compute_dtype).is_floating):
    raise ValueError('set_global_policy can only be used to set the global '
                     'policy to floating-point policies, such as "float32" and '
                     '"mixed_float16", but got policy: %s'
                     % (policy.name,))
  _global_policy = policy
  tf.__internal__.train.set_using_mixed_precision_policy(mixed)
# TODO(reedwm): Make this thread local
@contextlib.contextmanager
def policy_scope(policy):
  """A context manager that temporarily sets the global Policy.

  On exit, the previously active global policy is restored, even if the
  body (or setting the new policy) raises.

  Args:
    policy: A Policy, or a string that will be converted to a Policy..

  Yields:
    Nothing.
  """
  previous_policy = _global_policy
  try:
    set_global_policy(policy)
    yield
  finally:
    set_global_policy(previous_policy)
def _is_convertible_to_dtype(dtype):
  """Returns True if `dtype` is accepted by `tf.as_dtype`."""
  try:
    tf.as_dtype(dtype)
  except TypeError:
    return False
  return True
def _policy_equivalent_to_dtype(policy):
  """Returns True if the Policy is equivalent to a single dtype.

  A policy is equivalent to a single dtype if the policy's compute and
  variable dtypes are the same and the policy's type is exactly Policy.
  The "_infer" policy is considered equivalent to a single dtype.

  Args:
    policy: A Policy.

  Returns:
    True, if the policy is equivalent to a single dtype.
  """
  # type() rather than isinstance(): a subclass of Policy is never
  # equivalent to a dtype.
  if type(policy) is not Policy:  # pylint: disable=unidiomatic-typecheck
    return False
  return policy.name == '_infer' or _is_convertible_to_dtype(policy.name)
def serialize(policy):
  """Serializes a Policy into a Keras-config-compatible value."""
  if not _policy_equivalent_to_dtype(policy):
    return generic_utils.serialize_keras_object(policy)
  # For compatibility with older versions of Keras, dtype-equivalent
  # policies serialize to a plain dtype string such as 'float32', and the
  # "_infer" policy serializes to None.
  if policy.name == '_infer':
    return None
  return policy.name
def deserialize(config, custom_objects=None):
  """Deserializes the output of `serialize` back into a Policy."""
  if config is None:
    return Policy('_infer')
  if isinstance(config, str) and _is_convertible_to_dtype(config):
    return Policy(config)
  # PolicyV1 was an old version of Policy that was removed. Deserializing it
  # turns it into a (non-V1) Policy.
  return generic_utils.deserialize_keras_object(
      config,
      module_objects={'Policy': Policy, 'PolicyV1': Policy},
      custom_objects=custom_objects,
      printable_module_name='dtype policy')
| |
# This module is part of mail_merge_scheduler and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""This script is run through Windows Task Scheduler at the specified time by
the end-user by using the mail_merge_scheduler module or by manually entering
data into the scheduled_merges.ini file and manually creating a task in Windows
Task Scheduler. It is reccommended to always set up scheduled mail merges
through the mail_merge_scheduler.
This script is NOT meant to be used by the end-user.
"""
# Standard library imports
import ast
import configparser
from datetime import datetime
from datetime import timedelta
import logging
import os
# Third-party imports
from dateutil.parser import parse
from mailmerge import MailMerge
import sqlalchemy
def create_logger():
    """Creates and returns a logger. Errors are logged in schedules.log.

    The log file is created next to this module. Each record is preceded
    by an underscore rule so entries are visually separated.

    Returns:
        A logging.Logger configured with an ERROR-level FileHandler.
    """
    module_dir = os.path.split(__file__)[0]
    log_path = r"{}\schedules.log".format(module_dir)
    new_logger = logging.getLogger(__name__)
    new_logger.setLevel(logging.ERROR)
    # Idiom fix (and typo fix: "seperator"): build the 80-char rule directly
    # instead of joining a list of 80 one-character strings.
    separator = "_" * 80
    logger_format = "{}\n%(asctime)s\n%(message)s\n".format(separator)
    formatter = logging.Formatter(logger_format)
    file_handler = logging.FileHandler(log_path)
    file_handler.setLevel(logging.ERROR)
    file_handler.setFormatter(formatter)
    new_logger.addHandler(file_handler)
    return new_logger
# pylint: disable=invalid-name
logger = create_logger()
class ScheduledMerge(object):
    """Object containing methods for loading and processing data found in the
    schedules_merges.ini file.

    Attributes:
        db_connection_string: A connection string that can be used to connect
            to the desired database. Instructions can be found on sqlalchemy's
            website at: http://docs.sqlalchemy.org/en/latest/core/engines.html
        db_query: A string of a database query that will be used to take data
            from the database that the db_connection_string attribute connects
            with.
        template_docx_file_path: A string giving the full path to a .docx or
            .dotx document that has mail merge fields with names that correlate
            to the field names of the table in the db_query attribute.
        output_docx_name: A string of the file name only, and not the
            full path, of the the .docx document created from the mail merge.
            This is a new document, and does not overwrite the
            template_docx_file_path. May be None, in which case a unique
            "Merged_..." name is generated automatically.
        week_int: An integer indicating the week interval/frequency for the
            scheduled mail merge task to occur.
        sched_days: A list of datetime.datetime objects indicating when the
            scheduled mail merge task should occur.
    """
    # pylint: disable=too-many-arguments
    def __init__(self, db_connection_string, db_query, template_docx_file_path,
                 output_docx_name, week_int, sched_days):
        ## Database Information
        self.db_connection_string = db_connection_string
        self.db_query = db_query
        ## Path Information
        self.template_docx_file_path = template_docx_file_path
        self.output_docx_name = output_docx_name
        ## Scheduling Information
        self.week_int = week_int
        self.sched_days = sched_days

    def generate_out_filename(self):
        """Creates a unique file name for the output docx file, used by
        perform_mail_merge when no output name was configured.

        Returns:
            A string for a unique name for the .docx document created from the
            mail merge.
        """
        head, tail = os.path.split(self.template_docx_file_path)
        out_docx_path = r"{}\Merged_{}".format(head, tail)
        if os.path.isfile(out_docx_path):
            file_name, ext = os.path.splitext(tail)
            num = 1
            # If the file name already exists, keep adding 1 to num, until a
            # unique file name is found.
            while True:
                out_docx_path = r"{}\Merged_{}_{}{}".format(
                    head, file_name, num, ext)
                if not os.path.isfile(out_docx_path):
                    break
                num += 1
        return out_docx_path

    def create_dict_of_data_from_vars(self):
        """Converts the instance attributes back into a dictionary so they can
        be written to the schedules_merges.ini config file.

        Returns:
            dict_of_data: A dictionary with keys that correlate with
                the names of the class attributes and values of the data for
                that attribute (sched_days as a list of datetime strings).
        """
        days = [str(d) for d in self.sched_days]
        dict_of_data = {
            "db_connection_string":self.db_connection_string,
            "db_query":self.db_query,
            "template_docx_file_path":self.template_docx_file_path,
            "output_docx_name":self.output_docx_name,
            "week_int":self.week_int,
            "sched_days":days}
        return dict_of_data

    def compare_time_to_sched_days(self):
        """Checks self.sched_days to see if a mail merge needs to be performed.

        Compares the current datetime to all datetimes in the sched_days
        instance attribute, to check if the there is a merge scheduled for
        right now. Also in cases where the user was not logged on when a mail
        merge task was scheduled, any datetime at or before the current
        datetime triggers the merge, and the first such entry is advanced by
        the week interval.

        Returns:
            Boolean True or False.
        """
        today = datetime.today()
        for index, day in enumerate(self.sched_days):
            time_diff = (today - day).days
            if time_diff >= 0:
                # Advance only the entry that triggered the merge.
                self.sched_days[index] = self.update_day(day)
                return True
        return False

    def update_day(self, day):
        """Updates a datetime in sched_days by timedelta-ing it by the week
        interval, if a mail merge was performed.

        Args:
            day: A datetime.datetime object

        Returns:
            A datetime.datetime object
        """
        day += timedelta(weeks=self.week_int)
        return day

    def get_records_from_db(self):
        """Gets data from the database, based on the query given, and returns
        the data.

        Returns:
            A list of dictionaries, with key:value pairs arranged as
            key=field_name : value=record for that row.
        """
        eng = sqlalchemy.create_engine(self.db_connection_string)
        eng.connect()
        rows = eng.execute(self.db_query)
        flds = rows.keys()
        records = []
        for row in rows:
            rec = {str(fld):str(row[ind]) for ind, fld in enumerate(flds)}
            records.append(rec)
        return records

    def perform_mail_merge(self):
        """Performs a mail merge and creates a new docx file."""
        in_docx_path = self.template_docx_file_path
        if self.output_docx_name is None:
            out_docx_path = self.generate_out_filename()
        else:
            # Bug fix: out_docx_path was previously left unassigned when an
            # output name was configured, raising NameError at write() time.
            # output_docx_name is a file name only, so place it next to the
            # template document.
            head = os.path.split(self.template_docx_file_path)[0]
            out_docx_path = r"{}\{}".format(head, self.output_docx_name)
        data = self.get_records_from_db()
        document = MailMerge(in_docx_path)
        document.merge_pages(data)
        document.write(out_docx_path)
def write_dict_to_config(config_path, config, config_key_id, dict_of_data):
    """Writes `dict_of_data` into `config[config_key_id]` and saves the
    schedules_merges.ini file.

    String values are wrapped as raw-string literals (r"...") so that
    Windows paths survive the ast.literal_eval round-trip performed when
    the config is read back; all values are stored as strings.
    """
    for key, value in dict_of_data.items():
        if isinstance(value, str):
            value = 'r"{}"'.format(value)
        config[config_key_id][key] = str(value)
    # The with-statement closes the file; the explicit close() and bare
    # return the original carried were redundant and have been removed.
    with open(config_path, 'w') as config_file:
        config.write(config_file)
# pylint: disable=too-many-locals
# pylint: disable=broad-except
# pylint: disable=unused-variable
def check_for_scheduled_merges():
    """This function runs when the script is run.

    Loads the schedules_merges.ini file that lives next to this module
    (re-creating it if it has been deleted), then walks every configured
    section: if a merge is due right now (or was missed while the user was
    logged off) it is performed, and the section is rewritten with the
    updated schedule data. Any error is logged to schedules.log rather
    than raised.

    Returns:
        None
    """
    module_dir = os.path.split(__file__)[0]
    config_path = r"{}\scheduled_merges.ini".format(module_dir)
    config = configparser.ConfigParser()
    # optionxform maintains uppercase letters in strings for keys.
    config.optionxform = str
    config.read(config_path)
    try:
        if os.path.isfile(config_path) is False:
            raise FileNotFoundError(config_path)
    except Exception as exception:
        # The scheduled_merges.ini file has been deleted, for whatever
        # reason; log it and re-create the (empty) file.
        logger.exception("")
        with open(config_path, 'w') as new_config_file:
            config.write(new_config_file)
    for section in config.sections():
        try:
            values = [ast.literal_eval(raw) for raw in config[section].values()]
            merge_job = ScheduledMerge(
                values[0],                            # db_connection_string
                values[1],                            # db_query
                values[2],                            # template_docx_file_path
                values[3],                            # output_docx_name
                values[4],                            # week_int
                [parse(item) for item in values[5]])  # sched_days
            # compare_time_to_sched_days returns whether a merge is due now.
            if merge_job.compare_time_to_sched_days():
                merge_job.perform_mail_merge()
            # The section is rewritten on every pass, merged or not.
            write_dict_to_config(
                config_path, config, section,
                merge_job.create_dict_of_data_from_vars())
        # Catch everything so one bad section cannot stop the rest; the
        # Traceback lands in schedules.log.
        except Exception as exception:
            logger.exception("KEY_ID: %s", section)
    return
# Entry point: Windows Task Scheduler invokes this script directly.
if __name__ == "__main__":
    check_for_scheduled_merges()
| |
# -*- coding: utf-8 -*-
import codecs
import logging
import os
import re
import sys
import time
import uuid
from http.cookiejar import Cookie, LWPCookieJar
from contextlib import contextmanager
from functools import wraps
from PyQt5.QtCore import (
QByteArray,
QDateTime,
qInstallMessageHandler,
QSize,
QSizeF,
Qt,
QtCriticalMsg,
QtDebugMsg,
QtFatalMsg,
QtWarningMsg,
QUrl,
)
from PyQt5.QtGui import (
QImage,
QPainter,
QRegion,
)
from PyQt5.QtPrintSupport import QPrinter
from PyQt5.QtWidgets import (
QApplication,
)
from PyQt5.QtNetwork import (
QNetworkAccessManager,
QNetworkCookie,
QNetworkCookieJar,
QNetworkProxy,
QNetworkRequest,
)
from PyQt5.QtWebKit import QWebSettings
from PyQt5.QtWebKitWidgets import (
QWebPage,
QWebView,
)
from xvfbwrapper import Xvfb
# User-Agent header sent with requests unless a session overrides it.
DEFAULT_USERAGENT = (
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5)'
    ' AppleWebKit/537.36 (KHTML, like Gecko)'
    ' CDP/47.0.2526.73 Safari/537.36'
)
# Library-wide logger; the NullHandler keeps it silent unless the
# application configures logging itself.
logger = logging.getLogger('ghost')
logger.addHandler(logging.NullHandler())
class Error(Exception):
    """Base class for Ghost exceptions."""
# NOTE: intentionally shadows the builtin TimeoutError inside this module.
class TimeoutError(Error):
    """Raised when a request times out."""
class QTMessageProxy(object):
    """Callable installed as a Qt message handler that forwards Qt
    messages to a standard Python logger.
    """
    def __init__(self, logger):
        # Target logger for all proxied Qt messages.
        self.logger = logger

    def __call__(self, msg_type, msg):
        """Log `msg` at the Python level matching the Qt message type."""
        severity = {
            QtDebugMsg: logging.DEBUG,
            QtWarningMsg: logging.WARNING,
            QtCriticalMsg: logging.CRITICAL,
            QtFatalMsg: logging.FATAL,
        }[msg_type]
        self.logger.log(severity, msg)
class GhostWebPage(QWebPage):
    """QWebPage subclass that intercepts graphical behaviours such as
    alert(), confirm() and prompt(), and forwards client-side
    console.log() output to the session logger.
    """
    def __init__(self, app, session):
        # Keep a handle on the owning session before Qt initialisation.
        self.session = session
        super(GhostWebPage, self).__init__()

    def choose_file(self, frame, suggested_file=None):
        """Return the file path the session queued for upload."""
        chosen = self.session._upload_file
        self.session.logger.debug('Choosing file %s', chosen)
        return chosen

    def javaScriptConsoleMessage(self, message, line, source):
        """Prints client console message in current output stream."""
        super(GhostWebPage, self).javaScriptConsoleMessage(
            message,
            line,
            source,
        )
        level = logging.WARNING if "Error" in message else logging.INFO
        self.session.logger.log(
            level,
            "%s(%d): %s", source or '<unknown>', line, message,
        )

    def javaScriptAlert(self, frame, message):
        """Records a client-side alert() on the session, then passes."""
        self.session._alert = message
        self.session.append_popup_message(message)
        self.session.logger.info("alert('%s')", message)

    def _get_value(self, value):
        """Resolve `value`, calling it first if it is callable."""
        return value() if callable(value) else value

    def javaScriptConfirm(self, frame, message):
        """Answers a client confirm() with the session's expected value."""
        if self.session._confirm_expected is None:
            raise Error(
                'You must specified a value to confirm "%s"' %
                message,
            )
        self.session.append_popup_message(message)
        expected = self.session._confirm_expected
        self.session.logger.info("confirm('%s')", message)
        return self._get_value(expected)

    def javaScriptPrompt(self, frame, message, default_value, result=None):
        """Answers a client prompt() with the session's expected value."""
        if self.session._prompt_expected is None:
            raise Error(
                'You must specified a value for prompt "%s"' %
                message,
            )
        self.session.append_popup_message(message)
        answer = self._get_value(self.session._prompt_expected)
        self.session.logger.info("prompt('%s')", message)
        if answer == '':
            self.session.logger.warning(
                "'%s' prompt filled with empty string", message,
            )
        if result is None:
            # PySide signature: the value is returned with the status.
            return True, answer
        result.append(str(answer))
        return True

    def set_user_agent(self, user_agent):
        """Stores the User-Agent string served by userAgentForUrl()."""
        self.user_agent = user_agent

    def userAgentForUrl(self, url):
        """Qt hook: every request uses the configured User-Agent."""
        return self.user_agent
def can_load_page(func):
    """Decorator that specifies if user can expect page loading from
    this action. If expect_loading is set to True, ghost will wait
    for page_loaded event.
    """
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        expect_loading = kwargs.pop('expect_loading', False)
        timeout = kwargs.pop('timeout', None)
        if not expect_loading:
            return func(self, *args, **kwargs)
        self.loaded = False
        func(self, *args, **kwargs)
        return self.wait_for_page_loaded(timeout=timeout)
    return wrapper
class HttpResource(object):
    """A single HTTP resource captured from a finished network reply:
    url, body, status code and headers.
    """
    def __init__(self, session, reply, content):
        self.session = session
        self.url = reply.url().toString()
        self.content = bytes(content.data())
        self.http_status = reply.attribute(
            QNetworkRequest.HttpStatusCodeAttribute)
        self.session.logger.info(
            "Resource loaded: %s %s", self.url, self.http_status,
        )
        self.headers = {}
        for raw_name in reply.rawHeaderList():
            try:
                self.headers[str(raw_name)] = str(reply.rawHeader(raw_name))
            except UnicodeDecodeError:
                # Drop the undecodable header value rather than crash
                # the whole process.
                self.session.logger.error(
                    "Invalid characters in header %s=%s",
                    raw_name,
                    reply.rawHeader(raw_name),
                )
        self._reply = reply
def replyReadyRead(reply):
    """Accumulates a reply's currently-available payload on ``reply.data``.

    Qt only lets the reply body be read once, so each readyRead signal
    peeks at the buffered data and appends it to an attribute stashed on
    the reply object itself.
    """
    if not hasattr(reply, 'data'):
        # Bug fix: the accumulator must be bytes, not str -- peek() returns
        # binary data, and str += bytes raises TypeError on Python 3.
        reply.data = b''
    reply.data += reply.peek(reply.bytesAvailable())
class NetworkAccessManager(QNetworkAccessManager):
    """Subclass QNetworkAccessManager to always cache the reply content.

    :param exclude_regex: A regex use to determine which url exclude
        when sending a request; excluded urls are answered by a GET of an
        empty request instead of being fetched.
    """
    def __init__(self, exclude_regex=None, *args, **kwargs):
        # Compile once at construction; None disables exclusion entirely.
        self._regex = re.compile(exclude_regex) if exclude_regex else None
        super(NetworkAccessManager, self).__init__(*args, **kwargs)

    def createRequest(self, operation, request, data):
        """Create the reply, hooking readyRead so the body is cached."""
        if self._regex and self._regex.findall(str(request.url().toString())):
            # Bug fix: the third argument must be a QNetworkRequest, not a
            # QNetworkAccessManager; the original raised a TypeError here.
            return QNetworkAccessManager.createRequest(
                self, QNetworkAccessManager.GetOperation,
                QNetworkRequest(QUrl()))
        reply = QNetworkAccessManager.createRequest(
            self,
            operation,
            request,
            data,
        )
        # Cache the streamed content on the reply; see replyReadyRead().
        reply.readyRead.connect(lambda reply=reply: replyReadyRead(reply))
        # Give Qt a brief moment to deliver queued signals.
        time.sleep(0.001)
        return reply
class Ghost(object):
    """`Ghost` manages a Qt application.

    :param plugin_path: Iterable of paths to plugin directories
        (default ('/usr/lib/mozilla/plugins',)).
    :param defaults: The defaults arguments to pass to new child sessions.
    :param display_size: (width, height) used for the Xvfb screen and as
        the default session viewport size.
    """
    _app = None

    def __init__(
        self,
        # Immutable tuple instead of a mutable list default argument.
        plugin_path=('/usr/lib/mozilla/plugins',),
        defaults=None,
        display_size=(1600, 900),
    ):
        self.logger = logger.getChild('application')
        if (
            sys.platform.startswith('linux') and
            'DISPLAY' not in os.environ
        ):
            # Headless linux: start a virtual X server for Qt to render into.
            try:
                self.xvfb = Xvfb(
                    width=display_size[0],
                    height=display_size[1],
                )
                self.xvfb.start()
            except OSError:
                raise Error('Xvfb is required to a ghost run outside ' +
                            'an X instance')
        self.logger.info('Initializing Qt application')
        # Reuse an existing QApplication if one is already running.
        Ghost._app = QApplication.instance() or QApplication(['ghost'])
        qInstallMessageHandler(QTMessageProxy(logging.getLogger('qt')))
        if plugin_path:
            for p in plugin_path:
                Ghost._app.addLibraryPath(p)
        self.display_size = display_size
        _defaults = dict(viewport_size=display_size)
        _defaults.update(defaults or dict())
        self.defaults = _defaults

    def exit(self):
        """Quit the Qt application and stop Xvfb if it was started."""
        self._app.quit()
        if hasattr(self, 'xvfb'):
            self.xvfb.stop()

    def start(self, **kwargs):
        """Starts a new `Session` configured with this Ghost's defaults."""
        _kwargs = self.defaults.copy()
        _kwargs.update(kwargs)
        return Session(self, **_kwargs)

    def __del__(self):
        # __init__ may have failed before _app/xvfb existed; a destructor
        # must never raise.
        try:
            self.exit()
        except Exception:
            pass
class Session(object):
    """`Session` manages a QWebPage.

    :param ghost: The parent `Ghost` instance.
    :param user_agent: The default User-Agent header.
    :param wait_timeout: Maximum step duration in second.
    :param wait_callback: An optional callable that is periodically
        executed until Ghost stops waiting.
    :param display: A boolean that tells ghost to display UI.
    :param viewport_size: A tuple that sets initial viewport size.
    :param ignore_ssl_errors: A boolean that forces ignore SSL errors.
    :param plugins_enabled: Enable plugins (like Flash).
    :param java_enabled: Enable Java JRE.
    :param javascript_enabled: Enable javascript execution in the page.
    :param download_images: Indicate if the browser should download images
    :param show_scrollbars: Show scrollbars on the main frame.
    :param exclude: A regex use to determine which url exclude
        when sending a request
    :param network_access_manager_class: Class used to build the page's
        network access manager (None keeps the page's own).
    :param web_page_class: Class used to build the QWebPage.
    :param local_storage_enabled: An optional boolean to enable / disable
        local storage.
    """
    # Per-popup interaction state; class-level defaults, set per instance.
    _alert = None
    _confirm_expected = None
    _prompt_expected = None
    _upload_file = None
    _app = None
    def __init__(
        self,
        ghost,
        user_agent=DEFAULT_USERAGENT,
        wait_timeout=0,
        wait_callback=None,
        display=False,
        viewport_size=None,
        ignore_ssl_errors=True,
        plugins_enabled=False,
        java_enabled=False,
        javascript_enabled=True,
        download_images=True,
        show_scrollbars=True,
        exclude=None,
        network_access_manager_class=NetworkAccessManager,
        web_page_class=GhostWebPage,
        local_storage_enabled=True,
    ):
        self.ghost = ghost
        # Unique id used to tag this session's log records.
        self.id = str(uuid.uuid4())
        self.logger = logging.LoggerAdapter(
            logger.getChild('session'),
            {'session': self.id},
        )
        self.logger.info("Starting new session")
        self.http_resources = []
        self.wait_timeout = wait_timeout
        self.wait_callback = wait_callback
        self.ignore_ssl_errors = ignore_ssl_errors
        self.loaded = True
        self.display = display
        self.popup_messages = []
        self.page = web_page_class(self.ghost._app, self)
        if network_access_manager_class is not None:
            self.page.setNetworkAccessManager(network_access_manager_class(exclude_regex=exclude))
        # Disable Qt page/object caching so every request hits the network.
        QWebSettings.setMaximumPagesInCache(0)
        QWebSettings.setObjectCacheCapacities(0, 0, 0)
        QWebSettings.globalSettings().setAttribute(QWebSettings.LocalStorageEnabled, local_storage_enabled)
        self.page.setForwardUnsupportedContent(True)
        self.page.settings().setAttribute(QWebSettings.AutoLoadImages, download_images)
        self.page.settings().setAttribute(QWebSettings.PluginsEnabled, plugins_enabled)
        self.page.settings().setAttribute(QWebSettings.JavaEnabled, java_enabled)
        self.page.settings().setAttribute(QWebSettings.JavascriptEnabled, javascript_enabled)
        if not show_scrollbars:
            self.page.mainFrame().setScrollBarPolicy(Qt.Vertical, Qt.ScrollBarAlwaysOff, )
            self.page.mainFrame().setScrollBarPolicy(Qt.Horizontal, Qt.ScrollBarAlwaysOff, )
        # page signals
        self.page.loadFinished.connect(self._page_loaded)
        self.page.loadStarted.connect(self._page_load_started)
        self.page.unsupportedContent.connect(self._unsupported_content)
        self.manager = self.page.networkAccessManager()
        self.manager.finished.connect(self._request_ended)
        self.manager.sslErrors.connect(self._on_manager_ssl_errors)
        # Cookie jar
        self.cookie_jar = QNetworkCookieJar()
        self.manager.setCookieJar(self.cookie_jar)
        # User Agent
        self.page.set_user_agent(user_agent)
        self.page.networkAccessManager().authenticationRequired.connect(self._authenticate)
        self.page.networkAccessManager().proxyAuthenticationRequired.connect(self._authenticate)
        self.main_frame = self.page.mainFrame()
        class GhostQWebView(QWebView):
            # Closure over viewport_size: advertise it as the preferred size.
            def sizeHint(self):
                return QSize(*viewport_size)
        self.webview = GhostQWebView()
        self.set_viewport_size(*viewport_size)
        if plugins_enabled:
            self.webview.settings().setAttribute(QWebSettings.PluginsEnabled, True)
        if java_enabled:
            self.webview.settings().setAttribute(QWebSettings.JavaEnabled, True)
        self.webview.setPage(self.page)
        if self.display:
            self.show()
def frame(self, selector=None):
"""Set main frame as current main frame's parent.
:param frame: An optional name or index of the child to descend to.
"""
if isinstance(selector, str):
for frame in self.main_frame.childFrames():
if frame.frameName() == selector:
self.main_frame = frame
return
# frame not found so we throw an exception
raise LookupError(
"Child frame for name '%s' not found." % selector,
)
if isinstance(selector, int):
try:
self.main_frame = self.main_frame.childFrames()[selector]
return
except IndexError:
raise LookupError(
"Child frame at index '%s' not found." % selector,
)
# we can't ascend directly to parent frame because it might have been
# deleted
self.main_frame = self.page.mainFrame()
    @can_load_page
    def call(self, selector, method):
        """Call method on element matching given selector.

        :param selector: A CSS selector to the target element.
        :param method: The name of the method to call.
        :param expect_loading: Specifies if a page loading is expected.
        """
        self.logger.debug('Calling `%s` method on `%s`', method, selector)
        element = self.main_frame.findFirstElement(selector)
        # repr() quotes the name so it is looked up as a string key on `this`.
        return element.evaluateJavaScript('this[%s]();' % repr(method))
    def capture(
        self,
        region=None,
        selector=None,
        format=None,
    ):
        """Returns snapshot as QImage.

        :param region: An optional (x1, y1, x2, y2) tuple of pixel
            coordinates to crop to.
        :param selector: A selector targeted the element to crop on.
        :param format: The output image format.
        :return: The rendered QImage, or None if the frame is too large.
        """
        if format is None:
            format = QImage.Format_ARGB32_Premultiplied
        # Hide scrollbars so they do not appear in the snapshot.
        self.main_frame.setScrollBarPolicy(
            Qt.Vertical,
            Qt.ScrollBarAlwaysOff,
        )
        self.main_frame.setScrollBarPolicy(
            Qt.Horizontal,
            Qt.ScrollBarAlwaysOff,
        )
        frame_size = self.main_frame.contentsSize()
        # 23170^2 keeps width*height*4 bytes under the 2**31 allocation limit.
        max_size = 23170 * 23170
        if frame_size.height() * frame_size.width() > max_size:
            self.logger.warning("Frame size is too large.")
            default_size = self.page.viewportSize()
            if default_size.height() * default_size.width() > max_size:
                return None
        else:
            # Grow the viewport to the full content so everything renders.
            self.page.setViewportSize(self.main_frame.contentsSize())
        self.logger.info("Frame size -> %s", str(self.page.viewportSize()))
        image = QImage(self.page.viewportSize(), format)
        painter = QPainter(image)
        if region is None and selector is not None:
            region = self.region_for_selector(selector)
        if region:
            x1, y1, x2, y2 = region
            w, h = (x2 - x1), (y2 - y1)
            reg = QRegion(x1, y1, w, h)
            self.main_frame.render(painter, reg)
        else:
            self.main_frame.render(painter)
        # End painting before copying; QImage.copy on an active painter is
        # undefined.
        painter.end()
        if region:
            x1, y1, x2, y2 = region
            w, h = (x2 - x1), (y2 - y1)
            image = image.copy(x1, y1, w, h)
        return image
def capture_to(
self,
path,
region=None,
selector=None,
format=None,
):
"""Saves snapshot as image.
:param path: The destination path.
:param region: An optional tuple containing region as pixel
coordinates.
:param selector: A selector targeted the element to crop on.
:param format: The output image format.
"""
if format is None:
format = QImage.Format_ARGB32_Premultiplied
self.capture(region=region, format=format, selector=selector).save(path)
def print_to_pdf(
self,
path,
paper_size=(8.5, 11.0),
paper_margins=(0, 0, 0, 0),
paper_units=None,
zoom_factor=1.0,
):
"""Saves page as a pdf file.
:param path: The destination path.
:param paper_size: A 2-tuple indicating size of page to print to.
:param paper_margins: A 4-tuple indicating size of each margins.
:param paper_units: Units for paper size, paper margins.
:param zoom_factor: Scale the output content.
"""
assert len(paper_size) == 2
assert len(paper_margins) == 4
if paper_units is None:
paper_units = QPrinter.Inch
printer = QPrinter(mode=QPrinter.ScreenResolution)
printer.setOutputFromat(QPrinter.PdfFormat)
printer.setPaperSize(QSizeF(*paper_size), paper_units)
printer.setPageMargins(*(paper_margins + (paper_units,)))
if paper_margins != (0, 0, 0, 0):
printer.setFullPage(True)
printer.setOutputFileName(path)
if self.webview is None:
self.webview = QWebView()
self.webview.setPage(self.page)
self.webview.setZoomFactor(zoom_factor)
self.webview.print_(printer)
    @can_load_page
    def click(self, selector, btn=0):
        """Click the targeted element.

        :param selector: A CSS3 selector to targeted element.
        :param btn: The number of mouse button.
            0 - left button,
            1 - middle button,
            2 - right button
        :raise Error: If no element matches the selector.
        """
        if not self.exists(selector):
            raise Error("Can't find element to click")
        # Dispatch a synthetic MouseEvent in the page instead of moving the
        # real cursor.
        return self.evaluate("""
            (function () {
                var element = document.querySelector(%s);
                var evt = document.createEvent("MouseEvents");
                evt.initMouseEvent("click", true, true, window, 1, 1, 1, 1, 1,
                    false, false, false, false, %s, element);
                return element.dispatchEvent(evt);
            })();
        """ % (repr(selector), str(btn)))
@contextmanager
def confirm(self, confirm=True):
"""Statement that tells Ghost how to deal with javascript confirm().
:param confirm: A boolean or a callable to set confirmation.
"""
self._confirm_expected = confirm
yield
self._confirm_expected = None
@property
def content(self, to_unicode=True):
"""Returns current frame HTML as a string.
:param to_unicode: Whether to convert html to unocode or not.
"""
if to_unicode:
return str(self.main_frame.toHtml())
else:
return self.main_frame.toHtml()
    @property
    def cookies(self):
        """Returns all cookies from the session's cookie jar."""
        return self.cookie_jar.allCookies()
    def delete_cookies(self):
        """Deletes all cookies by resetting the jar to an empty list."""
        self.cookie_jar.setAllCookies([])
    def clear_alert_message(self):
        """Clears the last recorded javascript alert message."""
        self._alert = None
    @can_load_page
    def evaluate(self, script):
        """Evaluates script in page frame.

        :param script: The script to evaluate.
        :return: A (script result, released resources) tuple.
        """
        return (
            self.main_frame.evaluateJavaScript("%s" % script),
            self._release_last_resources(),
        )
def evaluate_js_file(self, path, encoding='utf-8', **kwargs):
"""Evaluates javascript file at given path in current frame.
Raises native IOException in case of invalid file.
:param path: The path of the file.
:param encoding: The file's encoding.
"""
with codecs.open(path, encoding=encoding) as f:
return self.evaluate(f.read(), **kwargs)
    def exists(self, selector):
        """Checks if element exists for given selector.

        :param selector: The element selector.
        :return: True if at least one element matches.
        """
        return not self.main_frame.findFirstElement(selector).isNull()
    def exit(self):
        """Exits all Qt Widgets."""
        self.logger.info("Closing session")
        self.page.deleteLater()
        # Pump the event loop so deleteLater is processed before we drop
        # our references.
        self.sleep()
        del self.webview
        del self.cookie_jar
        del self.manager
        del self.main_frame
@can_load_page
def fill(self, selector, values):
"""Fills a form with provided values.
:param selector: A CSS selector to the target form to fill.
:param values: A dict containing the values.
"""
if not self.exists(selector):
raise Error("Can't find form")
resources = []
for field in values:
r, res = self.set_field_value(
"%s [name=%s]" % (selector, repr(field)), values[field])
resources.append(res)
return True, resources
    @can_load_page
    def fire(self, selector, event):
        """Fire `event` on element at `selector`

        :param selector: A selector to target the element.
        :param event: The name of the event to trigger.
        """
        self.logger.debug('Fire `%s` on `%s`', event, selector)
        element = self.main_frame.findFirstElement(selector)
        # Synthesize and dispatch a DOM HTMLEvents event on the element.
        return element.evaluateJavaScript("""
            var event = document.createEvent("HTMLEvents");
            event.initEvent('%s', true, true);
            this.dispatchEvent(event);
        """ % event)
    def global_exists(self, global_name):
        """Checks if javascript global exists.

        :param global_name: The name of the global.
        :return: True if the global is defined in the page.
        """
        # [0] drops the released-resources half of evaluate()'s tuple.
        return self.evaluate(
            '!(typeof this[%s] === "undefined");'
            % repr(global_name)
        )[0]
def hide(self):
"""Close the webview."""
try:
self.webview.close()
except:
raise Error("no webview to close")
    def load_cookies(self, cookie_storage, keep_old=False):
        """load from cookielib's CookieJar or Set-Cookie3 format text file.

        :param cookie_storage: file location string on disk or CookieJar
            instance.
        :param keep_old: Don't reset, keep cookies not overridden.
        """
        def toQtCookieJar(PyCookieJar, QtCookieJar):
            # Convert every cookielib cookie and install them in one call.
            allCookies = QtCookieJar.allCookies() if keep_old else []
            for pc in PyCookieJar:
                qc = toQtCookie(pc)
                allCookies.append(qc)
            QtCookieJar.setAllCookies(allCookies)

        def toQtCookie(PyCookie):
            # Map one cookielib.Cookie onto a QNetworkCookie.
            qc = QNetworkCookie(PyCookie.name, PyCookie.value)
            qc.setSecure(PyCookie.secure)
            if PyCookie.path_specified:
                qc.setPath(PyCookie.path)
            if PyCookie.domain != "":
                qc.setDomain(PyCookie.domain)
            if PyCookie.expires and PyCookie.expires != 0:
                t = QDateTime()
                t.setTime_t(PyCookie.expires)
                qc.setExpirationDate(t)
            # not yet handled(maybe less useful):
            # py cookie.rest / QNetworkCookie.setHttpOnly()
            return qc

        # Class-name comparison (not isinstance) keeps historical behavior
        # for str subclasses and duck-typed jars.
        if cookie_storage.__class__.__name__ == 'str':
            cj = LWPCookieJar(cookie_storage)
            cj.load()
            toQtCookieJar(cj, self.cookie_jar)
        elif cookie_storage.__class__.__name__.endswith('CookieJar'):
            toQtCookieJar(cookie_storage, self.cookie_jar)
        else:
            raise ValueError('unsupported cookie_storage type.')
def open(
self,
address,
method='get',
headers={},
auth=None,
body=None,
default_popup_response=None,
wait=True,
timeout=None,
encode_url=True,
user_agent=None,
):
"""Opens a web page.
:param address: The resource URL.
:param method: The Http method.
:param headers: An optional dict of extra request headers.
:param auth: An optional tuple of HTTP auth (username, password).
:param body: An optional string containing a payload.
:param default_popup_response: the default response for any confirm/
alert/prompt popup from the Javascript (replaces the need for the
with blocks)
:param wait: If set to True (which is the default), this method
call waits for the page load to complete becore returning.
Otherwise, it just starts the page load task and it is the
caller's responsibility to wait for the load to finish by other
means (e.g. by calling wait_for_page_loaded()).
:param timeout: An optional timeout.
:param encode_url: Set to true if the url have to be encoded
:param user_agent: An optional User-Agent string.
:return: Page resource, and all loaded resources, unless wait
is False, in which case it returns None.
"""
self.logger.info('Opening %s', address)
body = body or QByteArray()
try:
method = getattr(QNetworkAccessManager, "%sOperation" % method.capitalize())
except AttributeError:
raise Error("Invalid http method %s" % method)
if user_agent is not None:
self.page.set_user_agent(user_agent)
if encode_url:
request = QNetworkRequest(QUrl(address))
else:
request = QNetworkRequest(QUrl.fromEncoded(address))
request.CacheLoadControl(0)
for header in headers:
request.setRawHeader(header, headers[header])
self._auth = auth
self._auth_attempt = 0 # Avoids reccursion
self.main_frame.load(request, method, body)
self.loaded = False
if default_popup_response is not None:
self._prompt_expected = default_popup_response
self._confirm_expected = default_popup_response
if wait:
print('waiting ... (in %d seconds)' % timeout)
return self.wait_for_page_loaded(timeout=timeout)
    def scroll_to_anchor(self, anchor):
        """Scrolls the main frame to the named anchor.

        :param anchor: The anchor name (without the leading '#').
        """
        self.main_frame.scrollToAnchor(anchor)
@contextmanager
def prompt(self, value=''):
"""Statement that tells Ghost how to deal with javascript prompt().
:param value: A string or a callable value to fill in prompt.
"""
self._prompt_expected = value
yield
self._prompt_expected = None
def region_for_selector(self, selector):
"""Returns frame region for given selector as tuple.
:param selector: The targeted element.
"""
geo = self.main_frame.findFirstElement(selector).geometry()
try:
region = (geo.left(), geo.top(), geo.right(), geo.bottom())
except:
raise Error("Can't get region for selector '%s'" % selector)
return region
    def save_cookies(self, cookie_storage):
        """Save to cookielib's CookieJar or Set-Cookie3 format text file.

        :param cookie_storage: file location string or CookieJar instance.
        """
        def toPyCookieJar(QtCookieJar, PyCookieJar):
            # Copy every Qt cookie into the python jar.
            for c in QtCookieJar.allCookies():
                PyCookieJar.set_cookie(toPyCookie(c))

        def toPyCookie(QtCookie):
            # Map one QNetworkCookie onto a cookielib.Cookie.
            port = None
            port_specified = False
            secure = QtCookie.isSecure()
            name = str(QtCookie.name())
            value = str(QtCookie.value())
            v = str(QtCookie.path())
            path_specified = bool(v != "")
            path = v if path_specified else None
            v = str(QtCookie.domain())
            domain_specified = bool(v != "")
            domain = v
            if domain_specified:
                domain_initial_dot = v.startswith('.')
            else:
                domain_initial_dot = None
            v = int(QtCookie.expirationDate().toTime_t())
            # Long type boundary on 32bit platforms; avoid ValueError
            expires = 2147483647 if v > 2147483647 else v
            rest = {}
            discard = False
            # Positional Cookie(...) signature: version, name, value, port,
            # port_specified, domain, domain_specified, domain_initial_dot,
            # path, path_specified, secure, expires, discard, comment,
            # comment_url, rest.
            return Cookie(
                0,
                name,
                value,
                port,
                port_specified,
                domain,
                domain_specified,
                domain_initial_dot,
                path,
                path_specified,
                secure,
                expires,
                discard,
                None,
                None,
                rest,
            )

        if cookie_storage.__class__.__name__ == 'str':
            cj = LWPCookieJar(cookie_storage)
            toPyCookieJar(self.cookie_jar, cj)
            cj.save()
        elif cookie_storage.__class__.__name__.endswith('CookieJar'):
            toPyCookieJar(self.cookie_jar, cookie_storage)
        else:
            raise ValueError('unsupported cookie_storage type.')
@can_load_page
def set_field_value(self, selector, value, blur=True):
"""Sets the value of the field matched by given selector.
:param selector: A CSS selector that target the field.
:param value: The value to fill in.
:param blur: An optional boolean that force blur when filled in.
"""
self.logger.debug('Setting value "%s" for "%s"', value, selector)
def _set_checkbox_value(el, value):
el.setFocus()
if value is True:
el.setAttribute('checked', 'checked')
else:
el.removeAttribute('checked')
def _set_checkboxes_value(els, value):
for el in els:
if el.attribute('value') == value:
_set_checkbox_value(el, True)
else:
_set_checkbox_value(el, False)
def _set_radio_value(els, value):
for el in els:
if el.attribute('value') == value:
el.setFocus()
el.setAttribute('checked', 'checked')
def _set_text_value(el, value):
el.setFocus()
el.setAttribute('value', value)
def _set_select_value(el, value):
el.setFocus()
index = 0
for option in el.findAll('option'):
if option.attribute('value') == value:
option.evaluateJavaScript('this.selected = true;')
el.evaluateJavaScript('this.selectedIndex = %d' % index)
break
index += 1
def _set_textarea_value(el, value):
el.setFocus()
el.setPlainText(value)
res, resources = None, []
element = self.main_frame.findFirstElement(selector)
if element.isNull():
raise Error('can\'t find element for "%s"' % selector)
tag_name = str(element.tagName()).lower()
if tag_name == "select":
_set_select_value(element, value)
elif tag_name == "textarea":
_set_textarea_value(element, value)
elif tag_name == "input":
type_ = str(element.attribute('type')).lower()
if type_ in (
"color",
"date",
"datatime",
"datetime-local",
"email",
"hidden",
"month",
"number",
"password",
"range",
"search",
"tel",
"text",
"time",
"url",
"week",
"",
):
_set_text_value(element, value)
elif type_ == "checkbox":
els = self.main_frame.findAllElement(selector)
if els.count() > 1:
_set_checkboxes_value(els, value)
else:
_set_checkbox_value(element, value)
elif type_ == "radio":
_set_radio_value(
self.main_frame.findAllElement(selector),
value,
)
elif type_ == "file":
self._upload_file = value
res, resources = self.click(selector)
self._upload_file = None
else:
raise Error('unsupported field tag')
for event in ('input', 'change'):
self.fire(selector, event)
if blur:
self.call(selector, 'blur')
return res, resources
def set_proxy(
self,
type_,
host='localhost',
port=8888,
user='',
password='',
):
"""Set up proxy for FURTHER connections.
:param type_: proxy type to use: \
none/socks5/https/http/default.
:param host: proxy server ip or host name.
:param port: proxy port.
"""
_types = {
'default': QNetworkProxy.DefaultProxy,
'none': QNetworkProxy.NoProxy,
'socks5': QNetworkProxy.SocksProxy,
'https': QNetworkProxy.HttpProxy,
'http': QNetworkProxy.HttpCacheProxy,
}
if type_ is None:
type_ = 'none'
type_ = type_.lower()
if type_ in ('none', 'default'):
self.manager.setProxy(QNetworkProxy(_types[type_]))
return
elif type_ in _types:
proxy = QNetworkProxy(
_types[type_],
hostName=host,
port=port,
user=user,
password=password,
)
self.manager.setProxy(proxy)
else:
raise ValueError((
'Unsupported proxy type: %s' % type_,
'\nsupported types are: none/socks5/https/http/default',
))
    def set_viewport_size(self, width, height):
        """Sets the page viewport size.

        :param width: An integer that sets width pixel count.
        :param height: An integer that sets height pixel count.
        """
        new_size = QSize(width, height)
        self.webview.resize(new_size)
        self.page.setPreferredContentsSize(new_size)
        self.page.setViewportSize(new_size)
        # Pump the event loop so the resize takes effect before returning.
        self.sleep()
    def append_popup_message(self, message):
        """Records a javascript popup message for later inspection."""
        self.popup_messages.append(str(message))
    def show(self):
        """Show current page inside a QWebView."""
        self.logger.debug('Showing webview')
        self.webview.show()
        # Let Qt actually paint the window before returning.
        self.sleep()
def sleep(self, value=0.1):
started_at = time.time()
while time.time() <= (started_at + value):
time.sleep(0.01)
self.ghost._app.processEvents()
def wait_for(self, condition, timeout_message, timeout=None):
"""Waits until condition is True.
:param condition: A callable that returns the condition.
:param timeout_message: The exception message on timeout.
:param timeout: An optional timeout.
"""
timeout = self.wait_timeout if timeout is None else timeout
started_at = time.time()
while not condition():
if time.time() > (started_at + timeout):
raise TimeoutError(timeout_message)
self.sleep()
if self.wait_callback is not None:
self.wait_callback()
    def wait_for_alert(self, timeout=None):
        """Waits for main frame alert().

        :param timeout: An optional timeout.
        :return: (alert message, released resources).
        """
        self.wait_for(lambda: self._alert is not None, 'User has not been alerted.', timeout)
        # Consume the alert so the next wait starts fresh.
        msg = self._alert
        self._alert = None
        return msg, self._release_last_resources()
def wait_for_page_loaded(self, timeout=None):
"""Waits until page is loaded, assumed that a page as been required.
:param timtout: An optional timeout.
"""
self.wait_for(lambda: self.loaded, 'Unable to load requested page', timeout)
resources = self._release_last_resources()
page = None
url = self.main_frame.url().toString()
url_without_hash = url.split("#")[0]
for resource in resources:
if url == resource.url or url_without_hash == resource.url:
page = resource
self.logger.info('Page adloed %s', url)
return page, resources
def wait_for_selector(self, selector, timeout=None):
"""Waits until selector match an element on the frame.
:param selector: The selector to wait for.
:param timeout: An optional timeout.
"""
self.wait_for(
lambda: self.exists(selector),
'Can\'t find element matching "%s"' % selector,
timeout,
)
return True, self._release_last_resources()
    def wait_while_selector(self, selector, timeout=None):
        """Waits until the selector no longer matches an element on the frame.

        :param selector: The selector to wait for.
        :param timeout: An optional timeout.
        """
        self.wait_for(
            lambda: not self.exists(selector),
            'Element matching "%s" is still available' % selector,
            timeout,
        )
        return True, self._release_last_resources()
def wait_for_text(self, text, timeout=None):
"""Waits until given text appear on main frame.
:param text: The text to wait for.
:param timeout: An optional timeout.
"""
self.wait_for(
lambda: text in self.content,
'Can\'t find "%s" in current frame' % text,
timeout,
)
return True, self._release_last_resources()
def _authenticate(self, mix, authenticator):
"""Called back on basic / proxy http auth.
:param mix: The QNetworkReply or QNetworkProxy object.
:param authenticator: The QAuthenticator object.
"""
if self._auth is not None and self._auth_attempt == 0:
username, password = self._auth
authenticator.serUser(username)
authenticator.setPassword(password)
self._auth_attempt += 1
    def _page_loaded(self):
        """Called back when page loaded."""
        self.loaded = True
        # Give pending events (resources, redirects) a chance to settle.
        self.sleep()
    def _page_load_started(self):
        """Called back when page load started; marks the session busy."""
        self.loaded = False
def _release_last_resources(self):
"""Releases last loaded resources.
:return: The released resources.
"""
last_resources = self.http_resources
self.http_resources = []
return last_resources
    def _request_ended(self, reply):
        """Adds an HttpResource object to http_resources.

        :param reply: The QNetworkReply object.
        """
        # Only record replies that carry an HTTP status code.
        if reply.attribute(QNetworkRequest.HttpStatusCodeAttribute):
            self.logger.debug("[%s] bytesAvailable()=%s",
                              str(reply.url()),
                              reply.bytesAvailable())
            # Prefer the body buffered by replyReadyRead; fall back to
            # whatever is still readable on the reply.
            try:
                content = reply.data
            except AttributeError:
                content = reply.readAll()
            self.http_resources.append(HttpResource(
                self,
                reply,
                content=content,
            ))
    def _unsupported_content(self, reply):
        """Called back for content the page cannot render; downloads it."""
        self.logger.info("Unsupported content %s", str(reply.url()))
        reply.readyRead.connect(
            lambda reply=reply: self._reply_download_content(reply))
    def _reply_download_content(self, reply):
        """Adds an HttpResource object to http_resources with unsupported
        content.

        :param reply: The QNetworkReply object.
        """
        # Only record replies that carry an HTTP status code.
        if reply.attribute(QNetworkRequest.HttpStatusCodeAttribute):
            self.http_resources.append(HttpResource(
                self,
                reply,
                reply.readAll(),
            ))
    def _on_manager_ssl_errors(self, reply, errors):
        """Called back on SSL errors; ignore or log them per session config."""
        url = str(reply.url().toString())
        if self.ignore_ssl_errors:
            reply.ignoreSslErrors()
        else:
            self.logger.warning('SSL certificate error: %s', url)
    def __enter__(self):
        """Context manager entry; the session itself is the resource."""
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context manager exit; always tear down the Qt widgets."""
        self.exit()
| |
"""Test MQTT fans."""
import pytest
from homeassistant.components import fan
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_SUPPORTED_FEATURES,
STATE_OFF,
STATE_ON,
)
from homeassistant.setup import async_setup_component
from .test_common import (
help_test_availability_when_connection_lost,
help_test_availability_without_topic,
help_test_custom_availability_payload,
help_test_default_availability_payload,
help_test_discovery_broken,
help_test_discovery_removal,
help_test_discovery_update,
help_test_discovery_update_attr,
help_test_discovery_update_unchanged,
help_test_entity_debug_info_message,
help_test_entity_device_info_remove,
help_test_entity_device_info_update,
help_test_entity_device_info_with_connection,
help_test_entity_device_info_with_identifier,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
help_test_setting_attribute_via_mqtt_json_message,
help_test_setting_attribute_with_template,
help_test_unique_id,
help_test_update_with_json_attrs_bad_JSON,
help_test_update_with_json_attrs_not_dict,
)
from tests.async_mock import patch
from tests.common import async_fire_mqtt_message
from tests.components.fan import common
# Minimal MQTT fan platform configuration shared by the helper tests below.
DEFAULT_CONFIG = {
    fan.DOMAIN: {
        "platform": "mqtt",
        "name": "test",
        "state_topic": "state-topic",
        "command_topic": "command-topic",
    }
}
async def test_fail_setup_if_no_command_topic(hass, mqtt_mock):
    """Test that platform setup fails when no command topic is provided."""
    assert await async_setup_component(
        hass, fan.DOMAIN, {fan.DOMAIN: {"platform": "mqtt", "name": "test"}}
    )
    await hass.async_block_till_done()
    # The entity must not have been created without a command topic.
    assert hass.states.get("fan.test") is None
async def test_controlling_state_via_topic(hass, mqtt_mock):
    """Test the controlling state via topic."""
    assert await async_setup_component(
        hass,
        fan.DOMAIN,
        {
            fan.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "payload_off": "StAtE_OfF",
                "payload_on": "StAtE_On",
                "oscillation_state_topic": "oscillation-state-topic",
                "oscillation_command_topic": "oscillation-command-topic",
                "payload_oscillation_off": "OsC_OfF",
                "payload_oscillation_on": "OsC_On",
                "speed_state_topic": "speed-state-topic",
                "speed_command_topic": "speed-command-topic",
                "payload_off_speed": "speed_OfF",
                "payload_low_speed": "speed_lOw",
                "payload_medium_speed": "speed_mEdium",
                "payload_high_speed": "speed_High",
            }
        },
    )
    await hass.async_block_till_done()
    # With a state topic the entity is not optimistic (no assumed state).
    state = hass.states.get("fan.test")
    assert state.state is STATE_OFF
    assert not state.attributes.get(ATTR_ASSUMED_STATE)
    # On/off state follows the custom payloads on the state topic.
    async_fire_mqtt_message(hass, "state-topic", "StAtE_On")
    state = hass.states.get("fan.test")
    assert state.state is STATE_ON
    async_fire_mqtt_message(hass, "state-topic", "StAtE_OfF")
    state = hass.states.get("fan.test")
    assert state.state is STATE_OFF
    assert state.attributes.get("oscillating") is False
    # Oscillation follows the custom payloads on its own topic.
    async_fire_mqtt_message(hass, "oscillation-state-topic", "OsC_On")
    state = hass.states.get("fan.test")
    assert state.attributes.get("oscillating") is True
    async_fire_mqtt_message(hass, "oscillation-state-topic", "OsC_OfF")
    state = hass.states.get("fan.test")
    assert state.attributes.get("oscillating") is False
    assert state.attributes.get("speed") == fan.SPEED_OFF
    # Speed cycles through off/low/medium/high via its payloads.
    async_fire_mqtt_message(hass, "speed-state-topic", "speed_lOw")
    state = hass.states.get("fan.test")
    assert state.attributes.get("speed") == fan.SPEED_LOW
    async_fire_mqtt_message(hass, "speed-state-topic", "speed_mEdium")
    state = hass.states.get("fan.test")
    assert state.attributes.get("speed") == fan.SPEED_MEDIUM
    async_fire_mqtt_message(hass, "speed-state-topic", "speed_High")
    state = hass.states.get("fan.test")
    assert state.attributes.get("speed") == fan.SPEED_HIGH
    async_fire_mqtt_message(hass, "speed-state-topic", "speed_OfF")
    state = hass.states.get("fan.test")
    assert state.attributes.get("speed") == fan.SPEED_OFF
async def test_controlling_state_via_topic_and_json_message(hass, mqtt_mock):
    """Test the controlling state via topic and JSON message."""
    assert await async_setup_component(
        hass,
        fan.DOMAIN,
        {
            fan.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "oscillation_state_topic": "oscillation-state-topic",
                "oscillation_command_topic": "oscillation-command-topic",
                "speed_state_topic": "speed-state-topic",
                "speed_command_topic": "speed-command-topic",
                "state_value_template": "{{ value_json.val }}",
                "oscillation_value_template": "{{ value_json.val }}",
                "speed_value_template": "{{ value_json.val }}",
            }
        },
    )
    await hass.async_block_till_done()
    state = hass.states.get("fan.test")
    assert state.state is STATE_OFF
    assert not state.attributes.get(ATTR_ASSUMED_STATE)
    # Each payload is JSON; the value templates extract the "val" key.
    async_fire_mqtt_message(hass, "state-topic", '{"val":"ON"}')
    state = hass.states.get("fan.test")
    assert state.state is STATE_ON
    async_fire_mqtt_message(hass, "state-topic", '{"val":"OFF"}')
    state = hass.states.get("fan.test")
    assert state.state is STATE_OFF
    assert state.attributes.get("oscillating") is False
    async_fire_mqtt_message(hass, "oscillation-state-topic", '{"val":"oscillate_on"}')
    state = hass.states.get("fan.test")
    assert state.attributes.get("oscillating") is True
    async_fire_mqtt_message(hass, "oscillation-state-topic", '{"val":"oscillate_off"}')
    state = hass.states.get("fan.test")
    assert state.attributes.get("oscillating") is False
    assert state.attributes.get("speed") == fan.SPEED_OFF
    async_fire_mqtt_message(hass, "speed-state-topic", '{"val":"low"}')
    state = hass.states.get("fan.test")
    assert state.attributes.get("speed") == fan.SPEED_LOW
    async_fire_mqtt_message(hass, "speed-state-topic", '{"val":"medium"}')
    state = hass.states.get("fan.test")
    assert state.attributes.get("speed") == fan.SPEED_MEDIUM
    async_fire_mqtt_message(hass, "speed-state-topic", '{"val":"high"}')
    state = hass.states.get("fan.test")
    assert state.attributes.get("speed") == fan.SPEED_HIGH
    async_fire_mqtt_message(hass, "speed-state-topic", '{"val":"off"}')
    state = hass.states.get("fan.test")
    assert state.attributes.get("speed") == fan.SPEED_OFF
async def test_sending_mqtt_commands_and_optimistic(hass, mqtt_mock):
    """Test optimistic mode without state topic."""
    assert await async_setup_component(
        hass,
        fan.DOMAIN,
        {
            fan.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "command_topic": "command-topic",
                "payload_off": "StAtE_OfF",
                "payload_on": "StAtE_On",
                "oscillation_command_topic": "oscillation-command-topic",
                "oscillation_state_topic": "oscillation-state-topic",
                "payload_oscillation_off": "OsC_OfF",
                "payload_oscillation_on": "OsC_On",
                "speed_command_topic": "speed-command-topic",
                "speed_state_topic": "speed-state-topic",
                "payload_off_speed": "speed_OfF",
                "payload_low_speed": "speed_lOw",
                "payload_medium_speed": "speed_mEdium",
                "payload_high_speed": "speed_High",
            }
        },
    )
    await hass.async_block_till_done()

    def assert_state(expected):
        # No state topic is configured, so the entity must always report
        # an assumed state.
        current = hass.states.get("fan.test")
        assert current.state is expected
        assert current.attributes.get(ATTR_ASSUMED_STATE)

    assert_state(STATE_OFF)

    # Turning on/off publishes the custom on/off payloads.
    await common.async_turn_on(hass, "fan.test")
    mqtt_mock.async_publish.assert_called_once_with(
        "command-topic", "StAtE_On", 0, False
    )
    mqtt_mock.async_publish.reset_mock()
    assert_state(STATE_ON)

    await common.async_turn_off(hass, "fan.test")
    mqtt_mock.async_publish.assert_called_once_with(
        "command-topic", "StAtE_OfF", 0, False
    )
    mqtt_mock.async_publish.reset_mock()
    assert_state(STATE_OFF)

    # Oscillation commands use the custom oscillation payloads.
    for oscillating, payload in ((True, "OsC_On"), (False, "OsC_OfF")):
        await common.async_oscillate(hass, "fan.test", oscillating)
        mqtt_mock.async_publish.assert_called_once_with(
            "oscillation-command-topic", payload, 0, False
        )
        mqtt_mock.async_publish.reset_mock()
        assert_state(STATE_OFF)

    # Speed commands use the custom speed payloads.
    for speed, payload in (
        (fan.SPEED_LOW, "speed_lOw"),
        (fan.SPEED_MEDIUM, "speed_mEdium"),
        (fan.SPEED_HIGH, "speed_High"),
        (fan.SPEED_OFF, "speed_OfF"),
    ):
        await common.async_set_speed(hass, "fan.test", speed)
        mqtt_mock.async_publish.assert_called_once_with(
            "speed-command-topic", payload, 0, False
        )
        mqtt_mock.async_publish.reset_mock()
        assert_state(STATE_OFF)
async def test_on_sending_mqtt_commands_and_optimistic(hass, mqtt_mock):
    """Test on with speed."""
    assert await async_setup_component(
        hass,
        fan.DOMAIN,
        {
            fan.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "command_topic": "command-topic",
                "oscillation_command_topic": "oscillation-command-topic",
                "speed_command_topic": "speed-command-topic",
            }
        },
    )
    await hass.async_block_till_done()

    fan_state = hass.states.get("fan.test")
    assert fan_state.state is STATE_OFF
    assert fan_state.attributes.get(ATTR_ASSUMED_STATE)

    # Turning on without a speed only publishes the state command.
    await common.async_turn_on(hass, "fan.test")
    mqtt_mock.async_publish.assert_called_once_with("command-topic", "ON", 0, False)
    mqtt_mock.async_publish.reset_mock()
    fan_state = hass.states.get("fan.test")
    assert fan_state.state is STATE_ON
    assert fan_state.attributes.get(ATTR_ASSUMED_STATE)
    assert fan_state.attributes.get(fan.ATTR_SPEED) is None
    assert fan_state.attributes.get(fan.ATTR_OSCILLATING) is None

    await common.async_turn_off(hass, "fan.test")
    mqtt_mock.async_publish.assert_called_once_with("command-topic", "OFF", 0, False)
    mqtt_mock.async_publish.reset_mock()
    fan_state = hass.states.get("fan.test")
    assert fan_state.state is STATE_OFF
    assert fan_state.attributes.get(ATTR_ASSUMED_STATE)

    # Turning on with a speed publishes both the state and the speed command.
    await common.async_turn_on(hass, "fan.test", speed="low")
    assert mqtt_mock.async_publish.call_count == 2
    mqtt_mock.async_publish.assert_any_call("command-topic", "ON", 0, False)
    mqtt_mock.async_publish.assert_any_call("speed-command-topic", "low", 0, False)
    mqtt_mock.async_publish.reset_mock()
    fan_state = hass.states.get("fan.test")
    assert fan_state.state is STATE_ON
    assert fan_state.attributes.get(ATTR_ASSUMED_STATE)
    assert fan_state.attributes.get(fan.ATTR_SPEED) == "low"
    assert fan_state.attributes.get(fan.ATTR_OSCILLATING) is None
async def test_sending_mqtt_commands_and_explicit_optimistic(hass, mqtt_mock):
    """Test optimistic mode with state topic."""
    assert await async_setup_component(
        hass,
        fan.DOMAIN,
        {
            fan.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "oscillation_state_topic": "oscillation-state-topic",
                "oscillation_command_topic": "oscillation-command-topic",
                "speed_state_topic": "speed-state-topic",
                "speed_command_topic": "speed-command-topic",
                "optimistic": True,
            }
        },
    )
    await hass.async_block_till_done()

    def assert_off_and_assumed():
        # "optimistic: True" forces assumed state even with state topics set.
        current = hass.states.get("fan.test")
        assert current.state is STATE_OFF
        assert current.attributes.get(ATTR_ASSUMED_STATE)

    assert_off_and_assumed()

    await common.async_turn_on(hass, "fan.test")
    mqtt_mock.async_publish.assert_called_once_with("command-topic", "ON", 0, False)
    mqtt_mock.async_publish.reset_mock()
    current = hass.states.get("fan.test")
    assert current.state is STATE_ON
    assert current.attributes.get(ATTR_ASSUMED_STATE)

    await common.async_turn_off(hass, "fan.test")
    mqtt_mock.async_publish.assert_called_once_with("command-topic", "OFF", 0, False)
    mqtt_mock.async_publish.reset_mock()
    assert_off_and_assumed()

    # Default oscillation payloads are used.
    for oscillating, payload in ((True, "oscillate_on"), (False, "oscillate_off")):
        await common.async_oscillate(hass, "fan.test", oscillating)
        mqtt_mock.async_publish.assert_called_once_with(
            "oscillation-command-topic", payload, 0, False
        )
        mqtt_mock.async_publish.reset_mock()
        assert_off_and_assumed()

    # Known speeds publish their default payloads; unknown speeds pass through.
    for speed, payload in (
        (fan.SPEED_LOW, "low"),
        (fan.SPEED_MEDIUM, "medium"),
        (fan.SPEED_HIGH, "high"),
        (fan.SPEED_OFF, "off"),
        ("cUsToM", "cUsToM"),
    ):
        await common.async_set_speed(hass, "fan.test", speed)
        mqtt_mock.async_publish.assert_called_once_with(
            "speed-command-topic", payload, 0, False
        )
        mqtt_mock.async_publish.reset_mock()
        assert_off_and_assumed()
async def test_attributes(hass, mqtt_mock):
    """Test attributes."""
    assert await async_setup_component(
        hass,
        fan.DOMAIN,
        {
            fan.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "command_topic": "command-topic",
                "oscillation_command_topic": "oscillation-command-topic",
                "speed_command_topic": "speed-command-topic",
            }
        },
    )
    await hass.async_block_till_done()

    state = hass.states.get("fan.test")
    assert state.state is STATE_OFF
    assert state.attributes.get(fan.ATTR_SPEED_LIST) == ["off", "low", "medium", "high"]

    # Turning on/off does not touch the speed or oscillation attributes.
    await common.async_turn_on(hass, "fan.test")
    state = hass.states.get("fan.test")
    assert state.state is STATE_ON
    assert state.attributes.get(ATTR_ASSUMED_STATE)
    assert state.attributes.get(fan.ATTR_SPEED) is None
    assert state.attributes.get(fan.ATTR_OSCILLATING) is None

    await common.async_turn_off(hass, "fan.test")
    state = hass.states.get("fan.test")
    assert state.state is STATE_OFF
    assert state.attributes.get(ATTR_ASSUMED_STATE)
    assert state.attributes.get(fan.ATTR_SPEED) is None
    assert state.attributes.get(fan.ATTR_OSCILLATING) is None

    # Oscillation is reflected optimistically in the attribute.
    for oscillating in (True, False):
        await common.async_oscillate(hass, "fan.test", oscillating)
        state = hass.states.get("fan.test")
        assert state.state is STATE_OFF
        assert state.attributes.get(ATTR_ASSUMED_STATE)
        assert state.attributes.get(fan.ATTR_SPEED) is None
        assert state.attributes.get(fan.ATTR_OSCILLATING) is oscillating

    # Each speed (including a non-standard one) is stored verbatim.
    for speed in (
        fan.SPEED_LOW,
        fan.SPEED_MEDIUM,
        fan.SPEED_HIGH,
        fan.SPEED_OFF,
        "cUsToM",
    ):
        await common.async_set_speed(hass, "fan.test", speed)
        state = hass.states.get("fan.test")
        assert state.state is STATE_OFF
        assert state.attributes.get(ATTR_ASSUMED_STATE)
        assert state.attributes.get(fan.ATTR_SPEED) == speed
        assert state.attributes.get(fan.ATTR_OSCILLATING) is False
async def test_custom_speed_list(hass, mqtt_mock):
    """Test that a configured "speeds" list is exposed as the speed list."""
    # NOTE(review): the original docstring ("Test optimistic mode without
    # state topic.") was a copy-paste from another test; this test checks
    # the custom speed list attribute.
    assert await async_setup_component(
        hass,
        fan.DOMAIN,
        {
            fan.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "command_topic": "command-topic",
                "oscillation_command_topic": "oscillation-command-topic",
                "oscillation_state_topic": "oscillation-state-topic",
                "speed_command_topic": "speed-command-topic",
                "speed_state_topic": "speed-state-topic",
                "speeds": ["off", "high"],
            }
        },
    )
    await hass.async_block_till_done()
    state = hass.states.get("fan.test")
    assert state.state is STATE_OFF
    # Only the configured speeds are reported, not the default four.
    assert state.attributes.get(fan.ATTR_SPEED_LIST) == ["off", "high"]
async def test_supported_features(hass, mqtt_mock):
    """Test supported features are derived from the configured command topics."""
    # NOTE(review): the original docstring ("Test optimistic mode without
    # state topic.") was a copy-paste from another test.
    assert await async_setup_component(
        hass,
        fan.DOMAIN,
        {
            fan.DOMAIN: [
                {
                    "platform": "mqtt",
                    "name": "test1",
                    "command_topic": "command-topic",
                },
                {
                    "platform": "mqtt",
                    "name": "test2",
                    "command_topic": "command-topic",
                    "oscillation_command_topic": "oscillation-command-topic",
                },
                {
                    "platform": "mqtt",
                    "name": "test3",
                    "command_topic": "command-topic",
                    "speed_command_topic": "speed-command-topic",
                },
                {
                    "platform": "mqtt",
                    "name": "test4",
                    "command_topic": "command-topic",
                    "oscillation_command_topic": "oscillation-command-topic",
                    "speed_command_topic": "speed-command-topic",
                },
            ]
        },
    )
    await hass.async_block_till_done()
    # command_topic alone -> no optional features.
    state = hass.states.get("fan.test1")
    assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 0
    # oscillation_command_topic -> SUPPORT_OSCILLATE.
    state = hass.states.get("fan.test2")
    assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == fan.SUPPORT_OSCILLATE
    # speed_command_topic -> SUPPORT_SET_SPEED.
    state = hass.states.get("fan.test3")
    assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == fan.SUPPORT_SET_SPEED
    # Both topics -> both feature flags.
    state = hass.states.get("fan.test4")
    assert (
        state.attributes.get(ATTR_SUPPORTED_FEATURES)
        == fan.SUPPORT_OSCILLATE | fan.SUPPORT_SET_SPEED
    )
async def test_availability_when_connection_lost(hass, mqtt_mock):
    """Test availability after MQTT disconnection."""
    # Delegates to the shared MQTT helper using this platform's default config.
    await help_test_availability_when_connection_lost(
        hass, mqtt_mock, fan.DOMAIN, DEFAULT_CONFIG
    )
async def test_availability_without_topic(hass, mqtt_mock):
    """Test availability without defined availability topic."""
    # Delegates to the shared MQTT helper using this platform's default config.
    await help_test_availability_without_topic(
        hass, mqtt_mock, fan.DOMAIN, DEFAULT_CONFIG
    )
async def test_default_availability_payload(hass, mqtt_mock):
    """Test availability by default payload with defined topic."""
    # Shared helper; "state-topic"/"1" are the state topic and on-payload it
    # uses to exercise the entity alongside availability handling.
    await help_test_default_availability_payload(
        hass, mqtt_mock, fan.DOMAIN, DEFAULT_CONFIG, True, "state-topic", "1"
    )
async def test_custom_availability_payload(hass, mqtt_mock):
    """Test availability by custom payload with defined topic."""
    # Shared helper; same arguments as the default-payload variant above.
    await help_test_custom_availability_payload(
        hass, mqtt_mock, fan.DOMAIN, DEFAULT_CONFIG, True, "state-topic", "1"
    )
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
    """Test the setting of attribute via MQTT with JSON payload."""
    # Delegates to the shared MQTT attribute helper.
    await help_test_setting_attribute_via_mqtt_json_message(
        hass, mqtt_mock, fan.DOMAIN, DEFAULT_CONFIG
    )
async def test_setting_attribute_with_template(hass, mqtt_mock):
    """Test the setting of attribute via MQTT with a value template."""
    # NOTE(review): original docstring was copied from the JSON-payload test;
    # this variant exercises the templated attribute path in the helper.
    await help_test_setting_attribute_with_template(
        hass, mqtt_mock, fan.DOMAIN, DEFAULT_CONFIG
    )
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
    """Test attributes get extracted from a JSON result."""
    # Helper feeds a non-dict JSON payload; caplog captures the warning.
    await help_test_update_with_json_attrs_not_dict(
        hass, mqtt_mock, caplog, fan.DOMAIN, DEFAULT_CONFIG
    )
async def test_update_with_json_attrs_bad_JSON(hass, mqtt_mock, caplog):
    """Test attributes get extracted from a JSON result."""
    # Helper feeds an unparsable payload; caplog captures the warning.
    await help_test_update_with_json_attrs_bad_JSON(
        hass, mqtt_mock, caplog, fan.DOMAIN, DEFAULT_CONFIG
    )
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
    """Test update of discovered MQTTAttributes."""
    # Delegates to the shared MQTT discovery helper.
    await help_test_discovery_update_attr(
        hass, mqtt_mock, caplog, fan.DOMAIN, DEFAULT_CONFIG
    )
async def test_unique_id(hass, mqtt_mock):
    """Test unique_id option only creates one fan per id."""
    # Two fans deliberately share the same unique_id; the helper asserts
    # that only a single entity is created.
    duplicated_id = "TOTALLY_UNIQUE"
    config = {
        fan.DOMAIN: [
            {
                "platform": "mqtt",
                "name": entity_name,
                "state_topic": "test-topic",
                "command_topic": "test_topic",
                "unique_id": duplicated_id,
            }
            for entity_name in ("Test 1", "Test 2")
        ]
    }
    await help_test_unique_id(hass, mqtt_mock, fan.DOMAIN, config)
async def test_discovery_removal_fan(hass, mqtt_mock, caplog):
    """Test removal of discovered fan."""
    # Minimal valid discovery payload; the helper then sends an empty one.
    data = '{ "name": "test", "command_topic": "test_topic" }'
    await help_test_discovery_removal(hass, mqtt_mock, caplog, fan.DOMAIN, data)
async def test_discovery_update_fan(hass, mqtt_mock, caplog):
    """Test update of discovered fan."""
    # Two payloads differing only by name; the helper checks the entity
    # follows the second discovery message.
    data1 = '{ "name": "Beer", "command_topic": "test_topic" }'
    data2 = '{ "name": "Milk", "command_topic": "test_topic" }'
    await help_test_discovery_update(hass, mqtt_mock, caplog, fan.DOMAIN, data1, data2)
async def test_discovery_update_unchanged_fan(hass, mqtt_mock, caplog):
    """Test update of discovered fan."""
    data1 = '{ "name": "Beer", "command_topic": "test_topic" }'
    # Patch discovery_update so the helper can observe whether it gets
    # called when an identical payload is re-sent (presumably it must not —
    # confirm against the helper's implementation).
    with patch(
        "homeassistant.components.mqtt.fan.MqttFan.discovery_update"
    ) as discovery_update:
        await help_test_discovery_update_unchanged(
            hass, mqtt_mock, caplog, fan.DOMAIN, data1, discovery_update
        )
@pytest.mark.no_fail_on_log_exception
async def test_discovery_broken(hass, mqtt_mock, caplog):
    """Test handling of bad discovery message."""
    # data1 lacks the required command_topic; data2 is valid and should
    # still create the entity afterwards.
    data1 = '{ "name": "Beer" }'
    data2 = '{ "name": "Milk", "command_topic": "test_topic" }'
    await help_test_discovery_broken(hass, mqtt_mock, caplog, fan.DOMAIN, data1, data2)
async def test_entity_device_info_with_connection(hass, mqtt_mock):
    """Test MQTT fan device registry integration."""
    # Delegates to the shared device-registry helper.
    await help_test_entity_device_info_with_connection(
        hass, mqtt_mock, fan.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
    """Test MQTT fan device registry integration."""
    # Delegates to the shared device-registry helper.
    await help_test_entity_device_info_with_identifier(
        hass, mqtt_mock, fan.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_device_info_update(hass, mqtt_mock):
    """Test device registry update."""
    # Delegates to the shared device-registry helper.
    await help_test_entity_device_info_update(
        hass, mqtt_mock, fan.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_device_info_remove(hass, mqtt_mock):
    """Test device registry remove."""
    # Delegates to the shared device-registry helper.
    await help_test_entity_device_info_remove(
        hass, mqtt_mock, fan.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_id_update_subscriptions(hass, mqtt_mock):
    """Test MQTT subscriptions are managed when entity_id is updated."""
    # Delegates to the shared entity-registry helper.
    await help_test_entity_id_update_subscriptions(
        hass, mqtt_mock, fan.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_id_update_discovery_update(hass, mqtt_mock):
    """Test MQTT discovery update when entity_id is updated."""
    # Delegates to the shared entity-registry helper.
    await help_test_entity_id_update_discovery_update(
        hass, mqtt_mock, fan.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_debug_info_message(hass, mqtt_mock):
    """Test MQTT debug info."""
    # Delegates to the shared debug-info helper.
    await help_test_entity_debug_info_message(
        hass, mqtt_mock, fan.DOMAIN, DEFAULT_CONFIG
    )
| |
import unittest
import sys
import os
import subprocess
import shutil
from copy import copy
from test.support import (run_unittest, TESTFN, unlink, check_warnings,
captured_stdout, skip_unless_symlink)
import sysconfig
from sysconfig import (get_paths, get_platform, get_config_vars,
get_path, get_path_names, _INSTALL_SCHEMES,
_get_default_scheme, _expand_vars,
get_scheme_names, get_config_var, _main)
import _osx_support
class TestSysConfig(unittest.TestCase):
    """Tests for the sysconfig module's public helpers (paths, platform
    string, config vars) and a few private internals."""

    def setUp(self):
        """Snapshot every piece of process-global state the tests mutate."""
        super(TestSysConfig, self).setUp()
        self.sys_path = sys.path[:]
        # patching os.uname
        if hasattr(os, 'uname'):
            self.uname = os.uname
            self._uname = os.uname()
        else:
            self.uname = None
            self._set_uname(('',)*5)
        os.uname = self._get_uname
        # saving the environment
        self.name = os.name
        self.platform = sys.platform
        self.version = sys.version
        self.sep = os.sep
        self.join = os.path.join
        self.isabs = os.path.isabs
        self.splitdrive = os.path.splitdrive
        # Keep both the dict object itself and a shallow copy of its
        # contents, so tearDown can restore the exact same object with the
        # original entries.
        self._config_vars = sysconfig._CONFIG_VARS, copy(sysconfig._CONFIG_VARS)
        self._added_envvars = []
        self._changed_envvars = []
        for var in ('MACOSX_DEPLOYMENT_TARGET', 'PATH'):
            if var in os.environ:
                self._changed_envvars.append((var, os.environ[var]))
            else:
                self._added_envvars.append(var)

    def tearDown(self):
        """Restore everything setUp captured."""
        sys.path[:] = self.sys_path
        self._cleanup_testfn()
        if self.uname is not None:
            os.uname = self.uname
        else:
            # os.uname did not exist originally; remove the patched stub.
            del os.uname
        os.name = self.name
        sys.platform = self.platform
        sys.version = self.version
        os.sep = self.sep
        os.path.join = self.join
        os.path.isabs = self.isabs
        os.path.splitdrive = self.splitdrive
        sysconfig._CONFIG_VARS = self._config_vars[0]
        sysconfig._CONFIG_VARS.clear()
        sysconfig._CONFIG_VARS.update(self._config_vars[1])
        for var, value in self._changed_envvars:
            os.environ[var] = value
        for var in self._added_envvars:
            os.environ.pop(var, None)
        super(TestSysConfig, self).tearDown()

    def _set_uname(self, uname):
        # Replace the fake uname tuple returned by the patched os.uname.
        self._uname = os.uname_result(uname)

    def _get_uname(self):
        # Stand-in for os.uname, installed by setUp.
        return self._uname

    def _cleanup_testfn(self):
        # TESTFN may have been created as either a file or a directory.
        path = TESTFN
        if os.path.isfile(path):
            os.remove(path)
        elif os.path.isdir(path):
            shutil.rmtree(path)

    def test_get_path_names(self):
        self.assertEqual(get_path_names(), sysconfig._SCHEME_KEYS)

    def test_get_paths(self):
        # get_paths() with no arguments must match expanding the default
        # scheme by hand.
        scheme = get_paths()
        default_scheme = _get_default_scheme()
        wanted = _expand_vars(default_scheme, None)
        wanted = sorted(wanted.items())
        scheme = sorted(scheme.items())
        self.assertEqual(scheme, wanted)

    def test_get_path(self):
        # XXX make real tests here
        # For now just verify that every scheme/name pair resolves without
        # raising; the result itself is not checked.
        for scheme in _INSTALL_SCHEMES:
            for name in _INSTALL_SCHEMES[scheme]:
                res = get_path(name, scheme)

    def test_get_config_vars(self):
        cvars = get_config_vars()
        self.assertIsInstance(cvars, dict)
        self.assertTrue(cvars)

    def test_get_platform(self):
        """Fake os.name/sys.version/uname combinations and check the
        platform string get_platform() derives from them."""
        # windows XP, 32bits
        os.name = 'nt'
        sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
                       '[MSC v.1310 32 bit (Intel)]')
        sys.platform = 'win32'
        self.assertEqual(get_platform(), 'win32')
        # windows XP, amd64
        os.name = 'nt'
        sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
                       '[MSC v.1310 32 bit (Amd64)]')
        sys.platform = 'win32'
        self.assertEqual(get_platform(), 'win-amd64')
        # windows XP, itanium
        os.name = 'nt'
        sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
                       '[MSC v.1310 32 bit (Itanium)]')
        sys.platform = 'win32'
        self.assertEqual(get_platform(), 'win-ia64')
        # macbook
        os.name = 'posix'
        sys.version = ('2.5 (r25:51918, Sep 19 2006, 08:49:13) '
                       '\n[GCC 4.0.1 (Apple Computer, Inc. build 5341)]')
        sys.platform = 'darwin'
        self._set_uname(('Darwin', 'macziade', '8.11.1',
                   ('Darwin Kernel Version 8.11.1: '
                      'Wed Oct 10 18:23:28 PDT 2007; '
                      'root:xnu-792.25.20~1/RELEASE_I386'), 'PowerPC'))
        _osx_support._remove_original_values(get_config_vars())
        get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3'
        get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g '
                                       '-fwrapv -O3 -Wall -Wstrict-prototypes')
        # sys.maxsize is used to tell 32-bit from 64-bit ppc builds apart.
        maxint = sys.maxsize
        try:
            sys.maxsize = 2147483647
            self.assertEqual(get_platform(), 'macosx-10.3-ppc')
            sys.maxsize = 9223372036854775807
            self.assertEqual(get_platform(), 'macosx-10.3-ppc64')
        finally:
            sys.maxsize = maxint
        self._set_uname(('Darwin', 'macziade', '8.11.1',
                   ('Darwin Kernel Version 8.11.1: '
                      'Wed Oct 10 18:23:28 PDT 2007; '
                      'root:xnu-792.25.20~1/RELEASE_I386'), 'i386'))
        _osx_support._remove_original_values(get_config_vars())
        get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3'
        get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g '
                                       '-fwrapv -O3 -Wall -Wstrict-prototypes')
        maxint = sys.maxsize
        try:
            sys.maxsize = 2147483647
            self.assertEqual(get_platform(), 'macosx-10.3-i386')
            sys.maxsize = 9223372036854775807
            self.assertEqual(get_platform(), 'macosx-10.3-x86_64')
        finally:
            sys.maxsize = maxint
        # macbook with fat binaries (fat, universal or fat64)
        _osx_support._remove_original_values(get_config_vars())
        get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.4'
        get_config_vars()['CFLAGS'] = ('-arch ppc -arch i386 -isysroot '
                                       '/Developer/SDKs/MacOSX10.4u.sdk  '
                                       '-fno-strict-aliasing -fno-common '
                                       '-dynamic -DNDEBUG -g -O3')
        self.assertEqual(get_platform(), 'macosx-10.4-fat')
        _osx_support._remove_original_values(get_config_vars())
        get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch i386 -isysroot '
                                       '/Developer/SDKs/MacOSX10.4u.sdk  '
                                       '-fno-strict-aliasing -fno-common '
                                       '-dynamic -DNDEBUG -g -O3')
        self.assertEqual(get_platform(), 'macosx-10.4-intel')
        _osx_support._remove_original_values(get_config_vars())
        get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc -arch i386 -isysroot '
                                       '/Developer/SDKs/MacOSX10.4u.sdk  '
                                       '-fno-strict-aliasing -fno-common '
                                       '-dynamic -DNDEBUG -g -O3')
        self.assertEqual(get_platform(), 'macosx-10.4-fat3')
        _osx_support._remove_original_values(get_config_vars())
        get_config_vars()['CFLAGS'] = ('-arch ppc64 -arch x86_64 -arch ppc -arch i386 -isysroot '
                                       '/Developer/SDKs/MacOSX10.4u.sdk  '
                                       '-fno-strict-aliasing -fno-common '
                                       '-dynamic -DNDEBUG -g -O3')
        self.assertEqual(get_platform(), 'macosx-10.4-universal')
        _osx_support._remove_original_values(get_config_vars())
        get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc64 -isysroot '
                                       '/Developer/SDKs/MacOSX10.4u.sdk  '
                                       '-fno-strict-aliasing -fno-common '
                                       '-dynamic -DNDEBUG -g -O3')
        self.assertEqual(get_platform(), 'macosx-10.4-fat64')
        for arch in ('ppc', 'i386', 'x86_64', 'ppc64'):
            _osx_support._remove_original_values(get_config_vars())
            get_config_vars()['CFLAGS'] = ('-arch %s -isysroot '
                                           '/Developer/SDKs/MacOSX10.4u.sdk  '
                                           '-fno-strict-aliasing -fno-common '
                                           '-dynamic -DNDEBUG -g -O3' % arch)
            self.assertEqual(get_platform(), 'macosx-10.4-%s' % arch)
        # linux debian sarge
        os.name = 'posix'
        sys.version = ('2.3.5 (#1, Jul  4 2007, 17:28:59) '
                       '\n[GCC 4.1.2 20061115 (prerelease) (Debian 4.1.1-21)]')
        sys.platform = 'linux2'
        self._set_uname(('Linux', 'aglae', '2.6.21.1dedibox-r7',
                    '#1 Mon Apr 30 17:25:38 CEST 2007', 'i686'))
        self.assertEqual(get_platform(), 'linux-i686')
        # XXX more platforms to tests here

    def test_get_config_h_filename(self):
        config_h = sysconfig.get_config_h_filename()
        self.assertTrue(os.path.isfile(config_h), config_h)

    def test_get_scheme_names(self):
        wanted = ('nt', 'nt_user', 'osx_framework_user',
                  'posix_home', 'posix_prefix', 'posix_user')
        self.assertEqual(get_scheme_names(), wanted)

    @skip_unless_symlink
    def test_symlink(self):
        """A symlinked interpreter must report the same platform string."""
        # On Windows, the EXE needs to know where pythonXY.dll is at so we have
        # to add the directory to the path.
        if sys.platform == "win32":
            os.environ["PATH"] = "{};{}".format(
                os.path.dirname(sys.executable), os.environ["PATH"])
        # Issue 7880
        def get(python):
            cmd = [python, '-c',
                   'import sysconfig; print(sysconfig.get_platform())']
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=os.environ)
            return p.communicate()
        real = os.path.realpath(sys.executable)
        link = os.path.abspath(TESTFN)
        os.symlink(real, link)
        try:
            self.assertEqual(get(real), get(link))
        finally:
            unlink(link)

    def test_user_similar(self):
        # Issue #8759: make sure the posix scheme for the users
        # is similar to the global posix_prefix one
        base = get_config_var('base')
        user = get_config_var('userbase')
        # the global scheme mirrors the distinction between prefix and
        # exec-prefix but not the user scheme, so we have to adapt the paths
        # before comparing (issue #9100)
        adapt = sys.base_prefix != sys.base_exec_prefix
        for name in ('stdlib', 'platstdlib', 'purelib', 'platlib'):
            global_path = get_path(name, 'posix_prefix')
            if adapt:
                global_path = global_path.replace(sys.exec_prefix, sys.base_prefix)
                base = base.replace(sys.exec_prefix, sys.base_prefix)
            elif sys.base_prefix != sys.prefix:
                # virtual environment? Likewise, we have to adapt the paths
                # before comparing
                global_path = global_path.replace(sys.base_prefix, sys.prefix)
                base = base.replace(sys.base_prefix, sys.prefix)
            user_path = get_path(name, 'posix_user')
            self.assertEqual(user_path, global_path.replace(base, user, 1))

    def test_main(self):
        # just making sure _main() runs and returns things in the stdout
        with captured_stdout() as output:
            _main()
        self.assertTrue(len(output.getvalue().split('\n')) > 0)

    @unittest.skipIf(sys.platform == "win32", "Does not apply to Windows")
    def test_ldshared_value(self):
        # LDSHARED must embed LDFLAGS so user-supplied flags propagate.
        ldflags = sysconfig.get_config_var('LDFLAGS')
        ldshared = sysconfig.get_config_var('LDSHARED')
        self.assertIn(ldflags, ldshared)

    @unittest.skipUnless(sys.platform == "darwin", "test only relevant on MacOSX")
    def test_platform_in_subprocess(self):
        """The platform string must match between this process and a
        subprocess, with and without MACOSX_DEPLOYMENT_TARGET set."""
        my_platform = sysconfig.get_platform()
        # Test without MACOSX_DEPLOYMENT_TARGET in the environment
        env = os.environ.copy()
        if 'MACOSX_DEPLOYMENT_TARGET' in env:
            del env['MACOSX_DEPLOYMENT_TARGET']
        p = subprocess.Popen([
                sys.executable, '-c',
                'import sysconfig; print(sysconfig.get_platform())',
            ],
            stdout=subprocess.PIPE,
            stderr=subprocess.DEVNULL,
            env=env)
        test_platform = p.communicate()[0].strip()
        test_platform = test_platform.decode('utf-8')
        status = p.wait()
        self.assertEqual(status, 0)
        self.assertEqual(my_platform, test_platform)
        # Test with MACOSX_DEPLOYMENT_TARGET in the environment, and
        # using a value that is unlikely to be the default one.
        env = os.environ.copy()
        env['MACOSX_DEPLOYMENT_TARGET'] = '10.1'
        p = subprocess.Popen([
                sys.executable, '-c',
                'import sysconfig; print(sysconfig.get_platform())',
            ],
            stdout=subprocess.PIPE,
            stderr=subprocess.DEVNULL,
            env=env)
        test_platform = p.communicate()[0].strip()
        test_platform = test_platform.decode('utf-8')
        status = p.wait()
        self.assertEqual(status, 0)
        self.assertEqual(my_platform, test_platform)

    def test_srcdir(self):
        # See Issues #15322, #15364.
        srcdir = sysconfig.get_config_var('srcdir')
        self.assertTrue(os.path.isabs(srcdir), srcdir)
        self.assertTrue(os.path.isdir(srcdir), srcdir)
        if sysconfig._PYTHON_BUILD:
            # The python executable has not been installed so srcdir
            # should be a full source checkout.
            Python_h = os.path.join(srcdir, 'Include', 'Python.h')
            self.assertTrue(os.path.exists(Python_h), Python_h)
            self.assertTrue(sysconfig._is_python_source_dir(srcdir))
        elif os.name == 'posix':
            makefile_dir = os.path.dirname(sysconfig.get_makefile_filename())
            # Issue #19340: srcdir has been realpath'ed already
            makefile_dir = os.path.realpath(makefile_dir)
            self.assertEqual(makefile_dir, srcdir)

    def test_srcdir_independent_of_cwd(self):
        # srcdir should be independent of the current working directory
        # See Issues #15322, #15364.
        srcdir = sysconfig.get_config_var('srcdir')
        cwd = os.getcwd()
        try:
            os.chdir('..')
            srcdir2 = sysconfig.get_config_var('srcdir')
        finally:
            os.chdir(cwd)
        self.assertEqual(srcdir, srcdir2)

    @unittest.skipIf(sysconfig.get_config_var('EXT_SUFFIX') is None,
                     'EXT_SUFFIX required for this test')
    def test_SO_deprecation(self):
        # 'SO' is a deprecated alias for 'EXT_SUFFIX'.
        self.assertWarns(DeprecationWarning,
                         sysconfig.get_config_var, 'SO')

    @unittest.skipIf(sysconfig.get_config_var('EXT_SUFFIX') is None,
                     'EXT_SUFFIX required for this test')
    def test_SO_value(self):
        with check_warnings(('', DeprecationWarning)):
            self.assertEqual(sysconfig.get_config_var('SO'),
                             sysconfig.get_config_var('EXT_SUFFIX'))

    @unittest.skipIf(sysconfig.get_config_var('EXT_SUFFIX') is None,
                     'EXT_SUFFIX required for this test')
    def test_SO_in_vars(self):
        vars = sysconfig.get_config_vars()
        self.assertIsNotNone(vars['SO'])
        self.assertEqual(vars['SO'], vars['EXT_SUFFIX'])

    @unittest.skipUnless(sys.platform == 'linux', 'Linux-specific test')
    def test_triplet_in_ext_suffix(self):
        # On Linux the extension suffix should embed the multiarch triplet.
        import ctypes, platform, re
        machine = platform.machine()
        suffix = sysconfig.get_config_var('EXT_SUFFIX')
        if re.match('(aarch64|arm|mips|ppc|powerpc|s390|sparc)', machine):
            self.assertTrue('linux' in suffix, suffix)
        if re.match('(i[3-6]86|x86_64)$', machine):
            if ctypes.sizeof(ctypes.c_char_p()) == 4:
                # 32-bit build: plain i386 or x32 ABI on an x86_64 kernel.
                self.assertTrue(suffix.endswith('i386-linux-gnu.so') \
                                or suffix.endswith('x86_64-linux-gnux32.so'),
                                suffix)
            else: # 8 byte pointer size
                self.assertTrue(suffix.endswith('x86_64-linux-gnu.so'), suffix)

    @unittest.skipUnless(sys.platform == 'darwin', 'OS X-specific test')
    def test_osx_ext_suffix(self):
        suffix = sysconfig.get_config_var('EXT_SUFFIX')
        self.assertTrue(suffix.endswith('-darwin.so'), suffix)
class MakefileTests(unittest.TestCase):
    """Tests for sysconfig's Makefile lookup and parsing helpers."""

    @unittest.skipIf(sys.platform.startswith('win'),
                     'Test is not Windows compatible')
    def test_get_makefile_filename(self):
        # The build Makefile must exist on POSIX installations.
        makefile = sysconfig.get_makefile_filename()
        self.assertTrue(os.path.isfile(makefile), makefile)

    def test_parse_makefile(self):
        self.addCleanup(unlink, TESTFN)
        with open(TESTFN, "w") as makefile:
            print("var1=a$(VAR2)", file=makefile)
            print("VAR2=b$(var3)", file=makefile)
            print("var3=42", file=makefile)
            print("var4=$/invalid", file=makefile)
            print("var5=dollar$$5", file=makefile)
        vars = sysconfig._parse_makefile(TESTFN)
        # Expected: $(NAME) references expand transitively, "$$" collapses
        # to a literal "$", unexpandable "$/" is kept verbatim, and purely
        # numeric values are converted to int.
        self.assertEqual(vars, {
            'var1': 'ab42',
            'VAR2': 'b42',
            'var3': 42,
            'var4': '$/invalid',
            'var5': 'dollar$5',
        })
def test_main():
    # Entry point used by regrtest: run both test cases in this module.
    run_unittest(TestSysConfig, MakefileTests)
# Allow running this test module directly.
if __name__ == "__main__":
    test_main()
| |
# -*- coding: utf-8 -*-
#
import matplotlib as mpl
from . import color
def draw_text(data, obj):
    '''Paints text on the graph.

    Translates a matplotlib Text (or Annotation) object into TikZ source.
    Returns the (possibly updated) ``data`` dict and a list of TikZ code
    strings: an optional ``\\draw`` arrow (for annotations) followed by a
    ``\\node`` carrying the text itself.
    '''
    content = []
    properties = []
    style = []
    if isinstance(obj, mpl.text.Annotation):
        ann_xy = obj.xy
        ann_xycoords = obj.xycoords
        ann_xytext = obj.xyann
        ann_textcoords = obj.anncoords
        if ann_xycoords != 'data' or ann_textcoords != 'data':
            # Only data-coordinate annotations are translatable for now.
            print('Warning: Anything else except for explicit positioning '
                  'is not supported for annotations yet :(')
            return data, content
        else:  # Create a basic tikz arrow
            arrow_style = []
            if obj.arrowprops is not None:
                if obj.arrowprops['arrowstyle'] is not None:
                    if obj.arrowprops['arrowstyle'] in ['-', '->',
                                                        '<-', '<->']:
                        arrow_style.append(obj.arrowprops['arrowstyle'])
                        data, col, _ = color.mpl_color2xcolor(
                            data,
                            obj.arrow_patch.get_ec()
                        )
                        arrow_style.append(col)
            arrow_proto = '\\draw[%s] (axis cs:%.15g,%.15g) ' \
                          '-- (axis cs:%.15g,%.15g);\n'
            the_arrow = arrow_proto % (','.join(arrow_style),
                                       ann_xytext[0], ann_xytext[1],
                                       ann_xy[0], ann_xy[1]
                                       )
            content.append(the_arrow)
    # Anatomy of the emitted node:
    # 1: coordinates
    # 2: properties (shapes, rotation, etc)
    # 3: text style
    # 4: the text
    #    -------1--------2---3--4--
    pos = obj.get_position()
    text = obj.get_text()
    size = obj.get_size()
    bbox = obj.get_bbox_patch()
    converter = mpl.colors.ColorConverter()
    # without the factor 0.5, the fonts are too big most of the time.
    # TODO fix this
    scaling = 0.5 * size / data['font size']
    if scaling != 1.0:
        properties.append('scale=%.15g' % scaling)
    if bbox is not None:
        # Translate the text's surrounding box (fill, frame, shape).
        bbox_style = bbox.get_boxstyle()
        if bbox.get_fill():
            data, fc, _ = color.mpl_color2xcolor(data, bbox.get_facecolor())
            if fc:
                properties.append('fill=%s' % fc)
        data, ec, _ = color.mpl_color2xcolor(data, bbox.get_edgecolor())
        if ec:
            properties.append('draw=%s' % ec)
        # XXX: The 0.4 line-width factor is an empirical fudge; ugly, too.
        properties.append('line width=%.15gpt' % (bbox.get_lw() * 0.4))
        properties.append('inner sep=%.15gpt'
                          % (bbox_style.pad * data['font size'])
                          )
        # Map matplotlib box shapes onto TikZ node shapes/decorations.
        if isinstance(bbox_style, mpl.patches.BoxStyle.Round):
            properties.append('rounded corners')
        elif isinstance(bbox_style, mpl.patches.BoxStyle.RArrow):
            data['tikz libs'].add('shapes.arrows')
            properties.append('single arrow')
        elif isinstance(bbox_style, mpl.patches.BoxStyle.LArrow):
            data['tikz libs'].add('shapes.arrows')
            properties.append('single arrow')
            properties.append('shape border rotate=180')
        elif isinstance(bbox_style, mpl.patches.BoxStyle.DArrow):
            data['tikz libs'].add('shapes.arrows')
            properties.append('double arrow')
        elif isinstance(bbox_style, mpl.patches.BoxStyle.Circle):
            properties.append('circle')
        elif isinstance(bbox_style, mpl.patches.BoxStyle.Roundtooth):
            properties.append('decorate')
            properties.append(
                'decoration={snake,amplitude=0.5,segment length=3}'
            )
        elif isinstance(bbox_style, mpl.patches.BoxStyle.Sawtooth):
            properties.append('decorate')
            properties.append(
                'decoration={zigzag,amplitude=0.5,segment length=3}'
            )
        else:
            # TODO Round4
            assert isinstance(bbox_style, mpl.patches.BoxStyle.Square)
        # Line style of the box frame.
        if bbox.get_ls() == 'dotted':
            properties.append('dotted')
        elif bbox.get_ls() == 'dashed':
            properties.append('dashed')
        # TODO Check if there is there any way to extract the dashdot
        # pattern from matplotlib instead of hardcoding
        # an approximation?
        elif bbox.get_ls() == 'dashdot':
            properties.append(('dash pattern=on %.3gpt off %.3gpt on '
                               '%.3gpt off %.3gpt'
                               ) % (1.0 / scaling, 3.0 / scaling,
                                    6.0 / scaling, 3.0 / scaling)
                              )
        else:
            assert bbox.get_ls() == 'solid'
    ha = obj.get_ha()
    va = obj.get_va()
    anchor = _transform_positioning(ha, va)
    if anchor is not None:
        properties.append(anchor)
    data, col, _ = color.mpl_color2xcolor(
        data,
        converter.to_rgb(obj.get_color())
    )
    properties.append('text=%s' % col)
    properties.append('rotate=%.1f' % obj.get_rotation())
    if obj.get_style() == 'italic':
        style.append('\\itshape')
    else:
        assert obj.get_style() == 'normal'
    # From matplotlib/font_manager.py:
    # weight_dict = {
    #     'ultralight' : 100,
    #     'light'      : 200,
    #     'normal'     : 400,
    #     'regular'    : 400,
    #     'book'       : 400,
    #     'medium'     : 500,
    #     'roman'      : 500,
    #     'semibold'   : 600,
    #     'demibold'   : 600,
    #     'demi'       : 600,
    #     'bold'       : 700,
    #     'heavy'      : 800,
    #     'extra bold' : 800,
    #     'black'      : 900}
    # Anything heavier than 'medium'/'roman' is rendered bold.
    if obj.get_weight() > 550:
        style.append('\\bfseries')
    if obj.axes:
        # If the coordinates are relative to an axis, use `axis cs`.
        tikz_pos = '(axis cs:%.15g,%.15g)' % pos
    else:
        # relative to the entire figure, it's a getting a littler harder. See
        # <http://tex.stackexchange.com/a/274902/13262> for a solution to the
        # problem:
        tikz_pos = (
            '({$(current bounding box.south west)!%.15g!'
            '(current bounding box.south east)$}'
            '|-'
            '{$(current bounding box.south west)!%0.15g!'
            '(current bounding box.north west)$})'
        ) % pos
    if '\n' in text:
        # http://tex.stackexchange.com/a/124114/13262
        properties.append('align=%s' % ha)
        # Manipulating the text here is actually against mpl2tikz's policy not
        # to do that. On the other hand, newlines should translate into
        # newlines.
        # We might want to remove this here in the future.
        text = text.replace('\n ', '\\\\')
    content.append(
        '\\node at %s[\n %s\n]{%s %s};\n' %
        (tikz_pos, ',\n '.join(properties), ' '.join(style), text)
    )
    return data, content
def _transform_positioning(ha, va):
'''Converts matplotlib positioning to pgf node positioning.
Not quite accurate but the results are equivalent more or less.'''
if ha == 'center' and va == 'center':
return None
ha_mpl_to_tikz = {
'right': 'east',
'left': 'west',
'center': ''
}
va_mpl_to_tikz = {
'top': 'north',
'bottom': 'south',
'center': '',
'baseline': 'base'
}
return (
'anchor=%s %s' % (va_mpl_to_tikz[va], ha_mpl_to_tikz[ha])
).strip()
| |
import io
import unittest
import xml.sax
from xml.sax.xmlreader import AttributesImpl
from xml.dom import pulldom
from test.support import findfile
# Sample document (from the CPython test data directory) used by the
# stream-parsing tests below.
tstfile = findfile("test.xml", subdir="xmltestdata")
# A handy XML snippet, containing attributes, a namespace prefix, and a
# self-closing tag:
SMALL_SAMPLE = """<?xml version="1.0"?>
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:xdc="http://www.xml.com/books">
<!-- A comment -->
<title>Introduction to XSL</title>
<hr/>
<p><xdc:author xdc:attrib="prefixed attribute" attrib="other attrib">A. Namespace</xdc:author></p>
</html>"""
class PullDOMTestCase(unittest.TestCase):
    """End-to-end tests of xml.dom.pulldom's event-stream interface."""

    def test_parse(self):
        """Minimal test of DOMEventStream.parse()"""
        # This just tests that parsing from a stream works. Actual parser
        # semantics are tested using parseString with a more focused XML
        # fragment.
        # Test with a filename:
        handler = pulldom.parse(tstfile)
        self.addCleanup(handler.stream.close)
        list(handler)
        # Test with a file object:
        with open(tstfile, "rb") as fin:
            list(pulldom.parse(fin))

    def test_parse_semantics(self):
        """Test DOMEventStream parsing semantics."""
        # Walks SMALL_SAMPLE event by event and checks the exact
        # (event, node) sequence pulldom emits.
        items = pulldom.parseString(SMALL_SAMPLE)
        evt, node = next(items)
        # Just check the node is a Document:
        self.assertTrue(hasattr(node, "createElement"))
        self.assertEqual(pulldom.START_DOCUMENT, evt)
        evt, node = next(items)
        self.assertEqual(pulldom.START_ELEMENT, evt)
        self.assertEqual("html", node.tagName)
        self.assertEqual(2, len(node.attributes))
        self.assertEqual(node.attributes.getNamedItem("xmlns:xdc").value,
                         "http://www.xml.com/books")
        evt, node = next(items)
        self.assertEqual(pulldom.CHARACTERS, evt)  # Line break
        evt, node = next(items)
        # XXX - A comment should be reported here!
        # self.assertEqual(pulldom.COMMENT, evt)
        # Line break after swallowed comment:
        self.assertEqual(pulldom.CHARACTERS, evt)
        evt, node = next(items)
        self.assertEqual("title", node.tagName)
        title_node = node
        evt, node = next(items)
        self.assertEqual(pulldom.CHARACTERS, evt)
        self.assertEqual("Introduction to XSL", node.data)
        evt, node = next(items)
        self.assertEqual(pulldom.END_ELEMENT, evt)
        self.assertEqual("title", node.tagName)
        # The END_ELEMENT event must carry the very same node object:
        self.assertTrue(title_node is node)
        evt, node = next(items)
        self.assertEqual(pulldom.CHARACTERS, evt)
        evt, node = next(items)
        self.assertEqual(pulldom.START_ELEMENT, evt)
        self.assertEqual("hr", node.tagName)
        evt, node = next(items)
        self.assertEqual(pulldom.END_ELEMENT, evt)
        self.assertEqual("hr", node.tagName)
        evt, node = next(items)
        self.assertEqual(pulldom.CHARACTERS, evt)
        evt, node = next(items)
        self.assertEqual(pulldom.START_ELEMENT, evt)
        self.assertEqual("p", node.tagName)
        evt, node = next(items)
        self.assertEqual(pulldom.START_ELEMENT, evt)
        self.assertEqual("xdc:author", node.tagName)
        evt, node = next(items)
        self.assertEqual(pulldom.CHARACTERS, evt)
        evt, node = next(items)
        self.assertEqual(pulldom.END_ELEMENT, evt)
        self.assertEqual("xdc:author", node.tagName)
        evt, node = next(items)
        self.assertEqual(pulldom.END_ELEMENT, evt)
        evt, node = next(items)
        self.assertEqual(pulldom.CHARACTERS, evt)
        evt, node = next(items)
        self.assertEqual(pulldom.END_ELEMENT, evt)
        # XXX No END_DOCUMENT item is ever obtained:
        #evt, node = next(items)
        #self.assertEqual(pulldom.END_DOCUMENT, evt)

    def test_expandItem(self):
        """Ensure expandItem works as expected."""
        items = pulldom.parseString(SMALL_SAMPLE)
        # Loop through the nodes until we get to a "title" start tag:
        for evt, item in items:
            if evt == pulldom.START_ELEMENT and item.tagName == "title":
                items.expandNode(item)
                self.assertEqual(1, len(item.childNodes))
                break
        else:
            self.fail("No \"title\" element detected in SMALL_SAMPLE!")
        # Loop until we get to the next start-element:
        for evt, node in items:
            if evt == pulldom.START_ELEMENT:
                break
        self.assertEqual("hr", node.tagName,
                         "expandNode did not leave DOMEventStream in the correct state.")
        # Attempt to expand a standalone element:
        items.expandNode(node)
        self.assertEqual(next(items)[0], pulldom.CHARACTERS)
        evt, node = next(items)
        self.assertEqual(node.tagName, "p")
        items.expandNode(node)
        next(items)  # Skip character data
        evt, node = next(items)
        self.assertEqual(node.tagName, "html")
        with self.assertRaises(StopIteration):
            next(items)
        # clear() must release the parser and the stream:
        items.clear()
        self.assertIsNone(items.parser)
        self.assertIsNone(items.stream)

    @unittest.expectedFailure
    def test_comment(self):
        """PullDOM does not receive "comment" events."""
        items = pulldom.parseString(SMALL_SAMPLE)
        for evt, _ in items:
            if evt == pulldom.COMMENT:
                break
        else:
            self.fail("No comment was encountered")

    @unittest.expectedFailure
    def test_end_document(self):
        """PullDOM does not receive "end-document" events."""
        items = pulldom.parseString(SMALL_SAMPLE)
        # Read all of the nodes up to and including </html>:
        for evt, node in items:
            if evt == pulldom.END_ELEMENT and node.tagName == "html":
                break
        try:
            # Assert that the next node is END_DOCUMENT:
            evt, node = next(items)
            self.assertEqual(pulldom.END_DOCUMENT, evt)
        except StopIteration:
            self.fail(
                "Ran out of events, but should have received END_DOCUMENT")
class ThoroughTestCase(unittest.TestCase):
    """Test the hard-to-reach parts of pulldom."""

    def test_thorough_parse(self):
        """Test some of the hard-to-reach parts of PullDOM."""
        self._test_thorough(pulldom.parse(None, parser=SAXExerciser()))

    @unittest.expectedFailure
    def test_sax2dom_fail(self):
        """SAX2DOM can"t handle a PI before the root element."""
        pd = SAX2DOMTestHelper(None, SAXExerciser(), 12)
        self._test_thorough(pd)

    def test_thorough_sax2dom(self):
        """Test some of the hard-to-reach parts of SAX2DOM."""
        pd = SAX2DOMTestHelper(None, SAX2DOMExerciser(), 12)
        self._test_thorough(pd, False)

    def _test_thorough(self, pd, before_root=True):
        """Test some of the hard-to-reach parts of the parser, using a mock
        parser.

        ``before_root`` selects whether the comment/PI pair emitted before
        the root element by SAXExerciser is expected in the stream.
        """
        evt, node = next(pd)
        self.assertEqual(pulldom.START_DOCUMENT, evt)
        # Just check the node is a Document:
        self.assertTrue(hasattr(node, "createElement"))
        if before_root:
            evt, node = next(pd)
            self.assertEqual(pulldom.COMMENT, evt)
            self.assertEqual("a comment", node.data)
            evt, node = next(pd)
            self.assertEqual(pulldom.PROCESSING_INSTRUCTION, evt)
            self.assertEqual("target", node.target)
            self.assertEqual("data", node.data)
        evt, node = next(pd)
        self.assertEqual(pulldom.START_ELEMENT, evt)
        self.assertEqual("html", node.tagName)
        evt, node = next(pd)
        self.assertEqual(pulldom.COMMENT, evt)
        self.assertEqual("a comment", node.data)
        evt, node = next(pd)
        self.assertEqual(pulldom.PROCESSING_INSTRUCTION, evt)
        self.assertEqual("target", node.target)
        self.assertEqual("data", node.data)
        evt, node = next(pd)
        self.assertEqual(pulldom.START_ELEMENT, evt)
        self.assertEqual("p", node.tagName)
        evt, node = next(pd)
        self.assertEqual(pulldom.CHARACTERS, evt)
        self.assertEqual("text", node.data)
        evt, node = next(pd)
        self.assertEqual(pulldom.END_ELEMENT, evt)
        self.assertEqual("p", node.tagName)
        evt, node = next(pd)
        self.assertEqual(pulldom.END_ELEMENT, evt)
        self.assertEqual("html", node.tagName)
        evt, node = next(pd)
        self.assertEqual(pulldom.END_DOCUMENT, evt)
class SAXExerciser(object):
    """A fake SAX parser that drives its content handler through some of
    the harder-to-reach SAX callbacks so the emitted events can be
    checked."""

    def setContentHandler(self, handler):
        # Remember where to deliver the synthetic SAX events.
        self._handler = handler

    def parse(self, _):
        handler = self._handler
        # The comment/PI pair before "html" ensures that events preceding
        # the first start element are properly stored and emitted.
        script = [
            ("startDocument",),
            ("comment", "a comment"),
            ("processingInstruction", "target", "data"),
            ("startElement", "html", AttributesImpl({})),
            ("comment", "a comment"),
            ("processingInstruction", "target", "data"),
            ("startElement", "p", AttributesImpl({"class": "paraclass"})),
            ("characters", "text"),
            ("endElement", "p"),
            ("endElement", "html"),
            ("endDocument",),
        ]
        for event in script:
            getattr(handler, event[0])(*event[1:])

    def stub(self, *args, **kwargs):
        """Stub method. Does nothing."""
        pass

    setProperty = stub
    setFeature = stub
class SAX2DOMExerciser(SAXExerciser):
    """Like SAXExerciser, but without the processing instruction and
    comment before the root element, because SAX2DOM can't handle
    pre-root events."""

    def parse(self, _):
        handler = self._handler
        script = [
            ("startDocument",),
            ("startElement", "html", AttributesImpl({})),
            ("comment", "a comment"),
            ("processingInstruction", "target", "data"),
            ("startElement", "p", AttributesImpl({"class": "paraclass"})),
            ("characters", "text"),
            ("endElement", "p"),
            ("endElement", "html"),
            ("endDocument",),
        ]
        for event in script:
            getattr(handler, event[0])(*event[1:])
class SAX2DOMTestHelper(pulldom.DOMEventStream):
    """Allows us to drive SAX2DOM from a DOMEventStream."""

    def reset(self):
        """Install a fresh SAX2DOM content handler on the parser."""
        handler = pulldom.SAX2DOM()
        self.pulldom = handler
        # SAX2DOM relies on namespace support being switched on.
        self.parser.setFeature(xml.sax.handler.feature_namespaces, 1)
        self.parser.setContentHandler(handler)
class SAX2DOMTestCase(unittest.TestCase):
    """Tests for the SAX2DOM content handler."""

    def confirm(self, test, testname="Test"):
        self.assertTrue(test, testname)

    def test_basic(self):
        """Ensure SAX2DOM can parse from a stream."""
        with io.StringIO(SMALL_SAMPLE) as fin:
            sd = SAX2DOMTestHelper(fin, xml.sax.make_parser(),
                                   len(SMALL_SAMPLE))
            for evt, node in sd:
                if evt == pulldom.START_ELEMENT and node.tagName == "html":
                    break
            # Because the buffer is the same length as the XML, all the
            # nodes should have been parsed and added:
            self.assertGreater(len(node.childNodes), 0)

    def testSAX2DOM(self):
        """Ensure SAX2DOM expands nodes as expected."""
        # Build <doc>text<subelm>text</subelm>text</doc> by hand via the
        # SAX callbacks, then check sibling/parent links on the DOM.
        sax2dom = pulldom.SAX2DOM()
        sax2dom.startDocument()
        sax2dom.startElement("doc", {})
        sax2dom.characters("text")
        sax2dom.startElement("subelm", {})
        sax2dom.characters("text")
        sax2dom.endElement("subelm")
        sax2dom.characters("text")
        sax2dom.endElement("doc")
        sax2dom.endDocument()
        doc = sax2dom.document
        root = doc.documentElement
        (text1, elm1, text2) = root.childNodes
        text3 = elm1.childNodes[0]
        self.assertIsNone(text1.previousSibling)
        self.assertIs(text1.nextSibling, elm1)
        self.assertIs(elm1.previousSibling, text1)
        self.assertIs(elm1.nextSibling, text2)
        self.assertIs(text2.previousSibling, elm1)
        self.assertIsNone(text2.nextSibling)
        self.assertIsNone(text3.previousSibling)
        self.assertIsNone(text3.nextSibling)
        self.assertIs(root.parentNode, doc)
        self.assertIs(text1.parentNode, root)
        self.assertIs(elm1.parentNode, root)
        self.assertIs(text2.parentNode, root)
        self.assertIs(text3.parentNode, elm1)
        doc.unlink()
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| |
# (c) 2017, Red Hat, inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import hashlib
import os
import string
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.inventory.group import to_safe_group_name as original_safe
from ansible.parsing.utils.addresses import parse_address
from ansible.plugins import AnsiblePlugin
from ansible.plugins.cache import CachePluginAdjudicator as CacheObject
from ansible.module_utils._text import to_bytes, to_native
from ansible.module_utils.common._collections_compat import Mapping
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.six import string_types
from ansible.template import Templar
from ansible.utils.display import Display
from ansible.utils.vars import combine_vars
display = Display()
# Helper methods
def to_safe_group_name(name):
    # placeholder for backwards compat: delegates to the canonical group-name
    # sanitizer, forcing replacement of invalid characters and silencing
    # its warning output.
    return original_safe(name, force=True, silent=True)
def detect_range(line=None):
    '''
    Report whether the given host line contains a ``[begin:end]``-style
    range pattern (see expand_hostname_range for the full syntax).

    Only the opening bracket is checked here; actual validation of the
    range happens during expansion.  Returns True when a pattern marker
    is present, else False.
    '''
    return '[' in line
def expand_hostname_range(line=None):
    '''
    Expand a hostname pattern such as ``db[1:6]-node`` into the list of
    concrete hostnames it describes.

    A pattern consists of a head ('db'), a ``[begin:end]`` or
    ``[begin:end:step]`` range (numeric or alphabetic), and a tail
    ('-node').  Multiple ranges per host are supported
    (e.g. ``db[01:10:3]node-[01:10]``): only the first ``[...]`` group is
    expanded here and any remaining ranges are handled recursively.
    Zero-padded numeric bounds such as ``[01:10]`` preserve their width
    in the generated names.

    :arg line: the host pattern to expand; falsy input yields an empty list.
    :returns: list of expanded hostnames.
    :raises AnsibleError: if the range is malformed (wrong number of
        bounds, missing end value, unequal zero-padded widths, or an
        alphabetic range with begin > end).

    References: https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html#hosts-and-groups
    '''
    all_hosts = []
    if line:
        # Split off the first [...] group only; the tail may contain further
        # ranges, which are expanded by the recursive call at the bottom.
        (head, nrange, tail) = line.replace('[', '|', 1).replace(']', '|', 1).split('|')
        bounds = nrange.split(":")
        if len(bounds) not in (2, 3):
            raise AnsibleError("host range must be begin:end or begin:end:step")
        beg = bounds[0]
        end = bounds[1]
        # Optional third field is the step (default: 1).
        step = bounds[2] if len(bounds) == 3 else 1
        if not beg:
            beg = "0"
        if not end:
            raise AnsibleError("host range must specify end value")
        if beg.startswith('0') and len(beg) > 1:
            # Zero-padded numeric range: preserve the fixed width, e.g.
            # [01:09] produces 01, 02, ... 09.
            rlen = len(beg)  # range length formatting hint
            if rlen != len(end):
                raise AnsibleError("host range must specify equal-length begin and end formats")

            def fill(x):
                # Pad each sequence item to the hinted width.
                return str(x).zfill(rlen)
        else:
            fill = str
        try:
            # Alphabetic range, e.g. [a:f]; non-letters raise ValueError.
            i_beg = string.ascii_letters.index(beg)
            i_end = string.ascii_letters.index(end)
            if i_beg > i_end:
                raise AnsibleError("host range must have begin <= end")
            seq = list(string.ascii_letters[i_beg:i_end + 1:int(step)])
        except ValueError:  # not an alpha range; treat the bounds as numeric
            seq = range(int(beg), int(end) + 1, int(step))
        for rseq in seq:
            hname = ''.join((head, fill(rseq), tail))
            if '[' in hname:  # further ranges remain (cf. detect_range)
                all_hosts.extend(expand_hostname_range(hname))
            else:
                all_hosts.append(hname)
    return all_hosts
def get_cache_plugin(plugin_name, **kwargs):
    """Instantiate the named cache plugin through the adjudicator.

    Translates the common 'no cache connection configured' failure into a
    friendlier error, and rejects option-bearing plugins that do not
    support set_options-style configuration.
    """
    try:
        cache = CacheObject(plugin_name, **kwargs)
    except AnsibleError as e:
        if 'fact_caching_connection' not in to_native(e):
            raise e
        # The plugin needs a writable directory and none was configured.
        raise AnsibleError("error, '%s' inventory cache plugin requires the one of the following to be set "
                           "to a writeable directory path:\nansible.cfg:\n[default]: fact_caching_connection,\n"
                           "[inventory]: cache_connection;\nEnvironment:\nANSIBLE_INVENTORY_CACHE_CONNECTION,\n"
                           "ANSIBLE_CACHE_PLUGIN_CONNECTION." % plugin_name)
    plugin_lacks_options = not getattr(cache._plugin, '_options', None)
    if plugin_name != 'memory' and kwargs and plugin_lacks_options:
        raise AnsibleError('Unable to use cache plugin {0} for inventory. Cache options were provided but may not reconcile '
                           'correctly unless set via set_options. Refer to the porting guide if the plugin derives user settings '
                           'from ansible.constants.'.format(plugin_name))
    return cache
class BaseInventoryPlugin(AnsiblePlugin):
    """ Parses an Inventory Source"""

    # Plugins of this type produce inventory programmatically.
    TYPE = 'generator'
    # Sanitizer applied to constructed group names (see to_safe_group_name).
    _sanitize_group_name = staticmethod(to_safe_group_name)

    def __init__(self):
        super(BaseInventoryPlugin, self).__init__()
        # Filled in by parse(); kept here so subclasses can rely on them.
        self._options = {}
        self.inventory = None
        self.display = display

    def parse(self, inventory, loader, path, cache=True):
        ''' Populates inventory from the given data. Raises an error on any parse failure
            :arg inventory: a copy of the previously accumulated inventory data,
                 to be updated with any new data this plugin provides.
                 The inventory can be empty if no other source/plugin ran successfully.
            :arg loader: a reference to the DataLoader, which can read in YAML and JSON files,
                 it also has Vault support to automatically decrypt files.
            :arg path: the string that represents the 'inventory source',
                 normally a path to a configuration file for this inventory,
                 but it can also be a raw string for this plugin to consume
            :arg cache: a boolean that indicates if the plugin should use the cache or not
                 you can ignore if this plugin does not implement caching.
        '''
        self.loader = loader
        self.inventory = inventory
        self.templar = Templar(loader=loader)

    def verify_file(self, path):
        ''' Verify if file is usable by this plugin, base does minimal accessibility check
            :arg path: a string that was passed as an inventory source,
                 it normally is a path to a config file, but this is not a requirement,
                 it can also be parsed itself as the inventory data to process.
                 So only call this base class if you expect it to be a file.
        '''
        valid = False
        b_path = to_bytes(path, errors='surrogate_or_strict')
        if (os.path.exists(b_path) and os.access(b_path, os.R_OK)):
            valid = True
        else:
            self.display.vvv('Skipping due to inventory source not existing or not being readable by the current user')
        return valid

    def _populate_host_vars(self, hosts, variables, group=None, port=None):
        # Add each host (optionally into a group / with a port) and attach
        # every variable from the mapping to it.
        if not isinstance(variables, Mapping):
            raise AnsibleParserError("Invalid data from file, expected dictionary and got:\n\n%s" % to_native(variables))
        for host in hosts:
            self.inventory.add_host(host, group=group, port=port)
            for k in variables:
                self.inventory.set_variable(host, k, variables[k])

    def _read_config_data(self, path):
        ''' validate config and set options as appropriate
            :arg path: path to common yaml format config file for this plugin
        '''
        config = {}
        try:
            # avoid loader cache so meta: refresh_inventory can pick up config changes
            # if we read more than once, fs cache should be good enough
            config = self.loader.load_from_file(path, cache=False)
        except Exception as e:
            raise AnsibleParserError(to_native(e))
        if not config:
            # no data
            raise AnsibleParserError("%s is empty" % (to_native(path)))
        elif config.get('plugin') != self.NAME:
            # this is not my config file
            raise AnsibleParserError("Incorrect plugin name in file: %s" % config.get('plugin', 'none found'))
        elif not isinstance(config, Mapping):
            # configs are dictionaries
            raise AnsibleParserError('inventory source has invalid structure, it should be a dictionary, got: %s' % type(config))
        self.set_options(direct=config)
        # Wire up the configured cache plugin when caching is enabled.
        if 'cache' in self._options and self.get_option('cache'):
            cache_option_keys = [('_uri', 'cache_connection'), ('_timeout', 'cache_timeout'), ('_prefix', 'cache_prefix')]
            cache_options = dict((opt[0], self.get_option(opt[1])) for opt in cache_option_keys if self.get_option(opt[1]))
            self._cache = get_cache_plugin(self.get_option('cache_plugin'), **cache_options)
        return config

    def _consume_options(self, data):
        ''' update existing options from alternate configuration sources not normally used by Ansible.
            Many API libraries already have existing configuration sources, this allows plugin author to leverage them.
            :arg data: key/value pairs that correspond to configuration options for this plugin
        '''
        for k in self._options:
            if k in data:
                # Consume (pop) so callers can see which keys were used.
                self._options[k] = data.pop(k)

    def _expand_hostpattern(self, hostpattern):
        '''
        Takes a single host pattern and returns a list of hostnames and an
        optional port number that applies to all of them.
        '''
        # Can the given hostpattern be parsed as a host with an optional port
        # specification?
        try:
            (pattern, port) = parse_address(hostpattern, allow_ranges=True)
        except Exception:
            # not a recognizable host pattern
            pattern = hostpattern
            port = None
        # Once we have separated the pattern, we expand it into list of one or
        # more hostnames, depending on whether it contains any [x:y] ranges.
        if detect_range(pattern):
            hostnames = expand_hostname_range(pattern)
        else:
            hostnames = [pattern]
        return (hostnames, port)
class BaseFileInventoryPlugin(BaseInventoryPlugin):
    """ Parses a File based Inventory Source"""

    # File-backed sources are storage rather than generators.
    TYPE = 'storage'

    def __init__(self):
        super(BaseFileInventoryPlugin, self).__init__()
class DeprecatedCache(object):
    """Shim around a Cacheable's ``_cache`` that emits deprecation warnings
    for the old ``self.cache`` access style while delegating everything to
    the real cache."""

    def __init__(self, real_cacheable):
        # The Cacheable instance whose _cache this shim fronts.
        self.real_cacheable = real_cacheable

    def get(self, key):
        display.deprecated('InventoryModule should utilize self._cache as a dict instead of self.cache. '
                           'When expecting a KeyError, use self._cache[key] instead of using self.cache.get(key). '
                           'self._cache is a dictionary and will return a default value instead of raising a KeyError '
                           'when the key does not exist', version='2.12')
        # NOTE: raises KeyError on a missing key, matching the old contract.
        return self.real_cacheable._cache[key]

    def set(self, key, value):
        display.deprecated('InventoryModule should utilize self._cache as a dict instead of self.cache. '
                           'To set the self._cache dictionary, use self._cache[key] = value instead of self.cache.set(key, value). '
                           'To force update the underlying cache plugin with the contents of self._cache before parse() is complete, '
                           'call self.set_cache_plugin and it will use the self._cache dictionary to update the cache plugin', version='2.12')
        self.real_cacheable._cache[key] = value
        # Old .set() also flushed through to the backing cache plugin.
        self.real_cacheable.set_cache_plugin()

    def __getattr__(self, name):
        # Any other attribute access falls through to the real cache.
        display.deprecated('InventoryModule should utilize self._cache instead of self.cache', version='2.12')
        return self.real_cacheable._cache.__getattribute__(name)
class Cacheable(object):
    """Mixin adding cache-plugin support to inventory plugins."""

    # NOTE(review): class-level attribute, so this CacheObject is shared by
    # all Cacheable instances until load_cache_plugin() rebinds it per
    # instance — presumably intentional as a default; confirm before changing.
    _cache = CacheObject()

    @property
    def cache(self):
        # Deprecated access path; see DeprecatedCache for the warnings.
        return DeprecatedCache(self)

    def load_cache_plugin(self):
        # Build the configured cache plugin from the plugin's cache options.
        plugin_name = self.get_option('cache_plugin')
        cache_option_keys = [('_uri', 'cache_connection'), ('_timeout', 'cache_timeout'), ('_prefix', 'cache_prefix')]
        cache_options = dict((opt[0], self.get_option(opt[1])) for opt in cache_option_keys if self.get_option(opt[1]))
        self._cache = get_cache_plugin(plugin_name, **cache_options)

    def get_cache_key(self, path):
        # Cache key is the plugin name plus a digest-based prefix of the path.
        return "{0}_{1}".format(self.NAME, self._get_cache_prefix(path))

    def _get_cache_prefix(self, path):
        ''' create predictable unique prefix for plugin/inventory '''
        m = hashlib.sha1()
        m.update(to_bytes(self.NAME, errors='surrogate_or_strict'))
        d1 = m.hexdigest()
        n = hashlib.sha1()
        n.update(to_bytes(path, errors='surrogate_or_strict'))
        d2 = n.hexdigest()
        # Joins the two 5-char digest prefixes with the literal 's_'
        # separator; existing cache keys depend on this exact format.
        return 's_'.join([d1[:5], d2[:5]])

    def clear_cache(self):
        self._cache.flush()

    def update_cache_if_changed(self):
        self._cache.update_cache_if_changed()

    def set_cache_plugin(self):
        # Push the current contents of self._cache into the backing plugin.
        self._cache.set_cache()
class Constructable(object):
    """Mixin providing 'constructed'-style features: composed variables,
    conditional groups, and keyed groups."""

    def _compose(self, template, variables):
        ''' helper method for plugins to compose variables for Ansible based on jinja2 expression and inventory vars'''
        t = self.templar
        t.available_variables = variables
        # Wrap the bare expression in the templar's variable delimiters so
        # it is evaluated as a Jinja2 expression; lookups are disabled.
        return t.template('%s%s%s' % (t.environment.variable_start_string, template, t.environment.variable_end_string), disable_lookups=True)

    def _set_composite_vars(self, compose, variables, host, strict=False):
        ''' loops over compose entries to create vars for hosts '''
        if compose and isinstance(compose, dict):
            for varname in compose:
                try:
                    composite = self._compose(compose[varname], variables)
                except Exception as e:
                    if strict:
                        raise AnsibleError("Could not set %s for host %s: %s" % (varname, host, to_native(e)))
                    # non-strict: skip entries that fail to template
                    continue
                self.inventory.set_variable(host, varname, composite)

    def _add_host_to_composed_groups(self, groups, variables, host, strict=False):
        ''' helper to create complex groups for plugins based on jinja2 conditionals, hosts that meet the conditional are added to group'''
        # process each 'group entry'
        if groups and isinstance(groups, dict):
            variables = combine_vars(variables, self.inventory.get_host(host).get_vars())
            self.templar.available_variables = variables
            for group_name in groups:
                # Wrap the user expression so it always renders True/False.
                conditional = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % groups[group_name]
                group_name = original_safe(group_name, force=True)
                try:
                    result = boolean(self.templar.template(conditional))
                except Exception as e:
                    if strict:
                        raise AnsibleParserError("Could not add host %s to group %s: %s" % (host, group_name, to_native(e)))
                    continue
                if result:
                    # ensure group exists, use sanitized name
                    group_name = self.inventory.add_group(group_name)
                    # add host to group
                    self.inventory.add_child(group_name, host)

    def _add_host_to_keyed_groups(self, keys, variables, host, strict=False):
        ''' helper to create groups for plugins based on variable values and add the corresponding hosts to it'''
        if keys and isinstance(keys, list):
            for keyed in keys:
                if keyed and isinstance(keyed, dict):
                    variables = combine_vars(variables, self.inventory.get_host(host).get_vars())
                    try:
                        key = self._compose(keyed.get('key'), variables)
                    except Exception as e:
                        if strict:
                            raise AnsibleParserError("Could not generate group for host %s from %s entry: %s" % (host, keyed.get('key'), to_native(e)))
                        continue
                    if key:
                        prefix = keyed.get('prefix', '')
                        sep = keyed.get('separator', '_')
                        raw_parent_name = keyed.get('parent_group', None)
                        if raw_parent_name:
                            try:
                                # parent_group may itself be a template
                                raw_parent_name = self.templar.template(raw_parent_name)
                            except AnsibleError as e:
                                if strict:
                                    raise AnsibleParserError("Could not generate parent group %s for group %s: %s" % (raw_parent_name, key, to_native(e)))
                                continue
                        # A key may yield one name (string), several (list),
                        # or key/value pairs (mapping) joined by the separator.
                        new_raw_group_names = []
                        if isinstance(key, string_types):
                            new_raw_group_names.append(key)
                        elif isinstance(key, list):
                            for name in key:
                                new_raw_group_names.append(name)
                        elif isinstance(key, Mapping):
                            for (gname, gval) in key.items():
                                name = '%s%s%s' % (gname, sep, gval)
                                new_raw_group_names.append(name)
                        else:
                            raise AnsibleParserError("Invalid group name format, expected a string or a list of them or dictionary, got: %s" % type(key))
                        for bare_name in new_raw_group_names:
                            gname = self._sanitize_group_name('%s%s%s' % (prefix, sep, bare_name))
                            result_gname = self.inventory.add_group(gname)
                            self.inventory.add_host(host, result_gname)
                            if raw_parent_name:
                                parent_name = self._sanitize_group_name(raw_parent_name)
                                self.inventory.add_group(parent_name)
                                self.inventory.add_child(parent_name, result_gname)
                    else:
                        # exclude case of empty list and dictionary, because these are valid constructions
                        # simply no groups need to be constructed, but are still falsy
                        if strict and key not in ([], {}):
                            raise AnsibleParserError("No key or key resulted empty for %s in host %s, invalid entry" % (keyed.get('key'), host))
                else:
                    raise AnsibleParserError("Invalid keyed group entry, it must be a dictionary: %s " % keyed)
| |
import os
from os.path import join
import traceback
from bs4 import BeautifulSoup
from nose.plugins import Plugin
class AdvancedLogging(Plugin):
    """Nose plugin that writes an HTML report of test results, optionally
    attaching a screenshot for each failure."""
    name = "advanced-logging"  # enables the --with-advanced-logging switch
    enabled = False  # switched on in configure() via --advanced-logging
    capture_screen = True  # take a screenshot on failure unless disabled
    score = 1  # presumably plugin precedence — see nose plugin docs
    _log_path = join(os.getcwd(), 'test_output')  # where report/screenshots go
    _script_path = None
def __init__(self):
    """Build the skeleton HTML report document the plugin appends to."""
    super(AdvancedLogging, self).__init__()
    # Skeleton report; results are appended into <body> as the run
    # progresses.  FIX: the body tag was previously written as
    # '<body><body>' (unclosed), leaving a stray nested body element
    # in every generated report.
    html_template = """
    <html>
    <head>
    <title></title>
    <style type="text/css">
    .header {
    font-weight: bold;
    }
    span.fail {
    color: red;
    }
    span.error {
    color: orange;
    }
    span.pass {
    color: green;
    }
    </style>
    </head>
    <body></body>
    </html>
    """
    self.soup = BeautifulSoup(html_template)
    # All per-test markup is appended under the (single) body element.
    self.html = self.soup.body
    title = self.soup.title
    title.string = 'Advanced log'
    self.fieldset = None
def options(self, parser, env=os.environ):
parser.add_option(
"--advanced-logging", action="store_true",
dest="advancedlogging",
default=False,
help="Optional: This will enable advanced logging.")
parser.add_option(
"--disable-capture-screen", action="store_false",
dest="disablecapturescreen",
default=True,
help="Optional: This will disable capture screen on failure.")
parser.add_option(
"--advanced-log-filename", action="store",
default='AdvancedLog.html',
dest="advancedlogfilename",
help="Optional: Advanced log filename, e.g. Result.html"
"default is AdvancedLog.html")
def configure(self, options, conf):
if not options.advancedlogging:
return
self.enabled = True
self.capture_screen = options.disablecapturescreen
self.html_filename = options.advancedlogfilename
super(AdvancedLogging, self).configure(options, conf)
def addFailure(self, test, err):
err = self.formatErr(err)
span = self.soup.new_tag('span')
span.string = 'FAIL'
span['class'] = 'header fail'
self.testdiv.append(span)
hr = self.soup.new_tag('hr')
self.testdiv.append(hr)
try:
if self.capture_screen:
filename = '%s.png' % test.address()[2]
full_filename = join(self._log_path, filename)
driver = test.context.uidriver.webdriver
driver.get_screenshot_as_file(full_filename)
print 'Screenshot was captured %s' % full_filename
a = self.soup.new_tag('a')
a['href'] = filename
a['target'] = '_blank'
img = self.soup.new_tag('img')
img['src'] = filename
img['alt'] = filename
img['title'] = filename
img['width'] = '800px'
img['border'] = '1'
a.append(img)
self.testdiv.append(a)
except:
pass
pre = self.soup.new_tag('pre')
pre.string = err
self.testdiv.append(pre)
def addSuccess(self, test):
span = self.soup.new_tag('span')
span.string = 'OK'
span['class'] = 'header pass'
self.testdiv.append(span)
hr = self.soup.new_tag('hr')
self.testdiv.append(hr)
def addError(self, test, err):
try:
err = self.formatErr(err)
span = self.soup.new_tag('span')
span.string = 'ERROR'
span['class'] = 'header error'
self.testdiv.append(span)
hr = self.soup.new_tag('hr')
self.testdiv.append(hr)
pre = self.soup.new_tag('pre')
pre.string = err
self.testdiv.append(pre)
except:
pass
def finalize(self, result):
br = self.soup.new_tag('br')
self.html.append(br)
div1 = self.soup.new_tag('div')
div2 = self.soup.new_tag('div')
self.html.append(div1)
div1.string = "Ran %d test%s" % \
(result.testsRun, result.testsRun != 1 and 's' or '')
self.html.append(div2)
span = self.soup.new_tag('span')
div2.append(span)
if not result.wasSuccessful():
span2 = self.soup.new_tag('span')
span.string = 'FAILED'
span['class'] = 'header fail'
span2.string = '(failures=%d errors=%d)' %\
(len(result.failures), len(result.errors))
div2.append(span2)
else:
span.string = 'OK'
span['class'] = 'header pass'
full_html_filename = join(self._log_path, self.html_filename)
with open(full_html_filename, 'w') as html_file:
str_html = self.soup.prettify()
html_file.write(str_html)
def formatErr(self, err):
exctype, value, tb = err
return ''.join(traceback.format_exception(exctype, value, tb))
def startContext(self, ctx):
if hasattr(ctx, '__file__'):
self._script_path = ctx.__file__.replace('.pyc', '.py')
return
try:
n = ctx.__name__
except AttributeError:
n = str(ctx).replace('<', '').replace('>', '')
self.fieldset = self.soup.new_tag('fieldset')
legend = self.soup.new_tag('legend')
span1 = self.soup.new_tag('span')
span1.string = n
span1['class'] = 'header'
legend.append(span1)
if self._script_path:
span2 = self.soup.new_tag('span')
span2.string = '(%s)' % self._script_path
legend.append(span2)
self.fieldset.append(legend)
self.html.append(self.fieldset)
def stopContext(self, ctx):
self.fieldset = None
def startTest(self, test):
self.testdiv = self.soup.new_tag('div')
hr = self.soup.new_tag('hr')
self.testdiv.append(hr)
span = self.soup.new_tag('span')
span.string = test.shortDescription() or str(test)
span['class'] = 'header'
self.testdiv.append(span)
self.fieldset.append(self.testdiv)
| |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from sentry.utils.query import RangeQuerySetWrapperWithProgressBar
class Migration(DataMigration):
    def forwards(self, orm):
        """Backfill ReleaseCommit.organization_id from the owning Project.

        Iterates every ReleaseCommit (with a progress bar) and issues one
        UPDATE per row, keyed by primary key, copying the organization_id
        of the commit's project.
        """
        for rc in RangeQuerySetWrapperWithProgressBar(orm.ReleaseCommit.objects.all()):
            orm.ReleaseCommit.objects.filter(id=rc.id).update(
                # NOTE(review): values_list(...)[0] raises IndexError if the
                # referenced Project row no longer exists -- presumably
                # projects cannot be deleted while their ReleaseCommits
                # remain; confirm before re-running this migration.
                organization_id=orm.Project.objects.filter(id=rc.project_id).values_list(
                    'organization_id', flat=True
                )[0]
            )
    def backwards(self, orm):
        "Intentional no-op: the organization_id backfill is not reversed."
models = {
'sentry.activity': {
'Meta': {
'object_name': 'Activity'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.apikey': {
'Meta': {
'object_name': 'ApiKey'
},
'allowed_origins':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32'
}),
'label': (
'django.db.models.fields.CharField', [], {
'default': "'Default'",
'max_length': '64',
'blank': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Organization']"
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.apitoken': {
'Meta': {
'object_name': 'ApiToken'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiKey']",
'null': 'True'
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'token':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.auditlogentry': {
'Meta': {
'object_name': 'AuditLogEntry'
},
'actor': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_actors'",
'null': 'True',
'to': "orm['sentry.User']"
}
),
'actor_key': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiKey']",
'null': 'True',
'blank': 'True'
}
),
'actor_label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'target_object':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'target_user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_targets'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.authenticator': {
'Meta': {
'unique_together': "(('user', 'type'),)",
'object_name': 'Authenticator',
'db_table': "'auth_authenticator'"
},
'config': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}),
'created_at':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authidentity': {
'Meta': {
'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))",
'object_name': 'AuthIdentity'
},
'auth_provider': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.AuthProvider']"
}
),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'last_synced':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_verified':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authprovider': {
'Meta': {
'object_name': 'AuthProvider'
},
'config': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_global_access':
('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'default_role':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'default_teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']",
'unique': 'True'
}
),
'provider': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'sync_time':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
})
},
'sentry.broadcast': {
'Meta': {
'object_name': 'Broadcast'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_expires': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime(2016, 12, 20, 0, 0)',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active':
('django.db.models.fields.BooleanField', [], {
'default': 'True',
'db_index': 'True'
}),
'link': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.CharField', [], {
'max_length': '256'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'upstream_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'blank': 'True'
}
)
},
'sentry.broadcastseen': {
'Meta': {
'unique_together': "(('broadcast', 'user'),)",
'object_name': 'BroadcastSeen'
},
'broadcast': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Broadcast']"
}
),
'date_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.commit': {
'Meta': {
'unique_together': "(('repository_id', 'key'),)",
'object_name': 'Commit',
'index_together': "(('repository_id', 'date_added'),)"
},
'author': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.CommitAuthor']",
'null': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'message': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'repository_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {
'unique_together': "(('organization_id', 'email'),)",
'object_name': 'CommitAuthor'
},
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.commitfilechange': {
'Meta': {
'unique_together': "(('commit', 'filename'),)",
'object_name': 'CommitFileChange'
},
'commit': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Commit']"
}
),
'filename': ('django.db.models.fields.CharField', [], {
'max_length': '255'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '1'
})
},
'sentry.counter': {
'Meta': {
'object_name': 'Counter',
'db_table': "'sentry_projectcounter'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'unique': 'True'
}
),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.dsymbundle': {
'Meta': {
'object_name': 'DSymBundle'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymObject']"
}
),
'sdk': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymSDK']"
}
)
},
'sentry.dsymobject': {
'Meta': {
'object_name': 'DSymObject'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_path': ('django.db.models.fields.TextField', [], {
'db_index': 'True'
}),
'uuid':
('django.db.models.fields.CharField', [], {
'max_length': '36',
'db_index': 'True'
}),
'vmaddr':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'vmsize':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
})
},
'sentry.dsymsdk': {
'Meta': {
'object_name':
'DSymSDK',
'index_together':
"[('version_major', 'version_minor', 'version_patchlevel', 'version_build')]"
},
'dsym_type':
('django.db.models.fields.CharField', [], {
'max_length': '20',
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'sdk_name': ('django.db.models.fields.CharField', [], {
'max_length': '20'
}),
'version_build': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'version_major': ('django.db.models.fields.IntegerField', [], {}),
'version_minor': ('django.db.models.fields.IntegerField', [], {}),
'version_patchlevel': ('django.db.models.fields.IntegerField', [], {})
},
'sentry.dsymsymbol': {
'Meta': {
'unique_together': "[('object', 'address')]",
'object_name': 'DSymSymbol'
},
'address':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymObject']"
}
),
'symbol': ('django.db.models.fields.TextField', [], {})
},
'sentry.environment': {
'Meta': {
'unique_together': "(('project_id', 'name'),)",
'object_name': 'Environment'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.event': {
'Meta': {
'unique_together': "(('project_id', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'",
'index_together': "(('group_id', 'datetime'),)"
},
'data':
('sentry.db.models.fields.node.NodeField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'time_spent':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'null': 'True'
})
},
'sentry.eventmapping': {
'Meta': {
'unique_together': "(('project_id', 'event_id'),)",
'object_name': 'EventMapping'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventtag': {
'Meta': {
'unique_together':
"(('event_id', 'key_id', 'value_id'),)",
'object_name':
'EventTag',
'index_together':
"(('project_id', 'key_id', 'value_id'), ('group_id', 'key_id', 'value_id'))"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {
'unique_together':
"(('project', 'ident'), ('project', 'hash'))",
'object_name':
'EventUser',
'index_together':
"(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'username':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
})
},
'sentry.file': {
'Meta': {
'object_name': 'File'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'legacy_blob'",
'null': 'True',
'to': "orm['sentry.FileBlob']"
}
),
'blobs': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.FileBlob']",
'through': "orm['sentry.FileBlobIndex']",
'symmetrical': 'False'
}
),
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '40',
'null': 'True'
}),
'headers': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.fileblob': {
'Meta': {
'object_name': 'FileBlob'
},
'checksum':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
)
},
'sentry.fileblobindex': {
'Meta': {
'unique_together': "(('file', 'blob', 'offset'),)",
'object_name': 'FileBlobIndex'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.FileBlob']"
}
),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.globaldsymfile': {
'Meta': {
'object_name': 'GlobalDSymFile'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'uuid':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '36'
})
},
'sentry.group': {
'Meta': {
'unique_together': "(('project', 'short_id'),)",
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'",
'index_together': "(('project', 'first_release'),)"
},
'active_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']",
'null': 'True',
'on_delete': 'models.PROTECT'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_public': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'blank': 'True'
}
),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'resolved_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'short_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'time_spent_total':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'times_seen': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
)
},
'sentry.groupassignee': {
'Meta': {
'object_name': 'GroupAssignee',
'db_table': "'sentry_groupasignee'"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'unique': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_assignee_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupemailthread': {
'Meta': {
'unique_together': "(('email', 'group'), ('email', 'msgid'))",
'object_name': 'GroupEmailThread'
},
'date': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'msgid': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Project']"
}
)
},
'sentry.grouphash': {
'Meta': {
'unique_together': "(('project', 'hash'),)",
'object_name': 'GroupHash'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {
'object_name': 'GroupRedirect'
},
'group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'previous_group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'unique': 'True'
})
},
'sentry.grouprelease': {
'Meta': {
'unique_together': "(('group_id', 'release_id', 'environment'),)",
'object_name': 'GroupRelease'
},
'environment':
('django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64'
}),
'first_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'release_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.groupresolution': {
'Meta': {
'object_name': 'GroupResolution'
},
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouprulestatus': {
'Meta': {
'unique_together': "(('rule', 'group'),)",
'object_name': 'GroupRuleStatus'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_active': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'rule': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Rule']"
}
),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
})
},
'sentry.groupseen': {
'Meta': {
'unique_together': "(('user', 'group'),)",
'object_name': 'GroupSeen'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'db_index': 'False'
}
)
},
'sentry.groupsnooze': {
'Meta': {
'object_name': 'GroupSnooze'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.groupsubscription': {
'Meta': {
'unique_together': "(('group', 'user'),)",
'object_name': 'GroupSubscription'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'subscription_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'subscription_set'",
'to': "orm['sentry.Project']"
}
),
'reason':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.grouptagkey': {
'Meta': {
'unique_together': "(('project', 'group', 'key'),)",
'object_name': 'GroupTagKey'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouptagvalue': {
'Meta': {
'unique_together': "(('group', 'key', 'value'),)",
'object_name': 'GroupTagValue',
'db_table': "'sentry_messagefiltervalue'",
'index_together': "(('project', 'key', 'value', 'last_seen'),)"
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'null': 'True',
'to': "orm['sentry.Project']"
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.lostpasswordhash': {
'Meta': {
'object_name': 'LostPasswordHash'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'unique': 'True'
}
)
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'last_updated':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {
'object_name': 'Organization'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'org_memberships'",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMember']",
'to': "orm['sentry.User']"
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'slug':
('django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.organizationaccessrequest': {
'Meta': {
'unique_together': "(('team', 'member'),)",
'object_name': 'OrganizationAccessRequest'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'member': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationmember': {
'Meta': {
'unique_together': "(('organization', 'user'), ('organization', 'email'))",
'object_name': 'OrganizationMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': (
'django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Organization']"
}
),
'role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMemberTeam']",
'blank': 'True'
}
),
'token': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'unique': 'True',
'null': 'True',
'blank': 'True'
}
),
'type': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'sentry_orgmember_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.organizationmemberteam': {
'Meta': {
'unique_together': "(('team', 'organizationmember'),)",
'object_name': 'OrganizationMemberTeam',
'db_table': "'sentry_organizationmember_teams'"
},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'organizationmember': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationonboardingtask': {
'Meta': {
'unique_together': "(('organization', 'task'),)",
'object_name': 'OrganizationOnboardingTask'
},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_completed':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.organizationoption': {
'Meta': {
'unique_together': "(('organization', 'key'),)",
'object_name': 'OrganizationOption',
'db_table': "'sentry_organizationoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.project': {
'Meta': {
'unique_together': "(('team', 'slug'), ('organization', 'slug'))",
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'first_event': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'forced_color': (
'django.db.models.fields.CharField', [], {
'max_length': '6',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.projectbookmark': {
'Meta': {
'unique_together': "(('project_id', 'user'),)",
'object_name': 'ProjectBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.projectdsymfile': {
'Meta': {
'unique_together': "(('project', 'uuid'),)",
'object_name': 'ProjectDSymFile'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'uuid': ('django.db.models.fields.CharField', [], {
'max_length': '36'
})
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'roles': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.projectplatform': {
'Meta': {
'unique_together': "(('project_id', 'platform'),)",
'object_name': 'ProjectPlatform'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'platform': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.release': {
'Meta': {
'unique_together': "(('project', 'version'),)",
'object_name': 'Release'
},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_released':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'date_started':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'new_groups':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']",
'null': 'True',
'blank': 'True'
}
),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'projects': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'releases'",
'symmetrical': 'False',
'through': "orm['sentry.ReleaseProject']",
'to': "orm['sentry.Project']"
}
),
'ref': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'url': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'version': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.releasecommit': {
'Meta': {
'unique_together': "(('release', 'commit'), ('release', 'order'))",
'object_name': 'ReleaseCommit'
},
'commit': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Commit']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True',
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.releaseenvironment': {
'Meta': {
'unique_together': "(('project_id', 'release_id', 'environment_id'),)",
'object_name': 'ReleaseEnvironment',
'db_table': "'sentry_environmentrelease'"
},
'environment_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'first_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'release_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.releasefile': {
'Meta': {
'unique_together': "(('release', 'ident'),)",
'object_name': 'ReleaseFile'
},
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.releaseproject': {
'Meta': {
'unique_together': "(('project', 'release'),)",
'object_name': 'ReleaseProject',
'db_table': "'sentry_release_project'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.repository': {
'Meta': {
'unique_together':
"(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))",
'object_name':
'Repository'
},
'config': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'external_id':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'provider':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'url': ('django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True'
})
},
'sentry.rule': {
'Meta': {
'object_name': 'Rule'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.savedsearch': {
'Meta': {
'unique_together': "(('project', 'name'),)",
'object_name': 'SavedSearch'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_default': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {
'unique_together': "(('project', 'user'),)",
'object_name': 'SavedSearchUserDefault',
'db_table': "'sentry_savedsearch_userdefault'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'savedsearch': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.SavedSearch']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.tagkey': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'TagKey',
'db_table': "'sentry_filterkey'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'label':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.tagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'TagValue',
'db_table': "'sentry_filtervalue'"
},
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.team': {
'Meta': {
'unique_together': "(('organization', 'slug'),)",
'object_name': 'Team'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_password_expired':
('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'name': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'db_column': "'first_name'",
'blank': 'True'
}
),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'session_nonce':
('django.db.models.fields.CharField', [], {
'max_length': '12',
'null': 'True'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '128'
})
},
'sentry.useravatar': {
'Meta': {
'object_name': 'UserAvatar'
},
'avatar_type':
('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']",
'unique': 'True',
'null': 'True',
'on_delete': 'models.SET_NULL'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': (
'django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32',
'db_index': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'avatar'",
'unique': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.useremail': {
'Meta': {
'unique_together': "(('user', 'email'),)",
'object_name': 'UserEmail'
},
'date_hash_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_verified': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'emails'",
'to': "orm['sentry.User']"
}
),
'validation_hash': (
'django.db.models.fields.CharField', [], {
'default': "u'hqvsaw80q8KUufkKQSZPbzYA39VxEW7r'",
'max_length': '32'
}
)
},
'sentry.useroption': {
'Meta': {
'unique_together': "(('user', 'project', 'key'),)",
'object_name': 'UserOption'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'UserReport',
'index_together': "(('project', 'event_id'), ('project', 'date_added'))"
},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
}
}
complete_apps = ['sentry']
symmetrical = True
| |
from __future__ import absolute_import
from django import forms
from django.db import transaction
from django.http import HttpResponseRedirect, Http404
from django.core.urlresolvers import reverse
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.cache import never_cache
from django.utils.decorators import method_decorator
from django.core.context_processors import csrf
from django.utils.translation import ugettext_lazy as _
import petname
from sudo.decorators import sudo_required
from sentry.models import Authenticator
from sentry.web.frontend.base import BaseView
from sentry.web.decorators import login_required
from sentry.web.helpers import render_to_response
from sentry.web.forms.accounts import TwoFactorForm
from sentry.utils import json
class SmsForm(forms.Form):
    """Asks the user for the phone number that SMS 2FA codes are sent to."""
    phone_number = forms.CharField(max_length=40, label=_('Phone number'))
class U2fForm(forms.Form):
    """Asks for an optional human-readable name for a new U2F device."""
    device_name = forms.CharField(
        max_length=60,
        required=False,
        label=_('Device name'),
        # Suggest a friendly random two-word name (e.g. "Brave Otter").
        initial=lambda: petname.Generate(2, ' ').title(),
    )
class TwoFactorSettingsView(BaseView):
    """Base view for configuring a single two-factor authentication
    interface (TOTP, SMS, U2F, recovery codes, ...).

    Subclasses set ``interface_id`` to the identifier of the interface
    they manage and may override ``enroll`` / ``configure``.
    """
    # Identifier of the Authenticator interface this view manages;
    # must be set by subclasses.
    interface_id = None

    @method_decorator(csrf_protect)
    @method_decorator(never_cache)
    @method_decorator(login_required)
    @method_decorator(sudo_required)
    @method_decorator(transaction.atomic)
    def handle(self, request):
        """Resolve the user's interface and dispatch to ``configure``.

        Raises Http404 when ``interface_id`` does not name a known
        interface.
        """
        try:
            interface = Authenticator.objects.get_interface(
                request.user, self.interface_id)
        except LookupError:
            raise Http404
        return self.configure(request, interface)

    def make_context(self, request, interface):
        """Return the base template context (CSRF token, interface and
        the active settings page marker)."""
        context = csrf(request)
        context['auth'] = interface
        context['page'] = 'security'
        return context

    def delete_authenticator(self, interface):
        """Delete the authenticator behind *interface*.

        If the deleted authenticator was the last non-backup one, all
        remaining backup interfaces are deleted as well, since backups
        are useless without a primary method.
        """
        if interface.authenticator is None:
            return

        user = interface.authenticator.user
        interface.authenticator.delete()

        # If this was an authenticator that was a backup interface we just
        # deleted, then nothing happens.
        if interface.is_backup_interface:
            return

        # However, if we deleted an actual authenticator and all that
        # remains are backup interfaces, then we kill them in the
        # process.
        interfaces = Authenticator.objects.all_interfaces_for_user(user)
        backup_interfaces = [x for x in interfaces if x.is_backup_interface]
        if len(backup_interfaces) == len(interfaces):
            for iface in backup_interfaces:
                iface.authenticator.delete()

    def remove(self, request, interface):
        """Handle removal of an enrolled interface, asking the user for
        confirmation first."""
        if 'no' in request.POST or \
           not interface.is_enrolled:
            return HttpResponseRedirect(reverse('sentry-account-settings-2fa'))
        elif 'yes' in request.POST:
            self.delete_authenticator(interface)
            return HttpResponseRedirect(reverse('sentry-account-settings-2fa'))

        all_interfaces = Authenticator.objects.all_interfaces_for_user(
            request.user)
        other_interfaces = [x for x in all_interfaces
                            if x.interface_id != interface.interface_id]
        backup_interfaces = [x for x in other_interfaces if x.is_backup_interface]
        # Warn the user when removing this interface would leave only
        # backup interfaces behind (which get removed too, see
        # delete_authenticator).
        removes_backups = backup_interfaces and \
            len(backup_interfaces) == len(other_interfaces)

        context = self.make_context(request, interface)
        context['removes_backups'] = removes_backups
        return render_to_response('sentry/account/twofactor/remove.html',
                                  context, request)

    def enroll(self, request, interface, insecure=False):
        """Enroll the user in *interface* and redirect appropriately.

        ``insecure`` marks enrollments triggered without an explicit
        POSTed 'enroll'; those are only honored for backup interfaces
        when the user already has a primary 2FA method.
        """
        redirect_to = request.path
        # Only enroll if it's either not an insecure enrollment or we are
        # enrolling a backup interface when we already had a primary one.
        if not insecure \
           or (interface.is_backup_interface and
               Authenticator.objects.user_has_2fa(request.user)):
            try:
                interface.enroll(request.user)
            except Authenticator.AlreadyEnrolled:
                # This can happen in some cases when races occur.  We have
                # seen this when people press the submit button twice.  In
                # that case just go to the overview page of 2fa.
                redirect_to = reverse('sentry-account-settings-2fa')
            else:
                if Authenticator.objects.auto_add_recovery_codes(request.user):
                    redirect_to = reverse('sentry-account-settings-2fa-recovery')
        return HttpResponseRedirect(redirect_to)

    def configure(self, request, interface):
        """Render the configuration page, handling remove/enroll actions."""
        if 'remove' in request.POST:
            return self.remove(request, interface)
        if 'enroll' in request.POST or \
           request.GET.get('enroll') == 'yes':
            # Enrollment via GET (or without an explicit POSTed 'enroll')
            # is treated as insecure; see enroll().
            return self.enroll(request, interface,
                               insecure='enroll' not in request.POST)
        context = self.make_context(request, interface)
        return render_to_response(['sentry/account/twofactor/configure_%s.html'
                                   % self.interface_id,
                                   'sentry/account/twofactor/configure.html'],
                                  context, request)
class RecoveryCodeSettingsView(TwoFactorSettingsView):
    """Settings view for recovery codes (the backup interface)."""
    interface_id = 'recovery'

    def configure(self, request, interface):
        if 'regenerate' not in request.POST:
            return TwoFactorSettingsView.configure(self, request, interface)
        # Regeneration invalidates all previously issued codes.
        interface.regenerate_codes()
        return HttpResponseRedirect(request.path)
class TotpSettingsView(TwoFactorSettingsView):
    """Settings view for TOTP (authenticator app) enrollment."""
    interface_id = 'totp'

    def enroll(self, request, interface, insecure=False):
        # The secret is round-tripped through the form so the QR code
        # stays stable across failed confirmation attempts.
        secret = request.POST.get('totp_secret')
        if secret is not None:
            interface.secret = secret
        if 'otp' not in request.POST:
            form = TwoFactorForm()
        else:
            form = TwoFactorForm(request.POST)
            if form.is_valid() and interface.validate_otp(
                    form.cleaned_data['otp']):
                return TwoFactorSettingsView.enroll(self, request, interface)
            form.errors['__all__'] = ['Invalid confirmation code.']
        context = self.make_context(request, interface)
        context['otp_form'] = form
        context['provision_qrcode'] = interface.get_provision_qrcode(
            request.user.email)
        return render_to_response('sentry/account/twofactor/enroll_totp.html',
                                  context, request)
class SmsSettingsView(TwoFactorSettingsView):
    """Settings view for SMS-based 2FA enrollment.

    Enrollment is a two-step flow: pick a phone number
    ('pick_number'), then confirm the code texted to it ('confirm').
    """
    interface_id = 'sms'

    def enroll(self, request, interface, insecure=False):
        stage = request.POST.get('stage') or 'initial'
        # Round-trip the interface state through the form so failed
        # attempts keep the same secret and phone number.
        secret = request.POST.get('totp_secret')
        if secret is not None:
            interface.secret = secret
        number = request.POST.get('phone_number')
        if number is not None:
            interface.phone_number = number
        sms_form = SmsForm()
        otp_form = TwoFactorForm()
        if stage == 'pick_number':
            sms_form = SmsForm(request.POST)
            if sms_form.is_valid():
                interface.send_text(for_enrollment=True, request=request)
                stage = 'confirm'
        elif stage == 'confirm':
            otp_form = TwoFactorForm(request.POST)
            if otp_form.is_valid() and interface.validate_otp(
                    otp_form.cleaned_data['otp']):
                return TwoFactorSettingsView.enroll(self, request, interface)
            else:
                otp_form.errors['__all__'] = ['Invalid confirmation code.']
        context = self.make_context(request, interface)
        context.update({
            'sms_form': sms_form,
            'otp_form': otp_form,
            'stage': stage,
        })
        return render_to_response('sentry/account/twofactor/enroll_sms.html',
                                  context, request)
class U2fSettingsView(TwoFactorSettingsView):
    """Settings view for U2F hardware key enrollment."""
    interface_id = 'u2f'

    def configure(self, request, interface):
        # Try to remove a single key handle.  `remove_u2f_device`
        # returns `False` for the last remaining handle; in that case
        # fall through so the regular configure flow brings up the
        # removal screen for the entire authentication method.
        key_handle = request.POST.get('key_handle')
        wants_device_removal = key_handle and 'remove' in request.POST
        if wants_device_removal and interface.remove_u2f_device(key_handle):
            interface.authenticator.save()
            return HttpResponseRedirect(request.path)
        return TwoFactorSettingsView.configure(self, request, interface)

    def enroll(self, request, interface, insecure=False):
        u2f_form = U2fForm()
        # Reuse a challenge the client posted back, otherwise start a
        # fresh enrollment.
        challenge = request.POST.get('challenge')
        if challenge:
            enrollment_data = json.loads(challenge)
        else:
            enrollment_data = interface.start_enrollment()
        response = request.POST.get('response')
        if response:
            u2f_form = U2fForm(request.POST)
            if u2f_form.is_valid():
                interface.try_enroll(enrollment_data, json.loads(response),
                                     u2f_form.cleaned_data['device_name'])
                return TwoFactorSettingsView.enroll(self, request, interface)
        context = self.make_context(request, interface)
        context.update({
            'enrollment_data': enrollment_data,
            'u2f_form': u2f_form,
        })
        return render_to_response('sentry/account/twofactor/enroll_u2f.html',
                                  context, request)
| |
import os, glob, collections, fnmatch
from omg import lump, util, palette
from omg.wadio import WadIO
class LumpGroup(collections.OrderedDict):
    """A dict-like object for holding a group of lumps.

    Maps lump names to lump objects; insertion order is preserved and
    is the order used when writing to a WAD.
    """
    def __init__(self, name='data', lumptype=lump.Lump, config=()):
        collections.OrderedDict.__init__(self)
        self._name = name          # section name, e.g. 'sprites'
        self.lumptype = lumptype   # class used to wrap raw lump data
        self.config = config       # subclass-specific configuration
        self.__init2__()
    def __init2__(self):
        """Hook for subclass initialization; the default does nothing."""
        pass
    def load(self, filename):
        """Load entries from a WAD file. All lumps from the same
        section in that WAD are loaded (e.g. if this is a patch
        section, all patches in the WAD will be loaded)."""
        # Fixed: the previous implementation called the nonexistent
        # WAD.load() and referenced undefined attributes (_lumps,
        # _sect_name), so it always raised AttributeError.  Load via the
        # WAD constructor and merge the matching section into this group.
        iw = WAD(from_file=filename)
        self.update(util.deepcopy(iw.__dict__[self._name]))
    def to_file(self, filename):
        """Save group as a separate WAD file."""
        w = WadIO(filename)
        self.save_wadio(w)
        # Fixed: flush the WAD header/directory, mirroring WAD.to_file;
        # previously the WadIO was never saved here.
        w.save()
    def from_glob(self, globpattern):
        """Create lumps from files matching the glob pattern."""
        for p in glob.glob(globpattern):
            # Fixed: use splitext instead of p[:p.rfind('.')], which
            # chopped the final character for extensionless files and
            # mis-truncated paths whose directory part contains a dot.
            name = util.fixname(os.path.splitext(os.path.basename(p))[0])
            self[name] = self.lumptype(from_file=p)
    def save_wadio(self, wadio):
        """Save to a WadIO object."""
        for m in self:
            wadio.insert(m, self[m].data)
    def copy(self):
        """Creates a deep copy."""
        a = self.__class__(self._name, self.lumptype, self.config)
        for k in self:
            a[k] = self[k].copy()
        return a
    def __add__(self, other):
        """Adds two dicts, copying items shallowly; `other` wins on
        duplicate names."""
        c = self.__class__(self._name, self.lumptype, self.config)
        c.update(self)
        c.update(other)
        return c
class MarkerGroup(LumpGroup):
    """Group for lumps found between markers, e.g. sprites"""
    def __init2__(self):
        # Marker name patterns; config 'S' yields 'S*_START'/'S*_END'
        # so double markers such as 'SS_START' also match.
        self.prefix = self.config + "*_START"
        self.suffix = self.config + "*_END"
        # In case group opens with XX_ and ends with X_
        self.abssuffix = self.config + "_END"
    def load_wadio(self, wadio):
        """Load all matching lumps that have not already
        been flagged as read from the given WadIO object."""
        # Single linear scan; `inside` is True while we are between a
        # START marker and its matching END marker.
        inside = False
        # NOTE(review): `startedwith` is assigned but never used.
        startedwith, endswith = "", ""
        for i in range(len(wadio.entries)):
            if wadio.entries[i].been_read:
                # Entry already claimed by another group; this also
                # terminates any marker section in progress.
                inside = False
                continue
            name = wadio.entries[i].name
            if inside:
                if fnmatch.fnmatchcase(name, endswith) or fnmatch.fnmatchcase(name, self.abssuffix):
                    inside = False
                else:
                    # Zero-size entries between markers are presumably
                    # nested sub-markers (e.g. 'S1_START'); they are
                    # consumed but produce no lump.
                    if wadio.entries[i].size != 0:
                        self[name] = self.lumptype(wadio.read(i))
                    wadio.entries[i].been_read = True
            else:
                # print name, self.prefix, fnmatch.fnmatchcase(name, self.prefix)
                if fnmatch.fnmatchcase(name, self.prefix):
                    # Derive the END marker expected for this START
                    # marker (e.g. 'SS_START' -> 'SS_END').
                    endswith = name.replace("START", "END")
                    inside = True
                    wadio.entries[i].been_read = True
    def save_wadio(self, wadio):
        """Save to a WadIO object."""
        # An empty group writes no markers at all.
        if len(self) == 0:
            return
        # Surround the lumps with the (wildcard-stripped) marker names.
        wadio.insert(self.prefix.replace('*', ''), '')
        LumpGroup.save_wadio(self, wadio)
        wadio.insert(self.suffix.replace('*', ''), '')
class HeaderGroup(LumpGroup):
    """Group for lumps arranged header-tail (e.g. maps)"""
    def __init2__(self):
        # config is a pair: (header name patterns, ordered tail patterns)
        self.headers = self.config[0]
        self.tail = self.config[1]
    def load_wadio(self, wadio):
        """Load all matching lumps that have not already
        been flagged as read from the given WadIO object."""
        numlumps = len(wadio.entries)
        i = 0
        while i < numlumps:
            if wadio.entries[i].been_read:
                i += 1
                continue
            name = wadio.entries[i].name
            added = False
            for head in self.headers:
                if fnmatch.fnmatchcase(name, head):
                    added = True
                    self[name] = NameGroup()
                    wadio.entries[i].been_read = True
                    i += 1
                    # Consume the tail lumps belonging to this header.
                    while i < numlumps and util.inwclist(wadio.entries[i].name, self.tail):
                        self[name][wadio.entries[i].name] = \
                            self.lumptype(wadio.read(i))
                        wadio.entries[i].been_read = True
                        i += 1
                    # Fixed: without this break a name matching a second
                    # header pattern would overwrite the group just built
                    # and wrongly flag an unrelated entry as read.
                    break
            if not added:
                i += 1
    def save_wadio(self, wadio):
        """Save to a WadIO object."""
        for h in self:
            hs = self[h]
            wadio.insert(h, "")
            # Fixed: match with fnmatch so wildcard tail patterns such
            # as 'SCRIPT*' save their lumps instead of silently dropping
            # them (the old `t in hs` only matched literal keys).
            # Iteration still follows self.tail to keep canonical order.
            for pattern in self.tail:
                for t in hs:
                    if fnmatch.fnmatchcase(t, pattern):
                        wadio.insert(t, hs[t].data)
class NameGroup(LumpGroup):
    """Group for lumps recognized by special names"""
    def __init2__(self):
        # config is the list of wildcard name patterns for this group.
        self.names = self.config
    def load_wadio(self, wadio):
        """Load all matching lumps that have not already
        been flagged as read from the given WadIO object."""
        for i, entry in enumerate(wadio.entries):
            if entry.been_read:
                continue
            if not util.inwclist(entry.name, self.names):
                continue
            self[entry.name] = self.lumptype(wadio.read(i))
            entry.been_read = True
class TxdefGroup(NameGroup):
    """Group for texture definition lumps"""
    def __init2__(self):
        # Texture definitions live in fixed, well-known lump names.
        self.names = ['TEXTURE?', 'PNAMES']
    def __add__(self, other):
        # Merging texture definitions requires parsing both sides and
        # re-serializing; omg.txdef handles that.  Imported lazily to
        # avoid a circular import at module load time.
        from omg import txdef
        merged = txdef.Textures()
        merged.from_lumps(self)
        merged.from_lumps(other)
        return merged.to_lumps()
    def save_wadio(self, wadio):
        """Save to a WadIO object."""
        NameGroup.save_wadio(self, wadio)
#---------------------------------------------------------------------
#
# This defines the default structure for WAD files.
#
# First some lists...
# Map header lump names: classic ExMy episodes and MAPxx levels.
_mapheaders = ['E?M?', 'MAP??*']
_maptail = ['THINGS', 'LINEDEFS', 'SIDEDEFS', # Must be in order
            'VERTEXES', 'SEGS', 'SSECTORS',
            'NODES', 'SECTORS', 'REJECT',
            'BLOCKMAP', 'BEHAVIOR', 'SCRIPT*']
# GL-nodes companion maps and their tail lumps.
_glmapheaders = ['GL_E?M?', 'GL_MAP??']
_glmaptail = ['GL_VERT', 'GL_SEGS', 'GL_SSECT', 'GL_NODES']
# Name patterns of standalone graphic lumps (title/menu/status screens).
_graphics = ['TITLEPIC', 'CWILV*', 'WI*', 'M_*',
             'INTERPIC', 'BRDR*', 'PFUB?', 'ST*',
             'VICTORY2', 'CREDIT', 'END?', 'WI*',
             'BOSSBACK', 'ENDPIC', 'HELP', 'BOX??',
             'AMMNUM?', 'HELP1', 'DIG*']
# The default structure object.
# Must be in order: markers first, ['*'] name group last
# Each row is [group class, section name, lump type, group config].
defstruct = [
    [MarkerGroup, 'sprites', lump.Graphic, 'S'],
    [MarkerGroup, 'patches', lump.Graphic, 'P'],
    [MarkerGroup, 'flats', lump.Flat, 'F'],
    [MarkerGroup, 'colormaps', lump.Lump, 'C'],
    [MarkerGroup, 'ztextures', lump.Graphic, 'TX'],
    [HeaderGroup, 'maps', lump.Lump, [_mapheaders, _maptail]],
    [HeaderGroup, 'glmaps', lump.Lump, [_glmapheaders, _glmaptail]],
    [NameGroup, 'music', lump.Music, ['D_*']],
    [NameGroup, 'sounds', lump.Sound, ['DS*', 'DP*']],
    [TxdefGroup, 'txdefs', lump.Lump, ['TEXTURE?', 'PNAMES']],
    [NameGroup, 'graphics', lump.Graphic, _graphics],
    [NameGroup, 'data', lump.Lump, ['*']]
]
# Section order used when serializing a WAD back to disk.
write_order = ['data', 'colormaps', 'maps', 'glmaps', 'txdefs',
    'sounds', 'music', 'graphics', 'sprites', 'patches', 'flats',
    'ztextures']
class WAD:
    """A memory-resident, abstract representation of a WAD file. Lumps
    are stored in subsections of the WAD. Loading/saving and handling
    the sections follows the structure specification.
    Initialization:
        new = WAD([from_file, structure])
    Source may be a string representing a path to a file to load from.
    By default, an empty WAD is created.
    Structure may be used to specify a custom lump
    categorization/loading configuration.
    Member data:
        .structure Structure definition.
        .palette Palette (not implemented yet)
        .sprites, etc Sections containing lumps, as specified by
            the structure definition"""
    def __init__(self, from_file=None, structure=defstruct):
        """Create a new WAD. The optional `from_file` argument may be a
        string specifying a path to a file or a WadIO object.
        If omitted, an empty WAD is created. A structure definition
        (rows of [group class, section name, lump type, config]) may be
        passed as the `structure` argument to apply a custom section
        structure. By default, the module-level defstruct is used."""
        self.__category = 'root'
        self.palette = palette.default
        self.structure = structure
        self.groups = []
        # Instantiate one lump group per structure row, exposed both as
        # an attribute (self.sprites, ...) and via self.groups.
        for group_def in self.structure:
            instance = group_def[0](*tuple(group_def[1:]))
            self.__dict__[group_def[1]] = instance
            self.groups.append(instance)
        if from_file:
            self.from_file(from_file)
    def from_file(self, source):
        """Load contents from a file. `source` may be a string
        specifying a path to a file or a WadIO object.
        Raises TypeError for any other source type."""
        if isinstance(source, WadIO):
            w = source
        elif isinstance(source, str):
            # NOTE(review): assert vanishes under `python -O`; kept to
            # preserve the AssertionError callers may rely on.
            assert os.path.exists(source)
            w = WadIO(source)
        else:
            # Fixed: use the call form of raise; the former
            # `raise TypeError, "..."` statement is Python 2-only syntax.
            raise TypeError("Expected WadIO or file path string")
        # Groups load in structure order (marker groups first) so each
        # group only claims entries not yet flagged by an earlier one.
        for group in self.groups:
            group.load_wadio(w)
    def to_file(self, filename):
        """Save contents to a WAD file. Caution: if a file with the given name
        already exists, it will be overwritten. However, the existing file will
        be kept as <filename>.tmp until the operation has finished, to stay safe
        in case of failure."""
        use_backup = os.path.exists(filename)
        tmpfilename = filename + ".tmp"
        if use_backup:
            if os.path.exists(tmpfilename):
                os.remove(tmpfilename)
            os.rename(filename, tmpfilename)
        w = WadIO(filename)
        # Write sections in the canonical on-disk order.
        for group in write_order:
            self.__dict__[group].save_wadio(w)
        w.save()
        if use_backup:
            os.remove(tmpfilename)
    def __add__(self, other):
        """Merge two WADs section by section; on duplicate lump names
        the entry from `other` wins."""
        assert isinstance(other, WAD)
        w = WAD(structure=self.structure)
        for group_def in self.structure:
            name = group_def[1]
            w.__dict__[name] = self.__dict__[name] + other.__dict__[name]
        return w
    def copy(self):
        """Return a deep copy of the whole WAD."""
        return util.deepcopy(self)
| |
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# Build-script setup: define the Foundation dynamic-library product and
# its per-platform compiler/linker flags.
script = Script()
foundation = DynamicLibrary("Foundation")
foundation.GCC_PREFIX_HEADER = 'CoreFoundation/Base.subproj/CoreFoundation_Prefix.h'
if Configuration.current.target.sdk == OSType.Linux:
    foundation.CFLAGS = '-DDEPLOYMENT_TARGET_LINUX '
    foundation.LDFLAGS = '-Wl,@./CoreFoundation/linux.ld -lswiftGlibc `icu-config --ldflags` -Wl,-defsym,__CFConstantStringClassReference=_TMC10Foundation19_NSCFConstantString '
elif Configuration.current.target.sdk == OSType.MacOSX:
    foundation.CFLAGS = '-DDEPLOYMENT_TARGET_MACOSX '
    foundation.LDFLAGS = '-licucore -twolevel_namespace -Wl,-alias_list,CoreFoundation/Base.subproj/DarwinSymbolAliases -sectcreate __UNICODE __csbitmaps CoreFoundation/CharacterSets/CFCharacterSetBitmaps.bitmap -sectcreate __UNICODE __properties CoreFoundation/CharacterSets/CFUniCharPropertyDatabase.data -sectcreate __UNICODE __data CoreFoundation/CharacterSets/CFUnicodeData-L.mapping -segprot __UNICODE r r '
# For now, we do not distinguish between public and private headers (they are all private to Foundation)
# These are really part of CF, which should ultimately be a separate target
foundation.ROOT_HEADERS_FOLDER_PATH = "/usr/lib/swift"
foundation.PUBLIC_HEADERS_FOLDER_PATH = "/usr/lib/swift/CoreFoundation"
foundation.PRIVATE_HEADERS_FOLDER_PATH = "/usr/lib/swift/CoreFoundation"
foundation.PROJECT_HEADERS_FOLDER_PATH = "/usr/lib/swift/CoreFoundation"
foundation.PUBLIC_MODULE_FOLDER_PATH = "/usr/lib/swift/CoreFoundation"
# Common C flags appended to the platform-specific base (note the
# platform CFLAGS above end with a space so this joins cleanly).
foundation.CFLAGS += " ".join([
    '-DU_SHOW_DRAFT_API',
    '-DCF_BUILDING_CF',
    '-DDEPLOYMENT_RUNTIME_SWIFT',
    '-fconstant-cfstrings',
    '-fexceptions',
    '-Wno-shorten-64-to-32',
    '-Wno-deprecated-declarations',
    '-Wno-unreachable-code',
    '-Wno-conditional-uninitialized',
    '-Wno-unused-variable',
    '-Wno-int-conversion',
    '-Wno-unused-function',
    '-I/usr/include/libxml2',
    '-I./',
])
swift_cflags = [
    '-I${BUILD_DIR}/Foundation/usr/lib/swift'
]
# When an XCTest build is available, let Swift sources see and link it.
if "XCTEST_BUILD_DIR" in Configuration.current.variables:
    swift_cflags += [
        '-I${XCTEST_BUILD_DIR}',
        '-L${XCTEST_BUILD_DIR}',
    ]
foundation.SWIFTCFLAGS = " ".join(swift_cflags)
foundation.LDFLAGS += '-lpthread -ldl -lm -lswiftCore -lxml2 '
if "XCTEST_BUILD_DIR" in Configuration.current.variables:
    foundation.LDFLAGS += '-L${XCTEST_BUILD_DIR}'
# Header-copy phase: installs the CoreFoundation headers into the Swift
# module layout (all are effectively private to Foundation, see above).
headers = CopyHeaders(
    module = 'CoreFoundation/Base.subproj/linux.modulemap',
    public = [
        'CoreFoundation/Stream.subproj/CFStream.h',
        'CoreFoundation/String.subproj/CFStringEncodingExt.h',
        'CoreFoundation/Base.subproj/SwiftRuntime/CoreFoundation.h',
        'CoreFoundation/Base.subproj/SwiftRuntime/TargetConditionals.h',
        'CoreFoundation/RunLoop.subproj/CFMessagePort.h',
        'CoreFoundation/Collections.subproj/CFBinaryHeap.h',
        'CoreFoundation/PlugIn.subproj/CFBundle.h',
        'CoreFoundation/Locale.subproj/CFCalendar.h',
        'CoreFoundation/Collections.subproj/CFBitVector.h',
        'CoreFoundation/Base.subproj/CFAvailability.h',
        'CoreFoundation/Collections.subproj/CFTree.h',
        'CoreFoundation/NumberDate.subproj/CFTimeZone.h',
        'CoreFoundation/Error.subproj/CFError.h',
        'CoreFoundation/Collections.subproj/CFBag.h',
        'CoreFoundation/PlugIn.subproj/CFPlugIn.h',
        'CoreFoundation/Parsing.subproj/CFXMLParser.h',
        'CoreFoundation/String.subproj/CFString.h',
        'CoreFoundation/Collections.subproj/CFSet.h',
        'CoreFoundation/Base.subproj/CFUUID.h',
        'CoreFoundation/NumberDate.subproj/CFDate.h',
        'CoreFoundation/Collections.subproj/CFDictionary.h',
        'CoreFoundation/Base.subproj/CFByteOrder.h',
        'CoreFoundation/AppServices.subproj/CFUserNotification.h',
        'CoreFoundation/Base.subproj/CFBase.h',
        'CoreFoundation/Preferences.subproj/CFPreferences.h',
        'CoreFoundation/Locale.subproj/CFLocale.h',
        'CoreFoundation/RunLoop.subproj/CFSocket.h',
        'CoreFoundation/Parsing.subproj/CFPropertyList.h',
        'CoreFoundation/Collections.subproj/CFArray.h',
        'CoreFoundation/RunLoop.subproj/CFRunLoop.h',
        'CoreFoundation/URL.subproj/CFURLAccess.h',
        'CoreFoundation/Locale.subproj/CFDateFormatter.h',
        'CoreFoundation/RunLoop.subproj/CFMachPort.h',
        'CoreFoundation/PlugIn.subproj/CFPlugInCOM.h',
        'CoreFoundation/Base.subproj/CFUtilities.h',
        'CoreFoundation/Parsing.subproj/CFXMLNode.h',
        'CoreFoundation/URL.subproj/CFURLComponents.h',
        'CoreFoundation/URL.subproj/CFURL.h',
        'CoreFoundation/Locale.subproj/CFNumberFormatter.h',
        'CoreFoundation/String.subproj/CFCharacterSet.h',
        'CoreFoundation/NumberDate.subproj/CFNumber.h',
        'CoreFoundation/Collections.subproj/CFData.h',
    ],
    private = [
        'CoreFoundation/Base.subproj/ForSwiftFoundationOnly.h',
        'CoreFoundation/Base.subproj/ForFoundationOnly.h',
        'CoreFoundation/String.subproj/CFBurstTrie.h',
        'CoreFoundation/Error.subproj/CFError_Private.h',
        'CoreFoundation/URL.subproj/CFURLPriv.h',
        'CoreFoundation/Base.subproj/CFLogUtilities.h',
        'CoreFoundation/PlugIn.subproj/CFBundlePriv.h',
        'CoreFoundation/StringEncodings.subproj/CFStringEncodingConverter.h',
        'CoreFoundation/Stream.subproj/CFStreamAbstract.h',
        'CoreFoundation/Base.subproj/CFInternal.h',
        'CoreFoundation/Parsing.subproj/CFXMLInputStream.h',
        'CoreFoundation/Parsing.subproj/CFXMLInterface.h',
        'CoreFoundation/PlugIn.subproj/CFPlugIn_Factory.h',
        'CoreFoundation/String.subproj/CFStringLocalizedFormattingInternal.h',
        'CoreFoundation/PlugIn.subproj/CFBundle_Internal.h',
        'CoreFoundation/StringEncodings.subproj/CFStringEncodingConverterPriv.h',
        'CoreFoundation/Collections.subproj/CFBasicHash.h',
        'CoreFoundation/StringEncodings.subproj/CFStringEncodingDatabase.h',
        'CoreFoundation/StringEncodings.subproj/CFUnicodeDecomposition.h',
        'CoreFoundation/Stream.subproj/CFStreamInternal.h',
        'CoreFoundation/PlugIn.subproj/CFBundle_BinaryTypes.h',
        'CoreFoundation/Locale.subproj/CFICULogging.h',
        'CoreFoundation/Locale.subproj/CFLocaleInternal.h',
        'CoreFoundation/StringEncodings.subproj/CFUnicodePrecomposition.h',
        'CoreFoundation/Base.subproj/CFPriv.h',
        'CoreFoundation/StringEncodings.subproj/CFUniCharPriv.h',
        'CoreFoundation/URL.subproj/CFURL.inc.h',
        'CoreFoundation/NumberDate.subproj/CFBigNumber.h',
        'CoreFoundation/StringEncodings.subproj/CFUniChar.h',
        'CoreFoundation/StringEncodings.subproj/CFStringEncodingConverterExt.h',
        'CoreFoundation/Collections.subproj/CFStorage.h',
        'CoreFoundation/Base.subproj/CFRuntime.h',
        'CoreFoundation/String.subproj/CFStringDefaultEncoding.h',
        'CoreFoundation/String.subproj/CFCharacterSetPriv.h',
        'CoreFoundation/Stream.subproj/CFStreamPriv.h',
        'CoreFoundation/StringEncodings.subproj/CFICUConverters.h',
    ],
    project = [
    ])
foundation.add_phase(headers)
# C/assembly compile phase for the CoreFoundation core.  Commented-out
# entries are sources not yet ported to this build.
sources = CompileSources([
    'closure/data.c',
    'closure/runtime.c',
    'uuid/uuid.c',
#    'CoreFoundation/AppServices.subproj/CFUserNotification.c',
    'CoreFoundation/Base.subproj/CFBase.c',
    'CoreFoundation/Base.subproj/CFFileUtilities.c',
    'CoreFoundation/Base.subproj/CFPlatform.c',
    'CoreFoundation/Base.subproj/CFRuntime.c',
    'CoreFoundation/Base.subproj/CFSortFunctions.c',
    'CoreFoundation/Base.subproj/CFSystemDirectories.c',
    'CoreFoundation/Base.subproj/CFUtilities.c',
    'CoreFoundation/Base.subproj/CFUUID.c',
    'CoreFoundation/Collections.subproj/CFArray.c',
    'CoreFoundation/Collections.subproj/CFBag.c',
    'CoreFoundation/Collections.subproj/CFBasicHash.c',
    'CoreFoundation/Collections.subproj/CFBinaryHeap.c',
    'CoreFoundation/Collections.subproj/CFBitVector.c',
    'CoreFoundation/Collections.subproj/CFData.c',
    'CoreFoundation/Collections.subproj/CFDictionary.c',
    'CoreFoundation/Collections.subproj/CFSet.c',
    'CoreFoundation/Collections.subproj/CFStorage.c',
    'CoreFoundation/Collections.subproj/CFTree.c',
    'CoreFoundation/Error.subproj/CFError.c',
    'CoreFoundation/Locale.subproj/CFCalendar.c',
    'CoreFoundation/Locale.subproj/CFDateFormatter.c',
    'CoreFoundation/Locale.subproj/CFLocale.c',
    'CoreFoundation/Locale.subproj/CFLocaleIdentifier.c',
    'CoreFoundation/Locale.subproj/CFLocaleKeys.c',
    'CoreFoundation/Locale.subproj/CFNumberFormatter.c',
    'CoreFoundation/NumberDate.subproj/CFBigNumber.c',
    'CoreFoundation/NumberDate.subproj/CFDate.c',
    'CoreFoundation/NumberDate.subproj/CFNumber.c',
    'CoreFoundation/NumberDate.subproj/CFTimeZone.c',
    'CoreFoundation/Parsing.subproj/CFBinaryPList.c',
    'CoreFoundation/Parsing.subproj/CFOldStylePList.c',
    'CoreFoundation/Parsing.subproj/CFPropertyList.c',
    'CoreFoundation/Parsing.subproj/CFXMLInputStream.c',
    'CoreFoundation/Parsing.subproj/CFXMLNode.c',
    'CoreFoundation/Parsing.subproj/CFXMLParser.c',
    'CoreFoundation/Parsing.subproj/CFXMLTree.c',
    'CoreFoundation/Parsing.subproj/CFXMLInterface.c',
    'CoreFoundation/PlugIn.subproj/CFBundle.c',
    'CoreFoundation/PlugIn.subproj/CFBundle_Binary.c',
    'CoreFoundation/PlugIn.subproj/CFBundle_Grok.c',
    'CoreFoundation/PlugIn.subproj/CFBundle_InfoPlist.c',
    'CoreFoundation/PlugIn.subproj/CFBundle_Locale.c',
    'CoreFoundation/PlugIn.subproj/CFBundle_Resources.c',
    'CoreFoundation/PlugIn.subproj/CFBundle_Strings.c',
    'CoreFoundation/PlugIn.subproj/CFPlugIn.c',
    'CoreFoundation/PlugIn.subproj/CFPlugIn_Factory.c',
    'CoreFoundation/PlugIn.subproj/CFPlugIn_Instance.c',
    'CoreFoundation/PlugIn.subproj/CFPlugIn_PlugIn.c',
    'CoreFoundation/Preferences.subproj/CFApplicationPreferences.c',
    'CoreFoundation/Preferences.subproj/CFPreferences.c',
#    'CoreFoundation/RunLoop.subproj/CFMachPort.c',
#    'CoreFoundation/RunLoop.subproj/CFMessagePort.c',
#    'CoreFoundation/RunLoop.subproj/CFRunLoop.c',
#    'CoreFoundation/RunLoop.subproj/CFSocket.c',
#    'CoreFoundation/Stream.subproj/CFConcreteStreams.c',
#    'CoreFoundation/Stream.subproj/CFSocketStream.c',
#    'CoreFoundation/Stream.subproj/CFStream.c',
    'CoreFoundation/String.subproj/CFBurstTrie.c',
    'CoreFoundation/String.subproj/CFCharacterSet.c',
    'CoreFoundation/String.subproj/CFString.c',
    'CoreFoundation/String.subproj/CFStringEncodings.c',
    'CoreFoundation/String.subproj/CFStringScanner.c',
    'CoreFoundation/String.subproj/CFStringUtilities.c',
    'CoreFoundation/StringEncodings.subproj/CFBuiltinConverters.c',
    'CoreFoundation/StringEncodings.subproj/CFICUConverters.c',
    'CoreFoundation/StringEncodings.subproj/CFPlatformConverters.c',
    'CoreFoundation/StringEncodings.subproj/CFStringEncodingConverter.c',
    'CoreFoundation/StringEncodings.subproj/CFStringEncodingDatabase.c',
    'CoreFoundation/StringEncodings.subproj/CFUniChar.c',
    'CoreFoundation/StringEncodings.subproj/CFUnicodeDecomposition.c',
    'CoreFoundation/StringEncodings.subproj/CFUnicodePrecomposition.c',
    'CoreFoundation/URL.subproj/CFURL.c',
    'CoreFoundation/URL.subproj/CFURLAccess.c',
    'CoreFoundation/URL.subproj/CFURLComponents.c',
    'CoreFoundation/URL.subproj/CFURLComponents_URIParser.c',
    'CoreFoundation/String.subproj/CFCharacterSetData.S',
    'CoreFoundation/String.subproj/CFUnicodeDataL.S',
    'CoreFoundation/String.subproj/CFUniCharPropertyDatabase.S',
])
# Headers must be installed before the C sources compile against them.
sources.add_dependency(headers)
foundation.add_phase(sources)
# Swift compile phase for the Foundation overlay itself.  Commented-out
# entries are sources not yet ported to this build.
swift_sources = CompileSwiftSources([
    'Foundation/NSObject.swift',
    'Foundation/NSAffineTransform.swift',
    'Foundation/NSArray.swift',
    'Foundation/NSAttributedString.swift',
    'Foundation/NSBundle.swift',
    'Foundation/NSByteCountFormatter.swift',
    'Foundation/NSCache.swift',
    'Foundation/NSCalendar.swift',
    'Foundation/NSCFArray.swift',
    'Foundation/NSCFDictionary.swift',
    'Foundation/NSCFSet.swift',
    'Foundation/NSCFString.swift',
    'Foundation/NSCharacterSet.swift',
    'Foundation/NSCoder.swift',
    'Foundation/NSComparisonPredicate.swift',
    'Foundation/NSCompoundPredicate.swift',
    'Foundation/NSData.swift',
    'Foundation/NSDate.swift',
    'Foundation/NSDateComponentsFormatter.swift',
    'Foundation/NSDateFormatter.swift',
    'Foundation/NSDateIntervalFormatter.swift',
    'Foundation/NSDecimal.swift',
    'Foundation/NSDecimalNumber.swift',
    'Foundation/NSDictionary.swift',
    'Foundation/NSEnergyFormatter.swift',
    'Foundation/NSEnumerator.swift',
    'Foundation/NSError.swift',
    'Foundation/NSExpression.swift',
    'Foundation/NSFileHandle.swift',
    'Foundation/NSFileManager.swift',
    'Foundation/NSFormatter.swift',
    'Foundation/NSGeometry.swift',
    'Foundation/NSHost.swift',
    'Foundation/NSHTTPCookie.swift',
    'Foundation/NSHTTPCookieStorage.swift',
    'Foundation/NSIndexPath.swift',
    'Foundation/NSIndexSet.swift',
    'Foundation/NSJSONSerialization.swift',
    'Foundation/NSKeyedArchiver.swift',
    'Foundation/NSLengthFormatter.swift',
    'Foundation/NSLocale.swift',
    'Foundation/NSLock.swift',
    'Foundation/NSMassFormatter.swift',
    'Foundation/NSNotification.swift',
    'Foundation/NSNotificationQueue.swift',
    'Foundation/NSNull.swift',
    'Foundation/NSNumber.swift',
    'Foundation/NSNumberFormatter.swift',
    'Foundation/NSObjCRuntime.swift',
    'Foundation/NSOperation.swift',
    'Foundation/NSOrderedSet.swift',
    'Foundation/NSPathUtilities.swift',
    'Foundation/NSPersonNameComponents.swift',
    'Foundation/NSPersonNameComponentsFormatter.swift',
    'Foundation/NSPort.swift',
    'Foundation/NSPortMessage.swift',
    'Foundation/NSPredicate.swift',
    'Foundation/NSProcessInfo.swift',
    'Foundation/NSProgress.swift',
    'Foundation/NSPropertyList.swift',
    'Foundation/NSRange.swift',
    'Foundation/NSRegularExpression.swift',
#    'Foundation/NSRunLoop.swift',
    'Foundation/NSScanner.swift',
    'Foundation/NSSet.swift',
    'Foundation/NSSortDescriptor.swift',
    'Foundation/NSStream.swift',
    'Foundation/NSString.swift',
    'Foundation/NSSwiftRuntime.swift',
    'Foundation/NSTask.swift',
    'Foundation/NSTextCheckingResult.swift',
    'Foundation/NSThread.swift',
#    'Foundation/NSTimer.swift',
    'Foundation/NSTimeZone.swift',
    'Foundation/NSURL.swift',
    'Foundation/NSURLAuthenticationChallenge.swift',
    'Foundation/NSURLCache.swift',
    'Foundation/NSURLCredential.swift',
    'Foundation/NSURLCredentialStorage.swift',
    'Foundation/NSURLError.swift',
    'Foundation/NSURLProtectionSpace.swift',
    'Foundation/NSURLProtocol.swift',
    'Foundation/NSURLRequest.swift',
    'Foundation/NSURLResponse.swift',
    'Foundation/NSURLSession.swift',
    'Foundation/NSUserDefaults.swift',
    'Foundation/NSUUID.swift',
    'Foundation/NSValue.swift',
    'Foundation/NSXMLDocument.swift',
    'Foundation/NSXMLDTD.swift',
    'Foundation/NSXMLDTDNode.swift',
    'Foundation/NSXMLElement.swift',
    'Foundation/NSXMLNode.swift',
    'Foundation/NSXMLNodeOptions.swift',
    'Foundation/NSXMLParser.swift',
    'Foundation/FoundationErrors.swift',
])
# Swift sources also need the installed CF headers/modulemap.
swift_sources.add_dependency(headers)
foundation.add_phase(swift_sources)
# Test bundle: resources plus an executable built from main.swift and
# every Test*.swift in the TestFoundation directory.
foundation_tests_resources = CopyResources('TestFoundation', [
    'TestFoundation/Resources/Info.plist',
    'TestFoundation/Resources/NSURLTestData.plist',
    'TestFoundation/Resources/Test.plist',
])
# TODO: Probably this should be another 'product', but for now it's simply a phase
foundation_tests = SwiftExecutable('TestFoundation', [
    'TestFoundation/main.swift',
] + glob.glob('./TestFoundation/Test*.swift')) # all TestSomething.swift are considered sources to the test project in the TestFoundation directory
foundation_tests.add_dependency(foundation_tests_resources)
foundation.add_phase(foundation_tests_resources)
foundation.add_phase(foundation_tests)
plutil = SwiftExecutable('plutil', ['Tools/plutil/main.swift'])
foundation.add_phase(plutil)
script.add_product(foundation)
# Raw ninja rules appended verbatim: `install` copies the built library,
# swiftmodule/swiftdoc and CF headers into DSTROOT.
extra_script = """
rule InstallFoundation
    command = mkdir -p "${DSTROOT}/${PREFIX}/lib/swift/${OS}"; $
        cp "${BUILD_DIR}/Foundation/${DYLIB_PREFIX}Foundation${DYLIB_SUFFIX}" "${DSTROOT}/${PREFIX}/lib/swift/${OS}"; $
        mkdir -p "${DSTROOT}/${PREFIX}/lib/swift/${OS}/${ARCH}"; $
        cp "${BUILD_DIR}/Foundation/Foundation.swiftmodule" "${DSTROOT}/${PREFIX}/lib/swift/${OS}/${ARCH}/"; $
        cp "${BUILD_DIR}/Foundation/Foundation.swiftdoc" "${DSTROOT}/${PREFIX}/lib/swift/${OS}/${ARCH}/"; $
        mkdir -p "${DSTROOT}/${PREFIX}/local/include"; $
        rsync -r "${BUILD_DIR}/Foundation/${PREFIX}/lib/swift/CoreFoundation" "${DSTROOT}/${PREFIX}/lib/swift/"
build ${BUILD_DIR}/.install: InstallFoundation ${BUILD_DIR}/Foundation/${DYLIB_PREFIX}Foundation${DYLIB_SUFFIX}
build install: phony | ${BUILD_DIR}/.install
"""
# The `test` target only echoes run/debug instructions; the library
# search path differs depending on whether XCTest was built separately.
if "XCTEST_BUILD_DIR" in Configuration.current.variables:
    extra_script += """
rule RunTestFoundation
    command = echo "**** RUNNING TESTS ****\\nexecute:\\nLD_LIBRARY_PATH=${BUILD_DIR}/Foundation/:${XCTEST_BUILD_DIR} ${BUILD_DIR}/TestFoundation/TestFoundation\\n**** DEBUGGING TESTS ****\\nexecute:\\nLD_LIBRARY_PATH=${BUILD_DIR}/Foundation/:${XCTEST_BUILD_DIR} lldb ${BUILD_DIR}/TestFoundation/TestFoundation\\n"
    description = Building Tests
build ${BUILD_DIR}/.test: RunTestFoundation | TestFoundation
build test: phony | ${BUILD_DIR}/.test
"""
else:
    extra_script += """
rule RunTestFoundation
    command = echo "**** RUNNING TESTS ****\\nexecute:\\nLD_LIBRARY_PATH=${BUILD_DIR}/Foundation/ ${BUILD_DIR}/TestFoundation/TestFoundation\\n**** DEBUGGING TESTS ****\\nexecute:\\nLD_LIBRARY_PATH=${BUILD_DIR}/Foundation/ lldb ${BUILD_DIR}/TestFoundation/TestFoundation\\n"
    description = Building Tests
build ${BUILD_DIR}/.test: RunTestFoundation | TestFoundation
build test: phony | ${BUILD_DIR}/.test
"""
script.add_text(extra_script)
# Emit the final ninja build file.
script.generate()
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Python front-end supports for functions.
NOTE: functions are currently experimental and subject to change!
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import hashlib
import re
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util import compat
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
def _make_argname_from_tensor_name(name):
return re.sub(":0$", "", name).replace(":", "_o")
def _tensor_to_argdef(t, name=None, used_names=None):
  """Convert tensor t to an argdef, with a specified name or a unique name."""
  arg = op_def_pb2.OpDef.ArgDef()
  if name is not None:
    arg.name = name
  else:
    candidate = _make_argname_from_tensor_name(t.name)
    if used_names is not None:
      if candidate in used_names:
        # Uniquify with a "_U<i>" suffix, picking the first free index.
        suffix = 0
        while True:
          renamed = "%s_U%d" % (candidate, suffix)
          if renamed not in used_names:
            candidate = renamed
            break
          suffix += 1
      used_names.add(candidate)
    arg.name = candidate
  arg.type = t.dtype.as_datatype_enum
  return arg
def _get_node_def(op):
  # Returns the op's underlying NodeDef proto via the private attribute.
  return op._node_def  # pylint: disable=protected-access
def _get_op_def(op):
return op.op_def or op_def_registry.get_registered_ops()[op.type]
def _is_in_placeholders(op, func_arg_placeholders):
return op.values() and (op.values()[0].name in func_arg_placeholders)
def _create_input_dict(function_graph, func_arg_placeholders):
  """Create a mapping from graph tensor names to function tensor names."""
  input_dict = {}
  for op in function_graph.get_operations():
    if _is_in_placeholders(op, func_arg_placeholders):
      # Function arguments keep their graph names unchanged.
      input_dict[op.values()[0].name] = op.values()[0].name
      input_dict[op.name] = op.name
    else:
      op_def = _get_op_def(op)
      attrs = _get_node_def(op).attr
      # `o` indexes the op's flattened outputs across all output args.
      o = 0
      for arg_def in op_def.output_arg:
        if arg_def.number_attr:
          # Repeated output arg: length stored in the named int attr.
          num = attrs[arg_def.number_attr].i
        elif arg_def.type_list_attr:
          # Heterogeneous output list: length of the type-list attr.
          num = len(attrs[arg_def.type_list_attr].list.type)
        else:
          num = 1
        for i in range(num):
          # Function-def tensor names use the "op:arg_name:index" form.
          result = "%s:%s:%d" % (op.name, arg_def.name, i)
          input_dict[op.values()[o].name] = result
          if o == 0:
            # The bare op name aliases the op's first output.
            input_dict[op.name] = result
          o += 1
  return input_dict
def _add_op_node(op, func, input_dict):
  """Converts an op to a function def node and add it to `func`.

  Args:
    op: the Operation to convert.
    func: the FunctionDef proto being built (mutated in place).
    input_dict: mapping of graph tensor names to function tensor names,
      as produced by _create_input_dict().
  """
  # Add an entry in func.node_def
  # Note that extend() makes a copy in this case, see:
  # https://developers.google.com/protocol-buffers/docs/reference/python-generated#repeated-message-fields
  func.node_def.extend([_get_node_def(op)])
  node_def = func.node_def[-1]
  for i in range(len(node_def.input)):
    if not node_def.input[i].startswith("^"):
      # Rewrite data inputs to function-local names. Control inputs
      # (prefixed with "^") keep their original op names.
      assert node_def.input[i] in input_dict, (
          "%s missing from %s" % (node_def.input[i], input_dict.items()))
      node_def.input[i] = input_dict[node_def.input[i]]
def _graph_to_function_def(graph, inputs, outputs, out_names=None):
  """Returns `graph` as a `FunctionDef` protocol buffer.

  This method creates a [`FunctionDef`](
  https://www.tensorflow.org/code/tensorflow/core/framework/function.proto)
  protocol buffer that contains all the ops present in the graph. The
  graph effectively becomes the body of the function.

  The arguments `inputs` and `outputs` will be listed as the inputs
  and outputs tensors of the function. They must be lists of
  tensors present in the graph. The lists can optionally be empty.

  Args:
    graph: Graph.
    inputs: List of tensors. Inputs to the function.
    outputs: List of tensors. Outputs of the function.
    out_names: Optional list of string names for the outputs.

  Returns:
    A FunctionDef protocol buffer.

  Raises:
    ValueError: if out_names is specified and the wrong length.
  """
  func = function_pb2.FunctionDef()
  # Placeholder name; the caller assigns the real function name later.
  func.signature.name = "_"
  used_names = set()
  func.signature.input_arg.extend([_tensor_to_argdef(i, used_names=used_names)
                                   for i in inputs])
  if out_names is None:
    # Derive output names from tensor names, uniquified independently of
    # the input names.
    used_names = set()
    func.signature.output_arg.extend([
        _tensor_to_argdef(o, used_names=used_names) for o in outputs])
  elif len(outputs) != len(out_names):
    raise ValueError(
        "Length of out_names (%d) does not match number of outputs (%d): %s" %
        (len(out_names), len(outputs), ", ".join(out_names)))
  elif len(out_names) != len(set(out_names)):
    raise ValueError(
        "Must not have duplicates in out_names: %s" % ", ".join(out_names))
  else:
    func.signature.output_arg.extend([
        _tensor_to_argdef(o, name=n) for o, n in zip(outputs, out_names)])
  func_arg_placeholders = set([i.name for i in inputs])
  input_dict = _create_input_dict(graph, func_arg_placeholders)

  for op in graph.get_operations():
    if _is_in_placeholders(op, func_arg_placeholders):
      # Declared-input placeholders become signature args, not body nodes.
      continue
    _add_op_node(op, func, input_dict)

  # Record which internal tensor each declared output maps to.
  if out_names is None:
    for index, o in enumerate(outputs):
      k = func.signature.output_arg[index].name
      func.ret[k] = input_dict[o.name]
  else:
    for o, n in zip(outputs, out_names):
      func.ret[n] = input_dict[o.name]
  return func
def _parse_kwargs_as_attrs(func_name, **kwargs):
  """Convert recognized keyword arguments into function attr protos.

  Recognized keys are "noinline", "compiled" and
  "separate_compiled_gradients"; any other key raises ValueError.
  """
  noinline = kwargs.pop("noinline", None)
  compiled = kwargs.pop("compiled", None)
  separate_grads = kwargs.pop("separate_compiled_gradients", None)
  if kwargs:
    raise ValueError("Unknown keyword arguments: %s" % kwargs.keys())

  attrs = {}
  if noinline is not None:
    attrs["_noinline"] = attr_value_pb2.AttrValue(b=bool(noinline))
  if compiled is not None:
    attrs["_XlaCompile"] = attr_value_pb2.AttrValue(b=bool(compiled))
    attrs["_XlaSeparateCompiledGradients"] = attr_value_pb2.AttrValue(
        b=bool(separate_grads))
    attrs["_XlaScope"] = attr_value_pb2.AttrValue(
        s=("function_%s" % func_name).encode())
  return attrs
def _call(sig, *inputs, **kwargs):
  """Adds a node calling a function.

  This adds a `call` op to the default graph that calls the function
  of signature `sig`, passing the tensors in `inputs` as arguments.
  It returns the outputs of the call, which are one or more tensors.

  `sig` is the OpDef signature of a `_DefinedFunction` object.

  You can pass an optional keyword parameter `name=string` to name the
  added operation.

  You can pass an optional keyword parameter `noinline=True|False` to
  instruct the runtime not to inline the function body into the call
  site.

  Args:
    sig: OpDefArg. The signature of the function.
    *inputs: arguments to the function.
    **kwargs: Optional keyword arguments. Can only contain 'name' or
      'noinline'.

  Returns:
    A 2-element tuple. First element: a Tensor if the function returns a single
    value; a list of Tensors if the function returns multiple value; the
    Operation if the function returns no values. Second element: the Operation.

  Raises:
    ValueError: if the arguments are invalid.
  """
  if len(inputs) != len(sig.input_arg):
    raise ValueError("Expected number of arguments: %d, received: %d" %
                     (len(sig.input_arg), len(inputs)))
  name = kwargs.pop("name", None)
  g = ops.get_default_graph()
  func_name = sig.name
  # Remaining kwargs (e.g. noinline) become attrs on the call op.
  attrs = _parse_kwargs_as_attrs(func_name, **kwargs)
  output_types = [dtypes.DType(x.type) for x in sig.output_arg]
  with ops.name_scope(name, func_name, inputs) as name:
    # compute_shapes=False: output shapes are unknown at call time; callers
    # may set them later via a shape_func.
    op = g.create_op(
        func_name,
        list(inputs),
        output_types,
        name=name,
        attrs=attrs,
        op_def=sig,
        compute_shapes=False)
  if op.outputs:
    if len(op.outputs) == 1:
      ret = op.outputs[0]
    else:
      ret = tuple(op.outputs)
  else:
    ret = op
  return ret, op
def _get_func_name(func):
  """Returns a string name for `func`.

  Args:
    func: a callable: a plain function, a bound/unbound method, or a
      callable object (an instance whose class defines `__call__`).

  Returns:
    A string naming the callable.

  Raises:
    ValueError: if `func` is not callable.
  """
  _, func = tf_decorator.unwrap(func)
  if callable(func):
    if tf_inspect.isfunction(func):
      return func.__name__
    elif tf_inspect.ismethod(func):
      return "%s.%s" % (func.__self__.__name__, func.__name__)
    else:  # Probably a class instance with __call__
      # Bug fix: previously this returned `type(func)` (the class object
      # itself). Callers join the result with strings, e.g.
      # "_".join([_get_func_name(f), hash_str]), which raises TypeError on
      # a non-string. Return the class's name instead.
      return type(func).__name__
  else:
    raise ValueError("Argument must be callable")
class _FuncGraph(ops.Graph):
  """A helper for constructing a function.

  _FuncGraph overrides ops.Graph's create_op() so that we can keep
  track of every input into every op created inside the function. If
  any input is from another graph, we keep track of it in
  self._captured and substitute the input with a placeholder.

  Each captured input's corresponding placeholder is converted into a
  function argument and the caller passes in the captured tensor.
  """

  def __init__(self, *args, **kwargs):
    super(_FuncGraph, self).__init__(*args, **kwargs)
    self._building_function = True
    self._outer_graph = ops.get_default_graph()
    self._vscope = vs.get_variable_scope()
    self._old_custom_getter = self._vscope.custom_getter
    # Maps outer-graph tensor -> its placeholder inside this graph.
    self._captured = {}
    # Outer-graph tensors captured by the function body.
    self.extra_inputs = []
    # Placeholders standing in for the captured tensors (same order).
    self.extra_args = []
    # Variables created by the body (hoisted to the outer graph).
    self.extra_vars = []

  def getvar(self,
             getter,
             name,
             shape=None,
             dtype=None,
             initializer=None,
             reuse=None,
             trainable=True,
             collections=None,  # pylint: disable=redefined-outer-name
             use_resource=None,
             **kwargs):
    """A custom variable getter."""
    # Here, we switch the default graph to the outer graph and ask the
    # variable scope in which the function is defined to give us the
    # variable. The variable is stashed in extra_vars and returned to
    # the caller.
    #
    # We capture these variables so that the variable definition is
    # hoisted upward to the outer most graph.
    with self._outer_graph.as_default():
      # pylint: disable=protected-access
      var = self._vscope.get_variable(
          vs._get_default_variable_store(),
          name,
          shape=shape,
          dtype=dtype,
          initializer=initializer,
          reuse=reuse,
          trainable=trainable,
          collections=collections,
          use_resource=use_resource)
      self.extra_vars.append(var)
      if isinstance(var, resource_variable_ops.ResourceVariable):
        # For resource-based variables read the variable outside the function
        # and pass in the value. This ensures that the function is pure and
        # differentiable. TODO(apassos) this may have performance problems if
        # the function will only do embedding lookups on the variable.
        return var.value()
      return var

  def create_op(self, op_type, inputs, data_types, **kwargs):
    """Intercepts op creation to capture tensors from other graphs."""
    for i, x in enumerate(inputs):
      if x.graph is not self:
        # Referring to a tensor from other graph.
        if x in self._captured:
          # Captured already.
          inputs[i] = self._captured[x]
        else:
          # Substitute with a placeholder.
          self.extra_inputs.append(x)
          ph = array_ops.placeholder(x.dtype, shape=x.get_shape())
          # pylint: disable=protected-access
          ph._handle_shape = x._handle_shape
          ph._handle_dtype = x._handle_dtype
          # pylint: enable=protected-access
          inputs[i] = ph
          self._captured[x] = ph
          self.extra_args.append(ph)
    return super(_FuncGraph, self).create_op(op_type, inputs, data_types,
                                             **kwargs)
def get_extra_vars():
  """Returns the captured variables by the function.

  Returns:
    If the default graph is being used to define a function, the
    returned list of variables are those created inside the function
    body so far. Otherwise, returns an empty list.
  """
  graph = ops.get_default_graph()
  return graph.extra_vars if isinstance(graph, _FuncGraph) else []
def get_extra_inputs():
  """Returns the captured input tensors by the function.

  Returns:
    If the default graph is being used to define a function, the
    returned list of tensors are those accessed inside the function body
    but defined outside the function body so far. Otherwise, returns an
    empty list.
  """
  graph = ops.get_default_graph()
  return graph.extra_inputs if isinstance(graph, _FuncGraph) else []
def get_extra_args():
  """Returns the corresponding function arguments for the captured inputs.

  Returns:
    If the default graph is being used to define a function, the
    returned list of place holders are those used inside the function
    body corresponding those returned by get_extra_inputs(). Otherwise,
    returns an empty list.
  """
  graph = ops.get_default_graph()
  return graph.extra_args if isinstance(graph, _FuncGraph) else []
class _DefinedFunction(object):
  """_DefinedFunction encapsulates a function definition and its properties.

  Attributes:
    name: The function name.
    definition: The definition of this function. A FunctionDef proto.
    grad_func_name: If not None, the name of this function's gradient function.
    python_grad_func: A python callable implementing the gradient of
      the function python-side.
  """

  def __init__(self,
               func,
               argnames,
               input_types,
               func_name=None,
               grad_func=None,
               python_grad_func=None,
               out_names=None,
               shape_func=None,
               **kwargs):
    """Creates _DefinedFunction.

    Args:
      func: A python callable which constructs a tf function body.
      argnames: A list of strings for function argument names.
      input_types: The function's argument types. Can be a tuple, list of
        tf data types.
      func_name: The function name. Defaults to None, in which derives from
        'func'.
      grad_func: This function's gradient function, if not None. Defaults
        to None.
      python_grad_func: A python callable implementing the gradient of
        the function python-side.
      out_names: An optional list of strings for the function return value
        names.
      shape_func: An optional function mapping an op to a list of static
        output shapes.
      **kwargs: The keyword arguments. **kwargs is passed to every call
        site of this function.

    Raises:
      ValueError: The function definition is invalid.
    """
    self._func = func
    self._input_types = input_types
    self._func_name = func_name
    self._grad_func = grad_func
    self._python_grad_func = python_grad_func
    self._out_names = out_names
    self._shape_func = shape_func
    self._extra_kwargs = kwargs
    self._definition = None  # Constructed lazily.
    self._sub_functions = dict()  # Constructed with definition.

    # Pair each declared input type with an argument name; missing names
    # fall back to positional "arg%d".
    self._args = []
    assert isinstance(input_types, (list, tuple))
    for i in range(len(input_types)):
      argname = argnames[i] if i < len(argnames) else ("arg%d" % i)
      argtype = input_types[i]
      self._args.append((argname, argtype))

  @property
  def name(self):
    """Function name."""
    self._create_definition_if_needed()
    return self._func_name

  @property
  def definition(self):
    """Function definition proto."""
    self._create_definition_if_needed()
    return self._definition

  def set_grad_func(self, grad_func):
    """Specifies the gradient function of this function."""
    assert not self._grad_func
    assert isinstance(grad_func, _DefinedFunction)
    self._grad_func = grad_func

  @property
  def grad_func_name(self):
    """Its gradient function's name."""
    return self._grad_func.name if self._grad_func else None

  @property
  def python_grad_func(self):
    """Python gradient function callable."""
    return self._python_grad_func

  @property
  def declared_input_types(self):
    """Returns the list of data types of explicit declared inputs."""
    return self._input_types

  @property
  def captured_inputs(self):
    """Returns the list of implicitly captured inputs."""
    self._create_definition_if_needed()
    return self._extra_inputs

  def _create_definition_if_needed(self):
    """Creates the function definition if it's not created yet."""
    if self._definition is not None:
      return

    # Create the func_def object.
    temp_graph = _FuncGraph()
    with temp_graph.as_default():
      # List of placeholders for the function_def.
      inputs = []
      for (argname, argtype) in self._args:
        argholder = array_ops.placeholder(argtype, name=argname)
        inputs.append(argholder)
      # Call func and gather the output tensors.
      with vs.variable_scope("", custom_getter=temp_graph.getvar):
        outputs = self._func(*inputs)
      # If func only returned one value, make it a tuple.
      if not isinstance(outputs, (list, tuple)):
        outputs = (outputs,)
      if any([_ is None for _ in outputs]):
        raise ValueError("Function can not return None.")
      # Ensures each output is a Tensor.
      outputs = [ops.convert_to_tensor(_) for _ in outputs]
    # Captured outer-graph tensors become additional function arguments.
    self._extra_inputs = temp_graph.extra_inputs
    inputs.extend(temp_graph.extra_args)
    # pylint: disable=protected-access
    self._sub_functions = temp_graph._functions
    # pylint: enable=protected-access

    # Build the FunctionDef
    self._definition = _graph_to_function_def(
        temp_graph, inputs, outputs, out_names=self._out_names)

    # Extra kwargs are treated as attrs on the function def.
    sig_pre_func_name = self._func_name or _get_func_name(self._func)
    kwargs_attr = _parse_kwargs_as_attrs(
        sig_pre_func_name, **self._extra_kwargs)
    for k in kwargs_attr:
      self._definition.attr[k].CopyFrom(kwargs_attr[k])

    # Hash the definition and its dependencies.
    self._hash_str = self._create_hash_str(
        self._definition.signature.input_arg,
        self._definition.signature.output_arg,
        self._definition.node_def)

    # Finally, we decide the function name to use. If not specified,
    # make up something which is almost certainly unique (but deterministic).
    if not self._func_name:
      self._func_name = "_".join([_get_func_name(self._func), self._hash_str])
    self._definition.signature.name = self._func_name
    if self._func.__doc__:
      self._definition.signature.description = self._func.__doc__

  def _create_hash_str(self, input_arg, output_arg, node_def):
    """Creates an 8-character string unique to this input.

    Args:
      input_arg: the input_arg field of an OpDef
        (e.g. self._definition.signature.input_arg)
      output_arg: the output_arg field of an OpDef
        (e.g. self._definition.signature.output_arg)
      node_def: the node_def field of a FunctionDef
        (e.g. self._definition.node_def)

    Returns:
      The unique string for this input
    """
    hasher = hashlib.sha1()

    # Length-prefixed updates keep distinct field sequences from colliding.
    def update_num(n):
      hasher.update(compat.as_bytes("%x" % n))

    def update_str(s):
      update_num(len(s))
      hasher.update(compat.as_bytes(s))

    def update_strs(slist):
      update_num(len(slist))
      for s in slist:
        update_str(s)

    for adef in input_arg:
      update_str(adef.SerializeToString())

    for adef in output_arg:
      update_str(adef.SerializeToString())

    # Sort nodes by name so the hash is independent of insertion order.
    for n in sorted(node_def, key=lambda n: n.name):
      update_str(n.name)
      update_str(n.op)
      update_strs(n.input)
      update_num(len(n.attr))
      # NOTE: protobuf map serialization does not guarantee ordering.
      for k in sorted(n.attr):
        update_str(k)
        update_str(n.attr[k].SerializeToString())

    return hasher.hexdigest()[:8]

  def add_to_graph(self, g):
    """Adds this function into the graph g."""
    self._create_definition_if_needed()

    # pylint: disable=protected-access
    # If 'g' has an identical function already, do nothing.
    prev = g._get_function(self.name)
    if prev and (prev._hash_str == self._hash_str):
      return

    # Adds this function into 'g'.
    g._add_function(self)
    # pylint: enable=protected-access

    # Ensures related sub-routines are defined in 'g', too.
    for f in self._sub_functions.values():
      f.add_to_graph(g)

    # Adds its gradient function, too.
    if self._grad_func:
      self._grad_func.add_to_graph(g)

  def __call__(self, *args, **kwargs):
    self.add_to_graph(ops.get_default_graph())
    # Captured tensors are passed as trailing arguments to the call op.
    args = [ops.convert_to_tensor(_) for _ in args] + self._extra_inputs
    ret, op = _call(self._definition.signature, *args, **kwargs)
    if self._shape_func is not None:
      shapes = self._shape_func(op)
      if len(shapes) != len(op.outputs):
        raise ValueError("shape_func produced %d shapes for %d outputs" %
                         (len(shapes), len(op.outputs)))
      for (t, shape) in zip(op.outputs, shapes):
        t.set_shape(shape)
    return ret
def _from_definition(fdef, grad_func=None):
  """Creates a _DefinedFunction initialized from a FunctionDef proto.

  Args:
    fdef: a FunctionDef
    grad_func: a _DefinedFunction or None

  Returns:
    A _DefinedFunction representing fdef
  """
  # The Python callable is only needed to create a FunctionDef. Since we have
  # the FunctionDef here, we don't need to set _DefinedFunction._func (nor do we
  # have access to such a callable here).
  func = None
  argnames = [arg.name for arg in fdef.signature.input_arg]
  input_types = tuple(dtypes.as_dtype(arg.type)
                      for arg in fdef.signature.input_arg)
  func_name = fdef.signature.name
  # Note: FunctionDefs do not include python gradient functions, so if the
  # original _DefinedFunction included one it will not be reflected here.
  python_grad_func = None
  out_names = [arg.name for arg in fdef.signature.output_arg]
  result = _DefinedFunction(func, argnames, input_types, func_name, grad_func,
                            python_grad_func, out_names)
  # pylint: disable=protected-access
  # Pre-populate the lazy fields so _create_definition_if_needed() never
  # tries to call the (absent) Python body.
  result._definition = fdef
  # Captured inputs are added as regular inputs to a function when it's
  # serialized, i.e. any extra inputs from the original function are now
  # included in `result`._args
  result._extra_inputs = []
  result._hash_str = result._create_hash_str(
      result._definition.signature.input_arg,
      result._definition.signature.output_arg,
      result._definition.node_def)
  # pylint: enable=protected-access
  return result
def _from_library(lib):
  """Creates _DefinedFunctions initialized from a FunctionDefLibrary proto.

  This method handles assigning the correct gradient functions to each
  function.

  Args:
    lib: a FunctionDefLibrary

  Returns:
    A list of _DefinedFunctions

  Raises:
    ValueError: `lib` is invalid
  """
  if not lib.function and not lib.gradient:
    return []

  # function name -> FunctionDef proto
  funcs = {fdef.signature.name: fdef for fdef in lib.function}

  # Validate that all references function names have function defs
  for g in lib.gradient:
    if g.function_name not in funcs:
      raise ValueError("FunctionDefLibrary missing '%s' FunctionDef\n%s" %
                       (g.function_name, str(lib)))
    if g.gradient_func not in funcs:
      raise ValueError("FunctionDefLibrary missing '%s' FunctionDef\n%s" %
                       (g.gradient_func, str(lib)))

  # function name -> gradient function name
  func_to_grad = collections.defaultdict(lambda: None)
  # gradient function name -> names of functions having that grad function
  grad_to_funcs = collections.defaultdict(list)

  for gdef in lib.gradient:
    func_to_grad[gdef.function_name] = gdef.gradient_func
    grad_to_funcs[gdef.gradient_func].append(gdef.function_name)

  # Start with functions without gradients: they can be constructed
  # immediately; each constructed function unblocks those that use it as a
  # gradient (topological order over the gradient references).
  ready = [fdef for fdef in lib.function
           if func_to_grad[fdef.signature.name] is None]
  if not ready:
    raise ValueError("FunctionDefLibrary contains cyclic gradient functions!\n"
                     + str(lib))
  # function name -> _DefinedFunction
  initialized = {}

  while ready:
    fdef = ready.pop()
    name = fdef.signature.name

    grad = initialized.get(func_to_grad[name])
    if func_to_grad[name]:
      assert grad
    defined_func = _from_definition(fdef, grad_func=grad)
    initialized[name] = defined_func

    ready.extend(funcs[f] for f in grad_to_funcs[name])

  return initialized.values()
# NOTE: The list needs to be extended when more data types are added.
# Maps each dtype to a short unique string used to key overloaded-function
# instantiations (see _type_list_to_str).
_DTYPE_TO_STR = {
    dtypes.float16: "f16",
    dtypes.float32: "f32",
    dtypes.float64: "f64",
    dtypes.int32: "i32",
    # Bug fix: uint8 previously mapped to "i8", colliding with int8 below.
    # The collision made _type_list_to_str produce identical keys for int8
    # and uint8 signatures, so a cached overload for one dtype could be
    # wrongly reused for the other.
    dtypes.uint8: "u8",
    dtypes.uint16: "u16",
    dtypes.int16: "i16",
    dtypes.int8: "i8",
    dtypes.string: "s",
    dtypes.complex64: "c64",
    dtypes.complex128: "c128",
    dtypes.int64: "i64",
    dtypes.bool: "b",
    dtypes.qint8: "qi8",
    dtypes.quint8: "qu8",
    dtypes.qint16: "qi16",
    dtypes.quint16: "qu16",
    dtypes.qint32: "qi32",
    dtypes.bfloat16: "b16"
}
def _type_list_to_str(types):
  """Encode a sequence of dtypes as a compact string key."""
  unsupported = [t for t in types if t not in _DTYPE_TO_STR]
  if unsupported:
    raise ValueError("Unsupported dtypes: %s" % types)
  return "".join(_DTYPE_TO_STR[t] for t in types)
class _OverloadedFunction(object):
  """_OverloadedFunction encapsulates an overloaded function.

  _OverloadedFunction maintains a mapping from input types to
  instantiated _DefinedFunction in self._overload.
  """

  def __init__(self,
               func,
               argnames,
               func_name=None,
               grad_func=None,
               python_grad_func=None,
               out_names=None,
               **kwargs):
    """Creates _DefinedFunction.

    Args:
      func: A python callable which constructs a tf function body.
      argnames: A list of strings for function argument names.
      func_name: The function name. Defaults to None, in which derives from
        'func'.
      grad_func: This function's gradient function, if not None. Defaults
        to None.
      python_grad_func: A python callable implementing the gradient of
        the function python-side.
      out_names: A list of strings for the function return value names.
      **kwargs: The keyword arguments. **kwargs is passed to every call
        site of this function.

    Raises:
      ValueError: The function definition is invalid.
    """
    self._func = func
    self._argnames = argnames
    self._func_name = func_name
    assert grad_func is None or isinstance(grad_func, _OverloadedFunction)
    self._grad_func = grad_func
    self._python_grad_func = python_grad_func
    self._out_names = out_names
    self._extra_kwargs = kwargs
    # Maps type-key string (see _type_list_to_str) -> _DefinedFunction.
    self._overload = {}

  def instantiate(self, input_types):
    """Instantiate this function given input argument types.

    Args:
      input_types: A list of data types for the inputs.

    Returns:
      _DefinedFunction for the given input types.
    """
    # Stringify the type list.
    key = _type_list_to_str(input_types)
    defined = self._overload.get(key)
    if not defined:
      # If not defined yet, define the function given the input types.
      name = self._func_name
      if name is not None:
        name = "_".join([name, key])
      defined = _DefinedFunction(self._func, self._argnames, input_types, name,
                                 None, self._python_grad_func,
                                 out_names=self._out_names,
                                 **self._extra_kwargs)
      _ = defined.name  # Fully instantiate the function definition.
      if self._grad_func:
        # If _grad_func is given, it is another
        # _OverloadedFunction. We need to instantiate it with the
        # right input types.
        output_types = [
            dtypes.DType(_.type)
            for _ in defined.definition.signature.output_arg
        ]
        # pylint: disable=protected-access
        defined._grad_func = self._grad_func.instantiate(input_types +
                                                         output_types)
        # pylint: enable=protected-access
      self._overload[key] = defined
    return defined

  def __call__(self, *args, **kwargs):
    # Resolve the overload from the actual argument dtypes, then dispatch.
    input_types = []
    args = list(args)
    for (i, x) in enumerate(args):
      x = ops.convert_to_tensor(x)
      if not isinstance(x, ops.Tensor):
        raise ValueError("Expect a Tensor but get ", x)
      input_types.append(x.dtype)
      args[i] = x
    return self.instantiate(input_types)(*args, **kwargs)
class Defun(object):
  """Decorator used to define TensorFlow functions.

  Use this decorator to make a Python function usable directly as a TensorFlow
  function.

  The decorated function must add ops to the default graph and return zero or
  more `Tensor` objects.  Call the decorator with named arguments, one for each
  argument of the function to decorate, with the expected type of the argument
  as value.

  For example if the function to decorate accepts two `tf.float32` arguments
  named `x` and `y`, call the decorator with:

      @Defun(tf.float32, tf.float32)
      def foo(x, y):
        ...

  When you call the decorated function it will add `call` ops to the
  default graph and adds the definition of the function into the
  default graph. Because the addition of the function into the graph
  is deferred, the decorator can be used anywhere in the program.

  Any variables created inside of the function are hoisted into the outer graph.
  Note that the variables are created in the variable scope that was active
  during the first call to the function. Subsequent function calls will refer to
  the same set of variables.

  Definitions of functions are frozen in a graph as soon as the graph is used to
  create a session. Therefore, nodes using the function must be created in the
  graph before the corresponding session is created.

  Example, but also see the [How To on functions](link_needed).

  ```python
  # Defining the function.
  @tf.Defun(tf.float32, tf.float32)
  def MyFunc(x, y):
    return x + y, x - y

  # Building the graph.
  a = tf.Constant([1.0])
  b = tf.Constant([2.0])
  c, d = MyFunc(a, b, name='mycall')
  ```
  """

  def __init__(self, *input_types, **kwargs):
    """Create a `Defun` decorator.

    Args:
      *input_types: A list of `tf.DType`
      **kwargs: Optional keyword arguments, including
         func_name - (optional).  A python string, the name to use to
           declare this `Function` in the graph.

         grad_func - (optional).  A function implementing the gradient
           of the function-to-register.  This is either a
           `_DefinedFunction` or a `Declare` object. The gradient
           function must satisfy the criterion defined in
           function.proto:GradientDef.

         python_grad_func - (optional).  A function implementing the
           gradient of the function python-side. This function must
           take the current op and the gradients w.r.t. its outputs,
           and return the gradients w.r.t. the inputs. That is it must
           implement the interface expected by `tf.RegisterGradient`).
           This will be called by tf.gradients to add the gradient ops
           to the graph. At most one of grad_func and python_grad_func
           can be specified.

         out_names - (optional). A list of strings, one per output
           tensor.

         shape_func - (optional). A function taking the op and returning a list
           of static shapes to set for the function's outputs.
    """
    self._input_types = input_types
    self._func_name = kwargs.pop("func_name", None)
    self._grad_func = kwargs.pop("grad_func", None)
    self._python_grad_func = kwargs.pop("python_grad_func", None)
    self._out_names = kwargs.pop("out_names", None)
    # Any remaining kwargs are forwarded to every call site.
    self._extra_kwargs = kwargs

  def __call__(self, func):
    # Various sanity checks on the callable func.
    if not callable(func):
      raise ValueError("func %s must be callable" % func)

    # Func should not use kwargs and defaults.
    argspec = tf_inspect.getargspec(func)
    if argspec.keywords or argspec.defaults:
      raise ValueError("Functions with argument defaults or keyword "
                       "arguments are not supported.")

    # Computes how many arguments 'func' has.
    min_args = len(argspec.args)
    max_args = min_args
    if argspec.varargs:
      max_args = 1000000
    argnames = argspec.args
    if tf_inspect.ismethod(func):
      # 1st argument is the "class" type.
      min_args -= 1
      argnames = argnames[1:]

    if self._input_types:
      # If Defun is given a list of types for the inputs, the number
      # of input types should be compatible with 'func'.
      num = len(self._input_types)
      if num < min_args or num > max_args:
        # Bug fix: the previous message claimed the function had "fewer
        # arguments" even when too many input types were given; report the
        # actual counts instead.
        raise ValueError(
            "The number of specified input types (%d) is not compatible with "
            "the number of function arguments (%d)." % (num, min_args))
      return _DefinedFunction(func, argnames, self._input_types,
                              self._func_name, self._grad_func,
                              self._python_grad_func,
                              out_names=self._out_names, **self._extra_kwargs)

    # 'func' expects no arguments and input types is an empty list.
    if min_args == 0 and max_args == 0:
      return _DefinedFunction(func, [], [], self._func_name, self._grad_func,
                              self._python_grad_func,
                              out_names=self._out_names, **self._extra_kwargs)

    # Input types are unknown. It's an overloaded function and hence
    # its definition needs to be deferred until it's called.
    return _OverloadedFunction(func, argnames, self._func_name, self._grad_func,
                               self._python_grad_func,
                               out_names=self._out_names, **self._extra_kwargs)
class Declare(object):
  """Declares a TensorFlow function.

  The object represents a TensorFlow function which will be defined
  later during a graph construction.

  For example,
    # Declares a function Foo, which takes a tf.int32 named "n" and a
    # tf.float32 named "x" as inputs and returns a tf.float32 named "z"
    # as its output.
    foo = Declare("Foo", [("n", tf.int32), ("x", tf.float32)],
                  [("z", tf.float32)])

    # Defines a function Bar calls Foo.
    @tf.Defun(tf.float32)
    def Bar(x):
      return foo(6, x)

    # Defines Foo, with output named "z".
    @tf.Defun(tf.int32, tf.float32, out_names=["z"])
    def Foo(n, x):
      ...  # Calculation.
      return result
  """

  def __init__(self, func_name, inputs, outputs):
    """Creates a `Declare` object.

    Args:
      func_name: The name of the function.
      inputs: A list of (name, data type) pairs of function arguments.
      outputs: A list of (name, data type) pairs of function return values.
    """
    self._sig = op_def_pb2.OpDef()
    self._sig.name = func_name

    def _to_argdef_list(args):
      # Argument names must be unique within a signature.
      names = [n for n, t in args]
      if len(names) != len(set(names)):
        raise ValueError("Expected names to all be unique: %s" % str(names))
      return [op_def_pb2.OpDef.ArgDef(type=t.as_datatype_enum, name=n)
              for n, t in args]

    self._sig.input_arg.extend(_to_argdef_list(inputs))
    self._sig.output_arg.extend(_to_argdef_list(outputs))

  def __call__(self, *inputs, **kwargs):
    # Emit the call op; return only the output tensor(s), dropping the op.
    inputs = [ops.convert_to_tensor(_) for _ in inputs]
    return _call(self._sig, *inputs, **kwargs)[0]
| |
import numpy as np
from numpy.testing import assert_almost_equal
from menpo.math import eigenvalue_decomposition, pca, ipca
# Positive semi-definite matrix
cov_matrix = np.array([[3, 1], [1, 3]])
# Data values taken from:
# http://www.cs.otago.ac.nz/cosc453/student_tutorials/principal_components.pdf
# Tested values are equal
large_samples_data_matrix = np.array(
    [
        [2.5, 2.4],
        [0.5, 0.7],
        [2.2, 2.9],
        [1.9, 2.2],
        [3.1, 3.0],
        [2.3, 2.7],
        [2.0, 1.6],
        [1.0, 1.1],
        [1.5, 1.6],
        [1.1, 0.9],
    ]
)
# Expected results for the samples-as-rows orientation ("_s" suffix).
centered_eigenvectors_s = np.array([[0.6778734, 0.73517866], [-0.73517866, 0.6778734]])
non_centered_eigenvectors_s = np.array(
    [[0.68647784, 0.72715072], [-0.72715072, 0.68647784]]
)
mean_vector_s = np.array([1.81, 1.91])
eigenvalues_no_centre_s = np.array([8.97738481, 0.04928186])
eigenvalues_centered_s = np.array([1.28402771, 0.0490834])
# Expected results for the features-as-rows (transposed) orientation
# ("_f" suffix).
centered_eigenvectors_f = np.array(
    [
        [
            -0.09901475,
            0.19802951,
            0.69310328,
            0.29704426,
            -0.09901475,
            0.39605902,
            -0.39605902,
            0.09901475,
            0.09901475,
            -0.19802951,
        ]
    ]
)
non_centered_eigenvectors_f = np.array(
    [
        [
            0.38507927,
            0.09481302,
            0.40261598,
            0.32307722,
            0.4794398,
            0.39407387,
            0.28217662,
            0.16535718,
            0.24399096,
            0.15681507,
        ],
        [
            -0.25575629,
            0.17561812,
            0.58718113,
            0.19319469,
            -0.29239933,
            0.27184299,
            -0.5344514,
            0.04200527,
            0.01146941,
            -0.27333287,
        ],
    ]
)
mean_vector_f = np.array([2.45, 0.6, 2.55, 2.05, 3.05, 2.5, 1.8, 1.05, 1.55, 1.0])
eigenvalues_no_centre_f = np.array([80.79646326, 0.44353674])
eigenvalues_centered_f = np.array([0.51])
def test_pcd_samples_nocentre():
    # With centre=False the returned mean must be the zero vector.
    eigvecs, eigvals, mean = pca(large_samples_data_matrix, centre=False)
    assert_almost_equal(eigvals, eigenvalues_no_centre_s)
    assert_almost_equal(eigvecs, non_centered_eigenvectors_s)
    assert_almost_equal(mean, [0.0, 0.0])
def test_pcd_samples_yescentre():
    # Centred PCA must report the data mean alongside the decomposition.
    eigvecs, eigvals, mean = pca(large_samples_data_matrix, centre=True)
    assert_almost_equal(eigvals, eigenvalues_centered_s)
    assert_almost_equal(eigvecs, centered_eigenvectors_s)
    assert_almost_equal(mean, mean_vector_s)
def test_pcd_features_nocentre():
    # Transposed input: features along the first axis.
    eigvecs, eigvals, mean = pca(large_samples_data_matrix.T, centre=False)
    assert_almost_equal(eigvals, eigenvalues_no_centre_f)
    assert_almost_equal(eigvecs, non_centered_eigenvectors_f)
    assert_almost_equal(mean, np.zeros(10))
def test_pcd_features_nocentre_inplace():
    # Copy first: inplace=True destructively modifies the input matrix.
    data = large_samples_data_matrix.T.copy()
    eigvecs, eigvals, mean = pca(data, centre=False, inplace=True)
    assert_almost_equal(eigvals, eigenvalues_no_centre_f)
    assert_almost_equal(eigvecs, non_centered_eigenvectors_f)
    assert_almost_equal(mean, np.zeros(10))
def test_pcd_features_yescentre():
    eigvecs, eigvals, mean = pca(large_samples_data_matrix.T, centre=True)
    assert_almost_equal(eigvals, eigenvalues_centered_f)
    assert_almost_equal(eigvecs, centered_eigenvectors_f)
    assert_almost_equal(mean, mean_vector_f)
def test_eigenvalue_decomposition_default_epsilon():
    eigvecs, eigvals = eigenvalue_decomposition(cov_matrix)
    assert_almost_equal(eigvals, [4.0, 2.0])
    r = np.sqrt(2.0) / 2.0
    assert_almost_equal(eigvecs, [[r, -r], [r, r]])
def test_eigenvalue_decomposition_large_epsilon():
    """A large eps threshold filters out the smaller eigenpair."""
    vecs, vals = eigenvalue_decomposition(cov_matrix, eps=0.5)
    assert_almost_equal(vals, [4.0])
    r = np.sqrt(2.0) / 2.0
    assert_almost_equal(vecs, [[r], [r]])
def test_ipca_samples_yescentre():
    """Incremental PCA (centred) over two halves matches batch PCA."""
    half = large_samples_data_matrix.shape[0] // 2
    first = large_samples_data_matrix[:half, :]
    second = large_samples_data_matrix[half:, :]
    evecs_a, evals_a, mean_a = pca(first, centre=True)
    inc_evecs, inc_evals, inc_mean = ipca(
        second, evecs_a, evals_a, half, m_a=mean_a)
    batch_evecs, batch_evals, batch_mean = pca(
        large_samples_data_matrix, centre=True)
    # Eigenvectors are compared in absolute value: signs may flip.
    assert_almost_equal(np.abs(inc_evecs), np.abs(batch_evecs))
    assert_almost_equal(inc_evals, batch_evals)
    assert_almost_equal(inc_mean, batch_mean)
def test_ipca_samples_nocentre():
    """Incremental PCA (uncentred) over two halves matches batch PCA."""
    half = large_samples_data_matrix.shape[0] // 2
    first = large_samples_data_matrix[:half, :]
    second = large_samples_data_matrix[half:, :]
    evecs_a, evals_a, mean_a = pca(first, centre=False)
    inc_evecs, inc_evals, inc_mean = ipca(
        second, evecs_a, evals_a, half, m_a=mean_a)
    batch_evecs, batch_evals, batch_mean = pca(
        large_samples_data_matrix, centre=False)
    # Eigenvectors are compared in absolute value: signs may flip.
    assert_almost_equal(np.abs(inc_evecs), np.abs(batch_evecs))
    assert_almost_equal(inc_evals, batch_evals)
    assert_almost_equal(inc_mean, batch_mean)
def test_ipca_features_yescentre():
    """Incremental PCA (centred) on stacked feature vectors matches batch."""
    stacked = np.vstack((large_samples_data_matrix.T,
                         large_samples_data_matrix.T))
    half = stacked.shape[0] // 2
    evecs_a, evals_a, mean_a = pca(stacked[:half, :], centre=True)
    inc_evecs, inc_evals, inc_mean = ipca(
        stacked[half:, :], evecs_a, evals_a, half, m_a=mean_a)
    batch_evecs, batch_evals, batch_mean = pca(stacked, centre=True)
    # Eigenvectors are compared in absolute value: signs may flip.
    assert_almost_equal(np.abs(inc_evecs), np.abs(batch_evecs))
    assert_almost_equal(inc_evals, batch_evals)
    assert_almost_equal(inc_mean, batch_mean)
def test_ipca_features_nocentre():
    """Incremental PCA (uncentred) on stacked feature vectors matches batch."""
    stacked = np.vstack((large_samples_data_matrix.T,
                         large_samples_data_matrix.T))
    half = stacked.shape[0] // 2
    evecs_a, evals_a, mean_a = pca(stacked[:half, :], centre=False)
    inc_evecs, inc_evals, inc_mean = ipca(
        stacked[half:, :], evecs_a, evals_a, half, m_a=mean_a)
    batch_evecs, batch_evals, batch_mean = pca(stacked, centre=False)
    # Eigenvectors are compared in absolute value: signs may flip.
    assert_almost_equal(np.abs(inc_evecs), np.abs(batch_evecs))
    assert_almost_equal(inc_evals, batch_evals)
    assert_almost_equal(inc_mean, batch_mean)
| |
#!/usr/bin/env python
"""This script does x.
Example:
Attributes:
Todo:
"""
import os
import sys
import glob
import numpy as np
import pandas as pd
import radical.analytics as ra
def initialize_entity(ename=None):
    """Return an empty, column-oriented container for the given entity.

    The container maps each property name -- plus one key per pilot/unit
    duration label taken from the global ``pdm``/``udm`` models -- to an
    empty list, ready to be filled and turned into a pandas DataFrame.

    Args:
        ename (str): one of 'session', 'pilot' or 'unit'.

    Returns:
        dict: the empty data structure of the requested entity.

    Exits the process with status 1 when *ename* is unknown.
    """
    entities = {
        'session': {'sid': [],            # Session ID
                    'session': [],        # RA session objects
                    'experiment': [],     # Experiment ID
                    'TTC': [],            # Time to completion
                    'nhost': [],          # #hosts for CU execution
                    'nunit': [],          # #units
                    'nunit_done': [],     # #done units
                    'nunit_failed': [],   # #failed units
                    'npilot': [],         # #pilots
                    'npilot_active': [],  # #active pilots
                    'ncore': [],          # #cores
                    'ncore_active': []},  # #active cores
        'pilot': {'pid': [],          # Pilot ID
                  'sid': [],          # Session ID
                  'hid': [],          # Host ID
                  'ncore': [],        # #cores
                  'nunit': [],        # #units executed
                  'experiment': []},  # Experiment ID
        'unit': {'uid': [],          # Unit ID
                 'sid': [],          # Session ID
                 'pid': [],          # Pilot ID
                 'hid': [],          # Host ID
                 'experiment': []}}  # Experiment ID
    # Add the duration label of each state of each entity.
    # NOTE(review): pdm and udm are globals defined in the __main__ block;
    # this helper is only usable from this script.
    for duration in pdm.keys():
        entities['session'][duration] = []
        entities['pilot'][duration] = []
    for duration in udm.keys():
        entities['session'][duration] = []
        entities['unit'][duration] = []
    # Return the empty data structure of the requested entity.
    if ename in ['session', 'pilot', 'unit']:
        return entities[ename]
    # Fail fast on unknown entity names.
    print('Cannot initialize entity %s' % ename)
    sys.exit(1)
def load_df(ename=None):
    """Load the stored DataFrame of the given entity from its CSV file.

    Args:
        ename (str): one of 'session', 'pilot' or 'unit'.

    Returns:
        pd.DataFrame: the stored frame when the CSV exists, otherwise an
        empty frame with the entity's columns.

    Exits the process with status 1 when *ename* is unknown.
    """
    # Guard clause: fail fast on unknown entity names.
    if ename not in ['session', 'pilot', 'unit']:
        print('Cannot initialize entity %s' % ename)
        sys.exit(1)
    # Start from an empty frame with the right columns; replace it with
    # the stored CSV when one exists.
    df = pd.DataFrame(initialize_entity(ename=ename))
    if os.path.isfile(csvs[ename]):
        df = pd.read_csv(csvs[ename], index_col=0)
    return df
def store_df(new_df, stored=pd.DataFrame(), ename=None):
    """Append *new_df* to the previously stored rows and write the CSV.

    Args:
        new_df (pd.DataFrame): newly collected rows.
        stored (pd.DataFrame): rows already on disk; *new_df* is appended
            after them. NOTE: the shared default frame is safe because it
            is never mutated (``append`` returns a new frame).
        ename (str): one of 'session', 'pilot' or 'unit'.

    Exits the process with status 1 when *ename* is unknown.
    """
    # Skip storing if no new data are passed.
    if new_df.empty:
        print('WARNING: attempting to store an empty DF.')
        return
    if ename == 'session':
        # Drop the RA session objects: they cannot be serialized to CSV.
        new_sessions = new_df.drop('session', axis=1)
        if stored.empty:
            sessions = new_sessions
        else:
            sessions = stored.append(new_sessions)
        sessions.to_csv(csvs[ename])
    elif ename in ['pilot', 'unit']:
        if stored.empty:
            df = new_df
        else:
            df = stored.append(new_df)
        df.reset_index(inplace=True, drop=True)
        df.to_csv(csvs[ename])
    else:
        print('Cannot store DF to %s' % ename)
        sys.exit(1)
def parse_osg_hostid(hostid):
    '''
    Heuristic: eliminate node-specific information from a hostID string
    of the form '<hostname>:<ip>' so that hosts belonging to the same
    cluster map to the same domain identifier.

    Returns the derived domain string.
    '''
    # Hoisted: built once instead of once per character in the loops below.
    digits = set('0123456789')
    # Split domain name from IP.
    host = hostid.split(':')
    # Split domain name into words.
    words = host[0].split('.')
    # Get the words in the domain name that do not contain numbers. Most
    # hostnames have no number but there are exceptions.
    literals = [w for w in words if not any(ch in digits for ch in w)]
    # Check for exceptions:
    # a. every word of the domain name has a number
    if len(literals) == 0:
        # Some hostnames use '-' instead of '.' as word separator. The
        # parser would have returned a single word and any character of
        # that word may have a number.
        if '-' in host[0]:
            words = host[0].split('-')
            literals = [w for w in words if not any(ch in digits for ch in w)]
            # FIXME: We do not check the size of literals.
            domain = '.'.join(literals)
        # Some hostnames may have only the name of the node. We have to
        # keep the IP to decide later on whether two nodes are likely to
        # belong to the same cluster.
        elif 'nod' in host[0]:
            domain = '.'.join(host)
        # FIXME: ad hoc parsing
        elif 'n0' in host[0]:
            domain = 'n0x.10.2.x.x'
        # The hostname is identified by an alphanumeric string
        else:
            domain = '.'.join(host)
    # Some hostnames DO have numbers in their name: drop only the first
    # (node-specific) word.
    elif len(literals) == 1:
        domain = '.'.join(words[1:])
    # Some hostnames are just simple to parse.
    else:
        domain = '.'.join(literals)
    # FIXME: When everything else fails, ad hoc manipulations of the
    # domain string.
    if 'its.osg' in domain:
        domain = 'its.osg'
    elif 'nodo' in domain:
        domain = 'nodo'
    elif 'bu.edu' in domain:
        domain = 'bu.edu'
    return domain
def load_pilots(sid, exp, sra_pilots, pdm, pu_rels):
    """Collect properties and durations of every pilot of a session.

    Pilots already present in the pilot CSV are skipped; new pilots are
    appended to the CSV and the full (old + new) DataFrame is returned.

    Args:
        sid: session ID.
        exp: experiment ID.
        sra_pilots: RA session filtered on etype='pilot'.
        pdm: pilot duration model mapping label -> [start, end] states.
        pu_rels: pilot -> list-of-units relationship dict.

    Returns:
        pd.DataFrame: one row per stored pilot (all sessions, not just sid).
    """
    sys.stdout.write('\n%s --- %s' % (exp, sid))
    ps = initialize_entity(ename='pilot')
    # Did we already store pilots of this session?
    stored_pilots = load_df(ename='pilot')
    stored_pids = []
    if stored_pilots['sid'].any():
        stored_pilots_sid = stored_pilots.loc[
            stored_pilots['sid'] == sid].copy()
        stored_pids = stored_pilots_sid['pid'].values.tolist()
    # Derive properties and duration for each pilot.
    for pid in sorted(sra_pilots.list('uid')):
        # Skip session if its pilots have been already stored.
        if pid in stored_pids:
            sys.stdout.write('\n%s already in %s' % (pid, csvs['pilot']))
            continue
        # Pilot properties.
        sys.stdout.write('\n' + pid + ': ')
        ps['pid'].append(pid)
        ps['sid'].append(sid)
        ps['experiment'].append(exp)
        # Host ID: normalized so nodes of one cluster share one ID.
        pentity = sra_pilots.get(uid=pid)[0]
        if pentity.cfg['hostid']:
            ps['hid'].append(parse_osg_hostid(pentity.cfg['hostid']))
        else:
            ps['hid'].append(None)
        # Number of cores of the pilot.
        ps['ncore'].append(pentity.description['cores'])
        # Number of units executed.
        ps['nunit'].append(len(pu_rels[pid]))
        # Pilot durations. Columns are created lazily so a missing model
        # entry does not break previously initialized frames.
        for duration in pdm.keys():
            if duration not in ps.keys():
                ps[duration] = []
            try:
                ps[duration].append(pentity.duration(pdm[duration]))
                sys.stdout.write(' %s' % duration)
            # NOTE(review): bare except deliberately records None for any
            # duration that cannot be computed (best-effort collection).
            except:
                print '\nWARNING: Failed to calculate duration %s' % \
                    duration
                ps[duration].append(None)
    # Store pilots DF to csv and reload into memory to return the complete
    # DF for the given sid.
    if ps['pid']:
        pilots = pd.DataFrame(ps)
        store_df(pilots, stored=stored_pilots, ename='pilot')
        stored_pilots = load_df(ename='pilot')
        print '\nstored in %s.' % csvs['pilot']
    # Returns the DF of the stored pilots if no new pilots have been added;
    # the DF with the old and new pilots otherwise.
    return stored_pilots
def load_units(sid, exp, sra_units, udm, pilots, sra, pu_rels):
    """Collect properties and durations of every unit of a session.

    Units already present in the unit CSV are skipped; new units are
    appended to the CSV and the full (old + new) DataFrame is returned.

    Args:
        sid: session ID.
        exp: experiment ID.
        sra_units: RA session filtered on etype='unit'.
        udm: unit duration model mapping label -> [start, end] states.
        pilots: pilots DataFrame (used to look up the executing host).
        sra: full RA session object (currently unused here).
        pu_rels: pilot -> list-of-units relationship dict.

    Returns:
        pd.DataFrame: one row per stored unit (all sessions, not just sid).
    """
    sys.stdout.write('\n%s --- %s' % (exp, sid))
    us = initialize_entity(ename='unit')
    # Did we already store units of this session?
    stored_units = load_df(ename='unit')
    stored_uids = []
    if stored_units['sid'].any():
        stored_units_sid = stored_units.loc[
            stored_units['sid'] == sid].copy()
        stored_uids = stored_units_sid['uid'].values.tolist()
    # Derive properties and duration for each unit.
    for uid in sorted(sra_units.list('uid')):
        # Skip session if its pilots have been already stored.
        if uid in stored_uids:
            sys.stdout.write('\n%s already stored in %s' %
                             (uid, csvs['unit']))
            continue
        # Properties.
        sys.stdout.write('\n' + uid + ': ')
        us['uid'].append(uid)
        us['sid'].append(sid)
        us['experiment'].append(exp)
        # Durations. Columns are created lazily.
        uentity = sra_units.get(uid=uid)[0]
        for duration in udm.keys():
            if duration not in us.keys():
                us[duration] = []
            try:
                # TODO: this is a temporary fix for inconsistent state model.
                if duration == 'U_AGENT_EXECUTING':
                    if 'AGENT_STAGING_OUTPUT_PENDING' in \
                            uentity.states.keys() and \
                            'FAILED' in uentity.states.keys():
                        us[duration].append(None)
                        continue
                us[duration].append(uentity.duration(udm[duration]))
                sys.stdout.write(' %s' % duration)
            # NOTE(review): bare except deliberately records None for any
            # duration that cannot be computed (best-effort collection).
            except:
                print '\nWARNING: Failed to calculate duration %s' % \
                    duration
                us[duration].append(None)
        # pilot and host on which the unit has been executed.
        punit = [key[0] for key in pu_rels.items() if uid in key[1]][0]
        hid = pilots[(pilots['sid'] == sid) &
                     (pilots['pid'] == punit)]['hid'].tolist()[0]
        us['pid'].append(punit)
        us['hid'].append(hid)
    # Store unit DF to csv and reload into memory to return the complete
    # DF for the given sid.
    if us['pid']:
        units = pd.DataFrame(us)
        store_df(units, stored=stored_units, ename='unit')
        stored_units = load_df(ename='unit')
        print '\nstored in %s.' % csvs['unit']
    # Returns the DF of the stored pilots if no new pilots have been added;
    # the DF with the old and new pilots otherwise.
    return stored_units
def load_session(sid, exp, sra_session, sra_pilots, sra_units,
                 pdm, udm, pilots, units):
    """Derive session-level properties and total durations and store them.

    Args:
        sid: session ID.
        exp: experiment ID.
        sra_session: full RA session object.
        sra_pilots: RA session filtered on etype='pilot'.
        sra_units: RA session filtered on etype='unit'.
        pdm / udm: pilot / unit duration models.
        pilots: pilots DataFrame (used for host and core counts).
        units: units DataFrame (currently unused here).

    Returns:
        bool: True when a new row was stored, False when sid was already
        present in the session CSV.
    """
    # IF this session has been already stored get out, nothing to do here.
    stored_sessions = load_df(ename='session')
    if sid in stored_sessions.index.tolist():
        sys.stdout.write('%s already stored in %s' % (sid, csvs['session']))
        return False
    sys.stdout.write('\n%s --- %s' % (exp, sid))
    s = initialize_entity(ename='session')
    # Session properties: pilots and units.
    # sp = sra_session.filter(etype='pilot', inplace=False)
    # su = sra_session.filter(etype='unit', inplace=False)
    s['sid'].append(sid)
    # RA session objects cannot be serialized, so store None.
    s['session'].append(None)
    s['experiment'].append(exp)
    s['TTC'].append(sra_session.ttc)
    s['nhost'].append(len(pilots.loc[pilots['sid'] == sid]['hid'].unique()))
    s['nunit'].append(len(sra_units.get()))
    s['npilot'].append(len(sra_pilots.get()))
    s['npilot_active'].append(len(sra_pilots.timestamps(state='PMGR_ACTIVE')))
    s['nunit_done'].append(len(sra_units.timestamps(state='DONE')))
    s['nunit_failed'].append(len(sra_units.timestamps(state='FAILED')))
    # Number of cores requested and used by the session's pilots. Make a copy of
    # the pilots DF with only the columns we need to limit memory overhead.
    pcores = pilots[pilots.sid == sid][['P_LRMS_RUNNING', 'ncore']]
    s['ncore'].append(pcores.ncore.sum())
    # A pilot is considered active when it spent time in LRMS_RUNNING.
    s['ncore_active'].append(pcores[pcores.P_LRMS_RUNNING > 0].ncore.sum())
    pcores = None
    # Pilots total durations. NOTE: s initialization guarantees
    # the existence of duration keys.
    for duration in pdm.keys():
        s[duration].append(sra_pilots.duration(pdm[duration]))
    # Units total durations. NOTE: s initialization guarantees the
    # existence of duration keys.
    for duration in udm.keys():
        s[duration].append(sra_units.duration(udm[duration]))
    # Store session.
    session = pd.DataFrame(s, index=[sid])
    store_df(session, stored=stored_sessions, ename='session')
    print '\nstored in %s' % csvs['session']
    return True
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    # Root directory holding one sub-directory per experiment.
    datadir = '../data/'
    experiment_tag = 'exp'
    # Global constants
    # File names where to save the DF of each entity of each session.
    csvs = {'session': '%ssessions.csv' % datadir,
            'pilot'  : '%spilots.csv' % datadir,
            'unit'   : '%sunits.csv' % datadir}
    # Model of pilot durations: label -> [start state, end state(s)].
    pdm = {'P_PMGR_SCHEDULING': ['NEW',
                                 'PMGR_LAUNCHING_PENDING'],
           'P_PMGR_QUEUING'   : ['PMGR_LAUNCHING_PENDING',
                                 'PMGR_LAUNCHING'],
           'P_LRMS_SUBMITTING': ['PMGR_LAUNCHING',
                                 'PMGR_ACTIVE_PENDING'],
           'P_LRMS_QUEUING'   : ['PMGR_ACTIVE_PENDING',
                                 'PMGR_ACTIVE'],
           'P_LRMS_RUNNING'   : ['PMGR_ACTIVE',
                                 ['DONE', 'CANCELED', 'FAILED']]}
    # Model of unit durations: label -> [start state, end state].
    # Staging-related durations are currently disabled (commented out).
    udm = {'U_UMGR_SCHEDULING' : ['NEW',
                                  'UMGR_SCHEDULING_PENDING'],
           'U_UMGR_BINDING'    : ['UMGR_SCHEDULING_PENDING',
                                  'UMGR_SCHEDULING'],
           # 'I_UMGR_SCHEDULING'  : ['UMGR_SCHEDULING',
           #                         'UMGR_STAGING_INPUT_PENDING'],
           # 'I_UMGR_QUEING'      : ['UMGR_STAGING_INPUT_PENDING',
           #                         'UMGR_STAGING_INPUT'],
           # 'I_AGENT_SCHEDULING' : ['UMGR_STAGING_INPUT',
           #                         'AGENT_STAGING_INPUT_PENDING'],
           # 'I_AGENT_QUEUING'    : ['AGENT_STAGING_INPUT_PENDING',
           #                         'AGENT_STAGING_INPUT'],
           # 'I_AGENT_TRANSFERRING': ['AGENT_STAGING_INPUT',
           #                          'AGENT_SCHEDULING_PENDING'],
           'U_AGENT_QUEUING'     : ['AGENT_SCHEDULING_PENDING',
                                    'AGENT_SCHEDULING'],
           'U_AGENT_SCHEDULING'  : ['AGENT_SCHEDULING',
                                    'AGENT_EXECUTING_PENDING'],
           'U_AGENT_QUEUING_EXEC': ['AGENT_EXECUTING_PENDING',
                                    'AGENT_EXECUTING'],
           'U_AGENT_EXECUTING'   : ['AGENT_EXECUTING',
                                    'AGENT_STAGING_OUTPUT_PENDING']}
    # 'O_AGENT_QUEUING'      : ['AGENT_STAGING_OUTPUT_PENDING',
    #                           'AGENT_STAGING_OUTPUT'],
    # 'O_UMGR_SCHEDULING'    : ['AGENT_STAGING_OUTPUT',
    #                           'UMGR_STAGING_OUTPUT_PENDING'],
    # 'O_UMGR_QUEUING'       : ['UMGR_STAGING_OUTPUT_PENDING',
    #                           'UMGR_STAGING_OUTPUT'],
    # 'O_UMGR_TRANSFERRING'  : ['UMGR_STAGING_OUTPUT',
    #                           ['DONE', 'CANCELED', 'FAILED']]}
    # Get sessions ID, experiment number and RA object. Assume:
    # datadir/exp*/sessiondir/session.json.
    for path in glob.glob('%s/%s*' % (datadir, experiment_tag)):
        for sdir in glob.glob('%s/*' % path):
            # Session ID and session experiment.
            sid = glob.glob('%s/*.json' % sdir)[0].split('/')[-1:][0][:-5]
            exp = path.split('/')[-1:][0]
            # Consistency check: SID of json file name is the same SID of
            # directory name.
            if sid == sdir.split('/')[-1:][0]:
                # RA objects cannot be serialized: every RA session object
                # needs to be constructed at every run.
                sra_session = ra.Session(sid, 'radical.pilot', src=sdir)
                # Pilot-unit relationship dictionary
                pu_rels = sra_session.describe('relations', ['pilot', 'unit'])
                # Pilots of sra: derive properties and durations.
                print '\n\n%s -- %s -- Loading pilots:' % (exp, sid)
                sra_pilots = sra_session.filter(etype='pilot', inplace=False)
                pilots = load_pilots(sid, exp, sra_pilots, pdm, pu_rels)
                # Units of sra: derive properties and durations.
                print '\n\n%s -- %s -- Loading units:' % (exp, sid)
                sra_units = sra_session.filter(etype='unit', inplace=False)
                units = load_units(sid, exp, sra_units, udm, pilots,
                                   sra_session, pu_rels)
                # Session of sra: derive properties and total durations.
                print '\n\n%s -- %s -- Loading session:\n' % (exp, sid)
                load_session(sid, exp, sra_session, sra_pilots, sra_units,
                             pdm, udm, pilots, units)
            else:
                error = 'ERROR: session folder and json file name differ'
                print '%s: %s != %s' % (error, sdir, sid)
| |
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
test_floating_ip
----------------------------------
Functional tests for floating IP resource.
"""
import random
import string
import time
from novaclient import exceptions as nova_exc
from shade import openstack_cloud
from shade import meta
from shade.exc import OpenStackCloudException
from shade.exc import OpenStackCloudTimeout
from shade.tests import base
from shade.tests.functional.util import pick_flavor, pick_image
def _iterate_timeout(timeout, message):
start = time.time()
count = 0
while (timeout is None) or (time.time() < start + timeout):
count += 1
yield count
time.sleep(2)
raise OpenStackCloudTimeout(message)
class TestFloatingIP(base.TestCase):
    """Functional tests for attaching/detaching floating IPs to servers."""
    timeout = 60

    # Generate a random name for these tests
    new_item_name = 'test_' + ''.join(
        random.choice(string.ascii_lowercase) for _ in range(5))

    def setUp(self):
        super(TestFloatingIP, self).setUp()
        # Shell should have OS-* envvars from openrc, typically loaded by job
        self.cloud = openstack_cloud()
        self.nova = self.cloud.nova_client
        if self.cloud.has_service('network'):
            self.neutron = self.cloud.neutron_client
        self.flavor = pick_flavor(self.nova.flavors.list())
        if self.flavor is None:
            self.assertFalse('no sensible flavor available')
        self.image = pick_image(self.nova.images.list())
        if self.image is None:
            self.assertFalse('no sensible image available')
        self.addCleanup(self._cleanup_network)
        self.addCleanup(self._cleanup_servers)

    def _cleanup_network(self):
        """Delete routers, subnets and networks created by these tests."""
        exception_list = list()
        # Delete stale networks as well as networks created for this test
        if self.cloud.has_service('network'):
            # Delete routers
            for r in self.cloud.list_routers():
                try:
                    if r['name'].startswith(self.new_item_name):
                        # ToDo: update_router currently won't allow removing
                        # external_gateway_info
                        router = {
                            'external_gateway_info': None
                        }
                        self.neutron.update_router(
                            router=r['id'], body={'router': router})
                        # ToDo: Shade currently doesn't have methods for this
                        for s in self.cloud.list_subnets():
                            if s['name'].startswith(self.new_item_name):
                                try:
                                    self.neutron.remove_interface_router(
                                        router=r['id'],
                                        body={'subnet_id': s['id']})
                                except Exception:
                                    pass
                        self.cloud.delete_router(name_or_id=r['id'])
                except Exception as e:
                    exception_list.append(e)
                    continue
            # Delete subnets
            for s in self.cloud.list_subnets():
                if s['name'].startswith(self.new_item_name):
                    try:
                        self.cloud.delete_subnet(name_or_id=s['id'])
                    except Exception as e:
                        exception_list.append(e)
                        continue
            # Delete networks
            for n in self.cloud.list_networks():
                if n['name'].startswith(self.new_item_name):
                    try:
                        self.cloud.delete_network(name_or_id=n['id'])
                    except Exception as e:
                        exception_list.append(e)
                        continue
        if exception_list:
            # Raise an error: we must make users aware that something went
            # wrong. BUG FIX: str.join() requires strings, but the list
            # holds Exception objects -- joining them raised a TypeError
            # and masked the collected errors.
            raise OpenStackCloudException(
                '\n'.join(str(e) for e in exception_list))

    def _cleanup_servers(self):
        """Delete stale servers as well as servers created by this test."""
        exception_list = list()
        # Delete stale servers as well as server created for this test
        for i in self.nova.servers.list():
            if i.name.startswith(self.new_item_name):
                self.nova.servers.delete(i)
                for _ in _iterate_timeout(
                        self.timeout, "Timeout deleting servers"):
                    try:
                        self.nova.servers.get(server=i)
                    except nova_exc.NotFound:
                        break
                    except Exception as e:
                        exception_list.append(e)
                        continue
        if exception_list:
            # Raise an error: we must make users aware that something went
            # wrong. BUG FIX: stringify exceptions before joining (see
            # _cleanup_network).
            raise OpenStackCloudException(
                '\n'.join(str(e) for e in exception_list))

    def _cleanup_ips(self, ips):
        """Delete the given floating IPs, collecting any errors."""
        exception_list = list()
        for ip in ips:
            try:
                self.cloud.delete_floating_ip(ip)
            except Exception as e:
                exception_list.append(e)
                continue
        if exception_list:
            # Raise an error: we must make users aware that something went
            # wrong. BUG FIX: stringify exceptions before joining (see
            # _cleanup_network).
            raise OpenStackCloudException(
                '\n'.join(str(e) for e in exception_list))

    def _setup_networks(self):
        """Create net/subnet/router and pick the NIC for new servers."""
        if self.cloud.has_service('network'):
            # Create a network
            self.test_net = self.cloud.create_network(
                name=self.new_item_name + '_net')
            # Create a subnet on it
            self.test_subnet = self.cloud.create_subnet(
                subnet_name=self.new_item_name + '_subnet',
                network_name_or_id=self.test_net['id'],
                cidr='172.24.4.0/24',
                enable_dhcp=True
            )
            # Create a router
            self.test_router = self.cloud.create_router(
                name=self.new_item_name + '_router')
            # Attach the router to an external network
            ext_nets = self.cloud.search_networks(
                filters={'router:external': True})
            self.cloud.update_router(
                name_or_id=self.test_router['id'],
                ext_gateway_net_id=ext_nets[0]['id'])
            # Attach the router to the internal subnet
            self.neutron.add_interface_router(
                router=self.test_router['id'],
                body={'subnet_id': self.test_subnet['id']})
            # Select the network for creating new servers
            self.nic = {'net-id': self.test_net['id']}
        else:
            # ToDo: remove once we have list/get methods for nova networks
            nets = self.cloud.nova_client.networks.list()
            self.nic = {'net-id': nets[0].id}

    def test_add_auto_ip(self):
        """A created server eventually gets an external IPv4 attached."""
        self._setup_networks()
        new_server = self.cloud.create_server(
            wait=True, name=self.new_item_name + '_server',
            image=self.image,
            flavor=self.flavor, nics=[self.nic])
        # ToDo: remove the following iteration when create_server waits for
        # the IP to be attached
        ip = None
        for _ in _iterate_timeout(
                self.timeout, "Timeout waiting for IP address to be attached"):
            ip = meta.get_server_external_ipv4(self.cloud, new_server)
            if ip is not None:
                break
            new_server = self.cloud.get_server(new_server.id)
        self.addCleanup(self._cleanup_ips, [ip])

    def test_detach_ip_from_server(self):
        """A floating IP attached to a server can be detached again."""
        self._setup_networks()
        new_server = self.cloud.create_server(
            wait=True, name=self.new_item_name + '_server',
            image=self.image,
            flavor=self.flavor, nics=[self.nic])
        # ToDo: remove the following iteration when create_server waits for
        # the IP to be attached
        ip = None
        for _ in _iterate_timeout(
                self.timeout, "Timeout waiting for IP address to be attached"):
            ip = meta.get_server_external_ipv4(self.cloud, new_server)
            if ip is not None:
                break
            new_server = self.cloud.get_server(new_server.id)
        self.addCleanup(self._cleanup_ips, [ip])
        f_ip = self.cloud.get_floating_ip(
            id=None, filters={'floating_ip_address': ip})
        self.cloud.detach_ip_from_server(
            server_id=new_server.id, floating_ip_id=f_ip['id'])
| |
"""Encapsulates a text product holding METARs."""
import re
from datetime import timezone, timedelta
try:
from zoneinfo import ZoneInfo # type: ignore
except ImportError:
from backports.zoneinfo import ZoneInfo
from metar.Metar import Metar
from metar.Metar import ParserError as MetarParserError
from pyiem.nws.product import TextProduct
from pyiem.observation import Observation
from pyiem.reference import TRACE_VALUE, TWEET_CHARS
from pyiem import datatypes
from pyiem.util import drct2text, LOG
# Matches NIL ("no data") reports within a METAR collective.
NIL_RE = re.compile(r"[\s\n]NIL")
# Extracts the unparsed groups from a metar-library ParserError message.
ERROR_RE = re.compile("Unparsed groups in body '(?P<msg>.*)' while processing")
# Present-weather indicators for tornado and funnel cloud.
TORNADO_RE = re.compile(r" \+FC |TORNADO")
FUNNEL_RE = re.compile(r" FC |FUNNEL")
# Match what looks like SA formatted messages
SA_RE = re.compile(r"^[A-Z]{3}\sSA")
# Sites we should route to Jabber
JABBER_SITES = {}
# Keep track of Wind alerts to prevent dups
WIND_ALERTS = {}
# Wind speed threshold in kts for alerting
WIND_ALERT_THRESHOLD_KTS = 50.0
def wind_logic(iem, this):
    """Hairy logic for how we handle winds.

    Copies the observation's wind fields (speed, gust, direction, peak
    wind) into ``iem.data`` and updates the daily maximum direction /
    gust when this ob sets a new maximum.

    Args:
        iem: Observation whose ``data`` dict is updated in place.
        this: METARReport providing the parsed wind groups.
    """
    # Explicit storages
    if this.wind_speed:
        iem.data["sknt"] = this.wind_speed.value("KT")
    if this.wind_gust:
        iem.data["gust"] = this.wind_gust.value("KT")
    if this.wind_dir:
        iem.data["drct"] = float(this.wind_dir.value())
    if this.wind_speed_peak:
        iem.data["peak_wind_gust"] = this.wind_speed_peak.value("KT")
    if this.wind_dir_peak:
        iem.data["peak_wind_drct"] = this.wind_dir_peak.value()
    if this.peak_wind_time:
        iem.data["peak_wind_time"] = this.peak_wind_time.replace(
            tzinfo=timezone.utc
        )
    # Figure out if we have a new max_drct.  The previous daily maxima
    # (max_sknt/max_gust) must be read before they get updated elsewhere.
    old_max_wind = max(
        [iem.data.get("max_sknt", 0) or 0, iem.data.get("max_gust", 0) or 0]
    )
    new_max_wind = max(
        [iem.data.get("sknt", 0) or 0, iem.data.get("gust", 0) or 0]
    )
    # if our sknt or gust is a new max, use drct
    if new_max_wind > old_max_wind:
        iem.data["max_drct"] = iem.data.get("drct", 0)
    # if our PK WND is greater than all yall, use PK WND
    # TODO: PK WND potentially could be from last hour / thus yesterday?
    if (
        this.wind_speed_peak
        and this.wind_dir_peak
        and this.wind_speed_peak.value("KT") > old_max_wind
        and this.wind_speed_peak.value("KT") > new_max_wind
    ):
        iem.data["max_drct"] = this.wind_dir_peak.value()
        # NOTE(review): peak_wind_time is assumed present whenever
        # wind_speed_peak and wind_dir_peak are -- TODO confirm.
        iem.data["max_gust_ts"] = this.peak_wind_time.replace(
            tzinfo=timezone.utc
        )
        iem.data["max_gust"] = this.wind_speed_peak.value("KT")
def trace(pobj):
    """Convert a METAR precipitation object into a numeric value (inches).

    Returns ``None`` when no object is given; a reported amount of
    exactly zero maps to the IEM trace sentinel value.
    """
    if pobj is None:
        return None
    amount = pobj.value("IN")
    # A reported 0 in these groups denotes a trace, per IEM convention.
    return TRACE_VALUE if amount == 0 else amount
def to_metar(textprod, text):
    """Create a METAR object, if possible.

    Attempts up to five parses, pruning the unparseable groups reported
    by the metar library between attempts.  Sanity-checks the resulting
    station_id and timestamp, then attaches IEM metadata (iemid,
    network, tzname) to the report.

    Args:
        textprod: TextProduct providing ``valid``, ``utcnow`` and
            ``nwsli_provider``.
        text (str): the raw METAR string.

    Returns:
        METARReport or None when the text cannot be parsed/validated.
    """
    # Do some cleaning and whitespace trimming
    text = sanitize(text)
    if len(text) < 14:  # arb
        return
    attempt = 1
    mtr = None
    original_text = text
    valid = textprod.valid
    # Retry loop: each failed parse removes the offending groups.
    while attempt < 6 and mtr is None:
        try:
            mtr = METARReport(text, month=valid.month, year=valid.year)
        except MetarParserError as inst:
            tokens = ERROR_RE.findall(str(inst))
            if tokens:
                # Give up when the whole body is unparseable.
                if tokens[0] == text or text.startswith(tokens[0]):
                    return
                # So tokens contains a series of groups that needs updated
                newtext = text
                for token in tokens[0].split():
                    newtext = newtext.replace(" %s" % (token,), "")
                if newtext != text:
                    text = newtext
            # Ob likely from the previous month (e.g. the 31st parsed in
            # a 30-day month): back valid up into that month and retry.
            if str(inst).find("day is out of range for month") > -1:
                if valid.day < 10:
                    valid = valid.replace(day=1) - timedelta(days=1)
        attempt += 1
    if mtr is not None:
        # Attempt to figure out more things
        if mtr.station_id is None:
            LOG.warning("Aborting due to station_id being None |%s|", text)
            return None
        if mtr.time is None:
            LOG.warning("Aborting due to time being None |%s|", text)
            return None
        # don't allow data more than an hour into the future
        ceiling = (textprod.utcnow + timedelta(hours=1)).replace(tzinfo=None)
        if mtr.time > ceiling:
            # careful, we may have obs from the previous month
            if ceiling.day < 5 and mtr.time.day > 15:
                prevmonth = ceiling - timedelta(days=10)
                mtr.time = mtr.time.replace(
                    year=prevmonth.year, month=prevmonth.month
                )
            else:
                LOG.warning(
                    "Aborting due to time in the future "
                    "ceiling: %s mtr.time: %s",
                    ceiling,
                    mtr.time,
                )
                return None
        mtr.code = original_text
        # IEM identifiers drop the leading K from CONUS station IDs.
        mtr.iemid = (
            mtr.station_id[-3:] if mtr.station_id[0] == "K" else mtr.station_id
        )
        mtr.network = textprod.nwsli_provider.get(mtr.iemid, {}).get("network")
        mtr.tzname = textprod.nwsli_provider.get(mtr.iemid, {}).get("tzname")
    return mtr
def sanitize(text):
    """Clean a raw METAR string: normalize whitespace and bad bytes.

    Carriage returns become spaces, a few control/latin-1 bytes are
    stripped, runs of whitespace collapse to single spaces, and a
    leading all-digit token (not a station ID) is dropped.
    """
    # Carriage returns become spaces.
    text = text.replace("\015", " ")
    # Remove any multiple whitespace, bad chars
    raw = text.encode("utf-8", "ignore").replace(b"\xa0", b" ")
    raw = raw.replace(b"\001", b"").replace(b"\003", b"")
    text = raw.decode("utf-8", errors="ignore")
    text = " ".join(text.split())
    # A METAR must start with a letter; drop a leading numeric token.
    if text and text[0] in "0123456789":
        text = text.partition(" ")[2]
    return text
def _is_same_day(valid, tzname, hours=6):
"""Can we trust a six hour total?"""
try:
tzinfo = ZoneInfo(tzname)
except Exception:
return False
lts = valid.astimezone(tzinfo)
# TODO we should likely somehow compute this in standard time, shrug
return lts.day == (lts - timedelta(hours=hours)).day
class METARReport(Metar):
    """Provide some additional functionality over baseline METAR.

    Adds IEM metadata attributes, wind-alert helpers, and persistence of
    the parsed observation into the IEMAccess database.
    """
    def __init__(self, text, **kwargs):
        """Wrapper around Metar.__init__ adding IEM metadata slots."""
        Metar.__init__(self, text, **kwargs)
        # IEM station identifier (set later by to_metar).
        self.iemid = None
        # IEM network label (set later by to_metar).
        self.network = None
        # IANA timezone name of the station (set later by to_metar).
        self.tzname = None
    def wind_message(self):
        """Convert this into a Jabber style message.

        Uses the peak-wind group when it exceeds the gust.  Returns None
        when an identical alert (station/speed/time) was already issued,
        otherwise the formatted message string.
        """
        drct = 0
        sknt = 0
        time = self.time.replace(tzinfo=timezone.utc)
        if self.wind_gust:
            sknt = self.wind_gust.value("KT")
            if self.wind_dir:
                drct = self.wind_dir.value()
        if self.wind_speed_peak:
            v1 = self.wind_speed_peak.value("KT")
            # NOTE(review): assumes wind_dir_peak and peak_wind_time are
            # present whenever wind_speed_peak is -- TODO confirm.
            d1 = self.wind_dir_peak.value()
            t1 = self.peak_wind_time.replace(tzinfo=timezone.utc)
            if v1 > sknt:
                sknt = v1
                drct = d1
                time = t1
        # Module-level dedup cache keyed by station, speed and time.
        key = f"{self.station_id};{sknt};{time}"
        if key in WIND_ALERTS:
            return None
        WIND_ALERTS[key] = 1
        speed = datatypes.speed(sknt, "KT")
        return ("gust of %.0f knots (%.1f mph) from %s @ %s") % (
            speed.value("KT"),
            speed.value("MPH"),
            drct2text(drct),
            time.strftime("%H%MZ"),
        )
    def over_wind_threshold(self):
        """Is this METAR over the wind threshold for alerting"""
        if (
            self.wind_gust
            and self.wind_gust.value("KT") >= WIND_ALERT_THRESHOLD_KTS
        ):
            return True
        if (
            self.wind_speed_peak
            and self.wind_speed_peak.value("KT") >= WIND_ALERT_THRESHOLD_KTS
        ):
            return True
        return False
    def to_iemaccess(self, txn, force_current_log=False, skip_current=False):
        """Persist parsed data to IEMAccess Database.
        Args:
          txn (psycopg2.cursor): database cursor / transaction
          force_current_log (boolean): should this ob always go to current_log
          skip_current (boolean): should this ob always skip current table

        Returns:
          tuple of (Observation, result of Observation.save)
        """
        gts = self.time.replace(tzinfo=timezone.utc)
        iem = Observation(self.iemid, self.network, gts)
        # Load the observation from the database, if the same time exists!
        iem.load(txn)
        # Need to figure out if we have a duplicate ob, if so, check
        # the length of the raw data, if greater, take the temps
        if iem.data["raw"] is None or len(iem.data["raw"]) < len(self.code):
            if self.temp:
                val = self.temp.value("F")
                # Place reasonable bounds on the temperature before saving it!
                if val > -90 and val < 150:
                    iem.data["tmpf"] = round(val, 1)
            if self.dewpt:
                val = self.dewpt.value("F")
                # Place reasonable bounds on the temperature before saving it!
                if val > -150 and val < 100:
                    iem.data["dwpf"] = round(val, 1)
            # Database only allows len 254
            iem.data["raw"] = self.code[:254]
        # Always take a COR
        if self.code.find(" COR ") > -1:
            iem.data["raw"] = self.code[:254]
        wind_logic(iem, self)
        # Six/24-hour extremes; the "cond" (conditional) variants are only
        # trusted when the window stays within one local calendar day.
        if self.max_temp_6hr:
            iem.data["max_tmpf_6hr"] = round(self.max_temp_6hr.value("F"), 1)
            if self.tzname and _is_same_day(iem.data["valid"], self.tzname):
                iem.data["max_tmpf_cond"] = iem.data["max_tmpf_6hr"]
        if self.min_temp_6hr:
            iem.data["min_tmpf_6hr"] = round(self.min_temp_6hr.value("F"), 1)
            if self.tzname and _is_same_day(iem.data["valid"], self.tzname):
                iem.data["min_tmpf_cond"] = iem.data["min_tmpf_6hr"]
        if self.max_temp_24hr:
            iem.data["max_tmpf_24hr"] = round(self.max_temp_24hr.value("F"), 1)
        if self.min_temp_24hr:
            iem.data["min_tmpf_24hr"] = round(self.min_temp_24hr.value("F"), 1)
        # Precip totals; trace() maps a reported zero to the trace sentinel.
        if self.precip_3hr:
            iem.data["p03i"] = trace(self.precip_3hr)
        if self.precip_6hr:
            iem.data["p06i"] = trace(self.precip_6hr)
        if self.precip_24hr:
            iem.data["p24i"] = trace(self.precip_24hr)
        # We assume the value is zero, sad!
        iem.data["phour"] = 0
        if self.precip_1hr:
            iem.data["phour"] = trace(self.precip_1hr)
        if self.snowdepth:
            # NOTE snowd is a summary variable that wants to be daily, this
            # METAR value is more instantaneous, so goes to current table
            iem.data["snowdepth"] = self.snowdepth.value("IN")
        if self.vis:
            iem.data["vsby"] = self.vis.value("SM")
        if self.press:
            iem.data["alti"] = self.press.value("IN")
        if self.press_sea_level:
            iem.data["mslp"] = self.press_sea_level.value("MB")
        # Sanity check MSLP against altimeter; a ~100 hPa discrepancy is
        # assumed to be a dropped leading digit and is corrected.
        if self.press_sea_level and self.press:
            alti = self.press.value("MB")
            mslp = self.press_sea_level.value("MB")
            if abs(alti - mslp) > 25:
                LOG.warning(
                    "PRESSURE ERROR %s %s ALTI: %s MSLP: %s",
                    iem.data["station"],
                    iem.data["valid"],
                    alti,
                    mslp,
                )
                if alti > mslp:
                    iem.data["mslp"] += 100.0
                else:
                    iem.data["mslp"] -= 100.0
        # Do something with sky coverage
        for i in range(len(self.sky)):
            (cov, hgh, _) = self.sky[i]
            iem.data["skyc%s" % (i + 1)] = cov
            if hgh is not None:
                iem.data["skyl%s" % (i + 1)] = hgh.value("FT")
        # Presentwx
        if self.weather:
            pwx = []
            for wx in self.weather:
                val = "".join([a for a in wx if a is not None])
                # Skip empty or all-slash (missing) groups.
                if val == "" or val == len(val) * "/":
                    continue
                pwx.append(val)
            iem.data["wxcodes"] = pwx
        # Ice Accretion
        for hr in [1, 3, 6]:
            key = "ice_accretion_%shr" % (hr,)
            iem.data[key] = trace(getattr(self, key))
        return iem, iem.save(txn, force_current_log, skip_current)
class METARCollective(TextProduct):
    """
    A TextProduct containing METAR information
    """

    def __init__(
        self, text, utcnow=None, ugc_provider=None, nwsli_provider=None
    ):
        """Constructor

        Args:
          text (string): the raw string to process"""
        TextProduct.__init__(self, text, utcnow, ugc_provider, nwsli_provider)
        # Parsed METAR objects found in this product (filled by
        # split_and_parse below).
        self.metars = []
        self.split_and_parse()

    def get_jabbers(self, uri, _uri2=None):
        """Build jabber message triples for noteworthy METARs.

        Args:
          uri (str): base URL; each METAR's ``network`` attribute is
            appended to it to form the link target.
          _uri2: unused; kept for call-signature compatibility.

        Returns:
          list: ``[plain_text, html, xtra_dict]`` entries, one per alert.
        """
        jmsgs = []
        for mtr in self.metars:
            msg = None
            # Any weather group containing "GR" (hail) triggers an alert.
            for weatheri in mtr.weather:
                for wx in weatheri:
                    if wx is not None and "GR" in wx:
                        msg = "Hail"
            # Alert precedence: tornado > funnel cloud > wind gust > the
            # JABBER_SITES relay handling below.
            if TORNADO_RE.findall(mtr.code):
                msg = "Tornado"
            elif FUNNEL_RE.findall(mtr.code):
                msg = "Funnel Cloud"
            # Search for Peak wind gust info....
            elif mtr.over_wind_threshold():
                _msg = mtr.wind_message()
                if _msg:
                    msg = _msg
            elif mtr.station_id in JABBER_SITES:
                # Sites in JABBER_SITES get every new METAR relayed
                # verbatim; the stored time is used to de-duplicate
                # repeated observations for the same valid time.
                if JABBER_SITES[mtr.station_id] != mtr.time:
                    JABBER_SITES[mtr.station_id] = mtr.time
                    channels = ["METAR.%s" % (mtr.station_id,)]
                    if mtr.type == "SPECI":
                        channels.append("SPECI.%s" % (mtr.station_id,))
                    mstr = "%s %s" % (mtr.type, mtr.code)
                    jmsgs.append(
                        [mstr, mstr, dict(channels=",".join(channels))]
                    )
            if msg:
                row = self.nwsli_provider.get(mtr.iemid, {})
                wfo = row.get("wfo")
                if wfo is None or wfo == "":
                    # Without a WFO we cannot route the alert channel.
                    LOG.warning(
                        "Unknown WFO for id: %s, skipping alert", mtr.iemid
                    )
                    continue
                channels = ["METAR.%s" % (mtr.station_id,)]
                if mtr.type == "SPECI":
                    channels.append("SPECI.%s" % (mtr.station_id,))
                channels.append(wfo)
                st = row.get("state")
                nm = row.get("name")
                extra = ""
                # A "$" in the raw METAR flags a maintenance-needed sensor.
                if mtr.code.find("$") > 0:
                    extra = "(Caution: Maintenance Check Indicator)"
                url = ("%s%s") % (uri, mtr.network)
                jtxt = ("%s,%s (%s) ASOS %s reports %s\n%s %s") % (
                    nm,
                    st,
                    mtr.iemid,
                    extra,
                    msg,
                    mtr.code,
                    url,
                )
                jhtml = (
                    f'<p><a href="{url}">{nm},{st}</a> ({mtr.iemid}) ASOS '
                    f"{extra} reports <strong>{msg}</strong>"
                    f"<br/>{mtr.code}</p>"
                )
                xtra = {
                    "channels": ",".join(channels),
                    "lat": str(row.get("lat")),
                    "long": str(row.get("lon")),
                }
                # Tweet text is truncated to the platform limit.
                xtra["twitter"] = (
                    ("%s,%s (%s) ASOS reports %s -- %s")
                    % (nm, st, mtr.iemid, msg, mtr.code)
                )[:TWEET_CHARS]
                jmsgs.append([jtxt, jhtml, xtra])

        return jmsgs

    def split_and_parse(self):
        """Create METAR objects as we find products in the text"""
        # skip the top three lines
        lines = self.unixtext.split("\n")
        if lines[0] == "\001":
            # Full WMO product with control character: drop 3 header lines.
            content = "\n".join(lines[3:])
        elif len(lines[0]) < 5:
            content = "\n".join(lines[2:])
        else:
            self.warnings.append(
                ("WMO header split_and_parse fail: %s") % (self.unixtext,)
            )
            content = "\n".join(lines)
        # Tokenize on the '=', which splits a product with METARs
        tokens = content.split("=")
        for token in tokens:
            # Dump METARs that have NIL in them
            prefix = "METAR" if self.afos != "SPECI" else "SPECI"
            if NIL_RE.search(token):
                continue
            # Strip a leading METAR/SPECI keyword, remembering which type
            # this observation is.
            if token.find("METAR") > -1:
                token = token[(token.find("METAR") + 5) :]
            # unsure why this LWIS existed
            # elif token.find("LWIS ") > -1:
            #    token = token[token.find("LWIS ")+5:]
            elif token.find("SPECI") > -1:
                token = token[(token.find("SPECI") + 5) :]
                prefix = "SPECI"
            elif len(token.strip()) < 5:
                # Too short to be a METAR at all.
                continue
            res = to_metar(self, token)
            if res:
                res.type = prefix
                self.metars.append(res)
def parser(text, utcnow=None, ugc_provider=None, nwsli_provider=None):
    """Module-level convenience wrapper around :class:`METARCollective`.

    Args:
      text (str): raw product text to process.
      utcnow (datetime, optional): current UTC time override.
      ugc_provider: optional UGC lookup provider.
      nwsli_provider: optional NWSLI lookup provider.

    Returns:
      METARCollective: the parsed product.
    """
    return METARCollective(
        text,
        utcnow=utcnow,
        ugc_provider=ugc_provider,
        nwsli_provider=nwsli_provider,
    )
| |
"""Landlab Driver for running Landscape Evolution Experiments with
- Soil weathering
- Soil diffusion
- Detachment-limited river erosion
- tectonic uplift
- vegetation modulation of erosion effects
Created by: Manuel Schmid, University of Tuebingen, 07.04.2017
"""
## Import necessary Python and Landlab Modules
import numpy as np
from landlab import RasterModelGrid
from landlab import CLOSED_BOUNDARY, FIXED_VALUE_BOUNDARY
from landlab.components import FlowRouter
from landlab.components import DepthDependentDiffuser
from landlab.components import ExponentialWeatherer
from landlab.components import StreamPowerEroder
from landlab.components import LinearDiffuser
from landlab.components import FastscapeEroder
from landlab.components import DepressionFinderAndRouter
from landlab.components import drainage_density
from landlab import imshow_grid
from landlab.io.netcdf import write_netcdf
from landlab.io.netcdf import read_netcdf
from matplotlib import pyplot as plt
from matplotlib import rcParams
# Global matplotlib tweaks: auto-layout avoids clipped labels, and a very
# large agg chunk size prevents OverflowError on huge line plots.
rcParams.update({'figure.autolayout': True})
rcParams['agg.path.chunksize'] = 200000000
import time
#---------------------------Parameter Definitions------------------------------#

## Model grid geometry ##
ncols = 101  # number of node columns
nrows = 101  # number of node rows
dx = 100     # node spacing [m]

## Model run time ##
totalT = int(15.e6)  # total simulated time [yr]
ssT = int(15.e6)     # spin-up duration before the transient phase [yr]
sfT = int(7.e6)      # time at which the vegetation step-function is applied [yr]
sfA = -0.3           # amplitude of the vegetation step-function
dt = int(1000)       # length of one model timestep [yr]

# Derived step counts: total (nt), spin-up (ssnt), step-function (ssntSF).
nt = int(totalT / dt)
ssnt = int(ssT / dt)
ssntSF = int(sfT / dt)

# Time axes for the whole run and for the transient part only; used for
# plotting at the end of the script.
timeVec = np.arange(0, totalT, dt)
transTimeVec = np.arange(0, (totalT - ssT), dt)

## Tectonic uplift ##
upliftRate = 2e-4                 # [m/yr]
uplift_per_step = upliftRate * dt  # uplift applied per timestep [m]

## Fluvial erosion (stream-power law, E = K * A^m * S^n; cf. Tucker/Whipple) ##
ksp = 2e-7          # erodibility K
msp = 0.5           # area exponent m
nsp = 1             # slope exponent n
thresholdSP = 2.e-4  # incision threshold
critArea = 1e6      # minimum drainage area for channel formation [m^2]

## Hillslope erosion ##
# Implemented as (soil-depth-dependent) linear diffusion, cf. Johnstone &
# Hilley 2014; the diffusivity is vegetation-modulated after Istanbulluoglu.
linDiffBase = 6e-2        # base diffusivity [m^2/yr]
alphaDiff = 0.2           # vegetation sensitivity (after Istanbulluoglu)
soilTransportDepth = 1    # soil transport decay depth [m]
initalSoilDepth = 0       # initial soil depth [m]
initialBedrockElevation = 0  # topographic_elevation - soil_depth [m]

## Bedrock weathering (exponential production, after Ahnert 1976) ##
maxSoilProductionRate = 0.00015  # [m/yr]
soilProductionDepth = 0.2        # decay depth [m]

## Vegetation parameters (Istanbulluoglu-style erosion modulation) ##
aqDens = 1000.  # density of water [kg/m^3]
grav = 9.81     # gravitational acceleration [m/s^2]
nSoil = 0.01    # Manning's number for bare soil [-]
nVRef = 0.6     # Manning's number for full vegetation cover
vRef = 1.       # reference vegetation density for maximum roughness
w = 1.          # scaling factor for vegetation influence

## Output control ##
# The main loop writes NetCDFs/.pngs etc.; see the plotting section there.
outInt = 10000           # output interval [yrs]
no = totalT / outInt     # number of produced outputs
zp = len(str(int(no)))   # zero-padding width for file names; do not edit

print("finished with parameter-initiation")
print("---------------------")
#---------------------------------Grid Setup-----------------------------------#
# Build the raster model grid (nrows x ncols nodes, dx spacing), create the
# elevation/soil/vegetation fields and set the boundary conditions.
mg = RasterModelGrid((nrows,ncols), dx)

# Optionally load a pre-existing topography (numpy .npy format); a missing
# or unreadable file simply means we seed with random noise below.
try:
    topoSeed = np.load('topoSeed.npy')
    print('loaded topoSeed.npy')
except (IOError, OSError):
    # BUG FIX: was a bare `except:`, which also swallowed unrelated errors
    # (KeyboardInterrupt, MemoryError, programming mistakes).
    print('There is no file containing a initial topography')

#Initate all the fields that are needed for calculations
mg.add_zeros('node','topographic__elevation')
mg.add_zeros('node','bedrock__elevation')
mg.at_node['bedrock__elevation'][:] = initialBedrockElevation

# Use the seed topography if it was loaded above, otherwise add
# low-amplitude random noise so flow routing has gradients to work with.
# (`'topoSeed' in locals()` works here because at module level locals()
# is the module namespace.)
if 'topoSeed' in locals():
    mg.at_node['topographic__elevation'] += topoSeed
    #mg.at_node['bedrock__elevation'] += topoSeed
    print('Using pre-existing topography from file topoSeed.npy')
else:
    mg.at_node['topographic__elevation'] += np.random.rand(mg.at_node.size)/10000
    #mg.at_node['bedrock__elevation'] += np.random.rand(mg.at_node.size)/10000
    print('No pre-existing topography. Creating own random noise topo.')

mg.add_zeros('node','soil__depth')
mg.at_node['soil__depth'][:] = initalSoilDepth
mg.add_zeros('node','vegetation__density')

#Create boundary conditions of the model grid (either closed or fixed-head)
for edge in (mg.nodes_at_left_edge, mg.nodes_at_right_edge, mg.nodes_at_top_edge):
    mg.status_at_node[edge] = CLOSED_BOUNDARY
# BUG FIX: the original wrote `for edge in (mg.nodes_at_bottom_edge):` --
# plain parentheses, not a tuple -- which iterated and assigned one node id
# at a time. Indexing with the whole array is equivalent and vectorized.
mg.status_at_node[mg.nodes_at_bottom_edge] = FIXED_VALUE_BOUNDARY

print("finished with setup of modelgrid")
print("---------------------")
##---------------------------------Vegi implementation--------------------------#
##Set up a timeseries for vegetation-densities
##This basically assumes that for a spin-up time (ssT) we have constant vegetation
##cover (vp) and after that we get change vegetation cover as a sin-function

# Baseline (spin-up) vegetation cover fraction.
vp = .8
vegiTimeseries = np.zeros(int(totalT / dt)) + vp

#This part sets the modification of the vegi-distribution. comment/uncomment
#for usage

#this modifies the vegiTimeseries array with a sinusoidal curve
#vegiTimeseries[ssnt:] = (np.sin(0.000003 * transTimeVec) + 1) / 10 + (vp + 0.1)

#this incorporates a vegi step-function at timestep sfT with amplitude sfA
# NOTE(review): with sfA = -0.3 this evaluates to vp - (-0.3) = 1.1, i.e. a
# vegetation density above 1.0 after the step -- confirm whether `vp + sfA`
# was intended here.
vegiTimeseries[ssntSF:] = vp - sfA

mg.at_node['vegetation__density'][:] = vp
#This maps the vegetation density on the nodes to the links between the nodes
vegiLinks = mg.map_mean_of_link_nodes_to_link('vegetation__density')

##These are the necesseray calculations for implementing the vegetation__density
##in the fluvial routines
nSoil_to_15 = np.power(nSoil, 1.5)
Ford = aqDens * grav * nSoil_to_15
# Combined Manning roughness of bare soil plus the vegetated fraction.
n_v_frac = nSoil + (nVRef * ((mg.at_node['vegetation__density'] / vRef)**w))
#n_v_frac_to_w = np.power(n_v_frac, w)
#Prefect = np.power(n_v_frac_to_w, 0.9)
Prefect = np.power(n_v_frac, 0.9)
# Vegetation-adjusted fluvial erodibility used by the stream-power eroder.
Kv = ksp * Ford/Prefect

##These are the calcultions to calculate the linear diffusivity based on vegis
# NOTE(review): the zeros allocation on the next line is immediately rebound
# by the exponential expression below it, so it has no effect.
linDiff = mg.zeros('node', dtype = float)
# Diffusivity decays exponentially with vegetation cover on the links.
linDiff = linDiffBase * np.exp(-alphaDiff * vegiLinks)

print("finished setting up the vegetation fields and Kdiff and Kriv")
print("---------------------")
##---------------------------------Array initialization---------------------#
##This initializes all the arrays that are used to store data during the runtime
##of the model. this is mostly for plotting purposed and to create the .txt
##outputs. This potentially takes up a lot of space, so check if needed.

dhdtA = []      #Vector containing dhdt values for each node per timestep
meandhdt = []   #contains mean elevation change per timestep
mean_E = []     #contains the mean "erosion" rate out of Massbalance
mean_hill_E = []  #contains mean hillslope erosion rate
mean_riv_E = []   #contains mean river erosion rate
mean_dd = []    #contains mean drainage density
mean_K_riv = []   #contains mean K-value for spl
mean_K_diff = []  #contains mean K-value for ld
mean_slope = []   #mean slope within model-area
max_slope = []    #maximum slope within model area
min_slope = []    #minimum slope within model area
mean_elev = []    #mean elevation within model area
max_elev = []     #maximum elevation within model area
min_elev = []     #minimum elevation within model area
# Diagnostic record of the vegetation roughness prefactor.
# NOTE(review): never appended to in the visible main loop, so the CSV and
# plot produced from it at the end of the script will be empty.
vegi_P_mean = []
mean_SD = []      #mean soil depth
##---------------------------------Component initialization---------------------#
# Instantiate the landlab process components. The StreamPowerEroder below is
# a commented-out alternative to the FastscapeEroder actually used.
#sp = StreamPowerEroder(mg,
#                       K_sp = Kv,
#                       m_sp = msp,
#                       n_sp = nsp,
#                       threshold_sp=thresholdSP)

# Fluvial incision (threshold_sp=0 here; the instance re-created inside the
# main loop uses thresholdSP instead).
fc = FastscapeEroder(mg,
                     K_sp = Kv,
                     m_sp = msp,
                     n_sp = nsp,
                     threshold_sp = 0,
                     rainfall_intensity = 1)

fr = FlowRouter(mg)

# Lake/depression handling for the flow routing.
lm = DepressionFinderAndRouter(mg)

# Bedrock-to-soil conversion (exponential production after Ahnert).
expw = ExponentialWeatherer(mg,
                            max_soil_production_rate = maxSoilProductionRate,
                            soil_production_decay_depth = soilProductionDepth)

# Depth-dependent hillslope diffusion.
# NOTE(review): dld and expw are initialized but their run_one_step calls
# are commented out in the main loop; only `ld` is actually used.
dld = DepthDependentDiffuser(mg,
                             linear_diffusivity = linDiff,
                             soil_transport_decay_depth = soilTransportDepth)

ld = LinearDiffuser(mg, linear_diffusivity = linDiff)

print("finished with the initialization of the erosion components")
print("---------------------")
##---------------------------------Main Loop------------------------------------#
# Time integration: each step runs diffusion, flow routing, depression
# mapping and fluvial incision, applies uplift, updates the vegetation-
# dependent parameters, and records bulk statistics for later plotting.
t0 = time.time()
elapsed_time = 0
print("starting with main loop.")
print("---------------------")

#Create incremental counter for controlling progress of mainloop
counter = 0

#Create Limits for DHDT plot. Move this somewhere else later..
# With the factor of 1 these evaluate to [0, 2 * upliftRate].
DHDTLowLim = upliftRate - (upliftRate * 1)
DHDTHighLim = upliftRate + (upliftRate * 1)

while elapsed_time < totalT:

    #create copy of "old" topography
    z0 = mg.at_node['topographic__elevation'].copy()

    #Call the erosion routines.
    # NOTE(review): the weatherer (expw) and the depth-dependent diffuser
    # (dld) are disabled; plain linear diffusion is used instead.
    #expw.run_one_step(dt=dt)
    #dld.run_one_step(dt=dt)
    ld.run_one_step(dt=dt)
    fr.run_one_step()
    lm.map_depressions()
    # Nodes flagged as flooded are excluded from fluvial incision.
    # NOTE(review): assumes flood_status code 3 means "flooded" in this
    # landlab version -- confirm against DepressionFinderAndRouter.
    floodedNodes = np.where(lm.flood_status==3)[0]
    fc.run_one_step(dt=dt, flooded_nodes = floodedNodes)

    mg.at_node['topographic__elevation'][mg.core_nodes] += uplift_per_step #add uplift
    mg.at_node['bedrock__elevation'][mg.core_nodes] += uplift_per_step #add uplift

    #look for nodes where river incises below current soil thickness
    bad_nodes = mg.at_node['topographic__elevation'] < mg.at_node['bedrock__elevation']
    #redefine bedrock to current channel elevation
    mg.at_node['bedrock__elevation'][bad_nodes] = mg.at_node['topographic__elevation'][bad_nodes]

    #calculate drainage_density
    channel_mask = mg.at_node['drainage_area'] > critArea
    # NOTE(review): a new DrainageDensity component is constructed every
    # timestep; wasteful, but kept to preserve the original behaviour.
    dd = drainage_density.DrainageDensity(mg, channel__mask = channel_mask)
    mean_dd.append(dd.calc_drainage_density())

    #Calculate dhdt and E
    dh = (mg.at_node['topographic__elevation'] - z0)
    dhdt = dh/dt
    # Mass-balance erosion rate: uplift minus observed elevation change.
    erosionMatrix = upliftRate - dhdt
    mean_E.append(np.mean(erosionMatrix))

    #Calculate river erosion rate, based on critical area threshold
    dh_riv = mg.at_node['topographic__elevation'][np.where(mg.at_node['drainage_area'] > critArea)]\
        - z0[np.where(mg.at_node['drainage_area'] > critArea)]
    dhdt_riv = dh_riv/dt
    mean_riv_E.append(np.mean(upliftRate - dhdt_riv))

    #Calculate hillslope erosion rate
    dh_hill = mg.at_node['topographic__elevation'][np.where(mg.at_node['drainage_area'] <= critArea)]\
        - z0[np.where(mg.at_node['drainage_area'] <= critArea)]
    dhdt_hill = dh_hill/dt
    mean_hill_E.append(np.mean(upliftRate - dhdt_hill))

    #update vegetation__density
    mg.at_node['vegetation__density'][:] = vegiTimeseries[int(elapsed_time/dt)-1]
    vegiLinks = mg.map_mean_of_link_nodes_to_link('vegetation__density')

    #update linDiff
    linDiff = linDiffBase*np.exp(-alphaDiff * vegiLinks)

    #reinitialize diffuser
    # dld = DepthDependentDiffuser(mg,
    #                              linear_diffusivity = linDiff,
    #                              soil_transport_decay_depth = soilTransportDepth)
    #
    ld = LinearDiffuser(mg, linear_diffusivity = linDiff)

    #update K_sp
    # NOTE(review): unlike the pre-loop setup, the exponent `w` is applied
    # via np.power below rather than inside the parentheses (identical for
    # the current w = 1).
    n_v_frac = nSoil + (nVRef * (mg.at_node['vegetation__density'] / vRef))
    n_v_frac_to_w = np.power(n_v_frac, w)
    Prefect = np.power(n_v_frac_to_w, 0.9)
    Kv = ksp * Ford/Prefect
    # Rebuild the fluvial eroder with the vegetation-adjusted erodibility.
    # NOTE(review): this re-creation uses threshold_sp = thresholdSP while
    # the pre-loop instance used 0 -- confirm which is intended.
    fc = FastscapeEroder(mg,
                         K_sp = Kv,
                         m_sp = msp,
                         n_sp = nsp,
                         threshold_sp = thresholdSP,
                         rainfall_intensity = 1)

    #Calculate and save mean K-values
    #save mean_K_diff and mean_K_riv
    mean_K_riv.append(np.mean(Kv))
    mean_K_diff.append(np.mean(linDiff))

    #Calculate and save mean, max, min slopes
    mean_slope.append(np.mean(mg.at_node['topographic__steepest_slope'][mg.core_nodes]))
    max_slope.append(np.max(mg.at_node['topographic__steepest_slope'][mg.core_nodes]))
    min_slope.append(np.min(mg.at_node['topographic__steepest_slope'][mg.core_nodes]))

    #calculate and save mean, max, min elevation
    mean_elev.append(np.mean(mg.at_node['topographic__elevation'][mg.core_nodes]))
    max_elev.append(np.max(mg.at_node['topographic__elevation'][mg.core_nodes]))
    min_elev.append(np.min(mg.at_node['topographic__elevation'][mg.core_nodes]))

    #SoilDepth
    mean_SD.append(np.mean(mg.at_node['soil__depth'][mg.core_nodes]))

    counter += 1
    #print(counter)

    #Run the output loop every outInt-times
    # (also fires at elapsed_time == 0, writing the initial state)
    if elapsed_time % outInt == 0:
        print('Elapsed Time:' , elapsed_time,', writing output!')
        ##Create DEM
        plt.figure()
        imshow_grid(mg,'topographic__elevation',grid_units=['m','m'],var_name = 'Elevation',cmap='terrain')
        plt.savefig('./DEM/DEM_'+str(int(elapsed_time/outInt)).zfill(zp)+'.png')
        plt.close()
        ##Create Flow Accumulation Map
        plt.figure()
        imshow_grid(mg,fr.drainage_area,grid_units=['m','m'],var_name =
                    'Drainage Area',cmap='bone')
        plt.savefig('./ACC/ACC_'+str(int(elapsed_time/outInt)).zfill(zp)+'.png')
        plt.close()
        ##Create Slope - Area Map
        plt.figure()
        plt.loglog(mg.at_node['drainage_area'][np.where(mg.at_node['drainage_area'] > 0)],
                   mg.at_node['topographic__steepest_slope'][np.where(mg.at_node['drainage_area'] > 0)],
                   marker='.',linestyle='None')
        plt.xlabel('Area')
        plt.ylabel('Slope')
        plt.savefig('./SA/SA_'+str(int(elapsed_time/outInt)).zfill(zp)+'.png')
        plt.close()
        ##Create NetCDF Output
        #write_netcdf('./NC/output{}'.format(elapsed_time)+'__'+str(int(elapsed_time/outInt)).zfill(zp)+'.nc',
        #             mg,format='NETCDF4')
        ##Create erosion_diffmaps
        plt.figure()
        imshow_grid(mg,erosionMatrix,grid_units=['m','m'],var_name='Erosion m/yr',cmap='jet',limits=[DHDTLowLim,DHDTHighLim])
        plt.savefig('./DHDT/eMap_'+str(int(elapsed_time/outInt)).zfill(zp)+'.png')
        plt.close()
        #plt.figure()
        #imshow_grid(mg,'soil__depth',grid_units=['m','m'],var_name=
        #            'Elevation',cmap='terrain')
        # NOTE(review): the commented savefig name below is missing a '.'
        # before 'png'; fix if this block is ever re-enabled.
        #plt.savefig('./SoilDepth/SD_'+str(int(elapsed_time/outInt)).zfill(zp)+'png')
        #plt.close()

    elapsed_time += dt #update elapsed time

tE = time.time()
print()
print('End of Main Loop. So far it took {}s to get here. No worries homeboy...'.format(tE-t0))
##---------------------------------Plotting-------------------------------------#
## OUTPUT OF EROSION RATES AND DIFFMAPS (BETA! NEEDS TO GO INTO SEPERATE CLASS
## TO KEEP RUNFILE NEAT AND SLEEK

# Optional dual-axis erosion/vegetation plot (disabled).
#E-t:
#fig, ax1 = plt.subplots(figsize = [11,7])
#ax2 = ax1.twinx()
#ax1.plot(timeVec, mean_hill_E, 'k', alpha = 0.6, linewidth = 2.5)
#ax1.plot(timeVec, mean_riv_E, 'k--', alpha = 0.6, linewidth = 2.5)
##ax1.set_ylim([upliftRate*0.9,upliftRate*1.1])
#ax1.plot(timeVec, mean_E, 'r', linewidth = 4.7)
#ax2.plot(timeVec,100*vegiTimeseries,'g', linewidth = 4)
##ax2.set_ylim([0,100])
#ax1.set_xlabel('years', fontsize = 22)
#ax1.set_ylabel('Erosion rate', color='k', fontsize = 22)
#ax2.set_ylabel('Vegetation cover [%]', color='k', fontsize = 22)
#ax1.legend(['Hillslope Erosion','Fluvial Erosion', 'Total Erosion'], loc = 3, fontsize = 18)
#ax2.legend(['Vegetation Cover'], loc = 4, fontsize = 18)
#plt.savefig('./VegiEros_dualy.png',dpi = 720)
#plt.close()

# Six-panel summary plot: vegetation, elevation, slope, drainage density,
# erosion rates and soil depth versus model time.
#Plot Vegi_erosion_rate
fig, axarr = plt.subplots(6, sharex = True, figsize = [11,14])

axarr[0].plot(timeVec, vegiTimeseries,'g', linewidth = 2.5)
axarr[0].set_title('Mean Surface Vegetation', fontsize = 12)
axarr[0].set_ylabel('Vegetation cover')

axarr[1].plot(timeVec, mean_elev, 'k', linewidth = 2.5)
axarr[1].plot(timeVec, max_elev, 'k--', linewidth = 2, alpha = 0.5)
axarr[1].plot(timeVec, min_elev, 'k--', linewidth = 2, alpha = 0.5)
axarr[1].set_title('Mean Elevation', fontsize = 12)
axarr[1].set_ylabel('Mean Elevation [m]')
#axarr[1].set_ylim([0,80])

# Slopes are stored as gradients; convert to degrees for display.
axarr[2].plot(timeVec, np.degrees(np.arctan(mean_slope)), 'r', linewidth = 2.5)
axarr[2].plot(timeVec, np.degrees(np.arctan(max_slope)), 'r--', linewidth = 2.0, alpha = 0.5)
axarr[2].plot(timeVec, np.degrees(np.arctan(min_slope)), 'r--', linewidth = 2.0, alpha = 0.5)
#axarr[2].set_ylim([0,10])
axarr[2].set_title('Mean Slope', fontsize = 12)
axarr[2].set_ylabel('Mean Slope [deg]')

axarr[3].plot(timeVec,mean_dd, 'b', linewidth = 2.5)
axarr[3].set_title('Mean Drainage Density')
axarr[3].set_ylabel('Drainage Density')

axarr[4].plot(timeVec, mean_hill_E, 'g--', linewidth = 2.0, alpha = 0.5)
axarr[4].plot(timeVec, mean_riv_E, 'b--', linewidth = 2.0, alpha = 0.5)
axarr[4].plot(timeVec, mean_E, 'r--', linewidth = 2.2, alpha = 0.8)
axarr[4].legend(['Hillsl.', 'Rivers','Mean'])
axarr[4].set_title("Erosion rates")
axarr[4].set_ylabel('Erosion rate [m/yr]')

axarr[5].plot(timeVec, mean_SD, 'k', linewidth = 2.5)
axarr[5].set_title("Soil Depth")
axarr[5].set_ylabel("Soil Depth [m]")
axarr[5].set_xlabel("Model Years", fontsize = 12)

plt.savefig('./Multiplot_absolut.png',dpi = 720)
plt.close()

# Normalized-derivative multiplot (disabled).
#Multiplot with normalized differentations
#vegi_timeseries_diff = np.diff(vegiTimeseries)/(np.max(np.diff(vegiTimeseries)))
#mean_elev_diff = np.diff(mean_elev)/(np.max(np.abs((np.diff(mean_elev)))))
#mean_slope_diff = np.diff(mean_slope)/(np.max(np.abs(np.diff(mean_slope))))
#mean_hill_E_diff = np.diff(mean_hill_E)/(np.max(np.abs(np.diff(mean_hill_E))))
#mean_riv_E_diff = np.diff(mean_riv_E)/(np.max(np.abs(np.diff(mean_riv_E))))
#mean_E_diff = np.diff(mean_E)/np.max(np.abs(np.diff(mean_E)))
#mean_dd_diff = np.diff(mean_dd)/np.max(np.abs(np.diff(mean_dd)))
#timeVec_diff = np.delete(timeVec, -1)

#fig, axarr = plt.subplots(5, sharex = True, figsize = [11,14])
#axarr[0].plot(timeVec_diff, vegi_timeseries_diff,'g', linewidth = 2.5)
#axarr[0].plot(timeVec,vegiTimeseries,'g--',alpha=0.5)
#axarr[0].set_title('Change In Mean Surface Vegetation', fontsize = 12)
#axarr[0].set_ylabel('Vegetation cover change')
#axarr[1].plot(timeVec_diff, mean_elev_diff, 'k', linewidth = 2.5)
#axarr[1].set_title('Change In Mean Elevation', fontsize = 12)
#axarr[1].set_ylabel('dh/dt')
#axarr[2].plot(timeVec_diff, mean_slope_diff, 'r', linewidth = 2.5)
#axarr[2].set_title('Change In Mean Slope', fontsize = 12)
#axarr[2].set_ylabel('dS/dt')
#axarr[3].plot(timeVec_diff,mean_dd_diff, 'b', linewidth = 2.5)
#axarr[3].set_title('Change In Mean Drainage Density')
#axarr[3].set_ylabel('d(dd)/dt')
##axarr[4].plot(timeVec_diff, mean_hill_E_diff, 'g--', linewidth = 2.0, alpha = 0.5)
##axarr[4].plot(timeVec_diff, mean_riv_E_diff, 'b--', linewidth = 2.0, alpha = 0.5)
#axarr[4].plot(timeVec_diff, mean_E_diff, 'r--', linewidth = 2.2, alpha = 0.8)
##axarr[4].legend(['Hillsl.', 'Rivers','Mean'])
#axarr[4].set_title("Change In Erosion Rates")
#axarr[4].set_ylabel('dE/dt')
#axarr[4].set_xlabel('Model Years', fontsize = 12)
#plt.savefig('./Multiplot_diff.png',dpi = 720)
#plt.close()
#Save the most useful output arrays as CSV file for later plotting
np.savetxt('./CSVOutput/MeanSlope.csv', mean_slope)
# BUG FIX: max_slope used to be written to 'MaxElev.csv', where it was then
# silently overwritten by the real maximum-elevation export two lines below,
# losing the max-slope record entirely. Save it under its own file name.
np.savetxt('./CSVOutput/MaxSlope.csv', max_slope)
np.savetxt('./CSVOutput/MeanElev.csv', mean_elev)
np.savetxt('./CSVOutput/MaxElev.csv', max_elev)
np.savetxt('./CSVOutput/MeanRiverErosion.csv', mean_riv_E)
np.savetxt('./CSVOutput/MeanHillslopeErosion.csv', mean_hill_E)
np.savetxt('./CSVOutput/MeanErosion.csv', mean_E)
np.savetxt('./CSVOutput/VegetationDensity.csv', vegiTimeseries)
np.savetxt('./CSVOutput/Timeseries.csv', timeVec)
np.savetxt('./CSVOutput/Vegi_bugfix.csv', vegi_P_mean)
np.savetxt('./CSVOutput/MeanSoilthick.csv', mean_SD)

# Diagnostic plot of the vegetation prefactor record.
# NOTE(review): vegi_P_mean is never appended to in the main loop, so this
# plot (and the CSV above) will be empty unless that recording is restored.
plt.plot(vegi_P_mean)
plt.savefig('./vegi_P_bugfix.png', dpi = 720)
plt.close()

print("FINALLY! TADA! IT IS DONE! LOOK AT ALL THE OUTPUT I MADE!!!!")
| |
"""
Utility code that provides classes helpful in choosing a suitable TVTK
class. It does this by providing a list of all the classes along with
the option to be able to search for the documentation.
The nice thing about the UI is that it performs some kind of completion
on names typed by the user, plus it allows users to search through the
TVTK class docs very easily. Once a search string is typed the
completion and available lists are modified so you can do completion of
the searched class names. If a unique enough string is typed the class
docs are shown.
"""
# Author: Prabhu Ramachandran <prabhu [at] aero . iitb . ac . in>
# Copyright (c) 2008-2015, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
import vtk
import types
import inspect
# Enthought library imports.
from traits.api import HasTraits, Property, List, Str, \
Instance, Button, Int
from traitsui.api import View, Group, Item, EnumEditor,\
ListEditor, TextEditor
from tvtk.api import tvtk
from tvtk.common import get_tvtk_name
################################################################################
# Utility functions.
################################################################################
def get_tvtk_class_names():
    """Returns 4 lists:

     1. A list of all the TVTK class names that are not abstract.
     2. A list of the TVTK sources (have only outputs and no inputs)
     3. A list of the TVTK filters (both inputs and outputs)
     4. A list of the TVTK sinks (only inputs and no outputs)

    Each list is sorted alphabetically.
    """
    # Shut off VTK warnings while we instantiate every class.
    o = vtk.vtkObject
    w = o.GetGlobalWarningDisplay()
    o.SetGlobalWarningDisplay(0)  # Turn it off.

    # FIX: renamed the accumulators -- the originals shadowed the builtins
    # `all` and `filter`.
    all_classes = []
    sources = []
    filters = []
    sinks = []
    for name in dir(vtk):
        if name.startswith('vtk') and not name.startswith('vtkQt'):
            klass = getattr(vtk, name)
            try:
                c = klass()
            except (TypeError, NotImplementedError):
                # Abstract classes cannot be instantiated; skip them.
                continue

            tvtk_name = get_tvtk_name(name)
            all_classes.append(tvtk_name)
            # Classify by the number of input/output ports.
            has_input = has_output = False
            if hasattr(klass, 'GetNumberOfInputPorts'):
                if c.GetNumberOfInputPorts() > 0:
                    has_input = True
            if hasattr(klass, 'GetNumberOfOutputPorts'):
                if c.GetNumberOfOutputPorts() > 0:
                    has_output = True
            if has_input:
                if has_output:
                    filters.append(tvtk_name)
                else:
                    sinks.append(tvtk_name)
            elif has_output:
                sources.append(tvtk_name)

    # Restore the original warning setting.
    o.SetGlobalWarningDisplay(w)

    result = (all_classes, sources, filters, sinks)
    for x in result:
        x.sort()

    return result
def get_func_doc(func, fname):
    """Return the documentation for *func* rendered as ``fname(signature)``
    followed by its docstring, or '' if *func* is not a plain function
    or bound method.
    """
    if inspect.isfunction(func):
        func_obj = func
    elif inspect.ismethod(func):
        func_obj = func.__func__
    else:
        return ''
    # BUG FIX: the original used inspect.getargs + inspect.formatargspec;
    # formatargspec was deprecated in Python 3.5 and removed in 3.11, so
    # this crashed on modern interpreters. inspect.signature is the
    # supported replacement.
    try:
        sig = str(inspect.signature(func_obj))
    except (TypeError, ValueError):
        # Some callables (e.g. certain builtins) expose no signature.
        sig = '()'
    doc = fname + sig
    d = inspect.getdoc(func)
    if d is not None:
        doc += '\n\n' + d + '\n\n'
    return doc
def get_tvtk_class_doc(obj):
    """Return the documentation string for *obj*: its class docstring,
    followed by a section listing its traits and one listing its methods.
    """
    skip_traits = ('trait_added', 'trait_modified')
    pieces = [obj.__doc__ + '\nTraits:\n-------------------\n\n']

    # One line per public trait: "name: help".
    for key, trait in obj.traits().items():
        if key.startswith('_') or key.endswith('_') or key in skip_traits:
            continue
        pieces.append('\n%s: %s' % (key, trait.help))

    pieces.append('\nMethods:\n----------------------\n\n')

    # Document public callables that are not traits and not trait plumbing.
    trait_names = obj.trait_names()
    for name in dir(obj):
        if name in trait_names or name.startswith('_'):
            continue
        if name.find('trait') > -1 and name != 'update_traits':
            continue
        attr = getattr(obj, name)
        if callable(attr):
            pieces.append('\n' + get_func_doc(attr, name))

    return ''.join(pieces)
# GLOBALS
# Build the four global class-name lists once at import time. This
# instantiates every available VTK class, so importing this module is
# relatively expensive.
TVTK_CLASSES, TVTK_SOURCES, TVTK_FILTERS, TVTK_SINKS = get_tvtk_class_names()
################################################################################
# `DocSearch` class.
################################################################################
class DocSearch(object):
    """A simple class that provides a method to search through class
    documentation. This code is taken from mayavi-1.x's ivtk.VtkHelp
    """

    # Class-level caches so the expensive scan over `dir(vtk)` runs only
    # once per process, regardless of how many instances are created.
    VTK_CLASSES = []
    VTK_CLASS_DOC = []

    def __init__(self):
        # BUG FIX: the original assigned the freshly built lists to
        # *instance* attributes inside _setup_data, so the class-level
        # caches stayed empty forever and every instantiation re-scanned
        # all of VTK. Populate the class attributes once and share them.
        if len(DocSearch.VTK_CLASSES) == 0:
            self._setup_data()
        self.vtk_classes = DocSearch.VTK_CLASSES
        self.vtk_c_doc = DocSearch.VTK_CLASS_DOC

    def _setup_data(self):
        """Scan the vtk module once and fill the class-level caches with
        the class names and their lower-cased docstrings."""
        classes = [x for x in dir(vtk) if x.startswith('vtk')]
        # Lower-cased docstrings, index-aligned with `classes`.
        docs = [''] * len(classes)
        for i, name in enumerate(classes):
            try:
                docs[i] = getattr(vtk, name).__doc__.lower()
            except AttributeError:
                # No docstring available; leave the empty string.
                pass
        DocSearch.VTK_CLASSES = classes
        DocSearch.VTK_CLASS_DOC = docs

    def search(self, word):
        """ Search for word in class documentation and return matching
        classes.  This is also case insensitive.  The searching
        supports the 'and' and 'or' keywords that allow for fairly
        complex searches.  A space between words assumes that the two
        words appear one after the other.

        Parameters
        ----------
        word -- name to search for.
        """
        assert type(word) is str, \
               "Sorry, passed argument, %s is not a string."%word
        if len(word.strip()) == 0:
            return []

        lword = word.lower().strip()
        tmp_list = lword.split()
        # Fold raw tokens into `wlist`: adjacent plain words are joined
        # with a space (phrase search) while 'and'/'or' act as operators.
        wlist = []
        prev = ""
        for w in tmp_list:
            z = w.strip()
            if z in ('and', 'or'):
                if prev and prev not in ('and', 'or'):
                    wlist.append(prev)
                wlist.append(z)
                prev = z
            else:
                if prev and prev not in ('and', 'or'):
                    prev = prev + ' ' + z
                else:
                    prev = z
        if prev in ('and', 'or'):
            # Trailing operator with no right-hand side: drop it.
            del wlist[-1]
        elif prev:
            wlist.append(prev)

        ret = []
        i = 0
        vtk_classes = self.vtk_classes
        vtk_c_doc = self.vtk_c_doc
        N = len(vtk_classes)
        while i < N:
            # Evaluate the word list left-to-right against this class doc.
            stored_test = 0
            do_test = ''
            for w in wlist:
                if w == 'and':
                    do_test = 'and'
                elif w == 'or':
                    do_test = 'or'
                else:
                    test = (vtk_c_doc[i].find(w) > -1)
                    if do_test == 'and':
                        stored_test = stored_test and test
                    elif do_test == 'or':
                        stored_test = stored_test or test
                    elif do_test == '':
                        stored_test = test
            if stored_test:
                ret.append(vtk_classes[i])
            i = i + 1

        return [get_tvtk_name(x) for x in ret]
_search_help_doc = """
Help on Searching
---------------------------------------
To search for a particular TVTK class, type in the 'class_name' text entry
widget. The class names are all case sensitive. You may also select
the class from the list of available class names at the top.
As you type you will see completion options in the completions
list, the instant a complete match is found the class documentation will
be show in the bottom.
You can also search the TVTK class documentation for strings (case
insensitive). The search option supports the 'and' and 'or' keywords to
do advanced searches. Press <Enter>/<Return> to perform the search.
The top 25 hits will show up in the completions, to view a particular
hit either select the choice from the available ones or type in the
name in the 'class_name' entry box. To clear the search string click
the 'Clear search' button or erase the search string manually.
"""
################################################################################
# `TVTKClassChooser` class.
################################################################################
class TVTKClassChooser(HasTraits):
    """UI for picking a TVTK class by name, with completion and a
    case-insensitive documentation search (supports 'and'/'or')."""

    # The selected object, is None if no valid class_name was made.
    object = Property

    # The TVTK class name to choose.
    class_name = Str('', desc='class name of TVTK class (case sensitive)')

    # The string to search for in the class docs -- the search supports
    # 'and' and 'or' keywords.
    search = Str('', desc='string to search in TVTK class documentation '\
                 'supports the "and" and "or" keywords. '\
                 'press <Enter> to start search. '\
                 'This is case insensitive.')

    clear_search = Button

    # The class documentation.
    doc = Str(_search_help_doc)

    # Completions for the choice of class.
    completions = List(Str)

    # List of available class names as strings.
    available = List(TVTK_CLASSES)

    ########################################
    # Private traits.

    # Lazily constructed documentation searcher (see _finder_default).
    finder = Instance(DocSearch)

    # Maximum number of entries shown in the completions list.
    n_completion = Int(25)

    ########################################
    # View related traits.

    view = View(Group(Item(name='class_name',
                           editor=EnumEditor(name='available')),
                      Item(name='class_name',
                           has_focus=True
                           ),
                      Item(name='search',
                           editor=TextEditor(enter_set=True,
                                             auto_set=False)
                           ),
                      Item(name='clear_search',
                           show_label=False),
                      Item('_'),
                      Item(name='completions',
                           editor=ListEditor(columns=3),
                           style='readonly'
                           ),
                      Item(name='doc',
                           resizable=True,
                           label='Documentation',
                           style='custom')
                      ),
                id='tvtk_doc',
                resizable=True,
                width=800,
                height=600,
                title='TVTK class chooser',
                buttons = ["OK", "Cancel"]
                )

    ######################################################################
    # `object` interface.
    ######################################################################
    def __init__(self, **traits):
        super(TVTKClassChooser, self).__init__(**traits)
        # Remember the unfiltered name list so searches can be cleared.
        self._orig_available = list(self.available)

    ######################################################################
    # Non-public interface.
    ######################################################################
    def _get_object(self):
        """Property getter: instantiate the named TVTK class, or None if
        the name is unknown or the class cannot be instantiated."""
        o = None
        if len(self.class_name) > 0:
            try:
                o = getattr(tvtk, self.class_name)()
            except (AttributeError, TypeError):
                pass
        return o

    def _class_name_changed(self, value):
        """Static trait handler: refresh completions and the shown docs."""
        av = self.available
        comp = [x for x in av if x.startswith(value)]
        self.completions = comp[:self.n_completion]
        if len(comp) == 1 and value != comp[0]:
            # Unique prefix match: auto-complete the entry.
            self.class_name = comp[0]

        o = self.object
        if o is not None:
            self.doc = get_tvtk_class_doc(o)
        else:
            self.doc = _search_help_doc

    def _finder_default(self):
        """Default initializer for the `finder` trait."""
        return DocSearch()

    def _clear_search_fired(self):
        """Button handler: reset the search string."""
        self.search = ''

    def _search_changed(self, value):
        """Static trait handler: filter `available` by a doc search."""
        if len(value) < 3:
            # Too short to search meaningfully; restore the full list.
            self.available = self._orig_available
            return

        f = self.finder
        result = f.search(str(value))
        if len(result) == 0:
            self.available = self._orig_available
        elif len(result) == 1:
            # A unique hit: select it directly.
            self.class_name = result[0]
        else:
            self.available = result
            self.completions = result[:self.n_completion]
################################################################################
# `TVTKSourceChooser` class.
################################################################################
class TVTKSourceChooser(TVTKClassChooser):
    """Class chooser restricted to TVTK source classes."""
    available = List(TVTK_SOURCES)
################################################################################
# `TVTKFilterChooser` class.
################################################################################
class TVTKFilterChooser(TVTKClassChooser):
    """Class chooser restricted to TVTK filter classes."""
    available = List(TVTK_FILTERS)
################################################################################
# `TVTKSinkChooser` class.
################################################################################
class TVTKSinkChooser(TVTKClassChooser):
    """Class chooser restricted to TVTK sink (writer/output) classes."""
    available = List(TVTK_SINKS)
def main():
    """Launch the TVTK class chooser as a standalone help/search tool."""
    chooser = TVTKClassChooser()
    chooser.configure_traits()


if __name__ == '__main__':
    main()
| |
"""Support for MQTT message handling."""
from __future__ import annotations
import asyncio
from functools import lru_cache, partial, wraps
import inspect
from itertools import groupby
import logging
from operator import attrgetter
import ssl
import time
from typing import Any, Awaitable, Callable, Union, cast
import uuid
import attr
import certifi
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import websocket_api
from homeassistant.const import (
CONF_CLIENT_ID,
CONF_DISCOVERY,
CONF_PASSWORD,
CONF_PAYLOAD,
CONF_PORT,
CONF_PROTOCOL,
CONF_USERNAME,
CONF_VALUE_TEMPLATE,
EVENT_HOMEASSISTANT_STARTED,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import (
CoreState,
Event,
HassJob,
HomeAssistant,
ServiceCall,
callback,
)
from homeassistant.exceptions import HomeAssistantError, Unauthorized
from homeassistant.helpers import config_validation as cv, event, template
from homeassistant.helpers.dispatcher import async_dispatcher_connect, dispatcher_send
from homeassistant.helpers.typing import ConfigType, ServiceDataType
from homeassistant.loader import bind_hass
from homeassistant.util import dt as dt_util
from homeassistant.util.async_ import run_callback_threadsafe
from homeassistant.util.logging import catch_log_exception
# Loading the config flow file will register the flow
from . import debug_info, discovery
from .const import (
ATTR_PAYLOAD,
ATTR_QOS,
ATTR_RETAIN,
ATTR_TOPIC,
CONF_BIRTH_MESSAGE,
CONF_BROKER,
CONF_QOS,
CONF_RETAIN,
CONF_STATE_TOPIC,
CONF_WILL_MESSAGE,
DATA_MQTT_CONFIG,
DEFAULT_BIRTH,
DEFAULT_DISCOVERY,
DEFAULT_PREFIX,
DEFAULT_QOS,
DEFAULT_RETAIN,
DEFAULT_WILL,
DOMAIN,
MQTT_CONNECTED,
MQTT_DISCONNECTED,
PROTOCOL_311,
)
from .discovery import LAST_DISCOVERY
from .models import (
AsyncMessageCallbackType,
MessageCallbackType,
PublishMessage,
PublishPayloadType,
ReceiveMessage,
ReceivePayloadType,
)
from .util import _VALID_QOS_SCHEMA, valid_publish_topic, valid_subscribe_topic
_LOGGER = logging.getLogger(__name__)
# hass.data key holding the active MQTT client instance.
DATA_MQTT = "mqtt"
# Service names registered by this integration.
SERVICE_PUBLISH = "publish"
SERVICE_DUMP = "dump"
# Configuration keys specific to this integration.
CONF_DISCOVERY_PREFIX = "discovery_prefix"
CONF_KEEPALIVE = "keepalive"
CONF_CERTIFICATE = "certificate"
CONF_CLIENT_KEY = "client_key"
CONF_CLIENT_CERT = "client_cert"
CONF_TLS_INSECURE = "tls_insecure"
CONF_TLS_VERSION = "tls_version"
CONF_COMMAND_TOPIC = "command_topic"
CONF_TOPIC = "topic"
PROTOCOL_31 = "3.1"
# Defaults used when the corresponding option is omitted.
DEFAULT_PORT = 1883
DEFAULT_KEEPALIVE = 60
DEFAULT_PROTOCOL = PROTOCOL_311
DEFAULT_TLS_PROTOCOL = "auto"
ATTR_PAYLOAD_TEMPLATE = "payload_template"
MAX_RECONNECT_WAIT = 300  # seconds
CONNECTION_SUCCESS = "connection_success"
CONNECTION_FAILED = "connection_failed"
CONNECTION_FAILED_RECOVERABLE = "connection_failed_recoverable"
# Seconds of discovery/subscribe quiet time required before the birth
# message is published (see MQTT._discovery_cooldown).
DISCOVERY_COOLDOWN = 2
# Seconds to wait for a broker ACK before logging a warning.
TIMEOUT_ACK = 10
# Entity platforms this integration can set up.
PLATFORMS = [
    "alarm_control_panel",
    "binary_sensor",
    "camera",
    "climate",
    "cover",
    "fan",
    "humidifier",
    "light",
    "lock",
    "number",
    "scene",
    "sensor",
    "switch",
    "vacuum",
]
CLIENT_KEY_AUTH_MSG = (
    "client_key and client_cert must both be present in "
    "the MQTT broker configuration"
)
# Schema shared by the birth and will message options.
MQTT_WILL_BIRTH_SCHEMA = vol.Schema(
    {
        vol.Inclusive(ATTR_TOPIC, "topic_payload"): valid_publish_topic,
        vol.Inclusive(ATTR_PAYLOAD, "topic_payload"): cv.string,
        vol.Optional(ATTR_QOS, default=DEFAULT_QOS): _VALID_QOS_SCHEMA,
        vol.Optional(ATTR_RETAIN, default=DEFAULT_RETAIN): cv.boolean,
    },
    required=True,
)
def embedded_broker_deprecated(value):
    """Warn user that embedded MQTT broker is deprecated.

    Acts as a pass-through validator: logs the deprecation warning and
    returns *value* unchanged.
    """
    # Bug fix: the implicitly-concatenated string literals were missing
    # separating spaces, logging "workingafter" and "Forinstructions".
    _LOGGER.warning(
        "The embedded MQTT broker has been deprecated and will stop working "
        "after June 5th, 2019. Use an external broker instead. For "
        "instructions, see https://www.home-assistant.io/docs/mqtt/broker"
    )
    return value
# YAML configuration schema for the mqtt: domain.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.All(
            cv.deprecated(CONF_TLS_VERSION),
            vol.Schema(
                {
                    vol.Optional(CONF_CLIENT_ID): cv.string,
                    vol.Optional(CONF_KEEPALIVE, default=DEFAULT_KEEPALIVE): vol.All(
                        vol.Coerce(int), vol.Range(min=15)
                    ),
                    vol.Optional(CONF_BROKER): cv.string,
                    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
                    vol.Optional(CONF_USERNAME): cv.string,
                    vol.Optional(CONF_PASSWORD): cv.string,
                    # "auto" selects the certifi CA bundle (see MQTT.init_client).
                    vol.Optional(CONF_CERTIFICATE): vol.Any("auto", cv.isfile),
                    vol.Inclusive(
                        CONF_CLIENT_KEY, "client_key_auth", msg=CLIENT_KEY_AUTH_MSG
                    ): cv.isfile,
                    vol.Inclusive(
                        CONF_CLIENT_CERT, "client_key_auth", msg=CLIENT_KEY_AUTH_MSG
                    ): cv.isfile,
                    vol.Optional(CONF_TLS_INSECURE): cv.boolean,
                    vol.Optional(
                        CONF_TLS_VERSION, default=DEFAULT_TLS_PROTOCOL
                    ): vol.Any("auto", "1.0", "1.1", "1.2"),
                    vol.Optional(CONF_PROTOCOL, default=DEFAULT_PROTOCOL): vol.All(
                        cv.string, vol.In([PROTOCOL_31, PROTOCOL_311])
                    ),
                    vol.Optional(
                        CONF_WILL_MESSAGE, default=DEFAULT_WILL
                    ): MQTT_WILL_BIRTH_SCHEMA,
                    vol.Optional(
                        CONF_BIRTH_MESSAGE, default=DEFAULT_BIRTH
                    ): MQTT_WILL_BIRTH_SCHEMA,
                    vol.Optional(CONF_DISCOVERY, default=DEFAULT_DISCOVERY): cv.boolean,
                    # discovery_prefix must be a valid publish topic because if no
                    # state topic is specified, it will be created with the given prefix.
                    vol.Optional(
                        CONF_DISCOVERY_PREFIX, default=DEFAULT_PREFIX
                    ): valid_publish_topic,
                }
            ),
        )
    },
    extra=vol.ALLOW_EXTRA,
)
SCHEMA_BASE = {vol.Optional(CONF_QOS, default=DEFAULT_QOS): _VALID_QOS_SCHEMA}
MQTT_BASE_PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend(SCHEMA_BASE)
# Sensor type platforms subscribe to MQTT events
MQTT_RO_PLATFORM_SCHEMA = MQTT_BASE_PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_STATE_TOPIC): valid_subscribe_topic,
        vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
    }
)
# Switch type platforms publish to MQTT and may subscribe
MQTT_RW_PLATFORM_SCHEMA = MQTT_BASE_PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_COMMAND_TOPIC): valid_publish_topic,
        vol.Optional(CONF_RETAIN, default=DEFAULT_RETAIN): cv.boolean,
        vol.Optional(CONF_STATE_TOPIC): valid_subscribe_topic,
    }
)
# Service call validation schema
MQTT_PUBLISH_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_TOPIC): valid_publish_topic,
        vol.Exclusive(ATTR_PAYLOAD, CONF_PAYLOAD): cv.string,
        vol.Exclusive(ATTR_PAYLOAD_TEMPLATE, CONF_PAYLOAD): cv.string,
        vol.Optional(ATTR_QOS, default=DEFAULT_QOS): _VALID_QOS_SCHEMA,
        vol.Optional(ATTR_RETAIN, default=DEFAULT_RETAIN): cv.boolean,
    },
    required=True,
)
SubscribePayloadType = Union[str, bytes]  # Only bytes if encoding is None
def _build_publish_data(topic: Any, qos: int, retain: bool) -> ServiceDataType:
    """Build the arguments for the publish service without the payload.

    qos and retain are only included when they are not None, so the
    service schema defaults can apply.
    """
    data: ServiceDataType = {ATTR_TOPIC: topic}
    for key, value in ((ATTR_QOS, qos), (ATTR_RETAIN, retain)):
        if value is not None:
            data[key] = value
    return data
@bind_hass
def publish(hass: HomeAssistant, topic, payload, qos=None, retain=None) -> None:
    """Publish message to an MQTT topic.

    Thread-safe wrapper around async_publish for use outside the event loop.
    """
    hass.add_job(async_publish, hass, topic, payload, qos, retain)
@callback
@bind_hass
def async_publish(
    hass: HomeAssistant, topic: Any, payload, qos=None, retain=None
) -> None:
    """Publish message to an MQTT topic."""
    service_data = _build_publish_data(topic, qos, retain)
    service_data[ATTR_PAYLOAD] = payload
    hass.async_create_task(
        hass.services.async_call(DOMAIN, SERVICE_PUBLISH, service_data)
    )
@bind_hass
def publish_template(
    hass: HomeAssistant, topic, payload_template, qos=None, retain=None
) -> None:
    """Publish message to an MQTT topic.

    Thread-safe wrapper around async_publish_template.
    """
    hass.add_job(async_publish_template, hass, topic, payload_template, qos, retain)
@bind_hass
def async_publish_template(
    hass: HomeAssistant, topic, payload_template, qos=None, retain=None
) -> None:
    """Publish message to an MQTT topic using a template payload.

    The template is rendered by the publish service handler, not here.
    """
    service_data = _build_publish_data(topic, qos, retain)
    service_data[ATTR_PAYLOAD_TEMPLATE] = payload_template
    hass.async_create_task(
        hass.services.async_call(DOMAIN, SERVICE_PUBLISH, service_data)
    )
AsyncDeprecatedMessageCallbackType = Callable[
    [str, ReceivePayloadType, int], Awaitable[None]
]
DeprecatedMessageCallbackType = Callable[[str, ReceivePayloadType, int], None]


def wrap_msg_callback(
    msg_callback: AsyncDeprecatedMessageCallbackType | DeprecatedMessageCallbackType,
) -> AsyncMessageCallbackType | MessageCallbackType:
    """Wrap an MQTT message callback to support deprecated signature.

    Adapts an old-style (topic, payload, qos) callback to the new
    single-ReceiveMessage-argument signature.
    """
    # Unwrap partials so coroutine detection sees the underlying function.
    target = msg_callback
    while isinstance(target, partial):
        target = target.func
    if asyncio.iscoroutinefunction(target):

        @wraps(msg_callback)
        async def async_wrapper(msg: ReceiveMessage) -> None:
            """Call with deprecated signature."""
            await cast(AsyncDeprecatedMessageCallbackType, msg_callback)(
                msg.topic, msg.payload, msg.qos
            )

        return async_wrapper

    @wraps(msg_callback)
    def wrapper(msg: ReceiveMessage) -> None:
        """Call with deprecated signature."""
        msg_callback(msg.topic, msg.payload, msg.qos)

    return wrapper
@bind_hass
async def async_subscribe(
    hass: HomeAssistant,
    topic: str,
    msg_callback: AsyncMessageCallbackType
    | MessageCallbackType
    | DeprecatedMessageCallbackType
    | AsyncDeprecatedMessageCallbackType,
    qos: int = DEFAULT_QOS,
    encoding: str | None = "utf-8",
):
    """Subscribe to an MQTT topic.
    Call the return value to unsubscribe.
    """
    # Count callback parameters which don't have a default value
    non_default = 0
    if msg_callback:
        non_default = sum(
            p.default == inspect.Parameter.empty
            for _, p in inspect.signature(msg_callback).parameters.items()
        )
    wrapped_msg_callback = msg_callback
    # If we have 3 parameters with no default value, wrap the callback
    # (three non-default parameters implies the old (topic, payload, qos)
    # signature rather than the new single-ReceiveMessage signature).
    if non_default == 3:
        module = inspect.getmodule(msg_callback)
        _LOGGER.warning(
            "Signature of MQTT msg_callback '%s.%s' is deprecated",
            module.__name__ if module else "<unknown>",
            msg_callback.__name__,
        )
        wrapped_msg_callback = wrap_msg_callback(
            cast(DeprecatedMessageCallbackType, msg_callback)
        )
    # Delegate to the client; catch_log_exception keeps one bad callback
    # from killing message dispatch for every other subscriber.
    async_remove = await hass.data[DATA_MQTT].async_subscribe(
        topic,
        catch_log_exception(
            wrapped_msg_callback,
            lambda msg: (
                f"Exception in {msg_callback.__name__} when handling msg on "
                f"'{msg.topic}': '{msg.payload}'"
            ),
        ),
        qos,
        encoding,
    )
    return async_remove
@bind_hass
def subscribe(
    hass: HomeAssistant,
    topic: str,
    msg_callback: MessageCallbackType,
    qos: int = DEFAULT_QOS,
    encoding: str = "utf-8",
) -> Callable[[], None]:
    """Subscribe to an MQTT topic.

    Blocking, thread-safe variant of async_subscribe for use outside the
    event loop. Returns a callable that removes the subscription.
    """
    # Block the calling thread until the subscription is registered.
    async_remove = asyncio.run_coroutine_threadsafe(
        async_subscribe(hass, topic, msg_callback, qos, encoding), hass.loop
    ).result()
    def remove():
        """Remove listener convert."""
        # The unsubscribe callback must run on the event loop thread.
        run_callback_threadsafe(hass.loop, async_remove).result()
    return remove
async def _async_setup_discovery(
    hass: HomeAssistant, conf: ConfigType, config_entry
) -> None:
    """Try to start the discovery of MQTT devices.
    This method is a coroutine.
    """
    # Discovery listens under the configured discovery prefix for device
    # announcements.
    await discovery.async_start(hass, conf[CONF_DISCOVERY_PREFIX], config_entry)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Start the MQTT protocol service."""
    conf: ConfigType | None = config.get(DOMAIN)
    # Websocket commands are registered even when setup continues via a
    # config entry.
    websocket_api.async_register_command(hass, websocket_subscribe)
    websocket_api.async_register_command(hass, websocket_remove_device)
    websocket_api.async_register_command(hass, websocket_mqtt_info)
    if conf is None:
        # If we have a config entry, setup is done by that config entry.
        # If there is no config entry, this should fail.
        return bool(hass.config_entries.async_entries(DOMAIN))
    conf = dict(conf)
    hass.data[DATA_MQTT_CONFIG] = conf
    # Only import if we haven't before.
    if not hass.config_entries.async_entries(DOMAIN):
        hass.async_create_task(
            hass.config_entries.flow.async_init(
                DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data={}
            )
        )
    return True
def _merge_config(entry, conf):
"""Merge configuration.yaml config with config entry."""
return {**conf, **entry.data}
async def async_setup_entry(hass, entry):
    """Load a config entry.

    Creates the MQTT client, connects, registers the publish/dump
    services and (optionally) starts discovery.
    """
    conf = hass.data.get(DATA_MQTT_CONFIG)
    # Config entry was created because user had configuration.yaml entry
    # They removed that, so remove entry.
    if conf is None and entry.source == config_entries.SOURCE_IMPORT:
        hass.async_create_task(hass.config_entries.async_remove(entry.entry_id))
        return False
    # If user didn't have configuration.yaml config, generate defaults
    if conf is None:
        conf = CONFIG_SCHEMA({DOMAIN: dict(entry.data)})[DOMAIN]
    elif any(key in conf for key in entry.data):
        shared_keys = conf.keys() & entry.data.keys()
        override = {k: entry.data[k] for k in shared_keys}
        if CONF_PASSWORD in override:
            # Never log the actual password.
            override[CONF_PASSWORD] = "********"
        _LOGGER.info(
            "Data in your configuration entry is going to override your "
            "configuration.yaml: %s",
            override,
        )
    conf = _merge_config(entry, conf)
    hass.data[DATA_MQTT] = MQTT(
        hass,
        entry,
        conf,
    )
    await hass.data[DATA_MQTT].async_connect()
    async def async_stop_mqtt(_event: Event):
        """Stop MQTT component."""
        await hass.data[DATA_MQTT].async_disconnect()
    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_stop_mqtt)
    async def async_publish_service(call: ServiceCall):
        """Handle MQTT publish service calls."""
        msg_topic: str = call.data[ATTR_TOPIC]
        payload = call.data.get(ATTR_PAYLOAD)
        payload_template = call.data.get(ATTR_PAYLOAD_TEMPLATE)
        qos: int = call.data[ATTR_QOS]
        retain: bool = call.data[ATTR_RETAIN]
        if payload_template is not None:
            try:
                payload = template.Template(payload_template, hass).async_render(
                    parse_result=False
                )
            except template.jinja2.TemplateError as exc:
                # Bad template: log and drop the publish rather than raise.
                _LOGGER.error(
                    "Unable to publish to %s: rendering payload template of "
                    "%s failed because %s",
                    msg_topic,
                    payload_template,
                    exc,
                )
                return
        await hass.data[DATA_MQTT].async_publish(msg_topic, payload, qos, retain)
    hass.services.async_register(
        DOMAIN, SERVICE_PUBLISH, async_publish_service, schema=MQTT_PUBLISH_SCHEMA
    )
    async def async_dump_service(call: ServiceCall):
        """Handle MQTT dump service calls."""
        messages = []
        @callback
        def collect_msg(msg):
            messages.append((msg.topic, msg.payload.replace("\n", "")))
        unsub = await async_subscribe(hass, call.data["topic"], collect_msg)
        def write_dump():
            with open(hass.config.path("mqtt_dump.txt"), "wt", encoding="utf8") as fp:
                for msg in messages:
                    fp.write(",".join(msg) + "\n")
        async def finish_dump(_):
            """Write dump to file."""
            unsub()
            await hass.async_add_executor_job(write_dump)
        # Collect messages for "duration" seconds, then write the dump.
        event.async_call_later(hass, call.data["duration"], finish_dump)
    hass.services.async_register(
        DOMAIN,
        SERVICE_DUMP,
        async_dump_service,
        schema=vol.Schema(
            {
                vol.Required("topic"): valid_subscribe_topic,
                vol.Optional("duration", default=5): int,
            }
        ),
    )
    if conf.get(CONF_DISCOVERY):
        await _async_setup_discovery(hass, conf, entry)
    return True
@attr.s(slots=True, frozen=True)
class Subscription:
    """Class to hold data about an active subscription."""
    # Topic filter as given by the subscriber (may contain wildcards).
    topic: str = attr.ib()
    # Callable that tests whether a concrete topic matches this filter.
    matcher: Any = attr.ib()
    # HassJob wrapping the subscriber's message callback.
    job: HassJob = attr.ib()
    qos: int = attr.ib(default=0)
    # Payload decode encoding; None delivers raw bytes to the callback.
    encoding: str | None = attr.ib(default="utf-8")
class MQTT:
    """Home Assistant MQTT client.

    Wraps a paho-mqtt Client: paho callbacks run on paho's network
    thread and hand work off to the Home Assistant event loop via
    hass.add_job / run_coroutine_threadsafe.
    """
    def __init__(
        self,
        hass: HomeAssistant,
        config_entry,
        conf,
    ) -> None:
        """Initialize Home Assistant MQTT client."""
        # We don't import on the top because some integrations
        # should be able to optionally rely on MQTT.
        import paho.mqtt.client as mqtt  # pylint: disable=import-outside-toplevel
        self.hass = hass
        self.config_entry = config_entry
        self.conf = conf
        self.subscriptions: list[Subscription] = []
        self.connected = False
        self._ha_started = asyncio.Event()
        self._last_subscribe = time.time()
        self._mqttc: mqtt.Client = None
        self._paho_lock = asyncio.Lock()
        # Maps paho message id (mid) -> Event set when the broker ACKs it.
        self._pending_operations: dict[str, asyncio.Event] = {}
        if self.hass.state == CoreState.running:
            self._ha_started.set()
        else:
            @callback
            def ha_started(_):
                self._ha_started.set()
            self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STARTED, ha_started)
        self.init_client()
        self.config_entry.add_update_listener(self.async_config_entry_updated)
    @staticmethod
    async def async_config_entry_updated(hass, entry) -> None:
        """Handle signals of config entry being updated.
        This is a static method because a class method (bound method), can not be used with weak references.
        Causes for this is config entry options changing.
        """
        self = hass.data[DATA_MQTT]
        conf = hass.data.get(DATA_MQTT_CONFIG)
        if conf is None:
            # Entry-only setup: derive the config purely from entry data.
            conf = CONFIG_SCHEMA({DOMAIN: dict(entry.data)})[DOMAIN]
        self.conf = _merge_config(entry, conf)
        # Recycle the connection so the new settings take effect.
        await self.async_disconnect()
        self.init_client()
        await self.async_connect()
        # Restart discovery with the updated configuration.
        await discovery.async_stop(hass)
        if self.conf.get(CONF_DISCOVERY):
            await _async_setup_discovery(hass, self.conf, entry)
    def init_client(self):
        """Initialize paho client."""
        # We don't import on the top because some integrations
        # should be able to optionally rely on MQTT.
        import paho.mqtt.client as mqtt  # pylint: disable=import-outside-toplevel
        if self.conf[CONF_PROTOCOL] == PROTOCOL_31:
            proto: int = mqtt.MQTTv31
        else:
            proto = mqtt.MQTTv311
        client_id = self.conf.get(CONF_CLIENT_ID)
        if client_id is None:
            # PAHO MQTT relies on the MQTT server to generate random client IDs.
            # However, that feature is not mandatory so we generate our own.
            client_id = mqtt.base62(uuid.uuid4().int, padding=22)
        self._mqttc = mqtt.Client(client_id, protocol=proto)
        # Enable logging
        self._mqttc.enable_logger()
        username = self.conf.get(CONF_USERNAME)
        password = self.conf.get(CONF_PASSWORD)
        if username is not None:
            self._mqttc.username_pw_set(username, password)
        certificate = self.conf.get(CONF_CERTIFICATE)
        if certificate == "auto":
            # "auto" means validate against the certifi CA bundle.
            certificate = certifi.where()
        client_key = self.conf.get(CONF_CLIENT_KEY)
        client_cert = self.conf.get(CONF_CLIENT_CERT)
        tls_insecure = self.conf.get(CONF_TLS_INSECURE)
        if certificate is not None:
            self._mqttc.tls_set(
                certificate,
                certfile=client_cert,
                keyfile=client_key,
                tls_version=ssl.PROTOCOL_TLS,
            )
        if tls_insecure is not None:
            self._mqttc.tls_insecure_set(tls_insecure)
        self._mqttc.on_connect = self._mqtt_on_connect
        self._mqttc.on_disconnect = self._mqtt_on_disconnect
        self._mqttc.on_message = self._mqtt_on_message
        self._mqttc.on_publish = self._mqtt_on_callback
        self._mqttc.on_subscribe = self._mqtt_on_callback
        self._mqttc.on_unsubscribe = self._mqtt_on_callback
        if (
            CONF_WILL_MESSAGE in self.conf
            and ATTR_TOPIC in self.conf[CONF_WILL_MESSAGE]
        ):
            will_message = PublishMessage(**self.conf[CONF_WILL_MESSAGE])
        else:
            will_message = None
        if will_message is not None:
            self._mqttc.will_set(
                topic=will_message.topic,
                payload=will_message.payload,
                qos=will_message.qos,
                retain=will_message.retain,
            )
    async def async_publish(
        self, topic: str, payload: PublishPayloadType, qos: int, retain: bool
    ) -> None:
        """Publish a MQTT message."""
        async with self._paho_lock:
            # paho's publish blocks; run it in the executor.
            msg_info = await self.hass.async_add_executor_job(
                self._mqttc.publish, topic, payload, qos, retain
            )
            _LOGGER.debug(
                "Transmitting message on %s: '%s', mid: %s",
                topic,
                payload,
                msg_info.mid,
            )
            _raise_on_error(msg_info.rc)
            await self._wait_for_mid(msg_info.mid)
    async def async_connect(self) -> None:
        """Connect to the host. Does not process messages yet."""
        # pylint: disable=import-outside-toplevel
        import paho.mqtt.client as mqtt
        result: int | None = None
        try:
            result = await self.hass.async_add_executor_job(
                self._mqttc.connect,
                self.conf[CONF_BROKER],
                self.conf[CONF_PORT],
                self.conf[CONF_KEEPALIVE],
            )
        except OSError as err:
            _LOGGER.error("Failed to connect to MQTT server due to exception: %s", err)
        if result is not None and result != 0:
            _LOGGER.error(
                "Failed to connect to MQTT server: %s", mqtt.error_string(result)
            )
        # Start the network loop even after a failed connect; paho retries.
        self._mqttc.loop_start()
    async def async_disconnect(self):
        """Stop the MQTT client."""
        def stop():
            """Stop the MQTT client."""
            # Do not disconnect, we want the broker to always publish will
            self._mqttc.loop_stop()
        await self.hass.async_add_executor_job(stop)
    async def async_subscribe(
        self,
        topic: str,
        msg_callback: MessageCallbackType,
        qos: int,
        encoding: str | None = None,
    ) -> Callable[[], None]:
        """Set up a subscription to a topic with the provided qos.
        This method is a coroutine.
        """
        if not isinstance(topic, str):
            raise HomeAssistantError("Topic needs to be a string!")
        subscription = Subscription(
            topic, _matcher_for_topic(topic), HassJob(msg_callback), qos, encoding
        )
        self.subscriptions.append(subscription)
        # The subscription set changed; invalidate the topic-match cache.
        self._matching_subscriptions.cache_clear()
        # Only subscribe if currently connected.
        if self.connected:
            self._last_subscribe = time.time()
            await self._async_perform_subscription(topic, qos)
        @callback
        def async_remove() -> None:
            """Remove subscription."""
            if subscription not in self.subscriptions:
                raise HomeAssistantError("Can't remove subscription twice")
            self.subscriptions.remove(subscription)
            self._matching_subscriptions.cache_clear()
            if any(other.topic == topic for other in self.subscriptions):
                # Other subscriptions on topic remaining - don't unsubscribe.
                return
            # Only unsubscribe if currently connected.
            if self.connected:
                self.hass.async_create_task(self._async_unsubscribe(topic))
        return async_remove
    async def _async_unsubscribe(self, topic: str) -> None:
        """Unsubscribe from a topic.
        This method is a coroutine.
        """
        async with self._paho_lock:
            result: int | None = None
            result, mid = await self.hass.async_add_executor_job(
                self._mqttc.unsubscribe, topic
            )
            _LOGGER.debug("Unsubscribing from %s, mid: %s", topic, mid)
            _raise_on_error(result)
            await self._wait_for_mid(mid)
    async def _async_perform_subscription(self, topic: str, qos: int) -> None:
        """Perform a paho-mqtt subscription."""
        async with self._paho_lock:
            result: int | None = None
            result, mid = await self.hass.async_add_executor_job(
                self._mqttc.subscribe, topic, qos
            )
            _LOGGER.debug("Subscribing to %s, mid: %s", topic, mid)
            _raise_on_error(result)
            await self._wait_for_mid(mid)
    def _mqtt_on_connect(self, _mqttc, _userdata, _flags, result_code: int) -> None:
        """On connect callback.
        Resubscribe to all topics we were subscribed to and publish birth
        message.
        """
        # pylint: disable=import-outside-toplevel
        import paho.mqtt.client as mqtt
        if result_code != mqtt.CONNACK_ACCEPTED:
            _LOGGER.error(
                "Unable to connect to the MQTT broker: %s",
                mqtt.connack_string(result_code),
            )
            return
        self.connected = True
        dispatcher_send(self.hass, MQTT_CONNECTED)
        _LOGGER.info(
            "Connected to MQTT server %s:%s (%s)",
            self.conf[CONF_BROKER],
            self.conf[CONF_PORT],
            result_code,
        )
        # Group subscriptions to only re-subscribe once for each topic.
        keyfunc = attrgetter("topic")
        for topic, subs in groupby(sorted(self.subscriptions, key=keyfunc), keyfunc):
            # Re-subscribe with the highest requested qos
            max_qos = max(subscription.qos for subscription in subs)
            self.hass.add_job(self._async_perform_subscription, topic, max_qos)
        if (
            CONF_BIRTH_MESSAGE in self.conf
            and ATTR_TOPIC in self.conf[CONF_BIRTH_MESSAGE]
        ):
            async def publish_birth_message(birth_message):
                await self._ha_started.wait()  # Wait for Home Assistant to start
                await self._discovery_cooldown()  # Wait for MQTT discovery to cool down
                await self.async_publish(
                    topic=birth_message.topic,
                    payload=birth_message.payload,
                    qos=birth_message.qos,
                    retain=birth_message.retain,
                )
            birth_message = PublishMessage(**self.conf[CONF_BIRTH_MESSAGE])
            # This callback runs on paho's thread; hop onto the event loop.
            asyncio.run_coroutine_threadsafe(
                publish_birth_message(birth_message), self.hass.loop
            )
    def _mqtt_on_message(self, _mqttc, _userdata, msg) -> None:
        """Message received callback."""
        # Runs on paho's thread; dispatch handling onto the event loop.
        self.hass.add_job(self._mqtt_handle_message, msg)
    @lru_cache(2048)
    def _matching_subscriptions(self, topic):
        # Return subscriptions whose filter matches *topic*.
        # NOTE(review): lru_cache on a bound method keys on (and retains)
        # self; acceptable while one MQTT client lives for the process —
        # confirm if instances are ever recreated without restart.
        subscriptions = []
        for subscription in self.subscriptions:
            if subscription.matcher(topic):
                subscriptions.append(subscription)
        return subscriptions
    @callback
    def _mqtt_handle_message(self, msg) -> None:
        # Decode (if requested) and fan out the message to all matching
        # subscription jobs on the event loop.
        _LOGGER.debug(
            "Received message on %s%s: %s",
            msg.topic,
            " (retained)" if msg.retain else "",
            msg.payload[0:8192],
        )
        timestamp = dt_util.utcnow()
        subscriptions = self._matching_subscriptions(msg.topic)
        for subscription in subscriptions:
            payload: SubscribePayloadType = msg.payload
            if subscription.encoding is not None:
                try:
                    payload = msg.payload.decode(subscription.encoding)
                except (AttributeError, UnicodeDecodeError):
                    # Undecodable payloads are skipped for this subscriber
                    # only; other subscriptions still receive the message.
                    _LOGGER.warning(
                        "Can't decode payload %s on %s with encoding %s (for %s)",
                        msg.payload[0:8192],
                        msg.topic,
                        subscription.encoding,
                        subscription.job,
                    )
                    continue
            self.hass.async_run_hass_job(
                subscription.job,
                ReceiveMessage(
                    msg.topic,
                    payload,
                    msg.qos,
                    msg.retain,
                    subscription.topic,
                    timestamp,
                ),
            )
    def _mqtt_on_callback(self, _mqttc, _userdata, mid, _granted_qos=None) -> None:
        """Publish / Subscribe / Unsubscribe callback."""
        self.hass.add_job(self._mqtt_handle_mid, mid)
    @callback
    def _mqtt_handle_mid(self, mid) -> None:
        # Create the mid event if not created, either _mqtt_handle_mid or _wait_for_mid
        # may be executed first.
        if mid not in self._pending_operations:
            self._pending_operations[mid] = asyncio.Event()
        self._pending_operations[mid].set()
    def _mqtt_on_disconnect(self, _mqttc, _userdata, result_code: int) -> None:
        """Disconnected callback."""
        self.connected = False
        dispatcher_send(self.hass, MQTT_DISCONNECTED)
        _LOGGER.warning(
            "Disconnected from MQTT server %s:%s (%s)",
            self.conf[CONF_BROKER],
            self.conf[CONF_PORT],
            result_code,
        )
    async def _wait_for_mid(self, mid):
        """Wait for ACK from broker."""
        # Create the mid event if not created, either _mqtt_handle_mid or _wait_for_mid
        # may be executed first.
        if mid not in self._pending_operations:
            self._pending_operations[mid] = asyncio.Event()
        try:
            await asyncio.wait_for(self._pending_operations[mid].wait(), TIMEOUT_ACK)
        except asyncio.TimeoutError:
            # Missing ACK is logged but not fatal; the operation may still
            # have succeeded on the broker side.
            _LOGGER.warning(
                "No ACK from MQTT server in %s seconds (mid: %s)", TIMEOUT_ACK, mid
            )
        finally:
            del self._pending_operations[mid]
    async def _discovery_cooldown(self):
        """Sleep until discovery and subscribe activity have been quiet
        for DISCOVERY_COOLDOWN seconds."""
        now = time.time()
        # Reset discovery and subscribe cooldowns
        self.hass.data[LAST_DISCOVERY] = now
        self._last_subscribe = now
        last_discovery = self.hass.data[LAST_DISCOVERY]
        last_subscribe = self._last_subscribe
        wait_until = max(
            last_discovery + DISCOVERY_COOLDOWN, last_subscribe + DISCOVERY_COOLDOWN
        )
        while now < wait_until:
            await asyncio.sleep(wait_until - now)
            now = time.time()
            # Re-check: any new activity during the sleep extends the wait.
            last_discovery = self.hass.data[LAST_DISCOVERY]
            last_subscribe = self._last_subscribe
            wait_until = max(
                last_discovery + DISCOVERY_COOLDOWN, last_subscribe + DISCOVERY_COOLDOWN
            )
def _raise_on_error(result_code: int | None) -> None:
    """Raise HomeAssistantError when paho-mqtt reported a failure code."""
    # pylint: disable=import-outside-toplevel
    import paho.mqtt.client as mqtt

    # None (no result) and 0 (MQTT_ERR_SUCCESS) both mean "no error".
    if not result_code:
        return
    raise HomeAssistantError(
        f"Error talking to MQTT: {mqtt.error_string(result_code)}"
    )
def _matcher_for_topic(subscription: str) -> Any:
    """Return a callable testing whether a topic matches *subscription*."""
    # pylint: disable=import-outside-toplevel
    from paho.mqtt.matcher import MQTTMatcher

    matcher = MQTTMatcher()
    matcher[subscription] = True

    def match(topic):
        # iter_match yields the stored True for each matching filter;
        # an empty iterator falls back to False.
        return next(matcher.iter_match(topic), False)

    return match
@websocket_api.websocket_command(
    {vol.Required("type"): "mqtt/device/debug_info", vol.Required("device_id"): str}
)
@websocket_api.async_response
async def websocket_mqtt_info(hass, connection, msg):
    """Get MQTT debug info for device."""
    device_id = msg["device_id"]
    mqtt_info = await debug_info.info_for_device(hass, device_id)
    # Reply on the same websocket connection that issued the command.
    connection.send_result(msg["id"], mqtt_info)
@websocket_api.websocket_command(
    {vol.Required("type"): "mqtt/device/remove", vol.Required("device_id"): str}
)
@websocket_api.async_response
async def websocket_remove_device(hass, connection, msg):
    """Delete device.

    Only removes the device when it belongs to an MQTT config entry;
    otherwise replies with a not-found error.
    """
    device_id = msg["device_id"]
    dev_registry = await hass.helpers.device_registry.async_get_registry()
    device = dev_registry.async_get(device_id)
    if not device:
        connection.send_error(
            msg["id"], websocket_api.const.ERR_NOT_FOUND, "Device not found"
        )
        return
    for config_entry in device.config_entries:
        config_entry = hass.config_entries.async_get_entry(config_entry)
        # Only delete the device if it belongs to an MQTT device entry
        if config_entry.domain == DOMAIN:
            dev_registry.async_remove_device(device_id)
            connection.send_message(websocket_api.result_message(msg["id"]))
            return
    connection.send_error(
        msg["id"], websocket_api.const.ERR_NOT_FOUND, "Non MQTT device"
    )
@websocket_api.websocket_command(
    {
        vol.Required("type"): "mqtt/subscribe",
        vol.Required("topic"): valid_subscribe_topic,
    }
)
@websocket_api.async_response
async def websocket_subscribe(hass, connection, msg):
    """Subscribe to a MQTT topic.

    Forwards each received message to the websocket connection as an
    event. Admin only.
    """
    if not connection.user.is_admin:
        raise Unauthorized
    async def forward_messages(mqttmsg: ReceiveMessage):
        """Forward events to websocket."""
        connection.send_message(
            websocket_api.event_message(
                msg["id"],
                {
                    "topic": mqttmsg.topic,
                    "payload": mqttmsg.payload,
                    "qos": mqttmsg.qos,
                    "retain": mqttmsg.retain,
                },
            )
        )
    # Register the unsubscribe callback so it runs when the websocket
    # subscription is removed or the connection closes.
    connection.subscriptions[msg["id"]] = await async_subscribe(
        hass, msg["topic"], forward_messages
    )
    connection.send_message(websocket_api.result_message(msg["id"]))
# Signature for callables notified about MQTT connection state changes.
ConnectionStatusCallback = Callable[[bool], None]
@callback
def async_subscribe_connection_status(
    hass: HomeAssistant, connection_status_callback: ConnectionStatusCallback
) -> Callable[[], None]:
    """Subscribe to MQTT connection changes.

    The callback receives True on connect and False on disconnect.
    Returns a callable that removes both dispatcher subscriptions.
    """
    connection_status_callback_job = HassJob(connection_status_callback)
    async def connected():
        task = hass.async_run_hass_job(connection_status_callback_job, True)
        if task:
            await task
    async def disconnected():
        task = hass.async_run_hass_job(connection_status_callback_job, False)
        if task:
            await task
    subscriptions = {
        "connect": async_dispatcher_connect(hass, MQTT_CONNECTED, connected),
        "disconnect": async_dispatcher_connect(hass, MQTT_DISCONNECTED, disconnected),
    }
    @callback
    def unsubscribe():
        subscriptions["connect"]()
        subscriptions["disconnect"]()
    return unsubscribe
def is_connected(hass: HomeAssistant) -> bool:
    """Return if MQTT client is connected."""
    # Reflects the flag maintained by the paho connect/disconnect callbacks.
    return hass.data[DATA_MQTT].connected
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy.session import get_session
from nova import exception
from nova import log as logging
from nova.network.quantum import manager as quantum_manager
from nova.network.quantum import melange_connection
from nova import test
from nova import utils
from nova.network import manager
# Module-level logger for these quantum network tests.
LOG = logging.getLogger('nova.tests.quantum_network')
# this class can be used for unit functional/testing on nova,
# as it does not actually make remote calls to the Quantum service
class FakeQuantumClientConnection(object):
    """In-memory stand-in for a Quantum client connection.

    Implements the subset of the Quantum API used by nova's quantum
    network manager so tests can run without remote calls to a real
    Quantum service. State lives entirely in self.nets.
    """

    def __init__(self):
        # net_id -> {'net-name': str, 'tenant-id': str,
        #            'ports': {port_id: {'port-state', 'attachment-id'}}}
        self.nets = {}

    def get_networks_for_tenant(self, tenant_id):
        """Return {'networks': [net_id, ...]} owned by tenant_id."""
        net_ids = [net_id for net_id, net in self.nets.items()
                   if net['tenant-id'] == tenant_id]
        return {'networks': net_ids}

    def create_network(self, tenant_id, network_name, **kwargs):
        """Create a network for the tenant and return its new uuid."""
        uuid = str(utils.gen_uuid())
        self.nets[uuid] = {'net-name': network_name,
                           'tenant-id': tenant_id,
                           'ports': {}}
        return uuid

    def delete_network(self, tenant_id, net_id):
        """Delete net_id if it is owned by tenant_id (no-op otherwise)."""
        if self.nets[net_id]['tenant-id'] == tenant_id:
            del self.nets[net_id]

    def network_exists(self, tenant_id, net_id):
        """Return True when net_id exists and belongs to tenant_id."""
        try:
            return self.nets[net_id]['tenant-id'] == tenant_id
        except KeyError:
            return False

    def get_network_name(self, tenant_id, net_id):
        # NOTE(review): like the original, tenant_id is not checked here.
        return self.nets[net_id]['net-name']

    def _confirm_not_attached(self, interface_id):
        """Raise if interface_id is already attached to any port."""
        for net in self.nets.values():
            for port in net['ports'].values():
                if port['attachment-id'] == interface_id:
                    raise Exception(_("interface '%s' is already attached")
                                    % interface_id)

    def create_and_attach_port(self, tenant_id, net_id, interface_id,
                               **kwargs):
        """Create a new active port on net_id attached to interface_id."""
        if not self.network_exists(tenant_id, net_id):
            # Bug fix: "%(tenant_id)" was missing its trailing "s"
            # conversion, which made this raise ValueError instead of the
            # intended message; interpolation also now happens after _()
            # so translation lookup sees the untouched msgid.
            raise Exception(
                _("network %(net_id)s does not exist for tenant %(tenant_id)s")
                % locals())
        self._confirm_not_attached(interface_id)
        uuid = str(utils.gen_uuid())
        self.nets[net_id]['ports'][uuid] = \
                {"port-state": "ACTIVE",
                 "attachment-id": interface_id}

    def detach_and_delete_port(self, tenant_id, net_id, port_id):
        """Remove port_id from net_id; raise NotFound for a bad network."""
        if not self.network_exists(tenant_id, net_id):
            raise exception.NotFound(
                _("network %(net_id)s does not exist "
                  "for tenant %(tenant_id)s") % locals())
        del self.nets[net_id]['ports'][port_id]

    def get_port_by_attachment(self, tenant_id, net_id, attachment_id):
        """Return the port id attached to attachment_id, or None."""
        for nid, net in self.nets.items():
            if nid == net_id and net['tenant-id'] == tenant_id:
                for port_id, port in net['ports'].items():
                    if port['attachment-id'] == attachment_id:
                        return port_id
        return None

    def get_attached_ports(self, tenant_id, net_id):
        """Return [{'port-id': ..., 'attachment': ...}] for net_id."""
        ports = []
        for nid, net in self.nets.items():
            if nid == net_id and net['tenant-id'] == tenant_id:
                for port_id, port in net['ports'].items():
                    ports.append({'port-id': port_id,
                                  'attachment': port['attachment-id']})
        return ports

    def get_networks(self, tenant_id):
        """Return {'networks': [{'id': net_id}, ...]} for tenant_id."""
        nets = [{'id': nid} for nid, net in self.nets.items()
                if net['tenant-id'] == tenant_id]
        return {'networks': nets}
# Static network fixtures used by the tests below: one private network
# per fake project, a second network for fake_project2, and a shared
# "public" network with no project_id.
# NOTE(review): keys mirror columns of nova's networks table; 'priority'
# presumably drives NIC ordering once priority support exists -- confirm.
networks = [{'label': 'project1-net1',
             'injected': False,
             'multi_host': False,
             'cidr': '192.168.0.0/24',
             'cidr_v6': '2001:1db8::/64',
             'gateway_v6': '2001:1db8::1',
             'netmask_v6': '64',
             'netmask': '255.255.255.0',
             'bridge': None,
             'bridge_interface': None,
             'gateway': '192.168.0.1',
             'broadcast': '192.168.0.255',
             'dns1': '192.168.0.1',
             'dns2': '192.168.0.2',
             'vlan': None,
             'host': None,
             'vpn_public_address': None,
             'project_id': 'fake_project1',
             'priority': 1},
            {'label': 'project2-net1',
             'injected': False,
             'multi_host': False,
             'cidr': '192.168.1.0/24',
             'cidr_v6': '2001:1db9::/64',
             'gateway_v6': '2001:1db9::1',
             'netmask_v6': '64',
             'netmask': '255.255.255.0',
             'bridge': None,
             'bridge_interface': None,
             'gateway': '192.168.1.1',
             'broadcast': '192.168.1.255',
             'dns1': '192.168.0.1',
             'dns2': '192.168.0.2',
             'vlan': None,
             'host': None,
             'project_id': 'fake_project2',
             'priority': 1},
            # Shared network: project_id None, lowest priority value.
            {'label': "public",
             'injected': False,
             'multi_host': False,
             'cidr': '10.0.0.0/24',
             'cidr_v6': '2001:1dba::/64',
             'gateway_v6': '2001:1dba::1',
             'netmask_v6': '64',
             'netmask': '255.255.255.0',
             'bridge': None,
             'bridge_interface': None,
             'gateway': '10.0.0.1',
             'broadcast': '10.0.0.255',
             'dns1': '10.0.0.1',
             'dns2': '10.0.0.2',
             'vlan': None,
             'host': None,
             'project_id': None,
             'priority': 0},
            {'label': "project2-net2",
             'injected': False,
             'multi_host': False,
             'cidr': '9.0.0.0/24',
             'cidr_v6': '2001:1dbb::/64',
             'gateway_v6': '2001:1dbb::1',
             'netmask_v6': '64',
             'netmask': '255.255.255.0',
             'bridge': None,
             'bridge_interface': None,
             'gateway': '9.0.0.1',
             'broadcast': '9.0.0.255',
             'dns1': '9.0.0.1',
             'dns2': '9.0.0.2',
             'vlan': None,
             'host': None,
             'project_id': "fake_project2",
             'priority': 2}]
# This is a base class to be used by all other Quantum test classes.
class QuantumNovaTestCase(test.TestCase):
    """Shared fixture: a QuantumManager wired to the in-memory
    FakeQuantumClientConnection, with leftover DB state scrubbed."""

    def setUp(self):
        super(QuantumNovaTestCase, self).setUp()
        self.net_man = quantum_manager.QuantumManager(
            ipam_lib="nova.network.quantum.nova_ipam_lib",
            q_conn=FakeQuantumClientConnection())

        # Tests seem to create some networks by default, which
        # we don't want. So we delete them.
        ctx = context.RequestContext('user1', 'fake_project1').elevated()
        for n in db.network_get_all(ctx):
            db.network_delete_safe(ctx, n['id'])

        # Other unit tests (e.g., test_compute.py) have a nasty
        # habit of creating fixed IPs and not cleaning up, which
        # can confuse these tests, so we remove all existing fixed
        # ips before starting.
        session = get_session()
        result = session.query(models.FixedIp).all()
        with session.begin():
            for fip_ref in result:
                session.delete(fip_ref)

    def _create_network(self, n):
        """Create one fixture network *n* through the manager and record
        its UUID back into the fixture dict."""
        ctx = context.RequestContext('user1', n['project_id'])
        nwks = self.net_man.create_networks(
            ctx,
            label=n['label'], cidr=n['cidr'],
            multi_host=n['multi_host'],
            num_networks=1, network_size=256,
            cidr_v6=n['cidr_v6'],
            gateway=n['gateway'],
            gateway_v6=n['gateway_v6'], bridge=None,
            bridge_interface=None, dns1=n['dns1'],
            dns2=n['dns2'],
            project_id=n['project_id'],
            priority=n['priority'])
        n['uuid'] = nwks[0]['uuid']
class QuantumNovaIPAMTestCase(QuantumNovaTestCase):
    """End-to-end tests of network create/delete and instance
    allocate/deallocate through the Quantum IPAM path."""

    def test_create_and_delete_nets(self):
        self._create_nets()
        self._delete_nets()

    def _create_nets(self):
        # Create every fixture network under its owning project.
        for n in networks:
            self._create_network(n)

    def _delete_nets(self):
        # Re-discover each network's UUID by label, then delete it
        # through the manager (which drives the fake Quantum client).
        for n in networks:
            ctx = context.RequestContext('user1', n['project_id'])
            db_nets = db.network_get_all(ctx.elevated())
            for x in db_nets:
                if x['label'] == n['label']:
                    n['uuid'] = x['uuid']
            self.net_man.delete_network(ctx, None, n['uuid'])

    def test_allocate_and_deallocate_instance_static(self):
        """Without requested_networks, project1 instances should get
        NICs on the public net and project1-net1 with v4/v6 addresses
        drawn from the right CIDRs."""
        self._create_nets()
        project_id = "fake_project1"
        ctx = context.RequestContext('user1', project_id)
        instance_ref = db.instance_create(ctx,
                                          {"project_id": project_id})

        # Stub out the dhcp driver hooks; only IPAM is under test here.
        def func(arg1, arg2):
            pass

        def func2(arg1, arg2, arg3):
            pass

        def func1(arg1):
            pass

        self.net_man.driver.update_dhcp_hostfile_with_text = func
        self.net_man.driver.restart_dhcp = func2
        self.net_man.driver.kill_dhcp = func1

        nw_info = self.net_man.allocate_for_instance(ctx,
            instance_id=instance_ref['id'], host="",
            instance_type_id=instance_ref['instance_type_id'],
            project_id=project_id)
        self.assertEquals(len(nw_info), 2)

        # we don't know which order the NICs will be in until we
        # introduce the notion of priority
        # v4 cidr
        self.assertTrue(nw_info[0][0]['cidr'].startswith("10."))
        self.assertTrue(nw_info[1][0]['cidr'].startswith("192."))

        # v4 address
        self.assertTrue(nw_info[0][1]['ips'][0]['ip'].startswith("10."))
        self.assertTrue(nw_info[1][1]['ips'][0]['ip'].startswith("192."))

        # v6 cidr
        self.assertTrue(nw_info[0][0]['cidr_v6'].startswith("2001:1dba:"))
        self.assertTrue(nw_info[1][0]['cidr_v6'].startswith("2001:1db8:"))

        # v6 address
        self.assertTrue(
            nw_info[0][1]['ip6s'][0]['ip'].startswith("2001:1dba:"))
        self.assertTrue(
            nw_info[1][1]['ip6s'][0]['ip'].startswith("2001:1db8:"))

        self.net_man.deallocate_for_instance(ctx,
            instance_id=instance_ref['id'],
            project_id=project_id)

        self._delete_nets()

    def test_allocate_and_deallocate_instance_dynamic(self):
        """Same as the static test but with explicitly requested
        networks (project2's two nets); NIC order is unspecified."""
        self._create_nets()
        project_id = "fake_project2"
        ctx = context.RequestContext('user1', project_id)
        net_ids = self.net_man.q_conn.get_networks_for_tenant(project_id)
        requested_networks = [(net_id, None) for net_id in
                              net_ids['networks']]

        self.net_man.validate_networks(ctx, requested_networks)

        instance_ref = db.instance_create(ctx,
                                          {"project_id": project_id})

        # Stub out the dhcp driver hooks, as above.
        def func(arg1, arg2):
            pass

        def func1(arg1):
            pass

        def func2(arg1, arg2, arg3):
            pass

        self.net_man.driver.update_dhcp_hostfile_with_text = func
        self.net_man.driver.restart_dhcp = func2
        self.net_man.driver.kill_dhcp = func1

        nw_info = self.net_man.allocate_for_instance(ctx,
            instance_id=instance_ref['id'], host="",
            instance_type_id=instance_ref['instance_type_id'],
            project_id=project_id,
            requested_networks=requested_networks)

        self.assertEquals(len(nw_info), 2)

        # we don't know which order the NICs will be in until we
        # introduce the notion of priority
        # v4 cidr
        self.assertTrue(nw_info[0][0]['cidr'].startswith("9.") or
                        nw_info[1][0]['cidr'].startswith("9."))
        self.assertTrue(nw_info[0][0]['cidr'].startswith("192.") or
                        nw_info[1][0]['cidr'].startswith("192."))

        # v4 address
        self.assertTrue(nw_info[0][1]['ips'][0]['ip'].startswith("9.") or
                        nw_info[1][1]['ips'][0]['ip'].startswith("9."))
        self.assertTrue(nw_info[0][1]['ips'][0]['ip'].startswith("192.") or
                        nw_info[1][1]['ips'][0]['ip'].startswith("192."))

        # v6 cidr
        self.assertTrue(nw_info[0][0]['cidr_v6'].startswith("2001:1dbb:") or
                        nw_info[1][0]['cidr_v6'].startswith("2001:1dbb:"))
        self.assertTrue(nw_info[0][0]['cidr_v6'].startswith("2001:1db9:") or
                        nw_info[1][0]['cidr_v6'].startswith("2001:1db9:"))

        # v6 address
        self.assertTrue(
            nw_info[0][1]['ip6s'][0]['ip'].startswith("2001:1dbb:") or
            nw_info[1][1]['ip6s'][0]['ip'].startswith("2001:1dbb:"))
        self.assertTrue(
            nw_info[0][1]['ip6s'][0]['ip'].startswith("2001:1db9:") or
            nw_info[1][1]['ip6s'][0]['ip'].startswith("2001:1db9:"))

        self.net_man.deallocate_for_instance(ctx,
            instance_id=instance_ref['id'],
            project_id=project_id)

        self._delete_nets()

    def test_validate_bad_network(self):
        # An empty network id must be rejected.
        ctx = context.RequestContext('user1', 'fake_project1')
        self.assertRaises(exception.NetworkNotFound,
                          self.net_man.validate_networks, ctx, [("", None)])
class QuantumNovaMACGenerationTestCase(QuantumNovaTestCase):
    """Allocated VIFs must take their MAC from the configured generator:
    the local FlatManager one, or Melange when enabled."""

    def test_local_mac_address_creation(self):
        self.flags(use_melange_mac_generation=False)
        fake_mac = "ab:cd:ef:ab:cd:ef"
        # Force a known MAC so we can assert on the allocated VIF.
        self.stubs.Set(manager.FlatManager, "generate_mac_address",
                       lambda x: fake_mac)
        project_id = "fake_project1"
        ctx = context.RequestContext('user1', project_id)
        self._create_network(networks[0])

        net_ids = self.net_man.q_conn.get_networks_for_tenant(project_id)
        requested_networks = [(net_id, None) for net_id in net_ids['networks']]

        # NOTE(review): db.api.instance_create is used here while other
        # tests in this module call db.instance_create -- presumably
        # equivalent entry points; confirm.
        instance_ref = db.api.instance_create(ctx,
                                              {"project_id": project_id})
        nw_info = self.net_man.allocate_for_instance(ctx,
            instance_id=instance_ref['id'], host="",
            instance_type_id=instance_ref['instance_type_id'],
            project_id=project_id,
            requested_networks=requested_networks)
        self.assertEqual(nw_info[0][1]['mac'], fake_mac)

    def test_melange_mac_address_creation(self):
        self.flags(use_melange_mac_generation=True)
        fake_mac = "ab:cd:ef:ab:cd:ef"
        # Stub the Melange call that would normally hand out the MAC.
        self.stubs.Set(melange_connection.MelangeConnection, "create_vif",
                       lambda w, x, y, z: fake_mac)
        project_id = "fake_project1"
        ctx = context.RequestContext('user1', project_id)
        self._create_network(networks[0])

        net_ids = self.net_man.q_conn.get_networks_for_tenant(project_id)
        requested_networks = [(net_id, None) for net_id in net_ids['networks']]

        instance_ref = db.api.instance_create(ctx,
                                              {"project_id": project_id})
        nw_info = self.net_man.allocate_for_instance(ctx,
            instance_id=instance_ref['id'], host="",
            instance_type_id=instance_ref['instance_type_id'],
            project_id=project_id,
            requested_networks=requested_networks)
        self.assertEqual(nw_info[0][1]['mac'], fake_mac)
class QuantumNovaPortSecurityTestCase(QuantumNovaTestCase):
    """Verifies the allowed_address_pairs argument sent to Quantum when
    port security is enabled vs. disabled.

    NOTE(review): 'securty' in the method names is a typo; renaming
    would change the reported test ids, so it is left as-is.
    """

    def test_port_securty(self):
        self.flags(use_melange_mac_generation=True)
        self.flags(quantum_use_port_security=True)
        fake_mac = "ab:cd:ef:ab:cd:ef"
        self.stubs.Set(melange_connection.MelangeConnection, "create_vif",
                       lambda w, x, y, z: fake_mac)
        project_id = "fake_project1"
        ctx = context.RequestContext('user1', project_id)
        self._create_network(networks[0])
        net_ids = self.net_man.q_conn.get_networks_for_tenant(project_id)
        requested_networks = [(net_id, None) for net_id in net_ids['networks']]
        instance_ref = db.api.instance_create(ctx,
                                              {"project_id": project_id})
        oldfunc = self.net_man.q_conn.create_and_attach_port

        # Make sure we get the appropriate mac set in allowed_address_pairs
        # if port security is enabled.
        def _instrumented_create_and_attach_port(tenant_id, net_id,
                                                 interface_id, **kwargs):
            self.assertTrue('allowed_address_pairs' in kwargs.keys())
            pairs = kwargs['allowed_address_pairs']
            self.assertTrue(pairs[0]['mac_address'] == fake_mac)
            # Restore the real implementation after the first call.
            self.net_man.q_conn.create_and_attach_port = oldfunc
            return oldfunc(tenant_id, net_id, interface_id, **kwargs)
        self.net_man.q_conn.create_and_attach_port = \
            _instrumented_create_and_attach_port
        nw_info = self.net_man.allocate_for_instance(ctx,
            instance_id=instance_ref['id'], host="",
            instance_type_id=instance_ref['instance_type_id'],
            project_id=project_id,
            requested_networks=requested_networks)
        self.assertEqual(nw_info[0][1]['mac'], fake_mac)

    def test_port_securty_negative(self):
        self.flags(use_melange_mac_generation=True)
        self.flags(quantum_use_port_security=False)
        fake_mac = "ab:cd:ef:ab:cd:ef"
        self.stubs.Set(melange_connection.MelangeConnection, "create_vif",
                       lambda w, x, y, z: fake_mac)
        project_id = "fake_project1"
        ctx = context.RequestContext('user1', project_id)
        self._create_network(networks[0])
        net_ids = self.net_man.q_conn.get_networks_for_tenant(project_id)
        requested_networks = [(net_id, None) for net_id in net_ids['networks']]
        instance_ref = db.api.instance_create(ctx,
                                              {"project_id": project_id})
        oldfunc = self.net_man.q_conn.create_and_attach_port

        # Make sure no pairs are passed in if port security is turned off
        def _instrumented_create_and_attach_port(tenant_id, net_id,
                                                 interface_id, **kwargs):
            self.assertTrue('allowed_address_pairs' in kwargs.keys())
            pairs = kwargs['allowed_address_pairs']
            self.assertTrue(len(pairs) == 0)
            # Restore the real implementation after the first call.
            self.net_man.q_conn.create_and_attach_port = oldfunc
            return oldfunc(tenant_id, net_id, interface_id, **kwargs)
        self.net_man.q_conn.create_and_attach_port = \
            _instrumented_create_and_attach_port
        nw_info = self.net_man.allocate_for_instance(ctx,
            instance_id=instance_ref['id'], host="",
            instance_type_id=instance_ref['instance_type_id'],
            project_id=project_id,
            requested_networks=requested_networks)
        self.assertEqual(nw_info[0][1]['mac'], fake_mac)
| |
"""WebsocketProtocol76 from tornado 3.2.2 for tornado >= 4.0
The contents of this file are Copyright (c) Tornado
Used under the Apache 2.0 license
"""
from __future__ import absolute_import, division, print_function, with_statement
# Author: Jacob Kristhammar, 2010
import functools
import hashlib
import struct
import time
import tornado.escape
import tornado.web
from tornado.log import gen_log, app_log
from tornado.util import bytes_type, unicode_type
from tornado.websocket import WebSocketHandler, WebSocketProtocol13
class AllowDraftWebSocketHandler(WebSocketHandler):
    """Restore Draft76 support for tornado 4
    Remove when we can run tests without phantomjs + qt4
    """

    # get is unmodified except between the BEGIN/END PATCH lines
    @tornado.web.asynchronous
    def get(self, *args, **kwargs):
        # Stashed so the protocol can call handler.open(*args, **kwargs)
        # once the handshake completes.
        self.open_args = args
        self.open_kwargs = kwargs

        # Upgrade header should be present and should be equal to WebSocket
        if self.request.headers.get("Upgrade", "").lower() != 'websocket':
            self.set_status(400)
            self.finish("Can \"Upgrade\" only to \"WebSocket\".")
            return

        # Connection header should be upgrade. Some proxy servers/load balancers
        # might mess with it.
        # NOTE(review): on Python 3 map() returns an iterator; the single
        # 'in' test below consumes it, which is sufficient here.
        headers = self.request.headers
        connection = map(
            lambda s: s.strip().lower(), headers.get("Connection", "").split(","))
        if 'upgrade' not in connection:
            self.set_status(400)
            self.finish("\"Connection\" must be \"Upgrade\".")
            return

        # Handle WebSocket Origin naming convention differences
        # The difference between version 8 and 13 is that in 8 the
        # client sends a "Sec-Websocket-Origin" header and in 13 it's
        # simply "Origin".
        if "Origin" in self.request.headers:
            origin = self.request.headers.get("Origin")
        else:
            origin = self.request.headers.get("Sec-Websocket-Origin", None)

        # If there was an origin header, check to make sure it matches
        # according to check_origin. When the origin is None, we assume it
        # did not come from a browser and that it can be passed on.
        if origin is not None and not self.check_origin(origin):
            self.set_status(403)
            self.finish("Cross origin websockets not allowed")
            return

        # Take over the raw connection from the HTTP layer.
        self.stream = self.request.connection.detach()
        self.stream.set_close_callback(self.on_connection_close)

        if self.request.headers.get("Sec-WebSocket-Version") in ("7", "8", "13"):
            self.ws_connection = WebSocketProtocol13(self)
            self.ws_connection.accept_connection()
        #--------------- BEGIN PATCH ----------------
        # Draft76 clients send no Sec-WebSocket-Version header at all.
        elif (self.allow_draft76() and
              "Sec-WebSocket-Version" not in self.request.headers):
            self.ws_connection = WebSocketProtocol76(self)
            self.ws_connection.accept_connection()
        #--------------- END PATCH ----------------
        else:
            if not self.stream.closed():
                self.stream.write(tornado.escape.utf8(
                    "HTTP/1.1 426 Upgrade Required\r\n"
                    "Sec-WebSocket-Version: 8\r\n\r\n"))
            self.stream.close()

    # 3.2 methods removed in 4.0:
    def allow_draft76(self):
        """Using this class allows draft76 connections by default"""
        return True

    def get_websocket_scheme(self):
        """Return the url scheme used for this request, either "ws" or "wss".
        This is normally decided by HTTPServer, but applications
        may wish to override this if they are using an SSL proxy
        that does not provide the X-Scheme header as understood
        by HTTPServer.
        Note that this is only used by the draft76 protocol.
        """
        return "wss" if self.request.protocol == "https" else "ws"
# No modifications from tornado-3.2.2 below this line
class WebSocketProtocol(object):
    """Base class for WebSocket protocol versions.
    """
    def __init__(self, handler):
        self.handler = handler
        self.request = handler.request
        self.stream = handler.stream
        # Tracked separately: either side may close first.
        self.client_terminated = False
        self.server_terminated = False

    def async_callback(self, callback, *args, **kwargs):
        """Wrap callbacks with this if they are used on asynchronous requests.
        Catches exceptions properly and closes this WebSocket if an exception
        is uncaught.
        """
        if args or kwargs:
            # Pre-bind leading arguments; wrapper args are appended.
            callback = functools.partial(callback, *args, **kwargs)

        def wrapper(*args, **kwargs):
            try:
                return callback(*args, **kwargs)
            except Exception:
                app_log.error("Uncaught exception in %s",
                              self.request.path, exc_info=True)
                self._abort()
        return wrapper

    def on_connection_close(self):
        # The underlying stream closed out from under us.
        self._abort()

    def _abort(self):
        """Instantly aborts the WebSocket connection by closing the socket"""
        self.client_terminated = True
        self.server_terminated = True
        self.stream.close()  # forcibly tear down the connection
        self.close()  # let the subclass cleanup
class WebSocketProtocol76(WebSocketProtocol):
    """Implementation of the WebSockets protocol, version hixie-76.
    This class provides basic functionality to process WebSockets requests as
    specified in
    http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76
    """
    def __init__(self, handler):
        WebSocketProtocol.__init__(self, handler)
        self.challenge = None
        # Timeout handle used to force-close a lingering connection.
        self._waiting = None

    def accept_connection(self):
        try:
            self._handle_websocket_headers()
        except ValueError:
            gen_log.debug("Malformed WebSocket request received")
            self._abort()
            return

        scheme = self.handler.get_websocket_scheme()

        # draft76 only allows a single subprotocol
        subprotocol_header = ''
        subprotocol = self.request.headers.get("Sec-WebSocket-Protocol", None)
        if subprotocol:
            selected = self.handler.select_subprotocol([subprotocol])
            if selected:
                assert selected == subprotocol
                subprotocol_header = "Sec-WebSocket-Protocol: %s\r\n" % selected

        # Write the initial headers before attempting to read the challenge.
        # This is necessary when using proxies (such as HAProxy), which
        # need to see the Upgrade headers before passing through the
        # non-HTTP traffic that follows.
        self.stream.write(tornado.escape.utf8(
            "HTTP/1.1 101 WebSocket Protocol Handshake\r\n"
            "Upgrade: WebSocket\r\n"
            "Connection: Upgrade\r\n"
            "Server: TornadoServer/%(version)s\r\n"
            "Sec-WebSocket-Origin: %(origin)s\r\n"
            "Sec-WebSocket-Location: %(scheme)s://%(host)s%(uri)s\r\n"
            "%(subprotocol)s"
            "\r\n" % (dict(
                version=tornado.version,
                origin=self.request.headers["Origin"],
                scheme=scheme,
                host=self.request.host,
                uri=self.request.uri,
                subprotocol=subprotocol_header))))
        # The draft76 handshake ends with an 8-byte client challenge.
        self.stream.read_bytes(8, self._handle_challenge)

    def challenge_response(self, challenge):
        """Generates the challenge response that's needed in the handshake
        The challenge parameter should be the raw bytes as sent from the
        client.
        """
        key_1 = self.request.headers.get("Sec-Websocket-Key1")
        key_2 = self.request.headers.get("Sec-Websocket-Key2")
        try:
            part_1 = self._calculate_part(key_1)
            part_2 = self._calculate_part(key_2)
        except ValueError:
            raise ValueError("Invalid Keys/Challenge")
        return self._generate_challenge_response(part_1, part_2, challenge)

    def _handle_challenge(self, challenge):
        try:
            challenge_response = self.challenge_response(challenge)
        except ValueError:
            gen_log.debug("Malformed key data in WebSocket request")
            self._abort()
            return
        self._write_response(challenge_response)

    def _write_response(self, challenge):
        self.stream.write(challenge)
        # Handshake complete: invoke the application's open() callback,
        # then start the frame-reading loop.
        self.async_callback(self.handler.open)(
            *self.handler.open_args, **self.handler.open_kwargs)
        self._receive_message()

    def _handle_websocket_headers(self):
        """Verifies all invariant- and required headers
        If a header is missing or have an incorrect value ValueError will be
        raised
        """
        fields = ("Origin", "Host", "Sec-Websocket-Key1",
                  "Sec-Websocket-Key2")
        if not all(map(lambda f: self.request.headers.get(f), fields)):
            raise ValueError("Missing/Invalid WebSocket headers")

    def _calculate_part(self, key):
        """Processes the key headers and calculates their key value.
        Raises ValueError when feed invalid key."""
        # pyflakes complains about variable reuse if both of these lines use
        # 'c'
        number = int(''.join(c for c in key if c.isdigit()))
        spaces = len([c2 for c2 in key if c2.isspace()])
        try:
            key_number = number // spaces
        except (ValueError, ZeroDivisionError):
            raise ValueError
        return struct.pack(">I", key_number)

    def _generate_challenge_response(self, part_1, part_2, part_3):
        # MD5 over the two key parts plus the 8-byte challenge, as the
        # hixie-76 handshake specifies.
        m = hashlib.md5()
        m.update(part_1)
        m.update(part_2)
        m.update(part_3)
        return m.digest()

    def _receive_message(self):
        self.stream.read_bytes(1, self._on_frame_type)

    def _on_frame_type(self, byte):
        frame_type = ord(byte)
        if frame_type == 0x00:
            # Text frame: 0x00 ... utf-8 payload ... 0xff
            self.stream.read_until(b"\xff", self._on_end_delimiter)
        elif frame_type == 0xff:
            # Closing handshake: a 0x00 length byte must follow.
            self.stream.read_bytes(1, self._on_length_indicator)
        else:
            self._abort()

    def _on_end_delimiter(self, frame):
        if not self.client_terminated:
            self.async_callback(self.handler.on_message)(
                frame[:-1].decode("utf-8", "replace"))
        if not self.client_terminated:
            self._receive_message()

    def _on_length_indicator(self, byte):
        if ord(byte) != 0x00:
            self._abort()
            return
        self.client_terminated = True
        self.close()

    def write_message(self, message, binary=False):
        """Sends the given message to the client of this Web Socket."""
        if binary:
            raise ValueError(
                "Binary messages not supported by this version of websockets")
        if isinstance(message, unicode_type):
            message = message.encode("utf-8")
        assert isinstance(message, bytes_type)
        self.stream.write(b"\x00" + message + b"\xff")

    def write_ping(self, data):
        """Send ping frame."""
        raise ValueError(
            "Ping messages not supported by this version of websockets")

    def close(self):
        """Closes the WebSocket connection."""
        if not self.server_terminated:
            if not self.stream.closed():
                # BUG FIX: the closing frame must be bytes.  The original
                # wrote the str "\xff\x00", which fails on Python 3 where
                # IOStream.write requires bytes (this file targets
                # tornado >= 4.0); b"\xff\x00" is identical on Python 2.
                self.stream.write(b"\xff\x00")
            self.server_terminated = True
        if self.client_terminated:
            if self._waiting is not None:
                self.stream.io_loop.remove_timeout(self._waiting)
                self._waiting = None
            self.stream.close()
        elif self._waiting is None:
            # Give the client 5 seconds to finish a clean shutdown,
            # otherwise force-drop the connection.
            self._waiting = self.stream.io_loop.add_timeout(
                time.time() + 5, self._abort)
| |
"""
The NeuralAgent class wraps a deep Q-network for training and testing
in the Arcade learning environment.
Author: Nathan Sprague
"""
import os
import cPickle
import time
import logging
import random
import numpy as np
import ale_data_set
import sys
# NOTE(review): presumably raised so cPickle.dump of the (deep) network
# object graph in finish_epoch does not hit the default limit -- confirm.
sys.setrecursionlimit(10000)
class NeuralAgent(object):
    """Epsilon-greedy agent that couples a deep Q-network with an
    experience-replay data set (see module docstring).

    NOTE(review): this module is Python 2 only (cPickle, print
    statements, unbuffered open(..., 'w', 0)).
    """

    # Shared RNG used for random/exploratory action selection.
    randGenerator = random.Random()

    def __init__(self, q_network, epsilon_start, epsilon_min,
                 epsilon_decay, replay_memory_size, exp_pref,
                 replay_start_size, update_frequency,
                 reward_weight=0., reward_weight_horizon=0, reward_weight_decay=1.):

        self.network = q_network
        self.epsilon_start = epsilon_start
        self.epsilon_min = epsilon_min
        self.epsilon_decay = epsilon_decay
        self.replay_memory_size = replay_memory_size
        self.exp_pref = exp_pref
        self.replay_start_size = replay_start_size
        self.update_frequency = update_frequency

        # Frame-stack depth and input geometry come from the network.
        self.phi_length = self.network.num_frames
        self.image_width = self.network.input_width
        self.image_height = self.network.input_height

        # CREATE A FOLDER TO HOLD RESULTS
        time_str = time.strftime("_%m-%d-%H-%M_", time.gmtime())
        self.exp_dir = self.exp_pref + time_str + \
                       "{}".format(self.network.lr).replace(".", "p") + "_" \
                       + "{}".format(self.network.discount).replace(".", "p")

        try:
            os.stat(self.exp_dir)
        except OSError:
            os.makedirs(self.exp_dir)

        # Record the exact command line and git commit for reproducibility.
        import subprocess
        subp = subprocess.Popen(['git', 'rev-parse', 'HEAD'],
                                stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = subp.communicate()
        commit = out.strip()
        with open(self.exp_dir + '/argv','w') as fp:
            fp.write(' '.join(sys.argv) + '\n')
            fp.write(commit + '\n')

        self.num_actions = self.network.num_actions

        self.data_set = ale_data_set.DataSet(width=self.image_width,
                                             height=self.image_height,
                                             max_steps=self.replay_memory_size,
                                             phi_length=self.phi_length,
                                             reward_weight=reward_weight,
                                             reward_weight_horizon=reward_weight_horizon,
                                             reward_weight_decay=reward_weight_decay
                                             )

        # just needs to be big enough to create phi's
        self.test_data_set = ale_data_set.DataSet(width=self.image_width,
                                                  height=self.image_height,
                                                  max_steps=self.phi_length * 2,
                                                  phi_length=self.phi_length)
        self.epsilon = self.epsilon_start
        if self.epsilon_decay != 0:
            # Linear per-step decrement so epsilon reaches epsilon_min
            # after epsilon_decay steps.
            self.epsilon_rate = ((self.epsilon_start - self.epsilon_min) /
                                 self.epsilon_decay)
        else:
            self.epsilon_rate = 0

        self.testing = False

        self._open_results_file()
        self._open_learning_file()

        self.episode_counter = 0
        self.batch_counter = 0

        self.holdout_data = None

        # In order to add an element to the data set we need the
        # previous state and action and the current reward. These
        # will be used to store states and actions.
        self.last_img = None
        self.last_action = None

        # When True, keyboard input (via pygame) may override the policy.
        self.interactive_mode = True

    def _open_results_file(self):
        # open(..., 'w', 0): unbuffered (Python 2 only) so partial results
        # survive a crash.
        logging.info("OPENING " + self.exp_dir + '/results.csv')
        self.results_file = open(self.exp_dir + '/results.csv', 'w', 0)
        self.results_file.write(\
            'epoch,num_episodes,total_reward,reward_per_episode,mean_q\n')
        self.results_file.flush()

    def _open_learning_file(self):
        self.learning_file = open(self.exp_dir + '/learning.csv', 'w', 0)
        self.learning_file.write('mean_loss,epsilon\n')
        self.learning_file.flush()

    def _update_results_file(self, epoch, num_episodes, holdout_sum):
        # Append one CSV row of per-epoch testing statistics.
        out = "{},{},{},{},{}\n".format(epoch, num_episodes, self.total_reward,
                                        self.total_reward / float(num_episodes),
                                        holdout_sum)
        self.results_file.write(out)
        self.results_file.flush()

    def _update_learning_file(self):
        out = "{},{}\n".format(np.mean(self.loss_averages),
                               self.epsilon)
        self.learning_file.write(out)
        self.learning_file.flush()

    def start_episode(self, observation):
        """
        This method is called once at the beginning of each episode.
        No reward is provided, because reward is only available after
        an action has been taken.
        Arguments:
           observation - height x width numpy array
        Returns:
           An integer action
        """

        self.step_counter = 0
        self.batch_counter = 0
        self.episode_reward = 0

        # We report the mean loss for every epoch.
        self.loss_averages = []

        self.start_time = time.time()
        # The first action of an episode is always random: no frame
        # stack (phi) exists yet to feed the network.
        return_action = self.randGenerator.randint(0, self.num_actions-1)

        self.last_action = return_action

        self.last_img = observation

        return return_action

    def _show_phis(self, phi1, phi2):
        """Debug helper: display two frame stacks side by side."""
        import matplotlib.pyplot as plt
        for p in range(self.phi_length):
            plt.subplot(2, self.phi_length, p+1)
            plt.imshow(phi1[p, :, :], interpolation='none', cmap="gray")
            plt.grid(color='r', linestyle='-', linewidth=1)
        for p in range(self.phi_length):
            plt.subplot(2, self.phi_length, p+5)
            plt.imshow(phi2[p, :, :], interpolation='none', cmap="gray")
            plt.grid(color='r', linestyle='-', linewidth=1)
        plt.show()

    def step(self, reward, observation):
        """
        This method is called each time step.
        Arguments:
           reward      - Real valued reward.
           observation - A height x width numpy array
        Returns:
           An integer action.
        """

        self.step_counter += 1

        #TESTING---------------------------
        if self.testing:
            self.episode_reward += reward
            # Small fixed epsilon (.05) during evaluation.
            action = self._choose_action(self.test_data_set, .05,
                                         observation, np.clip(reward, -1, 1))

        #NOT TESTING---------------------------
        else:

            if len(self.data_set) > self.replay_start_size:
                # Replay buffer is warm: anneal epsilon and train.
                self.epsilon = max(self.epsilon_min,
                                   self.epsilon - self.epsilon_rate)

                action = self._choose_action(self.data_set, self.epsilon,
                                             observation,
                                             np.clip(reward, -1, 1))

                if self.step_counter % self.update_frequency == 0:
                    loss = self._do_training()
                    self.batch_counter += 1
                    self.loss_averages.append(loss)

            else: # Still gathering initial random data...
                action = self._choose_action(self.data_set, self.epsilon,
                                             observation,
                                             np.clip(reward, -1, 1))

        # Optional human override of the chosen action via pygame keys.
        # NOTE(review): the bare except below disables interactive mode on
        # *any* error (not just a missing pygame install) -- intentional?
        if self.interactive_mode:
            try:
                import pygame
                keys=pygame.key.get_pressed()
                if keys[32]:
                    print "FIRE"
                    action = 1
                elif keys[275]:
                    print "RIGHT"
                    action = 2
                elif keys[276]:
                    print "LEFT"
                    action = 3
                elif keys[274]:
                    print "DOWN"
                    action = 4
                elif any(keys):
                    action = 0
            except:
                self.interactive_mode = False
                print "No interactive"

        self.last_action = action
        self.last_img = observation

        return action

    def _choose_action(self, data_set, epsilon, cur_img, reward):
        """
        Add the most recent data to the data set and choose
        an action based on the current policy.
        """

        data_set.add_sample(self.last_img, self.last_action, reward, False)
        if self.step_counter >= self.phi_length:
            phi = data_set.phi(cur_img)
            action = self.network.choose_action(phi, epsilon)
        else:
            # Not enough frames yet for a full phi: act randomly.
            action = self.randGenerator.randint(0, self.num_actions - 1)
        return action

    def _do_training(self):
        """
        Returns the average loss for the current batch.
        May be overridden if a subclass needs to train the network
        differently.
        """
        states, actions, rewards, next_states, terminals = \
            self.data_set.random_batch(
                self.network.batch_size)
        return self.network.train(states, actions, rewards,
                                  next_states, terminals)

    def end_episode(self, reward, terminal=True):
        """
        This function is called once at the end of an episode.
        Arguments:
           reward      - Real valued reward.
           terminal    - Whether the episode ended intrinsically
                         (ie we didn't run out of steps)
        Returns:
            None
        """

        self.episode_reward += reward
        self.step_counter += 1
        total_time = time.time() - self.start_time

        if self.testing:
            # If we run out of time, only count the last episode if
            # it was the only episode.
            if terminal or self.episode_counter == 0:
                self.episode_counter += 1
                self.total_reward += self.episode_reward
        else:

            # Store the latest sample.
            self.data_set.add_sample(self.last_img,
                                     self.last_action,
                                     np.clip(reward, -1, 1),
                                     True)

            logging.info("steps/second: {:.2f}".format(\
                self.step_counter/total_time))

            if self.batch_counter > 0:
                self._update_learning_file()
                logging.info("average loss: {:.4f}".format(\
                    np.mean(self.loss_averages)))

    def finish_epoch(self, epoch):
        # Snapshot the network (cPickle) and the replay data set.
        with open(self.exp_dir + '/network_file_' + str(epoch) + \
                  '.pkl', 'w') as net_file:
            cPickle.dump(self.network, net_file, -1)
        self.data_set.save(self.exp_dir + '/agent')

    def start_testing(self):
        self.testing = True
        self.total_reward = 0
        self.episode_counter = 0

    def finish_testing(self, epoch):
        self.testing = False
        holdout_size = 3200

        if self.holdout_data is None and len(self.data_set) > holdout_size:
            # Fix a random holdout set once; mean Q over it tracks
            # learning progress across epochs.
            self.holdout_data = self.data_set.random_batch(holdout_size)[0]

        holdout_sum = 0
        if self.holdout_data is not None:
            for i in range(holdout_size):
                holdout_sum += np.mean(
                    self.network.q_vals(self.holdout_data[i, ...]))

        self._update_results_file(epoch, self.episode_counter,
                                  holdout_sum / holdout_size)
# The module is import-only; running it directly is a deliberate no-op.
if __name__ == "__main__":
    pass
| |
from django.test import TestCase
from schools.models import *
from django.contrib.auth.models import Group
from datetime import datetime, timedelta
from django.utils.timezone import utc
from django.test.client import RequestFactory, Client
class TestSchoolViews(TestCase):
    """Access-control tests for the /api/schools/ views.

    'kyllo' is a valid manager account from the fixtures; 'root' is not
    a valid user.
    """
    fixtures = ['users.json', 'groups.json', 'schools.json']

    def setUp(self):
        self.client = Client()

    def testValidUserCanViewSchoolsList(self):
        self.client.login(username='kyllo', password='password')
        response = self.client.get('/api/schools/')
        self.assertEqual(response.status_code, 200)

    def testInvalidUserCannotViewSchoolsList(self):
        self.client.login(username='root', password='password')
        response = self.client.get('/api/schools/')
        # NOTE(review): 'next' points at /schools/ although the request
        # was for /api/schools/ -- confirm against the login redirect.
        self.assertRedirects(response, '/accounts/login/?next=/schools/', status_code=302, target_status_code=200)

    def testValidUserCanCreateSchool(self):
        self.client.login(username='kyllo', password='password')
        response = self.client.post('/api/schools/create/', {"name": "Testing School"}, follow=True)
        self.assertContains(response, "Testing School", status_code=200)

    def testInvalidUserCannotCreateSchool(self):
        self.client.login(username='root', password='password')
        self.client.post('/api/schools/create/', {'name': 'Fake Testing School'})
        # BUG FIX: the original wrapped School.objects.get in try/except
        # with assertTrue(True) only in the handler, so the test passed
        # whether or not the school was created (the success path asserted
        # nothing).  Assert that the school really was not created.
        self.assertFalse(
            School.objects.filter(name='Fake Testing School').exists())

    def testValidUserCanViewSchoolDetail(self):
        self.client.login(username='kyllo', password='password')
        self.client.post('/api/schools/create/', {'name': 'School 1'})
        response = self.client.get('/schools/1/')
        self.assertContains(response, "School 1", status_code=200)
class TestAccounts(TestCase):
    """Authentication smoke tests for valid and invalid credentials."""

    def setUp(self):
        self.client = Client()
        Group.objects.create(name="Managers")
        User.objects.create_user(username='dawn', first_name='Dawn',
                                 last_name='Kyllo', password='ruby')

    def testInvalidUserCannotLogIn(self):
        logged_in = self.client.login(username='root', password='password')
        self.assertEqual(logged_in, False)

    def testValidUserCanLogIn(self):
        logged_in = self.client.login(username='dawn', password='ruby')
        self.assertTrue(logged_in)
class TestStudentViews(TestCase):
    """Permission tests for student listing, detail and creation."""

    fixtures = ['users.json', 'groups.json', 'schools.json']

    def setUp(self):
        self.client = Client()

    def test_managers_can_view_school_students_list(self):
        self.client.login(username='kyllo', password='password')
        resp = self.client.get('/api/schools/1/students/')
        self.assertContains(resp, 'Ruby Dog', status_code=200)

    def test_managers_can_view_school_students_detail(self):
        self.client.login(username='kyllo', password='password')
        resp = self.client.get('/api/users/ruby/')
        self.assertContains(resp, 'Ruby Dog', status_code=200)

    def test_managers_can_create_students(self):
        self.client.login(username='kyllo', password='password')
        payload = {
            'username': 'newstudent',
            'email': 'newstudent@newstudent.com',
            'first_name': 'New',
            'last_name': 'Student',
            'password1': 'newstudent',
            'password2': 'newstudent',
        }
        resp = self.client.post('/api/schools/1/students/create/', payload,
                                follow=True)
        self.assertContains(resp, "New Student", status_code=200)

    def test_students_cannot_create_students(self):
        self.client.login(username='ruby', password='ruby')
        payload = {
            'username': 'newstudent2',
            'email': 'newstudent@newstudent.com',
            'first_name': 'New',
            'last_name': 'Student2',
            'password1': 'newstudent2',
            'password2': 'newstudent2',
        }
        resp = self.client.post('/api/schools/1/students/create/', payload,
                                follow=True)
        self.assertEqual(resp.status_code, 403)
class TestInstructorViews(TestCase):
    """Permission tests for instructor listing, detail and creation."""

    fixtures = ['users.json', 'groups.json', 'schools.json',]

    def setUp(self):
        self.client = Client()

    def test_managers_can_create_instructors(self):
        self.client.login(username='kyllo', password='password')
        payload = {
            'username': 'newinstructor',
            'email': 'new@instructor.com',
            'first_name': 'New',
            'last_name': 'Instructor',
            'password1': 'newinstructor',
            'password2': 'newinstructor',
        }
        resp = self.client.post('/api/schools/1/instructors/create/', payload,
                                follow=True)
        self.assertContains(resp, "New Instructor", status_code=200)

    def test_managers_can_view_school_instructors_list(self):
        self.client.login(username='kyllo', password='password')
        resp = self.client.get('/api/schools/1/instructors/')
        self.assertContains(resp, 'Alex Kyllo', status_code=200)

    def test_managers_can_view_school_instructors_detail(self):
        self.client.login(username='kyllo', password='password')
        resp = self.client.get('/api/users/kyllo/')
        self.assertContains(resp, 'Alex Kyllo', status_code=200)

    def test_students_cannot_create_instructors(self):
        self.client.login(username='ruby', password='ruby')
        payload = {
            'username': 'newinstructor2',
            'email': 'new@instructor.com',
            'first_name': 'New',
            'last_name': 'Student2',
            'password1': 'newinstructor2',
            'password2': 'newinstructor2',
        }
        resp = self.client.post('/api/schools/1/instructors/create/', payload,
                                follow=True)
        self.assertEqual(resp.status_code, 403)
class TestSchoolViewsWithFixtures(TestCase):
    """Ownership and deletion permission tests driven by fixture data."""

    fixtures = ['schools.json', 'users.json', 'groups.json']

    def setUp(self):
        self.client = Client()

    def _login_manager(self):
        # All manager-scoped tests use the same fixture account.
        self.client.login(username='kyllo', password='password')

    def test_user_can_view_own_schools_list(self):
        self._login_manager()
        resp = self.client.get('/api/schools/')
        self.assertContains(resp, "A Cool School", status_code=200)

    def test_user_cannot_view_other_schools_list(self):
        self._login_manager()
        resp = self.client.get('/api/schools/')
        self.assertNotContains(resp, "A Cooler School")

    def test_user_can_view_own_school_detail(self):
        self._login_manager()
        resp = self.client.get('/api/schools/1/')
        self.assertContains(resp, "A Cool School", status_code=200)

    def test_user_cannot_view_other_schools_detail(self):
        self._login_manager()
        resp = self.client.get('/api/schools/2/')
        self.assertEqual(resp.status_code, 404)

    def test_anon_user_cannot_view_schools_list(self):
        resp = Client().get('/api/schools/')
        self.assertEqual(resp.status_code, 403)

    def test_manager_can_delete_own_school(self):
        self._login_manager()
        resp = self.client.post('/api/schools/1/delete/', follow=True)
        self.assertRedirects(resp, '/api/schools/', target_status_code=200)
        self.assertNotContains(resp, "A Cool School", status_code=200)

    def test_student_cannot_delete_school(self):
        self.client.login(username='ruby', password='ruby')
        resp = self.client.post('/api/schools/1/delete/')
        self.assertEqual(resp.status_code, 403)

    def test_manager_cannot_delete_other_school(self):
        self._login_manager()
        resp = self.client.post('/api/schools/2/delete/')
        self.assertEqual(resp.status_code, 404)
class TestLocationViews(TestCase):
    """Permission tests for school-location list/detail/create endpoints."""

    fixtures = ['schools.json', 'users.json', 'groups.json', 'locations.json']

    def setUp(self):
        self.client = Client()

    def test_user_can_view_location_list_of_own_school(self):
        self.client.login(username='kyllo', password='password')
        resp = self.client.get('/api/schools/1/locations/')
        self.assertContains(resp, "A Cool School Kirkland Location",
                            status_code=200)

    def test_user_cannot_view_location_list_of_other_school(self):
        self.client.login(username='kyllo', password='password')
        resp = self.client.get('/api/schools/2/locations/')
        self.assertEqual(resp.status_code, 404)

    def test_user_can_view_location_detail_of_own_school(self):
        self.client.login(username='kyllo', password='password')
        resp = self.client.get('/api/locations/1/')
        self.assertContains(resp, "A Cool School Kirkland Location",
                            status_code=200)

    def test_user_cannot_view_location_detail_of_other_school(self):
        self.client.login(username='kyllo', password='password')
        resp = self.client.get('/api/locations/2/')
        self.assertEqual(resp.status_code, 404)

    def test_user_cannot_create_location_for_other_school(self):
        self.client.login(username='kyllo', password='password')
        payload = {
            'school_id': 2,
            'name': 'a school',
            'address_1': 'a',
            'address_2': 'a',
            'city': 'a',
            'state_province': 'WA',
            'zip_postal_code': '12345',
            'country': 'US',
        }
        resp = self.client.post('/api/schools/2/locations/create/', payload)
        self.assertEqual(resp.status_code, 404)

    def test_anonymous_user_cannot_view_location_list(self):
        resp = Client().get('/api/schools/1/locations/')
        self.assertEqual(resp.status_code, 403)

    def test_anonymous_user_cannot_view_location_detail(self):
        resp = Client().get('/api/locations/1/')
        self.assertEqual(resp.status_code, 403)
class TestCourseViews(TestCase):
    """Permission tests for course list and detail endpoints."""

    fixtures = ['schools.json', 'locations.json', 'courses.json',
                'users.json', 'groups.json']

    def setUp(self):
        self.client = Client()

    def test_user_can_view_course_list_of_own_school(self):
        self.client.login(username='kyllo', password='password')
        resp = self.client.get('/api/locations/1/courses/')
        self.assertContains(resp, "Yoga 101", status_code=200)

    def test_user_cannot_view_course_list_of_other_school(self):
        self.client.login(username='kyllo', password='password')
        resp = self.client.get('/api/locations/2/courses/')
        self.assertEqual(resp.status_code, 404)

    def test_user_can_view_course_detail_of_own_school(self):
        self.client.login(username='kyllo', password='password')
        resp = self.client.get('/api/courses/1/')
        self.assertContains(resp, "Yoga 101", status_code=200)

    def test_user_cannot_view_course_detail_of_other_school(self):
        self.client.login(username='kyllo', password='password')
        resp = self.client.get('/api/courses/2/')
        self.assertEqual(resp.status_code, 404)

    def test_anonymous_user_cannot_view_course_list(self):
        resp = Client().get('/api/locations/1/courses/')
        self.assertEqual(resp.status_code, 403)
| |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import atexit
import collections
import contextlib
import ctypes
import os
import platform
import re
import socket
import struct
import subprocess
import sys
import time
import zipfile
from telemetry import decorators
from telemetry.core import exceptions
from telemetry.core import util
from telemetry.core.platform import desktop_platform_backend
from telemetry.core.platform import platform_backend
from telemetry.core.platform.power_monitor import msr_power_monitor
from telemetry.util import cloud_storage
from telemetry.util import path
try:
import pywintypes # pylint: disable=F0401
import win32api # pylint: disable=F0401
from win32com.shell import shell # pylint: disable=F0401
from win32com.shell import shellcon # pylint: disable=F0401
import win32con # pylint: disable=F0401
import win32process # pylint: disable=F0401
import win32security # pylint: disable=F0401
except ImportError:
pywintypes = None
shell = None
shellcon = None
win32api = None
win32con = None
win32process = None
win32security = None
def _InstallWinRing0():
  """WinRing0 is used for reading MSRs."""
  exe_dir = os.path.dirname(sys.executable)

  wide_python = sys.maxsize > 2 ** 32
  dll_name = 'WinRing0x64.dll' if wide_python else 'WinRing0.dll'
  dll_path = os.path.join(exe_dir, dll_name)

  wide_os = platform.machine().endswith('64')
  driver_name = 'WinRing0x64.sys' if wide_os else 'WinRing0.sys'
  driver_path = os.path.join(exe_dir, driver_name)

  # Nothing to do if both pieces are already installed next to python.exe.
  if os.path.exists(dll_path) and os.path.exists(driver_path):
    return

  # Fetch the WinRing0 archive from cloud storage and unpack whichever
  # pieces are missing, removing the archive afterwards.
  win_binary_dir = os.path.join(path.GetTelemetryDir(), 'bin', 'win')
  zip_path = os.path.join(win_binary_dir, 'winring0.zip')
  cloud_storage.GetIfChanged(zip_path, bucket=cloud_storage.PUBLIC_BUCKET)
  try:
    with zipfile.ZipFile(zip_path, 'r') as archive:
      # Install DLL.
      if not os.path.exists(dll_path):
        archive.extract(dll_name, exe_dir)
      # Install kernel driver.
      if not os.path.exists(driver_path):
        archive.extract(driver_name, exe_dir)
  finally:
    os.remove(zip_path)
def TerminateProcess(process_handle):
  """Forcibly end the process behind |process_handle| and close the handle.

  A falsy handle is ignored, which makes this safe to register with atexit
  even when the server process was never launched.
  """
  if not process_handle:
    return

  still_active = (win32process.GetExitCodeProcess(process_handle) ==
                  win32con.STILL_ACTIVE)
  if still_active:
    win32process.TerminateProcess(process_handle, 0)
  process_handle.close()
class WinPlatformBackend(desktop_platform_backend.DesktopPlatformBackend):
  """Desktop platform backend for Windows.

  Wraps pywin32 and ctypes calls to expose process, memory, I/O, OS-version
  and power information to Telemetry.  Note this module uses Python 2-only
  syntax (e.g. ``except pywintypes.error, e``).
  """

  def __init__(self):
    super(WinPlatformBackend, self).__init__()
    # Handle/port of the elevated helper process that serves MSR reads;
    # started lazily by _StartMsrServerIfNeeded().
    self._msr_server_handle = None
    self._msr_server_port = None
    self._power_monitor = msr_power_monitor.MsrPowerMonitor(self)

  def __del__(self):
    self.close()

  def close(self):
    self.CloseMsrServer()

  def CloseMsrServer(self):
    """Terminate the MSR helper process, if one is running."""
    if not self._msr_server_handle:
      return

    TerminateProcess(self._msr_server_handle)
    self._msr_server_handle = None
    self._msr_server_port = None

  # pylint: disable=W0613
  def StartRawDisplayFrameRateMeasurement(self):
    raise NotImplementedError()

  def StopRawDisplayFrameRateMeasurement(self):
    raise NotImplementedError()

  def GetRawDisplayFrameRateMeasurements(self):
    raise NotImplementedError()

  def IsThermallyThrottled(self):
    raise NotImplementedError()

  def HasBeenThermallyThrottled(self):
    raise NotImplementedError()

  def GetSystemCommitCharge(self):
    """Returns the system commit charge in kilobytes."""
    performance_info = self._GetPerformanceInfo()
    return performance_info.CommitTotal * performance_info.PageSize / 1024

  @decorators.Cache
  def GetSystemTotalPhysicalMemory(self):
    """Returns total physical memory in kilobytes (cached)."""
    performance_info = self._GetPerformanceInfo()
    return performance_info.PhysicalTotal * performance_info.PageSize / 1024

  def GetCpuStats(self, pid):
    """Returns cumulative user+kernel CPU time of |pid| in seconds."""
    cpu_info = self._GetWin32ProcessInfo(win32process.GetProcessTimes, pid)
    # Convert 100 nanosecond units to seconds
    cpu_time = (cpu_info['UserTime'] / 1e7 +
                cpu_info['KernelTime'] / 1e7)
    return {'CpuProcessTime': cpu_time}

  def GetCpuTimestamp(self):
    """Return current timestamp in seconds."""
    return {'TotalTime': time.time()}

  def GetMemoryStats(self, pid):
    """Returns pagefile and working-set sizes (current and peak) of |pid|."""
    memory_info = self._GetWin32ProcessInfo(
        win32process.GetProcessMemoryInfo, pid)
    return {'VM': memory_info['PagefileUsage'],
            'VMPeak': memory_info['PeakPagefileUsage'],
            'WorkingSetSize': memory_info['WorkingSetSize'],
            'WorkingSetSizePeak': memory_info['PeakWorkingSetSize']}

  def GetIOStats(self, pid):
    """Returns read/write operation and transfer counters of |pid|."""
    io_stats = self._GetWin32ProcessInfo(win32process.GetProcessIoCounters, pid)
    return {'ReadOperationCount': io_stats['ReadOperationCount'],
            'WriteOperationCount': io_stats['WriteOperationCount'],
            'ReadTransferCount': io_stats['ReadTransferCount'],
            'WriteTransferCount': io_stats['WriteTransferCount']}

  def KillProcess(self, pid, kill_process_tree=False):
    """Kills |pid| via taskkill; /T also kills the whole process tree."""
    # os.kill for Windows is Python 2.7.
    cmd = ['taskkill', '/F', '/PID', str(pid)]
    if kill_process_tree:
      cmd.append('/T')
    subprocess.Popen(cmd, stdout=subprocess.PIPE,
                     stderr=subprocess.STDOUT).communicate()

  def GetSystemProcessInfo(self):
    """Returns a list of dicts describing every process, parsed from wmic."""
    # [3:] To skip 2 blank lines and header.
    lines = subprocess.Popen(
        ['wmic', 'process', 'get',
         'CommandLine,CreationDate,Name,ParentProcessId,ProcessId',
         '/format:csv'],
        stdout=subprocess.PIPE).communicate()[0].splitlines()[3:]
    process_info = []
    for line in lines:
      if not line:
        continue
      parts = line.split(',')
      pi = {}
      pi['ProcessId'] = int(parts[-1])
      pi['ParentProcessId'] = int(parts[-2])
      pi['Name'] = parts[-3]
      creation_date = None
      if parts[-4]:
        creation_date = float(re.split('[+-]', parts[-4])[0])
      pi['CreationDate'] = creation_date
      # The command line itself may contain commas, so rejoin the middle
      # fields rather than taking a single CSV column.
      pi['CommandLine'] = ','.join(parts[1:-4])
      process_info.append(pi)
    return process_info

  def GetChildPids(self, pid):
    """Returns a list of child pids of |pid|, recursively."""
    ppid_map = collections.defaultdict(list)
    creation_map = {}
    for pi in self.GetSystemProcessInfo():
      ppid_map[pi['ParentProcessId']].append(pi['ProcessId'])
      if pi['CreationDate']:
        creation_map[pi['ProcessId']] = pi['CreationDate']

    def _InnerGetChildPids(pid):
      if not pid or pid not in ppid_map:
        return []
      # Only count children created at or after the parent, guarding
      # against pid reuse.
      ret = [p for p in ppid_map[pid] if creation_map[p] >= creation_map[pid]]
      for child in ret:
        if child == pid:
          continue
        ret.extend(_InnerGetChildPids(child))
      return ret

    return _InnerGetChildPids(pid)

  def GetCommandLine(self, pid):
    """Returns the command line of |pid|; raises if the process is gone."""
    for pi in self.GetSystemProcessInfo():
      if pid == pi['ProcessId']:
        return pi['CommandLine']
    raise exceptions.ProcessGoneException()

  def GetOSName(self):
    return 'win'

  @decorators.Cache
  def GetOSVersionName(self):
    # Maps the NT kernel version string to a Telemetry OS-version constant.
    os_version = platform.uname()[3]

    if os_version.startswith('5.1.'):
      return platform_backend.XP
    if os_version.startswith('6.0.'):
      return platform_backend.VISTA
    if os_version.startswith('6.1.'):
      return platform_backend.WIN7
    if os_version.startswith('6.2.'):
      return platform_backend.WIN8

    raise NotImplementedError('Unknown win version %s.' % os_version)

  def CanFlushIndividualFilesFromSystemCache(self):
    return True

  def _GetWin32ProcessInfo(self, func, pid):
    """Calls win32 |func| on an opened handle for |pid|, closing it after.

    Raises ProcessGoneException when OpenProcess fails with error 87
    (ERROR_INVALID_PARAMETER, i.e. no such pid).
    """
    mask = (win32con.PROCESS_QUERY_INFORMATION |
            win32con.PROCESS_VM_READ)
    handle = None
    try:
      handle = win32api.OpenProcess(mask, False, pid)
      return func(handle)
    except pywintypes.error, e:
      errcode = e[0]
      if errcode == 87:
        raise exceptions.ProcessGoneException()
      raise
    finally:
      if handle:
        win32api.CloseHandle(handle)

  def _GetPerformanceInfo(self):
    """Calls psapi GetPerformanceInfo() and returns the filled struct."""
    class PerformanceInfo(ctypes.Structure):
      """Struct for GetPerformanceInfo() call
      http://msdn.microsoft.com/en-us/library/ms683210
      """
      _fields_ = [('size', ctypes.c_ulong),
                  ('CommitTotal', ctypes.c_size_t),
                  ('CommitLimit', ctypes.c_size_t),
                  ('CommitPeak', ctypes.c_size_t),
                  ('PhysicalTotal', ctypes.c_size_t),
                  ('PhysicalAvailable', ctypes.c_size_t),
                  ('SystemCache', ctypes.c_size_t),
                  ('KernelTotal', ctypes.c_size_t),
                  ('KernelPaged', ctypes.c_size_t),
                  ('KernelNonpaged', ctypes.c_size_t),
                  ('PageSize', ctypes.c_size_t),
                  ('HandleCount', ctypes.c_ulong),
                  ('ProcessCount', ctypes.c_ulong),
                  ('ThreadCount', ctypes.c_ulong)]

      def __init__(self):
        self.size = ctypes.sizeof(self)
        super(PerformanceInfo, self).__init__()

    performance_info = PerformanceInfo()
    ctypes.windll.psapi.GetPerformanceInfo(
        ctypes.byref(performance_info), performance_info.size)
    return performance_info

  def IsCurrentProcessElevated(self):
    """Returns True if this process runs with elevated (admin) privileges."""
    if self.GetOSVersionName() < platform_backend.VISTA:
      # TOKEN_QUERY is not defined before Vista. All processes are elevated.
      return True

    handle = win32process.GetCurrentProcess()
    with contextlib.closing(
        win32security.OpenProcessToken(handle, win32con.TOKEN_QUERY)) as token:
      return bool(win32security.GetTokenInformation(
          token, win32security.TokenElevation))

  def LaunchApplication(
      self, application, parameters=None, elevate_privilege=False):
    """Launch an application. Returns a PyHANDLE object."""

    parameters = ' '.join(parameters) if parameters else ''
    if elevate_privilege and not self.IsCurrentProcessElevated():
      # Use ShellExecuteEx() instead of subprocess.Popen()/CreateProcess() to
      # elevate privileges. A new console will be created if the new process has
      # different permissions than this process.
      proc_info = shell.ShellExecuteEx(
          fMask=shellcon.SEE_MASK_NOCLOSEPROCESS | shellcon.SEE_MASK_NO_CONSOLE,
          lpVerb='runas' if elevate_privilege else '',
          lpFile=application,
          lpParameters=parameters,
          nShow=win32con.SW_HIDE)
      # hInstApp values <= 32 signal failure per the ShellExecuteEx docs.
      if proc_info['hInstApp'] <= 32:
        raise Exception('Unable to launch %s' % application)
      return proc_info['hProcess']
    else:
      handle, _, _, _ = win32process.CreateProcess(
          None, application + ' ' + parameters, None, None, False,
          win32process.CREATE_NO_WINDOW, None, None, win32process.STARTUPINFO())
      return handle

  def CanMonitorPower(self):
    return self._power_monitor.CanMonitorPower()

  def CanMeasurePerApplicationPower(self):
    return self._power_monitor.CanMeasurePerApplicationPower()

  def StartMonitoringPower(self, browser):
    self._power_monitor.StartMonitoringPower(browser)

  def StopMonitoringPower(self):
    return self._power_monitor.StopMonitoringPower()

  def _StartMsrServerIfNeeded(self):
    """Launches the elevated MSR-reading helper process, if not yet running."""
    if self._msr_server_handle:
      return

    _InstallWinRing0()
    self._msr_server_port = util.GetUnreservedAvailableLocalPort()
    # It might be flaky to get a port number without reserving it atomically,
    # but if the server process chooses a port, we have no way of getting it.
    # The stdout of the elevated process isn't accessible.
    parameters = (
        os.path.join(os.path.dirname(__file__), 'msr_server_win.py'),
        str(self._msr_server_port),
    )
    self._msr_server_handle = self.LaunchApplication(
        sys.executable, parameters, elevate_privilege=True)
    # Wait for server to start.
    try:
      socket.create_connection(('127.0.0.1', self._msr_server_port), 5).close()
    except socket.error:
      self.CloseMsrServer()
    # TerminateProcess ignores a falsy handle, so registering after a failed
    # startup (handle reset to None above) is harmless.
    atexit.register(TerminateProcess, self._msr_server_handle)

  def ReadMsr(self, msr_number, start=0, length=64):
    """Reads |length| bits starting at bit |start| of MSR |msr_number|."""
    self._StartMsrServerIfNeeded()
    if not self._msr_server_handle:
      raise OSError('Unable to start MSR server.')

    sock = socket.create_connection(('127.0.0.1', self._msr_server_port), 0.1)
    try:
      sock.sendall(struct.pack('I', msr_number))
      response = sock.recv(8)
    finally:
      sock.close()
    # The server replies with one 64-bit value; mask out the requested bits.
    return struct.unpack('Q', response)[0] >> start & ((1 << length) - 1)
| |
# Copyright 2014, Scalyr, Inc.
#
# Note, this can be run in standalone mode by:
# python -m scalyr_agent.run_monitor
# scalyr_agent.builtin_monitors.nginx_monitor
from __future__ import unicode_literals
from __future__ import absolute_import
import socket
import six
import six.moves.http_client
import six.moves.urllib.request
import six.moves.urllib.error
import six.moves.urllib.parse
from scalyr_agent.scalyr_monitor import (
ScalyrMonitor,
define_metric,
define_log_field,
define_config_option,
)
# Source IP used when fetching the status page; overwritten at monitor
# initialization from the ``source_address`` config option.
httpSourceAddress = "127.0.0.1"

# Name under which the options, log fields and metrics below are registered.
__monitor__ = __name__

define_config_option(
    __monitor__,
    "module",
    "Always ``scalyr_agent.builtin_monitors.nginx_monitor``",
    convert_to=six.text_type,
    required_option=True,
)
define_config_option(
    __monitor__,
    "status_url",
    # Fixed: the two adjacent literals previously concatenated without a
    # space ("...will fetchto retrieve...").
    "Optional (defaults to 'http://localhost/nginx_status'). The URL the monitor will fetch "
    "to retrieve the nginx status information.",
    default="http://localhost/nginx_status",
)
define_config_option(
    __monitor__,
    "source_address",
    # Fixed typo in the user-facing text: "only server the status page"
    # -> "only serve the status page".
    "Optional (defaults to '%s'). The IP address to be used as the source address when fetching "
    "the status URL. Many servers require this to be 127.0.0.1 because they only serve the status "
    "page to requests from localhost." % httpSourceAddress,
    default=httpSourceAddress,
)
define_config_option(
    __monitor__,
    "id",
    "Optional (defaults to empty string). Included in each log message generated by this monitor, "
    "as a field named ``instance``. Allows you to distinguish between different nginx instances "
    "running on the same server.",
    convert_to=six.text_type,
)

define_log_field(__monitor__, "monitor", "Always ``nginx_monitor``.")
define_log_field(
    __monitor__,
    "metric",
    "The metric name. See the metric tables for more information.",
)
define_log_field(__monitor__, "value", "The value of the metric.")
define_log_field(
    __monitor__, "instance", "The ``id`` value from the monitor configuration."
)

define_metric(
    __monitor__,
    "nginx.connections.active",
    "This is the number of connections currently opened to the "
    "server. The total number of allowed connections is a function "
    "of the number of worker_processes and the number of "
    "worker_connections configured within your Nginx configuration "
    "file.",
)
define_metric(
    __monitor__,
    "nginx.connections.reading",
    "The number of connections currently reading from the clients.",
)
define_metric(
    __monitor__,
    "nginx.connections.writing",
    "The number of connections currently writing to the clients.",
)
define_metric(
    __monitor__,
    "nginx.connections.waiting",
    "The number of connections currently idle/sending keep alives.",
)

# Taken from:
# http://stackoverflow.com/questions/1150332/source-interface-with-python-and-urllib2
#
# For connecting to local machine, specifying the source IP may be required. So, using
# this mechanism should allow that. Since getting status requires "opening up" a
# non-standard/user-facing web page, it is best to be cautious.
#
# Note - the use of a global is ugly, but this form is more compatible than with another
# method mentioned which would not require the global. (The cleaner version was added
# in Python 2.7.)
class BindableHTTPConnection(six.moves.http_client.HTTPConnection):
    """HTTPConnection whose client socket is bound to ``self.source_ip``."""

    def connect(self):
        """Connect to the host and port specified in __init__."""
        sock = self.sock = socket.socket()
        # Bind to the configured source address (port 0 = any free local
        # port) before connecting, so the request originates from that IP.
        sock.bind((self.source_ip, 0))
        if isinstance(self.timeout, float):
            sock.settimeout(self.timeout)
        sock.connect((self.host, self.port))
def BindableHTTPConnectionFactory(source_ip):
    """Return a connection factory whose connections bind to *source_ip*."""

    def _get(host, port=None, strict=None, timeout=0):
        # pylint: disable=unexpected-keyword-arg
        # NOTE: "strict" argument is not supported by Python 3 class
        # TODO: We should switch to using urllib Request object directly or requests library
        extra_kwargs = {"strict": strict} if six.PY2 else {}
        connection = BindableHTTPConnection(
            host, port=port, timeout=timeout, **extra_kwargs)
        connection.source_ip = source_ip
        return connection

    return _get
class BindableHTTPHandler(six.moves.urllib.request.HTTPHandler):
    """urllib HTTP handler that opens connections from ``httpSourceAddress``."""

    def http_open(self, req):
        connection_factory = BindableHTTPConnectionFactory(httpSourceAddress)
        return self.do_open(connection_factory, req)
class NginxMonitor(ScalyrMonitor):
    """
    # Nginx Monitor

    This agent monitor plugin records performance and usage data from an nginx server.

    @class=bg-warning docInfoPanel: An *agent monitor plugin* is a component of the Scalyr Agent. To use a plugin,
    simply add it to the ``monitors`` section of the Scalyr Agent configuration file (``/etc/scalyr/agent.json``).
    For more information, see [Agent Plugins](/help/scalyr-agent#plugins).

    ## Configuring Nginx

    To use this monitor, you will need to configure your nginx server to enable the status module. For details,
    see the [nginx documentation](http://nginx.org/en/docs/http/ngx_http_stub_status_module.html).

    First, verify that your nginx server supports the status module. Execute the following command:

        nginx -V 2>&1 | grep -o with-http_stub_status_module

    The output ``with-http_stub_status_module`` indicates that your server supports the status module. Otherwise,
    you will need to either recompile nginx with the ``--with-http_stub_status_module`` flag, or upgrade to a full
    version of nginx that has been compiled with that flag.

    Next, you must enable the status module, by updating the ``server`` configuration section of your nginx server.
    This section can either be found in the ``nginx.conf`` file, or the file in your ``sites-available`` directory
    that corresponds to your site. For most Linux systems, these are located at ``/etc/nginx/nginx.conf`` and
    ``/etc/nginx/sites-available``.

    Add the following configuration inside the ``server { ... }`` stanza:

        location /nginx_status {
          stub_status on;      # enable the status module
          allow 127.0.0.1;     # allow connections from localhost only
          deny all;            # deny every other connection
        }

    This specifies that the status page should be served at ``http://<address>/nginx_status``, and can't be accessed
    from other servers.

    Each time the Scalyr agent fetches ``/nginx_status``, an entry will be added to the Nginx access log. If you wish to
    prevent this, add the line ``access_log off;`` to the above configuration.

    Once you make the configuration change, you will need to restart Nginx. On most Linux systems, use the following
    command:

        sudo service nginx restart

    To verify that the status module is working properly, you can view it manually. Execute this command on the server
    (substituting the appropriate port number as needed):

        curl http://localhost:80/nginx_status

    If you have any difficulty enabling the status module, drop us a line at [support@scalyr.com](mailto:support@scalyr.com).

    ## Sample Configuration

    Here is a typical configuration fragment:

        monitors: [
          {
            module: "scalyr_agent.builtin_monitors.nginx_monitor",
          }
        ]

    If you were running an instances of Nginx on a non-standard port (say 8080), your config might resemble:

        monitors: [
          {
            module: "scalyr_agent.builtin_monitors.nginx_monitor",
            status_url: "http://localhost:8080/nginx_status"
            id: "customers"
          }
        ]

    Note the ``id`` field in the configurations. This is an optional field that allows you to specify an identifier
    specific to a particular instance of Nginx and will make it easier to filter on metrics specific to that
    instance.

    accessLog:

    ## Uploading the nginx access log

    If you have not already done so, you should also configure the Scalyr Agent to upload the access log
    generated by nginx. Scalyr's nginx dashboard uses this log to generate many statistics.

    For most Linux systems, the access log is saved in ``/var/log/nginx/access.log``. To upload, edit the
    ``logs`` section of ``/etc/scalyr-agent-2/agent.json``. Add the following entry:

        logs: [
           ...

    ***    {***
    ***      path: "/var/log/nginx/access.log",***
    ***      attributes: {parser: "accessLog", serverType: "nginx"}***
    ***    }***
        ]

    Edit the ``path`` field as appropriate for your system setup.

    ## Viewing Data

    After adding this plugin to the agent configuration file, wait one minute for data to begin recording. Then
    click the {{menuRef:Dashboards}} menu and select {{menuRef:Nginx}}. (The dashboard may not be listed until
    the agent begins sending nginx data.) You will see an overview of nginx data across all servers where you are
    running the nginx plugin. Use the {{menuRef:ServerHost}} dropdown to show data for a specific server.

    See [Analyze Access Logs](/solutions/analyze-access-logs) for more information about working with web access logs.
    """

    def _initialize(self):
        """Read the monitor configuration options into instance state."""
        # The global write lets the module-level BindableHTTPHandler pick up
        # the configured source address; ugly, but kept for compatibility
        # (see the comment above BindableHTTPConnection).
        global httpSourceAddress
        self.__url = self._config.get("status_url")
        self.__sourceaddress = self._config.get("source_address")
        httpSourceAddress = self.__sourceaddress

    def _parse_data(self, data):
        """Parse the raw stub_status page body into a dict of counters.

        ``data`` is the raw response body (bytes), hence the ``b"..."``
        prefixes below.
        """
        result = {}
        lines = data.splitlines()
        i = 0
        # skip any blank lines
        while len(lines[i]) == 0:
            i = i + 1
        while i < len(lines):
            if lines[i].startswith(b"Active connections:"):
                result["active_connections"] = int(
                    lines[i][len(b"Active connections: ") :]
                )
            elif lines[i].startswith(b"server accepts handled requests"):
                # The three counters are on the line *after* this header.
                i = i + 1
                values = lines[i].split()
                result["server_accepts"] = values[0]
                result["server_handled"] = values[1]
                result["server_requests"] = values[2]
            elif lines[i].startswith(b"Reading:"):
                # Format: "Reading: N Writing: N Waiting: N"
                values = lines[i].split()
                result["reading"] = values[1]
                result["writing"] = values[3]
                result["waiting"] = values[5]
            i = i + 1
        return result

    def _get_status(self):
        """Fetch and parse the status page; return a dict, or None on error."""
        data = None
        # verify that the URL is valid
        try:
            url = six.moves.urllib.parse.urlparse(self.__url)
        except Exception:
            self._logger.error(
                "The URL configured for requesting the status page appears to be invalid. Please verify that the URL is correct in your monitor configuration. The specified url: %s"
                % self.__url
            )
            return data
        # attempt to request server status
        try:
            opener = six.moves.urllib.request.build_opener(BindableHTTPHandler)
            handle = opener.open(self.__url)
            data = handle.read()
            if data is not None:
                data = self._parse_data(data)
        except six.moves.urllib.error.HTTPError as err:
            message = (
                "An HTTP error occurred attempting to retrieve the status. Please consult your server logs to determine the cause. HTTP error code: ",
                err.code,
            )
            if err.code == 404:
                message = "The URL used to request the status page appears to be incorrect. Please verify the correct URL and update your nginx_monitor configuration."
            elif err.code == 403:
                message = "The server is denying access to the URL specified for requesting the status page. Please verify that permissions to access the status page are correctly configured in your server configuration and that your nginx_monitor configuration reflects the same configuration requirements."
            # NOTE(review): ``or`` makes this condition true for every code;
            # 'and' was probably intended. Left as-is (behavior unchanged).
            elif err.code >= 500 or err.code < 600:
                message = (
                    "The server failed to fulfill the request to get the status page. Please consult your server logs to determine the cause. HTTP error code: ",
                    err.code,
                )
            self._logger.error(message)
            data = None
        except six.moves.urllib.error.URLError as err:
            message = (
                "The was an error attempting to reach the server. Make sure the server is running and properly configured. The error reported is: %s"
                % (str(err))
            )
            # errno 111 == ECONNREFUSED on Linux.
            # NOTE(review): err.reason does not always carry an ``errno``
            # attribute -- confirm this branch cannot itself raise.
            if err.reason.errno == 111:
                message = (
                    "The HTTP server does not appear to running or cannot be reached. Please check that it is running and is reachable at the address: %s"
                    % url.netloc
                )
            self._logger.error(message)
            data = None
        except Exception as e:
            self._logger.error(
                "An error occurred attempting to request the server status: %s" % e
            )
            data = None
        return data

    """
    # Currently disabled as it requires platform specific functionality. This will need
    # be reactivated once a cross platform solution is implemented.
    def _get_procinfo(self):
        try:
            data = subprocess.Popen("ps aux | grep \"nginx: worker\" | grep -v grep | awk '{print $2, $3, $4}'", shell=True, stdout=subprocess.PIPE).stdout.read()
            result = {}
            lines = data.splitlines()
            i = 0
            while i < len(lines):
                if len(lines[i]) != 0:
                    values = lines[i].split()
                    if len(values) == 3:
                        result[values[0]] = {
                            "cpu": values[1],
                            "mem": values[2]
                        }
                i = i + 1
        except Exception, e:
            self._logger.error("Unable to check process status: %s" % e)
            result = None
        return result
    """

    def gather_sample(self):
        """Emit one sample per connection gauge parsed from the status page."""
        data = self._get_status()
        if data is None:
            self._logger.error("No data returned.")
        else:
            # Map parsed result keys to the metric names registered above.
            samplesToEmit = [
                ("active_connections", "nginx.connections.active"),
                ("reading", "nginx.connections.reading"),
                ("writing", "nginx.connections.writing"),
                ("waiting", "nginx.connections.waiting"),
            ]

            for key, metric_name in samplesToEmit:
                if key in data:
                    self._logger.emit_value(metric_name, int(data[key]))
| |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015-2018 by ExopyHqcLegacy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Drivers for Anapico SignalGenerator using VISA library.
"""
import re
from textwrap import fill
from inspect import cleandoc
from visa import VisaTypeError
from ..driver_tools import (InstrIOError, instrument_property,
secure_communication)
from ..visa_tools import VisaInstrument
class Anapico(VisaInstrument):
    """
    Generic driver for Anapico Signal Generators,
    using the VISA library.

    Parameters
    ----------
    see the `VisaInstrument` parameters

    Attributes
    ----------
    frequency_unit : str
        Frequency unit used by the driver. The default unit is 'GHz'. Other
        valid units are : 'MHz', 'KHz', 'Hz'
    frequency : float, instrument_property
        Fixed frequency of the output signal.
    power : float, instrument_property
        Fixed power of the output signal.
    output : bool, instrument_property
        State of the output 'ON'(True)/'OFF'(False).
    pm_state : bool, instrument_property
        State of the external pulse modulation 'ON'(True)/'OFF'(False).

    """
    def __init__(self, connection_info, caching_allowed=True,
                 caching_permissions={}, auto_open=True):
        super(Anapico, self).__init__(connection_info,
                                      caching_allowed,
                                      caching_permissions,
                                      auto_open)
        self.frequency_unit = 'GHz'
        self.write_termination = '\n'
        self.read_termination = '\n'
        # Set the reconnect timeout to 0 (available since firmware 0.4.106).
        # This avoids the Anapico generator freezing upon improperly closed
        # connections (for instance if exopy crashes), without having to turn
        # the generator OFF and ON. Explanation from the Anapico support team:
        # the timeout defines how long the user must wait until a link that
        # has never been closed is considered inactive and reconnect is
        # enabled. The default timeout is infinite ("INF"), meaning no
        # reconnect is possible at all. Sending "SYST:COMM:VXI:RTMO 0"
        # immediately after opening a connection makes reconnection always
        # possible.
        self.write("SYST:COMM:VXI:RTMO 0")

    @instrument_property
    @secure_communication()
    def frequency(self):
        """Frequency of the output signal, as returned by the instrument.

        Raises InstrIOError when the instrument returns no value.
        """
        freq = self.ask_for_values('FREQ?')
        if freq:
            return freq[0]
        else:
            raise InstrIOError

    @frequency.setter
    @secure_communication()
    def frequency(self, value):
        """Frequency setter method.

        `value` is interpreted in `self.frequency_unit`. The instrument is
        read back and the answer (converted to the same unit) is checked
        against the requested value.
        """
        unit = self.frequency_unit
        self.write('FREQ {}{}'.format(value, unit))
        result = self.ask_for_values('FREQ?')
        if result:
            # The instrument answers in Hz: convert back to the unit used
            # for the requested value before comparing.
            if unit == 'GHz':
                result[0] /= 1e9
            elif unit == 'MHz':
                result[0] /= 1e6
            elif unit == 'KHz':
                result[0] /= 1e3
            if abs(result[0] - value) > 1e-12:
                mes = 'Instrument did not set correctly the frequency.'
                raise InstrIOError(mes)

    @instrument_property
    @secure_communication()
    def power(self):
        """Power of the output signal.

        Raises InstrIOError when the instrument returns no value.
        """
        # Check the answer before indexing: an empty reply previously raised
        # an IndexError instead of the expected InstrIOError.
        values = self.ask_for_values('POWER?')
        if values:
            return values[0]
        else:
            raise InstrIOError

    @power.setter
    @secure_communication()
    def power(self, value):
        """Power setter method.

        The written value is read back and checked.
        """
        self.write('POWER {}'.format(value))
        result = self.ask_for_values('POWER?')
        # An empty reply or a mismatched read-back both indicate failure.
        if not result or abs(result[0] - value) > 1e-12:
            raise InstrIOError('Instrument did not set correctly the power')

    @instrument_property
    @secure_communication()
    def output(self):
        """Output state of the source.

        Returns True when the output is 'ON', False otherwise.
        """
        # Test for a non-empty answer: ask_for_values returns a list, so the
        # previous `is not None` check could not catch an empty reply.
        output = self.ask_for_values(':OUTP?')
        if output:
            return bool(output[0])
        else:
            mes = 'PSG signal generator did not return its output'
            raise InstrIOError(mes)

    @output.setter
    @secure_communication()
    def output(self, value):
        """Output setter method.

        Accepts 'ON'/'OFF' (case insensitive) as well as 1/0 (or booleans).
        """
        on = re.compile('on', re.IGNORECASE)
        off = re.compile('off', re.IGNORECASE)
        # Test the numeric form first and only run the regex on strings:
        # re.match raises a TypeError on non-string input, which previously
        # made the documented 1/0 values unusable.
        if value == 1 or (isinstance(value, str) and on.match(value)):
            self.write(':OUTPUT ON')
            if self.ask(':OUTPUT?') != '1':
                raise InstrIOError(cleandoc('''Instrument did not set correctly
                                            the output'''))
        elif value == 0 or (isinstance(value, str) and off.match(value)):
            self.write(':OUTPUT OFF')
            if self.ask(':OUTPUT?') != '0':
                raise InstrIOError(cleandoc('''Instrument did not set correctly
                                            the output'''))
        else:
            mess = fill(cleandoc('''The invalid value {} was sent to
                        switch_on_off method''').format(value), 80)
            raise VisaTypeError(mess)

    @instrument_property
    @secure_communication()
    def pm_state(self):
        """Pulse modulation getter method

        Returns True when pulse modulation is enabled.
        """
        # Non-empty check for the same reason as the output getter.
        pm_state = self.ask_for_values('SOURce:PULM:STATE?')
        if pm_state:
            return bool(pm_state[0])
        else:
            mes = 'Signal generator did not return its pulse modulation state'
            raise InstrIOError(mes)

    @pm_state.setter
    @secure_communication()
    def pm_state(self, value):
        """Pulse modulation setter method.

        Configures the modulation to follow an external, normal polarity,
        50 Ohm trigger, then switches it 'ON'/'OFF' (1/0 also accepted).
        """
        # TODO: write checks
        self.write('SOURce:PULM:SOURce EXT')
        self.write('SOURce:PULM:POLarity NORMal')
        self.write('SOURce:PULM:TRIGger:EXTernal:IMPedance G50')
        on = re.compile('on', re.IGNORECASE)
        off = re.compile('off', re.IGNORECASE)
        # Same ordering as the output setter: avoid calling re.match on
        # non-string values.
        if value == 1 or (isinstance(value, str) and on.match(value)):
            self.write('SOURce:PULM:STATE ON')
            if self.ask('SOURce:PULM:STATE?') != '1':
                raise InstrIOError(cleandoc('''Instrument did not set correctly
                                            the pulse modulation state'''))
        elif value == 0 or (isinstance(value, str) and off.match(value)):
            self.write('SOURce:PULM:STATE OFF')
            if self.ask('SOURce:PULM:STATE?') != '0':
                raise InstrIOError(cleandoc('''Instrument did not set correctly
                                            the pulse modulation state'''))
        else:
            mess = fill(cleandoc('''The invalid value {} was sent to
                        switch_on_off method''').format(value), 80)
            raise VisaTypeError(mess)
| |
# Copyright 2009-2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for working with MongoDB `ObjectIds
<http://dochub.mongodb.org/core/objectids>`_.
"""
import binascii
import calendar
import datetime
try:
import hashlib
_md5func = hashlib.md5
except ImportError: # for Python < 2.5
import md5
_md5func = md5.new
import os
import random
import socket
import struct
import threading
import time
from bson.errors import InvalidId
from bson.py3compat import (PY3, b, binary_type, text_type,
bytes_from_hex, string_types)
from bson.tz_util import utc
# Byte-string sentinels (py3compat.b keeps them bytes on both Python 2 and 3).
EMPTY = b("")  # starting value when assembling a 12-byte ObjectId
ZERO = b("\x00")  # padding byte used for dummy ObjectIds (see from_datetime)
def _machine_bytes():
    """Get the machine portion of an ObjectId.

    Returns the first three bytes of the MD5 digest of the hostname.
    """
    digest = _md5func(socket.gethostname().encode()).digest()
    return digest[:3]
class ObjectId(object):
    """A MongoDB ObjectId.

    A 12-byte identifier: 4-byte timestamp, 3-byte machine id, 2-byte pid,
    3-byte incrementing counter (layout built in ``__generate``).
    """
    # Process-wide counter for the 3-byte increment field, protected by a
    # lock so concurrent threads never produce the same ObjectId.
    _inc = random.randint(0, 0xFFFFFF)
    _inc_lock = threading.Lock()
    # 3-byte machine identifier, computed once at import time.
    _machine_bytes = _machine_bytes()
    # Note: a plain string is a valid __slots__ value and declares the single
    # slot '__id' (no per-instance __dict__ is created).
    __slots__ = ('__id')
    def __init__(self, oid=None):
        """Initialize a new ObjectId.
        If `oid` is ``None``, create a new (unique) ObjectId. If `oid`
        is an instance of (:class:`basestring` (:class:`str` or :class:`bytes`
        in python 3), :class:`ObjectId`) validate it and use that. Otherwise,
        a :class:`TypeError` is raised. If `oid` is invalid,
        :class:`~bson.errors.InvalidId` is raised.
        :Parameters:
          - `oid` (optional): a valid ObjectId (12 byte binary or 24 character
            hex string)
        .. versionadded:: 1.2.1
           The `oid` parameter can be a ``unicode`` instance (that contains
           only hexadecimal digits).
        .. mongodoc:: objectids
        """
        if oid is None:
            self.__generate()
        else:
            self.__validate(oid)
    @classmethod
    def from_datetime(cls, generation_time):
        """Create a dummy ObjectId instance with a specific generation time.
        This method is useful for doing range queries on a field
        containing :class:`ObjectId` instances.
        .. warning::
           It is not safe to insert a document containing an ObjectId
           generated using this method. This method deliberately
           eliminates the uniqueness guarantee that ObjectIds
           generally provide. ObjectIds generated with this method
           should be used exclusively in queries.
        `generation_time` will be converted to UTC. Naive datetime
        instances will be treated as though they already contain UTC.
        An example using this helper to get documents where ``"_id"``
        was generated before January 1, 2010 would be:
        >>> gen_time = datetime.datetime(2010, 1, 1)
        >>> dummy_id = ObjectId.from_datetime(gen_time)
        >>> result = collection.find({"_id": {"$lt": dummy_id}})
        :Parameters:
          - `generation_time`: :class:`~datetime.datetime` to be used
            as the generation time for the resulting ObjectId.
        .. versionchanged:: 1.8
           Properly handle timezone aware values for
           `generation_time`.
        .. versionadded:: 1.6
        """
        # Normalize an aware datetime to UTC before extracting the timestamp.
        if generation_time.utcoffset() is not None:
            generation_time = generation_time - generation_time.utcoffset()
        ts = calendar.timegm(generation_time.timetuple())
        # Only the 4 timestamp bytes are meaningful; the rest is zero padding.
        oid = struct.pack(">i", int(ts)) + ZERO * 8
        return cls(oid)
    @classmethod
    def is_valid(cls, oid):
        """Checks if a `oid` string is valid or not.
        :Parameters:
          - `oid`: the object id to validate
        .. versionadded:: 2.3
        """
        # Validation is delegated to the constructor; any rejection there
        # (InvalidId or TypeError) means the value is not a valid ObjectId.
        try:
            ObjectId(oid)
            return True
        except (InvalidId, TypeError):
            return False
    def __generate(self):
        """Generate a new value for this ObjectId.
        """
        oid = EMPTY
        # 4 bytes current time
        oid += struct.pack(">i", int(time.time()))
        # 3 bytes machine
        oid += ObjectId._machine_bytes
        # 2 bytes pid
        oid += struct.pack(">H", os.getpid() % 0xFFFF)
        # 3 bytes inc (take the low 3 bytes of the packed 4-byte int, under
        # the class-level lock so concurrent generations stay unique)
        ObjectId._inc_lock.acquire()
        oid += struct.pack(">i", ObjectId._inc)[1:4]
        ObjectId._inc = (ObjectId._inc + 1) % 0xFFFFFF
        ObjectId._inc_lock.release()
        self.__id = oid
    def __validate(self, oid):
        """Validate and use the given id for this ObjectId.
        Raises TypeError if id is not an instance of
        (:class:`basestring` (:class:`str` or :class:`bytes`
        in python 3), ObjectId) and InvalidId if it is not a
        valid ObjectId.
        :Parameters:
          - `oid`: a valid ObjectId
        """
        if isinstance(oid, ObjectId):
            self.__id = oid.__id
        elif isinstance(oid, string_types):
            # 12 characters must be raw binary; 24 characters are parsed as hex.
            if len(oid) == 12:
                if isinstance(oid, binary_type):
                    self.__id = oid
                else:
                    raise InvalidId("%s is not a valid ObjectId" % oid)
            elif len(oid) == 24:
                try:
                    self.__id = bytes_from_hex(oid)
                except (TypeError, ValueError):
                    raise InvalidId("%s is not a valid ObjectId" % oid)
            else:
                raise InvalidId("%s is not a valid ObjectId" % oid)
        else:
            raise TypeError("id must be an instance of (%s, %s, ObjectId), "
                            "not %s" % (binary_type.__name__,
                                        text_type.__name__, type(oid)))
    @property
    def binary(self):
        """12-byte binary representation of this ObjectId.
        """
        return self.__id
    @property
    def generation_time(self):
        """A :class:`datetime.datetime` instance representing the time of
        generation for this :class:`ObjectId`.
        The :class:`datetime.datetime` is timezone aware, and
        represents the generation time in UTC. It is precise to the
        second.
        .. versionchanged:: 1.8
           Now return an aware datetime instead of a naive one.
        .. versionadded:: 1.2
        """
        # The first 4 bytes are a big-endian POSIX timestamp (see __generate).
        t = struct.unpack(">i", self.__id[0:4])[0]
        return datetime.datetime.fromtimestamp(t, utc)
    def __getstate__(self):
        """return value of object for pickling.
        needed explicitly because __slots__() defined.
        """
        return self.__id
    def __setstate__(self, value):
        """explicit state set from pickling
        """
        # Provide backwards compatability with OIDs
        # pickled with pymongo-1.9 or older.
        if isinstance(value, dict):
            oid = value["_ObjectId__id"]
        else:
            oid = value
        # ObjectIds pickled in python 2.x used `str` for __id.
        # In python 3.x this has to be converted to `bytes`
        # by encoding latin-1.
        if PY3 and isinstance(oid, text_type):
            self.__id = oid.encode('latin-1')
        else:
            self.__id = oid
    def __str__(self):
        # Canonical form: 24-character lowercase hex string.
        return binascii.hexlify(self.__id).decode()
    def __repr__(self):
        return "ObjectId('%s')" % (str(self),)
    # ObjectIds compare by their 12-byte big-endian binary value, which
    # also orders them roughly by generation time. Comparisons against
    # non-ObjectId values return NotImplemented so Python can try the
    # reflected operation.
    def __eq__(self, other):
        if isinstance(other, ObjectId):
            return self.__id == other.__id
        return NotImplemented
    def __ne__(self, other):
        if isinstance(other, ObjectId):
            return self.__id != other.__id
        return NotImplemented
    def __lt__(self, other):
        if isinstance(other, ObjectId):
            return self.__id < other.__id
        return NotImplemented
    def __le__(self, other):
        if isinstance(other, ObjectId):
            return self.__id <= other.__id
        return NotImplemented
    def __gt__(self, other):
        if isinstance(other, ObjectId):
            return self.__id > other.__id
        return NotImplemented
    def __ge__(self, other):
        if isinstance(other, ObjectId):
            return self.__id >= other.__id
        return NotImplemented
    def __hash__(self):
        """Get a hash value for this :class:`ObjectId`.
        .. versionadded:: 1.1
        """
        # Hash the binary value so equal ObjectIds hash equally.
        return hash(self.__id)
| |
# This file is part of Viper - https://github.com/botherder/viper
# See the file 'LICENSE' for copying permission.
import os
import time
import getopt
import fnmatch
import tempfile
import shutil
from zipfile import ZipFile
from viper.common.out import *
from viper.common.objects import File
from viper.common.network import download
from viper.core.session import __sessions__
from viper.core.project import __project__
from viper.core.plugins import __modules__
from viper.core.database import Database
from viper.core.storage import store_sample, get_sample_path
class Commands(object):
def __init__(self):
# Open connection to the database.
self.db = Database()
# Map commands to their related functions.
self.commands = dict(
help=dict(obj=self.cmd_help, description="Show this help message"),
open=dict(obj=self.cmd_open, description="Open a file"),
close=dict(obj=self.cmd_close, description="Close the current session"),
info=dict(obj=self.cmd_info, description="Show information on the opened file"),
notes=dict(obj=self.cmd_notes, description="View, add and edit notes on the opened file"),
clear=dict(obj=self.cmd_clear, description="Clear the console"),
store=dict(obj=self.cmd_store, description="Store the opened file to the local repository"),
delete=dict(obj=self.cmd_delete, description="Delete the opened file"),
find=dict(obj=self.cmd_find, description="Find a file"),
tags=dict(obj=self.cmd_tags, description="Modify tags of the opened file"),
sessions=dict(obj=self.cmd_sessions, description="List or switch sessions"),
projects=dict(obj=self.cmd_projects, description="List or switch existing projects"),
export=dict(obj=self.cmd_export, description="Export the current session to file or zip"),
)
##
# CLEAR
#
# This command simply clears the shell.
    def cmd_clear(self, *args):
        """Clear the terminal screen.

        NOTE(review): shells out to the external "clear" command, so this
        assumes a POSIX environment — confirm Windows support is not needed.
        """
        os.system('clear')
##
# HELP
#
# This command simply prints the help message.
# It lists both embedded commands and loaded modules.
def cmd_help(self, *args):
print(bold("Commands:"))
rows = []
for command_name, command_item in self.commands.items():
rows.append([command_name, command_item['description']])
rows.append(["exit, quit", "Exit Viper"])
rows = sorted(rows, key=lambda entry: entry[0])
print(table(['Command', 'Description'], rows))
print("")
print(bold("Modules:"))
rows = []
for module_name, module_item in __modules__.items():
rows.append([module_name, module_item['description']])
rows = sorted(rows, key=lambda entry: entry[0])
print(table(['Command', 'Description'], rows))
##
# OPEN
#
# This command is used to open a session on a given file.
# It either can be an external file path, or a SHA256 hash of a file which
# has been previously imported and stored.
# While the session is active, every operation and module executed will be
# run against the file specified.
    def cmd_open(self, *args):
        """Open a session on a target file.

        The target may be a local file path (-f), a URL to download (-u),
        an index into the results of the last "find" command (-l), or a
        bare MD5/SHA256 hash of a previously stored sample.
        """
        def usage():
            print("usage: open [-h] [-f] [-u] [-l] [-t] <target|md5|sha256>")

        def help():
            usage()
            print("")
            print("Options:")
            print("\t--help (-h)\tShow this help message")
            print("\t--file (-f)\tThe target is a file")
            print("\t--url (-u)\tThe target is a URL")
            print("\t--last (-l)\tThe target is the entry number from the last find command's results")
            print("\t--tor (-t)\tDownload the file through Tor")
            print("")
            print("You can also specify a MD5 or SHA256 hash to a previously stored")
            print("file in order to open a session on it.")
            print("")

        try:
            opts, argv = getopt.getopt(args, 'hfult', ['help', 'file', 'url', 'last', 'tor'])
        except getopt.GetoptError as e:
            print(e)
            usage()
            return

        # Flags selecting how the positional target is interpreted.
        arg_is_file = False
        arg_is_url = False
        arg_last = False
        arg_use_tor = False
        for opt, value in opts:
            if opt in ('-h', '--help'):
                help()
                return
            elif opt in ('-f', '--file'):
                arg_is_file = True
            elif opt in ('-u', '--url'):
                arg_is_url = True
            elif opt in ('-l', '--last'):
                arg_last = True
            elif opt in ('-t', '--tor'):
                arg_use_tor = True

        if len(argv) == 0:
            usage()
            return
        else:
            target = argv[0]

        # If it's a file path, open a session on it.
        if arg_is_file:
            target = os.path.expanduser(target)

            # This is kind of hacky. It checks if there are additional arguments
            # to the open command, if there is I assume that it's the continuation
            # of a filename with spaces. I then concatenate them.
            # TODO: improve this.
            if len(argv) > 1:
                for arg in argv[1:]:
                    target += ' ' + arg

            if not os.path.exists(target) or not os.path.isfile(target):
                print_error("File not found: {0}".format(target))
                return

            __sessions__.new(target)
        # If it's a URL, download it and open a session on the temporary
        # file.
        elif arg_is_url:
            data = download(url=target, tor=arg_use_tor)

            if data:
                # delete=False so the file survives close() for the session.
                tmp = tempfile.NamedTemporaryFile(delete=False)
                tmp.write(data)
                tmp.close()

                __sessions__.new(tmp.name)
        # Try to open the specified file from the list of results from
        # the last find command.
        elif arg_last:
            if __sessions__.find:
                count = 1
                for item in __sessions__.find:
                    # NOTE(review): int(target) raises ValueError for a
                    # non-numeric argument — TODO confirm desired handling.
                    if count == int(target):
                        __sessions__.new(get_sample_path(item.sha256))
                        break

                    count += 1
            else:
                print_warning("You haven't performed a find yet")
        # Otherwise we assume it's a hash of a previously stored sample.
        else:
            target = argv[0].strip().lower()

            # The hash length selects which database column to query.
            if len(target) == 32:
                key = 'md5'
            elif len(target) == 64:
                key = 'sha256'
            else:
                usage()
                return

            rows = self.db.find(key=key, value=target)

            if not rows:
                print_warning("No file found with the given hash {0}".format(target))
                return

            path = get_sample_path(rows[0].sha256)
            if path:
                __sessions__.new(path)
##
# CLOSE
#
# This command resets the open session.
# After that, all handles to the opened file should be closed and the
# shell should be restored to the default prompt.
    def cmd_close(self, *args):
        """Reset the currently opened session, restoring the default prompt."""
        __sessions__.close()
##
# INFO
#
# This command returns information on the open session. It returns details
# on the file (e.g. hashes) and other information that might available from
# the database.
def cmd_info(self, *args):
if __sessions__.is_set():
print(table(
['Key', 'Value'],
[
('Name', __sessions__.current.file.name),
('Tags', __sessions__.current.file.tags),
('Path', __sessions__.current.file.path),
('Size', __sessions__.current.file.size),
('Type', __sessions__.current.file.type),
('Mime', __sessions__.current.file.mime),
('MD5', __sessions__.current.file.md5),
('SHA1', __sessions__.current.file.sha1),
('SHA256', __sessions__.current.file.sha256),
('SHA512', __sessions__.current.file.sha512),
('SSdeep', __sessions__.current.file.ssdeep),
('CRC32', __sessions__.current.file.crc32)
]
))
##
# NOTES
#
# This command allows you to view, add, modify and delete notes associated
# with the currently opened file.
def cmd_notes(self, *args):
def usage():
print("usage: notes [-h] [-l] [-a] [-e <note id>] [-d <note id>]")
def help():
usage()
print("")
print("Options:")
print("\t--help (-h)\tShow this help message")
print("\t--list (-l)\tList all notes available for the current file")
print("\t--add (-a)\tAdd a new note to the current file")
print("\t--view (-v)\tView the specified note")
print("\t--edit (-e)\tEdit an existing note")
print("\t--delete (-d)\tDelete an existing note")
print("")
try:
opts, argv = getopt.getopt(args, 'hlav:e:d:', ['help', 'list', 'add', 'view=', 'edit=', 'delete='])
except getopt.GetoptError as e:
print(e)
usage()
return
arg_list = False
arg_add = False
arg_view = None
arg_edit = None
arg_delete = None
for opt, value in opts:
if opt in ('-h', '--help'):
help()
return
elif opt in ('-l', '--list'):
arg_list = True
elif opt in ('-a', '--add'):
arg_add = True
elif opt in ('-v', '--view'):
arg_view = value
elif opt in ('-e', '--edit'):
arg_edit = value
elif opt in ('-d', '--delete'):
arg_delete = value
if not __sessions__.is_set():
print_error("No session opened")
return
if arg_list:
# Retrieve all notes for the currently opened file.
malware = Database().find(key='sha256', value=__sessions__.current.file.sha256)
if not malware:
print_error("The opened file doesn't appear to be in the database, have you stored it yet?")
return
notes = malware[0].note
if not notes:
print_info("No notes available for this file yet")
return
# Populate table rows.
rows = []
for note in notes:
rows.append([note.id, note.title])
# Display list of existing notes.
print(table(header=['ID', 'Title'], rows=rows))
elif arg_add:
title = raw_input("Enter a title for the new note: ")
# Create a new temporary file.
tmp = tempfile.NamedTemporaryFile(delete=False)
# Open the temporary file with the default editor, or with nano.
os.system('"${EDITOR:-nano}" ' + tmp.name)
# Once the user is done editing, we need to read the content and
# store it in the database.
body = tmp.read()
Database().add_note(__sessions__.current.file.sha256, title, body)
# Finally, remove the temporary file.
os.remove(tmp.name)
print_info("New note with title \"{0}\" added to the current file".format(bold(title)))
elif arg_view:
# Retrieve note wth the specified ID and print it.
note = Database().get_note(arg_view)
if note:
print_info(bold('Title: ') + note.title)
print_info(bold('Body:'))
print(note.body)
else:
print_info("There is no note with ID {0}".format(arg_view))
elif arg_edit:
# Retrieve note with the specified ID.
note = Database().get_note(arg_edit)
if note:
# Create a new temporary file.
tmp = tempfile.NamedTemporaryFile(delete=False)
# Write the old body to the temporary file.
tmp.write(note.body)
tmp.close()
# Open the old body with the text editor.
os.system('"${EDITOR:-nano}" ' + tmp.name)
# Read the new body from the temporary file.
body = open(tmp.name, 'r').read()
# Update the note entry with the new body.
Database().edit_note(arg_edit, body)
# Remove the temporary file.
os.remove(tmp.name)
print_info("Updated note with ID {0}".format(arg_edit))
elif arg_delete:
# Delete the note with the specified ID.
Database().delete_note(arg_delete)
else:
usage()
##
# STORE
#
# This command stores the opened file in the local repository and tries
# to store details in the database.
def cmd_store(self, *args):
def usage():
print("usage: store [-h] [-d] [-f <path>] [-s <size>] [-y <type>] [-n <name>] [-t]")
def help():
usage()
print("")
print("Options:")
print("\t--help (-h)\tShow this help message")
print("\t--delete (-d)\tDelete the original file")
print("\t--folder (-f)\tSpecify a folder to import")
print("\t--file-size (-s)\tSpecify a maximum file size")
print("\t--file-type (-y)\tSpecify a file type pattern")
print("\t--file-name (-n)\tSpecify a file name pattern")
print("\t--tags (-t)\tSpecify a list of comma-separated tags")
print("")
try:
opts, argv = getopt.getopt(args, 'hdf:s:y:n:t:', ['help', 'delete', 'folder=', 'file-size=', 'file-type=', 'file-name=', 'tags='])
except getopt.GetoptError as e:
print(e)
usage()
return
arg_delete = False
arg_folder = False
arg_file_size = None
arg_file_type = None
arg_file_name = None
arg_tags = None
for opt, value in opts:
if opt in ('-h', '--help'):
help()
return
elif opt in ('-d', '--delete'):
arg_delete = True
elif opt in ('-f', '--folder'):
arg_folder = value
elif opt in ('-s', '--file-size'):
arg_file_size = value
elif opt in ('-y', '--file-type'):
arg_file_type = value
elif opt in ('-n', '--file-name'):
arg_file_name = value
elif opt in ('-t', '--tags'):
arg_tags = value
def add_file(obj, tags=None):
if get_sample_path(obj.sha256):
print_warning("Skip, file \"{0}\" appears to be already stored".format(obj.name))
return False
# Try to store file object into database.
status = self.db.add(obj=obj, tags=tags)
if status:
# If succeeds, store also in the local repository.
# If something fails in the database (for example unicode strings)
# we don't want to have the binary lying in the repository with no
# associated database record.
new_path = store_sample(obj)
print_success("Stored file \"{0}\" to {1}".format(obj.name, new_path))
else:
return False
# Delete the file if requested to do so.
if arg_delete:
try:
os.unlink(obj.path)
except Exception as e:
print_warning("Failed deleting file: {0}".format(e))
return True
# If the user specified the --folder flag, we walk recursively and try
# to add all contained files to the local repository.
# This is note going to open a new session.
# TODO: perhaps disable or make recursion optional?
if arg_folder:
# Check if the specified folder is valid.
if os.path.isdir(arg_folder):
# Walk through the folder and subfolders.
for dir_name, dir_names, file_names in os.walk(arg_folder):
# Add each collected file.
for file_name in file_names:
file_path = os.path.join(dir_name, file_name)
if not os.path.exists(file_path):
continue
# Check if file is not zero.
if not os.path.getsize(file_path) > 0:
continue
# Check if the file name matches the provided pattern.
if arg_file_name:
if not fnmatch.fnmatch(file_name, arg_file_name):
#print_warning("Skip, file \"{0}\" doesn't match the file name pattern".format(file_path))
continue
# Check if the file type matches the provided pattern.
if arg_file_type:
if arg_file_type not in File(file_path).type:
#print_warning("Skip, file \"{0}\" doesn't match the file type".format(file_path))
continue
# Check if file exceeds maximum size limit.
if arg_file_size:
# Obtain file size.
if os.path.getsize(file_path) > arg_file_size:
print_warning("Skip, file \"{0}\" is too big".format(file_path))
continue
file_obj = File(file_path)
# Add file.
add_file(file_obj, arg_tags)
else:
print_error("You specified an invalid folder: {0}".format(arg_folder))
# Otherwise we try to store the currently opened file, if there is any.
else:
if __sessions__.is_set():
if __sessions__.current.file.size == 0:
print_warning("Skip, file \"{0}\" appears to be empty".format(__sessions__.current.file.name))
return False
# Add file.
if add_file(__sessions__.current.file, arg_tags):
# Open session to the new file.
self.cmd_open(*[__sessions__.current.file.sha256])
else:
print_error("No session opened")
##
# DELETE
#
# This command deletes the currently opened file (only if it's stored in
# the local repository) and removes the details from the database
    def cmd_delete(self, *args):
        """Delete the currently opened file from the repository and database.

        Asks for interactive confirmation first; the deletion cannot be
        reverted.
        """
        if __sessions__.is_set():
            # Require an explicit 'y' before destroying anything; 'n' aborts.
            while True:
                # NOTE(review): raw_input is Python 2 only — confirm the
                # project still targets Python 2.
                choice = raw_input("Are you sure you want to delete this binary? Can't be reverted! [y/n] ")
                if choice == 'y':
                    break
                elif choice == 'n':
                    return

            # Remove the database record, if one exists for this hash.
            rows = self.db.find('sha256', __sessions__.current.file.sha256)
            if rows:
                malware_id = rows[0].id
                if self.db.delete(malware_id):
                    print_success("File deleted")
                else:
                    print_error("Unable to delete file")

            # Remove the binary itself and close the now-stale session.
            os.remove(__sessions__.current.file.path)
            __sessions__.close()
        else:
            print_error("No session opened")
##
# FIND
#
# This command is used to search for files in the database.
def cmd_find(self, *args):
def usage():
print("usage: find [-h] [-t] <all|latest|name|type|mime|md5|sha256|tag|note> <value>")
def help():
usage()
print("")
print("Options:")
print("\t--help (-h)\tShow this help message")
print("\t--tags (-t)\tList tags")
print("")
try:
opts, argv = getopt.getopt(args, 'ht', ['help', 'tags'])
except getopt.GetoptError as e:
print(e)
usage()
return
arg_list_tags = False
for opt, value in opts:
if opt in ('-h', '--help'):
help()
return
elif opt in ('-t', '--tags'):
arg_list_tags = True
# One of the most useful search terms is by tag. With the --tags
# argument we first retrieve a list of existing tags and the count
# of files associated with each of them.
if arg_list_tags:
# Retrieve list of tags.
tags = self.db.list_tags()
if tags:
rows = []
# For each tag, retrieve the count of files associated with it.
for tag in tags:
count = len(self.db.find('tag', tag.tag))
rows.append([tag.tag, count])
# Generate the table with the results.
header = ['Tag', '# Entries']
rows.sort(key=lambda x: x[1], reverse=True)
print(table(header=header, rows=rows))
else:
print("No tags available")
return
# At this point, if there are no search terms specified, return.
if len(args) == 0:
usage()
return
# The first argument is the search term (or "key").
key = args[0]
if key != 'all' and key != 'latest':
try:
# The second argument is the search value.
value = args[1]
except IndexError:
print_error("You need to include a search term.")
return
else:
value = None
# Search all the files matching the given parameters.
items = self.db.find(key, value)
if not items:
return
# Populate the list of search results.
rows = []
count = 1
for item in items:
tag = ', '.join([t.tag for t in item.tag if t.tag])
row = [count, item.name, item.mime, item.md5, tag]
if key == 'latest':
row.append(item.created_at)
rows.append(row)
count += 1
# Update find results in current session.
__sessions__.find = items
# Generate a table with the results.
header = ['#', 'Name', 'Mime', 'MD5', 'Tags']
if key == 'latest':
header.append('Created At')
print(table(header=header, rows=rows))
##
# TAGS
#
# This command is used to modify the tags of the opened file.
def cmd_tags(self, *args):
def usage():
print("usage: tags [-h] [-a=tags] [-d=tag]")
def help():
usage()
print("")
print("Options:")
print("\t--help (-h)\tShow this help message")
print("\t--add (-a)\tAdd tags to the opened file (comma separated)")
print("\t--delete (-d)\tDelete a tag from the opened file")
print("")
try:
opts, argv = getopt.getopt(args, 'ha:d:', ['help', 'add=', 'delete='])
except getopt.GetoptError as e:
print(e)
usage()
return
arg_add = None
arg_delete = None
for opt, value in opts:
if opt in ('-h', '--help'):
help()
return
elif opt in ('-a', '--add'):
arg_add = value
elif opt in ('-d', '--delete'):
arg_delete = value
# This command requires a session to be opened.
if not __sessions__.is_set():
print_error("No session opened")
return
# If no arguments are specified, there's not much to do.
# However, it could make sense to also retrieve a list of existing
# tags from this command, and not just from the "find" command alone.
if not arg_add and not arg_delete:
usage()
return
# TODO: handle situation where addition or deletion of a tag fail.
if arg_add:
# Add specified tags to the database's entry belonging to
# the opened file.
db = Database()
db.add_tags(__sessions__.current.file.sha256, arg_add)
print_info("Tags added to the currently opened file")
# We refresh the opened session to update the attributes.
# Namely, the list of tags returned by the "info" command
# needs to be re-generated, or it wouldn't show the new tags
# until the existing session is closed a new one is opened.
print_info("Refreshing session to update attributes...")
__sessions__.new(__sessions__.current.file.path)
if arg_delete:
# Delete the tag from the database.
Database().delete_tag(arg_delete)
# Refresh the session so that the attributes of the file are
# updated.
print_info("Refreshing session to update attributes...")
__sessions__.new(__sessions__.current.file.path)
###
# SESSION
#
# This command is used to list and switch across all the opened sessions.
def cmd_sessions(self, *args):
def usage():
print("usage: sessions [-h] [-l] [-s=session]")
def help():
usage()
print("")
print("Options:")
print("\t--help (-h)\tShow this help message")
print("\t--list (-l)\tList all existing sessions")
print("\t--switch (-s)\tSwitch to the specified session")
print("")
try:
opts, argv = getopt.getopt(args, 'hls:', ['help', 'list', 'switch='])
except getopt.GetoptError as e:
print(e)
usage()
return
arg_list = False
arg_switch = None
for opt, value in opts:
if opt in ('-h', '--help'):
help()
return
elif opt in ('-l', '--list'):
arg_list = True
elif opt in ('-s', '--switch'):
arg_switch = int(value)
if arg_list:
if not __sessions__.sessions:
print_info("There are no opened sessions")
return
rows = []
for session in __sessions__.sessions:
current = ''
if session == __sessions__.current:
current = 'Yes'
rows.append([
session.id,
session.file.name,
session.file.md5,
session.created_at,
current
])
print_info("Opened Sessions:")
print(table(header=['#', 'Name', 'MD5', 'Created At', 'Current'], rows=rows))
return
elif arg_switch:
for session in __sessions__.sessions:
if arg_switch == session.id:
__sessions__.switch(session)
return
print_warning("The specified session ID doesn't seem to exist")
return
usage()
##
# PROJECTS
#
# This command retrieves a list of all projects.
# You can also switch to a different project.
def cmd_projects(self, *args):
def usage():
print("usage: projects [-h] [-l] [-s=project]")
def help():
usage()
print("")
print("Options:")
print("\t--help (-h)\tShow this help message")
print("\t--list (-l)\tList all existing projects")
print("\t--switch (-s)\tSwitch to the specified project")
print("")
try:
opts, argv = getopt.getopt(args, 'hls:', ['help', 'list', 'switch='])
except getopt.GetoptError as e:
print(e)
usage()
return
arg_list = False
arg_switch = None
for opt, value in opts:
if opt in ('-h', '--help'):
help()
return
elif opt in ('-l', '--list'):
arg_list = True
elif opt in ('-s', '--switch'):
arg_switch = value
projects_path = os.path.join(os.getcwd(), 'projects')
if not os.path.exists(projects_path):
print_info("The projects directory does not exist yet")
return
if arg_list:
print_info("Projects Available:")
rows = []
for project in os.listdir(projects_path):
project_path = os.path.join(projects_path, project)
if os.path.isdir(project_path):
current = ''
if __project__.name and project == __project__.name:
current = 'Yes'
rows.append([project, time.ctime(os.path.getctime(project_path)), current])
print(table(header=['Project Name', 'Creation Time', 'Current'], rows=rows))
return
elif arg_switch:
if __sessions__.is_set():
__sessions__.close()
print_info("Closed opened session")
__project__.open(arg_switch)
print_info("Switched to project {0}".format(bold(arg_switch)))
# Need to re-initialize the Database to open the new SQLite file.
self.db = Database()
return
usage()
##
# EXPORT
#
# This command will export the current session to file or zip.
def cmd_export(self, *args):
def usage():
print("usage: export [-h] [-z] <path or archive name>")
def help():
usage()
print("")
print("Options:")
print("\t--help (-h)\tShow this help message")
print("\t--zip (-z)\tExport session in a zip archive")
print("")
try:
opts, argv = getopt.getopt(args, 'hz', ['help', 'zip'])
except getopt.GetoptError as e:
print(e)
usage()
return
arg_zip = False
for opt, value in opts:
if opt in ('-h', '--help'):
help()
return
elif opt in ('-z', '--zip'):
arg_zip = True
# This command requires a session to be opened.
if not __sessions__.is_set():
print_error("No session opened")
return
# Check for valid export path.
if len(args) ==0:
usage()
return
# TODO: having for one a folder and for the other a full
# target path can be confusing. We should perhaps standardize this.
# Abort if the specified path already exists.
if os.path.isfile(argv[0]):
print_error("File at path \"{0}\" already exists, abort".format(argv[0]))
return
# If the argument chosed so, archive the file when exporting it.
# TODO: perhaps add an option to use a password for the archive
# and default it to "infected".
if arg_zip:
try:
with ZipFile(argv[0], 'w') as export_zip:
export_zip.write(__sessions__.current.file.path, arcname=__sessions__.current.file.name)
except IOError as e:
print_error("Unable to export file: {0}".format(e))
else:
print_info("File archived and exported to {0}".format(argv[0]))
# Otherwise just dump it to the given directory.
else:
# XXX: Export file with the original file name.
store_path = os.path.join(argv[0], __sessions__.current.file.name)
try:
shutil.copyfile(__sessions__.current.file.path, store_path)
except IOError as e:
print_error("Unable to export file: {0}".format(e))
else:
print_info("File exported to {0}".format(store_path))
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from swift.common import utils as swift_utils
from swift.common.middleware import acl as swift_acl
from swift.common.swob import HTTPNotFound, HTTPForbidden, HTTPUnauthorized
class KeystoneAuth(object):
    """Swift middleware to Keystone authorization system.

    In Swift's proxy-server.conf add this middleware to your pipeline::

        [pipeline:main]
        pipeline = catch_errors cache authtoken keystoneauth proxy-server

    Make sure you have the authtoken middleware before the
    keystoneauth middleware.

    The authtoken middleware will take care of validating the user and
    keystoneauth will authorize access.

    The authtoken middleware is shipped directly with keystone; it
    does not have any other dependencies than itself, so you can either
    install it by copying the file directly into your python path or by
    installing keystone.

    If support is required for unvalidated users (as with anonymous
    access) or for tempurl/formpost middleware, authtoken will need
    to be configured with delay_auth_decision set to 1. See the
    Keystone documentation for more detail on how to configure the
    authtoken middleware.

    In proxy-server.conf you will need to have account auto-creation
    set to true::

        [app:proxy-server] account_autocreate = true

    And add a swift authorization filter section, such as::

        [filter:keystoneauth]
        use = egg:swift#keystoneauth
        operator_roles = admin, swiftoperator

    This maps tenants to accounts in Swift.

    The users who are able to give ACLs / create containers are the
    ones holding a role listed in the operator_roles setting, which by
    default includes the admin and the swiftoperator roles.

    The option is_admin, if set to true, will allow the username that
    has the same name as the account name to be the owner.

    Example: If we have the account called hellocorp with a user
    hellocorp, that user will be admin on that account and can give ACLs
    to all other users for hellocorp.

    If you need to have a different reseller_prefix to be able to
    mix different auth servers you can configure the option
    reseller_prefix in your keystoneauth entry like this::

        reseller_prefix = NEWAUTH_

    Make sure you have an underscore at the end of your new
    reseller_prefix option.

    :param app: The next WSGI app in the pipeline
    :param conf: The dict of configuration values
    """
    def __init__(self, app, conf):
        self.app = app
        self.conf = conf
        self.logger = swift_utils.get_logger(conf, log_route='keystoneauth')
        # Swift account names are "<reseller_prefix><tenant_id>"; see
        # _get_account_for_tenant().
        self.reseller_prefix = conf.get('reseller_prefix', 'AUTH_').strip()
        # Comma-separated list of roles that grant account-owner rights.
        self.operator_roles = conf.get('operator_roles',
                                       'admin, swiftoperator')
        # Role that grants unconditional access to every account.
        self.reseller_admin_role = conf.get('reseller_admin_role',
                                            'ResellerAdmin')
        config_is_admin = conf.get('is_admin', "false").lower()
        self.is_admin = swift_utils.config_true_value(config_is_admin)
        # Default 't' is truthy: overrides (tempurl/formpost) are honored
        # unless allow_overrides is explicitly disabled.
        config_overrides = conf.get('allow_overrides', 't').lower()
        self.allow_overrides = swift_utils.config_true_value(config_overrides)

    def __call__(self, environ, start_response):
        """WSGI entry point: install the right authorize callback and pass on."""
        identity = self._keystone_identity(environ)

        # Check if one of the middlewares like tempurl or formpost has
        # set the swift.authorize_override environ key and wants to control
        # the authentication itself.
        if (self.allow_overrides and
                environ.get('swift.authorize_override', False)):
            msg = 'Authorizing from an overriding middleware (i.e: tempurl)'
            self.logger.debug(msg)
            return self.app(environ, start_response)

        if identity:
            # Confirmed Keystone identity: authorize against roles/ACLs.
            self.logger.debug('Using identity: %r' % (identity))
            environ['keystone.identity'] = identity
            environ['REMOTE_USER'] = identity.get('tenant')
            environ['swift.authorize'] = self.authorize
        else:
            # No confirmed identity: only referrer/container-sync access
            # can be granted.
            self.logger.debug('Authorizing as anonymous')
            environ['swift.authorize'] = self.authorize_anonymous

        environ['swift.clean_acl'] = swift_acl.clean_acl

        return self.app(environ, start_response)

    def _keystone_identity(self, environ):
        """Extract the identity from the Keystone auth component.

        Returns None unless authtoken marked the token as Confirmed.
        The 'tenant' entry is a (tenant_id, tenant_name) tuple.
        """
        if environ.get('HTTP_X_IDENTITY_STATUS') != 'Confirmed':
            return
        roles = []
        if 'HTTP_X_ROLES' in environ:
            roles = environ['HTTP_X_ROLES'].split(',')
        identity = {'user': environ.get('HTTP_X_USER_NAME'),
                    'tenant': (environ.get('HTTP_X_TENANT_ID'),
                               environ.get('HTTP_X_TENANT_NAME')),
                    'roles': roles}
        return identity

    def _get_account_for_tenant(self, tenant_id):
        """Return the Swift account name for *tenant_id*."""
        return '%s%s' % (self.reseller_prefix, tenant_id)

    def _reseller_check(self, account, tenant_id):
        """Check reseller prefix: does *account* belong to *tenant_id*?"""
        return account == self._get_account_for_tenant(tenant_id)

    def _authorize_cross_tenant(self, user, tenant_id, tenant_name, roles):
        """Check cross-tenant ACLs.

        Match tenant_id:user, tenant_name:user, and *:user.

        :param user: The user name from the identity token.
        :param tenant_id: The tenant ID from the identity token.
        :param tenant_name: The tenant name from the identity token.
        :param roles: The given container ACL.

        :returns: True if tenant_id:user, tenant_name:user, or *:user matches
                  the given ACL. False otherwise.
        """
        wildcard_tenant_match = '*:%s' % (user)
        tenant_id_user_match = '%s:%s' % (tenant_id, user)
        tenant_name_user_match = '%s:%s' % (tenant_name, user)

        return (wildcard_tenant_match in roles
                or tenant_id_user_match in roles
                or tenant_name_user_match in roles)

    def authorize(self, req):
        """Authorize a request carrying a confirmed Keystone identity.

        :returns: None when access is granted, an error response otherwise.
        """
        env = req.environ
        env_identity = env.get('keystone.identity', {})
        tenant_id, tenant_name = env_identity.get('tenant')
        user = env_identity.get('user', '')
        referrers, roles = swift_acl.parse_acl(getattr(req, 'acl', None))

        # Allow OPTIONS requests to proceed as normal (CORS preflight).
        if req.method == 'OPTIONS':
            return

        try:
            part = req.split_path(1, 4, True)
            version, account, container, obj = part
        except ValueError:
            return HTTPNotFound(request=req)

        user_roles = env_identity.get('roles', [])

        # Give unconditional access to a user with the reseller_admin
        # role.
        if self.reseller_admin_role in user_roles:
            msg = 'User %s has reseller admin authorizing'
            self.logger.debug(msg % tenant_id)
            req.environ['swift_owner'] = True
            return

        # Cross-tenant authorization: the container ACL may grant access
        # to users from other tenants.
        if self._authorize_cross_tenant(user, tenant_id, tenant_name, roles):
            log_msg = 'user %s:%s, %s:%s, or *:%s allowed in ACL authorizing'
            self.logger.debug(log_msg % (tenant_name, user,
                                         tenant_id, user, user))
            return

        # Check if a user tries to access an account that does not match their
        # token.
        if not self._reseller_check(account, tenant_id):
            log_msg = 'tenant mismatch: %s != %s' % (account, tenant_id)
            self.logger.debug(log_msg)
            return self.denied_response(req)

        # Check the roles the user belongs to. If the user holds a role
        # listed in the config variable operator_roles (like admin) then
        # it will be promoted to an admin of the account/tenant.
        for role in self.operator_roles.split(','):
            role = role.strip()
            if role in user_roles:
                log_msg = 'allow user with role %s as account admin' % (role)
                self.logger.debug(log_msg)
                req.environ['swift_owner'] = True
                return

        # If the user has the same name as the tenant, make them owner of it
        # (only when is_admin is enabled).
        if self.is_admin and user == tenant_name:
            req.environ['swift_owner'] = True
            return

        # Referrer/container-sync based access (same path as anonymous);
        # a True/False result is decisive, None means "no determination".
        authorized = self._authorize_unconfirmed_identity(req, obj, referrers,
                                                          roles)
        if authorized:
            return
        elif authorized is not None:
            return self.denied_response(req)

        # Check if one of the user's roles appears in the container ACL.
        for user_role in user_roles:
            if user_role in roles:
                log_msg = 'user %s:%s allowed in ACL: %s authorizing'
                self.logger.debug(log_msg % (tenant_name, user, user_role))
                return

        return self.denied_response(req)

    def authorize_anonymous(self, req):
        """
        Authorize an anonymous request.

        :returns: None if authorization is granted, an error page otherwise.
        """
        try:
            part = req.split_path(1, 4, True)
            version, account, container, obj = part
        except ValueError:
            return HTTPNotFound(request=req)

        # Allow OPTIONS requests to proceed as normal (CORS preflight).
        if req.method == 'OPTIONS':
            return

        # Only accounts carrying our reseller prefix are ours to decide on.
        is_authoritative_authz = (account and
                                  account.startswith(self.reseller_prefix))
        if not is_authoritative_authz:
            return self.denied_response(req)

        referrers, roles = swift_acl.parse_acl(getattr(req, 'acl', None))
        authorized = self._authorize_unconfirmed_identity(req, obj, referrers,
                                                          roles)
        if not authorized:
            return self.denied_response(req)

    def _authorize_unconfirmed_identity(self, req, obj, referrers, roles):
        """
        Perform authorization for access that does not require a
        confirmed identity.

        :returns: A boolean if authorization is granted or denied.  None if
                  a determination could not be made.
        """
        # Allow container sync: the shared sync key plus a timestamp header
        # identifies a legitimate container-sync request.
        if (req.environ.get('swift_sync_key')
            and (req.environ['swift_sync_key'] ==
                 req.headers.get('x-container-sync-key', None))
            and 'x-timestamp' in req.headers):
            log_msg = 'allowing proxy %s for container-sync' % req.remote_addr
            self.logger.debug(log_msg)
            return True

        # Check if referrer is allowed. Object reads need only the referrer
        # match; container listings additionally need '.rlistings' in the ACL.
        if swift_acl.referrer_allowed(req.referer, referrers):
            if obj or '.rlistings' in roles:
                log_msg = 'authorizing %s via referer ACL' % req.referrer
                self.logger.debug(log_msg)
                return True
            return False

    def denied_response(self, req):
        """Deny WSGI Response.

        Returns a standard WSGI response callable with the status of 403 or 401
        depending on whether the REMOTE_USER is set or not.
        """
        if req.remote_user:
            return HTTPForbidden(request=req)
        else:
            return HTTPUnauthorized(request=req)
def filter_factory(global_conf, **local_conf):
    """Returns a WSGI filter app for use with paste.deploy."""
    # Local settings take precedence over the global paste config.
    settings = dict(global_conf)
    settings.update(local_conf)

    def auth_filter(app):
        return KeystoneAuth(app, settings)

    return auth_filter
| |
# Copyright (c) 2015, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import errno
import os
import re
import select
import shutil
import signal
import socket
import subprocess
import sys
import tempfile
from mcrouter.test.config import McrouterGlobals
class BaseDirectory(object):
    """Temporary working directory that is removed when the object dies.

    :param prefix: prefix used for the temporary directory's name.
    """

    def __init__(self, prefix="mctest"):
        # mkdtemp() creates a fresh private directory and returns its path.
        self.path = tempfile.mkdtemp(prefix=prefix + '.')

    def __del__(self):
        # BUGFIX: ignore cleanup errors. __del__ may run at interpreter
        # shutdown, or after the directory was already removed externally;
        # an exception here would surface as an unraisable error instead
        # of a clean teardown.
        shutil.rmtree(self.path, ignore_errors=True)
def MCPopen(cmd, stdout=None, stderr=None, env=None):
    """Spawn *cmd* as a child process (thin subprocess.Popen wrapper)."""
    popen_kwargs = {'stdout': stdout, 'stderr': stderr, 'env': env}
    return subprocess.Popen(cmd, **popen_kwargs)
class MCProcess(object):
    """
    It would be best to use mc.client and support all requests. But we can't do
    that until mc.client supports ASCII (because mcproxy doesn't support
    binary). For now, be hacky and just talk ASCII by hand.

    Wraps a memcache-speaking child process plus a client socket to it:
    the class both launches the process (stdout/stderr redirected into the
    base directory) and implements enough of the ASCII protocol (get/set/
    delete/incr/stats/...) to drive it from tests.

    NOTE(review): the protocol methods send/receive native `str` objects;
    this looks like Python-2-era code (see get(): `res.values()[0]`) —
    confirm before running under Python 3.
    """

    proc = None

    def __init__(self, cmd, port, base_dir=None, junk_fill=False):
        """Launch *cmd* (if given) and remember the port to connect to.

        :param cmd: argv list for the child process, or a falsy value to
                    wrap an already-running server (client-only mode).
        :param port: TCP port the server listens on (string or int).
        :param base_dir: BaseDirectory for stdout/stderr capture files;
                         a fresh one is created when None.
        :param junk_fill: when True, run the child with
                          MALLOC_CONF=junk:true (jemalloc junk filling).
        """
        port = int(port)
        if base_dir is None:
            base_dir = BaseDirectory('MCProcess')
        self.base_dir = base_dir
        # Child stdout/stderr are captured into files for later dump().
        self.stdout = os.path.join(base_dir.path, 'stdout')
        self.stderr = os.path.join(base_dir.path, 'stderr')
        stdout = open(self.stdout, 'w')
        stderr = open(self.stderr, 'w')

        if cmd:
            # NOTE(review): this loop computes a basename into `command`
            # but the result is never used afterwards — looks like dead
            # code left over from a log message.
            for command in cmd:
                if command == 'python':
                    continue
                if command.startswith('-'):
                    continue
                command = os.path.basename(command)
                break
            try:
                if junk_fill:
                    env = dict(MALLOC_CONF='junk:true')
                else:
                    env = None
                self.proc = MCPopen(cmd, stdout, stderr, env)
            except OSError:
                sys.exit("Fatal: Could not run " + repr(" ".join(cmd)))
        else:
            self.proc = None

        self.addr = ('localhost', port)
        self.port = port
        # Per-operation counters kept for test bookkeeping.
        self.sets = 0
        self.gets = 0
        self.deletes = 0
        self.others = 0

    def getprocess(self):
        """Return the underlying Popen object (or None in client-only mode)."""
        return self.proc

    def pause(self):
        """Stop the child with SIGSTOP (it stays alive but unresponsive)."""
        if self.proc:
            self.proc.send_signal(signal.SIGSTOP)

    def resume(self):
        """Resume a pause()d child with SIGCONT."""
        if self.proc:
            self.proc.send_signal(signal.SIGCONT)

    def getport(self):
        """Return the TCP port the server is expected to listen on."""
        return self.port

    def connect(self):
        """(Re)open the client socket and a buffered file wrapper over it."""
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.connect(self.addr)
        self.fd = self.socket.makefile()

    def ensure_connected(self):
        """Retry connect() until the server accepts (busy-waits on
        ECONNREFUSED while the child is still starting up)."""
        while True:
            try:
                self.connect()
                return
            except Exception as e:
                if e.errno == errno.ECONNREFUSED:
                    pass
                else:
                    raise

    def disconnect(self):
        """Close socket and file wrapper, ignoring I/O errors."""
        try:
            self.socket.close()
        except IOError:
            pass
        try:
            self.fd.close()
        except IOError:
            pass
        self.fd = self.socket = None

    def terminate(self):
        """Disconnect, dump captured output, and stop the child process.

        :returns: the Popen object of the terminated child, or None in
                  client-only mode.
        """
        if not self.proc:
            return None

        self.disconnect()
        # Dump captured output first so it is visible even if wait() hangs.
        self.dump()

        proc = self.proc
        if self.proc:
            if self.proc.returncode is None:
                self.proc.terminate()
            self.proc.wait()
            self.proc = None

        return proc

    def is_alive(self):
        """Poll the child and report whether it is still running."""
        self.proc.poll()
        return self.proc.returncode is None

    def dump(self):
        """ dump stderr, stdout, and the log file to stdout with nice headers.
        This allows us to get all this information in a test failure (hidden by
        default) so we can debug better. """
        # Grumble... this would be so much easier if I could just pass
        # sys.stdout/stderr to Popen.
        with open(self.stdout, 'r') as stdout_f:
            stdout = stdout_f.read()
        with open(self.stderr, 'r') as stderr_f:
            stderr = stderr_f.read()
        if hasattr(self, 'log'):
            print(self.base_dir)
            try:
                with open(self.log, 'r') as log_f:
                    log = log_f.read()
            except:
                log = ""
        else:
            log = ""
        # NOTE(review): all three headers below say "stdout", including the
        # ones printing the log and stderr contents — likely a copy/paste
        # slip in the labels (output content itself is correct).
        if log:
            print("%s stdout:\n%s" % (self, log))
        if stdout:
            print("%s stdout:\n%s" % (self, stdout))
        if stderr:
            print("%s stdout:\n%s" % (self, stderr))

    def __del__(self):
        # Best-effort cleanup: make sure the child does not outlive us.
        if self.proc:
            self.proc.terminate()

    def get(self, keys, return_all_info=False):
        """Issue an ASCII "get" for one key or a list of keys.

        :param keys: a single key (returns just its value) or a list of
                     keys (returns a dict key -> value).
        :param return_all_info: when True, each value is a dict with
                                key/flags/size/value instead of the payload.
        :returns: value(s), the raw SERVER_ERROR line on server error, or
                  None entries for missing keys.
        """
        multi = True
        if not isinstance(keys, list):
            multi = False
            keys = [keys]
        self.gets += len(keys)
        self.socket.sendall("get %s\r\n" % " ".join(keys))
        res = dict([(key, None) for key in keys])
        while True:
            l = self.fd.readline().strip()
            if l == 'END':
                if multi:
                    return res
                else:
                    assert len(res) == 1
                    # NOTE(review): .values()[0] only works on Python 2,
                    # where dict.values() returns a list.
                    return res.values()[0]
            elif l.startswith("VALUE"):
                v, k, f, n = l.split()
                assert k in keys
                payload = self.fd.read(int(n))
                # Consume the trailing "\r\n" after the payload.
                self.fd.read(2)
                if (return_all_info):
                    res[k] = dict({"key": k,
                                   "flags": int(f),
                                   "size": int(n),
                                   "value": payload})
                else:
                    res[k] = payload
            elif l.startswith("SERVER_ERROR"):
                return l
            else:
                # Unknown reply: reconnect to resynchronize the stream,
                # then surface the protocol error.
                self.connect()
                raise Exception('Unexpected response "%s" (%s)' % (l, keys))

    def metaget(self, keys):
        """Issue a "metaget" for a single key; returns a dict of the
        name/value pairs from the META reply line."""
        ## FIXME: Not supporting multi-metaget yet
        #multi = True
        #if not instance(keys, list):
        #    multi = False
        #    keys = [keys]
        #self.gets += len(keys)
        res = {}
        self.gets += 1
        self.socket.sendall("metaget %s\r\n" % keys)
        while True:
            l = self.fd.readline().strip()
            if l.startswith("END"):
                return res
            elif l.startswith("META"):
                # META replies look like "META <key> name1: value1; ...";
                # strip the ':' / ';' decorations while pairing tokens.
                meta_list = l.split()
                for i in range(1, len(meta_list) // 2):
                    res[meta_list[2 * i].strip(':')] = \
                        meta_list[2 * i + 1].strip(';')

    def leaseGet(self, keys):
        """Issue a "lease-get"; per key returns {"value", "token"} where
        token is None for a hit (VALUE) and an int for a lease (LVALUE)."""
        multi = True
        if not isinstance(keys, list):
            multi = False
            keys = [keys]
        self.gets += len(keys)
        self.socket.sendall("lease-get %s\r\n" % " ".join(keys))
        res = dict([(key, None) for key in keys])
        while True:
            l = self.fd.readline().strip()
            if l == 'END':
                if multi:
                    assert(len(res) == len(keys))
                    return res
                else:
                    assert len(res) == 1
                    # NOTE(review): Python-2-only indexing, as in get().
                    return res.values()[0]
            elif l.startswith("VALUE"):
                v, k, f, n = l.split()
                assert k in keys
                res[k] = {"value": self.fd.read(int(n)),
                          "token": None}
                self.fd.read(2)
            elif l.startswith("LVALUE"):
                v, k, t, f, n = l.split()
                assert k in keys
                res[k] = {"value": self.fd.read(int(n)),
                          "token": int(t)}

    def expectNoReply(self):
        """Return True if the server stays silent for 0.5s (used to verify
        "noreply" commands really produce no reply)."""
        self.socket.settimeout(0.5)
        try:
            self.socket.recv(1)
            return False
        except socket.timeout:
            pass
        return True

    def _set(self, command, key, value, replicate=False, noreply=False):
        """Shared implementation for set/add/replace.

        :param replicate: when True, sets flag bit 1024 on the item.
        :returns: the STORED match on success, None after an ERROR reply
                  (reconnecting first), or the expectNoReply() result when
                  noreply is requested.
        """
        self.sets += 1
        value = str(value)
        flags = 1024 if replicate else 0
        self.socket.sendall("%s %s %d 0 %d%s\r\n%s\r\n" %
                            (command, key, flags, len(value),
                             (' noreply' if noreply else ''), value))
        if noreply:
            return self.expectNoReply()

        answer = self.fd.readline().strip()
        if re.search('ERROR', answer):
            print(answer)
            self.connect()
            return None
        return re.match("STORED", answer)

    def leaseSet(self, key, value_token, is_stalestored=False):
        """Issue a "lease-set" with the token obtained from leaseGet().

        :param value_token: dict with "value" and "token" entries.
        :param is_stalestored: when True, expect STALE_STORED instead of
                               STORED as the success reply.
        """
        self.sets += 1
        value = str(value_token["value"])
        token = int(value_token["token"])
        flags = 0
        cmd = "lease-set %s %d %d 0 %d\r\n%s\r\n" % \
            (key, token, flags, len(value), value)
        self.socket.sendall(cmd)

        answer = self.fd.readline().strip()
        if re.search('ERROR', answer):
            print(answer)
            self.connect()
            return None

        if is_stalestored:
            return re.match("STALE_STORED", answer)
        return re.match("STORED", answer)

    def set(self, key, value, replicate=False, noreply=False):
        """ASCII "set" (see _set)."""
        return self._set("set", key, value, replicate, noreply)

    def add(self, key, value, replicate=False, noreply=False):
        """ASCII "add" (see _set)."""
        return self._set("add", key, value, replicate, noreply)

    def replace(self, key, value, replicate=False, noreply=False):
        """ASCII "replace" (see _set)."""
        return self._set("replace", key, value, replicate, noreply)

    def delete(self, key, noreply=False):
        """ASCII "delete"; returns the DELETED match (falsy on NOT_FOUND)."""
        self.socket.sendall("delete %s%s\r\n" %
                            (key, (' noreply' if noreply else '')))
        self.deletes += 1
        if noreply:
            return self.expectNoReply()
        answer = self.fd.readline()
        assert re.match("DELETED|NOT_FOUND|SERVER_ERROR", answer), answer
        return re.match("DELETED", answer)

    def incr(self, key, value=1, noreply=False):
        """ASCII "incr"; returns the new counter value or None on NOT_FOUND."""
        self.socket.sendall("incr %s %d%s\r\n" %
                            (key, value, (' noreply' if noreply else '')))
        self.sets += 1
        if noreply:
            return self.expectNoReply()
        answer = self.fd.readline()
        if re.match("NOT_FOUND", answer):
            return None
        else:
            return int(answer)

    def decr(self, key, value=1, noreply=False):
        """ASCII "decr"; returns the new counter value or None on NOT_FOUND."""
        self.socket.sendall("decr %s %d%s\r\n" %
                            (key, value, (' noreply' if noreply else '')))
        self.sets += 1
        if noreply:
            return self.expectNoReply()
        answer = self.fd.readline()
        if re.match("NOT_FOUND", answer):
            return None
        else:
            return int(answer)

    def stats(self, spec=None):
        """ASCII "stats [spec]"; returns a name -> value dict, or None if
        the server produces no output within 2 seconds."""
        q = 'stats\r\n'
        if spec:
            q = 'stats {0}\r\n'.format(spec)
        self.socket.sendall(q)

        s = {}
        l = None
        # Guard against a wedged server: bail out if nothing is readable.
        fds = select.select([self.fd], [], [], 2.0)
        if len(fds[0]) == 0:
            return None
        while l != 'END':
            l = self.fd.readline().strip()
            a = l.split(None, 2)
            if len(a) == 3:
                s[a[1]] = a[2]
        return s

    def issue_command(self, command):
        """Send a raw command string and return the first reply line."""
        self.others += 1
        self.socket.sendall(command)
        answer = self.fd.readline()
        return answer

    def version(self):
        """ASCII "version"; returns the raw reply line."""
        self.socket.sendall("version\r\n")
        return self.fd.readline()

    def shutdown(self):
        """ASCII "shutdown"; returns the raw reply line."""
        self.socket.sendall("shutdown\r\n")
        return self.fd.readline()

    def flush_all(self, delay=None):
        """ASCII "flush_all [delay]"; returns the reply line sans newline."""
        if delay is None:
            self.socket.sendall("flush_all\r\n")
        else:
            self.socket.sendall("flush_all {}\r\n".format(delay))
        return self.fd.readline().rstrip()
def sub_port(s, substitute_ports, port_map):
    """Rewrite the port component(s) of a "host:port[:suffix]" string.

    Ports already seen are looked up in *port_map*; new ports get an entry
    assigned from *substitute_ports* (a list consumed in order, or a dict
    mapping old port -> new port). Components that are not integers are
    left untouched; a string without ':' is returned unchanged.
    """
    pieces = s.split(':')
    if len(pieces) < 2:
        # No separator at all: nothing that could be a port.
        return s

    # A port may sit in the last position ("host:port") or second-to-last
    # ("host:port:suffix"); try both.
    for idx in (-1, -2):
        try:
            old_port = int(pieces[idx])
        except (IndexError, ValueError):
            continue
        if old_port not in port_map:
            if len(port_map) >= len(substitute_ports):
                raise Exception("Looking up port %d: config file has more"
                                " ports specified than the number of"
                                " mock servers started" % old_port)
            if isinstance(substitute_ports, list):
                port_map[old_port] = substitute_ports[len(port_map)]
            else:
                if old_port not in substitute_ports:
                    raise Exception(
                        "Port %s not in substitute port map" % old_port)
                port_map[old_port] = substitute_ports[old_port]
        pieces[idx] = str(port_map[old_port])
    return ':'.join(pieces)
def replace_ports(json, substitute_ports):
    """In string json (which must be a valid JSON string), replace all ports
    in strings of the form "host:port" with ports from the list or map
    substitute_ports.

    If a list, each new distinct port found in the json is replaced with the
    next port from the list. If a map of the form (old_port: new_port),
    all old_ports are replaced with new_ports.
    """
    # Tiny scanner: track whether we are inside a JSON string literal (and
    # whether the previous character was a backslash escape) so that only
    # quoted "host:port" tokens are rewritten.
    OUTSIDE, IN_STRING, IN_ESCAPE = 0, 1, 2
    mode = OUTSIDE
    result = []
    current = ""
    port_map = {}
    for ch in json:
        if mode == OUTSIDE:
            result.append(ch)
            if ch == '"':
                current = ""
                mode = IN_STRING
        elif mode == IN_STRING:
            if ch == '\\':
                current += ch
                mode = IN_ESCAPE
            elif ch == '"':
                # End of the literal: rewrite any port it contains.
                result.append(sub_port(current, substitute_ports, port_map))
                result.append(ch)
                mode = OUTSIDE
            else:
                current += ch
        else:  # IN_ESCAPE: keep the escaped character verbatim.
            current += ch
            mode = IN_STRING
    if len(port_map) < len(substitute_ports):
        raise Exception("Config file has fewer ports specified than the number"
                        " of mock servers started")
    return "".join(result)
def replace_strings(json, replace_map):
    """Return *json* with every key of *replace_map* replaced by str(value)."""
    result = json
    for key, value in replace_map.items():
        result = result.replace(key, str(value))
    return result
def create_listen_socket():
    """Create a listening TCP socket on an OS-assigned ephemeral port.

    Prefers IPv6 when the platform supports it. The socket is deliberately
    left unbound: listen() makes the kernel pick a free port, which callers
    read back via getsockname().
    """
    family = socket.AF_INET6 if socket.has_ipv6 else socket.AF_INET
    listen_sock = socket.socket(family)
    listen_sock.listen(100)
    return listen_sock
class Mcrouter(MCProcess):
    """An mcrouter instance launched against a (possibly rewritten) config.

    The config file may be preprocessed twice before launch: first with
    replace_map (literal string substitution), then with
    substitute_config_ports (rewriting "host:port" tokens); each pass
    writes a new temp file inside the base directory.

    NOTE(review): the substitute_port_map parameter is accepted but never
    read in this constructor — confirm whether callers still rely on it.
    """
    def __init__(self, config, port=None, default_route=None, extra_args=None,
                 base_dir=None, substitute_config_ports=None,
                 substitute_port_map=None, replace_map=None):
        if base_dir is None:
            base_dir = BaseDirectory('mcrouter')
        self.base_dir = base_dir
        self.log = os.path.join(self.base_dir.path, 'mcrouter.log')

        # Spool directory for async (failed-delete) operations; passed via -a.
        self.async_spool = os.path.join(self.base_dir.path, 'spool')
        os.mkdir(self.async_spool)

        # Pass 1: literal string replacement in the config file.
        if replace_map:
            with open(config, 'r') as config_file:
                replaced_config = replace_strings(config_file.read(),
                                                  replace_map)
            (_, config) = tempfile.mkstemp(dir=self.base_dir.path)
            with open(config, 'w') as config_file:
                config_file.write(replaced_config)

        # Pass 2: rewrite "host:port" tokens to the mock servers' ports.
        if substitute_config_ports:
            with open(config, 'r') as config_file:
                replaced_config = replace_ports(config_file.read(),
                                                substitute_config_ports)
            (_, config) = tempfile.mkstemp(dir=self.base_dir.path)
            with open(config, 'w') as config_file:
                config_file.write(replaced_config)

        self.config = config
        args = [McrouterGlobals.InstallDir + '/mcrouter/mcrouter', '-d',
                '-f', config,
                '-L', self.log,
                '-a', self.async_spool]

        # Without an explicit port, pre-create a listening socket and hand
        # its fd to mcrouter so the kernel-chosen port is known up front.
        listen_sock = None
        if port is None:
            listen_sock = create_listen_socket()
            port = listen_sock.getsockname()[1]
            args.extend(['--listen-sock-fd', str(listen_sock.fileno())])
        else:
            args.extend(['-p', str(port)])

        if default_route:
            args.extend(['-R', default_route])

        if extra_args:
            args.extend(extra_args)

        # When running in background mode (-b) the Popen child is only a
        # launcher, so PID-file-based terminate/is_alive replace the
        # MCProcess implementations.
        if '-b' in args:
            pid_file = os.path.join(self.base_dir.path, 'mcrouter.pid')
            args.extend(['-P', pid_file])

            def get_pid():
                # Read the daemonized mcrouter's PID from its pid file.
                with open(pid_file, 'r') as pid_f:
                    pid = pid_f.read().strip()
                if not pid:
                    return pid
                return int(pid)

            def terminate():
                # Signal the daemon directly; the Popen child already exited.
                pid = get_pid()
                if pid:
                    os.kill(pid, signal.SIGTERM)

            def is_alive():
                # /proc/<pid> exists iff the daemon is still running (Linux).
                pid = get_pid()
                if pid:
                    return os.path.exists("/proc/{}".format(pid))
                return False

            self.terminate = terminate
            self.is_alive = is_alive

        args = McrouterGlobals.preprocessArgs(args)

        MCProcess.__init__(self, args, port, self.base_dir, junk_fill=True)

        # The child inherited the fd; our copy is no longer needed.
        if listen_sock is not None:
            listen_sock.close()

    def get_async_spool_dir(self):
        """Return the async spool directory path (mcrouter's -a argument)."""
        return self.async_spool

    def change_config(self, new_config_path):
        """Overwrite the live config file in place (mcrouter watches it)."""
        shutil.copyfile(new_config_path, self.config)

    def check_in_log(self, needle):
        """Return True if *needle* appears in the mcrouter log file."""
        return needle in open(self.log).read()
class McrouterClient(MCProcess):
    """Client-only MCProcess: connects to an already-running mcrouter on
    *port* without spawning any child process (cmd is None)."""

    def __init__(self, port):
        super(McrouterClient, self).__init__(None, str(port))
class Memcached(MCProcess):
    """Mock memcached backend (mock_mc_server) used behind mcrouter in tests."""

    def __init__(self, port=None):
        args = [McrouterGlobals.InstallDir +
                '/mcrouter/lib/network/mock_mc_server']

        # With no explicit port, pre-create a listening socket so the mock
        # server inherits the fd (-t); otherwise ask it to bind itself (-P).
        if port is None:
            listen_sock = create_listen_socket()
            port = listen_sock.getsockname()[1]
            extra = ['-t', str(listen_sock.fileno())]
        else:
            listen_sock = None
            extra = ['-P', str(port)]
        args.extend(extra)

        MCProcess.__init__(self, args, port)

        # The child inherited the fd; our copy is no longer needed.
        if listen_sock is not None:
            listen_sock.close()
| |
# -*- coding: utf-8 -*-
u"""Flash Config parser.
:copyright: Copyright (c) 2021 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from pykern import pkio
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdc, pkdp, pkdlog
from sirepo.template import template_common
from sirepo.template import flash_views
import os.path
import re
class ConfigParser():
    """Parser for FLASH "Config" files.

    parse() turns the file into a list of PKDict statements, one per
    recognized directive, with IF/ELSEIF/ELSE blocks nested via their
    own "statements" lists and D(escription) lines folded into the
    matching PARAMETER entries.
    """
    # Recognized directives:
    # D <name> <comment> or D & <comment>
    # DATAFILES <wildcard>
    # LINKIF <filename> <unitename>
    # MASS_SCALAR <name> (EOSMAP: <eosrole> | (EOSMAPIN: <eosrole>)? (EOSMAPOUT: <eosrole>)?)?
    # PARAMETER <name> <type> CONSTANT? <default> <range spec>?
    # PARTICLEMAP TO <partname> FROM <vartype> <varname>
    # PARTICLEPROP <name> <type>
    # PARTICLETYPE <particletype> INITMETHOD <initmethod> MAPMETHOD <mapmethod> ADVMETHOD <advmethod>
    # PPDEFINE <sym> <val>?
    # REQUESTS <unit>
    # REQUIRES <unit>
    # SCRATCHCENTERVAR <name>
    # SPECIES <name> (TO <number of ions>)?
    # USESETUPVARS <var>
    # VARIABLE <name> (TYPE: <vartype)
    # (IF, ELSEIF, ELSE, ENDIF)

    def parse(self, config_text):
        """Parse *config_text* and return the top-level statement list.

        Each line is dispatched to a _parse_<keyword> method; lines whose
        keyword has no handler are logged and skipped. IF/ELSEIF/ELSE push
        and pop self.stack so nested statements land in the right block.
        """
        idx = 1
        # stack[0] is a synthetic root holding the top-level statements.
        self.stack = [PKDict(statements=[])]
        for line in config_text.split('\n'):
            # Strip trailing comments, then make sure "TYPE:x" splits into
            # two tokens ("TYPE:" "x").
            line = re.sub(r'#.*$', '', line)
            line = re.sub(r'(TYPE:)(\S)', r'\1 \2', line)
            p = line.split()
            if not p:
                continue
            method = f'_parse_{p[0].lower()}'
            if not hasattr(self, method):
                pkdlog('skipping line={}', line)
                continue
            # Every statement gets a unique _id and carries its directive
            # keyword as _type.
            m = PKDict(
                _id=idx,
                _type=p[0],
            )
            idx += 1
            # Handlers return the model to append, or None when the line
            # only mutated state (continuations, IF/ELSE bookkeeping).
            item = getattr(self, method)(p, m)
            if item:
                self.stack[-1].statements.append(item)
        assert len(self.stack) == 1, \
            'improper IF/ENDIF nesting'
        return self.__move_descriptions_to_parameters(self.stack[0])

    def _parse_d(self, parts, model):
        """D <name> <comment>; "D &" continues the previous description."""
        if parts[1] == '&':
            prev = self.stack[-1].statements[-1]
            assert prev._type == 'D', \
                'expected multiline description for D &'
            prev.comment += ' {}'.format(' '.join(parts[2:]))
            return None
        return model.pkupdate(
            name=parts[1],
            comment=' '.join(parts[2:]),
        )

    def _parse_datafiles(self, parts, model):
        """DATAFILES <wildcard>."""
        return model.pkupdate(
            wildcard=parts[1],
        )

    def _parse_else(self, parts, model):
        """ELSE: close the IF branch and open a condition-less branch."""
        self.stack.pop()
        self.__new_stack(model)
        return None

    def _parse_elseif(self, parts, model):
        """ELSEIF: close the current branch, then treat like IF."""
        self.stack.pop()
        return self._parse_if(parts, model)

    def _parse_endif(self, parts, model):
        """ENDIF: close the current branch; the ENDIF marker itself is
        appended as a statement."""
        self.stack.pop()
        return model

    def _parse_if(self, parts, model):
        """IF <condition>: open a nested statement block."""
        self.__new_stack(model, parts[1])
        return None

    def _parse_linkif(self, parts, model):
        """LINKIF <filename> <unitname>."""
        return model.pkupdate(
            filename=parts[1],
            unitname=parts[2],
        )

    def _parse_mass_scalar(self, parts, model):
        """MASS_SCALAR <name> with optional EOSMAP/EOSMAPIN/EOSMAPOUT pairs."""
        for i in range(2, len(parts), 2):
            n = parts[i]
            m = re.search(r'^(EOSMAP(IN|OUT)?):$', n)
            assert m, f'unknown MASS_SCALAR arg: {n}'
            # Store as model.eosmap / model.eosmapin / model.eosmapout.
            model[m.group(1).lower()] = parts[i + 1]
        return model.pkupdate(
            name=parts[1],
        )

    def _parse_parameter(self, parts, model):
        """PARAMETER <name> <type> CONSTANT? <default> <range spec>?."""
        assert re.search(r'^(REAL|INTEGER|STRING|BOOLEAN)$', parts[2]), \
            f'invalid Config type: {parts[2]}'
        if parts[3] == 'CONSTANT':
            # Remove the token so the default is always at index 3.
            del parts[3]
            model.isConstant = '1'
        else:
            model.isConstant = '0'
        model.range = ''
        if len(parts) > 4:
            # Range spec like "[0 to 1]"; drop the brackets.
            model.range = re.sub(r'\[|\]', '', ' '.join(parts[4:]))
        return model.pkupdate(
            name=parts[1],
            type=parts[2],
            default=re.sub(r'"', '', parts[3]),
        )

    def _parse_particlemap(self, parts, model):
        """PARTICLEMAP TO <partname> FROM <vartype> <varname>."""
        assert parts[1] == 'TO' and parts[3] == 'FROM', \
            f'invalid PARTICLEMAP def: {" ".join(parts)}'
        return model.pkupdate(
            partname=parts[2],
            pvartype=parts[4],
            varname=parts[5],
        )

    def _parse_particleprop(self, parts, model):
        """PARTICLEPROP <name> <type>."""
        return model.pkupdate(
            name=parts[1],
            type=parts[2],
        )

    def _parse_particletype(self, parts, model):
        """PARTICLETYPE ... INITMETHOD ... MAPMETHOD ... [ADVMETHOD ...]."""
        assert parts[2] == 'INITMETHOD' and parts[4] == 'MAPMETHOD', \
            f'invalid PARTICLETYPE def: {" ".join(parts)}'
        return model.pkupdate(
            particletype=parts[1],
            initmethod=parts[3],
            mapmethod=parts[5],
            advmethod=parts[7] if len(parts) > 6 and parts[6] == 'ADVMETHOD' else '',
        )

    def _parse_ppdefine(self, parts, model):
        """PPDEFINE <sym> <val>? (value optional)."""
        return model.pkupdate(
            sym=parts[1],
            val=' '.join(parts[2:]) if len(parts) > 2 else '',
        )

    def _parse_requires(self, parts, model):
        """REQUIRES <unit>."""
        return model.pkupdate(
            unit=parts[1],
        )

    def _parse_requests(self, parts, model):
        """REQUESTS <unit>."""
        return model.pkupdate(
            unit=parts[1],
        )

    def _parse_scratchcentervar(self, parts, model):
        """SCRATCHCENTERVAR <name>."""
        return model.pkupdate(
            name=parts[1],
        )

    def _parse_species(self, parts, model):
        """SPECIES <name> (TO <number of ions>)?."""
        if len(parts) > 2:
            assert parts[2] == 'TO', \
                f'invalid SPECIES: {" ".join(parts)}'
            model.numberOfIons = parts[3]
        return model.pkupdate(
            name=parts[1],
        )

    def _parse_usesetupvars(self, parts, model):
        """USESETUPVARS <var> [<var> ...]."""
        return model.pkupdate(
            vars=' '.join(parts[1:])
        )

    def _parse_variable(self, parts, model):
        """VARIABLE <name> (TYPE: <vartype>)?."""
        model.vartype = ''
        if len(parts) > 2:
            assert parts[2] == 'TYPE:', \
                f'invalid VARIABLE line: {" ".join(parts)}'
            model.vartype = parts[3]
        return model.pkupdate(
            name=parts[1],
        )

    def __move_descriptions_to_parameters(self, item):
        """Copy each D comment onto the PARAMETER of the same name and
        remove the D statements from the tree; returns the top-level list."""
        def _find_all(item, search_type, do_remove=False, res=None):
            # Recursively collect statements of search_type by name,
            # optionally stripping them from each statements list.
            if res is None:
                res = PKDict()
            if '_type' in item and item._type == search_type:
                res[item.name] = item
            if 'statements' in item:
                for stmt in item.statements:
                    _find_all(stmt, search_type, do_remove, res)
                if do_remove:
                    item.statements = list(filter(lambda x: x._type != search_type, item.statements))
            return res
        descriptions = _find_all(item, 'D', do_remove=True)
        parameters = _find_all(item, 'PARAMETER')
        for name in descriptions:
            if name in parameters:
                parameters[name].comment = descriptions[name].comment
        return item.statements

    def __new_stack(self, model, condition=None):
        """Append *model* as a block statement and make it the innermost
        statements target (used by IF/ELSEIF/ELSE)."""
        self.stack[-1].statements.append(model.pkupdate(
            statements=[],
        ))
        if condition:
            model.condition = condition
        self.stack.append(model)
class ParameterParser():
    """Parser for FLASH runtime parameter (.par) files.

    parse() maps each "name = value" line onto the sirepo flashSchema
    model fields found in sim_in, converting enum/boolean/numeric values
    to the schema's representation.
    """
    def parse(self, sim_in, par_text):
        """Return a PKDict of schema field name -> converted value."""
        self.schema = sim_in.models.flashSchema
        # Lowercased par-file field name -> [model name, field name].
        self.field_map = self.__field_to_model_map()
        return self.__parse_values(self.__parse_text(par_text))

    def __field_to_model_map(self):
        """Build a case-insensitive lookup from field name to (model, field)."""
        res = PKDict()
        for m in self.schema.model:
            for f in self.schema.model[m]:
                res[f.lower()] = [m, f]
        return res

    def __parse_text(self, par_text):
        """Extract "name = value" pairs (comments stripped, names lowercased)."""
        res = PKDict()
        for line in par_text.split('\n'):
            line = re.sub(r'#.*$', '', line)
            m = re.search(r'^(\w.*?)\s*=\s*(.*?)\s*$', line)
            if m:
                f, v = m.group(1, 2)
                res[f.lower()] = v
        return res

    def __parse_values(self, fields):
        """Convert raw string values to schema values (enums, booleans,
        numbers/strings); unknown fields are logged and skipped."""
        # Case-insensitive enum value lookup per enum type.
        enum_map = PKDict()
        for (e, items) in self.schema.enum.items():
            enum_map[e] = PKDict({v[0].lower(): v[0] for v in items})
        res = PKDict()
        for (f, v) in fields.items():
            if f not in self.field_map:
                pkdlog(f'Unknown field: {f}: {v}')
                continue
            # NOTE: `m` is reused here as the model name and below as a
            # regex match object.
            m, fn = self.field_map[f]
            # Schema field info: index 1 holds the field type.
            ftype = self.schema.model[m][fn][1]
            if ftype in self.schema.enum:
                v = re.sub(r'"', '', v)
                assert v.lower() in enum_map[ftype], \
                    f'Unknown enum value for field: {ftype}: {v}'
                res[fn] = enum_map[ftype][v.lower()]
            elif ftype == 'Boolean':
                # Fortran-style booleans: .true. / .false. (any case).
                v = SetupParameterParser.remove_quotes(v)
                m = re.search(r'^\.(true|false)\.$', v, re.IGNORECASE)
                assert m, f'invalid boolean for field {f}: {v}'
                res[fn] = '1' if m.group(1).lower() == 'true' else '0'
            else:
                res[fn] = SetupParameterParser.parse_string_or_number(
                    ftype, fields[f], maybe_quoted=True)
        return res
class SetupParameterParser():
    """Parses flash setup output (setup_vars, setup_params, setup_datafiles)
    into a simulation schema of models, views and enums.

    Fixes: three assert messages previously referenced undefined names
    (``field`` in parse_string_or_number, ``line`` in __parse_field), which
    turned failing assertions into NameErrors.
    """
    # largest representable values, used for fortran HUGE() defaults
    _HUGE = PKDict(
        Integer=2147483647,
        Float=3.40282347E+38,
    )
    _MAX_VAR_COUNT = 20
    # per-model field type overrides which cannot be inferred from setup_params
    _SPECIAL_TYPES = PKDict(
        Grid_GridMain=PKDict({
            k: 'GridBoundaryType' for k in (
                'xl_boundary_type', 'xr_boundary_type',
                'yl_boundary_type', 'yr_boundary_type',
                'zl_boundary_type', 'zr_boundary_type',
            )
        }),
        Grid_GridMain_paramesh=PKDict({
            f'refine_var_{v}': 'VariableNameOptional' for v in range(1, _MAX_VAR_COUNT)
        }),
        IO_IOMain=PKDict({
            f'plot_var_{v}': 'VariableNameOptional' for v in range(1, _MAX_VAR_COUNT)
        }),
        physics_Diffuse_DiffuseMain=PKDict({
            k: 'DiffuseBoundaryType' for k in (
                'diff_eleXlBoundaryType', 'diff_eleXrBoundaryType',
                'diff_eleYlBoundaryType', 'diff_eleYrBoundaryType',
                'diff_eleZlBoundaryType', 'diff_eleZrBoundaryType',
            )
        }),
        physics_Gravity=PKDict(
            grav_boundary_type='GravityBoundaryType',
        ),
        physics_Gravity_GravityMain_Constant=PKDict(
            gdirec='GravityDirection',
        ),
        physics_Hydro_HydroMain_unsplit=PKDict(
            RiemannSolver='RiemannSolver'
        ),
        physics_RadTrans_RadTransMain_MGD=PKDict({
            k: 'RadTransMGDBoundaryType' for k in (
                'rt_mgdXlBoundaryType', 'rt_mgdXrBoundaryType',
                'rt_mgdYlBoundaryType', 'rt_mgdYrBoundaryType',
                'rt_mgdZlBoundaryType', 'rt_mgdZrBoundaryType',
            )
        }),
        physics_sourceTerms_EnergyDeposition_EnergyDepositionMain_Laser=PKDict({
            f'ed_crossSectionFunctionType_{v}': 'LaserCrossSectionOptional' for v in range(1, _MAX_VAR_COUNT)
        }),
    )
    # smallest positive values, used for fortran TINY() defaults
    _TINY = PKDict(
        Float=1.17549435E-38,
    )
    # flash declaration type -> schema field type
    _TYPE_MAP = PKDict(
        BOOLEAN='Boolean',
        INTEGER='Integer',
        REAL='Float',
        STRING='String',
    )
    def __add_enum(self, enums, name, values):
        # schema enums are [value, label] pairs; the value doubles as its label
        enums[name] = [[v, v] for v in values]
    def __init__(self, setup_dir):
        self.setup_dir = setup_dir
    def generate_schema(self):
        """Parse the setup files in setup_dir and return the formatted schema."""
        with pkio.open_text(self.setup_dir.join('setup_vars')) as f:
            self.var_names = self.__parse_vars(f)
        with pkio.open_text(self.setup_dir.join('setup_params')) as f:
            self.models, self.views = self.__parse_setup(f)
        with pkio.open_text(self.setup_dir.join('setup_datafiles')) as f:
            self.datafiles = self.__parse_datafiles(f)
        return self.__format_schema()
    @classmethod
    def model_name_from_flash_unit_name(cls, text):
        """Convert a flash unit path (a/b/c) to a schema model name (a_b_c)."""
        # TODO(e-carlin): discuss with pjm. Main units have different vars than the non-main
        # return '_'.join(filter(lambda x: not re.search(r'Main$', x), text.split('/')))
        return '_'.join(text.split('/'))
    @classmethod
    def parse_string_or_number(cls, field_type, value, maybe_quoted=False):
        """Coerce a raw text value to the given schema field type.

        Handles fortran HUGE/TINY specials and (optionally) surrounding quotes.
        Raises AssertionError for malformed values or unknown field types.
        """
        if field_type == 'String' or field_type == 'OptionalString':
            return cls.__parse_string(value)
        if maybe_quoted:
            value = cls.remove_quotes(value)
        if re.search(r'^(-)?(HUGE|TINY)', value):
            return cls.__parse_special_number(field_type, value)
        if field_type == 'Integer':
            # fixed: message previously interpolated an undefined "field"
            # name, raising NameError instead of AssertionError on bad input
            assert re.search(r'^([\-|+])?\d+$', str(value)), \
                f'invalid flash integer: {value}'
            return int(value)
        if field_type == 'Float':
            # tolerate values emitted with a dangling exponent sign, e.g. "1.0e+"
            value = re.sub(r'\+$', '', value)
            assert template_common.NUMERIC_RE.search(value), \
                f'invalid flash float: {value}'
            return float(value)
        assert False, f'unknown field type: {field_type}, value: {value}'
    @classmethod
    def remove_quotes(cls, value):
        # any value may be quoted
        return re.sub(r'^"(.*)"$', r'\1', value)
    def __create_views(self, schema):
        # every field goes on the "advanced" tab by default
        schema.view = self.views
        for m in schema.model:
            schema.view[m].pkupdate(
                fieldsPerTab=8,
                advanced=[v for v in schema.model[m].keys()],
                basic=[],
            )
    def __field_default(self, field):
        # constants keep their raw default; others are validated/converted
        if field.is_constant:
            assert field.default, f'missing constant value: {field.name}'
            return field.default
        return self.__value_for_type(field, field.default)
    def __field_type(self, field, model_name, enums):
        """Map the raw declared type onto a schema type, creating enums
        for pick lists and applying per-model special-type overrides."""
        assert field.type in self._TYPE_MAP, \
            f'unknown field type: {field.type}'
        field.type = self._TYPE_MAP[field.type]
        if field.is_constant:
            field.type = 'Constant'
            return
        if self.__is_file_field(field):
            field.type = 'SetupDatafilesOptional'
            if field.default == '"-none-"' or field.default == '"NOT SPECIFIED"':
                field.default = 'none'
            field.enum = [v[0] for v in enums[field.type]]
            return
        if model_name in self._SPECIAL_TYPES:
            if field.name in self._SPECIAL_TYPES[model_name]:
                ftype = self._SPECIAL_TYPES[model_name][field.name]
                field.type = ftype
                field.enum = [v[0] for v in enums[ftype]]
                return
        if 'valid_values' in field:
            self.__valid_values(field)
        if 'enum' in field:
            enum_name = f'{model_name}{field.name}'
            assert enum_name not in enums, \
                f'duplicate enum: {enum_name}'
            self.__add_enum(enums, enum_name, field.enum)
            field.type = enum_name
        elif field.type == 'String' and field.default == '""':
            field.type = 'OptionalString'
    def __format_schema(self):
        """Build schema.model entries; each field is a
        [label, type, default, description, min, max] list."""
        res = self.__init_schema()
        for name in sorted(self.models.keys()):
            m = self.models[name]
            fields = PKDict()
            for fname in sorted(m):
                field = m[fname]
                # [label, type, default, description, min, max]
                self.__field_type(field, name, res.enum)
                fields[fname] = [fname, field.type, self.__field_default(field)]
                if field.description:
                    fields[fname].append(field.description)
                if 'min' in field:
                    # min/max are positional, so pad the description slot
                    if not field.description:
                        fields[fname].append('')
                    if field.min is None:
                        fields[fname].append(None)
                    else:
                        fields[fname].append(self.__value_for_type(field, field.min))
                if 'max' in field:
                    fields[fname].append(self.__value_for_type(field, field.max))
            res.model[name] = fields
        self.__create_views(res)
        return flash_views.SpecializedViews().update_schema(res)
    def __init_schema(self):
        # seed the schema with the fixed enum types plus the enums derived
        # from setup_vars and setup_datafiles
        enums = PKDict()
        for (name, values) in PKDict(
            DiffuseBoundaryType=['dirichlet', 'neumann', 'outflow', 'zero-gradient'],
            GridBoundaryType=['axisymmetric', 'diode', 'eqtsymmetric', 'outflow', 'periodic', 'reflect', 'user'],
            GravityBoundaryType=['dirichlet', 'isolated', 'periodic'],
            GravityDirection=['x', 'y', 'z'],
            LaserCrossSectionOptional=['none', 'gaussian1D', 'gaussian2D', 'uniform'],
            RadTransMGDBoundaryType=['dirichlet', 'neumann', 'reflecting', 'vacuum'],
            RiemannSolver=['Roe', 'HLL', 'HLLC', 'Marquina', 'MarquinaModified', 'Hybrid', 'HLLD'],
            SetupDatafiles=self.datafiles,
            SetupDatafilesOptional=['none', *sorted(self.datafiles)],
            VariableName=sorted(self.var_names),
            VariableNameOptional=['none', *sorted(self.var_names)],
        ).items():
            self.__add_enum(enums, name, values)
        return PKDict(
            enum=enums,
            model=PKDict(),
        )
    def __is_file_field(self, field):
        #TODO(pjm): there may be other cases of datafile selection
        return re.search(r'^eos_.*?TableFile$', field.name) \
            or re.search(r'^op_.*?FileName$', field.name)
    def __parse_datafiles(self, in_stream):
        # one datafile path per line; keep only the basename
        res = []
        for line in in_stream:
            line = line.strip()
            if line:
                res.append(os.path.basename(line))
        return res
    def __parse_description(self, text, field):
        """Accumulate a continuation line into the field's description or
        its pending Valid Values clause."""
        if not field.description:
            m = re.search(r'^Valid Values:\s+(.*)', text)
            if m:
                assert 'valid_values' not in field, \
                    f'duplicate valid value def: {text}'
                field.valid_values = m.group(1)
                return
            if re.search(r'^"', text):
                # a quoted line continues a previous Valid Values list
                assert 'valid_values' in field, \
                    f'expected previous valid values def: {text}'
                field.valid_values += ' ' + text
                return
        field.description += (' ' if field.description else '') + text
    def __parse_field(self, text, model):
        """Parse a field definition line and add it to model, e.g.
        name [REAL] CONSTANT [1.0]."""
        # [BOOLEAN] CONSTANT [FALSE]
        # [STRING] ["FLASH 3 run"]
        # [INTEGER] [2]
        # [REAL] [1.0]
        assert model is not None
        m = re.search(r'^(.*?)\s\[(.*?)\]\s(CONSTANT)?\s*\[(.*?)\](.*)', text)
        # fixed: the messages below previously interpolated an undefined
        # "line" variable, raising NameError instead of AssertionError
        assert m, f'unparsable field: {text}'
        name = m.group(1)
        assert name not in model, f'duplicate field: {name}'
        ftype = m.group(2)
        is_constant = m.group(3) == 'CONSTANT'
        fdefault = m.group(4)
        assert not m.group(5), f'extra values in field def: {text}'
        model[name] = PKDict(
            name=name,
            type=ftype,
            is_constant=is_constant,
            default=fdefault,
            description='',
        )
        return model[name]
    def __parse_model(self, text, models, views):
        # a top-level (unindented) line names a flash unit / schema model
        name = self.model_name_from_flash_unit_name(text)
        if name not in models:
            models[name] = PKDict()
            views[name] = PKDict(
                title=text,
            )
        return models[name]
    def __parse_setup(self, in_stream):
        """Parse setup_params; indentation selects the line kind:
        0 = model, 4 = field, 8 = description continuation."""
        models = PKDict()
        views = PKDict()
        model = None
        field = None
        for line in in_stream:
            if line == '\n':
                continue
            m = re.search(r'(^\w.*)', line)
            if m:
                model = self.__parse_model(m.group(1), models, views)
                continue
            m = re.search(r'^\s{4}(\w.*)', line)
            if m:
                field = self.__parse_field(m.group(1), model)
                continue
            m = re.search(r'^\s{8}(\S.*)', line)
            if m:
                self.__parse_description(m.group(1), field)
                continue
            assert False, f'unhandled line: "{line}"'
        return models, views
    @classmethod
    def __parse_special_number(cls, field_type, value):
        # fortran HUGE()/TINY() specials, with an optional leading minus
        res = cls._HUGE[field_type] if re.search(r'.*?HUGE', value) \
            else cls._TINY[field_type]
        if re.search(r'^-', value):
            return - res
        return res
    @classmethod
    def __parse_string(cls, value):
        assert re.search(r'^".*"$', value), \
            f'invalid string: {value}'
        return cls.remove_quotes(value)
    def __parse_vars(self, in_stream):
        """Parse setup_vars: collect the "Name:" entries into a set."""
        res = set()
        state = 'name'
        for line in in_stream:
            if line == '\n':
                if state == 'newline':
                    state = 'name'
                continue
            if state == 'name':
                m = re.search(r'^Name: (.*)\s*$', line)
                assert m, f'expected var name: {line}'
                res.add(m.group(1))
                state = 'newline'
        return res
    def __valid_values(self, field):
        """Convert a "Valid Values:" clause into min/max bounds or an enum,
        or fall back to restoring the note into the description."""
        vv = field.valid_values
        if vv == 'Unconstrained':
            return
        m = re.search(r'^([\-0-9\.]+) to ([\-0-9\.]+)$', vv)
        if m:
            field.min = m.group(1)
            field.max = m.group(2)
            return
        if re.search(r'^([\-0-9]+,\s*)+[\-0-9]+$', vv):
            # integer pick list
            field.enum = re.split(r',\s*', vv)
            return
        if re.search(r'^(".*?",\s*)*".*?"+$', vv):
            # string pick list
            field.enum = [self.__parse_string(v) for v in re.split(r',\s*', vv)]
            return
        m = re.search(r'^(\S+) to INFTY$', vv)
        if m:
            field.min = m.group(1)
            return
        m = re.search(r'^-INFTY to (\S+)$', vv)
        if m:
            field.min = None
            field.max = m.group(1)
            return
        m = re.search(r'\sto\s([\-0-9]+)$', vv)
        if m:
            field.min = None
            field.max = m.group(1)
            # NOTE(review): only the "Valid Values:" prefix is restored here,
            # not the vv text itself — confirm this is intentional
            field.description = f'Valid Values: {field.description}'
            return
        if re.search(r'\sto\sINFTY$', vv) or re.search(r'^([\-0-9.]+,\s*)+[\-0-9.]+$', vv):
            # restore valid value info to description
            field.description = f'Valid Values: {field.description}'
            return
        assert False, f'unhandled Valid Values for {field.name}: {vv}'
    def __value_for_type(self, field, value):
        """Validate/convert a raw default or bound for the field's type."""
        if 'enum' in field:
            if re.search(r'"', value):
                value = self.__parse_string(value)
            if field.type.endswith('Optional'):
                # blank means "first enum value", conventionally 'none'
                if not value or value == ' ':
                    value = field.enum[0]
            assert value in field.enum, \
                f'enum: {value} not in list: {field.enum}'
            return value
        if field.type == 'Boolean':
            assert re.search(r'^(true|false)$', value, re.IGNORECASE), \
                f'{field.name} invalid flash boolean: {value}'
            return '1' if value.lower() == 'true' else '0'
        return self.parse_string_or_number(field.type, value)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A deep MNIST classifier using convolutional layers.
Sample usage:
python mnist.py --help
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import time
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from tensorflow.examples.tutorials.mnist import input_data
FLAGS = None
class MNISTModel(tfe.Network):
    """MNIST Network.

    Network structure is equivalent to:
    https://github.com/tensorflow/tensorflow/blob/r1.5/tensorflow/examples/tutorials/mnist/mnist_deep.py
    and
    https://github.com/tensorflow/models/blob/master/tutorials/image/mnist/convolutional.py

    But written using the tf.layers API.
    """

    def __init__(self, data_format):
        """Creates a model for classifying a hand-written digit.

        Args:
          data_format: Either 'channels_first' or 'channels_last'.
            'channels_first' is typically faster on GPUs while
            'channels_last' is typically faster on CPUs. See
            https://www.tensorflow.org/performance/performance_guide#data_formats
        """
        super(MNISTModel, self).__init__(name='')
        if data_format == 'channels_first':
            self._input_shape = [-1, 1, 28, 28]
        else:
            assert data_format == 'channels_last'
            self._input_shape = [-1, 28, 28, 1]
        # track_layer registers each layer's variables with the Network;
        # construction order is preserved from the original model.
        self.conv1 = self.track_layer(
            tf.layers.Conv2D(32, 5, data_format=data_format, activation=tf.nn.relu))
        self.conv2 = self.track_layer(
            tf.layers.Conv2D(64, 5, data_format=data_format, activation=tf.nn.relu))
        self.fc1 = self.track_layer(tf.layers.Dense(1024, activation=tf.nn.relu))
        self.fc2 = self.track_layer(tf.layers.Dense(10))
        self.dropout = self.track_layer(tf.layers.Dropout(0.5))
        self.max_pool2d = self.track_layer(
            tf.layers.MaxPooling2D(
                (2, 2), (2, 2), padding='SAME', data_format=data_format))

    def call(self, inputs, training):
        """Computes logits for a batch of images.

        Users should invoke __call__ to run the network, which delegates to
        this method (and not call this method directly).

        Args:
          inputs: A batch of images as a Tensor with shape [batch_size, 784].
          training: True if invoked in the context of training (causing
            dropout to be applied). False otherwise.

        Returns:
          A Tensor with shape [batch_size, 10] containing the predicted
          logits for each image in the batch, for each of the 10 classes.
        """
        x = tf.reshape(inputs, self._input_shape)
        # conv -> pool -> conv -> pool
        for layer in (self.conv1, self.max_pool2d, self.conv2, self.max_pool2d):
            x = layer(x)
        x = tf.layers.flatten(x)
        x = self.fc1(x)
        x = self.dropout(x, training=training)
        return self.fc2(x)
def loss(predictions, labels):
    """Mean softmax cross-entropy between predicted logits and labels."""
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
        logits=predictions, labels=labels)
    return tf.reduce_mean(cross_entropy)
def compute_accuracy(predictions, labels):
    """Fraction of the batch where the argmax prediction matches the label."""
    predicted_class = tf.argmax(predictions, axis=1, output_type=tf.int64)
    actual_class = tf.argmax(labels, axis=1, output_type=tf.int64)
    correct = tf.cast(tf.equal(predicted_class, actual_class), dtype=tf.float32)
    return tf.reduce_sum(correct) / float(predictions.shape[0].value)
def train_one_epoch(model, optimizer, dataset, log_interval=None):
    """Trains model on `dataset` using `optimizer`."""
    tf.train.get_or_create_global_step()
    for (batch, (images, labels)) in enumerate(tfe.Iterator(dataset)):
        # summaries are recorded every 10 global steps
        with tf.contrib.summary.record_summaries_every_n_global_steps(10):
            with tfe.GradientTape() as tape:
                logits = model(images, training=True)
                batch_loss = loss(logits, labels)
                tf.contrib.summary.scalar('loss', batch_loss)
                tf.contrib.summary.scalar('accuracy',
                                          compute_accuracy(logits, labels))
            grads = tape.gradient(batch_loss, model.variables)
            optimizer.apply_gradients(zip(grads, model.variables))
            if log_interval and batch % log_interval == 0:
                print('Batch #%d\tLoss: %.6f' % (batch, batch_loss))
def test(model, dataset):
    """Perform an evaluation of `model` on the examples from `dataset`."""
    avg_loss = tfe.metrics.Mean('loss')
    accuracy = tfe.metrics.Accuracy('accuracy')
    for (images, labels) in tfe.Iterator(dataset):
        logits = model(images, training=False)
        avg_loss(loss(logits, labels))
        accuracy(tf.argmax(logits, axis=1, output_type=tf.int64),
                 tf.argmax(labels, axis=1, output_type=tf.int64))
    print('Test set: Average loss: %.4f, Accuracy: %4f%%\n' %
          (avg_loss.result(), 100 * accuracy.result()))
    with tf.contrib.summary.always_record_summaries():
        tf.contrib.summary.scalar('loss', avg_loss.result())
        tf.contrib.summary.scalar('accuracy', accuracy.result())
def load_data(data_dir):
    """Returns training and test tf.data.Dataset objects."""
    data = input_data.read_data_sets(data_dir, one_hot=True)
    train_ds = tf.data.Dataset.from_tensor_slices(
        (data.train.images, data.train.labels))
    test_ds = tf.data.Dataset.from_tensors(
        (data.test.images, data.test.labels))
    return (train_ds, test_ds)
def main(_):
    """Trains and evaluates the MNIST model for 10 epochs, checkpointing
    once per epoch."""
    tfe.enable_eager_execution()
    # Prefer GPU with channels_first; fall back to CPU with channels_last.
    (device, data_format) = ('/gpu:0', 'channels_first')
    if FLAGS.no_gpu or tfe.num_gpus() <= 0:
        (device, data_format) = ('/cpu:0', 'channels_last')
    print('Using device %s, and data format %s.' % (device, data_format))
    # Load the datasets
    (train_ds, test_ds) = load_data(FLAGS.data_dir)
    train_ds = train_ds.shuffle(60000).batch(FLAGS.batch_size)
    # Create the model and optimizer
    model = MNISTModel(data_format)
    optimizer = tf.train.MomentumOptimizer(FLAGS.lr, FLAGS.momentum)
    if FLAGS.output_dir:
        train_dir = os.path.join(FLAGS.output_dir, 'train')
        test_dir = os.path.join(FLAGS.output_dir, 'eval')
        tf.gfile.MakeDirs(FLAGS.output_dir)
    else:
        # a None log dir makes the summary writers no-ops
        train_dir = None
        test_dir = None
    summary_writer = tf.contrib.summary.create_file_writer(
        train_dir, flush_millis=10000)
    test_summary_writer = tf.contrib.summary.create_file_writer(
        test_dir, flush_millis=10000, name='test')
    checkpoint_prefix = os.path.join(FLAGS.checkpoint_dir, 'ckpt')
    with tf.device(device):
        for epoch in range(1, 11):
            # variables are restored from the latest checkpoint as they are
            # created inside this context
            with tfe.restore_variables_on_create(
                    tf.train.latest_checkpoint(FLAGS.checkpoint_dir)):
                global_step = tf.train.get_or_create_global_step()
                start = time.time()
                with summary_writer.as_default():
                    train_one_epoch(model, optimizer, train_ds, FLAGS.log_interval)
                end = time.time()
                print('\nTrain time for epoch #%d (global step %d): %f' % (
                    epoch, global_step.numpy(), end - start))
                with test_summary_writer.as_default():
                    test(model, test_ds)
            # checkpoint after each epoch, outside the restore context
            all_variables = (
                model.variables
                + optimizer.variables()
                + [global_step])
            tfe.Saver(all_variables).save(
                checkpoint_prefix, global_step=global_step)
if __name__ == '__main__':
    # NOTE(review): flag names mix dashes (--data-dir) and underscores
    # (--output_dir); left unchanged for command-line compatibility.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--data-dir',
        type=str,
        default='/tmp/tensorflow/mnist/input_data',
        help='Directory for storing input data')
    parser.add_argument(
        '--batch-size',
        type=int,
        default=64,
        metavar='N',
        help='input batch size for training (default: 64)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument(
        '--output_dir',
        type=str,
        default=None,
        metavar='N',
        help='Directory to write TensorBoard summaries')
    parser.add_argument(
        '--checkpoint_dir',
        type=str,
        default='/tmp/tensorflow/mnist/checkpoints/',
        metavar='N',
        help='Directory to save checkpoints in (once per epoch)')
    parser.add_argument(
        '--lr',
        type=float,
        default=0.01,
        metavar='LR',
        help='learning rate (default: 0.01)')
    parser.add_argument(
        '--momentum',
        type=float,
        default=0.5,
        metavar='M',
        help='SGD momentum (default: 0.5)')
    parser.add_argument(
        '--no-gpu',
        action='store_true',
        default=False,
        help='disables GPU usage even if a GPU is available')
    FLAGS, unparsed = parser.parse_known_args()
    # forward any unparsed args to tf.app.run
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| |
"""
pycollect implements `collectd`s binary protocol[1], using Tornado's `IOLoop`
and `PeriodicCallback` classes.
By default, metrics are `sum()`ed and passed to the configured `collectd`
instance every 60 seconds. The summation function can be defined on a
per-metric basis (see `CollectdClient.queue()` and
`CollectdClient._summarize_queue()`), and the interval can be set on a
per-client basis.
[1] https://collectd.org/wiki/index.php/Binary_protocol
"""
import collections
import re
import socket
import struct
import time
from tornado import ioloop
# Public API. Fixed: a missing comma after "value_to_part" caused implicit
# string concatenation, exporting the bogus name "value_to_partPART_TYPE"
# and silently dropping both real names from `import *`.
__all__ = [
    "CollectdClient", "PycollectdException",
    "sanitize", "count_to_value_part",
    "part_type_to_data_type", "value_to_part",
    "PART_TYPE", "VALUE_TYPE", "MAX_PACKET_LENGTH", "DEFAULT_PLUGIN_NAME",
    "DEFAULT_PLUGIN_INSTANCE", "DEFAULT_PLUGIN_TYPE", "DEFAULT_SEND_INTERVAL",
    "DEFAULT_PORT", "DEFAULT_CUMM_FUNCTION",
]
# Constants
# Part type ids from the collectd binary protocol
# (https://collectd.org/wiki/index.php/Binary_protocol).
PART_TYPE = {
    'HOST': 0x0000,
    'TIME': 0x0001,
    'PLUGIN': 0x0002,
    'PLUGIN_INSTANCE': 0x0003,
    'TYPE': 0x0004,
    'TYPE_INSTANCE': 0x0005,
    'VALUES': 0x0006,
    'INTERVAL': 0x0007,
    'TIME_HR': 0x0008,
    'INTERVAL_HR': 0x0009,
    'MESSAGE': 0x0100,
    'SEVERITY': 0x0101,
    'SIGNATURE': 0x0200,
    'ENCRYPTION': 0x0210
}
# Value type name -> (protocol type code, struct pack format). Note GAUGE is
# a little-endian double ('<d') per the protocol, while the integer types are
# big-endian.
VALUE_TYPE = {
    'COUNTER': (0x00, '>Q'),
    'GAUGE': (0x01, '<d'),
    'DERIVE': (0x02, '>q'),
    'ABSOLUTE': (0x03, '>Q')
}
MAX_PACKET_LENGTH = 1024  # collectd's default receive buffer size
DEFAULT_PLUGIN_NAME = "any"
DEFAULT_PLUGIN_INSTANCE = ""
DEFAULT_PLUGIN_TYPE = "gauge"
DEFAULT_SEND_INTERVAL = 60 # seconds
DEFAULT_PORT = 25826
DEFAULT_CUMM_FUNCTION = sum
#
# Utility Functions
#
def sanitize(name):
    """Sanitizes a metric name."""
    # collapse each run of non-alphanumerics to "_", then trim the ends
    collapsed = re.sub(r"[^a-zA-Z0-9]+", "_", name)
    return collapsed.strip("_")
def count_to_value_part(name, count, value_type='GAUGE'):
    """
    Converts a metric name and value into 2 collectd packet parts:
    * a type instance part, setting the metric name as the type instance
    name for subsequent values
    * a value part containing a single value: the provided count
    """
    data_type, pack_format = VALUE_TYPE[value_type]
    count_value = struct.pack(pack_format, count)
    # VALUES part layout: part type (2), total length (2), value count (2),
    # one data-type byte per value, then the packed values; the 7 covers the
    # fixed header plus the type byte.
    # NOTE(review): joining struct.pack output with '' is Python 2 only
    # (struct.pack returns bytes on Python 3).
    return ''.join([
        value_to_part(PART_TYPE['TYPE_INSTANCE'], name),
        struct.pack(
            '>HHHB',
            PART_TYPE['VALUES'], len(count_value) + 7, 1, data_type
        ),
        count_value
    ])
def part_type_to_data_type(part_type):
    """Returns the appropriate data type -- either numeric or string -- for a
    collectd part type"""
    numeric_parts = (
        PART_TYPE['TIME'], PART_TYPE['TIME_HR'],
        PART_TYPE['INTERVAL'], PART_TYPE['INTERVAL_HR'])
    string_parts = (
        PART_TYPE['HOST'], PART_TYPE['PLUGIN'],
        PART_TYPE['PLUGIN_INSTANCE'], PART_TYPE['TYPE'],
        PART_TYPE['TYPE_INSTANCE'])
    if part_type in numeric_parts:
        return 'numeric'
    if part_type in string_parts:
        return 'string'
    raise PycollectdException(
        "Unknown or unimplemented part_type {}".format(part_type)
    )
def value_to_part(part_type, value):
    """Returns a collectd packet part for the provided value"""
    # part_type(2), length(2), payload
    data_type = part_type_to_data_type(part_type)
    if data_type == 'numeric':
        # length 12 = 4 header bytes + 8-byte big-endian signed integer
        return struct.pack('>HHq', part_type, 12, value)
    elif data_type == 'string':
        # length counts the 4 header bytes plus the trailing NUL
        # NOTE(review): "+ value + '\0'" assumes Python 2 str; on Python 3
        # mixing bytes and str here would raise TypeError.
        return struct.pack('>HH', part_type, len(value) + 5) + value + '\0'
    else:
        raise PycollectdException("Invalid data_type {}".format(data_type))
#
# Classes
#
class PycollectdException(Exception):
    """Base exception raised for pycollectd protocol errors."""
class CollectdClient(object): # pylint: disable=R0902
    """
    Provides an API for sending metrics to a `collectd` instance.

    Basic example:

        collectd = CollectdClient("collectd.example.com")
        collectd.queue('summed_random', random.randrange(100))
        collectd.queue('summed_random', random.randrange(100))
        collectd.queue('summed_random', random.randrange(100))

        def avg(values):
            return sum(values)/float(len(values))

        collectd.queue('avg_values', 1, avg)
        collectd.queue('avg_values', 2, avg)
        collectd.queue('avg_values', 3, avg)

        collectd.start()
        ioloop.IOLoop.instance().start()
    """
    def __init__(self, collectd_hostname, **kwargs):
        """
        Creates a `CollectdClient` for communicating with the `collectd`
        endpoint at `collectd_hostname`.

        Valid kwargs:
        * `collectd_port`: The UDP port to talk to collectd on.
        * `hostname`: The hostname of this machine. Defaults to
          `socket.getfqdn()`
        * `plugin_name`: The name of the collectd-plugin we are reporting
          stats for. Defaults to "any".
        * `plugin_instance`: The instance of the plugin we are reporting
          stats for. Defaults to ""
        * `plugin_type`: The data-type for this plugin.
        * `send_interval`: Seconds between each data send.
        * `io_loop`: The tornado.ioloop.IOLoop instance to use. Defaults to
          `ioloop.IOLoop.instance()`
        """
        collectd_port = kwargs.pop("collectd_port", DEFAULT_PORT)
        self.collectd_addr = (collectd_hostname, collectd_port)
        self.hostname = kwargs.pop("hostname", socket.getfqdn())
        self.plugin_name = kwargs.pop(
            "plugin_name", DEFAULT_PLUGIN_NAME)
        self.plugin_instance = kwargs.pop(
            "plugin_instance", DEFAULT_PLUGIN_INSTANCE)
        self.plugin_type = kwargs.pop(
            "plugin_type", DEFAULT_PLUGIN_TYPE)
        self.send_interval = kwargs.pop(
            "send_interval", DEFAULT_SEND_INTERVAL)
        self.io_loop = kwargs.pop("io_loop", ioloop.IOLoop.instance())
        self._queue = collections.deque()
        # PeriodicCallback takes its interval in milliseconds
        self._timer = ioloop.PeriodicCallback(
            self._process_queue,
            self.send_interval * 1000,
            self.io_loop
        )
        # anything left in kwargs was not recognized above
        # NOTE(review): "Unkown" typo in the message is a runtime string and
        # is deliberately left unchanged here.
        if(len(kwargs) != 0):
            raise ValueError("Unkown keys for {}: {}".format(
                self.__class__.__name__,
                ",".join(kwargs.keys())
            ))
    def queue(self, metric, value, cumm_func=None):
        """
        Records a metric to be summarized and sent to `collectd`.

        The `cumm_func` argument should be a function that takes a sequence
        of values, returning their summarized form -- if none is defined,
        `sum()` will be used.

        If you pass mutliple different `cumm_func`s for a single `metric`,
        the most recent `cumm_func` will be used. E.g., calling:

            collectd.queue('foo', 1, f1)
            collectd.queue('foo', 2)
            collectd.queue('bar', 3, f2)
            collectd.queue('foo', 4, f3)

        would result in `f3` being used to summarize "foo" values, and `f2`
        being used for summarizing "bar" values.
        """
        self._queue.append((metric, value, cumm_func))
    def start(self):
        """
        Starts the periodic loop.
        """
        self._timer.start()
    def stop(self):
        """
        Stops the periodic loop.
        """
        self._timer.stop()
    def _process_queue(self):
        """
        Creates summaries of the metrics queued so far, and sends them to
        `collectd`.

        Called automatically by `self._timer` every `self.send_interval`
        seconds.
        """
        summed_values = self._summarize_queue()
        sent_values = self._send_values(summed_values)
        return sent_values
    def _summarize_queue(self):
        """
        Generates summaries of the queued metrics.
        """
        values_by_metric = collections.defaultdict(list)
        summed_values = {}
        functions = {}
        for metric, value, cumm_func in self._queue:
            metric = sanitize(metric)
            values_by_metric[metric].append(value)
            if cumm_func is not None:
                # the most recently supplied cumm_func for a metric wins
                functions[metric] = cumm_func
        self._queue.clear()
        # NOTE(review): iteritems() is Python 2 only
        for metric, values in values_by_metric.iteritems():
            cumm_func = functions.get(metric, DEFAULT_CUMM_FUNCTION)
            summed_values[metric] = cumm_func(values)
        return summed_values
    def _send_values(self, values):
        """
        Sends the summarized values to the `collectd` instance.

        Returns the number of packets sent successfully.
        """
        values_sent = 0
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
        for packet in self.counts_to_packets(values):
            bytes_tx = sock.sendto(packet, self.collectd_addr)
            # count a packet as sent only if the full datagram went out
            if len(packet) == bytes_tx:
                values_sent += 1
        sock.close()
        return values_sent
    def counts_to_packets(self, counts, timestamp=None):
        """
        Given a dict of { metric: value }, yields collectd UDP packets
        """
        packet = self.head_part(timestamp)
        # NOTE(review): iteritems() is Python 2 only
        for name, count in counts.iteritems():
            count_part = count_to_value_part(name, count)
            # flush before exceeding the maximum packet size; each new
            # packet restarts with the header parts
            if len(packet) + len(count_part) >= MAX_PACKET_LENGTH:
                yield packet
                packet = self.head_part(timestamp)
            packet += count_part
        yield packet
    def head_part(self, timestamp=None):
        """
        Returns a sequence of parts to use as a header for a collectd packet.

        These header parts provide the context for any subsequent value parts.
        """
        timestamp = timestamp or time.time()
        return ''.join([
            value_to_part(PART_TYPE['HOST'], self.hostname),
            value_to_part(PART_TYPE['TIME'], timestamp),
            value_to_part(PART_TYPE['PLUGIN'], self.plugin_name),
            value_to_part(PART_TYPE['PLUGIN_INSTANCE'], self.plugin_instance),
            value_to_part(PART_TYPE['TYPE'], self.plugin_type),
            value_to_part(PART_TYPE['INTERVAL'], self.send_interval)
        ])
    # Predefined summarizing functions
    @staticmethod
    def average(values):
        """
        Returns the average of the provided values.
        """
        return sum(values) / float(len(values))
if __name__ == "__main__":
    # Inline smoke tests; run with "python pycollectd.py" (Python 2).
    # Expected byte sequences follow the collectd binary protocol layouts.
    import string
    import unittest

    class PyCollectdClientTest(unittest.TestCase):
        def setUp(self):
            self.client = CollectdClient("localhost", hostname="hostname")
        def test_header(self):
            # HOST, TIME, PLUGIN, PLUGIN_INSTANCE, TYPE, INTERVAL parts
            expected = "".join(chr(x) for x in [
                0x00, 0x00, 0x00, 0x0d, 0x68, 0x6f, 0x73, 0x74,
                0x6e, 0x61, 0x6d, 0x65, 0x00, 0x00, 0x01, 0x00,
                0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x01, 0x00, 0x02, 0x00, 0x08, 0x61, 0x6e, 0x79,
                0x00, 0x00, 0x03, 0x00, 0x05, 0x00, 0x00, 0x04,
                0x00, 0x0a, 0x67, 0x61, 0x75, 0x67, 0x65, 0x00,
                0x00, 0x07, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x3c
            ])
            header = self.client.head_part(1)
            self.assertEqual(header, expected)
        def test_sanitize(self):
            self.assertEqual(
                sanitize(string.ascii_letters), string.ascii_letters
            )
            self.assertEqual(sanitize(string.digits), string.digits)
            self.assertEqual(sanitize("`~!@#$%^&*()-_=+[{]};:'\",<.>/? "), "")
            self.assertEqual(
                sanitize("~~this~~is~~a~~test~~string~~"),
                "this_is_a_test_string"
            )
        def test_pack_numeric(self):
            self.assertEqual(value_to_part(PART_TYPE['TIME'], -1),
                             "".join(chr(x) for x in [
                                 0x00, 0x01, 0x00, 0x0c, 0xff, 0xff, 0xff, 0xff,
                                 0xff, 0xff, 0xff, 0xff
                             ]))
            self.assertEqual(value_to_part(PART_TYPE['TIME'], 1),
                             "".join(chr(x) for x in [
                                 0x00, 0x01, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x00,
                                 0x00, 0x00, 0x00, 0x01
                             ]))
        def test_pack_string(self):
            self.assertEqual(value_to_part(PART_TYPE['HOST'], "hostname"),
                             "".join(chr(x) for x in [
                                 0x00, 0x00, 0x00, 0x0d, 0x68, 0x6f, 0x73, 0x74,
                                 0x6e, 0x61, 0x6d, 0x65, 0x00
                             ]))
            self.assertEqual(value_to_part(PART_TYPE['PLUGIN'], "plugin"),
                             "".join(chr(x) for x in [
                                 0x00, 0x02, 0x00, 0x0b, 0x70, 0x6c, 0x75, 0x67,
                                 0x69, 0x6e, 0x00
                             ]))
        def test_pack_value(self):
            # GAUGE values are little-endian doubles in the VALUES part
            self.assertEqual(count_to_value_part("value", 1),
                             "".join(chr(x) for x in [
                                 0x00, 0x05, 0x00, 0x0a, 0x76, 0x61, 0x6c, 0x75,
                                 0x65, 0x00, 0x00, 0x06, 0x00, 0x0f, 0x00, 0x01,
                                 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0,
                                 0x3f
                             ]))
            self.assertEqual(count_to_value_part("value", -1),
                             "".join(chr(x) for x in [
                                 0x00, 0x05, 0x00, 0x0a, 0x76, 0x61, 0x6c, 0x75,
                                 0x65, 0x00, 0x00, 0x06, 0x00, 0x0f, 0x00, 0x01,
                                 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0,
                                 0xbf
                             ]))
            self.assertEqual(count_to_value_part("value", 0),
                             "".join(chr(x) for x in [
                                 0x00, 0x05, 0x00, 0x0a, 0x76, 0x61, 0x6c, 0x75,
                                 0x65, 0x00, 0x00, 0x06, 0x00, 0x0f, 0x00, 0x01,
                                 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                                 0x00
                             ]))
            self.assertEqual(count_to_value_part("value", 12345),
                             "".join(chr(x) for x in [
                                 0x00, 0x05, 0x00, 0x0a, 0x76, 0x61, 0x6c, 0x75,
                                 0x65, 0x00, 0x00, 0x06, 0x00, 0x0f, 0x00, 0x01,
                                 0x01, 0x00, 0x00, 0x00, 0x00, 0x80, 0x1c, 0xc8,
                                 0x40
                             ]))

    unittest.main()
| |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
from PyQt4.QtGui import *
from froi.gui.base.utils import get_data_dir
from froi.gui.base.utils import get_file_names
from froi.io.xml_api import get_info
from froi.io.atlas_api import *
def get_atlas_names():
    """
    Get atlas name which is equal to xml names.
    """
    atlas_dir = os.path.join(get_data_dir(), 'atlas')
    return get_file_names(atlas_dir, '.xml')
def get_atlas_data(xml_name):
    """
    Get atlas nii data.
    """
    atlas_dir = os.path.join(get_data_dir(), 'atlas')
    # the xml lists the image file; get_info returns a list, take the first
    nii_name = get_info(os.path.join(atlas_dir, xml_name), 'imagefile')
    return get_nii_data(atlas_dir, nii_name[0])
def get_label_info(xml_name):
    """
    Get atlas label information.
    """
    atlas_dir = os.path.join(get_data_dir(), 'atlas')
    return get_info(os.path.join(atlas_dir, xml_name), 'label')
class AtlasDialog(QDialog):
"""
A dialog for action of Atlas.
"""
default_atlas = ['Harvard-Oxford_Cortical_Structural_Atlas.xml', \
'Harvard-Oxford_Subcortical_Structural_Atlas.xml']
def __init__(self, model, parent=None):
    """Create the dialog.

    model supplies the crosshair position (get_cross_pos) and the
    cross_pos_changed signal; parent is the usual Qt parent widget.
    """
    super(AtlasDialog, self).__init__(parent)
    self._model = model
    self.xml_names = get_atlas_names()
    # per-atlas data and label lists, filled in by _init_gui
    self.nii_data, self.label_list=[],[]
    self._init_gui()
    self._create_actions()
def atlas_display(self, data, label):
    """
    The layout of a single atlas prob information.
    """
    pos = self._model.get_cross_pos()
    probs = get_atlasprob(data, pos[0], pos[1], pos[2])
    return sorting(label, probs)
def _init_gui(self):
"""
Initialize GUI.
"""
# set dialog title
self.setWindowTitle("Candidate Label")
# initialize widgets
# self.source_combo = QComboBox()
vbox_layout = QVBoxLayout()
self.scrollContents = QWidget()
self.Layout_2 = QHBoxLayout(self.scrollContents)
self.Layout_2.addLayout(vbox_layout)
self.label,self.prob = list(),list()
for i in range(len(self.xml_names)):
self.label.append(QLabel())
self.prob.append(QLabel())
self.nii_data.append(0)
self.label_list.append(0)
for i in range(len(self.xml_names)):
self.label[i].setText(self.xml_names[i].split('.')[0])
self.label[i].setFont(QFont("Roman times", 10, QFont.Bold))
vbox_layout.addWidget(self.label[i])
vbox_layout.addWidget(self.prob[i])
if (self.xml_names[i] in self.default_atlas):
self.nii_data[i] = get_atlas_data(self.xml_names[i])
self.label_list[i] = get_label_info(self.xml_names[i])
atlas_prob = self.atlas_display(self.nii_data[i], self.label_list[i])
self.prob[i].setText(atlas_prob)
else:
self.label[i].setVisible(False)
self.prob[i].setVisible(False)
self.Layout1=QVBoxLayout(self)
self.scrollArea=QScrollArea(self)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setWidget(self.scrollContents)
self.Layout1.addWidget(self.scrollArea)
self.setGeometry(300,260,300,260)
self.space = QLabel(" ")
self.set_button = QPushButton("Select Atlas")
grid_layout = QGridLayout()
grid_layout.addWidget(self.space,0,0)
grid_layout.addWidget(self.set_button,0,1)
self.Layout1.addLayout(grid_layout)
self.setLayout(self.Layout1)
def _create_actions(self):
"""
Create actions for button and cross_changed
"""
self.set_button.clicked.connect(self._set_dialog)
self._model.cross_pos_changed.connect(self._update_prob)
def _update_prob(self):
"""
Update atlas probability values
"""
for i in range(len(self.xml_names)):
if (self.label_list[i]!=0):
atlas_prob = self.atlas_display(self.nii_data[i], self.label_list[i])
self.prob[i].setText(atlas_prob)
def _set_dialog(self):
"""
Setting clicked
"""
self.stat=list()
for i in range(len(self.xml_names)):
self.stat.append(self.prob[i].isVisible())
new_dialog = SettingDialog(self.stat)
new_dialog.exec_()
self.status = new_dialog._get_checkbox_status()
for i in range(len(self.xml_names)):
if (self.status[i]):
self.label[i].setVisible(True)
self.prob[i].setVisible(True)
if not self.label_list[i]:
self.nii_data[i] = get_atlas_data(self.xml_names[i])
self.label_list[i] = get_label_info(self.xml_names[i])
atlas_prob = self.atlas_display(self.nii_data[i], self.label_list[i])
self.prob[i].setText(atlas_prob)
else:
self.label[i].setVisible(False)
self.prob[i].setVisible(False)
def _get_setting_status(self):
"""
Get setting status.
"""
return self.status
class SettingDialog(QDialog):
    """
    A dialog for setting button.

    Presents one checkbox per atlas, pre-checked according to the
    visibility flags passed in `stat`; the updated flags are read back
    via `_get_checkbox_status` after the dialog closes.
    """
    def __init__(self, stat, parent=None):
        super(SettingDialog, self).__init__(parent)
        # Initial visibility flags, one bool per atlas.
        self.stat= stat
        self.xml_names = get_atlas_names()
        self._init_gui()
        self._create_actions()
    def _init_gui(self):
        """
        Initialize GUI.
        """
        self.label,self.check=list(),list()
        for i in range(len(self.xml_names)):
            self.label.append(QLabel())
            self.check.append(QCheckBox())
        self.setWindowTitle("Atlas Selection")
        self.scrollContents = QWidget()
        self.Layout_2 = QHBoxLayout(self.scrollContents)
        grid_layout = QGridLayout()
        self.Layout_2.addLayout(grid_layout)
        for i in range(len(self.xml_names)):
            self.label[i].setText(self.xml_names[i].split('.')[0])
            # Mirror the incoming visibility state in the checkbox.
            if self.stat[i]:
                self.check[i].setChecked(True)
            else:
                self.check[i].setChecked(False)
            grid_layout.addWidget(self.check[i], i,0)
            grid_layout.addWidget(self.label[i],i,1)
        self.Layout1=QVBoxLayout(self)
        self.scrollArea=QScrollArea(self)
        self.scrollArea.setWidgetResizable(True)
        self.scrollArea.setWidget(self.scrollContents)
        self.Layout1.addWidget(self.scrollArea)
        self.setGeometry(300,260,250,280)
        self.save_button = QPushButton("OK")
        self.Layout1.addWidget(self.save_button)
        self.setLayout(self.Layout1)
    def _create_actions(self):
        """Create actions for the button."""
        self.save_button.clicked.connect(self._save)
    def _save(self):
        """Actions for save button.

        Snapshots the checkbox states and closes the dialog.  If the
        dialog is dismissed without pressing OK, the initial `stat`
        values are kept unchanged.
        """
        self.stat=[]
        for i in range(len(self.xml_names)):
            self.stat.append(self.check[i].isChecked())
        self.close()
    #def _update_checkbox_status(self):
    #    """Update checkbox status."""
    #    for i in range(len(self.stat)):
    #        if self.stat[i]:
    #            self.check[i].setChecked(True)
    #        else:
    #            self.check[i].setChecked(False)
    def _get_checkbox_status(self):
        """Get checkbox status."""
        return self.stat
| |
"""
run_config.py - Configuration settings for a simulation run
The user changeable values in this file are explained below. For each run
the main options include choice of potential and initial conditions, choice of
k mode range and selection of first, source and second order python classes.
"""
#Author: Ian Huston
#For license and copyright information see LICENSE.txt which was distributed with this file.
###############################################################################
# DO NOT CHANGE ANYTHING IN THIS SECTION
import numpy as np
import os.path
#Pyflation imports
from pyflation import cosmomodels as c
from pyflation import configuration
from pyflation.sourceterm import srcequations
from pyflation.helpers import getkend
# DO NOT CHANGE ANYTHING ABOVE THIS LINE
###############################################################################
#
# USER CONFIGURABLE VALUES START HERE
#
########### LOGGING ###########################################################
# The logging level changes how much is saved to logging files.
# The default value is set by the configuration module.
LOGLEVEL = configuration.LOGLEVEL
# To change the default uncomment the LOGLEVEL command below and choose from
# logging.DEBUG, .INFO, .WARN, .ERROR, .CRITICAL in decreasing order of verbosity.
# NOTE(review): the `logging` module is not imported at the top of this file,
# so uncommenting the line below also requires adding `import logging`.
# LOGLEVEL = logging.INFO
###############################################################################
############ CHOICE OF POTENTIAL AND INITIAL CONDITIONS #######################
# Each entry of the `fixtures` dictionary pairs a potential with a set of
# initial conditions.  To add a new combination just add another entry.
#
# The cq parameter controls how far into the subhorizon stage each k mode
# perturbation is initialised: initialisation takes place when k/aH = cq.
# All current fixtures use the default cq=50 and the rkdriver_tsix solver,
# so those two values are supplied by the helper below.
def _fixture(potential_func, nfields, bgystart):
    """Return one fixture entry with the common cq/solver defaults."""
    return {"potential_func": potential_func,
            "pot_params": {"nfields": nfields},
            "nfields": nfields,
            "bgystart": bgystart,
            "cq": 50,
            "solver": "rkdriver_tsix"}

fixtures = {
    "msqphisq":           _fixture("msqphisq", 1, np.array([18.0, -0.1, 0])),
    "lambdaphi4":         _fixture("lambdaphi4", 1, np.array([25.0, 0, 0])),
    "hybrid2and4":        _fixture("hybrid2and4", 1, np.array([25.0, 0, 0])),
    "linde":              _fixture("linde", 1, np.array([25.0, 0, 0])),
    "phi2over3":          _fixture("phi2over3", 1, np.array([10.0, 0, 0])),
    "msqphisq_withV0":    _fixture("msqphisq_withV0", 1, np.array([18.0, 0, 0])),
    "step_potential":     _fixture("step_potential", 1, np.array([18.0, -0.1, 0])),
    "bump_potential":     _fixture("bump_potential", 1, np.array([18.0, -0.1, 0])),
    "hybridquadratic":    _fixture("hybridquadratic", 2,
                                   np.array([12.0, 1/300.0, 12.0, 49/300.0, 0])),
    # bgystart of None defaults to (18,-0.1)/sqrt(nfields)
    "nflation":           _fixture("nflation", 2, None),
    "hybridquartic":      _fixture("hybridquartic", 2,
                                   np.array([1e-2, 2e-8, 1.63e-9, 3.26e-7, 0])),
    "productexponential": _fixture("productexponential", 2,
                                   np.array([18.0, 0.0, 0.001, 0, 0])),
}
##############################
# CHOOSE FIXTURE HERE
# Choose one of the combinations of potential and initial conditions described
# above by selecting it by name (a key of the `fixtures` dictionary).
foargs = fixtures["msqphisq"]
##############################
###############################################################################
################# WAVEMODE RANGE SELECTION ####################################
# The wavenumber grid is described by three numbers:
#   kinit   - first k value
#   deltak  - spacing between consecutive k values
#   numsoks - number of k modes used in the second order perturbation
#             calculation; should be 2**n + 1 for some integer n.
# Several ready-made ranges are collected in K_ranges.
K_ranges = {
    "K1": {"kinit": 0.5e-61,  "deltak": 1e-61,   "numsoks": 1025},
    "K2": {"kinit": 1.5e-61,  "deltak": 3e-61,   "numsoks": 1025},
    "K3": {"kinit": 0.25e-60, "deltak": 1e-60,   "numsoks": 1025},
    "K4": {"kinit": 0.85e-60, "deltak": 0.4e-60, "numsoks": 1025},
}
# Select the range to use here by name.
K_range = K_ranges["K4"]
# Unpack the chosen range; the rest of the configuration relies on these
# three module-level names.
kinit, deltak, numsoks = (K_range["kinit"], K_range["deltak"],
                          K_range["numsoks"])  # numsoks: power of two + 1
# The end value of the k range is calculated using the getkend function in
# pyflation.helpers.
# It is derived from the kinit/deltak/numsoks values selected above.
kend = getkend(kinit, deltak, numsoks)
###############################################################################
############## MODEL CLASS SELECTION ###########################################
# These options are for advanced users only.
#
# The driver class used for the first order perturbation calculation can be
# selected here. It should be accessible from this module, so add imports if
# necessary. The default class is in the pyflation.cosmomodels module.
# The default is c.FOCanonicalTwoStage.
# To set a fixed a_init value use c.FixedainitTwoStage
foclass = c.FOCanonicalTwoStage
# Here the source term class can be selected. The classes are in the
# pyflation.sourceterm.srcequations module and the default is
# srcequations.SelectedkOnlyFullSource. Other options include SlowRollSource,
# FullSingleFieldSource and SelectedkOnlySlowRollSource.
srcclass = srcequations.SelectedkOnlyFullSource
# The second order perturbation class can also be selected, again from the
# pyflation.cosmomodels module. The default is c.CanonicalRampedSecondOrder
# but the unramped version is available using c.CanonicalSecondOrder.
soclass = c.CanonicalRampedSecondOrder
# The ntheta parameter controls how finely the [0,pi] range is divided in the
# integration of the convolution terms. Default is 513.
ntheta = 513
###############################################################################
################ QSUB SUBMISSION OPTIONS ######################################
# Values substituted into the qsub submission scripts that are generated
# and submitted by pyflation-qsubstart.py.  Consult your local cluster
# administrator and the qsub man page for sensible local settings.
timelimit = "23:00:00"  # wall clock time requested for each array job
taskmin = "1"           # first task id in the job array
taskmax = "100"         # last task id in the job array
hold_jid_list = ""      # comma separated job ids this task depends on
###############################################################################
#
# USER CONFIGURABLE VALUES END HERE
#
###############################################################################
###############################################################################
# DO NOT CHANGE ANYTHING BELOW THIS LINE UNLESS SURE
###############################################################################
###############################################################################
# Second order calculation arguments.
soargs = {"solver": "rkdriver_tsix",
          "nfields": 1, #Only single field models can have second order calced
          "soclass": soclass}
#If sourceterm files already exist should they be overwritten?
overwrite = True
# Calculate code directory as being directory in which cosmomodels.py
# is situated. This should be changed if a more portable system is used.
CODEDIR = os.path.abspath(os.path.dirname(c.__file__))
#Directory names computed from directory in which run_config.py is based.
RUNDIR = os.path.abspath(os.path.dirname(__file__))
RESULTSDIR = os.path.join(RUNDIR, configuration.RESULTSDIRNAME)
LOGDIR = os.path.join(RUNDIR, configuration.LOGDIRNAME)
QSUBSCRIPTSDIR = os.path.join(RUNDIR, configuration.QSUBSCRIPTSDIRNAME)
QSUBLOGSDIR = os.path.join(RUNDIR, configuration.QSUBLOGSDIRNAME)
# Fail fast if the expected run directory layout has not been created.
if not all(map(os.path.isdir, [RESULTSDIR, LOGDIR, QSUBSCRIPTSDIR, QSUBLOGSDIR])):
    raise IOError("Directory structure is not correct!")
# This is the default log file although scripts do write to their own files.
logfile = os.path.join(LOGDIR, "run.log")
# qsub script values
runname = "pyfl"
qsublogname = os.path.join(QSUBLOGSDIR, "log")
templatefilename = "qsub-sh.template"
templatefile = os.path.join(CODEDIR, templatefilename)
foscriptname = os.path.join(QSUBSCRIPTSDIR, "fo.qsub")
srcscriptname = os.path.join(QSUBSCRIPTSDIR, "src.qsub")
src_indivscriptname = os.path.join(QSUBSCRIPTSDIR, "src_individual.qsub")
mrgscriptname = os.path.join(QSUBSCRIPTSDIR, "mrg.qsub")
soscriptname = os.path.join(QSUBSCRIPTSDIR, "so.qsub")
cmbscriptname = os.path.join(QSUBSCRIPTSDIR, "cmb.qsub")
# Results filenames
foresults = os.path.join(RESULTSDIR, "fo.hf5")
#Source results will be stored in src-#.hf5
srcstub = os.path.join(RESULTSDIR, "src-")
# Regex matched against filenames when source results are merged.
# Bug fix: use a raw string so "\d" is a regex escape (not mangled by the
# string parser) and escape the dot so it only matches a literal ".".
pattern = r"src-(\d*)\.hf5"
srcresults = os.path.join(RESULTSDIR, "src.hf5")
mrgresults = os.path.join(RESULTSDIR, "mrg.hf5")
soresults = os.path.join(RESULTSDIR, "so.hf5")
cmbresults = os.path.join(RESULTSDIR, "cmb.hf5")
| |
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
import uuid
from cgi import FieldStorage
from types import NoneType
from google.appengine.api import images
from google.appengine.api.blobstore import blobstore
from google.appengine.ext import ndb
from mcfw.rpc import returns, arguments
from rogerthat.bizz.gcs import upload_to_gcs
from rogerthat.bizz.job.update_app_asset import update_app_asset
from rogerthat.consts import GCS_BUCKET_PREFIX
from rogerthat.exceptions.app_assets import AppAssetNotFoundException, CannotDeleteDefaultAppAssetException
from rogerthat.models import App
from rogerthat.models.apps import AppAsset
from rogerthat.rpc.service import BusinessException
from rogerthat.to.app import AppAssetTO
@returns([AppAsset])
@arguments(app_id=unicode)
def get_app_assets(app_id):
    """Return the non-default app assets configured for this app."""
    assets = AppAsset.list_by_app_id(app_id)
    return [a for a in assets if not a.is_default]
@returns([AppAsset])
@arguments()
def get_all_app_assets():
    # NOTE(review): this returns the ndb Query object itself, not a
    # materialized list, even though @returns([AppAsset]) suggests a list.
    # Callers can iterate it either way; confirm whether .fetch() was meant.
    return AppAsset.query()
@returns(AppAsset)
@arguments(asset_id=unicode)
def get_app_asset(asset_id):
    """Fetch an AppAsset by id.

    Custom assets use a numeric (long) id; default assets keep their
    string id, so a failed numeric conversion is simply ignored.
    """
    try:
        lookup_id = long(asset_id)
    except ValueError:
        lookup_id = asset_id
    return AppAsset.get_by_id(lookup_id)
@returns(AppAsset)
@arguments(asset_type=unicode)
def get_default_app_asset(asset_type):
    """Return the default AppAsset for `asset_type`, or None when unset.

    Args:
        asset_type (unicode)
    Returns:
        AppAsset
    """
    default_key = AppAsset.default_key(asset_type)
    return default_key.get()
def get_app_ids_without_custom_asset(asset_type):
    """Return the ids of all apps that have no custom asset of this type.

    Starts from every known app id and removes the ids claimed by any
    custom asset of `asset_type`.
    """
    app_ids = {key.name() for key in App.all(keys_only=True)}
    for asset in AppAsset.list_by_type(asset_type):
        for app_id in asset.app_ids:
            # discard() instead of remove(): an asset may still reference an
            # app that no longer exists, which must not raise KeyError here.
            app_ids.discard(app_id)
    return app_ids
def remove_from_gcs(blob_key):
    # Delete the stored blob itself, then drop its cached image serving URL.
    blobstore.delete(blob_key)
    images.delete_serving_url(blob_key)
@returns(AppAsset)
@arguments(kind=unicode, uploaded_file=(FieldStorage, NoneType), default=bool, app_ids=[unicode], scale_x=float,
           asset_id=unicode)
def process_uploaded_assets(kind, uploaded_file, default, app_ids, scale_x, asset_id=None):
    """
    Create or update an app asset, optionally from a newly uploaded image,
    and push the result to the affected apps.

    Args:
        kind (unicode): asset type; must be one of AppAsset.TYPES
        uploaded_file (FieldStorage): new image, or None to keep the current one
        default (bool): whether this is the default asset for `kind`
        app_ids (list of unicode): apps the asset applies to (ignored for defaults)
        scale_x (float)
        asset_id (unicode): existing asset to update, or None to create one

    Returns:
        AppAsset

    Raises:
        BusinessException: on validation failure
        AppAssetNotFoundException: when `asset_id` does not exist
    """
    # --- validation ---
    if uploaded_file is not None:
        if not uploaded_file.type.startswith('image/'):
            raise BusinessException('only_images_allowed')
        file_content = uploaded_file.value
        if len(file_content) > AppAsset.MAX_SIZE:
            raise BusinessException('file_too_large')
    elif not asset_id:
        # Neither a new image nor an existing asset to update.
        raise BusinessException('please_select_an_image')
    if not default and not len(app_ids):
        raise BusinessException('one_or_more_apps_required')
    if kind not in AppAsset.TYPES:
        raise BusinessException('unknown_asset_type')
    to_put = []
    to_delete = []
    # Upload the new image (if any) to cloud storage first.
    if uploaded_file is not None:
        gcs_filename = '/%s-app-assets/%s-%s' % (GCS_BUCKET_PREFIX, kind, uuid.uuid4())
        content_type = uploaded_file.type
        blob_key = upload_to_gcs(file_content, content_type, gcs_filename)
    is_new_default = False
    # Apps whose clients must be told about the change.
    modified_app_ids = set(app_ids)
    if default:
        default_key = AppAsset.default_key(kind)
        asset = default_key.get()
        if not asset:
            asset = AppAsset(id=default_key.id(), app_ids=[])
            is_new_default = True
        # get all apps that do not have a custom asset set
        modified_app_ids = get_app_ids_without_custom_asset(kind)
    else:
        if asset_id:
            asset = get_app_asset(asset_id)
            if not asset:
                raise AppAssetNotFoundException(asset_id)
            asset.app_ids = app_ids
        else:
            asset = AppAsset(app_ids=app_ids)
        # Detach the chosen apps from any other asset of the same kind;
        # assets left without any app are deleted altogether below.
        for original_asset in AppAsset.get_by_app_ids(app_ids, kind):
            for app_id in original_asset.app_ids:
                modified_app_ids.add(app_id)
            if original_asset.key != asset.key:
                original_asset.app_ids = [app_id for app_id in original_asset.app_ids if app_id not in app_ids]
                if original_asset.app_ids:
                    to_put.append(original_asset)
                else:
                    to_delete.append(original_asset)
    asset.asset_type = kind
    if uploaded_file is not None:
        # Replace the stored image and its serving url.
        if asset.content_key:
            remove_from_gcs(asset.content_key)
        asset.content_key = blob_key
        asset.serving_url = images.get_serving_url(blob_key)
        asset.content_type = content_type
    asset.scale_x = scale_x
    to_put.append(asset)
    ndb.put_multi(to_put)
    if to_delete:
        for asset_to_delete in to_delete:  # type: AppAsset
            remove_from_gcs(asset_to_delete.content_key)
            asset_to_delete.key.delete()
    if not is_new_default:
        # Push the updated asset (or its absence) to all affected apps.
        for app_id in modified_app_ids:
            if app_id in asset.app_ids:
                app_asset_to = AppAssetTO(kind, asset.serving_url, scale_x)
            else:
                app_asset_to = AppAssetTO(kind, None, scale_x)
            update_app_asset(app_id, app_asset_to)
    return asset
def _update_default_app_asset(asset_type, app_ids):
    """
    Send an update so the phones show the default asset of `asset_type`,
    if it exists, or remove theirs otherwise.

    Args:
        asset_type (unicode)
        app_ids (list of unicode)
    """
    asset = get_default_app_asset(asset_type)
    if asset:
        app_asset_to = AppAssetTO(asset.asset_type, asset.serving_url, asset.scale_x)
    else:
        # This will remove the asset on the phones.
        # Bug fix: the original read asset.asset_type here, which raised
        # AttributeError because asset is None on this branch.
        app_asset_to = AppAssetTO(asset_type, None, 0.0)
    for app_id in app_ids:
        update_app_asset(app_id, app_asset_to)
@returns()
@arguments(app_id=unicode, asset_type=unicode)
def remove_app_asset(app_id, asset_type):
    """Detach `app_id` from its custom asset of `asset_type`.

    The asset itself (including its GCS blob) is deleted when no other app
    still uses it, and the default asset is pushed to the app afterwards.
    """
    asset = AppAsset.get_by_app_id(app_id, asset_type)
    if not asset:
        return
    remaining_ids = [a_id for a_id in asset.app_ids if a_id != app_id]
    asset.app_ids = remaining_ids
    if remaining_ids:
        asset.put()
    else:
        remove_from_gcs(asset.content_key)
        asset.key.delete()
    _update_default_app_asset(asset_type, [app_id])
@returns()
@arguments(asset_id=unicode)
def remove_global_app_asset(asset_id):
    """Delete an asset entirely and push the default asset to its apps.

    Raises:
        CannotDeleteDefaultAppAssetException: when the asset is a default.
    """
    asset = get_app_asset(asset_id)
    if not asset:
        return
    if asset.is_default:
        raise CannotDeleteDefaultAppAssetException()
    affected_type = asset.asset_type
    affected_apps = asset.app_ids
    remove_from_gcs(asset.content_key)
    asset.key.delete()
    _update_default_app_asset(affected_type, affected_apps)
| |
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for Perspective Broker module.
TODO: update protocol level tests to use new connection API, leaving
only specific tests for old API.
"""
# issue1195 TODOs: replace pump.pump() with something involving Deferreds.
# Clean up warning suppression.
import sys, os, time, gc
from cStringIO import StringIO
from zope.interface import implements, Interface
from twisted.python.versions import Version
from twisted.trial import unittest
from twisted.spread import pb, util, publish, jelly
from twisted.internet import protocol, main, reactor
from twisted.internet.error import ConnectionRefusedError
from twisted.internet.defer import Deferred, gatherResults, succeed
from twisted.protocols.policies import WrappingFactory
from twisted.python import failure, log
from twisted.cred.error import UnauthorizedLogin, UnhandledCredentials
from twisted.cred import portal, checkers, credentials
class Dummy(pb.Viewable):
    """Viewable whose answer depends on who is looking at it."""
    def view_doNothing(self, user):
        if not isinstance(user, DummyPerspective):
            return 'goodbye, cruel world!'
        return 'hello world!'
class DummyPerspective(pb.Avatar):
    """
    An L{IPerspective} avatar which will be used in some tests.
    """
    def perspective_getDummyViewPoint(self):
        """Hand back a fresh L{Dummy} viewable."""
        return Dummy()
class DummyRealm(object):
    # Minimal realm: only answers requests for pb.IPerspective, returning a
    # DummyPerspective with a no-op logout callable.
    implements(portal.IRealm)
    def requestAvatar(self, avatarId, mind, *interfaces):
        for iface in interfaces:
            if iface is pb.IPerspective:
                return iface, DummyPerspective(avatarId), lambda: None
class IOPump:
    """
    Utility to pump data between clients and servers for protocol testing.

    Perhaps this is a utility worthy of being in protocol.py?
    """
    def __init__(self, client, server, clientIO, serverIO):
        # client/server are the protocol instances; clientIO/serverIO are
        # the in-memory transports they write to.
        self.client = client
        self.server = server
        self.clientIO = clientIO
        self.serverIO = serverIO
    def flush(self):
        """
        Pump until there is no more input or output. This does not run any
        timers, so don't use it with any code that calls reactor.callLater.
        """
        # failsafe timeout
        timeout = time.time() + 5
        while self.pump():
            if time.time() > timeout:
                return
    def pump(self):
        """
        Move data back and forth.

        Returns whether any data was moved.
        """
        # Grab whatever each side has written since the last pump...
        self.clientIO.seek(0)
        self.serverIO.seek(0)
        cData = self.clientIO.read()
        sData = self.serverIO.read()
        # ...then reset both buffers for the next round.
        self.clientIO.seek(0)
        self.serverIO.seek(0)
        self.clientIO.truncate()
        self.serverIO.truncate()
        self.client.transport._checkProducer()
        self.server.transport._checkProducer()
        # Deliver one byte at a time to exercise partial-data handling.
        for byte in cData:
            self.server.dataReceived(byte)
        for byte in sData:
            self.client.dataReceived(byte)
        if cData or sData:
            return 1
        else:
            return 0
def connectedServerAndClient():
    """
    Returns a 3-tuple: (client, server, pump).

    The brokers are wired to in-memory StringIO transports; use the
    returned L{IOPump} to move bytes between them.
    """
    clientBroker = pb.Broker()
    checker = checkers.InMemoryUsernamePasswordDatabaseDontUse(guest='guest')
    factory = pb.PBServerFactory(portal.Portal(DummyRealm(), [checker]))
    serverBroker = factory.buildProtocol(('127.0.0.1',))
    clientTransport = StringIO()
    serverTransport = StringIO()
    clientBroker.makeConnection(protocol.FileWrapper(clientTransport))
    serverBroker.makeConnection(protocol.FileWrapper(serverTransport))
    pump = IOPump(clientBroker, serverBroker, clientTransport, serverTransport)
    # Challenge-response authentication:
    pump.flush()
    return clientBroker, serverBroker, pump
class SimpleRemote(pb.Referenceable):
    """Referenceable with one working and one always-failing method."""
    def remote_thunk(self, arg):
        """Record the argument and return its successor."""
        self.arg = arg
        return 1 + arg
    def remote_knuth(self, arg):
        """Always blow up, so callers can exercise remote errors."""
        raise Exception()
class NestedRemote(pb.Referenceable):
    """Referenceable whose only method hands out another referenceable."""
    def remote_getSimple(self):
        """Return a brand new L{SimpleRemote}."""
        return SimpleRemote()
class SimpleCopy(pb.Copyable):
    """Copyable carrying an int, a dict and a list as its state."""
    def __init__(self):
        self.x = 1
        self.y = {"Hello": "World"}
        self.z = ['test']
class SimpleLocalCopy(pb.RemoteCopy):
    """Client-side counterpart of L{SimpleCopy}; no extra behaviour."""

pb.setUnjellyableForClass(SimpleCopy, SimpleLocalCopy)
class SimpleFactoryCopy(pb.Copyable):
    """
    Copyable that records every created instance in a class registry.

    @cvar allIDs: hold every created instances of this class.
    @type allIDs: C{dict}
    """
    allIDs = {}
    def __init__(self, id):
        """Remember the id and register this instance under it."""
        self.id = id
        SimpleFactoryCopy.allIDs[id] = self
def createFactoryCopy(state):
    """
    Factory of L{SimpleFactoryCopy}: look up the already-created instance
    whose C{id} is recorded in C{state}.
    """
    stateId = state.get("id", None)
    if stateId is None:
        raise RuntimeError("factory copy state has no 'id' member %s" %
                           (repr(state),))
    if stateId not in SimpleFactoryCopy.allIDs:
        raise RuntimeError("factory class has no ID: %s" %
                           (SimpleFactoryCopy.allIDs,))
    inst = SimpleFactoryCopy.allIDs[stateId]
    if not inst:
        raise RuntimeError("factory method found no object with id")
    return inst

pb.setUnjellyableFactoryForClass(SimpleFactoryCopy, createFactoryCopy)
class NestedCopy(pb.Referenceable):
    """Referenceable handing out copyable objects."""
    def remote_getCopy(self):
        """Return a fresh L{SimpleCopy}."""
        return SimpleCopy()
    def remote_getFactory(self, value):
        """Create (and implicitly register) a L{SimpleFactoryCopy}."""
        return SimpleFactoryCopy(value)
class SimpleCache(pb.Cacheable):
    # NOTE(review): "__init___" has THREE trailing underscores, so this is
    # not the constructor and instances are created with empty state.  It
    # looks like a typo, but renaming it to __init__ would change the state
    # jellied in the cache tests -- confirm before fixing.
    def __init___(self):
        self.x = 1
        self.y = {"Hello":"World"}
        self.z = ['test']
class NestedComplicatedCache(pb.Referenceable):
    """Referenceable owning a single cacheable shared across calls."""
    def __init__(self):
        self.c = VeryVeryComplicatedCacheable()
    def remote_getCache(self):
        """Return the one shared cacheable instance."""
        return self.c
class VeryVeryComplicatedCacheable(pb.Cacheable):
    # Cacheable that pushes updates of `foo` to its single observer.
    def __init__(self):
        self.x = 1
        self.y = 2
        self.foo = 3
    def setFoo4(self):
        # Change local state and notify the remote cache of the new value.
        self.foo = 4
        self.observer.callRemote('foo',4)
    def getStateToCacheAndObserveFor(self, perspective, observer):
        # Remember the observer so later changes can be broadcast.
        self.observer = observer
        return {"x": self.x,
                "y": self.y,
                "foo": self.foo}
    def stoppedObserving(self, perspective, observer):
        log.msg("stopped observing")
        observer.callRemote("end")
        if observer == self.observer:
            self.observer = None
class RatherBaroqueCache(pb.RemoteCache):
    """Remote cache mirroring L{VeryVeryComplicatedCacheable}."""
    def observe_foo(self, newFoo):
        """Track server-side updates of the C{foo} attribute."""
        self.foo = newFoo
    def observe_end(self):
        """React to the server announcing the end of observation."""
        log.msg("the end of things")

pb.setUnjellyableForClass(VeryVeryComplicatedCacheable, RatherBaroqueCache)
class SimpleLocalCache(pb.RemoteCache):
    """Remote cache for L{SimpleCache} with a few self-test helpers."""
    def setCopyableState(self, state):
        """Adopt the received state as instance attributes."""
        self.__dict__.update(state)
    def checkMethod(self):
        """Return the bound C{check} method (identity probe)."""
        return self.check
    def checkSelf(self):
        """Return the instance itself (identity probe)."""
        return self
    def check(self):
        return 1

pb.setUnjellyableForClass(SimpleCache, SimpleLocalCache)
class NestedCache(pb.Referenceable):
    """Referenceable exposing one cacheable under two references."""
    def __init__(self):
        self.x = SimpleCache()
    def remote_getCache(self):
        """Return the same cacheable twice in one list."""
        return [self.x, self.x]
    def remote_putCache(self, cache):
        """True when the round-tripped cache is the original object."""
        return self.x is cache
class Observable(pb.Referenceable):
    """Referenceable broadcasting notifications to registered observers."""
    def __init__(self):
        self.observers = []
    def remote_observe(self, obs):
        """Register a remote observer."""
        self.observers.append(obs)
    def remote_unobserve(self, obs):
        """Unregister a previously registered observer."""
        self.observers.remove(obs)
    def notify(self, obj):
        """Send C{obj} to every currently registered observer."""
        for watcher in self.observers:
            watcher.callRemote('notify', self, obj)
class DeferredRemote(pb.Referenceable):
    """Referenceable returning a locally fired L{Deferred} from a remote call."""
    def __init__(self):
        self.run = 0
    def runMe(self, arg):
        """Success callback: record the value and return its successor."""
        self.run = arg
        return 1 + arg
    def dontRunMe(self, arg):
        """Errback that must never fire."""
        assert 0, "shouldn't have been run!"
    def remote_doItLater(self):
        """
        Return a L{Deferred} to be fired on client side. When fired,
        C{self.runMe} is called.
        """
        d = Deferred()
        d.addCallbacks(self.runMe, self.dontRunMe)
        self.d = d
        return d
class Observer(pb.Referenceable):
    """Observer that unsubscribes itself after the first notification."""
    notified = 0
    obj = None
    def remote_notify(self, other, obj):
        self.obj = obj
        self.notified += 1
        other.callRemote('unobserve', self)
class NewStyleCopy(pb.Copyable, pb.RemoteCopy, object):
    """New-style copyable that serves as its own remote-copy class."""
    def __init__(self, s):
        self.s = s

pb.setUnjellyableForClass(NewStyleCopy, NewStyleCopy)
class NewStyleCopy2(pb.Copyable, pb.RemoteCopy, object):
    """Copyable counting allocations and initialisations at class level."""
    allocated = 0
    initialized = 0
    value = 1
    def __new__(cls):
        # Count every allocation, including those done during unjellying.
        NewStyleCopy2.allocated += 1
        inst = object.__new__(cls)
        inst.value = 2
        return inst
    def __init__(self):
        # Only counted for normal construction, not for unjellying.
        NewStyleCopy2.initialized += 1

pb.setUnjellyableForClass(NewStyleCopy2, NewStyleCopy2)
class NewStyleCacheCopy(pb.Cacheable, pb.RemoteCache, object):
    """New-style cacheable acting as its own remote cache."""
    def getStateToCacheAndObserveFor(self, perspective, observer):
        """Expose the full instance dictionary as cached state."""
        return self.__dict__

pb.setUnjellyableForClass(NewStyleCacheCopy, NewStyleCacheCopy)
class Echoer(pb.Root):
    """Root object whose single method echoes its argument back."""
    def remote_echo(self, st):
        return st
class CachedReturner(pb.Root):
    """Root object handing out a cacheable supplied at construction time."""
    def __init__(self, cache):
        self.cache = cache
    def remote_giveMeCache(self, st):
        """Return the held cacheable; C{st} is ignored."""
        return self.cache
class NewStyleTestCase(unittest.TestCase):
    def setUp(self):
        """
        Create a pb server using L{Echoer} protocol and connect a client to it.
        """
        self.serverFactory = pb.PBServerFactory(Echoer())
        # Wrap the server factory so tearDown can reach the server-side
        # protocol instances and disconnect them.
        self.wrapper = WrappingFactory(self.serverFactory)
        self.server = reactor.listenTCP(0, self.wrapper)
        clientFactory = pb.PBClientFactory()
        reactor.connectTCP("localhost", self.server.getHost().port,
                           clientFactory)
        def gotRoot(ref):
            self.ref = ref
        return clientFactory.getRootObject().addCallback(gotRoot)
    def tearDown(self):
        """
        Close client and server connections, reset values of L{NewStyleCopy2}
        class variables.
        """
        NewStyleCopy2.allocated = 0
        NewStyleCopy2.initialized = 0
        NewStyleCopy2.value = 1
        self.ref.broker.transport.loseConnection()
        # Disconnect any server-side connections too.
        for proto in self.wrapper.protocols:
            proto.transport.loseConnection()
        return self.server.stopListening()
    def test_newStyle(self):
        """
        Create a new style object, send it over the wire, and check the result.
        """
        orig = NewStyleCopy("value")
        d = self.ref.callRemote("echo", orig)
        def cb(res):
            # The far side must reconstruct an equal but distinct instance.
            self.failUnless(isinstance(res, NewStyleCopy))
            self.failUnlessEqual(res.s, "value")
            self.failIf(res is orig) # no cheating :)
        d.addCallback(cb)
        return d
    def test_alloc(self):
        """
        Send a new style object and check the number of allocations.
        """
        orig = NewStyleCopy2()
        self.failUnlessEqual(NewStyleCopy2.allocated, 1)
        self.failUnlessEqual(NewStyleCopy2.initialized, 1)
        d = self.ref.callRemote("echo", orig)
        def cb(res):
            # receiving the response creates a third one on the way back
            self.failUnless(isinstance(res, NewStyleCopy2))
            self.failUnlessEqual(res.value, 2)
            self.failUnlessEqual(NewStyleCopy2.allocated, 3)
            self.failUnlessEqual(NewStyleCopy2.initialized, 1)
            self.failIf(res is orig) # no cheating :)
            # sending the object creates a second one on the far side
        d.addCallback(cb)
        return d
class ConnectionNotifyServerFactory(pb.PBServerFactory):
    """
    A server factory which stores the last connection and fires a
    L{Deferred} on connection made. This factory can handle only one
    client connection.

    @ivar protocolInstance: the last protocol instance.
    @type protocolInstance: C{pb.Broker}

    @ivar connectionMade: the deferred fired upon connection.
    @type connectionMade: C{Deferred}
    """
    protocolInstance = None
    def __init__(self, root):
        """
        Initialize the factory.
        """
        pb.PBServerFactory.__init__(self, root)
        self.connectionMade = Deferred()
    def clientConnectionMade(self, protocol):
        """
        Store the protocol and fire the connection deferred.
        """
        self.protocolInstance = protocol
        # Fire at most once: swap the deferred out before calling it back.
        d, self.connectionMade = self.connectionMade, None
        if d is not None:
            d.callback(None)
class NewStyleCachedTestCase(unittest.TestCase):
    def setUp(self):
        """
        Create a pb server using L{CachedReturner} protocol and connect a
        client to it.
        """
        self.orig = NewStyleCacheCopy()
        self.orig.s = "value"
        self.server = reactor.listenTCP(0,
            ConnectionNotifyServerFactory(CachedReturner(self.orig)))
        clientFactory = pb.PBClientFactory()
        reactor.connectTCP("localhost", self.server.getHost().port,
                           clientFactory)
        def gotRoot(ref):
            self.ref = ref
        d1 = clientFactory.getRootObject().addCallback(gotRoot)
        # Also wait until the server has seen the connection.
        d2 = self.server.factory.connectionMade
        return gatherResults([d1, d2])
    def tearDown(self):
        """
        Close client and server connections.
        """
        self.server.factory.protocolInstance.transport.loseConnection()
        self.ref.broker.transport.loseConnection()
        return self.server.stopListening()
    def test_newStyleCache(self):
        """
        Get the object from the cache, and checks its properties.
        """
        d = self.ref.callRemote("giveMeCache", self.orig)
        def cb(res):
            # A distinct but equivalent cached copy must arrive.
            self.failUnless(isinstance(res, NewStyleCacheCopy))
            self.failUnlessEqual(res.s, "value")
            self.failIf(res is self.orig) # no cheating :)
        d.addCallback(cb)
        return d
class BrokerTestCase(unittest.TestCase):
    """
    Tests exercising the core PB broker machinery over an in-memory
    client/server pair driven by an I/O pump (no real reactor traffic).
    """
    # Result slot filled in by thunkResultGood for later assertions.
    thunkResult = None

    def tearDown(self):
        try:
            # from RemotePublished.getFileName
            os.unlink('None-None-TESTING.pub')
        except OSError:
            pass

    def thunkErrorBad(self, error):
        # Errback that should never fire in tests expecting success.
        self.fail("This should cause a return value, not %s" % (error,))

    def thunkResultGood(self, result):
        # Callback recording the successful result for assertions.
        self.thunkResult = result

    def thunkErrorGood(self, tb):
        # Errback used by tests that expect a failure; nothing to check.
        pass

    def thunkResultBad(self, result):
        # Callback that should never fire in tests expecting failure.
        self.fail("This should cause an error, not %s" % (result,))

    def test_reference(self):
        """
        A Referenceable passed as an argument arrives on the remote side
        as a reference that can itself be called back.
        """
        c, s, pump = connectedServerAndClient()

        class X(pb.Referenceable):
            def remote_catch(self,arg):
                self.caught = arg

        class Y(pb.Referenceable):
            def remote_throw(self, a, b):
                a.callRemote('catch', b)

        s.setNameForLocal("y", Y())
        y = c.remoteForName("y")
        x = X()
        z = X()
        y.callRemote('throw', x, z)
        # Three pumps: send 'throw', deliver the 'catch' callback, and
        # deliver its answer.
        pump.pump()
        pump.pump()
        pump.pump()
        self.assertIdentical(x.caught, z, "X should have caught Z")

        # make sure references to remote methods are equals
        self.assertEquals(y.remoteMethod('throw'), y.remoteMethod('throw'))

    def test_result(self):
        """
        A remote method call returns its result after one round trip, in
        both client->server and server->client directions.
        """
        c, s, pump = connectedServerAndClient()
        for x, y in (c, s), (s, c):
            # test reflexivity
            foo = SimpleRemote()
            x.setNameForLocal("foo", foo)
            bar = y.remoteForName("foo")
            self.expectedThunkResult = 8
            bar.callRemote('thunk',self.expectedThunkResult - 1
                ).addCallbacks(self.thunkResultGood, self.thunkErrorBad)
            # Send question.
            pump.pump()
            # Send response.
            pump.pump()
        # Shouldn't require any more pumping than that...
        self.assertEquals(self.thunkResult, self.expectedThunkResult,
                          "result wasn't received.")

    def refcountResult(self, result):
        # Hold on to the returned reference so test_refcount can release
        # it explicitly.
        self.nestedRemote = result

    def test_tooManyRefs(self):
        """
        Exceeding MAX_BROKER_REFS causes the connection to be closed.
        """
        l = []
        e = []
        c, s, pump = connectedServerAndClient()
        foo = NestedRemote()
        s.setNameForLocal("foo", foo)
        x = c.remoteForName("foo")
        for igno in xrange(pb.MAX_BROKER_REFS + 10):
            if s.transport.closed or c.transport.closed:
                break
            x.callRemote("getSimple").addCallbacks(l.append, e.append)
            pump.pump()
        expected = (pb.MAX_BROKER_REFS - 1)
        self.assertTrue(s.transport.closed, "transport was not closed")
        self.assertEquals(len(l), expected,
                          "expected %s got %s" % (expected, len(l)))

    def test_copy(self):
        """
        A Copyable return value arrives on the caller's side with its
        copied state intact.
        """
        c, s, pump = connectedServerAndClient()
        foo = NestedCopy()
        s.setNameForLocal("foo", foo)
        x = c.remoteForName("foo")
        x.callRemote('getCopy'
            ).addCallbacks(self.thunkResultGood, self.thunkErrorBad)
        pump.pump()
        pump.pump()
        self.assertEquals(self.thunkResult.x, 1)
        self.assertEquals(self.thunkResult.y['Hello'], 'World')
        self.assertEquals(self.thunkResult.z[0], 'test')

    def test_observe(self):
        """
        An observer can be registered remotely and comparison of remote
        objects allows it to be unregistered again.
        """
        c, s, pump = connectedServerAndClient()

        # this is really testing the comparison between remote objects, to make
        # sure that you can *UN*observe when you have an observer architecture.
        a = Observable()
        b = Observer()
        s.setNameForLocal("a", a)
        ra = c.remoteForName("a")
        ra.callRemote('observe',b)
        pump.pump()
        a.notify(1)
        pump.pump()
        pump.pump()
        # The second notify happens after unobserve (inside Observable),
        # so it must not reach the observer.
        a.notify(10)
        pump.pump()
        pump.pump()
        self.assertNotIdentical(b.obj, None, "didn't notify")
        self.assertEquals(b.obj, 1, 'notified too much')

    def test_defer(self):
        """
        A remote method returning a Deferred delivers its result only
        once that Deferred fires.
        """
        c, s, pump = connectedServerAndClient()
        d = DeferredRemote()
        s.setNameForLocal("d", d)
        e = c.remoteForName("d")
        pump.pump(); pump.pump()
        results = []
        e.callRemote('doItLater').addCallback(results.append)
        pump.pump(); pump.pump()
        self.assertFalse(d.run, "Deferred method run too early.")
        d.d.callback(5)
        self.assertEquals(d.run, 5, "Deferred method run too late.")
        pump.pump(); pump.pump()
        self.assertEquals(results[0], 6, "Incorrect result.")

    def test_refcount(self):
        """
        Dropping the last local reference to a RemoteReference eventually
        removes the corresponding entry from the server's localObjects.
        """
        c, s, pump = connectedServerAndClient()
        foo = NestedRemote()
        s.setNameForLocal("foo", foo)
        bar = c.remoteForName("foo")
        bar.callRemote('getSimple'
            ).addCallbacks(self.refcountResult, self.thunkErrorBad)

        # send question
        pump.pump()
        # send response
        pump.pump()

        # delving into internal structures here, because GC is sort of
        # inherently internal.
        rluid = self.nestedRemote.luid
        self.assertIn(rluid, s.localObjects)
        del self.nestedRemote
        # nudge the gc
        if sys.hexversion >= 0x2000000:
            gc.collect()
        # try to nudge the GC even if we can't really
        pump.pump()
        pump.pump()
        pump.pump()
        self.assertNotIn(rluid, s.localObjects)

    def test_cache(self):
        """
        Cacheable objects are transferred once, kept identical across
        calls, updated via their observer, and cleaned up on both sides
        once no references remain.
        """
        c, s, pump = connectedServerAndClient()
        obj = NestedCache()
        obj2 = NestedComplicatedCache()
        vcc = obj2.c
        s.setNameForLocal("obj", obj)
        s.setNameForLocal("xxx", obj2)
        o2 = c.remoteForName("obj")
        o3 = c.remoteForName("xxx")
        coll = []
        o2.callRemote("getCache"
            ).addCallback(coll.append).addErrback(coll.append)
        o2.callRemote("getCache"
            ).addCallback(coll.append).addErrback(coll.append)
        complex = []
        o3.callRemote("getCache").addCallback(complex.append)
        o3.callRemote("getCache").addCallback(complex.append)
        pump.flush()
        # `worst things first'
        self.assertEquals(complex[0].x, 1)
        self.assertEquals(complex[0].y, 2)
        self.assertEquals(complex[0].foo, 3)

        # Mutating the server-side cacheable must propagate to the
        # client-side cached copy through the observer.
        vcc.setFoo4()
        pump.flush()
        self.assertEquals(complex[0].foo, 4)

        self.assertEquals(len(coll), 2)
        cp = coll[0][0]
        self.assertIdentical(cp.checkMethod().im_self, cp,
                             "potential refcounting issue")
        self.assertIdentical(cp.checkSelf(), cp,
                             "other potential refcounting issue")
        col2 = []
        o2.callRemote('putCache',cp).addCallback(col2.append)
        pump.flush()
        # The objects were the same (testing lcache identity)
        self.assertTrue(col2[0])
        # test equality of references to methods
        self.assertEquals(o2.remoteMethod("getCache"),
                          o2.remoteMethod("getCache"))

        # now, refcounting (similiar to testRefCount)
        luid = cp.luid
        baroqueLuid = complex[0].luid
        self.assertIn(luid, s.remotelyCachedObjects,
                      "remote cache doesn't have it")
        del coll
        del cp
        pump.flush()
        del complex
        del col2
        # extra nudge...
        pump.flush()
        # del vcc.observer
        # nudge the gc
        if sys.hexversion >= 0x2000000:
            gc.collect()
        # try to nudge the GC even if we can't really
        pump.flush()
        # The GC is done with it.
        self.assertNotIn(luid, s.remotelyCachedObjects,
                         "Server still had it after GC")
        self.assertNotIn(luid, c.locallyCachedObjects,
                         "Client still had it after GC")
        self.assertNotIn(baroqueLuid, s.remotelyCachedObjects,
                         "Server still had complex after GC")
        self.assertNotIn(baroqueLuid, c.locallyCachedObjects,
                         "Client still had complex after GC")
        self.assertIdentical(vcc.observer, None, "observer was not removed")

    def test_publishable(self):
        """
        A Publishable arrives dirty on first transfer and clean once the
        client has a cache file for it.
        """
        try:
            os.unlink('None-None-TESTING.pub') # from RemotePublished.getFileName
        except OSError:
            pass # Sometimes it's not there.
        c, s, pump = connectedServerAndClient()
        foo = GetPublisher()
        # foo.pub.timestamp = 1.0
        s.setNameForLocal("foo", foo)
        bar = c.remoteForName("foo")
        accum = []
        bar.callRemote('getPub').addCallbacks(accum.append, self.thunkErrorBad)
        pump.flush()
        obj = accum.pop()
        self.assertEquals(obj.activateCalled, 1)
        self.assertEquals(obj.isActivated, 1)
        self.assertEquals(obj.yayIGotPublished, 1)
        # timestamp's dirty, we don't have a cache file
        self.assertEquals(obj._wasCleanWhenLoaded, 0)
        c, s, pump = connectedServerAndClient()
        s.setNameForLocal("foo", foo)
        bar = c.remoteForName("foo")
        bar.callRemote('getPub').addCallbacks(accum.append, self.thunkErrorBad)
        pump.flush()
        obj = accum.pop()
        # timestamp's clean, our cache file is up-to-date
        self.assertEquals(obj._wasCleanWhenLoaded, 1)

    def gotCopy(self, val):
        # Record the id of a factory-produced copy for test_factoryCopy.
        self.thunkResult = val.id

    def test_factoryCopy(self):
        """
        A copy produced through a registered factory keeps the identifier
        it was created with.
        """
        c, s, pump = connectedServerAndClient()
        ID = 99
        obj = NestedCopy()
        s.setNameForLocal("foo", obj)
        x = c.remoteForName("foo")
        x.callRemote('getFactory', ID
            ).addCallbacks(self.gotCopy, self.thunkResultBad)
        pump.pump()
        pump.pump()
        pump.pump()
        self.assertEquals(self.thunkResult, ID,
            "ID not correct on factory object %s" % (self.thunkResult,))
# Fixture payload for the paging tests: 500 bytes, sent in 100-byte pages.
bigString = "helloworld" * 50

# Module-level capture of the pager completion callback's arguments,
# inspected by PagingTestCase.
callbackArgs = None
callbackKeyword = None

def finishedCallback(*args, **kw):
    """
    Completion callback handed to the pagers; records its positional and
    keyword arguments in the module-level variables above so tests can
    verify it was invoked correctly.
    """
    global callbackArgs, callbackKeyword
    callbackArgs = args
    callbackKeyword = kw
class Pagerizer(pb.Referenceable):
    """
    Referenceable which serves C{bigString} to a remote collector in
    100-byte pages via L{util.StringPager}.
    """
    def __init__(self, callback, *args, **kw):
        self.callback = callback
        self.args = args
        self.kw = kw

    def remote_getPages(self, collector):
        """
        Start paging the fixture string to C{collector}, then drop the
        stored callback arguments so tests can check they aren't retained.
        """
        util.StringPager(collector, bigString, 100,
                         self.callback, *self.args, **self.kw)
        self.args = self.kw = None
class FilePagerizer(pb.Referenceable):
    """
    Referenceable which serves the contents of a file to a remote
    collector via L{util.FilePager}.
    """
    # The pager is kept so tests can inspect its internal chunk buffer.
    pager = None

    def __init__(self, filename, callback, *args, **kw):
        self.filename = filename
        self.callback = callback
        self.args = args
        self.kw = kw

    def remote_getPages(self, collector):
        """
        Start paging the file to C{collector}, then drop the stored
        callback arguments so tests can check they aren't retained.
        """
        self.pager = util.FilePager(collector, file(self.filename),
                                    self.callback, *self.args, **self.kw)
        self.args = self.kw = None
class PagingTestCase(unittest.TestCase):
    """
    Test pb objects sending data by pages.
    """

    def setUp(self):
        """
        Create a file used to test L{util.FilePager}.
        """
        self.filename = self.mktemp()
        fd = file(self.filename, 'w')
        fd.write(bigString)
        fd.close()

    def test_pagingWithCallback(self):
        """
        Test L{util.StringPager}, passing a callback to fire when all pages
        are sent.
        """
        c, s, pump = connectedServerAndClient()
        s.setNameForLocal("foo", Pagerizer(finishedCallback, 'hello', value=10))
        x = c.remoteForName("foo")
        l = []
        util.getAllPages(x, "getPages").addCallback(l.append)
        # Pump until the client-side deferred has fired with the pages.
        while not l:
            pump.pump()
        self.assertEquals(''.join(l[0]), bigString,
                          "Pages received not equal to pages sent!")
        self.assertEquals(callbackArgs, ('hello',),
                          "Completed callback not invoked")
        self.assertEquals(callbackKeyword, {'value': 10},
                          "Completed callback not invoked")

    def test_pagingWithoutCallback(self):
        """
        Test L{util.StringPager} without a callback.
        """
        c, s, pump = connectedServerAndClient()
        s.setNameForLocal("foo", Pagerizer(None))
        x = c.remoteForName("foo")
        l = []
        util.getAllPages(x, "getPages").addCallback(l.append)
        while not l:
            pump.pump()
        self.assertEquals(''.join(l[0]), bigString,
                          "Pages received not equal to pages sent!")

    def test_emptyFilePaging(self):
        """
        Test L{util.FilePager}, sending an empty file.
        """
        filenameEmpty = self.mktemp()
        fd = file(filenameEmpty, 'w')
        fd.close()
        c, s, pump = connectedServerAndClient()
        pagerizer = FilePagerizer(filenameEmpty, None)
        s.setNameForLocal("bar", pagerizer)
        x = c.remoteForName("bar")
        l = []
        util.getAllPages(x, "getPages").addCallback(l.append)
        # Bound the pump loop so an empty file that never completes
        # fails the test instead of hanging it.
        ttl = 10
        while not l and ttl > 0:
            pump.pump()
            ttl -= 1
        if not ttl:
            self.fail('getAllPages timed out')
        self.assertEquals(''.join(l[0]), '',
                          "Pages received not equal to pages sent!")

    def test_filePagingWithCallback(self):
        """
        Test L{util.FilePager}, passing a callback to fire when all pages
        are sent, and verify that the pager doesn't keep chunks in memory.
        """
        c, s, pump = connectedServerAndClient()
        pagerizer = FilePagerizer(self.filename, finishedCallback,
                                  'frodo', value = 9)
        s.setNameForLocal("bar", pagerizer)
        x = c.remoteForName("bar")
        l = []
        util.getAllPages(x, "getPages").addCallback(l.append)
        while not l:
            pump.pump()
        self.assertEquals(''.join(l[0]), bigString,
                          "Pages received not equal to pages sent!")
        self.assertEquals(callbackArgs, ('frodo',),
                          "Completed callback not invoked")
        self.assertEquals(callbackKeyword, {'value': 9},
                          "Completed callback not invoked")
        self.assertEquals(pagerizer.pager.chunks, [])

    def test_filePagingWithoutCallback(self):
        """
        Test L{util.FilePager} without a callback.
        """
        c, s, pump = connectedServerAndClient()
        pagerizer = FilePagerizer(self.filename, None)
        s.setNameForLocal("bar", pagerizer)
        x = c.remoteForName("bar")
        l = []
        util.getAllPages(x, "getPages").addCallback(l.append)
        while not l:
            pump.pump()
        self.assertEquals(''.join(l[0]), bigString,
                          "Pages received not equal to pages sent!")
        self.assertEquals(pagerizer.pager.chunks, [])
class DumbPublishable(publish.Publishable):
    """
    Minimal L{publish.Publishable} which publishes a single fixed
    attribute; used by the publishing tests.
    """
    def getStateToPublish(self):
        return {"yayIGotPublished": 1}
class DumbPub(publish.RemotePublished):
    """
    Client-side copy of L{DumbPublishable}; records that C{activated}
    was invoked so tests can assert on it.
    """
    def activated(self):
        self.activateCalled = 1
class GetPublisher(pb.Referenceable):
    """
    Referenceable exposing a single L{DumbPublishable} through
    C{remote_getPub}.
    """
    def __init__(self):
        self.pub = DumbPublishable("TESTING")

    def remote_getPub(self):
        # Hand the publishable out to the remote peer.
        return self.pub
# Deliver DumbPublishable instances to remote peers as DumbPub copies.
pb.setUnjellyableForClass(DumbPublishable, DumbPub)
class DisconnectionTestCase(unittest.TestCase):
    """
    Test disconnection callbacks.
    """

    def error(self, *args):
        # Sentinel callback: registered then unregistered, so calling it
        # indicates a bug in (dont)notifyOnDisconnect.
        raise RuntimeError("I shouldn't have been called: %s" % (args,))

    def gotDisconnected(self):
        """
        Called on broker disconnect.
        """
        self.gotCallback = 1

    def objectDisconnected(self, o):
        """
        Called on RemoteReference disconnect.
        """
        self.assertEquals(o, self.remoteObject)
        self.objectCallback = 1

    def test_badSerialization(self):
        """
        A failure while serializing an argument (L{BadCopyable} raises in
        C{getStateToCopyFor}) is delivered to the caller as an errback.
        """
        c, s, pump = connectedServerAndClient()
        pump.pump()
        s.setNameForLocal("o", BadCopySet())
        g = c.remoteForName("o")
        l = []
        g.callRemote("setBadCopy", BadCopyable()).addErrback(l.append)
        pump.flush()
        self.assertEquals(len(l), 1)

    def test_disconnection(self):
        """
        Disconnect callbacks can be registered and unregistered on both
        the broker and a RemoteReference, and registered callbacks fire
        when the connection is lost.
        """
        c, s, pump = connectedServerAndClient()
        pump.pump()
        s.setNameForLocal("o", SimpleRemote())

        # get a client reference to server object
        r = c.remoteForName("o")
        pump.pump()
        pump.pump()
        pump.pump()

        # register and then unregister disconnect callbacks
        # making sure they get unregistered
        c.notifyOnDisconnect(self.error)
        self.assertIn(self.error, c.disconnects)
        c.dontNotifyOnDisconnect(self.error)
        self.assertNotIn(self.error, c.disconnects)

        r.notifyOnDisconnect(self.error)
        self.assertIn(r._disconnected, c.disconnects)
        self.assertIn(self.error, r.disconnectCallbacks)
        r.dontNotifyOnDisconnect(self.error)
        self.assertNotIn(r._disconnected, c.disconnects)
        self.assertNotIn(self.error, r.disconnectCallbacks)

        # register disconnect callbacks
        c.notifyOnDisconnect(self.gotDisconnected)
        r.notifyOnDisconnect(self.objectDisconnected)
        self.remoteObject = r

        # disconnect
        c.connectionLost(failure.Failure(main.CONNECTION_DONE))
        self.assertTrue(self.gotCallback)
        self.assertTrue(self.objectCallback)
class FreakOut(Exception):
    """
    Exception raised by L{BadCopyable} to simulate a failure during
    serialization.
    """
    pass
class BadCopyable(pb.Copyable):
    """
    A copyable whose state extraction always fails, used to exercise
    error handling when serializing method arguments.
    """
    def getStateToCopyFor(self, p):
        raise FreakOut()
class BadCopySet(pb.Referenceable):
    """
    Target for sending a L{BadCopyable}; the remote method itself is a
    no-op because serialization fails before it is ever reached.
    """
    def remote_setBadCopy(self, bc):
        return None
class LocalRemoteTest(util.LocalAsRemote):
    """
    L{util.LocalAsRemote} implementation exposing one synchronous and two
    asynchronous methods through the sync_/async_ naming convention.
    """
    # Keep tracebacks quiet; failures are asserted on by the tests.
    reportAllTracebacks = 0

    def sync_add1(self, x):
        # callRemote("add1") returns this value directly (synchronous).
        return x + 1

    def async_add(self, x=0, y=1):
        # callRemote("add") wraps this result in a Deferred.
        return x + y

    def async_fail(self):
        # callRemote("fail") produces a Deferred errbacked with this error.
        raise RuntimeError()
class MyPerspective(pb.Avatar):
    """
    Avatar handed out by L{TestRealm} for authenticated logins.

    @ivar loggedIn: set to C{True} when the avatar is logged in.
    @type loggedIn: C{bool}

    @ivar loggedOut: set to C{True} when the avatar is logged out.
    @type loggedOut: C{bool}
    """
    implements(pb.IPerspective)

    loggedIn = loggedOut = False

    def __init__(self, avatarId):
        # Remember the identifier so tests can ask for it back remotely.
        self.avatarId = avatarId

    def perspective_getAvatarId(self):
        """
        Return the avatar identifier which was used to access this avatar.
        """
        return self.avatarId

    def perspective_getViewPoint(self):
        """
        Return a L{MyView} viewpoint bound to this perspective.
        """
        return MyView()

    def perspective_add(self, a, b):
        """
        Add the given objects and return the result.  This is a method
        unavailable on L{Echoer}, so it can only be invoked by authenticated
        users who received their avatar from L{TestRealm}.
        """
        return a + b

    def logout(self):
        """
        Record that this avatar was logged out.
        """
        self.loggedOut = True
class TestRealm(object):
    """
    A realm which repeatedly gives out a single instance of L{MyPerspective}
    for non-anonymous logins and which gives out a new instance of L{Echoer}
    for each anonymous login.

    @ivar lastPerspective: The L{MyPerspective} most recently created and
        returned from C{requestAvatar}.

    @ivar perspectiveFactory: A one-argument callable which will be used to
        create avatars to be returned from C{requestAvatar}.
    """
    perspectiveFactory = MyPerspective

    lastPerspective = None

    def requestAvatar(self, avatarId, mind, interface):
        """
        Verify that the mind and interface supplied have the expected values
        (this should really be done somewhere else, like inside a test method)
        and return an avatar appropriate for the given identifier.
        """
        assert interface == pb.IPerspective
        assert mind == "BRAINS!"
        # Anonymous logins get a fresh Echoer with a no-op logout.
        if avatarId is checkers.ANONYMOUS:
            return pb.IPerspective, Echoer(), lambda: None
        # Authenticated logins get a perspective from the factory,
        # remembered for inspection by the tests.
        avatar = self.perspectiveFactory(avatarId)
        avatar.loggedIn = True
        self.lastPerspective = avatar
        return pb.IPerspective, avatar, avatar.logout
class MyView(pb.Viewable):
    """
    Viewable used to verify that view methods receive the calling user's
    perspective as their first argument.
    """
    def view_check(self, user):
        # 'user' is the avatar on whose behalf the remote call was made.
        return isinstance(user, MyPerspective)
class NewCredTestCase(unittest.TestCase):
    """
    Tests related to the L{twisted.cred} support in PB.
    """
    def setUp(self):
        """
        Create a portal with no checkers and wrap it around a simple test
        realm.  Set up a PB server on a TCP port which serves perspectives
        using that portal.
        """
        self.realm = TestRealm()
        self.portal = portal.Portal(self.realm)
        self.factory = ConnectionNotifyServerFactory(self.portal)
        self.port = reactor.listenTCP(0, self.factory, interface="127.0.0.1")
        self.portno = self.port.getHost().port

    def tearDown(self):
        """
        Shut down the TCP port created by L{setUp}.
        """
        return self.port.stopListening()

    def getFactoryAndRootObject(self, clientFactory=pb.PBClientFactory):
        """
        Create a connection to the test server.

        @param clientFactory: the factory class used to create the connection.

        @return: a tuple (C{factory}, C{deferred}), where factory is an
            instance of C{clientFactory} and C{deferred} the L{Deferred} firing
            with the PB root object.
        """
        factory = clientFactory()
        rootObjDeferred = factory.getRootObject()
        connector = reactor.connectTCP('127.0.0.1', self.portno, factory)
        self.addCleanup(connector.disconnect)
        return factory, rootObjDeferred

    def test_getRootObject(self):
        """
        Assert only that L{PBClientFactory.getRootObject}'s Deferred fires with
        a L{RemoteReference}.
        """
        factory, rootObjDeferred = self.getFactoryAndRootObject()

        def gotRootObject(rootObj):
            self.assertIsInstance(rootObj, pb.RemoteReference)
            disconnectedDeferred = Deferred()
            rootObj.notifyOnDisconnect(disconnectedDeferred.callback)
            factory.disconnect()
            return disconnectedDeferred

        return rootObjDeferred.addCallback(gotRootObject)

    def test_deadReferenceError(self):
        """
        Test that when a connection is lost, calling a method on a
        RemoteReference obtained from it raises DeadReferenceError.
        """
        factory, rootObjDeferred = self.getFactoryAndRootObject()

        def gotRootObject(rootObj):
            disconnectedDeferred = Deferred()
            rootObj.notifyOnDisconnect(disconnectedDeferred.callback)

            def lostConnection(ign):
                self.assertRaises(
                    pb.DeadReferenceError,
                    rootObj.callRemote, 'method')

            disconnectedDeferred.addCallback(lostConnection)
            factory.disconnect()
            return disconnectedDeferred

        return rootObjDeferred.addCallback(gotRootObject)

    def test_clientConnectionLost(self):
        """
        Test that if the L{reconnecting} flag is passed with a True value then
        a remote call made from a disconnection notification callback gets a
        result successfully.
        """
        class ReconnectOnce(pb.PBClientFactory):
            reconnectedAlready = False
            def clientConnectionLost(self, connector, reason):
                reconnecting = not self.reconnectedAlready
                self.reconnectedAlready = True
                if reconnecting:
                    connector.connect()
                return pb.PBClientFactory.clientConnectionLost(
                    self, connector, reason, reconnecting)

        factory, rootObjDeferred = self.getFactoryAndRootObject(ReconnectOnce)

        def gotRootObject(rootObj):
            self.assertIsInstance(rootObj, pb.RemoteReference)
            d = Deferred()
            rootObj.notifyOnDisconnect(d.callback)
            factory.disconnect()

            def disconnected(ign):
                # NOTE: the inner 'd' deliberately shadows the outer one;
                # each nested scope waits on its own deferred.
                d = factory.getRootObject()

                def gotAnotherRootObject(anotherRootObj):
                    self.assertIsInstance(anotherRootObj, pb.RemoteReference)

                    d = Deferred()
                    anotherRootObj.notifyOnDisconnect(d.callback)
                    factory.disconnect()
                    return d
                return d.addCallback(gotAnotherRootObject)
            return d.addCallback(disconnected)
        return rootObjDeferred.addCallback(gotRootObject)

    def test_immediateClose(self):
        """
        Test that if a Broker loses its connection without receiving any bytes,
        it doesn't raise any exceptions or log any errors.
        """
        serverProto = self.factory.buildProtocol(('127.0.0.1', 12345))
        serverProto.makeConnection(protocol.FileWrapper(StringIO()))
        serverProto.connectionLost(failure.Failure(main.CONNECTION_DONE))

    def test_loginConnectionRefused(self):
        """
        L{PBClientFactory.login} returns a L{Deferred} which is errbacked
        with the L{ConnectionRefusedError} if the underlying connection is
        refused.
        """
        clientFactory = pb.PBClientFactory()
        loginDeferred = clientFactory.login(
            credentials.UsernamePassword("foo", "bar"))
        clientFactory.clientConnectionFailed(
            None,
            failure.Failure(
                ConnectionRefusedError("Test simulated refused connection")))
        return self.assertFailure(loginDeferred, ConnectionRefusedError)

    def _disconnect(self, ignore, factory):
        """
        Helper method disconnecting the given client factory and returning a
        C{Deferred} that will fire when the server connection has noticed the
        disconnection.
        """
        disconnectedDeferred = Deferred()
        self.factory.protocolInstance.notifyOnDisconnect(
            lambda: disconnectedDeferred.callback(None))
        factory.disconnect()
        return disconnectedDeferred

    def test_loginLogout(self):
        """
        Test that login can be performed with IUsernamePassword credentials and
        that when the connection is dropped the avatar is logged out.
        """
        self.portal.registerChecker(
            checkers.InMemoryUsernamePasswordDatabaseDontUse(user='pass'))
        factory = pb.PBClientFactory()
        creds = credentials.UsernamePassword("user", "pass")

        # NOTE: real code probably won't need anything where we have the
        # "BRAINS!" argument, passing None is fine. We just do it here to
        # test that it is being passed. It is used to give additional info to
        # the realm to aid perspective creation, if you don't need that,
        # ignore it.
        mind = "BRAINS!"

        d = factory.login(creds, mind)
        def cbLogin(perspective):
            self.assertTrue(self.realm.lastPerspective.loggedIn)
            self.assertIsInstance(perspective, pb.RemoteReference)
            return self._disconnect(None, factory)
        d.addCallback(cbLogin)

        def cbLogout(ignored):
            self.assertTrue(self.realm.lastPerspective.loggedOut)
        d.addCallback(cbLogout)

        connector = reactor.connectTCP("127.0.0.1", self.portno, factory)
        self.addCleanup(connector.disconnect)
        return d

    def test_logoutAfterDecref(self):
        """
        If a L{RemoteReference} to an L{IPerspective} avatar is decrefed and
        there remain no other references to the avatar on the server, the
        avatar is garbage collected and the logout method called.
        """
        loggedOut = Deferred()

        class EventPerspective(pb.Avatar):
            """
            An avatar which fires a Deferred when it is logged out.
            """
            def __init__(self, avatarId):
                pass

            def logout(self):
                loggedOut.callback(None)

        self.realm.perspectiveFactory = EventPerspective

        self.portal.registerChecker(
            checkers.InMemoryUsernamePasswordDatabaseDontUse(foo='bar'))
        factory = pb.PBClientFactory()
        d = factory.login(
            credentials.UsernamePassword('foo', 'bar'), "BRAINS!")
        def cbLoggedIn(avatar):
            # Just wait for the logout to happen, as it should since the
            # reference to the avatar will shortly no longer exists.
            return loggedOut
        d.addCallback(cbLoggedIn)
        def cbLoggedOut(ignored):
            # Verify that the server broker's _localCleanup dict isn't growing
            # without bound.
            self.assertEqual(self.factory.protocolInstance._localCleanup, {})
        d.addCallback(cbLoggedOut)
        d.addCallback(self._disconnect, factory)

        connector = reactor.connectTCP("127.0.0.1", self.portno, factory)
        self.addCleanup(connector.disconnect)
        return d

    def test_concurrentLogin(self):
        """
        Two different correct login attempts can be made on the same root
        object at the same time and produce two different resulting avatars.
        """
        self.portal.registerChecker(
            checkers.InMemoryUsernamePasswordDatabaseDontUse(
                foo='bar', baz='quux'))
        factory = pb.PBClientFactory()
        firstLogin = factory.login(
            credentials.UsernamePassword('foo', 'bar'), "BRAINS!")
        secondLogin = factory.login(
            credentials.UsernamePassword('baz', 'quux'), "BRAINS!")
        d = gatherResults([firstLogin, secondLogin])
        def cbLoggedIn((first, second)):
            return gatherResults([
                first.callRemote('getAvatarId'),
                second.callRemote('getAvatarId')])
        d.addCallback(cbLoggedIn)
        def cbAvatarIds((first, second)):
            self.assertEqual(first, 'foo')
            self.assertEqual(second, 'baz')
        d.addCallback(cbAvatarIds)
        d.addCallback(self._disconnect, factory)

        connector = reactor.connectTCP('127.0.0.1', self.portno, factory)
        self.addCleanup(connector.disconnect)
        return d

    def test_badUsernamePasswordLogin(self):
        """
        Test that a login attempt with an invalid user or invalid password
        fails in the appropriate way.
        """
        self.portal.registerChecker(
            checkers.InMemoryUsernamePasswordDatabaseDontUse(user='pass'))
        factory = pb.PBClientFactory()

        firstLogin = factory.login(
            credentials.UsernamePassword('nosuchuser', 'pass'))
        secondLogin = factory.login(
            credentials.UsernamePassword('user', 'wrongpass'))

        self.assertFailure(firstLogin, UnauthorizedLogin)
        self.assertFailure(secondLogin, UnauthorizedLogin)
        d = gatherResults([firstLogin, secondLogin])

        def cleanup(ignore):
            errors = self.flushLoggedErrors(UnauthorizedLogin)
            self.assertEquals(len(errors), 2)
            return self._disconnect(None, factory)
        d.addCallback(cleanup)

        connector = reactor.connectTCP("127.0.0.1", self.portno, factory)
        self.addCleanup(connector.disconnect)
        return d

    def test_anonymousLogin(self):
        """
        Verify that a PB server using a portal configured with an checker which
        allows IAnonymous credentials can be logged into using IAnonymous
        credentials.
        """
        self.portal.registerChecker(checkers.AllowAnonymousAccess())
        factory = pb.PBClientFactory()
        d = factory.login(credentials.Anonymous(), "BRAINS!")

        def cbLoggedIn(perspective):
            return perspective.callRemote('echo', 123)
        d.addCallback(cbLoggedIn)

        d.addCallback(self.assertEqual, 123)

        d.addCallback(self._disconnect, factory)

        connector = reactor.connectTCP("127.0.0.1", self.portno, factory)
        self.addCleanup(connector.disconnect)
        return d

    def test_anonymousLoginNotPermitted(self):
        """
        Verify that without an anonymous checker set up, anonymous login is
        rejected.
        """
        self.portal.registerChecker(
            checkers.InMemoryUsernamePasswordDatabaseDontUse(user='pass'))
        factory = pb.PBClientFactory()
        d = factory.login(credentials.Anonymous(), "BRAINS!")
        self.assertFailure(d, UnhandledCredentials)

        def cleanup(ignore):
            errors = self.flushLoggedErrors(UnhandledCredentials)
            self.assertEquals(len(errors), 1)
            return self._disconnect(None, factory)
        d.addCallback(cleanup)

        connector = reactor.connectTCP('127.0.0.1', self.portno, factory)
        self.addCleanup(connector.disconnect)
        return d

    def test_anonymousLoginWithMultipleCheckers(self):
        """
        Like L{test_anonymousLogin} but against a portal with a checker for
        both IAnonymous and IUsernamePassword.
        """
        self.portal.registerChecker(checkers.AllowAnonymousAccess())
        self.portal.registerChecker(
            checkers.InMemoryUsernamePasswordDatabaseDontUse(user='pass'))
        factory = pb.PBClientFactory()
        d = factory.login(credentials.Anonymous(), "BRAINS!")

        def cbLogin(perspective):
            return perspective.callRemote('echo', 123)
        d.addCallback(cbLogin)

        d.addCallback(self.assertEqual, 123)

        d.addCallback(self._disconnect, factory)

        connector = reactor.connectTCP('127.0.0.1', self.portno, factory)
        self.addCleanup(connector.disconnect)
        return d

    def test_authenticatedLoginWithMultipleCheckers(self):
        """
        Like L{test_anonymousLoginWithMultipleCheckers} but check that
        username/password authentication works.
        """
        self.portal.registerChecker(checkers.AllowAnonymousAccess())
        self.portal.registerChecker(
            checkers.InMemoryUsernamePasswordDatabaseDontUse(user='pass'))
        factory = pb.PBClientFactory()
        d = factory.login(
            credentials.UsernamePassword('user', 'pass'), "BRAINS!")

        def cbLogin(perspective):
            # 'add' only exists on MyPerspective, proving we got the
            # authenticated avatar rather than an Echoer.
            return perspective.callRemote('add', 100, 23)
        d.addCallback(cbLogin)

        d.addCallback(self.assertEqual, 123)

        d.addCallback(self._disconnect, factory)

        connector = reactor.connectTCP('127.0.0.1', self.portno, factory)
        self.addCleanup(connector.disconnect)
        return d

    def test_view(self):
        """
        Verify that a viewpoint can be retrieved after authenticating with
        cred.
        """
        self.portal.registerChecker(
            checkers.InMemoryUsernamePasswordDatabaseDontUse(user='pass'))
        factory = pb.PBClientFactory()
        d = factory.login(
            credentials.UsernamePassword("user", "pass"), "BRAINS!")

        def cbLogin(perspective):
            return perspective.callRemote("getViewPoint")
        d.addCallback(cbLogin)

        def cbView(viewpoint):
            return viewpoint.callRemote("check")
        d.addCallback(cbView)

        d.addCallback(self.assertTrue)

        d.addCallback(self._disconnect, factory)

        connector = reactor.connectTCP("127.0.0.1", self.portno, factory)
        self.addCleanup(connector.disconnect)
        return d
class NonSubclassingPerspective:
    """
    An L{IPerspective} implementation that does not subclass L{pb.Avatar};
    it echoes every message it receives back to the caller.
    """
    implements(pb.IPerspective)

    def __init__(self, avatarId):
        pass

    # IPerspective implementation
    def perspectiveMessageReceived(self, broker, message, args, kwargs):
        # Echo back the (message, args, kwargs) triple so tests can assert
        # exactly what was delivered.
        args = broker.unserialize(args, self)
        kwargs = broker.unserialize(kwargs, self)
        return broker.serialize((message, args, kwargs))

    # Methods required by TestRealm
    def logout(self):
        self.loggedOut = True
class NSPTestCase(unittest.TestCase):
    """
    Tests for authentication against a realm where the L{IPerspective}
    implementation is not a subclass of L{Avatar}.
    """
    def setUp(self):
        """
        Serve L{NonSubclassingPerspective} avatars for a single
        user/password account from a wrapped PB server factory.
        """
        self.realm = TestRealm()
        self.realm.perspectiveFactory = NonSubclassingPerspective
        self.portal = portal.Portal(self.realm)
        self.checker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
        self.checker.addUser("user", "pass")
        self.portal.registerChecker(self.checker)
        self.factory = WrappingFactory(pb.PBServerFactory(self.portal))
        self.port = reactor.listenTCP(0, self.factory, interface="127.0.0.1")
        self.addCleanup(self.port.stopListening)
        self.portno = self.port.getHost().port

    def test_NSP(self):
        """
        An L{IPerspective} implementation which does not subclass
        L{Avatar} can expose remote methods for the client to call.
        """
        factory = pb.PBClientFactory()
        d = factory.login(credentials.UsernamePassword('user', 'pass'),
                          "BRAINS!")
        reactor.connectTCP('127.0.0.1', self.portno, factory)
        # The perspective echoes back whatever it is sent.
        d.addCallback(lambda p: p.callRemote('ANYTHING', 'here', bar='baz'))
        d.addCallback(self.assertEquals,
                      ('ANYTHING', ('here',), {'bar': 'baz'}))
        def cleanup(ignored):
            factory.disconnect()
            for p in self.factory.protocols:
                p.transport.loseConnection()
        d.addCallback(cleanup)
        return d
class IForwarded(Interface):
    """
    Interface used for testing L{util.LocalAsyncForwarder}.  Only the
    methods declared here are forwarded to the wrapped object.
    """
    def forwardMe():
        """
        Simple synchronous method.
        """

    def forwardDeferred():
        """
        Simple asynchronous method.
        """
class Forwarded:
    """
    Test implementation of L{IForwarded}.

    @ivar forwarded: set if C{forwardMe} is called.
    @type forwarded: C{bool}
    @ivar unforwarded: set if C{dontForwardMe} is called.
    @type unforwarded: C{bool}
    """
    implements(IForwarded)
    forwarded = False
    unforwarded = False

    def forwardMe(self):
        """
        Set a local flag to test afterwards.
        """
        self.forwarded = True

    def dontForwardMe(self):
        """
        Set a local flag to test afterwards. This should not be called as it's
        not in the interface.
        """
        self.unforwarded = True

    def forwardDeferred(self):
        """
        Asynchronously return C{True}.
        """
        return succeed(True)
class SpreadUtilTestCase(unittest.TestCase):
    """
    Tests for L{twisted.spread.util}.
    """

    def test_sync(self):
        """
        Call a synchronous method of a L{util.LocalAsRemote} object and check
        the result.
        """
        o = LocalRemoteTest()
        self.assertEquals(o.callRemote("add1", 2), 3)

    def test_async(self):
        """
        Call an asynchronous method of a L{util.LocalAsRemote} object and
        check the result.
        """
        # Fix: the object was previously instantiated twice; once is enough.
        o = LocalRemoteTest()
        d = o.callRemote("add", 2, y=4)
        self.assertIsInstance(d, Deferred)
        d.addCallback(self.assertEquals, 6)
        return d

    def test_asyncFail(self):
        """
        Test an asynchronous failure on a remote method call.
        """
        o = LocalRemoteTest()
        d = o.callRemote("fail")
        def eb(f):
            # The failure must wrap the RuntimeError raised by async_fail.
            self.assertTrue(isinstance(f, failure.Failure))
            f.trap(RuntimeError)
        d.addCallbacks(lambda res: self.fail("supposed to fail"), eb)
        return d

    def test_remoteMethod(self):
        """
        Test the C{remoteMethod} facility of L{util.LocalAsRemote}.
        """
        o = LocalRemoteTest()
        m = o.remoteMethod("add1")
        self.assertEquals(m(3), 4)

    def test_localAsyncForwarder(self):
        """
        Test a call to L{util.LocalAsyncForwarder} using L{Forwarded} local
        object.
        """
        f = Forwarded()
        lf = util.LocalAsyncForwarder(f, IForwarded)
        lf.callRemote("forwardMe")
        self.assertTrue(f.forwarded)
        # Methods not declared on IForwarded must not be forwarded.
        lf.callRemote("dontForwardMe")
        self.assertFalse(f.unforwarded)
        rr = lf.callRemote("forwardDeferred")
        l = []
        rr.addCallback(l.append)
        self.assertEqual(l[0], 1)
class PBWithSecurityOptionsTest(unittest.TestCase):
    """
    Test security customization: the security options held by a PB
    factory must be propagated to each broker it builds.
    """

    def test_clientDefaultSecurityOptions(self):
        """
        By default, client broker should use C{jelly.globalSecurity} as
        security settings.
        """
        broker = pb.PBClientFactory().buildProtocol(None)
        self.assertIdentical(broker.security, jelly.globalSecurity)

    def test_serverDefaultSecurityOptions(self):
        """
        By default, server broker should use C{jelly.globalSecurity} as
        security settings.
        """
        broker = pb.PBServerFactory(Echoer()).buildProtocol(None)
        self.assertIdentical(broker.security, jelly.globalSecurity)

    def test_clientSecurityCustomization(self):
        """
        Check that the security settings are passed from the client factory to
        the broker object.
        """
        customSecurity = jelly.SecurityOptions()
        clientFactory = pb.PBClientFactory(security=customSecurity)
        self.assertIdentical(
            clientFactory.buildProtocol(None).security, customSecurity)

    def test_serverSecurityCustomization(self):
        """
        Check that the security settings are passed from the server factory to
        the broker object.
        """
        customSecurity = jelly.SecurityOptions()
        serverFactory = pb.PBServerFactory(Echoer(), security=customSecurity)
        self.assertIdentical(
            serverFactory.buildProtocol(None).security, customSecurity)
class DeprecationTests(unittest.TestCase):
    """
    Tests for certain deprecations of free-functions in L{twisted.spread.pb}.
    """
    def test_noOperationDeprecated(self):
        """
        Calling L{pb.noOperation} emits a deprecation warning.
        """
        deprecatedSince = Version("twisted", 8, 2, 0)
        self.callDeprecated(deprecatedSince, pb.noOperation, 1, 2, x=3, y=4)
    def test_printTraceback(self):
        """
        Calling L{pb.printTraceback} emits a deprecation warning.
        """
        deprecatedSince = Version("twisted", 8, 2, 0)
        self.callDeprecated(
            deprecatedSince, pb.printTraceback,
            "printTraceback deprecation fake traceback value")
#!/usr/bin/env python
"""Tests for the flow."""
import time
# pylint: disable=unused-import,g-bad-import-order
from grr.lib import server_plugins
from grr.lib.flows import tests
# pylint: enable=unused-import,g-bad-import-order
from grr.client import actions
from grr.client import vfs
from grr.lib import aff4
from grr.lib import data_store
from grr.lib import flags
from grr.lib import flow
from grr.lib import flow_runner
from grr.lib import queue_manager
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib import type_info
from grr.proto import flows_pb2
class FlowResponseSerialization(flow.GRRFlow):
  """Demonstrate saving responses in the flow."""
  @flow.StateHandler(next_state="Response1")
  def Start(self, unused_message=None):
    # Ask the client to echo a blob; the reply is delivered to Response1.
    self.CallClient("ReturnBlob",
                    rdfvalue.EchoRequest(data="test"),
                    next_state="Response1")
  @flow.StateHandler(next_state="Response2")
  def Response1(self, messages):
    """Record the message id for testing."""
    # Keep the responses in the flow's state so they are serialized between
    # states; Response2 compares fresh responses against these.
    self.state.Register("messages", messages)
    self.CallClient("ReturnBlob",
                    rdfvalue.EchoRequest(data="test"),
                    next_state="Response2")
  @flow.StateHandler()
  def Response2(self, messages):
    # We need to receive one response and it must be the same as that stored in
    # the previous state.
    if (len(list(messages)) != 1 or
        messages.status.status != rdfvalue.GrrStatus.ReturnedStatus.OK or
        list(messages) != list(self.state.messages)):
      raise RuntimeError("Messages not serialized")
class NoRequestChildFlow(flow.GRRFlow):
  """A child flow that completes immediately without issuing any requests."""
  @flow.StateHandler()
  def Start(self, unused_message):
    # Nothing to do: the flow finishes as soon as Start completes.
    pass
class CallClientChildFlow(flow.GRRFlow):
  """A child flow that issues a single client request from its Start state."""
  @flow.StateHandler()
  def Start(self, unused_message):
    self.CallClient("GetClientStats", next_state="End")
class NoRequestParentFlow(flow.GRRFlow):
  """A parent flow which only launches its child flow."""
  # Name of the child flow to launch; subclasses override this.
  child_flow = "NoRequestChildFlow"
  @flow.StateHandler(next_state="End")
  def Start(self, unused_message):
    self.CallFlow(self.child_flow)
  @flow.StateHandler()
  def End(self, unused_message):
    pass
class CallClientParentFlow(NoRequestParentFlow):
  """Parent flow whose child flow issues a client request."""
  child_flow = "CallClientChildFlow"
class FlowCreationTest(test_lib.FlowTestsBaseclass):
  """Test flow creation."""
  def testInvalidClientId(self):
    """Should raise if the client_id is invalid."""
    self.assertRaises(ValueError, flow.GRRFlow.StartFlow,
                      client_id="hello", flow_name="FlowOrderTest",
                      token=self.token)
  def testUnknownArg(self):
    """Check that flows reject unknown args."""
    self.assertRaises(type_info.UnknownArg, flow.GRRFlow.StartFlow,
                      client_id=self.client_id, flow_name="FlowOrderTest",
                      token=self.token, foobar=1)
  def testTypeAttributeIsNotAppendedWhenFlowIsClosed(self):
    # Re-open and close the flow object, then verify that only a single
    # version of the TYPE attribute exists.
    session_id = flow.GRRFlow.StartFlow(
        client_id=self.client_id, flow_name="FlowOrderTest", token=self.token)
    flow_obj = aff4.FACTORY.Open(session_id, aff4_type="FlowOrderTest",
                                 age=aff4.ALL_TIMES, mode="rw",
                                 token=self.token)
    flow_obj.Close()
    flow_obj = aff4.FACTORY.Open(session_id, aff4_type="FlowOrderTest",
                                 age=aff4.ALL_TIMES, token=self.token)
    types = list(flow_obj.GetValuesForAttribute(flow_obj.Schema.TYPE))
    self.assertEqual(len(types), 1)
  def testFlowSerialization(self):
    """Check that we can unpickle flows."""
    session_id = flow.GRRFlow.StartFlow(
        client_id=self.client_id, flow_name="FlowOrderTest", token=self.token)
    flow_obj = aff4.FACTORY.Open(session_id, aff4_type="FlowOrderTest",
                                 age=aff4.ALL_TIMES, token=self.token)
    self.assertEqual(flow_obj.__class__, test_lib.FlowOrderTest)
  def testFlowSerialization2(self):
    """Check that we can unpickle flows."""
    class TestClientMock(object):
      in_rdfvalue = rdfvalue.EchoRequest
      out_rdfvalue = rdfvalue.DataBlob
      def __init__(self):
        # Register us as an action plugin.
        actions.ActionPlugin.classes["ReturnBlob"] = self
      def ReturnBlob(self, unused_args):
        return [rdfvalue.DataBlob(integer=100)]
    # Run the flow in the simulated way. FlowResponseSerialization raises
    # internally if its responses do not survive state serialization.
    for _ in test_lib.TestFlowHelper("FlowResponseSerialization",
                                     TestClientMock(), token=self.token,
                                     client_id=self.client_id):
      pass
  def testTerminate(self):
    # A terminated flow must stop running and end in the ERROR state.
    session_id = flow.GRRFlow.StartFlow(
        client_id=self.client_id, flow_name="FlowOrderTest", token=self.token)
    flow.GRRFlow.TerminateFlow(session_id, token=self.token)
    flow_obj = aff4.FACTORY.Open(session_id, aff4_type="FlowOrderTest",
                                 age=aff4.ALL_TIMES, token=self.token)
    with flow_obj.GetRunner() as runner:
      self.assertEqual(runner.IsRunning(), False)
      self.assertEqual(runner.context.state,
                       rdfvalue.Flow.State.ERROR)
    # A termination reason must be recorded in the flow's status.
    reason = "no reason"
    session_id = flow.GRRFlow.StartFlow(
        client_id=self.client_id, flow_name="FlowOrderTest", token=self.token)
    flow.GRRFlow.TerminateFlow(session_id, reason=reason, token=self.token)
    flow_obj = aff4.FACTORY.Open(session_id, aff4_type="FlowOrderTest",
                                 age=aff4.ALL_TIMES, token=self.token)
    with flow_obj.GetRunner() as runner:
      self.assertEqual(runner.IsRunning(), False)
      self.assertEqual(runner.context.state,
                       rdfvalue.Flow.State.ERROR)
      self.assertTrue(reason in runner.context.status)
  def testChildTermination(self):
    session_id = flow.GRRFlow.StartFlow(
        client_id=self.client_id, flow_name="CallClientParentFlow",
        token=self.token)
    # The child URN should be contained within the parent session_id URN.
    flow_obj = aff4.FACTORY.Open(session_id, token=self.token)
    children = list(flow_obj.ListChildren())
    self.assertEqual(len(children), 1)
    # Terminating the parent must also terminate its child flow.
    reason = "just so"
    flow.GRRFlow.TerminateFlow(session_id, reason=reason, token=self.token)
    flow_obj = aff4.FACTORY.Open(session_id,
                                 aff4_type="CallClientParentFlow",
                                 token=self.token)
    with flow_obj.GetRunner() as runner:
      self.assertEqual(runner.IsRunning(), False)
      self.assertEqual(runner.context.state,
                       rdfvalue.Flow.State.ERROR)
      self.assertTrue("user test" in runner.context.status)
      self.assertTrue(reason in runner.context.status)
    child = aff4.FACTORY.Open(children[0],
                              aff4_type="CallClientChildFlow",
                              token=self.token)
    with child.GetRunner() as runner:
      self.assertEqual(runner.IsRunning(), False)
      self.assertEqual(runner.context.state,
                       rdfvalue.Flow.State.ERROR)
      self.assertTrue("user test" in runner.context.status)
      self.assertTrue("Parent flow terminated." in runner.context.status)
  def testNotification(self):
    session_id = flow.GRRFlow.StartFlow(
        client_id=self.client_id, flow_name="FlowOrderTest", token=self.token)
    with aff4.FACTORY.Open(session_id, aff4_type="FlowOrderTest",
                           age=aff4.ALL_TIMES, mode="rw",
                           token=self.token) as flow_obj:
      with flow_obj.GetRunner() as runner:
        msg = "Flow terminated due to error"
        runner.Notify("FlowStatus", session_id, msg)
    # The notification must show up on the user's AFF4 object.
    user_fd = aff4.FACTORY.Open(rdfvalue.RDFURN("aff4:/users").Add(
        self.token.username), mode="r", token=self.token)
    notifications = user_fd.ShowNotifications(reset=False)
    self.assertEqual(len(notifications), 1)
    for notification in notifications:
      self.assertTrue(notification.message.endswith(": " + msg))
      self.assertEqual(notification.subject, rdfvalue.RDFURN(session_id))
  def testFormatstringNotification(self):
    session_id = flow.GRRFlow.StartFlow(
        client_id=self.client_id, flow_name="FlowOrderTest", token=self.token)
    with aff4.FACTORY.Open(session_id, aff4_type="FlowOrderTest",
                           age=aff4.ALL_TIMES, mode="rw",
                           token=self.token) as flow_obj:
      with flow_obj.GetRunner() as runner:
        # msg contains %s; Notify/Status must not treat it as a format string.
        msg = "Flow reading %system% terminated due to error"
        runner.Notify("FlowStatus", session_id, msg)
        runner.Status(msg)
  def testSendRepliesAttribute(self):
    # Run the flow in the simulated way. Child's send_replies is set to False.
    # Parent flow will raise if number of responses is > 0.
    for _ in test_lib.TestFlowHelper(
        "ParentFlowWithoutResponses", ClientMock(), client_id=self.client_id,
        check_flow_errors=False, token=self.token,):
      pass
    self.assertEqual(ParentFlowWithoutResponses.success, True)
  # Maps session_id to the times at which notifications for it were queued.
  # NOTE(review): class attribute, so it is shared across test instances.
  notifications = {}
  def CollectNotifications(self, queue, session_ids, priorities, **kwargs):
    # Record when each notification was queued, then delegate to the real
    # _MultiNotifyQueue saved in self.old_notify.
    now = time.time()
    for session_id in session_ids:
      self.notifications.setdefault(session_id, []).append(now)
    self.old_notify(queue, session_ids, priorities, **kwargs)
  def testNoRequestChildFlowRace(self):
    manager = queue_manager.QueueManager(token=self.token)
    self.old_notify = manager._MultiNotifyQueue
    with test_lib.Stubber(queue_manager.QueueManager, "_MultiNotifyQueue",
                          self.CollectNotifications):
      session_id = flow.GRRFlow.StartFlow(
          client_id=self.client_id, flow_name="NoRequestParentFlow",
          token=self.token)
    self.assertIn(session_id, self.notifications)
    f = aff4.FACTORY.Open(session_id, token=self.token)
    # Check that the first notification came in after the flow was created.
    self.assertLess(int(f.Get(f.Schema.TYPE).age),
                    1e6 * min(self.notifications[session_id]),
                    "There was a notification for a flow before "
                    "the flow was created.")
  def testCallClientChildFlowRace(self):
    session_id = flow.GRRFlow.StartFlow(
        client_id=self.client_id, flow_name="CallClientParentFlow",
        token=self.token)
    client_requests = data_store.DB.ResolveRegex(
        self.client_id.Queue(), "task:.*", token=self.token)
    self.assertEqual(len(client_requests), 1)
    f = aff4.FACTORY.Open(session_id, token=self.token)
    for (_, _, timestamp) in client_requests:
      # Check that the client request was written after the flow was created.
      self.assertLess(int(f.Get(f.Schema.TYPE).age), timestamp,
                      "The client request was issued before "
                      "the flow was created.")
class FlowTest(test_lib.FlowTestsBaseclass):
  """Tests the Flow."""
  def testBrokenFlow(self):
    """Check that flows which call to incorrect states raise."""
    self.assertRaises(flow_runner.FlowRunnerError, flow.GRRFlow.StartFlow,
                      client_id=self.client_id, flow_name="BrokenFlow",
                      token=self.token)
  def SendMessages(self, response_ids, session_id, authenticated=True):
    """Send messages to the flow."""
    for response_id in response_ids:
      message = rdfvalue.GrrMessage(
          request_id=1,
          response_id=response_id,
          session_id=session_id)
      if authenticated:
        auth_state = rdfvalue.GrrMessage.AuthorizationState.AUTHENTICATED
        message.auth_state = auth_state
      self.SendMessage(message)
  def SendMessage(self, message):
    # Queue the message as a response in the data store.
    with queue_manager.QueueManager(token=self.token) as manager:
      manager.QueueResponse(message.session_id, message)
  def SendOKStatus(self, response_id, session_id):
    """Send a status message to the flow and mark its request complete."""
    message = rdfvalue.GrrMessage(
        request_id=1,
        response_id=response_id,
        session_id=session_id,
        type=rdfvalue.GrrMessage.Type.STATUS,
        auth_state=rdfvalue.GrrMessage.AuthorizationState.AUTHENTICATED)
    status = rdfvalue.GrrStatus(status=rdfvalue.GrrStatus.ReturnedStatus.OK)
    message.payload = status
    self.SendMessage(message)
    # Now also set the state on the RequestState
    request_state, _ = data_store.DB.Resolve(
        message.session_id.Add("state"),
        queue_manager.QueueManager.FLOW_REQUEST_TEMPLATE % message.request_id,
        token=self.token)
    request_state = rdfvalue.RequestState(request_state)
    request_state.status = status
    data_store.DB.Set(
        message.session_id.Add("state"),
        queue_manager.QueueManager.FLOW_REQUEST_TEMPLATE % message.request_id,
        request_state, token=self.token)
    return message
  def testReordering(self):
    """Check that out of order client messages are reordered."""
    flow_obj = self.FlowSetup("FlowOrderTest")
    # Simulate processing messages arriving in random order
    message_ids = [2, 1, 4, 3, 5]
    self.SendMessages(message_ids, flow_obj.session_id)
    # Send the status message
    message = self.SendOKStatus(6, flow_obj.session_id)
    runner = flow_runner.FlowRunner(flow_obj)
    runner.ProcessCompletedRequests([message])
    # Check that the messages were processed in order
    self.assertEqual(flow_obj.messages, [1, 2, 3, 4, 5])
  def testCallClient(self):
    """Flows can send client messages using CallClient()."""
    flow_obj = self.FlowSetup("FlowOrderTest")
    # Check that a message went out to the client
    manager = queue_manager.QueueManager(token=self.token)
    tasks = manager.Query(self.client_id, limit=100)
    self.assertEqual(len(tasks), 1)
    message = tasks[0]
    self.assertEqual(message.session_id, flow_obj.session_id)
    self.assertEqual(message.request_id, 1)
    self.assertEqual(message.name, "Test")
  def testCallClientWellKnown(self):
    """Well known flows can also call the client."""
    cls = flow.GRRFlow.classes["GetClientStatsAuto"]
    flow_obj = cls(cls.well_known_session_id, mode="rw", token=self.token)
    flow_obj.CallClient(self.client_id, "GetClientStats")
    # Check that a message went out to the client
    manager = queue_manager.QueueManager(token=self.token)
    tasks = manager.Query(self.client_id, limit=100)
    self.assertEqual(len(tasks), 1)
    message = tasks[0]
    # If we don't specify where to send the replies, they go to the devnull flow
    devnull = flow.GRRFlow.classes["IgnoreResponses"]
    self.assertEqual(message.session_id, devnull.well_known_session_id)
    self.assertEqual(message.request_id, 0)
    self.assertEqual(message.name, "GetClientStats")
    messages = []
    def StoreMessage(_, msg):
      messages.append(msg)
    with test_lib.Stubber(devnull, "ProcessMessage", StoreMessage):
      client_mock = test_lib.ActionMock("GetClientStats")
      for _ in test_lib.TestFlowHelper(
          "ClientActionRunner", client_mock, client_id=self.client_id,
          action="GetClientStats", token=self.token):
        pass
    # Make sure the messages arrived.
    self.assertEqual(len(messages), 1)
  def testAuthentication1(self):
    """Test that flows refuse to processes unauthenticated messages."""
    flow_obj = self.FlowSetup("FlowOrderTest")
    # Simulate processing messages arriving in random order
    message_ids = [2, 1, 4, 3, 5]
    self.SendMessages(message_ids, flow_obj.session_id,
                      authenticated=False)
    # Send the status message
    message = self.SendOKStatus(6, flow_obj.session_id)
    runner = flow_runner.FlowRunner(flow_obj)
    runner.ProcessCompletedRequests([message])
    # None of the unauthenticated messages may have been processed.
    self.assertEqual(flow_obj.messages, [])
  def testAuthentication2(self):
    """Test that flows refuse to processes unauthenticated messages.
    Here we try to simulate an attacker injecting unauthenticated
    messages midstream.
    The current implementation actually fails to process the entire
    flow since the injected messages displace the real ones if they
    arrive earlier. This can be an effective DoS against legitimate
    clients but would require attackers to guess session ids.
    """
    flow_obj = self.FlowSetup("FlowOrderTest")
    # Simulate processing messages arriving in random order
    message_ids = [1, 2]
    self.SendMessages(message_ids, flow_obj.session_id,
                      authenticated=True)
    # Now suppose some of the messages are spoofed
    message_ids = [3, 4, 5]
    self.SendMessages(message_ids, flow_obj.session_id,
                      authenticated=False)
    # And now our real messages arrive
    message_ids = [5, 6]
    self.SendMessages(message_ids, flow_obj.session_id,
                      authenticated=True)
    # Send the status message
    message = self.SendOKStatus(7, flow_obj.session_id)
    runner = flow_runner.FlowRunner(flow_obj)
    runner.ProcessCompletedRequests([message])
    # Some messages should actually be processed
    self.assertEqual(flow_obj.messages, [1, 2, 5, 6])
  def testWellKnownFlows(self):
    """Test the well known flows."""
    test_flow = self.FlowSetup("WellKnownSessionTest")
    # Make sure the session ID is well known
    self.assertEqual(test_flow.session_id,
                     test_lib.WellKnownSessionTest.well_known_session_id)
    # Messages to Well Known flows can be unauthenticated
    messages = [rdfvalue.GrrMessage(args=str(i)) for i in range(10)]
    for message in messages:
      test_flow.ProcessMessage(message)
    # The messages might be processed in arbitrary order
    test_flow.messages.sort()
    # Make sure that messages were processed even without a status
    # message to complete the transaction (Well known flows do not
    # have transactions or states - all messages always get to the
    # ProcessMessage method):
    self.assertEqual(test_flow.messages, range(10))
  def testArgParsing(self):
    """Test that arguments can be extracted and annotated successfully."""
    # Should raise on parsing default.
    self.assertRaises(type_info.TypeValueError, flow.GRRFlow.StartFlow,
                      client_id=self.client_id, flow_name="BadArgsFlow1",
                      arg1=False, token=self.token)
    # Should not raise now if we provide the correct type.
    flow.GRRFlow.StartFlow(
        client_id=self.client_id, flow_name="BadArgsFlow1",
        arg1=rdfvalue.PathSpec(), token=self.token)
class NoClientListener(flow.EventListener):  # pylint: disable=unused-variable
  """TestEvent listener which does not allow client access (no
  allow_client_access), so client-sourced events must be rejected."""
  well_known_session_id = rdfvalue.SessionID("aff4:/flows/W:test2")
  EVENTS = ["TestEvent"]
  # Accumulates (message, event) tuples for inspection by the tests.
  received_events = []
  @flow.EventHandler(auth_required=True)
  def ProcessMessage(self, message=None, event=None):
    # Store the results for later inspection.
    self.__class__.received_events.append((message, event))
class ClientListener(flow.EventListener):
  """TestEvent listener which explicitly allows client-sourced events
  (allow_client_access=True)."""
  well_known_session_id = rdfvalue.SessionID("aff4:/flows/W:test3")
  EVENTS = ["TestEvent"]
  # Accumulates (message, event) tuples for inspection by the tests.
  received_events = []
  @flow.EventHandler(auth_required=True, allow_client_access=True)
  def ProcessMessage(self, message=None, event=None):
    # Store the results for later inspection.
    self.__class__.received_events.append((message, event))
class FlowDoneListener(flow.EventListener):
  """Listener addressed directly via its session id (see the
  notification_urn usage in testFlowNotification); its EVENTS list is unused.
  """
  well_known_session_id = rdfvalue.SessionID("aff4:/flows/EV:FlowDone")
  EVENTS = ["Not used"]
  # Accumulates received messages for inspection by the tests.
  received_events = []
  @flow.EventHandler(auth_required=True)
  def ProcessMessage(self, message=None, event=None):
    _ = event
    # Store the results for later inspection.
    FlowDoneListener.received_events.append(message)
class GeneralFlowsTest(test_lib.FlowTestsBaseclass):
  """Tests some flows."""
  def testCallState(self):
    """Test the ability to chain flows."""
    CallStateFlow.success = False
    # Run the flow in the simulated way
    for _ in test_lib.TestFlowHelper("CallStateFlow", ClientMock(),
                                     client_id=self.client_id,
                                     token=self.token):
      pass
    self.assertEqual(CallStateFlow.success, True)
  def Work(self, client_mock, worker_mock):
    # Run client and worker in lockstep until neither has work left.
    while True:
      client_processed = client_mock.Next()
      flows_run = []
      for flow_run in worker_mock.Next():
        flows_run.append(flow_run)
      if client_processed == 0 and not flows_run:
        break
  def testDelayedCallState(self):
    """Tests the ability to delay a CallState invocation."""
    with test_lib.Stubber(time, "time", lambda: 10000):
      client_mock = ClientMock()
      client_mock = test_lib.MockClient(self.client_id, client_mock,
                                        token=self.token)
      worker_mock = test_lib.MockWorker(check_flow_errors=True,
                                        token=self.token)
      flow.GRRFlow.StartFlow(
          client_id=self.client_id, flow_name="DelayedCallStateFlow",
          token=self.token)
      self.Work(client_mock, worker_mock)
      # We should have done the first CallState so far.
      self.assertEqual(DelayedCallStateFlow.flow_ran, 1)
    with test_lib.Stubber(time, "time", lambda: 10050):
      # 50 seconds more is not enough.
      self.Work(client_mock, worker_mock)
      self.assertEqual(DelayedCallStateFlow.flow_ran, 1)
    with test_lib.Stubber(time, "time", lambda: 10100):
      # But 100 is.
      self.Work(client_mock, worker_mock)
      self.assertEqual(DelayedCallStateFlow.flow_ran, 2)
  def testChainedFlow(self):
    """Test the ability to chain flows."""
    ParentFlow.success = False
    # Run the flow in the simulated way
    for _ in test_lib.TestFlowHelper("ParentFlow", ClientMock(),
                                     client_id=self.client_id,
                                     token=self.token):
      pass
    self.assertEqual(ParentFlow.success, True)
  def testBrokenChainedFlow(self):
    """Test that exceptions are properly handled in chain flows."""
    BrokenParentFlow.success = False
    # Run the flow in the simulated way
    for _ in test_lib.TestFlowHelper(
        "BrokenParentFlow", ClientMock(), client_id=self.client_id,
        check_flow_errors=False, token=self.token):
      pass
    self.assertEqual(BrokenParentFlow.success, True)
  def testIteratedDirectoryListing(self):
    """Test that the client iterator works."""
    # Install the mock
    vfs.VFS_HANDLERS[rdfvalue.PathSpec.PathType.OS] = MockVFSHandler
    path = "/"
    # Run the flow in the simulated way
    client_mock = test_lib.ActionMock("IteratedListDirectory")
    for _ in test_lib.TestFlowHelper(
        "IteratedListDirectory", client_mock, client_id=self.client_id,
        pathspec=rdfvalue.PathSpec(path="/",
                                   pathtype=rdfvalue.PathSpec.PathType.OS),
        token=self.token):
      pass
    fd = aff4.FACTORY.Open(self.client_id.Add("fs/os").Add(path),
                           token=self.token)
    directory = [ch for ch in fd.OpenChildren()]
    pb = rdfvalue.PathSpec(path=path,
                           pathtype=rdfvalue.PathSpec.PathType.OS)
    directory2 = list(vfs.VFSOpen(pb).ListFiles())
    directory.sort()
    result = [x.Get(x.Schema.STAT) for x in directory]
    # Make sure that the resulting directory is what it should be
    for x, y in zip(result, directory2):
      # aff4path is cleared since only the VFS listing lacks it.
      x.aff4path = None
      self.assertEqual(x.st_mode, y.st_mode)
      self.assertProtoEqual(x, y)
  def testClientEventNotification(self):
    """Make sure that client events handled securely."""
    ClientListener.received_events = []
    NoClientListener.received_events = []
    event = rdfvalue.GrrMessage(
        source="C.1395c448a443c7d9",
        auth_state=rdfvalue.GrrMessage.AuthorizationState.AUTHENTICATED)
    event.payload = rdfvalue.PathSpec(path="foobar")
    flow.Events.PublishEvent("TestEvent", event, token=self.token)
    test_lib.MockWorker(token=self.token).Simulate()
    # The same event should be sent to both listeners, but only the listener
    # which accepts client messages should register it.
    self.assertProtoEqual(ClientListener.received_events[0][0].payload,
                          event.payload)
    self.assertEqual(NoClientListener.received_events, [])
  def testFlowNotification(self):
    FlowDoneListener.received_events = []
    # Install the mock
    vfs.VFS_HANDLERS[rdfvalue.PathSpec.PathType.OS] = MockVFSHandler
    path = rdfvalue.PathSpec(path="/",
                             pathtype=rdfvalue.PathSpec.PathType.OS)
    # Run the flow in the simulated way
    client_mock = test_lib.ActionMock("IteratedListDirectory")
    for _ in test_lib.TestFlowHelper(
        "IteratedListDirectory", client_mock, client_id=self.client_id,
        notification_urn=rdfvalue.SessionID("aff4:/flows/EV:FlowDone"),
        pathspec=path, token=self.token):
      pass
    # The event goes to an external queue so we need another worker.
    worker = test_lib.MockWorker(queue=rdfvalue.RDFURN("EV"), token=self.token)
    worker.Simulate()
    self.assertEqual(len(FlowDoneListener.received_events), 1)
    # The flow-done notification must carry name, client and OK status.
    flow_event = FlowDoneListener.received_events[0].payload
    self.assertEqual(flow_event.flow_name, "IteratedListDirectory")
    self.assertEqual(flow_event.client_id, "aff4:/C.1000000000000000")
    self.assertEqual(flow_event.status, rdfvalue.FlowNotification.Status.OK)
  def testEventNotification(self):
    """Test that events are sent to listeners."""
    NoClientListener.received_events = []
    worker = test_lib.MockWorker(token=self.token)
    event = rdfvalue.GrrMessage(
        session_id="aff4:/W:SomeFlow", name="test message",
        payload=rdfvalue.PathSpec(path="foobar", pathtype="TSK"),
        source="aff4:/C.0000000000000001", auth_state="AUTHENTICATED")
    # Not allowed to publish a message from a client..
    flow.Events.PublishEvent("TestEvent", event, token=self.token)
    worker.Simulate()
    self.assertEqual(NoClientListener.received_events, [])
    event.source = "Source"
    # First make the message unauthenticated.
    event.auth_state = rdfvalue.GrrMessage.AuthorizationState.UNAUTHENTICATED
    # Publish the event.
    flow.Events.PublishEvent("TestEvent", event, token=self.token)
    worker.Simulate()
    # This should not work - the unauthenticated message is dropped.
    self.assertEqual(NoClientListener.received_events, [])
    # Now make the message authenticated.
    event.auth_state = rdfvalue.GrrMessage.AuthorizationState.AUTHENTICATED
    # Publish the event.
    flow.Events.PublishEvent("TestEvent", event, token=self.token)
    worker.Simulate()
    # This should now work:
    self.assertEqual(len(NoClientListener.received_events), 1)
    # Make sure the source is correctly propagated.
    self.assertEqual(NoClientListener.received_events[0][0].source,
                     "aff4:/Source")
    self.assertEqual(NoClientListener.received_events[0][1].path, "foobar")
    NoClientListener.received_events = []
    # Now schedule ten events at the same time.
    for i in xrange(10):
      event.source = "Source%d" % i
      flow.Events.PublishEvent("TestEvent", event, token=self.token)
    worker.Simulate()
    self.assertEqual(len(NoClientListener.received_events), 10)
    # Events do not have to be delivered in order so we sort them here for
    # comparison.
    NoClientListener.received_events.sort(key=lambda x: x[0].source)
    for i in range(10):
      self.assertEqual(NoClientListener.received_events[i][0].source,
                       "aff4:/Source%d" % i)
      self.assertEqual(NoClientListener.received_events[i][1].path, "foobar")
  def testClientPrioritization(self):
    """Test that flow priorities work on the client side."""
    result = []
    client_mock = PriorityClientMock(result)
    client_mock = test_lib.MockClient(self.client_id, client_mock,
                                      token=self.token)
    worker_mock = test_lib.MockWorker(check_flow_errors=True,
                                      token=self.token)
    # Start some flows with different priorities.
    args = [(rdfvalue.GrrMessage.Priority.LOW_PRIORITY, "low priority"),
            (rdfvalue.GrrMessage.Priority.MEDIUM_PRIORITY, "medium priority"),
            (rdfvalue.GrrMessage.Priority.LOW_PRIORITY, "low priority2"),
            (rdfvalue.GrrMessage.Priority.HIGH_PRIORITY, "high priority"),
            (rdfvalue.GrrMessage.Priority.MEDIUM_PRIORITY, "medium priority2")]
    for (priority, msg) in args:
      flow.GRRFlow.StartFlow(
          client_id=self.client_id, flow_name="PriorityFlow", msg=msg,
          priority=priority, token=self.token)
    # Run client and worker until both are idle.
    while True:
      client_processed = client_mock.Next()
      flows_run = []
      for flow_run in worker_mock.Next():
        flows_run.append(flow_run)
      if client_processed == 0 and not flows_run:
        break
    # The flows should be run in order of priority.
    self.assertEqual(result[0:1],
                     [u"high priority"])
    self.assertEqual(sorted(result[1:3]),
                     [u"medium priority", u"medium priority2"])
    self.assertEqual(sorted(result[3:5]),
                     [u"low priority", u"low priority2"])
  def testWorkerPrioritization(self):
    """Test that flow priorities work on the worker side."""
    result = []
    client_mock = PriorityClientMock(result)
    client_mock = test_lib.MockClient(self.client_id, client_mock,
                                      token=self.token)
    worker_mock = test_lib.MockWorker(check_flow_errors=True,
                                      token=self.token)
    # Start some flows with different priorities.
    args = [(rdfvalue.GrrMessage.Priority.LOW_PRIORITY, "low priority"),
            (rdfvalue.GrrMessage.Priority.MEDIUM_PRIORITY, "medium priority"),
            (rdfvalue.GrrMessage.Priority.LOW_PRIORITY, "low priority2"),
            (rdfvalue.GrrMessage.Priority.HIGH_PRIORITY, "high priority"),
            (rdfvalue.GrrMessage.Priority.MEDIUM_PRIORITY, "medium priority2")]
    server_result = []
    PriorityFlow.storage = server_result
    for (priority, msg) in args:
      flow.GRRFlow.StartFlow(
          client_id=self.client_id, flow_name="PriorityFlow", msg=msg,
          priority=priority, token=self.token)
    while True:
      # Run all the clients first so workers have messages to choose from.
      client_processed = 1
      while client_processed:
        client_processed = client_mock.Next()
      # Now process the results, this should happen in the correct order.
      flows_run = []
      for flow_run in worker_mock.Next():
        flows_run.append(flow_run)
      if not flows_run:
        break
    # The flows should be run in order of priority.
    self.assertEqual(server_result[0:1],
                     [u"high priority"])
    self.assertEqual(sorted(server_result[1:3]),
                     [u"medium priority", u"medium priority2"])
    self.assertEqual(sorted(server_result[3:5]),
                     [u"low priority", u"low priority2"])
class ResourcedWorker(test_lib.MockWorker):
  """MockWorker with fixed fake resource-usage figures."""
  # Presumably consumed one entry per client operation by
  # test_lib.MockWorker — confirm against test_lib.
  USER_CPU = [1, 20, 5, 16]
  SYSTEM_CPU = [4, 20, 2, 8]
  NETWORK_BYTES = [180, 1000, 580, 2000]
class FlowLimitTests(test_lib.FlowTestsBaseclass):
  """Tests cpu and network-byte limit enforcement on flows."""
  def RunFlow(self, flow_name, **kwargs):
    """Run flow_name to completion; return the limits seen by the client."""
    result = {}
    client_mock = CPULimitClientMock(result)
    client_mock = test_lib.MockClient(self.client_id, client_mock,
                                      token=self.token)
    worker_mock = ResourcedWorker(check_flow_errors=True,
                                  token=self.token)
    flow.GRRFlow.StartFlow(
        client_id=self.client_id, flow_name=flow_name,
        token=self.token, **kwargs)
    # Run client and worker until both are idle.
    while True:
      client_processed = client_mock.Next()
      flows_run = []
      for flow_run in worker_mock.Next():
        flows_run.append(flow_run)
      if client_processed == 0 and not flows_run:
        break
    return result
  def testNetworkLimit(self):
    """Tests that the network bytes limit works."""
    result = self.RunFlow("NetworkLimitFlow", network_bytes_limit=10000)
    # Remaining budget decreases by the fake usage in ResourcedWorker.
    self.assertEqual(result["networklimit"], [10000, 9820, 8820, 8240])
  def testCPULimit(self):
    """Tests that the cpu limit works."""
    result = self.RunFlow("CPULimitFlow", cpu_limit=300)
    self.assertEqual(result["cpulimit"], [300, 295, 255])
class MockVFSHandler(vfs.VFSHandler):
  """A mock VFS handler with fake files."""
  # Ten fake directory entries named Foo0..Foo9. A comprehension avoids
  # leaking the loop variables (`x`, `child`) as class attributes, which the
  # previous class-level for-loop did.
  children = [
      rdfvalue.StatEntry(pathspec=rdfvalue.PathSpec(
          path="Foo%s" % x, pathtype=rdfvalue.PathSpec.PathType.OS))
      for x in range(10)]
  supported_pathtype = rdfvalue.PathSpec.PathType.OS
  def __init__(self, base_fd, pathspec=None):
    super(MockVFSHandler, self).__init__(base_fd, pathspec=pathspec)
    # Extend our own pathspec with the requested one.
    self.pathspec.Append(pathspec)
  def ListFiles(self):
    # Always return the same fake listing.
    return self.children
  def IsDirectory(self):
    # Only the root path is reported as a directory.
    return self.pathspec.path == "/"
class PriorityClientMock(object):
  """Client action mock recording the order in which Store actions run."""
  in_rdfvalue = rdfvalue.DataBlob
  def __init__(self, storage):
    # Register us as an action plugin.
    actions.ActionPlugin.classes["Store"] = self
    # Caller-provided list; appended to in execution order.
    self.storage = storage
  def Store(self, data):
    self.storage.append(self.in_rdfvalue(data).string)
    return [rdfvalue.DataBlob(string="Hello World")]
class PriorityFlowArgs(rdfvalue.RDFProtoStruct):
  """Protobuf-backed argument container for PriorityFlow."""
  protobuf = flows_pb2.PriorityFlowArgs
class PriorityFlow(flow.GRRFlow):
  """This flow is used to test priorities."""
  args_type = PriorityFlowArgs
  # Class-level list: tests rebind it to capture server-side ordering.
  storage = []
  @flow.StateHandler(next_state="Done")
  def Start(self):
    self.CallClient("Store", string=self.args.msg, next_state="Done")
  @flow.StateHandler()
  def Done(self, responses):
    _ = responses
    # Record the flow's msg in the order the worker processed it.
    self.storage.append(self.args.msg)
class CPULimitClientMock(object):
  """Client mock that records the cpu/network limits of incoming messages."""
  in_rdfvalue = rdfvalue.DataBlob
  def __init__(self, storage):
    # Register us as an action plugin.
    actions.ActionPlugin.classes["Store"] = self
    # Caller-provided dict collecting the observed limits.
    self.storage = storage
  def HandleMessage(self, message):
    # Track the remaining budgets the server attached to each message.
    self.storage.setdefault("cpulimit", []).append(message.cpu_limit)
    self.storage.setdefault("networklimit",
                            []).append(message.network_bytes_limit)
class CPULimitFlow(flow.GRRFlow):
  """This flow is used to test the cpu limit."""
  # Each state issues one client request so the test can observe the
  # remaining limit after every round trip.
  @flow.StateHandler(next_state="State1")
  def Start(self):
    self.CallClient("Store", string="Hey!", next_state="State1")
  @flow.StateHandler(next_state="State2")
  def State1(self):
    self.CallClient("Store", string="Hey!", next_state="State2")
  @flow.StateHandler(next_state="Done")
  def State2(self):
    self.CallClient("Store", string="Hey!", next_state="Done")
  @flow.StateHandler()
  def Done(self, responses):
    pass
class NetworkLimitFlow(flow.GRRFlow):
  """This flow is used to test the network bytes limit."""
  # Each state issues one client request so the test can observe the
  # remaining byte budget after every round trip.
  @flow.StateHandler(next_state="State1")
  def Start(self):
    self.CallClient("Store", next_state="State1")
  @flow.StateHandler(next_state="State2")
  def State1(self):
    # The mock worker doesn't track usage so we add it here.
    self.CallClient("Store", next_state="State2")
  @flow.StateHandler(next_state="State3")
  def State2(self):
    self.CallClient("Store", next_state="State3")
  @flow.StateHandler(next_state="Done")
  def State3(self):
    self.CallClient("Store", next_state="Done")
  @flow.StateHandler()
  def Done(self, responses):
    pass
class ClientMock(object):
  """Mock of client actions."""
  in_rdfvalue = None
  out_rdfvalue = rdfvalue.RDFString
  def __init__(self):
    # Register us as an action plugin.
    actions.ActionPlugin.classes["ReturnHello"] = self
  def ReturnHello(self, _):
    # Single fixed response, regardless of the request.
    return [rdfvalue.RDFString("Hello World")]
class ChildFlow(flow.GRRFlow):
  """This flow will be called by our parent."""
  @flow.StateHandler(next_state="ReceiveHello")
  def Start(self):
    self.CallClient("ReturnHello", next_state="ReceiveHello")
  @flow.StateHandler()
  def ReceiveHello(self, responses):
    # Relay the client's message to our parent: for every client response we
    # emit a marker reply followed by the response itself.
    for response in responses:
      self.SendReply(rdfvalue.RDFString("Child received"))
      self.SendReply(response)
class BrokenChildFlow(ChildFlow):
  """A broken flow which raises."""
  @flow.StateHandler()
  def ReceiveHello(self, responses):
    # Fail deliberately so parent flows can verify error propagation.
    raise IOError("Boo")
class ParentFlow(flow.GRRFlow):
  """This flow will launch a child flow."""
  # This is a global flag which will be set when the flow runs.
  success = False
  @flow.StateHandler(next_state="ParentReceiveHello")
  def Start(self):
    # Call the child flow.
    self.CallFlow("ChildFlow",
                  next_state="ParentReceiveHello")
  @flow.StateHandler()
  def ParentReceiveHello(self, responses):
    # Expect exactly the two replies ChildFlow.ReceiveHello sends per
    # response: the "Child received" marker, then the relayed greeting.
    responses = list(responses)
    if (len(responses) != 2 or "Child" not in unicode(responses[0]) or
        "Hello" not in unicode(responses[1])):
      raise RuntimeError("Messages not passed to parent")
    ParentFlow.success = True
class ParentFlowWithoutResponses(flow.GRRFlow):
  """This flow will launch a child flow."""
  # Global flag set when the flow completes successfully.
  success = False
  @flow.StateHandler(next_state="ParentReceiveHello")
  def Start(self):
    # Call the child flow.
    self.CallFlow("ChildFlow",
                  send_replies=False,
                  next_state="ParentReceiveHello")
  @flow.StateHandler()
  def ParentReceiveHello(self, responses):
    # With send_replies=False the child's SendReply calls must not be
    # forwarded to us.
    if responses:
      raise RuntimeError("Messages are not expected to be passed to parent")
    ParentFlowWithoutResponses.success = True
class BrokenParentFlow(flow.GRRFlow):
  """This flow will launch a broken child flow."""
  # This is a global flag which will be set when the flow runs.
  success = False
  @flow.StateHandler(next_state="ReceiveHello")
  def Start(self):
    # Call the child flow.
    self.CallFlow("BrokenChildFlow",
                  next_state="ReceiveHello")
  @flow.StateHandler()
  def ReceiveHello(self, responses):
    # The child raised IOError, so we expect an empty response set and a
    # non-OK status; anything else means the error was swallowed.
    if (responses or
        responses.status.status == rdfvalue.GrrStatus.ReturnedStatus.OK):
      raise RuntimeError("Error not propagated to parent")
    BrokenParentFlow.success = True
class CallStateFlow(flow.GRRFlow):
  """A flow that calls one of its own states."""
  # This is a global flag which will be set when the flow runs.
  success = False
  @flow.StateHandler(next_state="ReceiveHello")
  def Start(self):
    # Call the receive state.
    self.CallState([rdfvalue.RDFString("Hello")],
                   next_state="ReceiveHello",
                   request_data={"test_req_data": 2})
  @flow.StateHandler()
  def ReceiveHello(self, responses):
    # Verify both the payload and the request_data side channel survive
    # the CallState round trip.
    if responses.First() != "Hello":
      raise RuntimeError("Did not receive hello.")
    if responses.request_data["test_req_data"] != 2:
      raise RuntimeError("request_data did not propagate.")
    CallStateFlow.success = True
class DelayedCallStateFlow(flow.GRRFlow):
  """A flow that calls one of its own states with a delay."""
  # This is a global flag which will be set when the flow runs.
  # 0 = not run, 1 = immediate state ran, 2 = delayed state ran.
  flow_ran = 0
  @flow.StateHandler(next_state="ReceiveHello")
  def Start(self):
    # Call the child flow.
    self.CallState([rdfvalue.RDFString("Hello")],
                   next_state="ReceiveHello")
  @flow.StateHandler(next_state="DelayedHello")
  def ReceiveHello(self, responses):
    if responses.First() != "Hello":
      raise RuntimeError("Did not receive hello.")
    DelayedCallStateFlow.flow_ran = 1
    # Call the child flow.
    # Schedule the next state 100 time units in the future so the test can
    # check that delayed CallState requests are honored.
    self.CallState([rdfvalue.RDFString("Hello")],
                   next_state="DelayedHello",
                   start_time=rdfvalue.RDFDatetime().Now() + 100)
  @flow.StateHandler()
  def DelayedHello(self, responses):
    if responses.First() != "Hello":
      raise RuntimeError("Did not receive hello.")
    DelayedCallStateFlow.flow_ran = 2
class BadArgsFlow1Args(rdfvalue.RDFProtoStruct):
  """Args struct for BadArgsFlow1, backed by the BadArgsFlow1Args proto."""
  protobuf = flows_pb2.BadArgsFlow1Args
class BadArgsFlow1(flow.GRRFlow):
  """A flow that has args that mismatch type info."""
  # The args proto intentionally disagrees with the flow's declared type
  # info so tests can exercise argument validation failures.
  args_type = BadArgsFlow1Args
class FlowTestLoader(test_lib.GRRTestLoader):
  """Test loader restricted to tests derived from FlowTestsBaseclass."""
  base_class = test_lib.FlowTestsBaseclass
def main(argv):
  """Entry point: run the flow test suite using the custom loader."""
  # Run the full test suite
  test_lib.GrrTestProgram(argv=argv, testLoader=FlowTestLoader())
# Only run the suite when executed directly, not when imported.
if __name__ == "__main__":
  flags.StartMain(main)
| |
#!/usr/bin/env python
#
# Copyright 2007 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Stub version of the urlfetch API, based on httplib."""
import gzip
import logging
import os
import socket
import ssl
import sys
from google.appengine.api import apiproxy_stub
from google.appengine.api import full_app_id
from google.appengine.api import urlfetch
from google.appengine.api import urlfetch_service_pb2
from google.appengine.runtime import apiproxy_errors
import six
from six.moves import http_client
from six.moves import range
from six.moves import urllib
MAX_REQUEST_SIZE = 10 << 20  # 10 MB cap enforced by the API proxy stub.
MAX_RESPONSE_SIZE = 2 ** 25  # 32 MB; larger responses are truncated.
MAX_REDIRECTS = urlfetch.MAX_REDIRECTS
# Response codes that trigger redirect-following in _RetrieveURL.
REDIRECT_STATUSES = frozenset([
    http_client.MOVED_PERMANENTLY,
    http_client.FOUND,
    http_client.SEE_OTHER,
    http_client.TEMPORARY_REDIRECT,
])
# Methods preserved across a non-307 redirect; everything else is
# downgraded to GET (see _RetrieveURL).
PRESERVE_ON_REDIRECT = frozenset(['GET', 'HEAD'])
# Default fetch deadline, in seconds.
_API_CALL_DEADLINE = 900
_API_CALL_VALIDATE_CERTIFICATE_DEFAULT = True
# NOTE(review): same value as MAX_REQUEST_SIZE (10 << 20 == 10485760); this
# one bounds the request payload in _Dynamic_Fetch.
_MAX_REQUEST_SIZE = 10485760
# Spoofable / hop-by-hop headers stripped from outgoing requests.
_UNTRUSTED_REQUEST_HEADERS = frozenset([
    'content-length',
    'host',
    'vary',
    'via',
    'x-forwarded-for',
])
_MAX_URL_LENGTH = 10240
def _SetupSSL(path):
  """Records the CA certificate bundle path used for SSL validation.

  Sets the module-global CERT_PATH to *path* when the file exists, or to
  None (with a warning) when it does not, in which case certificate
  validation is unavailable.

  Args:
    path: Filesystem path to the CA certificate bundle.
  """
  global CERT_PATH
  if not os.path.exists(path):
    CERT_PATH = None
    logging.warning('%s missing; without this urlfetch will not be able to '
                    'validate SSL certificates.', path)
  else:
    CERT_PATH = path
def _IsAllowedPort(port):
  """Returns True if urlfetch may contact the given port in production.

  Allowed: an unspecified port (None), port 0, 80-90, 440-450, and all
  unprivileged ports (>= 1024). Non-numeric values are rejected.

  Args:
    port: Port number as an int, numeric string, or None.
  """
  if port is None:
    return True
  try:
    port = int(port)
  except ValueError:
    # Not a number at all -- definitely not allowed.
    return False
  return (port == 0 or
          80 <= port <= 90 or
          440 <= port <= 450 or
          port >= 1024)
def _IsLocalhost(host):
  """Determines whether 'host' points to localhost.

  Matches any host string beginning with 'localhost' or '127.0.0.1'
  (including host:port forms such as 'localhost:8080').
  """
  return any(host.startswith(prefix) for prefix in ('localhost', '127.0.0.1'))
def GetHeaders(msg, key):
  """Helper to get headers between python versions.

  Python 2 message objects expose getheaders(); Python 3's
  email.message.Message exposes get_all(). Both return every value for a
  repeated header.
  """
  getter = msg.getheaders if six.PY2 else msg.get_all
  return getter(key)
class URLFetchServiceStub(apiproxy_stub.APIProxyStub):
  """Stub version of the urlfetch API to be used with apiproxy_stub_map."""
  THREADSAFE = True
  def __init__(self,
               service_name='urlfetch',
               urlmatchers_to_fetch_functions=None):
    """Initializer.
    Args:
      service_name: Service name expected for all calls.
      urlmatchers_to_fetch_functions: A list of two-element tuples.
        The first element is a urlmatcher predicate function that takes
        a url and determines a match. The second is a function that
        can retrieve result for that url. If no match is found, a url is
        handled by the default _RetrieveURL function.
        When more than one match is possible, the first match is used.
    """
    super(URLFetchServiceStub, self).__init__(service_name,
                                              max_request_size=MAX_REQUEST_SIZE)
    self._urlmatchers_to_fetch_functions = urlmatchers_to_fetch_functions or []
    # (host, port) tuple set by _Dynamic_SetHttpProxy; None means no proxy.
    self.http_proxy = None
  def _Dynamic_SetHttpProxy(self, request, response):
    # Record the proxy to route subsequent plain-http fetches through.
    self.http_proxy = (request.http_proxy_host, request.http_proxy_port)
  def _Dynamic_Fetch(self, request, response):
    """Trivial implementation of URLFetchService::Fetch().
    Args:
      request: the fetch to perform, a URLFetchRequest
      response: the fetch response, a URLFetchResponse
    """
    if len(request.Url) > _MAX_URL_LENGTH:
      logging.error('URL is too long: %s...', request.Url[:50])
      raise apiproxy_errors.ApplicationError(
          urlfetch_service_pb2.URLFetchServiceError.INVALID_URL)
    (protocol, host, _, _, _) = urllib.parse.urlsplit(request.Url)
    # Map the proto enum to an HTTP verb; only POST/PUT/PATCH carry a body.
    payload = None
    if request.Method == urlfetch_service_pb2.URLFetchRequest.GET:
      method = 'GET'
    elif request.Method == urlfetch_service_pb2.URLFetchRequest.POST:
      method = 'POST'
      payload = request.Payload
    elif request.Method == urlfetch_service_pb2.URLFetchRequest.HEAD:
      method = 'HEAD'
    elif request.Method == urlfetch_service_pb2.URLFetchRequest.PUT:
      method = 'PUT'
      payload = request.Payload
    elif request.Method == urlfetch_service_pb2.URLFetchRequest.DELETE:
      method = 'DELETE'
    elif request.Method == urlfetch_service_pb2.URLFetchRequest.PATCH:
      method = 'PATCH'
      payload = request.Payload
    else:
      logging.error('Invalid method: %s', request.Method)
      raise apiproxy_errors.ApplicationError(
          urlfetch_service_pb2.URLFetchServiceError.INVALID_URL)
    if payload is not None and len(payload) > _MAX_REQUEST_SIZE:
      raise apiproxy_errors.ApplicationError(
          urlfetch_service_pb2.URLFetchServiceError.PAYLOAD_TOO_LARGE)
    if not (protocol == 'http' or protocol == 'https'):
      logging.error('Invalid protocol: %s', protocol)
      raise apiproxy_errors.ApplicationError(
          urlfetch_service_pb2.URLFetchServiceError.INVALID_URL)
    if not host:
      logging.error('Missing host.')
      raise apiproxy_errors.ApplicationError(
          urlfetch_service_pb2.URLFetchServiceError.INVALID_URL)
    # Drop spoofable headers before the request leaves the sandbox.
    self._SanitizeHttpHeaders(_UNTRUSTED_REQUEST_HEADERS,
                              request.header)
    deadline = _API_CALL_DEADLINE
    if request.HasField('Deadline'):
      deadline = request.Deadline
    validate_certificate = _API_CALL_VALIDATE_CERTIFICATE_DEFAULT
    if request.HasField('MustValidateServerCertificate'):
      validate_certificate = request.MustValidateServerCertificate
    # Dispatch to a registered per-URL handler when one matches, otherwise
    # fall back to the real network fetch.
    custom_fetch_function = self._GetCustomFetchFunction(request.Url)
    if custom_fetch_function:
      custom_fetch_function(request.Url, payload, method,
                            request.header, request, response,
                            follow_redirects=request.FollowRedirects,
                            deadline=deadline,
                            validate_certificate=validate_certificate)
    else:
      self._RetrieveURL(request.Url, payload, method,
                        request.header, request, response,
                        follow_redirects=request.FollowRedirects,
                        deadline=deadline,
                        validate_certificate=validate_certificate,
                        http_proxy=self.http_proxy)
  def _GetCustomFetchFunction(self, url):
    """Get the custom fetch function for a url.
    Args:
      url: A url to fetch from. str.
    Returns:
      A custom fetch function for this url, or None if no matching custom
      function is found.
    """
    # First matching urlmatcher wins, per the __init__ contract.
    for urlmatcher, fetch_function in self._urlmatchers_to_fetch_functions:
      if urlmatcher(url):
        return fetch_function
    return None
  @staticmethod
  def _RetrieveURL(url, payload, method, headers, request, response,
                   follow_redirects=True, deadline=_API_CALL_DEADLINE,
                   validate_certificate=_API_CALL_VALIDATE_CERTIFICATE_DEFAULT,
                   http_proxy=None):
    """Retrieves a URL over network.
    Args:
      url: String containing the URL to access.
      payload: Request payload to send, if any; None if no payload.
        If the payload is unicode, we assume it is utf-8.
      method: HTTP method to use (e.g., 'GET')
      headers: List of additional header objects to use for the request.
      request: A urlfetch_service_pb2.URLFetchRequest proto object from
        original request.
      response: A urlfetch_service_pb2.URLFetchResponse proto object to
        populate with the response data.
      follow_redirects: optional setting (defaulting to True) for whether or not
        we should transparently follow redirects (up to MAX_REDIRECTS)
      deadline: Number of seconds to wait for the urlfetch to finish.
      validate_certificate: If true, do not send request to server unless the
        certificate is valid, signed by a trusted CA and the hostname matches
        the certificate.
      http_proxy: Tuple of (hostname, port), where hostname is a string and port
        is an int, to use as the http proxy.
    Raises:
      Raises an apiproxy_errors.ApplicationError exception with
      INVALID_URL_ERROR in cases where:
        - The protocol of the redirected URL is bad or missing.
        - The port is not in the allowable range of ports.
      Raises an apiproxy_errors.ApplicationError exception with
      TOO_MANY_REDIRECTS in cases when MAX_REDIRECTS is exceeded
    """
    last_protocol = ''
    last_host = ''
    url = six.ensure_str(url, 'utf-8')
    if isinstance(payload, six.text_type):
      payload = six.ensure_str(payload, 'utf-8')
    # One iteration per request; the loop repeats only on redirects, and the
    # for/else clause below fires when MAX_REDIRECTS is exhausted.
    for _ in range(MAX_REDIRECTS + 1):
      parsed = urllib.parse.urlsplit(url)
      protocol, host, path, query, _ = parsed
      port = parsed.port
      if not _IsAllowedPort(port):
        logging.error(
            'urlfetch received %s ; port %s is not allowed in production!',
            url, port)
        raise apiproxy_errors.ApplicationError(
            urlfetch_service_pb2.URLFetchServiceError.INVALID_URL)
      if protocol and not host:
        logging.error('Missing host on redirect; target url is %s', url)
        raise apiproxy_errors.ApplicationError(
            urlfetch_service_pb2.URLFetchServiceError.INVALID_URL)
      # A relative redirect keeps the previous request's host and scheme.
      if not host and not protocol:
        host = last_host
        protocol = last_protocol
      # NOTE(review): parsed.port is an int or None, so this string
      # comparison can never be true -- looks like dead code; confirm the
      # intent was to strip an explicit ':0' suffix.
      if port == '0':
        host = host.replace(':0', '')
      # Header keys/values are kept as bytes; each key maps to a LIST of
      # values so repeated headers survive intact.
      adjusted_headers = {
          b'User-Agent': [(
              b'AppEngine-Google; (+http://code.google.com/appengine; appid: %s)'
              % full_app_id.get().encode())],
          b'Host': [six.ensure_binary(host)],
          b'Accept-Encoding': [b'gzip'],
      }
      if payload is not None:
        adjusted_headers[b'Content-Length'] = [
            six.ensure_binary(str(len(payload)))
        ]
      if method == 'POST' and payload:
        adjusted_headers[b'Content-Type'] = [
            b'application/x-www-form-urlencoded'
        ]
      passthrough_content_encoding = False
      for header in headers:
        header_key = six.ensure_binary(header.Key)
        header_value = six.ensure_binary(header.Value)
        if header_key.lower() == b'user-agent':
          # A caller-supplied User-Agent is prepended to, not replacing,
          # the AppEngine identifier.
          adjusted_headers[header_key.title()] = [
              (b'%s %s' % (six.ensure_binary(header_value),
                           adjusted_headers[b'User-Agent'][0]))
          ]
        elif header_key.lower() == b'accept-encoding':
          # The caller handles content decoding itself; skip our gzip
          # decompression below.
          passthrough_content_encoding = True
          adjusted_headers[header_key.title()] = [header_value]
        elif header_key.lower() == b'content-type':
          adjusted_headers[header_key.title()] = [header_value]
        else:
          adjusted_headers.setdefault(header_key, []).append(header_value)
      logging.debug(
          'Making HTTP request: host = %r, url = %r, payload = %.1000r, '
          'headers = %r', host, url, payload, adjusted_headers)
      try:
        proxy_host = None
        connection_kwargs = {'timeout': deadline}
        if protocol == 'http':
          connection_class = http_client.HTTPConnection
          default_port = 80
          # Explicit proxy config wins over the HTTP_PROXY environment
          # variable; localhost traffic is never proxied.
          if http_proxy and not _IsLocalhost(host):
            proxy_host = '%s:%d' % (http_proxy[0],
                                    http_proxy[1])
          elif os.environ.get('HTTP_PROXY') and not _IsLocalhost(host):
            _, proxy_host, _, _, _ = (
                urllib.parse.urlsplit(os.environ.get('HTTP_PROXY')))
        elif protocol == 'https':
          connection_class = http_client.HTTPSConnection
          if (validate_certificate and CERT_PATH):
            # Verify against the bundled CA list (see _SetupSSL).
            context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            context.verify_mode = ssl.CERT_REQUIRED
            context.load_verify_locations(CERT_PATH)
            context.check_hostname = True
            connection_kwargs['context'] = context
          default_port = 443
          if os.environ.get('HTTPS_PROXY') and not _IsLocalhost(host):
            _, proxy_host, _, _, _ = (
                urllib.parse.urlsplit(os.environ.get('HTTPS_PROXY')))
        else:
          error_msg = 'Redirect specified invalid protocol: "%s"' % protocol
          logging.error(error_msg)
          raise apiproxy_errors.ApplicationError(
              urlfetch_service_pb2.URLFetchServiceError.INVALID_URL, error_msg)
        if (not validate_certificate and sys.version_info >= (2, 7, 9)
            and protocol == 'https'):
          connection_kwargs['context'] = ssl._create_unverified_context()
        if proxy_host:
          # Connect to the proxy and pass the absolute URL; https goes
          # through a CONNECT tunnel instead.
          proxy_address, _, proxy_port = proxy_host.partition(':')
          connection = connection_class(
              proxy_address, proxy_port if proxy_port else default_port,
              **connection_kwargs)
          full_path = urllib.parse.urlunsplit((protocol, host, path, query, ''))
          if protocol == 'https':
            connection.set_tunnel(host)
        else:
          connection = connection_class(host, **connection_kwargs)
          full_path = urllib.parse.urlunsplit(('', '', path, query, ''))
        last_protocol = protocol
        last_host = host
        try:
          _SendRequest(connection, method, full_path, payload, adjusted_headers)
          http_response = connection.getresponse()
          if method == 'HEAD':
            http_response_data = ''
          else:
            http_response_data = http_response.read()
        finally:
          connection.close()
      # Translate transport-level failures into urlfetch service errors.
      except ssl.CertificateError as e:
        raise apiproxy_errors.ApplicationError(
            urlfetch_service_pb2.URLFetchServiceError.SSL_CERTIFICATE_ERROR,
            str(e))
      except ssl.SSLError as e:
        app_error = (
            urlfetch_service_pb2.URLFetchServiceError.DEADLINE_EXCEEDED
            if 'timed out' in str(e) else
            urlfetch_service_pb2.URLFetchServiceError.SSL_CERTIFICATE_ERROR)
        raise apiproxy_errors.ApplicationError(app_error, str(e))
      except socket.timeout as e:
        raise apiproxy_errors.ApplicationError(
            urlfetch_service_pb2.URLFetchServiceError.DEADLINE_EXCEEDED, str(e))
      except (http_client.error, socket.error, IOError) as e:
        raise apiproxy_errors.ApplicationError(
            urlfetch_service_pb2.URLFetchServiceError.FETCH_ERROR, str(e))
      if http_response.status >= 600:
        raise apiproxy_errors.ApplicationError(
            urlfetch_service_pb2.URLFetchServiceError.FETCH_ERROR,
            'Status %s unknown' % http_response.status)
      if http_response.status in REDIRECT_STATUSES and follow_redirects:
        url = http_response.getheader('Location', None)
        if url is None:
          error_msg = 'Missing "Location" header for redirect.'
          logging.error(error_msg)
          raise apiproxy_errors.ApplicationError(
              urlfetch_service_pb2.URLFetchServiceError.MALFORMED_REPLY,
              error_msg)
        # Per RFC 7231 semantics mirrored here: any non-307 redirect of a
        # method other than GET/HEAD is retried as a body-less GET.
        if (http_response.status != http_client.TEMPORARY_REDIRECT and
            method not in PRESERVE_ON_REDIRECT):
          logging.warning('Received a %s to a %s. Redirecting with a GET',
                          http_response.status, method)
          method = 'GET'
          payload = None
      else:
        # Final (non-redirect) response: populate the response proto.
        response.StatusCode = http_response.status
        if (http_response.getheader('content-encoding') == 'gzip' and
            not passthrough_content_encoding):
          gzip_stream = six.BytesIO(http_response_data)
          gzip_file = gzip.GzipFile(fileobj=gzip_stream)
          http_response_data = gzip_file.read()
        response.Content = six.ensure_binary(
            http_response_data[:MAX_RESPONSE_SIZE])
        key_set = set([key.lower() for key in http_response.msg.keys()])
        for header_key in key_set:
          header_values = GetHeaders(http_response.msg, header_key)
          # Hide the gzip transfer detail from the caller (we already
          # decompressed), and rewrite Content-Length to match the
          # possibly-truncated body.
          if (header_key.lower() == 'content-encoding' and
              'gzip' in header_values and not passthrough_content_encoding):
            continue
          if header_key.lower() == 'content-length' and method != 'HEAD':
            header_values = [str(len(response.Content))]
          for header_value in header_values:
            response.header.add(Key=header_key, Value=header_value)
        if len(http_response_data) > MAX_RESPONSE_SIZE:
          response.ContentWasTruncated = True
        if request.Url != url:
          response.FinalUrl = url
        break
    else:
      error_msg = 'Too many repeated redirects'
      logging.error(error_msg)
      raise apiproxy_errors.ApplicationError(
          urlfetch_service_pb2.URLFetchServiceError.TOO_MANY_REDIRECTS,
          error_msg)
  def _SanitizeHttpHeaders(self, untrusted_headers, headers):
    """Cleans "unsafe" headers from the HTTP request, in place.
    Args:
      untrusted_headers: Set of untrusted headers names (all lowercase).
      headers: List of Header objects. The list is modified in place.
    """
    prohibited_headers = [h.Key for h in headers
                          if h.Key.lower() in untrusted_headers]
    if prohibited_headers:
      logging.warning('Stripped prohibited headers from URLFetch request: %s',
                      prohibited_headers)
      # Delete from the end so earlier indices stay valid while mutating.
      for index in reversed(range(len(headers))):
        if headers[index].Key.lower() in untrusted_headers:
          del headers[index]
def _SendRequest(connection, method, full_path, payload, headers):
  """Sends an HTTP request on a connection to the URL described by full_path.
  Compared to httplib.HTTPConnection's request method, this preserves all values
  for repeated headers.
  Args:
    connection: An instance or subclass of httplib.HTTPConnection.
    method: The string HTTP method name, eg 'GET'.
    full_path: The string full URL path for the request.
    payload: The string request payload to send.
    headers: A dict of headers to send with the request. The dict maps string
      header names to lists of associated header values.
  """
  connection.connect()
  # Header keys are bytes (see adjusted_headers in _RetrieveURL), hence the
  # byte-literal membership tests below.
  header_names = [name.lower() for name in headers]
  connection.putrequest(
      method,
      full_path,
      skip_host=b'host' in header_names,
      skip_accept_encoding=b'accept-encoding' in header_names)
  # Emit each value separately so repeated headers are preserved verbatim.
  for header, values in six.iteritems(headers):
    for value in values:
      connection.putheader(header, value)
  if payload is not None and b'content-length' not in header_names:
    # NOTE(review): Python 3's http.client._set_content_length takes
    # (body, method); this single-argument call matches the Python 2
    # httplib signature -- confirm against the http_client in use here.
    connection._set_content_length(payload)
  connection.endheaders(payload)
| |
from __future__ import absolute_import
import datetime
import jwt
from unidiff import PatchSet
from six.moves.urllib.parse import urlparse
from sentry.utils.http import absolute_uri
from sentry.integrations.atlassian_connect import get_query_hash
from sentry.integrations.client import ApiClient
# JWT issuer identifier for this Sentry install's Bitbucket integration,
# e.g. "sentry.example.com.bitbucket".
BITBUCKET_KEY = '%s.bitbucket' % urlparse(absolute_uri()).hostname
class BitbucketAPIPath(object):
    """
    Bitbucket 2.0 REST API path templates.
    All UUID's must be surrounded by curly braces.
    repo is the fully qualified slug containing 'username/repo_slug'
    repo_slug - repository slug or UUID
    username - username or UUID
    """
    # Issue endpoints.
    issue = u'/2.0/repositories/{repo}/issues/{issue_id}'
    issues = u'/2.0/repositories/{repo}/issues'
    issue_comments = u'/2.0/repositories/{repo}/issues/{issue_id}/comments'
    # Repository endpoints.
    repository = u'/2.0/repositories/{repo}'
    repositories = u'/2.0/repositories/{username}'
    repository_commits = u'/2.0/repositories/{repo}/commits/{revision}'
    repository_diff = u'/2.0/repositories/{repo}/diff/{spec}'
    # Webhook endpoints.
    repository_hook = u'/2.0/repositories/{repo}/hooks/{uid}'
    repository_hooks = u'/2.0/repositories/{repo}/hooks'
class BitbucketApiClient(ApiClient):
    """
    The API Client for the Bitbucket Integration
    NOTE: repo is the fully qualified slug containing 'username/repo_slug'
    """
    def __init__(self, base_url, shared_secret, subject, *args, **kwargs):
        # subject is probably the clientKey
        super(BitbucketApiClient, self).__init__(*args, **kwargs)
        self.base_url = base_url
        self.shared_secret = shared_secret
        self.subject = subject
    def request(self, method, path, data=None, params=None, **kwargs):
        # Every request is signed with a short-lived (5 minute) JWT whose
        # 'qsh' claim binds the token to this exact method/path/params.
        jwt_payload = {
            'iss': BITBUCKET_KEY,
            'iat': datetime.datetime.utcnow(),
            'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=5 * 60),
            'qsh': get_query_hash(path, method.upper(), params),
            'sub': self.subject,
        }
        encoded_jwt = jwt.encode(jwt_payload, self.shared_secret)
        headers = {
            'Authorization': 'JWT %s' % encoded_jwt
        }
        return self._request(method, path, data=data, params=params, headers=headers, **kwargs)
    def get_issue(self, repo, issue_id):
        # Fetch a single issue by id.
        return self.get(BitbucketAPIPath.issue.format(
            repo=repo,
            issue_id=issue_id,
        ))
    def get_issues(self, repo):
        # List issues for the repository.
        return self.get(BitbucketAPIPath.issues.format(
            repo=repo,
        ))
    def create_issue(self, repo, data):
        return self.post(
            path=BitbucketAPIPath.issues.format(
                repo=repo,
            ),
            data=data,
        )
    def search_issues(self, repo, query):
        # Query filters can be found here:
        # https://developer.atlassian.com/bitbucket/api/2/reference/meta/filtering#supp-endpoints
        return self.get(
            path=BitbucketAPIPath.issues.format(
                repo=repo,
            ),
            params={'q': query},
        )
    def create_comment(self, repo, issue_id, data):
        # Call the method as below:
        # client.create_comment('repo', '1', {"content": {"raw": "Whatever you're commenting."}})
        # https://developer.atlassian.com/bitbucket/api/2/reference/resource/repositories/%7Busername%7D/%7Brepo_slug%7D/issues/%7Bissue_id%7D/comments#post
        return self.post(
            path=BitbucketAPIPath.issue_comments.format(
                repo=repo,
                issue_id=issue_id,
            ),
            data=data,
        )
    def get_repo(self, repo):
        return self.get(BitbucketAPIPath.repository.format(
            repo=repo,
        ))
    def get_repos(self, username):
        return self.get(BitbucketAPIPath.repositories.format(
            username=username,
        ))
    def create_hook(self, repo, data):
        return self.post(
            path=BitbucketAPIPath.repository_hooks.format(
                repo=repo,
            ),
            data=data
        )
    def delete_hook(self, repo, hook_id):
        return self.delete(path=BitbucketAPIPath.repository_hook.format(
            repo=repo,
            uid=hook_id,
        ))
    def transform_patchset(self, patch_set):
        # Flatten a unidiff PatchSet into Sentry file-change records:
        # 'A' = added, 'D' = deleted, 'M' = modified.
        file_changes = []
        for patched_file in patch_set.added_files:
            file_changes.append({
                'path': patched_file.path,
                'type': 'A',
            })
        for patched_file in patch_set.removed_files:
            file_changes.append({
                'path': patched_file.path,
                'type': 'D',
            })
        for patched_file in patch_set.modified_files:
            file_changes.append({
                'path': patched_file.path,
                'type': 'M',
            })
        return file_changes
    def get_commit_filechanges(self, repo, sha):
        # The diff endpoint returns raw patch text rather than JSON, hence
        # allow_text and parsing with unidiff.
        resp = self.get(
            BitbucketAPIPath.repository_diff.format(
                repo=repo,
                spec=sha,
            ),
            allow_text=True,
        )
        diff_file = resp.text
        ps = PatchSet.from_string(diff_file)
        return self.transform_patchset(ps)
    def zip_commit_data(self, repo, commit_list):
        # Attach per-commit file changes to each commit dict, in place.
        for commit in commit_list:
            commit.update(
                {'patch_set': self.get_commit_filechanges(repo, commit['hash'])})
        return commit_list
    def get_last_commits(self, repo, end_sha):
        # return api request that fetches last ~30 commits
        # see https://developer.atlassian.com/bitbucket/api/2/reference/resource/repositories/%7Busername%7D/%7Brepo_slug%7D/commits/%7Brevision%7D
        # using end_sha as parameter
        data = self.get(BitbucketAPIPath.repository_commits.format(
            repo=repo,
            revision=end_sha,
        ))
        return self.zip_commit_data(repo, data['values'])
    def compare_commits(self, repo, start_sha, end_sha):
        # where start_sha is oldest and end_sha is most recent
        # see
        # https://developer.atlassian.com/bitbucket/api/2/reference/resource/repositories/%7Busername%7D/%7Brepo_slug%7D/commits/%7Brevision%7D
        commits = []
        done = False
        url = BitbucketAPIPath.repository_commits.format(
            repo=repo,
            revision=end_sha,
        )
        # Walk pages newest-first until start_sha is reached, the pages run
        # out, or a safety cap of 90 collected commits is hit.
        while not done and len(commits) < 90:
            data = self.get(url)
            for commit in data['values']:
                if commit['hash'] == start_sha:
                    done = True
                    break
                commits.append(commit)
            # move page forward
            try:
                url = data['next']
            except KeyError:
                break
        return self.zip_commit_data(repo, commits)
| |
# -*- coding: utf-8 -*-
# This coding header is significant for tests, as the debug view is parsing
# files to search for such a header to decode the source file content
from __future__ import unicode_literals
import importlib
import inspect
import os
import re
import shutil
import sys
import tempfile
from unittest import skipIf
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from django.template.base import TemplateDoesNotExist
from django.test import RequestFactory, TestCase, override_settings
from django.utils import six
from django.utils.encoding import force_bytes, force_text
from django.views.debug import CallableSettingWrapper, ExceptionReporter
from .. import BrokenException, except_args
from ..views import (
custom_exception_reporter_filter_view, multivalue_dict_key_error,
non_sensitive_view, paranoid_view, sensitive_args_function_caller,
sensitive_kwargs_function_caller, sensitive_method_view, sensitive_view,
)
class CallableSettingWrapperTests(TestCase):
    """ Unittests for CallableSettingWrapper
    """
    def test_repr(self):
        # The wrapper should delegate repr() to the wrapped callable rather
        # than exposing its own wrapper repr.
        class WrappedCallable(object):
            def __repr__(self):
                return "repr from the wrapped callable"
            def __call__(self):
                pass
        actual = repr(CallableSettingWrapper(WrappedCallable()))
        self.assertEqual(actual, "repr from the wrapped callable")
@override_settings(DEBUG=True, ROOT_URLCONF="view_tests.urls")
class DebugViewTests(TestCase):
def test_files(self):
response = self.client.get('/raises/')
self.assertEqual(response.status_code, 500)
data = {
'file_data.txt': SimpleUploadedFile('file_data.txt', b'haha'),
}
response = self.client.post('/raises/', data)
self.assertContains(response, 'file_data.txt', status_code=500)
self.assertNotContains(response, 'haha', status_code=500)
def test_400(self):
# Ensure that when DEBUG=True, technical_500_template() is called.
response = self.client.get('/raises400/')
self.assertContains(response, '<div class="context" id="', status_code=400)
# Ensure no 403.html template exists to test the default case.
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
}])
def test_403(self):
response = self.client.get('/raises403/')
self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)
# Set up a test 403.html template.
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'loaders': [
('django.template.loaders.locmem.Loader', {
'403.html': 'This is a test template for a 403 error.',
}),
],
},
}])
def test_403_template(self):
response = self.client.get('/raises403/')
self.assertContains(response, 'test template', status_code=403)
def test_404(self):
response = self.client.get('/raises404/')
self.assertEqual(response.status_code, 404)
def test_raised_404(self):
response = self.client.get('/views/raises404/')
self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)
def test_404_not_in_urls(self):
response = self.client.get('/not-in-urls')
self.assertNotContains(response, "Raised by:", status_code=404)
self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)
def test_technical_404(self):
response = self.client.get('/views/technical404/')
self.assertContains(response, "Raised by:", status_code=404)
self.assertContains(response, "view_tests.views.technical404", status_code=404)
def test_classbased_technical_404(self):
response = self.client.get('/views/classbased404/')
self.assertContains(response, "Raised by:", status_code=404)
self.assertContains(response, "view_tests.views.Http404View", status_code=404)
def test_view_exceptions(self):
for n in range(len(except_args)):
self.assertRaises(BrokenException, self.client.get,
reverse('view_exception', args=(n,)))
def test_non_l10ned_numeric_ids(self):
"""
Numeric IDs and fancy traceback context blocks line numbers shouldn't be localized.
"""
with self.settings(DEBUG=True, USE_L10N=True):
response = self.client.get('/raises500/')
# We look for a HTML fragment of the form
# '<div class="context" id="c38123208">', not '<div class="context" id="c38,123,208"'
self.assertContains(response, '<div class="context" id="', status_code=500)
match = re.search(b'<div class="context" id="(?P<id>[^"]+)">', response.content)
self.assertIsNotNone(match)
id_repr = match.group('id')
self.assertFalse(re.search(b'[^c0-9]', id_repr),
"Numeric IDs in debug response HTML page shouldn't be localized (value: %s)." % id_repr)
def test_template_exceptions(self):
for n in range(len(except_args)):
try:
self.client.get(reverse('template_exception', args=(n,)))
except Exception:
raising_loc = inspect.trace()[-1][-2][0].strip()
self.assertNotEqual(raising_loc.find('raise BrokenException'), -1,
"Failed to find 'raise BrokenException' in last frame of traceback, instead found: %s" %
raising_loc)
def test_template_loader_postmortem(self):
"""Tests for not existing file"""
template_name = "notfound.html"
with tempfile.NamedTemporaryFile(prefix=template_name) as tmpfile:
tempdir = os.path.dirname(tmpfile.name)
template_path = os.path.join(tempdir, template_name)
with override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [tempdir],
}]):
response = self.client.get(reverse('raises_template_does_not_exist', kwargs={"path": template_name}))
self.assertContains(response, "%s (File does not exist)" % template_path, status_code=500, count=1)
@skipIf(sys.platform == "win32", "Python on Windows doesn't have working os.chmod() and os.access().")
def test_template_loader_postmortem_notreadable(self):
"""Tests for not readable file"""
with tempfile.NamedTemporaryFile() as tmpfile:
template_name = tmpfile.name
tempdir = os.path.dirname(tmpfile.name)
template_path = os.path.join(tempdir, template_name)
os.chmod(template_path, 0o0222)
with override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [tempdir],
}]):
response = self.client.get(reverse('raises_template_does_not_exist', kwargs={"path": template_name}))
self.assertContains(response, "%s (File is not readable)" % template_path, status_code=500, count=1)
def test_template_loader_postmortem_notafile(self):
"""Tests for not being a file"""
try:
template_path = tempfile.mkdtemp()
template_name = os.path.basename(template_path)
tempdir = os.path.dirname(template_path)
with override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [tempdir],
}]):
response = self.client.get(reverse('raises_template_does_not_exist', kwargs={"path": template_name}))
self.assertContains(response, "%s (Not a file)" % template_path, status_code=500, count=1)
finally:
shutil.rmtree(template_path)
def test_no_template_source_loaders(self):
"""
Make sure if you don't specify a template, the debug view doesn't blow up.
"""
self.assertRaises(TemplateDoesNotExist, self.client.get, '/render_no_template/')
@override_settings(ROOT_URLCONF='view_tests.default_urls')
def test_default_urlconf_template(self):
    """
    Make sure that the default urlconf template is shown instead
    of the technical 404 page, if the user has not altered their
    url conf yet.
    """
    response = self.client.get('/')
    # The "it worked" welcome page is rendered for the root URL.
    self.assertContains(
        response,
        "<h2>Congratulations on your first Django-powered page.</h2>"
    )
@override_settings(ROOT_URLCONF='view_tests.regression_21530_urls')
def test_regression_21530(self):
    """
    Regression test for bug #21530.

    If the admin app include is replaced with exactly one url
    pattern, then the technical 404 template should be displayed.
    The bug here was that an AttributeError caused a 500 response.
    """
    response = self.client.get('/')
    # A 404 (not a 500) must be returned, rendered by the technical template.
    self.assertContains(
        response,
        "Page not found <span>(404)</span>",
        status_code=404
    )
@override_settings(
    DEBUG=True,
    ROOT_URLCONF="view_tests.urls",
    # No template directories are configured, so no templates will be found.
    TEMPLATES=[{
        'BACKEND': 'django.template.backends.dummy.TemplateStrings',
    }],
)
class NonDjangoTemplatesDebugViewTests(TestCase):
    """Debug views must also work when a non-Django template backend is used."""

    def test_400(self):
        # Ensure that when DEBUG=True, technical_500_template() is called.
        response = self.client.get('/raises400/')
        self.assertContains(response, '<div class="context" id="', status_code=400)

    def test_403(self):
        response = self.client.get('/raises403/')
        self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)

    def test_404(self):
        response = self.client.get('/raises404/')
        self.assertEqual(response.status_code, 404)

    def test_template_not_found_error(self):
        # Raises a TemplateDoesNotExist exception and shows the debug view.
        url = reverse('raises_template_does_not_exist', kwargs={"path": "notfound.html"})
        response = self.client.get(url)
        self.assertContains(response, '<div class="context" id="', status_code=500)
class ExceptionReporterTests(TestCase):
    """Tests for the HTML report built by django.views.debug.ExceptionReporter.

    The debug page HTML-escapes exception messages, so apostrophes appear as
    ``&#39;`` in the rendered output. The assertions below must use the
    escaped form; the previous unescaped apostrophes also made the
    single-quoted string literals a syntax error.
    """
    rf = RequestFactory()

    def test_request_and_exception(self):
        "A simple exception report can be generated"
        try:
            request = self.rf.get('/test_view/')
            raise ValueError("Can't find my keys")
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(request, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>ValueError at /test_view/</h1>', html)
        # Apostrophe is HTML-escaped in the rendered exception value.
        self.assertIn('<pre class="exception_value">Can&#39;t find my keys</pre>', html)
        self.assertIn('<th>Request Method:</th>', html)
        self.assertIn('<th>Request URL:</th>', html)
        self.assertIn('<th>Exception Type:</th>', html)
        self.assertIn('<th>Exception Value:</th>', html)
        self.assertIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertNotIn('<p>Request data not supplied</p>', html)

    def test_no_request(self):
        "An exception report can be generated without request"
        try:
            raise ValueError("Can't find my keys")
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>ValueError</h1>', html)
        self.assertIn('<pre class="exception_value">Can&#39;t find my keys</pre>', html)
        self.assertNotIn('<th>Request Method:</th>', html)
        self.assertNotIn('<th>Request URL:</th>', html)
        self.assertIn('<th>Exception Type:</th>', html)
        self.assertIn('<th>Exception Value:</th>', html)
        self.assertIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertIn('<p>Request data not supplied</p>', html)

    def test_eol_support(self):
        """Test that the ExceptionReporter supports Unix, Windows and Macintosh EOL markers"""
        LINES = list('print %d' % i for i in range(1, 6))
        reporter = ExceptionReporter(None, None, None, None)
        for newline in ['\n', '\r\n', '\r']:
            fd, filename = tempfile.mkstemp(text=False)
            os.write(fd, force_bytes(newline.join(LINES) + newline))
            os.close(fd)
            try:
                self.assertEqual(
                    reporter._get_lines_from_file(filename, 3, 2),
                    (1, LINES[1:3], LINES[3], LINES[4:])
                )
            finally:
                os.unlink(filename)

    def test_no_exception(self):
        "An exception report can be generated for just a request"
        request = self.rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, None, None)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>Report at /test_view/</h1>', html)
        self.assertIn('<pre class="exception_value">No exception message supplied</pre>', html)
        self.assertIn('<th>Request Method:</th>', html)
        self.assertIn('<th>Request URL:</th>', html)
        self.assertNotIn('<th>Exception Type:</th>', html)
        self.assertNotIn('<th>Exception Value:</th>', html)
        self.assertNotIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertNotIn('<p>Request data not supplied</p>', html)

    def test_request_and_message(self):
        "A message can be provided in addition to a request"
        request = self.rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>Report at /test_view/</h1>', html)
        self.assertIn('<pre class="exception_value">I&#39;m a little teapot</pre>', html)
        self.assertIn('<th>Request Method:</th>', html)
        self.assertIn('<th>Request URL:</th>', html)
        self.assertNotIn('<th>Exception Type:</th>', html)
        self.assertNotIn('<th>Exception Value:</th>', html)
        self.assertNotIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertNotIn('<p>Request data not supplied</p>', html)

    def test_message_only(self):
        reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>Report</h1>', html)
        self.assertIn('<pre class="exception_value">I&#39;m a little teapot</pre>', html)
        self.assertNotIn('<th>Request Method:</th>', html)
        self.assertNotIn('<th>Request URL:</th>', html)
        self.assertNotIn('<th>Exception Type:</th>', html)
        self.assertNotIn('<th>Exception Value:</th>', html)
        self.assertNotIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertIn('<p>Request data not supplied</p>', html)

    def test_non_utf8_values_handling(self):
        "Non-UTF-8 exceptions/values should not make the output generation choke."
        try:
            class NonUtf8Output(Exception):
                def __repr__(self):
                    return b'EXC\xe9EXC'
            somevar = b'VAL\xe9VAL'  # NOQA
            raise NonUtf8Output()
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('VAL\\xe9VAL', html)
        self.assertIn('EXC\\xe9EXC', html)

    def test_unprintable_values_handling(self):
        "Unprintable values should not make the output generation choke."
        try:
            class OomOutput(object):
                def __repr__(self):
                    raise MemoryError('OOM')
            oomvalue = OomOutput()  # NOQA
            raise ValueError()
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('<td class="code"><pre>Error in formatting', html)

    def test_too_large_values_handling(self):
        "Large values should not create a large HTML."
        large = 256 * 1024
        repr_of_str_adds = len(repr(''))
        try:
            class LargeOutput(object):
                def __repr__(self):
                    return repr('A' * large)
            largevalue = LargeOutput()  # NOQA
            raise ValueError()
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertEqual(len(html) // 1024 // 128, 0)  # still fit in 128Kb
        self.assertIn('<trimmed %d bytes string>' % (large + repr_of_str_adds,), html)

    @skipIf(six.PY2, 'Bug manifests on PY3 only')
    def test_unfrozen_importlib(self):
        """
        importlib is not a frozen app, but its loader thinks it's frozen which
        results in an ImportError on Python 3. Refs #21443.
        """
        try:
            request = self.rf.get('/test_view/')
            importlib.import_module('abc.def.invalid.name')
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(request, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>ImportError at /test_view/</h1>', html)
class PlainTextReportTests(TestCase):
    """Tests for the plain-text traceback produced by ExceptionReporter."""
    rf = RequestFactory()

    def test_request_and_exception(self):
        "A simple exception report can be generated"
        try:
            request = self.rf.get('/test_view/')
            raise ValueError("Can't find my keys")
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(request, exc_type, exc_value, tb)
        text = reporter.get_traceback_text()
        self.assertIn('ValueError at /test_view/', text)
        self.assertIn("Can't find my keys", text)
        self.assertIn('Request Method:', text)
        self.assertIn('Request URL:', text)
        self.assertIn('Exception Type:', text)
        self.assertIn('Exception Value:', text)
        self.assertIn('Traceback:', text)
        self.assertIn('Request information:', text)
        self.assertNotIn('Request data not supplied', text)

    def test_no_request(self):
        "An exception report can be generated without request"
        try:
            raise ValueError("Can't find my keys")
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        text = reporter.get_traceback_text()
        self.assertIn('ValueError', text)
        self.assertIn("Can't find my keys", text)
        self.assertNotIn('Request Method:', text)
        self.assertNotIn('Request URL:', text)
        self.assertIn('Exception Type:', text)
        self.assertIn('Exception Value:', text)
        self.assertIn('Traceback:', text)
        self.assertIn('Request data not supplied', text)

    # The three smoke tests below only check that generation does not raise.
    def test_no_exception(self):
        "An exception report can be generated for just a request"
        request = self.rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, None, None)
        reporter.get_traceback_text()

    def test_request_and_message(self):
        "A message can be provided in addition to a request"
        request = self.rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
        reporter.get_traceback_text()

    def test_message_only(self):
        reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
        reporter.get_traceback_text()
class ExceptionReportTestMixin(object):
    """Shared assertion helpers for the exception-report filtering tests."""
    # Mixin used in the ExceptionReporterFilterTests and
    # AjaxResponseExceptionReporterFilter tests below

    # POST payload used by every request. The 'baked-beans'/'hash-brown'
    # pairs are expected to remain visible in "safe" reports, while the
    # 'sausage'/'bacon' values are the ones the sensitive views filter out
    # (see verify_safe_response below).
    breakfast_data = {'sausage-key': 'sausage-value',
                      'baked-beans-key': 'baked-beans-value',
                      'hash-brown-key': 'hash-brown-value',
                      'bacon-key': 'bacon-value'}

    def verify_unsafe_response(self, view, check_for_vars=True,
                               check_for_POST_params=True):
        """
        Asserts that potentially sensitive info are displayed in the response.
        """
        request = self.rf.post('/some_url/', self.breakfast_data)
        response = view(request)
        if check_for_vars:
            # All variables are shown.
            self.assertContains(response, 'cooked_eggs', status_code=500)
            self.assertContains(response, 'scrambled', status_code=500)
            self.assertContains(response, 'sauce', status_code=500)
            self.assertContains(response, 'worcestershire', status_code=500)
        if check_for_POST_params:
            for k, v in self.breakfast_data.items():
                # All POST parameters are shown.
                self.assertContains(response, k, status_code=500)
                self.assertContains(response, v, status_code=500)

    def verify_safe_response(self, view, check_for_vars=True,
                             check_for_POST_params=True):
        """
        Asserts that certain sensitive info are not displayed in the response.
        """
        request = self.rf.post('/some_url/', self.breakfast_data)
        response = view(request)
        if check_for_vars:
            # Non-sensitive variable's name and value are shown.
            self.assertContains(response, 'cooked_eggs', status_code=500)
            self.assertContains(response, 'scrambled', status_code=500)
            # Sensitive variable's name is shown but not its value.
            self.assertContains(response, 'sauce', status_code=500)
            self.assertNotContains(response, 'worcestershire', status_code=500)
        if check_for_POST_params:
            for k, v in self.breakfast_data.items():
                # All POST parameters' names are shown.
                self.assertContains(response, k, status_code=500)
            # Non-sensitive POST parameters' values are shown.
            self.assertContains(response, 'baked-beans-value', status_code=500)
            self.assertContains(response, 'hash-brown-value', status_code=500)
            # Sensitive POST parameters' values are not shown.
            self.assertNotContains(response, 'sausage-value', status_code=500)
            self.assertNotContains(response, 'bacon-value', status_code=500)

    def verify_paranoid_response(self, view, check_for_vars=True,
                                 check_for_POST_params=True):
        """
        Asserts that no variables or POST parameters are displayed in the response.
        """
        request = self.rf.post('/some_url/', self.breakfast_data)
        response = view(request)
        if check_for_vars:
            # Show variable names but not their values.
            self.assertContains(response, 'cooked_eggs', status_code=500)
            self.assertNotContains(response, 'scrambled', status_code=500)
            self.assertContains(response, 'sauce', status_code=500)
            self.assertNotContains(response, 'worcestershire', status_code=500)
        if check_for_POST_params:
            for k, v in self.breakfast_data.items():
                # All POST parameters' names are shown.
                self.assertContains(response, k, status_code=500)
                # No POST parameters' values are shown.
                self.assertNotContains(response, v, status_code=500)

    def verify_unsafe_email(self, view, check_for_POST_params=True):
        """
        Asserts that potentially sensitive info are displayed in the email report.
        """
        with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
            mail.outbox = []  # Empty outbox
            request = self.rf.post('/some_url/', self.breakfast_data)
            view(request)
            self.assertEqual(len(mail.outbox), 1)
            email = mail.outbox[0]
            # Frames vars are never shown in plain text email reports.
            body_plain = force_text(email.body)
            self.assertNotIn('cooked_eggs', body_plain)
            self.assertNotIn('scrambled', body_plain)
            self.assertNotIn('sauce', body_plain)
            self.assertNotIn('worcestershire', body_plain)
            # Frames vars are shown in html email reports.
            body_html = force_text(email.alternatives[0][0])
            self.assertIn('cooked_eggs', body_html)
            self.assertIn('scrambled', body_html)
            self.assertIn('sauce', body_html)
            self.assertIn('worcestershire', body_html)
            if check_for_POST_params:
                for k, v in self.breakfast_data.items():
                    # All POST parameters are shown.
                    self.assertIn(k, body_plain)
                    self.assertIn(v, body_plain)
                    self.assertIn(k, body_html)
                    self.assertIn(v, body_html)

    def verify_safe_email(self, view, check_for_POST_params=True):
        """
        Asserts that certain sensitive info are not displayed in the email report.
        """
        with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
            mail.outbox = []  # Empty outbox
            request = self.rf.post('/some_url/', self.breakfast_data)
            view(request)
            self.assertEqual(len(mail.outbox), 1)
            email = mail.outbox[0]
            # Frames vars are never shown in plain text email reports.
            body_plain = force_text(email.body)
            self.assertNotIn('cooked_eggs', body_plain)
            self.assertNotIn('scrambled', body_plain)
            self.assertNotIn('sauce', body_plain)
            self.assertNotIn('worcestershire', body_plain)
            # Frames vars are shown in html email reports.
            body_html = force_text(email.alternatives[0][0])
            self.assertIn('cooked_eggs', body_html)
            self.assertIn('scrambled', body_html)
            self.assertIn('sauce', body_html)
            self.assertNotIn('worcestershire', body_html)
            if check_for_POST_params:
                for k, v in self.breakfast_data.items():
                    # All POST parameters' names are shown.
                    self.assertIn(k, body_plain)
                # Non-sensitive POST parameters' values are shown.
                self.assertIn('baked-beans-value', body_plain)
                self.assertIn('hash-brown-value', body_plain)
                self.assertIn('baked-beans-value', body_html)
                self.assertIn('hash-brown-value', body_html)
                # Sensitive POST parameters' values are not shown.
                self.assertNotIn('sausage-value', body_plain)
                self.assertNotIn('bacon-value', body_plain)
                self.assertNotIn('sausage-value', body_html)
                self.assertNotIn('bacon-value', body_html)

    def verify_paranoid_email(self, view):
        """
        Asserts that no variables or POST parameters are displayed in the email report.
        """
        with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
            mail.outbox = []  # Empty outbox
            request = self.rf.post('/some_url/', self.breakfast_data)
            view(request)
            self.assertEqual(len(mail.outbox), 1)
            email = mail.outbox[0]
            # Frames vars are never shown in plain text email reports.
            body = force_text(email.body)
            self.assertNotIn('cooked_eggs', body)
            self.assertNotIn('scrambled', body)
            self.assertNotIn('sauce', body)
            self.assertNotIn('worcestershire', body)
            for k, v in self.breakfast_data.items():
                # All POST parameters' names are shown.
                self.assertIn(k, body)
                # No POST parameters' values are shown.
                self.assertNotIn(v, body)
@override_settings(ROOT_URLCONF='view_tests.urls')
class ExceptionReporterFilterTests(TestCase, ExceptionReportTestMixin):
    """
    Ensure that sensitive information can be filtered out of error reports.

    Refs #14614.
    """
    rf = RequestFactory()

    def test_non_sensitive_request(self):
        """
        Ensure that everything (request info and frame variables) can be seen
        in the default error reports for non-sensitive requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(non_sensitive_view)
            self.verify_unsafe_email(non_sensitive_view)
        with self.settings(DEBUG=False):
            self.verify_unsafe_response(non_sensitive_view)
            self.verify_unsafe_email(non_sensitive_view)

    def test_sensitive_request(self):
        """
        Ensure that sensitive POST parameters and frame variables cannot be
        seen in the default error reports for sensitive requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_view)
            self.verify_unsafe_email(sensitive_view)
        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_view)
            self.verify_safe_email(sensitive_view)

    def test_paranoid_request(self):
        """
        Ensure that no POST parameters and frame variables can be seen in the
        default error reports for "paranoid" requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(paranoid_view)
            self.verify_unsafe_email(paranoid_view)
        with self.settings(DEBUG=False):
            self.verify_paranoid_response(paranoid_view)
            self.verify_paranoid_email(paranoid_view)

    def test_multivalue_dict_key_error(self):
        """
        #21098 -- Ensure that sensitive POST parameters cannot be seen in the
        error reports for if request.POST['nonexistent_key'] throws an error.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(multivalue_dict_key_error)
            self.verify_unsafe_email(multivalue_dict_key_error)
        with self.settings(DEBUG=False):
            self.verify_safe_response(multivalue_dict_key_error)
            self.verify_safe_email(multivalue_dict_key_error)

    def test_custom_exception_reporter_filter(self):
        """
        Ensure that it's possible to assign an exception reporter filter to
        the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(custom_exception_reporter_filter_view)
            self.verify_unsafe_email(custom_exception_reporter_filter_view)
        with self.settings(DEBUG=False):
            self.verify_unsafe_response(custom_exception_reporter_filter_view)
            self.verify_unsafe_email(custom_exception_reporter_filter_view)

    def test_sensitive_method(self):
        """
        Ensure that the sensitive_variables decorator works with object
        methods.

        Refs #18379.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_method_view,
                                        check_for_POST_params=False)
            self.verify_unsafe_email(sensitive_method_view,
                                     check_for_POST_params=False)
        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_method_view,
                                      check_for_POST_params=False)
            self.verify_safe_email(sensitive_method_view,
                                   check_for_POST_params=False)

    def test_sensitive_function_arguments(self):
        """
        Ensure that sensitive variables don't leak in the sensitive_variables
        decorator's frame, when those variables are passed as arguments to the
        decorated function.

        Refs #19453.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_args_function_caller)
            self.verify_unsafe_email(sensitive_args_function_caller)
        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_args_function_caller, check_for_POST_params=False)
            self.verify_safe_email(sensitive_args_function_caller, check_for_POST_params=False)

    def test_sensitive_function_keyword_arguments(self):
        """
        Ensure that sensitive variables don't leak in the sensitive_variables
        decorator's frame, when those variables are passed as keyword arguments
        to the decorated function.

        Refs #19453.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_kwargs_function_caller)
            self.verify_unsafe_email(sensitive_kwargs_function_caller)
        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_kwargs_function_caller, check_for_POST_params=False)
            self.verify_safe_email(sensitive_kwargs_function_caller, check_for_POST_params=False)

    def test_callable_settings(self):
        """
        Callable settings should not be evaluated in the debug page (#21345).
        """
        def callable_setting():
            return "This should not be displayed"
        with self.settings(DEBUG=True, FOOBAR=callable_setting):
            response = self.client.get('/raises500/')
            self.assertNotContains(response, "This should not be displayed", status_code=500)

    def test_callable_settings_forbidding_to_set_attributes(self):
        """
        Callable settings which forbid to set attributes should not break
        the debug page (#23070).
        """
        class CallableSettingWithSlots(object):
            # __slots__ prevents the debug view from annotating the object.
            __slots__ = []

            def __call__(self):
                return "This should not be displayed"
        with self.settings(DEBUG=True, WITH_SLOTS=CallableSettingWithSlots()):
            response = self.client.get('/raises500/')
            self.assertNotContains(response, "This should not be displayed", status_code=500)

    def test_dict_setting_with_non_str_key(self):
        """
        A dict setting containing a non-string key should not break the
        debug page (#12744).
        """
        with self.settings(DEBUG=True, FOOBAR={42: None}):
            response = self.client.get('/raises500/')
            self.assertContains(response, 'FOOBAR', status_code=500)

    def test_sensitive_settings(self):
        """
        The debug page should not show some sensitive settings
        (password, secret key, ...).
        """
        sensitive_settings = [
            'SECRET_KEY',
            'PASSWORD',
            'API_KEY',
            'AUTH_TOKEN',
        ]
        for setting in sensitive_settings:
            with self.settings(DEBUG=True, **{setting: "should not be displayed"}):
                response = self.client.get('/raises500/')
                self.assertNotContains(response, 'should not be displayed', status_code=500)

    def test_settings_with_sensitive_keys(self):
        """
        The debug page should filter out some sensitive information found in
        dict settings.
        """
        sensitive_settings = [
            'SECRET_KEY',
            'PASSWORD',
            'API_KEY',
            'AUTH_TOKEN',
        ]
        for setting in sensitive_settings:
            FOOBAR = {
                setting: "should not be displayed",
                'recursive': {setting: "should not be displayed"},
            }
            with self.settings(DEBUG=True, FOOBAR=FOOBAR):
                response = self.client.get('/raises500/')
                self.assertNotContains(response, 'should not be displayed', status_code=500)
class AjaxResponseExceptionReporterFilter(TestCase, ExceptionReportTestMixin):
    """
    Ensure that sensitive information can be filtered out of error reports.

    Here we specifically test the plain text 500 debug-only error page served
    when it has been detected the request was sent by JS code. We don't check
    for (non)existence of frames vars in the traceback information section of
    the response content because we don't include them in these error pages.
    Refs #14614.
    """
    # Simulate an AJAX request so the plain-text debug page is served.
    rf = RequestFactory(HTTP_X_REQUESTED_WITH='XMLHttpRequest')

    def test_non_sensitive_request(self):
        """
        Ensure that request info can be seen in the default error reports for
        non-sensitive requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
        with self.settings(DEBUG=False):
            self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)

    def test_sensitive_request(self):
        """
        Ensure that sensitive POST parameters cannot be seen in the default
        error reports for sensitive requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_view, check_for_vars=False)
        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_view, check_for_vars=False)

    def test_paranoid_request(self):
        """
        Ensure that no POST parameters can be seen in the default error reports
        for "paranoid" requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(paranoid_view, check_for_vars=False)
        with self.settings(DEBUG=False):
            self.verify_paranoid_response(paranoid_view, check_for_vars=False)

    def test_custom_exception_reporter_filter(self):
        """
        Ensure that it's possible to assign an exception reporter filter to
        the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(custom_exception_reporter_filter_view,
                                        check_for_vars=False)
        with self.settings(DEBUG=False):
            self.verify_unsafe_response(custom_exception_reporter_filter_view,
                                        check_for_vars=False)
| |
#!/usr/bin/env python
"""
Term Cube
Time: 00:00:00.000 +-------+-------+-------+ +-----+-----+-----+
Date: 00-00-00 | | | | | | | |
User: XXX | | | | | | | |
Pts: XXX +-------+-------+-------+ +-----+-----+-----+
Tty: XXX | | U | | | | B | |
Host: XXX | | | | | | | |
OS: XXX +-------+-------+-------+ +-----+-----+-----+
Disp: XXX | | | | | | | |
PyVer: XXX | | | | | | | |
TCVer: XXX +-------+-------+-------+ +-----+-----+-----+
+-----+-----+-----+ +-------+-------+-------+ +-----+-----+-----+
| | | | | | | | | | | |
| | | | | | | | | | | |
| | | | | | | | | | | |
+-----+-----+-----+ +-------+-------+-------+ +-----+-----+-----+
| | | | | | | | | | | |
| | L | | | | F | | | | R | |
| | | | | | | | | | | |
+-----+-----+-----+ +-------+-------+-------+ +-----+-----+-----+
| | | | | | | | | | | |
| | | | | | | | | | | |
| | | | | | | | | | | |
+-----+-----+-----+ +-------+-------+-------+ +-----+-----+-----+
F : ? U : ? +-------+-------+-------+ Time: 00:00:00.000
F' : ? U' : ? | | | | TBest: 00:00:00.000
F" : ? U" : ? | | | | TLast: 00:00:00.000
F2 : ? U2 : ? +-------+-------+-------+ TAvg: 00:00:00.000
F2': ? U2': ? | | | | Move: 000
F2": ? U2": ? | | D | | MBest: 000
L : ? D : ? +-------+-------+-------+ MLast: 000
L' : ? D' : ? | | | | MAvg: 000
L" : ? D" : ? | | | | Sec/M: 000.000
L2 : ? D2 : ? +-------+-------+-------+ Reset: 000
L2': ? D2': ? Seed: 000000000000 Wins: 000
L2": ? D2": ? Loss: 000
R : ? B : ? Tot: 000
R' : ? B' : ?
R" : ? B" : ?
R2 : ? B2 : ?
R2': ? B2': ?
R2": ? B2": ?
Q : ^C R : ^D
http://docs.python.org/2/howto/curses.html#curses-howto
"""
from __future__ import print_function, division
from random import randint
from string import digits, letters
from time import time, sleep
import curses
def mainloop(window):
    """Main event loop for the term-cube game (stub — not yet implemented).

    NOTE(review): the original body fetched window.getmaxyx() into two
    locals that were never used; that dead code has been removed. The
    function is only referenced from a commented-out call at module level.
    """
def noise(window):
    """Fill *window* with random '#' characters while reporting mouse state.

    Runs until 'q' is pressed (raises SystemExit). A 12x21 stats sub-window
    (win2) in the lower-left corner shows the update count, updates/second,
    elapsed time, last cursor position and the last recorded mouse event.
    Pressing 'h', or clicking inside win2 (button state 2), toggles the
    sub-window's reverse-video attribute.
    """
    # Last recorded mouse event (id, x, y, z, button state).
    mid, mx, my, mz, mbstate = 0, 0, 0, 0, 0
    ymax, xmax = window.getmaxyx()
    window.timeout(0)  # Nonblocking gets, 0 in ms
    t0 = time()
    tot = 0
    # Arrow + line pointing at the stats box, with usage hint.
    window.addch(ymax-7, 24, curses.ACS_LARROW)
    window.hline(ymax-7, 25, curses.ACS_HLINE, 10)
    window.addstr(ymax-7, 36, "click this box, or press h, or q")
    win2 = curses.newwin(12, 21, ymax-13, 1)
    win2.box()
    win2.keypad(1)
    win2.nodelay(1)
    win2.border()
    win2.leaveok(0)
    win2.scrollok(0)
    win2.bkgd(' ', 0)
    win2_inv = False  # Tracks whether win2 is currently reverse-video.
    while True:
        t1 = time()
        tot += 1
        ups = tot/(t1-t0)
        # Nested randint biases coordinates toward the top-left; the y range
        # excludes the 12 rows occupied by the stats window.
        x = randint(1, randint(1, xmax - 2))
        y = randint(1, randint(1, ymax - 2 - 12))
        c = ord('#')
        getch = window.getch()
        if getch == curses.KEY_MOUSE or getch == ord('h'):
            # NOTE(review): when 'h' is pressed without a pending mouse
            # event, curses.getmouse() can raise curses.error — confirm
            # this is intended before restructuring.
            tmid, tmx, tmy, tmz, tmbstate = curses.getmouse()
            if getch == ord('h') or tmbstate == 2 and win2.enclose(tmy, tmx):
                mid, mx, my, mz, mbstate = tmid, tmx, tmy, tmz, tmbstate
                if win2_inv:
                    win2.attroff(curses.A_REVERSE)
                else:
                    win2.attron(curses.A_REVERSE)
                win2_inv = not win2_inv
        elif getch == ord('q'):
            raise SystemExit(None)
        window.addch(y, x, c)
        window.noutrefresh()  # Mark for update
        ylst, xlst = curses.getsyx()
        # Refresh the stats box (rows counted down from the bottom edge).
        win2.addstr(12-11, 1, "tot :%12i" % tot)
        win2.addstr(12-10, 1, "#/s :%12.3f" % ups)
        win2.addstr(12-9, 1, "t tot :%12.3f" % (t1-t0))
        win2.addstr(12-8, 1, "y lst :%12i" % ylst)
        win2.addstr(12-7, 1, "x lst :%12i" % xlst)
        win2.addstr(12-6, 1, "m id :%12i" % mid)
        win2.addstr(12-5, 1, "m x :%12i" % mx)
        win2.addstr(12-4, 1, "m y :%12i" % my)
        win2.addstr(12-3, 1, "m z :%12i" % mz)
        win2.addstr(12-2, 1, "m st :%12i" % mbstate)
        win2.noutrefresh()
        #sleep(1/100000.)
        curses.doupdate()  # Perform refreshes
        continue
def entry(window):
    """Echo one typed line back in reverse video, then demo curses pads."""
    window.addstr(0, 0, "Type anything and I shall reverse its color: ")
    curses.echo()
    #window.setyx(1, 0)
    istr = window.getstr()
    window.addstr(1, 0, str(istr), curses.A_REVERSE)
    window.refresh()
    curses.noecho()
    try:
        # A pad can be larger than the screen; only the rectangle passed to
        # pad.refresh() is displayed.
        pad = curses.newpad(10, 10)
        pad.addch(0, 0, 'x')
        pad.addch(1, 0, 'x')
        pad.addch(0, 1, 'x')
        pad.addch(1, 1, 'x')
        pad.addch(8, 8, 'x')
        pad.addch(8, 9, 'x')
        pad.addch(9, 8, 'x')
        try:
            # Presumably writing the bottom-right cell raises curses.error
            # when the cursor cannot advance past it — TODO confirm.
            pad.addch(9, 9, 'x')
        except curses.error as e:
            window.move(0, 0)
            pass
        pad.refresh(0, 0, 0, 0, 11, 11)
        #pad.addch(99, 99, 'x')
        #for y in range(0, 100):
        #    for x in range(0, 100):
        #        try:
        #            v = ord('a') + (x*x+y*y) % 26
        #            pad.addch(y, x, v)
        #        except curses.error as e:
        #            pass
        ## Displays a section of the pad in the middle of the window
        #pad.refresh(0,0, 5,5, 20,75)
    except Exception as e:
        raise
    # Leave the result on screen briefly before returning.
    sleep(2)
def _main():
    """Initialise curses, run the noise demo, and restore the terminal.

    The cleanup in the finally block only runs when initscr() succeeded:
    the original referenced `window` unconditionally there, so a failed
    initialisation raised NameError and masked the real error.
    """
    window = None
    try:
        window = curses.initscr()       # Window object
        window.clear()                  # Erase and repaint on update
        curses.setupterm("NAME")        # Setup term name and file out
        availmask, oldmask = \
            curses.mousemask(curses.BUTTON1_PRESSED |
                             curses.BUTTON1_RELEASED)  # Record mouse events
        curses.mouseinterval(1)         # Max ms click interval
        curses.start_color()            # To use colors
        curses.use_default_colors()     # Default term colors eg transparency
        curses.meta(1)                  # 8b characters
        curses.noecho()                 # No auto echo keys to window
        curses.cbreak()                 # Don't wait for <Enter>
        window.keypad(1)                # Use special char values
        window.nodelay(1)               # Nonblocking getch/getstr
        window.border()                 # Or box on edges
        window.leaveok(0)               # Virtual screen cursor after update
        curses.curs_set(0)              # Invisible cursor
        window.scrollok(0)              # Cursor moves off page don't scroll
        window.bkgd(' ', 0)             # Set background char and attr
        #example(window)
        noise(window)
        #pad()
    finally:
        # Restore the terminal only if initialisation got far enough.
        if window is not None:
            curses.nocbreak()
            window.keypad(0)
            curses.echo()
            curses.endwin()
#mainloop()
# Script entry point. _main() restores the terminal in its cleanup before
# any of these exceptions propagate.
if __name__ == '__main__':
    try:
        _main()
    except KeyboardInterrupt:
        # Ctrl-C exits quietly.
        pass
    except curses.error as e:
        #print("ERROR: Check window size!")
        raise
    except Exception as e:
        raise
    except:
        # NOTE(review): only reachable for BaseExceptions not caught above
        # (e.g. SystemExit); Exception is already handled by the clause
        # before this one.
        raise
    #raise SystemExit(None)
| |
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A helper class for reading in and dealing with tests expectations
for layout tests.
"""
from collections import defaultdict
import logging
import re
from webkitpy.layout_tests.models.test_configuration import TestConfigurationConverter
_log = logging.getLogger(__name__)

# Test expectation and specifier constants.
#
# FIXME: range() starts with 0 which makes if expectation checks harder
# as PASS is 0.
(PASS, FAIL, TEXT, IMAGE, IMAGE_PLUS_TEXT, AUDIO, TIMEOUT, CRASH, LEAK, SKIP, WONTFIX,
 SLOW, REBASELINE, NEEDS_REBASELINE, NEEDS_MANUAL_REBASELINE, MISSING, FLAKY, NOW, NONE) = range(19)

# FIXME: Perhaps these two routines should be part of the Port instead?
BASELINE_SUFFIX_LIST = ('png', 'wav', 'txt')

# Recognized bug-identifier prefixes for expectation lines.
WEBKIT_BUG_PREFIX = 'webkit.org/b/'
CHROMIUM_BUG_PREFIX = 'crbug.com/'
V8_BUG_PREFIX = 'code.google.com/p/v8/issues/detail?id='
NAMED_BUG_PREFIX = 'Bug('

# Keywords as they appear literally in TestExpectations files.
MISSING_KEYWORD = 'Missing'
NEEDS_REBASELINE_KEYWORD = 'NeedsRebaseline'
NEEDS_MANUAL_REBASELINE_KEYWORD = 'NeedsManualRebaseline'
class ParseError(Exception):
    """Raised when TestExpectations content cannot be parsed.

    Carries the list of warning objects collected while parsing; the
    string form is one warning per line.
    """

    def __init__(self, warnings):
        super(ParseError, self).__init__()
        self.warnings = warnings

    def __str__(self):
        return '\n'.join(str(warning) for warning in self.warnings)

    def __repr__(self):
        return 'ParseError(warnings=%s)' % self.warnings
class TestExpectationParser(object):
    """Provides parsing facilities for lines in the test_expectation.txt file.

    Turns raw TestExpectations text into TestExpectationLine objects,
    resolving test names against the Port and validating bug identifiers,
    configuration specifiers and expectation keywords.
    """

    # FIXME: Rename these to *_KEYWORD as in MISSING_KEYWORD above, but make
    # the case studly-caps to match the actual file contents.
    REBASELINE_MODIFIER = 'rebaseline'
    NEEDS_REBASELINE_MODIFIER = 'needsrebaseline'
    NEEDS_MANUAL_REBASELINE_MODIFIER = 'needsmanualrebaseline'
    PASS_EXPECTATION = 'pass'
    SKIP_MODIFIER = 'skip'
    SLOW_MODIFIER = 'slow'
    WONTFIX_MODIFIER = 'wontfix'
    TIMEOUT_EXPECTATION = 'timeout'

    MISSING_BUG_WARNING = 'Test lacks BUG specifier.'

    def __init__(self, port, all_tests, is_lint_mode):
        """Args:
            port: Port object whose configurations/tests the lines apply to.
            all_tests: iterable of all known test names (may be None/empty,
                in which case directory lines are not expanded).
            is_lint_mode: when True, extra style warnings are produced.
        """
        self._port = port
        self._test_configuration_converter = TestConfigurationConverter(
            set(port.all_test_configurations()), port.configuration_specifier_macros())
        if all_tests:
            self._all_tests = set(all_tests)
        else:
            self._all_tests = set()
        self._is_lint_mode = is_lint_mode

    def parse(self, filename, expectations_string):
        """Parses every line of expectations_string; returns TestExpectationLines."""
        expectation_lines = []
        line_number = 0
        for line in expectations_string.split("\n"):
            line_number += 1
            test_expectation = self._tokenize_line(filename, line, line_number)
            self._parse_line(test_expectation)
            expectation_lines.append(test_expectation)
        return expectation_lines

    def _create_expectation_line(self, test_name, expectations, file_name):
        # Builds a minimal, not-yet-parsed line for programmatic callers.
        expectation_line = TestExpectationLine()
        expectation_line.original_string = test_name
        expectation_line.name = test_name
        expectation_line.filename = file_name
        expectation_line.expectations = expectations
        return expectation_line

    def expectation_line_for_test(self, test_name, expectations):
        """Creates and fully parses a line for a single test (bot results path)."""
        expectation_line = self._create_expectation_line(test_name, expectations, '<Bot TestExpectations>')
        self._parse_line(expectation_line)
        return expectation_line

    def expectation_for_skipped_test(self, test_name):
        """Synthesizes a Skip/WontFix expectation line for a Skipped-list test."""
        if not self._port.test_exists(test_name):
            _log.warning('The following test %s from the Skipped list doesn\'t exist' % test_name)
        expectation_line = self._create_expectation_line(test_name, [TestExpectationParser.PASS_EXPECTATION], '<Skipped file>')
        expectation_line.expectations = [TestExpectationParser.SKIP_MODIFIER, TestExpectationParser.WONTFIX_MODIFIER]
        expectation_line.is_skipped_outside_expectations_file = True
        self._parse_line(expectation_line)
        return expectation_line

    def _parse_line(self, expectation_line):
        """Resolves the test path and parses specifiers/expectations in place."""
        if not expectation_line.name:
            return
        if not self._check_test_exists(expectation_line):
            return
        expectation_line.is_file = self._port.test_isfile(expectation_line.name)
        if expectation_line.is_file:
            expectation_line.path = expectation_line.name
        else:
            # Directory entries get normalized so prefix matching works.
            expectation_line.path = self._port.normalize_test_name(expectation_line.name)
        self._collect_matching_tests(expectation_line)
        self._parse_specifiers(expectation_line)
        self._parse_expectations(expectation_line)

    def _parse_specifier(self, specifier):
        return specifier.lower()

    def _parse_specifiers(self, expectation_line):
        if self._is_lint_mode:
            self._lint_line(expectation_line)
        parsed_specifiers = set([self._parse_specifier(specifier) for specifier in expectation_line.specifiers])
        expectation_line.matching_configurations = self._test_configuration_converter.to_config_set(
            parsed_specifiers, expectation_line.warnings)

    def _lint_line(self, expectation_line):
        """Appends lint-only style warnings to expectation_line.warnings."""
        expectations = [expectation.lower() for expectation in expectation_line.expectations]
        if not expectation_line.bugs and self.WONTFIX_MODIFIER not in expectations:
            expectation_line.warnings.append(self.MISSING_BUG_WARNING)
        if self.REBASELINE_MODIFIER in expectations:
            expectation_line.warnings.append('REBASELINE should only be used for running rebaseline.py. Cannot be checked in.')
        if self.NEEDS_REBASELINE_MODIFIER in expectations or self.NEEDS_MANUAL_REBASELINE_MODIFIER in expectations:
            for test in expectation_line.matching_tests:
                if self._port.reference_files(test):
                    # A reftest only needs rebaselining if it also has a text baseline.
                    text_expected_filename = self._port.expected_filename(test, '.txt')
                    if not self._port.host.filesystem.exists(text_expected_filename):
                        expectation_line.warnings.append('A reftest without text expectation cannot be marked as NeedsRebaseline/NeedsManualRebaseline')
        specifiers = [specifier.lower() for specifier in expectation_line.specifiers]
        if (self.REBASELINE_MODIFIER in expectations or self.NEEDS_REBASELINE_MODIFIER in expectations) and ('debug' in specifiers or 'release' in specifiers):
            expectation_line.warnings.append('A test cannot be rebaselined for Debug/Release.')

    def _parse_expectations(self, expectation_line):
        """Converts keyword strings into the parsed (integer) expectation set."""
        result = set()
        for part in expectation_line.expectations:
            expectation = TestExpectations.expectation_from_string(part)
            if expectation is None:  # Careful, PASS is currently 0.
                expectation_line.warnings.append('Unsupported expectation: %s' % part)
                continue
            result.add(expectation)
        expectation_line.parsed_expectations = result

    def _check_test_exists(self, expectation_line):
        # WebKit's way of skipping tests is to add a -disabled suffix.
        # So we should consider the path existing if the path or the
        # -disabled version exists.
        if not self._port.test_exists(expectation_line.name) and not self._port.test_exists(expectation_line.name + '-disabled'):
            # Log a warning here since you hit this case any
            # time you update TestExpectations without syncing
            # the LayoutTests directory
            expectation_line.warnings.append('Path does not exist.')
            return False
        return True

    def _collect_matching_tests(self, expectation_line):
        """Convert the test specification to an absolute, normalized
        path and make sure directories end with the OS path separator."""
        if not self._all_tests:
            expectation_line.matching_tests = [expectation_line.path]
            return
        if not expectation_line.is_file:
            # this is a test category, return all the tests of the category.
            expectation_line.matching_tests = [test for test in self._all_tests if test.startswith(expectation_line.path)]
            return
        # this is a test file, do a quick check if it's in the
        # full test suite.
        if expectation_line.path in self._all_tests:
            expectation_line.matching_tests.append(expectation_line.path)

    # FIXME: Update the original specifiers and remove this once the old syntax is gone.
    _configuration_tokens_list = [
        'Mac', 'Mac10.9', 'Mac10.10', 'Mac10.11', 'Retina',
        'Win', 'Win7', 'Win10',
        'Linux', 'Precise', 'Trusty',
        'Android',
        'Release',
        'Debug',
    ]

    _configuration_tokens = dict((token, token.upper()) for token in _configuration_tokens_list)
    # .items() (not Python-2-only .iteritems()) so this also works on Python 3.
    _inverted_configuration_tokens = dict((value, name) for name, value in _configuration_tokens.items())

    # FIXME: Update the original specifiers list and remove this once the old syntax is gone.
    _expectation_tokens = {
        'Crash': 'CRASH',
        'Leak': 'LEAK',
        'Failure': 'FAIL',
        MISSING_KEYWORD: 'MISSING',
        'Pass': 'PASS',
        'Rebaseline': 'REBASELINE',
        NEEDS_REBASELINE_KEYWORD: 'NEEDSREBASELINE',
        NEEDS_MANUAL_REBASELINE_KEYWORD: 'NEEDSMANUALREBASELINE',
        'Skip': 'SKIP',
        'Slow': 'SLOW',
        'Timeout': 'TIMEOUT',
        'WontFix': 'WONTFIX',
    }

    _inverted_expectation_tokens = dict([(value, name) for name, value in _expectation_tokens.items()] +
                                        [('TEXT', 'Failure'), ('IMAGE', 'Failure'), ('IMAGE+TEXT', 'Failure'), ('AUDIO', 'Failure')])

    # FIXME: Seems like these should be classmethods on TestExpectationLine instead of TestExpectationParser.
    @classmethod
    def _tokenize_line(cls, filename, expectation_string, line_number):
        """Tokenizes a line from TestExpectations and returns an unparsed TestExpectationLine instance using the old format.

        The new format for a test expectation line is:

        [[bugs] [ "[" <configuration specifiers> "]" <name> [ "[" <expectations> "]" ["#" <comment>]

        Any errant whitespace is not preserved.
        """
        expectation_line = TestExpectationLine()
        expectation_line.original_string = expectation_string
        expectation_line.filename = filename
        expectation_line.line_numbers = str(line_number)

        comment_index = expectation_string.find("#")
        if comment_index == -1:
            comment_index = len(expectation_string)
        else:
            expectation_line.comment = expectation_string[comment_index + 1:]

        remaining_string = re.sub(r"\s+", " ", expectation_string[:comment_index].strip())
        if len(remaining_string) == 0:
            return expectation_line

        # special-case parsing this so that we fail immediately instead of treating this as a test name
        if remaining_string.startswith('//'):
            expectation_line.warnings = ['use "#" instead of "//" for comments']
            return expectation_line

        bugs = []
        specifiers = []
        name = None
        expectations = []
        warnings = []
        has_unrecognized_expectation = False

        # Simple state machine over whitespace-separated tokens.
        tokens = remaining_string.split()
        state = 'start'
        for token in tokens:
            if (token.startswith(WEBKIT_BUG_PREFIX) or
                    token.startswith(CHROMIUM_BUG_PREFIX) or
                    token.startswith(V8_BUG_PREFIX) or
                    token.startswith(NAMED_BUG_PREFIX)):
                if state != 'start':
                    warnings.append('"%s" is not at the start of the line.' % token)
                    break
                if token.startswith(WEBKIT_BUG_PREFIX):
                    bugs.append(token)
                elif token.startswith(CHROMIUM_BUG_PREFIX):
                    bugs.append(token)
                elif token.startswith(V8_BUG_PREFIX):
                    bugs.append(token)
                else:
                    # Raw string: '\(' and '\w' are regex escapes, not string escapes.
                    match = re.match(r'Bug\((\w+)\)$', token)
                    if not match:
                        warnings.append('unrecognized bug identifier "%s"' % token)
                        break
                    else:
                        bugs.append(token)
            elif token == '[':
                if state == 'start':
                    state = 'configuration'
                elif state == 'name_found':
                    state = 'expectations'
                else:
                    warnings.append('unexpected "["')
                    break
            elif token == ']':
                if state == 'configuration':
                    state = 'name'
                elif state == 'expectations':
                    state = 'done'
                else:
                    warnings.append('unexpected "]"')
                    break
            elif token in ('//', ':', '='):
                warnings.append('"%s" is not legal in the new TestExpectations syntax.' % token)
                break
            elif state == 'configuration':
                specifiers.append(cls._configuration_tokens.get(token, token))
            elif state == 'expectations':
                if token not in cls._expectation_tokens:
                    has_unrecognized_expectation = True
                    warnings.append('Unrecognized expectation "%s"' % token)
                else:
                    expectations.append(cls._expectation_tokens.get(token, token))
            elif state == 'name_found':
                warnings.append('expecting "[", "#", or end of line instead of "%s"' % token)
                break
            else:
                name = token
                state = 'name_found'

        if not warnings:
            if not name:
                warnings.append('Did not find a test name.')
            elif state not in ('name_found', 'done'):
                warnings.append('Missing a "]"')

        if 'WONTFIX' in expectations and 'SKIP' not in expectations:
            expectations.append('SKIP')

        if ('SKIP' in expectations or 'WONTFIX' in expectations) and len(set(expectations) - set(['SKIP', 'WONTFIX'])):
            warnings.append('A test marked Skip or WontFix must not have other expectations.')

        # Typo fixed: "ony" -> "only" in the two warnings below.
        if 'SLOW' in expectations and 'SlowTests' not in filename:
            warnings.append('SLOW tests should only be added to SlowTests and not to TestExpectations.')

        if 'WONTFIX' in expectations and ('NeverFixTests' not in filename and 'StaleTestExpectations' not in filename):
            warnings.append('WONTFIX tests should only be added to NeverFixTests or StaleTestExpectations and not to TestExpectations.')

        if 'NeverFixTests' in filename and expectations != ['WONTFIX', 'SKIP']:
            warnings.append('Only WONTFIX expectations are allowed in NeverFixTests')

        if 'SlowTests' in filename and expectations != ['SLOW']:
            warnings.append('Only SLOW expectations are allowed in SlowTests')

        if not expectations and not has_unrecognized_expectation:
            warnings.append('Missing expectations.')

        expectation_line.bugs = bugs
        expectation_line.specifiers = specifiers
        expectation_line.expectations = expectations
        expectation_line.name = name
        expectation_line.warnings = warnings
        return expectation_line

    @classmethod
    def _split_space_separated(cls, space_separated_string):
        """Splits a space-separated string into an array."""
        return [part.strip() for part in space_separated_string.strip().split(' ')]
class TestExpectationLine(object):
    """Represents a line in test expectations file."""

    def __init__(self):
        """Initializes a blank-line equivalent of an expectation."""
        self.original_string = None
        self.filename = None  # this is the path to the expectations file for this line
        self.line_numbers = "0"
        self.name = None  # this is the path in the line itself
        self.path = None  # this is the normpath of self.name
        self.bugs = []
        self.specifiers = []
        self.parsed_specifiers = []
        self.matching_configurations = set()
        self.expectations = []
        self.parsed_expectations = set()
        self.comment = None
        self.matching_tests = []
        self.warnings = []
        self.is_skipped_outside_expectations_file = False

    def __str__(self):
        return "TestExpectationLine{name=%s, matching_configurations=%s, original_string=%s}" % (self.name, self.matching_configurations, self.original_string)

    def __eq__(self, other):
        return (self.original_string == other.original_string
                and self.filename == other.filename
                and self.line_numbers == other.line_numbers
                and self.name == other.name
                and self.path == other.path
                and self.bugs == other.bugs
                and self.specifiers == other.specifiers
                and self.parsed_specifiers == other.parsed_specifiers
                and self.matching_configurations == other.matching_configurations
                and self.expectations == other.expectations
                and self.parsed_expectations == other.parsed_expectations
                and self.comment == other.comment
                and self.matching_tests == other.matching_tests
                and self.warnings == other.warnings
                and self.is_skipped_outside_expectations_file == other.is_skipped_outside_expectations_file)

    # Defining __eq__ alone makes instances unhashable on Python 3, but
    # lines are used as dict keys (e.g. the merge cache in
    # TestExpectationsModel.merge_model).  Keep the default identity hash,
    # which is exactly what Python 2 used implicitly.
    __hash__ = object.__hash__

    def is_invalid(self):
        # A line whose only warning is the missing-bug warning is still usable.
        return bool(self.warnings and self.warnings != [TestExpectationParser.MISSING_BUG_WARNING])

    def is_flaky(self):
        # More than one parsed expectation (e.g. [ Pass Failure ]) means flaky.
        return len(self.parsed_expectations) > 1

    def is_whitespace_or_comment(self):
        # Raw string avoids a deprecated "\s" string escape in newer Pythons.
        return bool(re.match(r"^\s*$", self.original_string.split('#')[0]))

    @staticmethod
    def create_passing_expectation(test):
        """Builds a synthetic line that marks *test* as simply passing."""
        expectation_line = TestExpectationLine()
        expectation_line.name = test
        expectation_line.path = test
        expectation_line.parsed_expectations = set([PASS])
        # NOTE(review): expectations is a set here but a list everywhere else;
        # callers appear to only iterate it -- confirm before normalizing.
        expectation_line.expectations = set(['PASS'])
        expectation_line.matching_tests = [test]
        return expectation_line

    @staticmethod
    def merge_expectation_lines(line1, line2, model_all_expectations):
        """Merges the expectations of line2 into line1 and returns a fresh object."""
        if line1 is None:
            return line2
        if line2 is None:
            return line1
        if model_all_expectations and line1.filename != line2.filename:
            return line2

        # Don't merge original_string or comment.
        result = TestExpectationLine()
        # We only care about filenames when we're linting, in which case the filenames are the same.
        # Not clear that there's anything better to do when not linting and the filenames are different.
        if model_all_expectations:
            result.filename = line2.filename
        result.line_numbers = line1.line_numbers + "," + line2.line_numbers
        result.name = line1.name
        result.path = line1.path
        result.parsed_expectations = set(line1.parsed_expectations) | set(line2.parsed_expectations)
        result.expectations = list(set(line1.expectations) | set(line2.expectations))
        result.bugs = list(set(line1.bugs) | set(line2.bugs))
        result.specifiers = list(set(line1.specifiers) | set(line2.specifiers))
        result.parsed_specifiers = list(set(line1.parsed_specifiers) | set(line2.parsed_specifiers))
        result.matching_configurations = set(line1.matching_configurations) | set(line2.matching_configurations)
        # (Was list(list(set(...))) -- the doubled list() was redundant.)
        result.matching_tests = list(set(line1.matching_tests) | set(line2.matching_tests))
        result.warnings = list(set(line1.warnings) | set(line2.warnings))
        result.is_skipped_outside_expectations_file = line1.is_skipped_outside_expectations_file or line2.is_skipped_outside_expectations_file
        return result

    def to_string(self, test_configuration_converter, include_specifiers=True, include_expectations=True, include_comment=True):
        """Serializes the line back to TestExpectations syntax (or '' / comment-only)."""
        parsed_expectation_to_string = dict([[parsed_expectation, expectation_string]
                                             for expectation_string, parsed_expectation in TestExpectations.EXPECTATIONS.items()])

        if self.is_invalid():
            return self.original_string or ''

        if self.name is None:
            return '' if self.comment is None else "#%s" % self.comment

        if test_configuration_converter and self.bugs:
            specifiers_list = test_configuration_converter.to_specifiers_list(self.matching_configurations)
            result = []
            for specifiers in specifiers_list:
                # FIXME: this is silly that we join the specifiers and then immediately split them.
                specifiers = self._serialize_parsed_specifiers(test_configuration_converter, specifiers).split()
                expectations = self._serialize_parsed_expectations(parsed_expectation_to_string).split()
                result.append(self._format_line(self.bugs, specifiers, self.name, expectations, self.comment))
            return "\n".join(result) if result else None

        return self._format_line(self.bugs, self.specifiers, self.name, self.expectations, self.comment,
                                 include_specifiers, include_expectations, include_comment)

    def to_csv(self):
        # Note that this doesn't include the comments.
        return '%s,%s,%s,%s' % (self.name, ' '.join(self.bugs), ' '.join(self.specifiers), ' '.join(self.expectations))

    def _serialize_parsed_expectations(self, parsed_expectation_to_string):
        result = []
        for index in TestExpectations.EXPECTATIONS.values():
            if index in self.parsed_expectations:
                result.append(parsed_expectation_to_string[index])
        return ' '.join(result)

    def _serialize_parsed_specifiers(self, test_configuration_converter, specifiers):
        result = []
        result.extend(sorted(self.parsed_specifiers))
        result.extend(test_configuration_converter.specifier_sorter().sort_specifiers(specifiers))
        return ' '.join(result)

    @staticmethod
    def _filter_redundant_expectations(expectations):
        # Pass is implied by Skip/Slow, so drop it from those combinations.
        if set(expectations) == set(['Pass', 'Skip']):
            return ['Skip']
        if set(expectations) == set(['Pass', 'Slow']):
            return ['Slow']
        return expectations

    @staticmethod
    def _format_line(bugs, specifiers, name, expectations, comment, include_specifiers=True, include_expectations=True, include_comment=True):
        """Formats one line: '<bugs> [ specifiers ] name [ expectations ] #comment'."""
        new_specifiers = []
        new_expectations = []
        for specifier in specifiers:
            # FIXME: Make this all work with the mixed-cased specifiers (e.g. WontFix, Slow, etc).
            specifier = specifier.upper()
            new_specifiers.append(TestExpectationParser._inverted_configuration_tokens.get(specifier, specifier))

        for expectation in expectations:
            expectation = expectation.upper()
            new_expectations.append(TestExpectationParser._inverted_expectation_tokens.get(expectation, expectation))

        result = ''
        if include_specifiers and (bugs or new_specifiers):
            if bugs:
                result += ' '.join(bugs) + ' '
            if new_specifiers:
                result += '[ %s ] ' % ' '.join(new_specifiers)
        result += name
        if include_expectations and new_expectations:
            new_expectations = TestExpectationLine._filter_redundant_expectations(new_expectations)
            result += ' [ %s ]' % ' '.join(sorted(set(new_expectations)))
        if include_comment and comment is not None:
            result += " #%s" % comment
        return result
# FIXME: Refactor API to be a proper CRUD.
class TestExpectationsModel(object):
    """Represents relational store of all expectations and provides CRUD semantics to manage it."""

    def __init__(self, shorten_filename=None):
        # Maps a test to its list of expectations.
        self._test_to_expectations = {}

        # Maps a test to list of its specifiers (string values)
        self._test_to_specifiers = {}

        # Maps a test to a TestExpectationLine instance.
        self._test_to_expectation_line = {}

        self._expectation_to_tests = self._dict_of_sets(TestExpectations.EXPECTATIONS)
        self._timeline_to_tests = self._dict_of_sets(TestExpectations.TIMELINES)
        self._result_type_to_tests = self._dict_of_sets(TestExpectations.RESULT_TYPES)

        # Used to abbreviate filenames in warning messages.
        self._shorten_filename = shorten_filename or (lambda x: x)

    def _merge_test_map(self, self_map, other_map):
        # Union the per-test values, preserving the container type (list vs set)
        # of the incoming map.
        for test in other_map:
            new_expectations = set(other_map[test])
            if test in self_map:
                new_expectations |= set(self_map[test])
            self_map[test] = list(new_expectations) if isinstance(other_map[test], list) else new_expectations

    def _merge_dict_of_sets(self, self_dict, other_dict):
        for key in other_dict:
            self_dict[key] |= other_dict[key]

    def merge_model(self, other):
        """Folds another model's expectations into this one."""
        self._merge_test_map(self._test_to_expectations, other._test_to_expectations)

        # merge_expectation_lines is O(tests per line). Therefore, this loop
        # is O((tests per line)^2) which is really expensive when a line
        # contains a lot of tests. Cache the output of merge_expectation_lines
        # so that we only call that n^2 in the number of *lines*.
        merge_lines_cache = defaultdict(dict)

        for test, other_line in other._test_to_expectation_line.items():
            merged_line = None
            if test in self._test_to_expectation_line:
                self_line = self._test_to_expectation_line[test]
                if other_line not in merge_lines_cache[self_line]:
                    merge_lines_cache[self_line][other_line] = TestExpectationLine.merge_expectation_lines(
                        self_line, other_line, model_all_expectations=False)
                merged_line = merge_lines_cache[self_line][other_line]
            else:
                merged_line = other_line

            self._test_to_expectation_line[test] = merged_line

        self._merge_dict_of_sets(self._expectation_to_tests, other._expectation_to_tests)
        self._merge_dict_of_sets(self._timeline_to_tests, other._timeline_to_tests)
        self._merge_dict_of_sets(self._result_type_to_tests, other._result_type_to_tests)

    def _dict_of_sets(self, strings_to_constants):
        """Takes a dict of strings->constants and returns a dict mapping
        each constant to an empty set."""
        d = {}
        for c in strings_to_constants.values():
            d[c] = set()
        return d

    def get_test_set(self, expectation, include_skips=True):
        tests = self._expectation_to_tests[expectation]
        if not include_skips:
            tests = tests - self.get_test_set(SKIP)
        return tests

    def get_test_set_for_keyword(self, keyword):
        """Returns tests matching an expectation keyword, or failing that, a specifier."""
        expectation_enum = TestExpectations.EXPECTATIONS.get(keyword.lower(), None)
        if expectation_enum is not None:
            return self._expectation_to_tests[expectation_enum]

        # .items() (not Python-2-only .iteritems()) so this also works on Python 3.
        matching_tests = set()
        for test, specifiers in self._test_to_specifiers.items():
            if keyword.lower() in specifiers:
                matching_tests.add(test)
        return matching_tests

    def get_tests_with_result_type(self, result_type):
        return self._result_type_to_tests[result_type]

    def get_tests_with_timeline(self, timeline):
        return self._timeline_to_tests[timeline]

    def has_test(self, test):
        return test in self._test_to_expectation_line

    def get_expectation_line(self, test):
        return self._test_to_expectation_line.get(test)

    def get_expectations(self, test):
        return self._test_to_expectations[test]

    def get_expectations_string(self, test):
        """Returns the expectations for the given test as an uppercase string.
        If there are no expectations for the test, then "PASS" is returned."""
        if self.get_expectation_line(test).is_skipped_outside_expectations_file:
            return 'NOTRUN'

        expectations = self.get_expectations(test)
        retval = []

        # FIXME: WontFix should cause the test to get skipped without artificially adding SKIP to the expectations list.
        if WONTFIX in expectations and SKIP in expectations:
            expectations.remove(SKIP)

        for expectation in expectations:
            retval.append(self.expectation_to_string(expectation))

        return " ".join(retval)

    def expectation_to_string(self, expectation):
        """Return the uppercased string equivalent of a given expectation."""
        for item in TestExpectations.EXPECTATIONS.items():
            if item[1] == expectation:
                return item[0].upper()
        raise ValueError(expectation)

    def remove_expectation_line(self, test):
        if not self.has_test(test):
            return
        self._clear_expectations_for_test(test)
        del self._test_to_expectation_line[test]

    def add_expectation_line(self, expectation_line,
                             model_all_expectations=False):
        """Returns a list of warnings encountered while matching specifiers."""
        if expectation_line.is_invalid():
            return

        for test in expectation_line.matching_tests:
            if self._already_seen_better_match(test, expectation_line):
                continue

            if model_all_expectations:
                expectation_line = TestExpectationLine.merge_expectation_lines(
                    self.get_expectation_line(test), expectation_line, model_all_expectations)

            self._clear_expectations_for_test(test)
            self._test_to_expectation_line[test] = expectation_line
            self._add_test(test, expectation_line)

    def _add_test(self, test, expectation_line):
        """Sets the expected state for a given test.

        This routine assumes the test has not been added before. If it has,
        use _clear_expectations_for_test() to reset the state prior to
        calling this."""
        self._test_to_expectations[test] = expectation_line.parsed_expectations
        for expectation in expectation_line.parsed_expectations:
            self._expectation_to_tests[expectation].add(test)

        self._test_to_specifiers[test] = expectation_line.specifiers

        if WONTFIX in expectation_line.parsed_expectations:
            self._timeline_to_tests[WONTFIX].add(test)
        else:
            self._timeline_to_tests[NOW].add(test)

        if SKIP in expectation_line.parsed_expectations:
            self._result_type_to_tests[SKIP].add(test)
        elif expectation_line.parsed_expectations == set([PASS]):
            self._result_type_to_tests[PASS].add(test)
        elif expectation_line.is_flaky():
            self._result_type_to_tests[FLAKY].add(test)
        else:
            # FIXME: What is this?
            self._result_type_to_tests[FAIL].add(test)

    def _clear_expectations_for_test(self, test):
        """Remove preexisting expectations for this test.
        This happens if we are seeing a more precise path
        than a previous listing.
        """
        if self.has_test(test):
            self._test_to_expectations.pop(test, '')
            self._remove_from_sets(test, self._expectation_to_tests)
            self._remove_from_sets(test, self._timeline_to_tests)
            self._remove_from_sets(test, self._result_type_to_tests)

    def _remove_from_sets(self, test, dict_of_sets_of_tests):
        """Removes the given test from the sets in the dictionary.

        Args:
          test: test to look for
          dict: dict of sets of files"""
        # .values() (not Python-2-only .itervalues()) so this also works on Python 3.
        for set_of_tests in dict_of_sets_of_tests.values():
            if test in set_of_tests:
                set_of_tests.remove(test)

    def _already_seen_better_match(self, test, expectation_line):
        """Returns whether we've seen a better match already in the file.

        Returns True if we've already seen a expectation_line.name that matches more of the test
        than this path does
        """
        # FIXME: See comment below about matching test configs and specificity.
        if not self.has_test(test):
            # We've never seen this test before.
            return False

        prev_expectation_line = self._test_to_expectation_line[test]

        if prev_expectation_line.filename != expectation_line.filename:
            # We've moved on to a new expectation file, which overrides older ones.
            return False

        if len(prev_expectation_line.path) > len(expectation_line.path):
            # The previous path matched more of the test.
            return True

        if len(prev_expectation_line.path) < len(expectation_line.path):
            # This path matches more of the test.
            return False

        # At this point we know we have seen a previous exact match on this
        # base path, so we need to check the two sets of specifiers.

        # FIXME: This code was originally designed to allow lines that matched
        # more specifiers to override lines that matched fewer specifiers.
        # However, we currently view these as errors.
        #
        # To use the "more specifiers wins" policy, change the errors for overrides
        # to be warnings and return False".

        if prev_expectation_line.matching_configurations == expectation_line.matching_configurations:
            expectation_line.warnings.append('Duplicate or ambiguous entry lines %s:%s and %s:%s.' % (
                self._shorten_filename(prev_expectation_line.filename), prev_expectation_line.line_numbers,
                self._shorten_filename(expectation_line.filename), expectation_line.line_numbers))
            return True

        if prev_expectation_line.matching_configurations >= expectation_line.matching_configurations:
            expectation_line.warnings.append('More specific entry for %s on line %s:%s overrides line %s:%s.' % (
                expectation_line.name,
                self._shorten_filename(prev_expectation_line.filename), prev_expectation_line.line_numbers,
                self._shorten_filename(expectation_line.filename), expectation_line.line_numbers))
            # FIXME: return False if we want more specific to win.
            return True

        if prev_expectation_line.matching_configurations <= expectation_line.matching_configurations:
            expectation_line.warnings.append('More specific entry for %s on line %s:%s overrides line %s:%s.' % (
                expectation_line.name,
                self._shorten_filename(expectation_line.filename), expectation_line.line_numbers,
                self._shorten_filename(prev_expectation_line.filename), prev_expectation_line.line_numbers))
            return True

        if prev_expectation_line.matching_configurations & expectation_line.matching_configurations:
            expectation_line.warnings.append('Entries for %s on lines %s:%s and %s:%s match overlapping sets of configurations.' % (
                expectation_line.name,
                self._shorten_filename(prev_expectation_line.filename), prev_expectation_line.line_numbers,
                self._shorten_filename(expectation_line.filename), expectation_line.line_numbers))
            return True

        # Configuration sets are disjoint, then.
        return False
class TestExpectations(object):
"""Test expectations consist of lines with specifications of what
to expect from layout test cases. The test cases can be directories
in which case the expectations apply to all test cases in that
directory and any subdirectory. The format is along the lines of:
LayoutTests/fast/js/fixme.js [ Failure ]
LayoutTests/fast/js/flaky.js [ Failure Pass ]
LayoutTests/fast/js/crash.js [ Crash Failure Pass Timeout ]
...
To add specifiers:
LayoutTests/fast/js/no-good.js
[ Debug ] LayoutTests/fast/js/no-good.js [ Pass Timeout ]
[ Debug ] LayoutTests/fast/js/no-good.js [ Pass Skip Timeout ]
[ Linux Debug ] LayoutTests/fast/js/no-good.js [ Pass Skip Timeout ]
[ Linux Win ] LayoutTests/fast/js/no-good.js [ Pass Skip Timeout ]
Skip: Doesn't run the test.
Slow: The test takes a long time to run, but does not timeout indefinitely.
WontFix: For tests that we never intend to pass on a given platform (treated like Skip).
Notes:
-A test cannot be both SLOW and TIMEOUT
-A test can be included twice, but not via the same path.
-If a test is included twice, then the more precise path wins.
-CRASH tests cannot be WONTFIX
"""
# FIXME: Update to new syntax once the old format is no longer supported.
EXPECTATIONS = {'pass': PASS,
'audio': AUDIO,
'fail': FAIL,
'image': IMAGE,
'image+text': IMAGE_PLUS_TEXT,
'text': TEXT,
'timeout': TIMEOUT,
'crash': CRASH,
'leak': LEAK,
'missing': MISSING,
TestExpectationParser.SKIP_MODIFIER: SKIP,
TestExpectationParser.NEEDS_REBASELINE_MODIFIER: NEEDS_REBASELINE,
TestExpectationParser.NEEDS_MANUAL_REBASELINE_MODIFIER: NEEDS_MANUAL_REBASELINE,
TestExpectationParser.WONTFIX_MODIFIER: WONTFIX,
TestExpectationParser.SLOW_MODIFIER: SLOW,
TestExpectationParser.REBASELINE_MODIFIER: REBASELINE,
}
EXPECTATIONS_TO_STRING = dict((k, v) for (v, k) in EXPECTATIONS.iteritems())
# (aggregated by category, pass/fail/skip, type)
EXPECTATION_DESCRIPTIONS = {SKIP: 'skipped',
PASS: 'passes',
FAIL: 'failures',
IMAGE: 'image-only failures',
TEXT: 'text-only failures',
IMAGE_PLUS_TEXT: 'image and text failures',
AUDIO: 'audio failures',
CRASH: 'crashes',
LEAK: 'leaks',
TIMEOUT: 'timeouts',
MISSING: 'missing results'}
NON_TEST_OUTCOME_EXPECTATIONS = (REBASELINE, SKIP, SLOW, WONTFIX)
BUILD_TYPES = ('debug', 'release')
TIMELINES = {TestExpectationParser.WONTFIX_MODIFIER: WONTFIX,
'now': NOW}
RESULT_TYPES = {'skip': SKIP,
'pass': PASS,
'fail': FAIL,
'flaky': FLAKY}
@classmethod
def expectation_from_string(cls, string):
assert(' ' not in string) # This only handles one expectation at a time.
return cls.EXPECTATIONS.get(string.lower())
@staticmethod
def result_was_expected(result, expected_results, test_needs_rebaselining):
"""Returns whether we got a result we were expecting.
Args:
result: actual result of a test execution
expected_results: set of results listed in test_expectations
test_needs_rebaselining: whether test was marked as REBASELINE"""
if not (set(expected_results) - (set(TestExpectations.NON_TEST_OUTCOME_EXPECTATIONS))):
expected_results = set([PASS])
if result in expected_results:
return True
if result in (PASS, TEXT, IMAGE, IMAGE_PLUS_TEXT, AUDIO, MISSING) and (NEEDS_REBASELINE in expected_results or NEEDS_MANUAL_REBASELINE in expected_results):
return True
if result in (TEXT, IMAGE, IMAGE_PLUS_TEXT, AUDIO) and (FAIL in expected_results):
return True
if result == MISSING and test_needs_rebaselining:
return True
if result == SKIP:
return True
return False
@staticmethod
def remove_pixel_failures(expected_results):
    """Return a copy of the expected results with IMAGE rewritten to PASS.

    When pixel tests are disabled, a test expected to fail as IMAGE will
    effectively pass, so its expectation is downgraded accordingly."""
    results = set(expected_results)
    if IMAGE not in results:
        return results
    results.discard(IMAGE)
    results.add(PASS)
    return results
@staticmethod
def remove_non_sanitizer_failures(expected_results):
    """Return a copy of the expected results with the failure kinds the
    sanitizer bots ignore (IMAGE, FAIL, IMAGE_PLUS_TEXT) mapped to PASS."""
    results = set(expected_results)
    ignored = results.intersection((IMAGE, FAIL, IMAGE_PLUS_TEXT))
    if ignored:
        results -= ignored
        results.add(PASS)
    return results
@staticmethod
def has_pixel_failures(actual_results):
    """Whether any actual result implies an image (pixel) mismatch."""
    return any(outcome in actual_results for outcome in (IMAGE, FAIL))
@staticmethod
def suffixes_for_expectations(expectations):
    """Return the set of baseline-file suffixes implied by the given
    parsed expectations.

    IMAGE only needs a pixel baseline; a generic FAIL may require text,
    image, or audio baselines.
    """
    suffixes = set()
    if IMAGE in expectations:
        suffixes.add('png')
    if FAIL in expectations:
        # A generic failure can manifest in any output stream.
        suffixes.update(('txt', 'png', 'wav'))
    # Cleanup: `suffixes` is already a set; the previous `set(suffixes)`
    # made a pointless copy of a local that never escapes.
    return suffixes
@staticmethod
def suffixes_for_actual_expectations_string(expectations):
suffixes = set()
if 'TEXT' in expectations:
suffixes.add('txt')
if 'IMAGE' in expectations:
suffixes.add('png')
if 'AUDIO' in expectations:
suffixes.add('wav')
if 'MISSING' in expectations:
suffixes.add('txt')
suffixes.add('png')
suffixes.add('wav')
return suffixes
# FIXME: This constructor does too much work. We should move the actual parsing of
# the expectations into separate routines so that linting and handling overrides
# can be controlled separately, and the constructor can be more of a no-op.
def __init__(self, port, tests=None, include_overrides=True, expectations_dict=None, model_all_expectations=False, is_lint_mode=False):
    """Parse the port's expectation files into a queryable model.

    Args:
        port: Port object providing expectation files and configuration.
        tests: optional full list of test names; tests without explicit
            lines receive a default PASS expectation.
        include_overrides: whether to also parse the non-generic
            (override) files in expectations_dict.
        expectations_dict: OrderedDict of path -> file contents;
            defaults to port.expectations_dict().
        model_all_expectations: model every line regardless of whether it
            matches the current configuration.
        is_lint_mode: lint mode; implies model_all_expectations, and any
            warning becomes a fatal ParseError.
    """
    self._full_test_list = tests
    self._test_config = port.test_configuration()
    self._is_lint_mode = is_lint_mode
    self._model_all_expectations = self._is_lint_mode or model_all_expectations
    self._model = TestExpectationsModel(self._shorten_filename)
    self._parser = TestExpectationParser(port, tests, self._is_lint_mode)
    self._port = port
    self._skipped_tests_warnings = []
    self._expectations = []
    if not expectations_dict:
        expectations_dict = port.expectations_dict()
    # Always parse the generic expectations (the generic file is required
    # to be the first one in the expectations_dict, which must be an OrderedDict).
    # NOTE: `.items()[0]` indexing works only on Python 2 dict views.
    generic_path, generic_exps = expectations_dict.items()[0]
    expectations = self._parser.parse(generic_path, generic_exps)
    self._add_expectations(expectations, self._model)
    self._expectations += expectations
    # Now add the overrides if so requested.
    if include_overrides:
        for path, contents in expectations_dict.items()[1:]:
            expectations = self._parser.parse(path, contents)
            # Each override file is modeled separately and then merged so
            # later files take precedence over earlier ones.
            model = TestExpectationsModel(self._shorten_filename)
            self._add_expectations(expectations, model)
            self._expectations += expectations
            self._model.merge_model(model)
    # FIXME: move ignore_tests into port.skipped_layout_tests()
    self.add_extra_skipped_tests(port.skipped_layout_tests(tests).union(set(port.get_option('ignore_tests', []))))
    self.add_expectations_from_bot()
    self._has_warnings = False
    self._report_warnings()
    self._process_tests_without_expectations()
# TODO(ojan): Allow for removing skipped tests when getting the list of
# tests to run, but not when getting metrics.
def model(self):
    # Accessor for the underlying TestExpectationsModel.
    return self._model
def get_needs_rebaseline_failures(self):
    # Tests currently marked NeedsRebaseline.
    return self._model.get_test_set(NEEDS_REBASELINE)
def get_rebaselining_failures(self):
    # Tests currently marked Rebaseline.
    return self._model.get_test_set(REBASELINE)
# FIXME: Change the callsites to use TestExpectationsModel and remove.
def get_expectations(self, test):
    return self._model.get_expectations(test)
# FIXME: Change the callsites to use TestExpectationsModel and remove.
def get_tests_with_result_type(self, result_type):
    return self._model.get_tests_with_result_type(result_type)
# FIXME: Change the callsites to use TestExpectationsModel and remove.
def get_test_set(self, expectation, include_skips=True):
    return self._model.get_test_set(expectation, include_skips)
# FIXME: Change the callsites to use TestExpectationsModel and remove.
def get_tests_with_timeline(self, timeline):
    return self._model.get_tests_with_timeline(timeline)
def get_expectations_string(self, test):
    # Human-readable expectation string for `test`; delegates to the model.
    return self._model.get_expectations_string(test)
def expectation_to_string(self, expectation):
    # Canonical string for one expectation constant; delegates to the model.
    return self._model.expectation_to_string(expectation)
def matches_an_expected_result(self, test, result, pixel_tests_are_enabled, sanitizer_is_enabled):
    """Whether `result` for `test` matches its (possibly filtered) expectations."""
    expected = self._model.get_expectations(test)
    # Sanitizer bots ignore most failure kinds; otherwise, when pixel tests
    # are off, image failures are treated as passes.
    if sanitizer_is_enabled:
        expected = self.remove_non_sanitizer_failures(expected)
    elif not pixel_tests_are_enabled:
        expected = self.remove_pixel_failures(expected)
    return self.result_was_expected(result, expected, self.is_rebaselining(test))
def is_rebaselining(self, test):
    # True when the test is explicitly marked REBASELINE.
    return REBASELINE in self._model.get_expectations(test)
def _shorten_filename(self, filename):
    """Return `filename` relative to the WebKit checkout root when it lives
    inside the checkout; otherwise return it unchanged."""
    webkit_base = self._port.path_from_webkit_base()
    if not filename.startswith(webkit_base):
        return filename
    return self._port.host.filesystem.relpath(filename, webkit_base)
def _report_warnings(self):
    """Collect parse warnings from every expectation line and report them.

    In lint mode warnings are fatal (raised as a ParseError); otherwise
    they are logged via the module logger.
    """
    warnings = []
    for expectation in self._expectations:
        for warning in expectation.warnings:
            # Format: "<file>:<line-numbers> <warning> <test-name-or-raw-line>"
            warnings.append('%s:%s %s %s' % (self._shorten_filename(expectation.filename), expectation.line_numbers,
                            warning, expectation.name if expectation.expectations else expectation.original_string))
    if warnings:
        self._has_warnings = True
        if self._is_lint_mode:
            raise ParseError(warnings)
        _log.warning('--lint-test-files warnings:')
        for warning in warnings:
            _log.warning(warning)
        _log.warning('')
def _process_tests_without_expectations(self):
    """Give every known test without an explicit line a default PASS expectation."""
    if not self._full_test_list:
        return
    for test in self._full_test_list:
        if self._model.has_test(test):
            continue
        self._model.add_expectation_line(TestExpectationLine.create_passing_expectation(test))
def has_warnings(self):
    # Whether _report_warnings() found any parse warnings.
    return self._has_warnings
def remove_configurations(self, removals):
    """Remove (test, configuration) pairs from the parsed expectations.

    Each removal drops one configuration from the matching line; a line
    whose configuration set becomes empty is deleted outright, together
    with any now-orphaned leading comment/whitespace block. Returns the
    re-serialized expectations text.
    """
    expectations_to_remove = []
    modified_expectations = []
    for test, test_configuration in removals:
        for expectation in self._expectations:
            # Only lines naming this exact test that actually parsed into
            # expectations are candidates.
            if expectation.name != test or not expectation.parsed_expectations:
                continue
            if test_configuration not in expectation.matching_configurations:
                continue
            expectation.matching_configurations.remove(test_configuration)
            if expectation.matching_configurations:
                # Still applies elsewhere: keep it, but re-render from the
                # parsed form so the removed configuration disappears.
                modified_expectations.append(expectation)
            else:
                expectations_to_remove.append(expectation)
    for expectation in expectations_to_remove:
        index = self._expectations.index(expectation)
        self._expectations.remove(expectation)
        # If the removed line ended its paragraph, also delete the run of
        # comment/whitespace lines that introduced it.
        if index == len(self._expectations) or self._expectations[index].is_whitespace_or_comment():
            while index and self._expectations[index - 1].is_whitespace_or_comment():
                index = index - 1
                self._expectations.pop(index)
    return self.list_to_string(self._expectations, self._parser._test_configuration_converter, modified_expectations)
def _add_expectations(self, expectation_list, model):
    """Feed parsed lines into `model`, keeping only lines that apply to the
    current configuration (or every line when modeling everything)."""
    model_all = self._model_all_expectations
    for line in expectation_list:
        if not line.expectations:
            continue
        if model_all or self._test_config in line.matching_configurations:
            model.add_expectation_line(line, model_all_expectations=model_all)
def add_extra_skipped_tests(self, tests_to_skip):
    """Mark additional tests (e.g. from Skipped files) as skipped.

    Warns on any expectation line whose test also appears in a Skipped
    file, then merges synthetic skip lines into the model.
    """
    if not tests_to_skip:
        return
    for test in self._expectations:
        if test.name and test.name in tests_to_skip:
            test.warnings.append('%s:%s %s is also in a Skipped file.' % (test.filename, test.line_numbers, test.name))
    model = TestExpectationsModel(self._shorten_filename)
    for test_name in tests_to_skip:
        expectation_line = self._parser.expectation_for_skipped_test(test_name)
        model.add_expectation_line(expectation_line)
    self._model.merge_model(model)
def add_expectations_from_bot(self):
    """Merge the bot's observed (flakiness-dashboard) expectations into the model.

    FIXME: With mode 'very-flaky' and 'maybe-flaky', this will show the
    expectations entry in the flakiness dashboard rows for each test to be
    whatever the bot thinks they should be. Is this a good thing?
    """
    bot_expectations = self._port.bot_expectations()
    model = TestExpectationsModel(self._shorten_filename)
    # Hoisted out of the loop: the option cannot change per test.
    # NOTE(review): this flag was computed but never used. Unexpected
    # results are supposed to be merged into existing expectations
    # (upstream passes it to add_expectation_line()); confirm whether the
    # argument was dropped here intentionally.
    merge = self._port.get_option('ignore_flaky_tests') == 'unexpected'
    for test_name in bot_expectations:
        expectation_line = self._parser.expectation_line_for_test(test_name, bot_expectations[test_name])
        model.add_expectation_line(expectation_line)
    self._model.merge_model(model)
def add_expectation_line(self, expectation_line):
    """Register a new expectation line in both the model and the raw list."""
    self._model.add_expectation_line(expectation_line)
    self._expectations.append(expectation_line)
def remove_expectation_line(self, test):
    """Drop `test`'s line from the raw list and the model; no-op when the
    test is unknown."""
    if not self._model.has_test(test):
        return
    line = self._model.get_expectation_line(test)
    self._expectations.remove(line)
    self._model.remove_expectation_line(test)
@staticmethod
def list_to_string(expectation_lines, test_configuration_converter=None, reconstitute_only_these=None):
def serialize(expectation_line):
# If reconstitute_only_these is an empty list, we want to return original_string.
# So we need to compare reconstitute_only_these to None, not just check if it's falsey.
if reconstitute_only_these is None or expectation_line in reconstitute_only_these:
return expectation_line.to_string(test_configuration_converter)
return expectation_line.original_string
def nones_out(expectation_line):
return expectation_line is not None
return "\n".join(filter(nones_out, map(serialize, expectation_lines)))
| |
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
When training a model, it's often useful to decay the
learning rate during training process, this is called
learning_rate_decay. There are many strategies to do
this, this module will provide some classical method.
User can also implement their own learning_rate_decay
strategy according to this module.
"""
from __future__ import print_function
import math
import numbers
from . import control_flow
from . import nn
from . import ops
from . import tensor
from ..initializer import init_on_cpu
from ..framework import default_main_program, Parameter, unique_name, name_scope
from ..framework import Variable
from ..dygraph import base as imperative_base
from ..dygraph import learning_rate_scheduler as imperate_lr
__all__ = [
'exponential_decay', 'natural_exp_decay', 'inverse_time_decay',
'polynomial_decay', 'piecewise_decay', 'noam_decay', 'cosine_decay',
'linear_lr_warmup'
]
def _decay_step_counter(begin=0):
    """Create (or reuse) the shared LR-decay step counter as a float32 Variable.

    The first global step is zero in learning rate decay.
    """
    counter = nn.autoincreased_step_counter(
        counter_name='@LR_DECAY_COUNTER@', begin=begin, step=1)
    return tensor.cast(counter, 'float32')
def noam_decay(d_model, warmup_steps):
    """
    Noam decay method. The numpy implementation of noam decay is as follows.

    .. code-block:: python

        import paddle.fluid as fluid
        import numpy as np
        # set hyper parameters
        d_model = 2
        current_steps = 20
        warmup_steps = 200
        # compute
        lr_value = np.power(d_model, -0.5) * np.min([
            np.power(current_steps, -0.5),
            np.power(warmup_steps, -1.5) * current_steps])

    Please reference `attention is all you need
    <https://arxiv.org/pdf/1706.03762.pdf>`_.

    Args:
        d_model(Variable): The dimensionality of input and output of model.
        warmup_steps(Variable): A super parameter.

    Returns:
        The decayed learning rate.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            warmup_steps = 100
            learning_rate = 0.01
            lr = fluid.layers.learning_rate_scheduler.noam_decay(
                1/(warmup_steps *(learning_rate ** 2)),
                warmup_steps)
    """
    with default_main_program()._lr_schedule_guard():
        if imperative_base.enabled():
            # Dygraph mode: return a scheduler object instead of graph ops.
            decay = imperate_lr.NoamDecay(d_model, warmup_steps)
            return decay
        else:
            # Counter starts at 1 so step**-0.5 is defined on the first step.
            global_step = _decay_step_counter(1)
            a = global_step**-0.5
            b = (warmup_steps**-1.5) * global_step
            # lr = d_model^-0.5 * min(step^-0.5, step * warmup_steps^-1.5)
            lr_value = (d_model**-0.5) * nn.elementwise_min(a, b)
            return lr_value
def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False):
    """
    Applies exponential decay to the learning rate.

    The learning rate is multiplied by ``decay_rate`` once per
    ``decay_steps`` steps, continuously unless ``staircase`` is True:

    >>> if staircase == True:
    >>>     decayed_learning_rate = learning_rate * decay_rate ^ floor(global_step / decay_steps)
    >>> else:
    >>>     decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps)

    Args:
        learning_rate(Variable|float): The initial learning rate. It should be a Variable
                                       or a float
        decay_steps(int): The learning rate decay steps. See the decay computation above.
        decay_rate(float): The learning rate decay rate. See the decay computation above.
        staircase(bool): If True, decay the learning rate at discrete intervals
                         (by `decay_rate` every `decay_steps`); if False, decay
                         continuously following the formula above. Default: False

    Returns:
        Variable: The decayed learning rate. The data type is float32.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            base_lr = 0.1
            sgd_optimizer = fluid.optimizer.SGD(
                learning_rate=fluid.layers.exponential_decay(
                    learning_rate=base_lr,
                    decay_steps=10000,
                    decay_rate=0.5,
                    staircase=True))
    """
    with default_main_program()._lr_schedule_guard():
        # Dygraph mode returns a scheduler object instead of graph ops.
        if imperative_base.enabled():
            return imperate_lr.ExponentialDecay(learning_rate, decay_steps,
                                                decay_rate, staircase)
        step = _decay_step_counter()
        exponent = step / decay_steps
        if staircase:
            exponent = ops.floor(exponent)
        return learning_rate * (decay_rate**exponent)
def natural_exp_decay(learning_rate, decay_steps, decay_rate, staircase=False):
    """Applies natural exponential decay to the initial learning rate.

    When training a model, it is often recommended to lower the learning rate as the
    training progresses. By using this function, the learning rate will be decayed by
    natural exponential power 'decay_rate' every 'decay_steps' steps.

    Decayed learning rate calculates as follows:

    >>> if not staircase:
    >>>     decayed_learning_rate = learning_rate * exp(- decay_rate * (global_step / decay_steps))
    >>> else:
    >>>     decayed_learning_rate = learning_rate * exp(- decay_rate * floor(global_step / decay_steps))

    Args:
        learning_rate(Variable|float): The initial learning rate. It should be a Variable
                                       or a float
        decay_steps(int): The learning rate decay steps. See the decay computation above.
        decay_rate(float): The learning rate decay rate. See the decay computation above.
        staircase(bool): If True, decay the learning rate at discrete intervals, which
                         means the learning rate will be decayed by natural exponential power
                         `decay_rate` every `decay_steps`. If False, learning rate will be
                         decayed continuously and following the formula above. Default: False

    Returns:
        The decayed learning rate. The data type is float32.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            base_lr = 0.1
            sgd_optimizer = fluid.optimizer.SGD(
                learning_rate=fluid.layers.natural_exp_decay(
                    learning_rate=base_lr,
                    decay_steps=10000,
                    decay_rate=0.5,
                    staircase=True))
    """
    with default_main_program()._lr_schedule_guard():
        if imperative_base.enabled():
            # Dygraph mode: return a scheduler object instead of graph ops.
            decay = imperate_lr.NaturalExpDecay(learning_rate, decay_steps,
                                                decay_rate, staircase)
            return decay
        else:
            global_step = _decay_step_counter()
            div_res = global_step / decay_steps
            if staircase:
                div_res = ops.floor(div_res)
            # lr * e^(-decay_rate * step / decay_steps)
            decayed_lr = learning_rate * ops.exp(-1 * decay_rate * div_res)
            return decayed_lr
def inverse_time_decay(learning_rate, decay_steps, decay_rate, staircase=False):
    """
    Applies inverse time decay to the initial learning rate.

    The learning rate is divided by a factor that grows linearly with the
    (optionally floored) number of elapsed decay periods:

    >>> if staircase == True:
    >>>     decayed_learning_rate = learning_rate / (1 + decay_rate * floor(global_step / decay_step))
    >>> else:
    >>>     decayed_learning_rate = learning_rate / (1 + decay_rate * global_step / decay_step)

    Args:
        learning_rate(Variable|float): The initial learning rate. It should be a Variable
                                       or a float
        decay_steps(int): The learning rate decay steps. See the decay computation above.
        decay_rate(float): The learning rate decay rate. See the decay computation above.
        staircase(bool): If True, decay the learning rate at discrete intervals
                         (by `decay_rate` times every `decay_steps`); otherwise
                         decay continuously following the formula above.
                         Default: False

    Returns:
        Variable: The decayed learning rate. The data type is float32.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            base_lr = 0.1
            sgd_optimizer = fluid.optimizer.SGD(
                learning_rate=fluid.layers.inverse_time_decay(
                    learning_rate=base_lr,
                    decay_steps=10000,
                    decay_rate=0.5,
                    staircase=True))
    """
    with default_main_program()._lr_schedule_guard():
        # Dygraph mode returns a scheduler object instead of graph ops.
        if imperative_base.enabled():
            return imperate_lr.InverseTimeDecay(learning_rate, decay_steps,
                                                decay_rate, staircase)
        step = _decay_step_counter()
        periods = step / decay_steps
        if staircase:
            periods = ops.floor(periods)
        return learning_rate / (1 + decay_rate * periods)
def polynomial_decay(learning_rate,
                     decay_steps,
                     end_learning_rate=0.0001,
                     power=1.0,
                     cycle=False):
    """
    Applies polynomial decay to the initial learning rate.

    .. code-block:: text

        if cycle:
            decay_steps = decay_steps * ceil(global_step / decay_steps)
        else:
            global_step = min(global_step, decay_steps)
        decayed_learning_rate = (learning_rate - end_learning_rate) *
            (1 - global_step / decay_steps) ^ power + end_learning_rate

    Args:
        learning_rate(Variable|float32): A scalar float32 value or a Variable. This
                                         will be the initial learning rate during training.
        decay_steps(int32): A Python `int32` number.
        end_learning_rate(float): A Python `float` number.
        power(float): A Python `float` number.
        cycle(bool): If set true, decay the learning rate every decay_steps.

    Returns:
        Variable: The decayed learning rate

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            start_lr = 0.01
            total_step = 5000
            end_lr = 0
            lr = fluid.layers.polynomial_decay(
                start_lr, total_step, end_lr, power=1)
    """
    with default_main_program()._lr_schedule_guard():
        if imperative_base.enabled():
            # Dygraph mode: return a scheduler object instead of graph ops.
            decay = imperate_lr.PolynomialDecay(learning_rate, decay_steps,
                                                end_learning_rate, power, cycle)
            return decay
        else:
            global_step = _decay_step_counter()
            if cycle:
                # Restart the decay each cycle: stretch the denominator to
                # the end of the current `decay_steps`-long period.
                div_res = ops.ceil(global_step / decay_steps)
                zero_var = tensor.fill_constant(
                    shape=[1], dtype='float32', value=0.0)
                one_var = tensor.fill_constant(
                    shape=[1], dtype='float32', value=1.0)
                with control_flow.Switch() as switch:
                    # At step 0, ceil(0 / decay_steps) == 0 would zero the
                    # denominator; force the first cycle instead.
                    with switch.case(global_step == zero_var):
                        tensor.assign(input=one_var, output=div_res)
                decay_steps = decay_steps * div_res
            else:
                # Clamp the step so the decay bottoms out at
                # end_learning_rate instead of going negative.
                decay_steps_var = tensor.fill_constant(
                    shape=[1], dtype='float32', value=float(decay_steps))
                global_step = nn.elementwise_min(
                    x=global_step, y=decay_steps_var)
            decayed_lr = (learning_rate - end_learning_rate) * \
                ((1 - global_step / decay_steps) ** power) + end_learning_rate
            return decayed_lr
def piecewise_decay(boundaries, values):
    """Applies piecewise decay to the initial learning rate.

    The algorithm can be described as the code below.

    .. code-block:: text

        boundaries = [10000, 20000]
        values = [1.0, 0.5, 0.1]
        if step < 10000:
            learning_rate = 1.0
        elif 10000 <= step < 20000:
            learning_rate = 0.5
        else:
            learning_rate = 0.1

    Args:
        boundaries: A list of steps numbers.
        values: A list of learning rate values that will be picked during
            different step boundaries.

    Returns:
        The decayed learning rate.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            boundaries = [10000, 20000]
            values = [1.0, 0.5, 0.1]
            optimizer = fluid.optimizer.Momentum(
                momentum=0.9,
                learning_rate=fluid.layers.piecewise_decay(boundaries=boundaries, values=values),
                regularization=fluid.regularizer.L2Decay(1e-4))
    """
    with default_main_program()._lr_schedule_guard():
        # One value per interval: exactly one more value than boundary.
        if len(values) - len(boundaries) != 1:
            raise ValueError("len(values) - len(boundaries) should be 1")
        if imperative_base.enabled():
            # Dygraph mode: return a scheduler object instead of graph ops.
            decay = imperate_lr.PiecewiseDecay(boundaries, values, 0)
            return decay
        else:
            global_step = _decay_step_counter()
            # Global variable holding the current LR; assigned at run time
            # by the Switch cases below.
            lr = tensor.create_global_var(
                shape=[1],
                value=0.0,
                dtype='float32',
                persistable=True,
                name="learning_rate")
            with control_flow.Switch() as switch:
                for i in range(len(boundaries)):
                    boundary_val = tensor.fill_constant(
                        shape=[1],
                        dtype='float32',
                        value=float(boundaries[i]),
                        force_cpu=True)
                    value_var = tensor.fill_constant(
                        shape=[1], dtype='float32', value=float(values[i]))
                    # First matching case wins: step < boundaries[i].
                    with switch.case(global_step < boundary_val):
                        tensor.assign(value_var, lr)
                last_value_var = tensor.fill_constant(
                    shape=[1],
                    dtype='float32',
                    value=float(values[len(values) - 1]))
                # Past the last boundary: use the final value.
                with switch.default():
                    tensor.assign(last_value_var, lr)
            return lr
def cosine_decay(learning_rate, step_each_epoch, epochs):
    """
    Applies cosine decay to the learning rate.

    Following cosine annealing, the learning rate for the current epoch is

    .. math::
        decayed\_lr = learning\_rate * 0.5 * (cos(epoch * \\frac{math.pi}{epochs}) + 1)

    Args:
        learning_rate(Variable|float): The initial learning rate.
        step_each_epoch(int): the number of steps in an epoch.
        epochs(int): the number of epochs.

    Returns:
        Variable: The decayed learning rate.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            base_lr = 0.1
            lr = fluid.layers.cosine_decay(
                learning_rate = base_lr, step_each_epoch=10000, epochs=120)
    """
    with default_main_program()._lr_schedule_guard():
        # Dygraph mode returns a scheduler object instead of graph ops.
        if imperative_base.enabled():
            return imperate_lr.CosineDecay(learning_rate, step_each_epoch,
                                           epochs)
        step = _decay_step_counter()
        # Current (fractional steps floored) epoch index.
        epoch = ops.floor(step / step_each_epoch)
        cosine_term = ops.cos(epoch * math.pi / epochs) + 1
        return learning_rate * 0.5 * cosine_term
def linear_lr_warmup(learning_rate, warmup_steps, start_lr, end_lr):
    """
    This operator use the linear learning rate warm up strategy to adjust the learning rate preliminarily before the normal learning rate scheduling.
    For more information, please refer to `Bag of Tricks for Image Classification with Convolutional Neural Networks <https://arxiv.org/abs/1812.01187>`_

    When global_step < warmup_steps, learning rate is updated as:

    .. code-block:: text

        linear_step = end_lr - start_lr
        lr = start_lr + linear_step * (global_step / warmup_steps)

    where start_lr is the initial learning rate, and end_lr is the final learning rate;

    When global_step >= warmup_steps, learning rate is updated as:

    .. code-block:: text

        lr = learning_rate

    where lr is the learning_rate after warm-up.

    Args:
        learning_rate (Variable|float): Learning_rate after warm-up, it could be 1D-Tensor or single value with the data type of float32.
        warmup_steps (int): Steps for warm up.
        start_lr (float): Initial learning rate of warm up.
        end_lr (float): Final learning rate of warm up.

    Returns:
        Variable: Warm-up learning rate with the same data type as learning_rate.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            boundaries = [100, 200]
            lr_steps = [0.1, 0.01, 0.001]
            learning_rate = fluid.layers.piecewise_decay(boundaries, lr_steps) #case1, 1D-Tensor
            #learning_rate = 0.1 #case2, single-value
            warmup_steps = 50
            start_lr = 1. / 3.
            end_lr = 0.1
            decayed_lr = fluid.layers.linear_lr_warmup(learning_rate,
                warmup_steps, start_lr, end_lr)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            out, = exe.run(fetch_list=[decayed_lr.name])
            print(out)
            # case1: [0.33333334]
            # case2: [0.33333334]
    """
    dtype = 'float32'
    if isinstance(learning_rate, Variable):
        # Match the dtype of the post-warm-up learning rate tensor.
        dtype = learning_rate.dtype
    linear_step = float(end_lr) - float(start_lr)
    with default_main_program()._lr_schedule_guard():
        # Global variable holding the current LR; assigned at run time by
        # the Switch below.
        lr = tensor.create_global_var(
            shape=[1],
            value=0.0,
            dtype=dtype,
            persistable=True,
            name="learning_rate_warmup")
        global_step = _decay_step_counter()
        with control_flow.Switch() as switch:
            # Warm-up phase: interpolate linearly from start_lr to end_lr.
            with switch.case(global_step < warmup_steps):
                decayed_lr = start_lr + linear_step * (global_step /
                                                       float(warmup_steps))
                tensor.assign(decayed_lr, lr)
            # After warm-up: use the scheduled learning rate unchanged.
            with switch.default():
                if not isinstance(learning_rate, Variable):
                    learning_rate = tensor.fill_constant(
                        shape=[1], dtype=dtype, value=float(learning_rate))
                tensor.assign(learning_rate, lr)
        return lr
| |
import batoid
import yaml
import numpy as np
from test_helpers import timer, do_pickle, init_gpu, rays_allclose
@timer
def test_prescreen():
    """Add an OPDScreen in front of LSST entrance pupil. The OPD that comes out
    should be _negative_ the added phase delay by convention.
    """
    lsst = batoid.Optic.fromYaml("LSST_r.yaml")
    wavelength = 620e-9
    # Reference Zernikes of the unmodified telescope.
    z_ref = batoid.analysis.zernikeGQ(
        lsst, 0, 0, wavelength, rings=10, reference='chief', jmax=37, eps=0.61
    )
    rng = np.random.default_rng(577)
    for i in range(4, 38):
        amplitude = rng.uniform(0.1, 0.2)
        # Screen carrying a single Zernike term j=i with known amplitude
        # (in units of the wavelength) over the annular LSST pupil.
        zern = batoid.Zernike(
            np.array([0]*i+[amplitude])*wavelength,
            R_outer=4.18, R_inner=0.61*4.18
        )
        tel = batoid.CompoundOptic(
            (
                batoid.optic.OPDScreen(
                    batoid.Plane(),
                    zern,
                    name='PS',
                    obscuration=batoid.ObscNegation(batoid.ObscCircle(5.0)),
                    coordSys=lsst.stopSurface.coordSys
                ),
                *lsst.items
            ),
            name='PS0',
            backDist=lsst.backDist,
            pupilSize=lsst.pupilSize,
            inMedium=lsst.inMedium,
            stopSurface=lsst.stopSurface,
            sphereRadius=lsst.sphereRadius,
            pupilObscuration=lsst.pupilObscuration
        )
        do_pickle(tel)
        zGQ = batoid.analysis.zernikeGQ(
            tel, 0, 0, wavelength, rings=10, reference='chief', jmax=37, eps=0.61
        )
        zTA = batoid.analysis.zernikeTransverseAberration(
            tel, 0, 0, wavelength, nrad=10, naz=60, reference='chief', jmax=37, eps=0.61
        )
        # Only the injected term should change relative to the reference.
        z_expect = np.zeros_like(zGQ)
        z_expect[i] = -amplitude  # Longer OPL => negative OPD
        np.testing.assert_allclose(
            (zGQ-z_ref)[4:], z_expect[4:],
            rtol=0, atol=5e-4
        )
        # Distortion makes this comparison less precise
        np.testing.assert_allclose(
            zGQ[4:], zTA[4:],
            rtol=0, atol=3e-3
        )
@timer
def test_zeroscreen():
    """Add a zero phase OPDScreen in front of LSST entrance pupil. Should have
    _no_ effect.
    """
    lsst = batoid.Optic.fromYaml("LSST_r.yaml")
    # A variety of screen carrier geometries, all with zero added phase.
    screens = [
        batoid.optic.OPDScreen(
            batoid.Plane(),
            batoid.Plane(),
            name='PS',
            coordSys=lsst.stopSurface.coordSys
        ),
        batoid.optic.OPDScreen(
            batoid.Paraboloid(100.0),
            batoid.Plane(),
            name='PS',
            coordSys=lsst.stopSurface.coordSys
        ),
        batoid.optic.OPDScreen(
            batoid.Quadric(11.0, -0.5),
            batoid.Plane(),
            name='PS',
            coordSys=lsst.stopSurface.coordSys
        ),
        batoid.optic.OPDScreen(
            batoid.Zernike([0, 0, 0, 0, 300e-9, 0, 0, 400e-9, -600e-9]),
            batoid.Zernike([0]*22),
            name='PS',
            coordSys=lsst.stopSurface.coordSys
        )
    ]
    for screen in screens:
        tel = batoid.CompoundOptic(
            (screen, *lsst.items),
            name='PS0',
            backDist=lsst.backDist,
            pupilSize=lsst.pupilSize,
            inMedium=lsst.inMedium,
            stopSurface=lsst.stopSurface,
            sphereRadius=lsst.sphereRadius,
            pupilObscuration=lsst.pupilObscuration
        )
        do_pickle(tel)
        rng = np.random.default_rng(57)
        thx = np.deg2rad(rng.uniform(-1, 1))
        thy = np.deg2rad(rng.uniform(-1, 1))
        rays = batoid.RayVector.asPolar(
            optic=tel, wavelength=620e-9,
            theta_x=thx, theta_y=thy,
            nrad=2, naz=6
        )
        tf1 = tel.traceFull(rays)
        tf2 = lsst.traceFull(rays)
        # A zero-phase screen must not deflect ray directions...
        np.testing.assert_allclose(
            tf1['PS']['in'].v,
            tf1['PS']['out'].v,
            rtol=0, atol=1e-14
        )
        # ...and the full trace must match the unmodified telescope.
        for key in tf2:
            rays_allclose(
                tf1[key]['out'],
                tf2[key]['out'],
                atol=1e-13
            )
@timer
def test_z4_focus():
    """Test thin lens approximation

    A thin biconvex lens and a pure-Z4 phase screen with matched defocus
    should both come to best focus at the thin-lens focal distance.
    """
    from scipy.optimize import minimize_scalar
    R = 0.5  # symmetric biconvex surface curvature radii
    d = 0.01  # front/back separation
    n0 = 1.0003
    n1 = 1.46
    # Lens-maker equation for focal length:
    f_inv = (n1-n0)*(2/R + (n1-n0)*d/R**2)
    f = 1/f_inv
    # With R = 0.5, sag is approximately -r^2 / (2 R)
    # So with 2 surfaces, total lens thickness is ~ -r^2 / R
    # With the refractive index difference, total delay is ~ -(n1-n0) r^2 / R
    # Z4 is sqrt(3) (2 r^2 - 1)
    # Ignoring the constant part, we can solve for the Z4 coefficient:
    # a4 sqrt(3) 2 r^2 = -(n1-n0) r^2 / R
    # a4 = -(n1-n0) / (2 sqrt(3) R)
    a4 = -(n1-n0) / (2 * np.sqrt(3) * R)
    # NOTE(review): YAML nesting below was reconstructed (original string
    # indentation was lost); confirm against the original file.
    biconvex_str = f"""
    type: CompoundOptic
    inMedium: {n0}
    backDist: 1.0
    stopSurface:
      type: Interface
      surface:
        type: Plane
      coordSys:
        z: 0.0
    pupilSize: 0.1
    items:
      -
        type: RefractiveInterface
        surface:
          type: Sphere
          R: {-R}
        coordSys:
          z: {+d/2}
        inMedium: {n0}
        outMedium: {n1}
        name: 'L1'
      -
        type: RefractiveInterface
        surface:
          type: Sphere
          R: {R}
        coordSys:
          z: {-d/2}
        inMedium: {n1}
        outMedium: {n0}
        name: 'L2'
      -
        type: Detector
        surface:
          type: Plane
        coordSys:
          z: {-f}
        inMedium: {n0}
        name: D
    """
    biconvex = batoid.parse.parse_optic(
        yaml.safe_load(biconvex_str)
    )
    screen_str = f"""
    type: CompoundOptic
    inMedium: {n0}
    backDist: 1.0
    stopSurface:
      type: Interface
      surface:
        type: Plane
      coordSys:
        z: 0.0
    pupilSize: 0.1
    items:
      -
        type: OPDScreen
        surface:
          type: Plane
        screen:
          type: Zernike
          coef: [0.0, 0.0, 0.0, 0.0, {a4}]
        inMedium: {n0}
        outMedium: {n0}
        name: screen
      -
        type: Detector
        surface:
          type: Plane
        coordSys:
          z: {-f}
        inMedium: {n0}
        name: D
    """
    screen = batoid.parse.parse_optic(
        yaml.safe_load(screen_str)
    )
    # Merit function to compute RMS spot size under given surface displacement
    def meritSpot(shift, telescope, surface, wavelength, axis=2):
        rays = batoid.RayVector.asPolar(
            optic=telescope,
            wavelength=wavelength,
            theta_x=0, theta_y=0,
            nrad=10, naz=60
        )
        displacement = np.zeros(3)
        displacement[axis] = shift
        perturbed = telescope.withGloballyShiftedOptic(surface, displacement)
        # trace() mutates `rays` in place, so the positions below are the
        # traced results.
        perturbed.trace(rays)
        w = ~rays.vignetted
        return np.sqrt(np.var(rays.x[w]) + np.var(rays.y[w]))  # meters
    # The detector already sits at the thin-lens focus, so the optimal
    # detector shift should be ~0 for both models.
    x_biconvex = minimize_scalar(
        meritSpot,
        (-0.01, 0.0, 0.01),
        args=(biconvex, "D", 500e-9)
    )
    x_screen = minimize_scalar(
        meritSpot,
        (-0.01, 0.0, 0.01),
        args=(screen, "D", 500e-9)
    )
    np.testing.assert_allclose(x_biconvex.x, 0, rtol=0, atol=1e-3)
    np.testing.assert_allclose(x_screen.x, 0, rtol=0, atol=1e-3)
if __name__ == '__main__':
    # Initialize the GPU context (if available) before running the tests.
    init_gpu()
    test_prescreen()
    test_zeroscreen()
    test_z4_focus()
| |
# Copyright 2021 The ParallelAccel Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=protected-access
"""Unit test for worker_manager module"""
import asyncio
import hashlib
import importlib
import multiprocessing
import os
import secrets
import time
import typing
import unittest
import unittest.mock
import aiounittest
import google.auth
import google.cloud.container
import kubernetes
import sanic
from parallel_accel.shared import redis, schemas
from src import worker_manager
class TestASICWorkerManager(aiounittest.AsyncTestCase):
    """Tests ASICWorkerManager class behavior."""

    # Random API key shared by the whole test class; its SHA-1 digest is
    # filled in by setUpClass.
    API_KEY: str = secrets.token_hex(16)
    API_KEY_HASH: str = None
@classmethod
def setUpClass(cls) -> None:
    """See base class documentation.

    Hashes the API key, patches the kubernetes client classes with mocks,
    exports the environment worker_manager expects, reloads the module,
    and builds the manager under test with a mocked Sanic app.
    """
    # Compute API key hash
    hasher = hashlib.sha1()
    hasher.update(cls.API_KEY.encode())
    cls.API_KEY_HASH = hasher.hexdigest()
    # Patch imports
    cls.patchers = []
    # Each mock returns itself when called, so constructing the patched
    # class yields the same singleton mock.
    cls.mocked_appsv1api = unittest.mock.Mock(
        spec=kubernetes.client.AppsV1Api
    )
    cls.mocked_appsv1api.return_value = cls.mocked_appsv1api
    patcher = unittest.mock.patch(
        "kubernetes.client.AppsV1Api", cls.mocked_appsv1api
    )
    cls.patchers.append(patcher)
    cls.mocked_corev1api = unittest.mock.Mock(
        spec=kubernetes.client.CoreV1Api
    )
    cls.mocked_corev1api.return_value = cls.mocked_corev1api
    patcher = unittest.mock.patch(
        "kubernetes.client.CoreV1Api", cls.mocked_corev1api
    )
    cls.patchers.append(patcher)
    cls.mocked_watch = unittest.mock.Mock(spec=kubernetes.watch.Watch)
    cls.mocked_watch.return_value = cls.mocked_watch
    patcher = unittest.mock.patch(
        "kubernetes.watch.Watch", cls.mocked_watch
    )
    cls.patchers.append(patcher)
    for patcher in cls.patchers:
        patcher.start()
    cls.mocked_event_loop = unittest.mock.Mock(
        spec=asyncio.AbstractEventLoop
    )
    cls.mocked_event_loop.run_in_executor = unittest.mock.AsyncMock()
    cls.mocked_redis_store = unittest.mock.Mock(
        spec=redis.WorkersRedisStore
    )
    # Environment read by worker_manager at import time; reload so the
    # module picks these values up.
    os.environ["GKE_CLUSTER"] = "test-cluster"
    os.environ["GCP_PROJECT"] = "test-project"
    importlib.reload(worker_manager)
    cls.mocked_sanic_app = unittest.mock.Mock(spec=sanic.Sanic)
    cls.mocked_sanic_app.loop = cls.mocked_event_loop
    cls.manager = worker_manager.ASICWorkerManager(cls.mocked_redis_store)
    cls.manager._app = cls.mocked_sanic_app
@classmethod
def tearDownClass(cls) -> None:
"""See base class documentation."""
del os.environ["GKE_CLUSTER"]
for patcher in cls.patchers:
patcher.stop()
def tearDown(self) -> None:
"""See base class documentation."""
for mock in [x for x in dir(self) if x.startswith("mocked_")]:
getattr(self, mock).reset_mock()
async def test_handler_start_command(self) -> None:
"""Tests START worker command."""
# Test setup
meta = kubernetes.client.V1ObjectMeta(name="test-deployment-1")
deployment = kubernetes.client.V1Deployment(metadata=meta)
deployment_list = kubernetes.client.V1DeploymentList(items=[deployment])
list_namespaced_deployment_thread = unittest.mock.Mock(
spec=multiprocessing.pool.AsyncResult
)
list_namespaced_deployment_thread.get.return_value = deployment_list
self.mocked_appsv1api.list_namespaced_deployment.return_value = (
list_namespaced_deployment_thread
)
patch_namespaced_deployment_scale_thread = unittest.mock.Mock(
spec=multiprocessing.pool.AsyncResult
)
self.mocked_appsv1api.patch_namespaced_deployment_scale.return_value = (
patch_namespaced_deployment_scale_thread
)
self.mocked_watch.stream.return_value = [{"type": "ADDED"}]
self.manager._credentials = unittest.mock.MagicMock()
self.manager._credentials.expired = False
self.manager._credentials.valid = True
# Run test
await self.manager.handle_command(
self.API_KEY, worker_manager.WorkerCommand.START
)
# Verification
kwargs = {
"async_req": True,
"namespace": "default",
"label_selector": f"id={self.API_KEY_HASH}",
}
self.mocked_appsv1api.list_namespaced_deployment.assert_called_once_with(
**kwargs
)
kwargs = {
"async_req": True,
"namespace": "default",
"body": {"spec": {"replicas": 1}},
}
self.mocked_appsv1api.patch_namespaced_deployment_scale.assert_called_once_with(
meta.name, **kwargs
)
self._verify_run_in_executor(
[
(
(
None,
list_namespaced_deployment_thread.wait,
),
),
(
(
None,
patch_namespaced_deployment_scale_thread.wait,
),
),
(
(
None,
self.manager._wait_for_asic_cluster_event,
self.API_KEY,
"ADDED",
),
),
(
(
None,
self.manager._wait_for_asic_cluster_readiness,
self.API_KEY,
),
),
]
)
async def test_handler_stop_command(self) -> None:
"""Tests STOP worker command."""
# Test setup
meta = kubernetes.client.V1ObjectMeta(name="test-deployment-1")
deployment = kubernetes.client.V1Deployment(metadata=meta)
deployment_list = kubernetes.client.V1DeploymentList(items=[deployment])
list_namespaced_deployment_thread = unittest.mock.Mock(
spec=multiprocessing.pool.AsyncResult
)
list_namespaced_deployment_thread.get.return_value = deployment_list
self.mocked_appsv1api.list_namespaced_deployment.return_value = (
list_namespaced_deployment_thread
)
patch_namespaced_deployment_scale_thread = unittest.mock.Mock(
spec=multiprocessing.pool.AsyncResult
)
self.mocked_appsv1api.patch_namespaced_deployment_scale.return_value = (
patch_namespaced_deployment_scale_thread
)
self.mocked_watch.stream.return_value = [
{"type": x for x in ("ADDED", "DELETED")}
]
self.manager._credentials = unittest.mock.MagicMock()
self.manager._credentials.expired = False
# Run test
await self.manager.handle_command(
self.API_KEY, worker_manager.WorkerCommand.STOP
)
# Verification
kwargs = {
"async_req": True,
"namespace": "default",
"label_selector": f"id={self.API_KEY_HASH}",
}
self.mocked_appsv1api.list_namespaced_deployment.assert_called_once_with(
**kwargs
)
kwargs = {
"async_req": True,
"namespace": "default",
"body": {"spec": {"replicas": 0}},
}
self.mocked_appsv1api.patch_namespaced_deployment_scale.assert_called_once_with(
meta.name, **kwargs
)
self._verify_run_in_executor(
[
(
(
None,
list_namespaced_deployment_thread.wait,
),
),
(
(
None,
patch_namespaced_deployment_scale_thread.wait,
),
),
(
(
None,
self.manager._wait_for_asic_cluster_event,
self.API_KEY,
"DELETED",
),
),
]
)
async def test_handler_restart_command(self) -> None:
"""Tests RESTART worker command."""
# Test setup
meta = kubernetes.client.V1ObjectMeta(name="test-asic_cluster-1")
asic_cluster = kubernetes.client.V1ASICCluster(metadata=meta)
asic_cluster_list = kubernetes.client.V1ASICClusterList(items=[asic_cluster])
list_namespaced_asic_cluster_thread = unittest.mock.Mock(
spec=multiprocessing.pool.AsyncResult
)
list_namespaced_asic_cluster_thread.get.return_value = asic_cluster_list
self.mocked_corev1api.list_namespaced_asic_cluster.return_value = (
list_namespaced_asic_cluster_thread
)
delete_namespaced_asic_cluster_thread = unittest.mock.Mock(
spec=multiprocessing.pool.AsyncResult
)
self.mocked_corev1api.delete_namespaced_asic_cluster.return_value = (
delete_namespaced_asic_cluster_thread
)
self.manager._credentials = unittest.mock.MagicMock()
self.manager._credentials.expired = False
# Run test
await self.manager.handle_command(
self.API_KEY, worker_manager.WorkerCommand.RESTART
)
# Verification
kwargs = {
"async_req": True,
"namespace": "default",
"label_selector": f"id={self.API_KEY_HASH}",
}
self.mocked_corev1api.list_namespaced_asic_cluster.assert_called_once_with(
**kwargs
)
kwargs = {"async_req": True, "namespace": "default"}
self.mocked_corev1api.delete_namespaced_asic_cluster.assert_called_once_with(
meta.name, **kwargs
)
self._verify_run_in_executor(
[
(
(
None,
list_namespaced_asic_cluster_thread.wait,
),
),
(
(
None,
delete_namespaced_asic_cluster_thread.wait,
),
),
(
(
None,
self.manager._wait_for_asic_cluster_event,
self.API_KEY,
"ADDED",
),
),
(
(
None,
self.manager._wait_for_asic_cluster_readiness,
self.API_KEY,
),
),
]
)
@unittest.mock.patch("kubernetes.client.Configuration")
@unittest.mock.patch("google.cloud.container.ClusterManagerAsyncClient")
@unittest.mock.patch("google.cloud.container.GetClusterRequest")
@unittest.mock.patch("google.auth.transport.requests.Request")
@unittest.mock.patch("google.auth.default")
async def test_initialize(
self,
_mocked_default: unittest.mock.Mock,
mocked_request: unittest.mock.Mock,
mocked_get_cluster_request: unittest.mock.Mock,
mocked_cluster_manager: unittest.mock.Mock,
mocked_configuration: unittest.mock.Mock,
) -> None:
"""Tests initialize method behavior."""
cluster_name = "cluster-name"
gcp_project = "gcp-project"
mocked_request.return_value = mocked_request
mocked_credentials = unittest.mock.Mock(
spec=google.auth.compute_engine.Credentials
)
mocked_credentials.token = "API-TOKEN"
mocked_credentials.expired = False
mocked_credentials.valid = False
self.mocked_event_loop.run_in_executor.return_value = (
mocked_credentials,
None,
)
mocked_get_cluster_request.return_value = mocked_get_cluster_request
mocked_cluster = unittest.mock.Mock(spec=google.cloud.container.Cluster)
mocked_cluster.endpoint = "1.2.3.4"
mocked_cluster_manager.return_value = mocked_cluster_manager
mocked_cluster_manager.get_cluster = unittest.mock.AsyncMock(
return_value=mocked_cluster
)
mocked_configuration.get_default_copy.return_value = (
mocked_configuration
)
# Run test
await self.manager.initialize(
gcp_project, cluster_name, self.mocked_sanic_app
)
# Verification
self.assertEqual(self.mocked_event_loop.run_in_executor.call_count, 2)
mocked_get_cluster_request.assert_called_once_with()
self.assertEqual(
mocked_get_cluster_request.name,
f"projects/{gcp_project}/locations/us-central1/clusters/{cluster_name}",
)
mocked_cluster_manager.assert_called_once_with(
credentials=mocked_credentials,
)
mocked_cluster_manager.get_cluster.assert_called_once_with(
mocked_get_cluster_request
)
self.assertEqual(mocked_configuration.get_default_copy.call_count, 2)
self.assertEqual(mocked_configuration.set_default.call_count, 2)
self.assertEqual(
mocked_configuration.api_key,
{"authorization": f"Bearer {mocked_credentials.token}"},
)
self.assertEqual(
mocked_configuration.host, f"https://{mocked_cluster.endpoint}:443"
)
self.assertFalse(mocked_configuration.verify_ssl)
async def test_stop_idling_workers(self) -> None:
"""Tests stop_idling_workers method behavior."""
# Test setup
self.mocked_redis_store.get_workers_ids.return_value = [self.API_KEY]
self.mocked_redis_store.get_worker.return_value = (
schemas.WorkerInternal(schemas.WorkerState.IDLE)
)
meta = kubernetes.client.V1ObjectMeta(name="test-deployment-1")
deployment = kubernetes.client.V1Deployment(metadata=meta)
deployment_list = kubernetes.client.V1DeploymentList(items=[deployment])
list_namespaced_deployment_thread = unittest.mock.Mock(
spec=multiprocessing.pool.AsyncResult
)
list_namespaced_deployment_thread.get.return_value = deployment_list
self.mocked_appsv1api.list_namespaced_deployment.return_value = (
list_namespaced_deployment_thread
)
patch_namespaced_deployment_scale_thread = unittest.mock.Mock(
spec=multiprocessing.pool.AsyncResult
)
self.mocked_appsv1api.patch_namespaced_deployment_scale.return_value = (
patch_namespaced_deployment_scale_thread
)
self.mocked_watch.stream.return_value = [
{"type": x for x in ("ADDED", "DELETED")}
]
self.manager._credentials = unittest.mock.MagicMock()
self.manager._credentials.expired = False
# Run test
await self.manager.stop_idling_workers()
# Verification
self.mocked_redis_store.get_workers_ids.assert_called_once()
self.mocked_redis_store.get_worker.assert_called_once_with(self.API_KEY)
kwargs = {
"async_req": True,
"namespace": "default",
"label_selector": f"id={self.API_KEY_HASH}",
}
self.mocked_appsv1api.list_namespaced_deployment.assert_called_once_with(
**kwargs
)
kwargs = {
"async_req": True,
"namespace": "default",
"body": {"spec": {"replicas": 0}},
}
self.mocked_appsv1api.patch_namespaced_deployment_scale.assert_called_once_with(
meta.name, **kwargs
)
self._verify_run_in_executor(
[
(
(
None,
list_namespaced_deployment_thread.wait,
),
),
(
(
None,
patch_namespaced_deployment_scale_thread.wait,
),
),
(
(
None,
self.manager._wait_for_asic_cluster_event,
self.API_KEY,
"DELETED",
),
),
]
)
async def test_stop_idling_workers_no_matches(self) -> None:
"""Tests stop_idling_workers method behavior: no worker is idling"""
self.mocked_redis_store.get_workers_ids.return_value = [
self.API_KEY
] * 2
self.mocked_redis_store.get_worker.side_effect = [
schemas.WorkerInternal(
schemas.WorkerState.IDLE, job_timestamp=int(time.time())
),
schemas.WorkerInternal(schemas.WorkerState.OFFLINE),
]
# Run test
await self.manager.stop_idling_workers()
# Verification
self.mocked_redis_store.get_workers_ids.assert_called_once()
call_args_list = [((self.API_KEY,),)] * 2
self.assertEqual(
self.mocked_redis_store.get_worker.call_args_list, call_args_list
)
self.mocked_appsv1api.list_namespaced_deployment.assert_not_called()
def test_wait_for_asic_cluster_events_added(self) -> None:
"""Tests _wait_for_asic_cluster_events method behavior: listen for ADDED asic_clusters"""
# Test setup
self.mocked_watch.stream.return_value = [{"type": "ADDED"}]
# Run test
self.manager._wait_for_asic_cluster_event(self.API_KEY, "ADDED")
# Verification
self._verify_watch(timeout_seconds=60)
self._verify_redis_store("set_booting")
def test_wait_for_asic_cluster_events_deleted(self) -> None:
"""Tests _wait_for_asic_cluster_events method behavior: listen for DELETED asic_clusters"""
# Test setup
self.mocked_watch.stream.return_value = [
{"type": x for x in ("ADDED", "DELETED")}
]
# Run test
self.manager._wait_for_asic_cluster_event(self.API_KEY, "DELETED")
# Verification
self._verify_watch(timeout_seconds=60)
self._verify_redis_store("set_offline")
def test_wait_for_asic_cluster_readiness(self) -> None:
"""Tests _wait_for_asic_cluster_readiness method behavior"""
# Test setup
mocked_object = unittest.mock.Mock()
mocked_object.status = unittest.mock.Mock()
mocked_object.status.phase = "Running"
self.mocked_watch.stream.return_value = [
{"object": mocked_object, "type": "MODIFIED"}
]
# Run test
self.manager._wait_for_asic_cluster_readiness(self.API_KEY)
# Verification
self._verify_watch()
def _verify_run_in_executor(
self, call_args_list: typing.List[unittest.mock._Call]
) -> None:
"""Verifies calls to the mocked_event_loop.run_in_executor mock.
Args:
funcs: List of mocked AsyncResult object that were passed to the
ASICWorkerManager._wait_for_thread method.
"""
self.assertEqual(
self.mocked_event_loop.run_in_executor.call_args_list,
call_args_list,
)
def _verify_redis_store(self, func: str) -> None:
"""Verifies calls to mocked_redis_store mock.
Args:
func: Expected called mocked method.
"""
getattr(self.mocked_redis_store, func).assert_called_once_with(
self.API_KEY
)
def _verify_watch(self, **kwargs) -> None:
"""Verifies calls to the mocked_watch mock."""
self.mocked_watch.assert_called_once_with()
self.mocked_watch.stream.assert_called_once_with(
func=self.mocked_corev1api.list_namespaced_asic_cluster,
label_selector=f"id={self.API_KEY_HASH}",
namespace="default",
**kwargs,
)
self.mocked_watch.stop.assert_called_once_with()
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
# Copyright: (c) 2018, Marcus Watkins <marwatk@marcuswatkins.net>
# Based on code:
# Copyright: (c) 2013, Phillip Gentry <phillip@cx.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: gitlab_deploy_key
short_description: Manages GitLab project deploy keys.
description:
- Adds, updates and removes project deploy keys
version_added: "2.6"
author:
- Marcus Watkins (@marwatk)
- Guillaume Martinez (@Lunik)
requirements:
- python >= 2.7
- python-gitlab python module
extends_documentation_fragment:
- auth_basic
options:
api_token:
description:
- GitLab token for logging in.
version_added: "2.8"
type: str
aliases:
- private_token
- access_token
project:
description:
- Id or Full path of project in the form of group/name
required: true
type: str
title:
description:
- Deploy key's title
required: true
type: str
key:
description:
- Deploy key
required: true
type: str
can_push:
description:
- Whether this key can push to the project
type: bool
default: no
state:
description:
- When C(present) the deploy key added to the project if it doesn't exist.
- When C(absent) it will be removed from the project if it exists
required: true
default: present
type: str
choices: [ "present", "absent" ]
'''
EXAMPLES = '''
- name: "Adding a project deploy key"
gitlab_deploy_key:
api_url: https://gitlab.example.com/
api_token: "{{ access_token }}"
project: "my_group/my_project"
title: "Jenkins CI"
state: present
key: "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9w..."
- name: "Update the above deploy key to add push access"
gitlab_deploy_key:
api_url: https://gitlab.example.com/
api_token: "{{ access_token }}"
project: "my_group/my_project"
title: "Jenkins CI"
state: present
can_push: yes
- name: "Remove the previous deploy key from the project"
gitlab_deploy_key:
api_url: https://gitlab.example.com/
api_token: "{{ access_token }}"
project: "my_group/my_project"
state: absent
key: "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9w..."
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: str
sample: "Success"
result:
description: json parsed response from the server
returned: always
type: dict
error:
description: the error message returned by the GitLab API
returned: failed
type: str
sample: "400: key is already in use"
deploy_key:
description: API object
returned: always
type: dict
'''
import os
import re
import traceback
GITLAB_IMP_ERR = None
try:
import gitlab
HAS_GITLAB_PACKAGE = True
except Exception:
GITLAB_IMP_ERR = traceback.format_exc()
HAS_GITLAB_PACKAGE = False
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
from ansible.module_utils.gitlab import findProject
class GitLabDeployKey(object):
    """Wraps python-gitlab deploy-key operations for the Ansible module.

    The floating '''@param''' blocks of the original were dead no-op string
    statements placed *before* the methods they described; they are now real
    docstrings. Method behavior is unchanged.
    """

    def __init__(self, module, gitlab_instance):
        self._module = module
        self._gitlab = gitlab_instance
        # Cached deploy key object, populated by existsDeployKey().
        self.deployKeyObject = None

    def createOrUpdateDeployKey(self, project, key_title, key_key, options):
        """Create the deploy key if missing, otherwise update it in place.

        @param project Project object
        @param key_title Title of the key
        @param key_key String of the key
        @param options Deploy key options (currently only 'can_push')
        @return True if the key was created or changed, False otherwise
        """
        changed = False

        # Because we have already called existsDeployKey() in main(),
        # deployKeyObject is None exactly when the key does not exist yet.
        if self.deployKeyObject is None:
            deployKey = self.createDeployKey(project, {
                'title': key_title,
                'key': key_key,
                'can_push': options['can_push']})
            changed = True
        else:
            changed, deployKey = self.updateDeployKey(self.deployKeyObject, {
                'can_push': options['can_push']})

        self.deployKeyObject = deployKey
        if changed:
            if self._module.check_mode:
                self._module.exit_json(changed=True, msg="Successfully created or updated the deploy key %s" % key_title)

            try:
                deployKey.save()
            except Exception as e:
                self._module.fail_json(msg="Failed to update deploy key: %s " % e)
            return True
        else:
            return False

    def createDeployKey(self, project, arguments):
        """Create a new deploy key on the project.

        @param project Project object
        @param arguments Attributes of the deployKey
        @return created deploy key object (True in check mode)
        """
        if self._module.check_mode:
            return True

        try:
            deployKey = project.keys.create(arguments)
        except (gitlab.exceptions.GitlabCreateError) as e:
            self._module.fail_json(msg="Failed to create deploy key: %s " % to_native(e))

        return deployKey

    def updateDeployKey(self, deployKey, arguments):
        """Apply non-None attributes onto an existing deploy key object.

        @param deployKey Deploy Key Object
        @param arguments Attributes of the deployKey
        @return tuple (changed, deployKey)
        """
        changed = False
        for arg_key, arg_value in arguments.items():
            # Only assign when the value actually differs so `changed` is
            # accurate (the original re-looked-up arguments[arg_key] thrice).
            if arg_value is not None and getattr(deployKey, arg_key) != arg_value:
                setattr(deployKey, arg_key, arg_value)
                changed = True

        return (changed, deployKey)

    def findDeployKey(self, project, key_title):
        """Look up a deploy key on the project by title.

        @param project Project object
        @param key_title Title of the key
        @return matching deploy key object, or None when not found
        """
        deployKeys = project.keys.list()
        for deployKey in deployKeys:
            if (deployKey.title == key_title):
                return deployKey
        return None

    def existsDeployKey(self, project, key_title):
        """Return True when the key exists, caching it on the instance.

        @param project Project object
        @param key_title Title of the key
        """
        # When the key exists, the object is stored in self.deployKeyObject.
        deployKey = self.findDeployKey(project, key_title)
        if deployKey:
            self.deployKeyObject = deployKey
            return True
        return False

    def deleteDeployKey(self):
        """Delete the cached deploy key (no-op in check mode)."""
        if self._module.check_mode:
            return True

        return self.deployKeyObject.delete()
def deprecation_warning(module):
    """Warn (via Ansible's deprecation channel) about the legacy api_token aliases."""
    deprecated_aliases = ['private_token', 'access_token']
    joined_aliases = '\', \''.join(deprecated_aliases)
    message = "Aliases \'{aliases}\' are deprecated".format(aliases=joined_aliases)
    module.deprecate(message, "2.10")
def main():
    # Ansible entry point: build the argument spec, connect to GitLab and
    # create/update/delete the requested project deploy key.
    argument_spec = basic_auth_argument_spec()
    argument_spec.update(dict(
        api_token=dict(type='str', no_log=True, aliases=["private_token", "access_token"]),
        state=dict(type='str', default="present", choices=["absent", "present"]),
        project=dict(type='str', required=True),
        key=dict(type='str', required=True),
        can_push=dict(type='bool', default=False),
        title=dict(type='str', required=True)
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[
            ['api_username', 'api_token'],
            ['api_password', 'api_token']
        ],
        required_together=[
            ['api_username', 'api_password']
        ],
        required_one_of=[
            ['api_username', 'api_token']
        ],
        supports_check_mode=True,
    )
    # Emit the deprecation notice for the legacy token aliases.
    deprecation_warning(module)
    # Strip any '/api...' suffix so python-gitlab gets the server base URL.
    gitlab_url = re.sub('/api.*', '', module.params['api_url'])
    validate_certs = module.params['validate_certs']
    gitlab_user = module.params['api_username']
    gitlab_password = module.params['api_password']
    gitlab_token = module.params['api_token']
    state = module.params['state']
    project_identifier = module.params['project']
    key_title = module.params['title']
    key_keyfile = module.params['key']
    key_can_push = module.params['can_push']
    if not HAS_GITLAB_PACKAGE:
        module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
    # Authenticate with either basic auth or a private token.
    try:
        gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, email=gitlab_user, password=gitlab_password,
                                        private_token=gitlab_token, api_version=4)
        gitlab_instance.auth()
    except (gitlab.exceptions.GitlabAuthenticationError, gitlab.exceptions.GitlabGetError) as e:
        module.fail_json(msg="Failed to connect to GitLab server: %s" % to_native(e))
    except (gitlab.exceptions.GitlabHttpError) as e:
        module.fail_json(msg="Failed to connect to GitLab server: %s. \
            GitLab remove Session API now that private tokens are removed from user API endpoints since version 10.2." % to_native(e))
    gitlab_deploy_key = GitLabDeployKey(module, gitlab_instance)
    # Resolve the target project; fail early when it does not exist.
    project = findProject(gitlab_instance, project_identifier)
    if project is None:
        module.fail_json(msg="Failed to create deploy key: project %s doesn't exists" % project_identifier)
    deployKey_exists = gitlab_deploy_key.existsDeployKey(project, key_title)
    if state == 'absent':
        if deployKey_exists:
            gitlab_deploy_key.deleteDeployKey()
            module.exit_json(changed=True, msg="Successfully deleted deploy key %s" % key_title)
        else:
            module.exit_json(changed=False, msg="Deploy key deleted or does not exists")
    if state == 'present':
        if gitlab_deploy_key.createOrUpdateDeployKey(project, key_title, key_keyfile, {'can_push': key_can_push}):
            module.exit_json(changed=True, msg="Successfully created or updated the deploy key %s" % key_title,
                             deploy_key=gitlab_deploy_key.deployKeyObject._attrs)
        else:
            module.exit_json(changed=False, msg="No need to update the deploy key %s" % key_title,
                             deploy_key=gitlab_deploy_key.deployKeyObject._attrs)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| |
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
# Seconds to back off after an RPC error before retrying.
ERR_SLEEP = 15
# Initial per-getwork nonce scan budget (Python 2 long literal; tuned
# adaptively in Miner.iterate based on measured hash rate).
MAX_NONCE = 1000000L
# Global configuration parsed from the CONFIG-FILE key=value lines.
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
	# Minimal JSON-RPC 1.1 client over HTTP basic auth (Python 2: httplib).
	OBJID = 1

	def __init__(self, host, port, username, password):
		authpair = "%s:%s" % (username, password)
		self.authhdr = "Basic %s" % (base64.b64encode(authpair))
		# 30 second timeout, no strict mode.
		self.conn = httplib.HTTPConnection(host, port, False, 30)
	def rpc(self, method, params=None):
		# Send one JSON-RPC request and return its 'result' member, the
		# 'error' member on RPC failure, or None on transport/parse failure.
		self.OBJID += 1
		obj = { 'version' : '1.1',
			'method' : method,
			'id' : self.OBJID }
		if params is None:
			obj['params'] = []
		else:
			obj['params'] = params
		self.conn.request('POST', '/', json.dumps(obj),
			{ 'Authorization' : self.authhdr,
			  'Content-type' : 'application/json' })

		resp = self.conn.getresponse()
		if resp is None:
			print "JSON-RPC: no response"
			return None

		body = resp.read()
		resp_obj = json.loads(body)
		if resp_obj is None:
			print "JSON-RPC: cannot JSON-decode body"
			return None
		if 'error' in resp_obj and resp_obj['error'] != None:
			return resp_obj['error']
		if 'result' not in resp_obj:
			print "JSON-RPC: no result in object"
			return None

		return resp_obj['result']

	def getblockcount(self):
		return self.rpc('getblockcount')

	def getwork(self, data=None):
		# With data, submits a solved share; without, requests new work.
		return self.rpc('getwork', data)
def uint32(x):
	# Truncate to an unsigned 32-bit value (Python 2 long literal).
	return x & 0xffffffffL
def bytereverse(x):
	# Swap the byte order of a 32-bit word (endianness flip).
	return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
			(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
	# Byte-swap each 32-bit word of the buffer in place; length is assumed
	# to be a multiple of 4 (Python 2 str buffers).
	out_words = []
	for i in range(0, len(in_buf), 4):
		word = struct.unpack('@I', in_buf[i:i+4])[0]
		out_words.append(struct.pack('@I', bytereverse(word)))
	return ''.join(out_words)
def wordreverse(in_buf):
	# Reverse the order of the buffer's 4-byte words (a trailing partial
	# word, if any, is kept intact and moved to the front).
	words = [in_buf[pos:pos + 4] for pos in range(0, len(in_buf), 4)]
	return ''.join(reversed(words))
class Miner:
	# Single-process getwork miner: fetches work over RPC, scans nonces and
	# submits any share that beats the target.
	def __init__(self, id):
		self.id = id
		self.max_nonce = MAX_NONCE

	def work(self, datastr, targetstr):
		# Scan up to self.max_nonce nonces against the given block header.
		# Returns (hashes_done, nonce_bin) where nonce_bin is the winning
		# 4-byte nonce or None when no share was found.
		# decode work data hex string to binary
		static_data = datastr.decode('hex')
		static_data = bufreverse(static_data)

		# the first 76b of 80b do not change
		blk_hdr = static_data[:76]

		# decode 256-bit target value
		targetbin = targetstr.decode('hex')
		targetbin = targetbin[::-1]	# byte-swap and dword-swap
		targetbin_str = targetbin.encode('hex')
		target = long(targetbin_str, 16)

		# pre-hash first 76b of block header
		static_hash = hashlib.sha256()
		static_hash.update(blk_hdr)

		for nonce in xrange(self.max_nonce):

			# encode 32-bit nonce value
			nonce_bin = struct.pack("<I", nonce)

			# hash final 4b, the nonce value
			hash1_o = static_hash.copy()
			hash1_o.update(nonce_bin)
			hash1 = hash1_o.digest()

			# sha256 hash of sha256 hash
			hash_o = hashlib.sha256()
			hash_o.update(hash1)
			hash = hash_o.digest()

			# quick test for winning solution: high 32 bits zero?
			if hash[-4:] != '\0\0\0\0':
				continue

			# convert binary hash to 256-bit Python long
			hash = bufreverse(hash)
			hash = wordreverse(hash)

			hash_str = hash.encode('hex')
			l = long(hash_str, 16)

			# proof-of-work test:  hash < target
			if l < target:
				print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
				return (nonce + 1, nonce_bin)
			else:
				print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
#				return (nonce + 1, nonce_bin)

		return (nonce + 1, None)

	def submit_work(self, rpc, original_data, nonce_bin):
		# Splice the solved nonce back into the original hex work data
		# (bytes 76..80 of the header) and push it upstream via getwork.
		nonce_bin = bufreverse(nonce_bin)
		nonce = nonce_bin.encode('hex')
		solution = original_data[:152] + nonce + original_data[160:256]
		param_arr = [ solution ]
		result = rpc.getwork(param_arr)
		print time.asctime(), "--> Upstream RPC result:", result

	def iterate(self, rpc):
		# One fetch/scan/submit cycle; also retunes max_nonce so a scan
		# takes roughly settings['scantime'] seconds.
		work = rpc.getwork()
		if work is None:
			time.sleep(ERR_SLEEP)
			return
		if 'data' not in work or 'target' not in work:
			time.sleep(ERR_SLEEP)
			return

		time_start = time.time()

		(hashes_done, nonce_bin) = self.work(work['data'],
						     work['target'])

		time_end = time.time()
		time_diff = time_end - time_start

		self.max_nonce = long(
			(hashes_done * settings['scantime']) / time_diff)
		if self.max_nonce > 0xfffffffaL:
			self.max_nonce = 0xfffffffaL

		if settings['hashmeter']:
			print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
			      self.id, hashes_done,
			      (hashes_done / 1000.0) / time_diff)

		if nonce_bin is not None:
			self.submit_work(rpc, work['data'], nonce_bin)

	def loop(self):
		# Mine forever against the RPC endpoint from the settings dict.
		rpc = BitcoinRPC(settings['host'], settings['port'],
				 settings['rpcuser'], settings['rpcpass'])
		if rpc is None:
			return

		while True:
			self.iterate(rpc)
def miner_thread(id):
	# Process entry point: run one Miner instance forever.
	Miner(id).loop()
if __name__ == '__main__':
	# Script entry point: parse the key=value config file, apply defaults,
	# then spawn one mining process per configured thread.
	if len(sys.argv) != 2:
		print "Usage: pyminer.py CONFIG-FILE"
		sys.exit(1)

	f = open(sys.argv[1])
	for line in f:
		# skip comment lines
		m = re.search('^\s*#', line)
		if m:
			continue

		# parse key=value lines
		m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
		if m is None:
			continue
		settings[m.group(1)] = m.group(2)
	f.close()

	# Fill in defaults for any setting the config file omitted.
	if 'host' not in settings:
		settings['host'] = '127.0.0.1'
	if 'port' not in settings:
		settings['port'] = 9755
	if 'threads' not in settings:
		settings['threads'] = 1
	if 'hashmeter' not in settings:
		settings['hashmeter'] = 0
	if 'scantime' not in settings:
		settings['scantime'] = 30L
	if 'rpcuser' not in settings or 'rpcpass' not in settings:
		print "Missing username and/or password in cfg file"
		sys.exit(1)

	# Config values arrive as strings; coerce the numeric ones.
	settings['port'] = int(settings['port'])
	settings['threads'] = int(settings['threads'])
	settings['hashmeter'] = int(settings['hashmeter'])
	settings['scantime'] = long(settings['scantime'])

	thr_list = []
	for thr_id in range(settings['threads']):
		p = Process(target=miner_thread, args=(thr_id,))
		p.start()
		thr_list.append(p)
		time.sleep(1)			# stagger threads

	print settings['threads'], "mining threads started"

	print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
	try:
		for thr_proc in thr_list:
			thr_proc.join()
	except KeyboardInterrupt:
		pass
	print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| |
from lxml import etree
from py2neo import Graph
from py2neo.cypher import MergeNode, DatabaseError
from collections import OrderedDict
from tabulate import tabulate
def parse_children(root, vertices=None, edges=None, residual_edges=None):
    """
    Conducts a depth-first search of an XML element tree, classifying
    each element (representing a VUE object) as either a node or link.

    This algorithm uses the conventional letters V and E to denote the
    sets of graph 'vertices' and 'edges,' respectively. However, the
    terms 'node' and 'link' are also used to refer to graph objects.
    Neo4j also favors 'relationship' over 'edge' and 'link.'

    Note: the tree represents the XML structure of the VUE graph, not
    the graph itself - XML tags are used in VUE to denote both nodes and links.

    :param root: XML element tree root (contains all graph data as 'child' tags)
    :param vertices: initial dictionary of vertices
    :param edges: initial dictionary of edges
    :param residual_edges: XML link elements whose endpoint lookup failed on
        a previous pass (kept keyed by element ID and retried below)
    :return: tuple G = (V, E) of graph data
    """
    # Initializations
    V = vertices or {}
    E = edges or {}
    E_res = residual_edges or {}
    params = {'V': V, 'E': E, 'E_res': E_res,
              'parent_ID': root.get('ID')}
    parser = ElementParser()

    # DFS + element classification
    for element in root.findall('child'):
        element_type = element.get(parser._xsi_type)
        if element_type in ['node', 'link']:
            V, E, E_res = parser.handler[element_type](parser, element, **params)
            params.update({'V': V, 'E': E, 'E_res': E_res})

    # Retry links skipped because an endpoint had not been parsed yet.
    # NOTE(review): if a link references an ID that never appears in the
    # map, this loop will not terminate - confirm VUE guarantees valid
    # endpoint IDs.
    skipped_elements = E_res.copy()
    while skipped_elements:
        for elem_id, element in skipped_elements.items():
            V, E, E_res = parser.handle_as_link(element, **params)
            if elem_id in E:  # Endpoint lookup succeeded
                del E_res[elem_id]
            params.update({'V': V, 'E': E, 'E_res': E_res})
        skipped_elements = E_res.copy()

    if root.tag == 'LW-MAP':
        # Final result sorted by ID
        V = OrderedDict(sorted(V.items()))
        E = OrderedDict(sorted(E.items()))
    return V, E
def MergeRelationship(args):
    '''
    Relationship equivalent of py2neo MergeNode function

    :param args: tuple of string arguments, in order: start node VUE_ID,
        end node VUE_ID, relationship type, relationship property string
    :return: Cypher query for merging a relationship
    '''
    # NOTE: the backslash continuations are inside the string literal, so
    # the continuation-line indentation becomes part of the emitted Cypher
    # (harmless whitespace in Cypher, but do not reformat casually).
    return \
        'MATCH (start:Node {VUE_ID: %s}),(end:Node {VUE_ID: %s}) \
        MERGE (start)-[r:%s{%s}]->(end) \
        RETURN start.VUE_ID, type(r), end.VUE_ID' % (*args,)
class ElementParser:
    def __init__(self):
        # Fully-qualified attribute name VUE uses to tag an element's type
        # (xsi:type with the XMLSchema-instance namespace), e.g. 'node'/'link'.
        self._xsi_type = '{http://www.w3.org/2001/XMLSchema-instance}type'
def handle_as_node(self,element, **kwargs):
"""
Handles an element identified as a node, extracts relevant
data and inserts into current node dictionary. Makes a
recursive call to parse_children if any nested nodes are found.
:param element: element to handle
:param kwargs: sets V, E, E_res
:return: updated sets V, E, E_res
"""
V, E, E_res = (kwargs[key] for key in ['V','E','E_res'])
parent_ID = kwargs['parent_ID']
ID = int(element.get('ID'))
if element.find('resource') is not None:
# Handles attached images & URLs
rs = element.find('resource')
type_ = rs.get('type')
title = rs.findtext('title')
resource = dict(ID=ID, title=title, type=type_)
prop_tags = rs.findall('property')
for tag in prop_tags:
resource.__setitem__(*tag.attrib.values())
else: resource = None
if element.find('metadata-list') is not None:
# Handles node metadata
metadata = {'keywords': []}
md_tags = element.findall('md')
for tag in md_tags:
tag_is_keyword = tag.attrib['t'] == '1'
if tag_is_keyword:
value = tag.attrib['v']
metadata['keywords'].append(value)
else: # Unknown metadata tag
raise ValueError('Invalid tag attribute on MD: t="%s"' % tag.attrib['t'])
else: metadata = None
label = element.get('label',default='').replace('\n',' ')
properties = {
'VUE_ID': ID, 'type': 'Node', 'label': label,
'resource': str(resource), 'metadata': str(metadata),
'layer': element.get('layerID'),
'parent': parent_ID
}
V[ID] = {
'label': label,
'properties': properties
}
if element.find('child') is not None: # Handles nested nodes
V, E = parse_children(element, V, E, E_res)
return V, E, E_res
def handle_as_link(self, element, **kwargs):
"""
Handles an element identified as a link, extracts relevant
data and inserts into current link dictionary.
:param element: element to handle
:param kwargs: sets V, E, E_res
:return: updated sets V, E, E_res
"""
ID = int(element.get('ID'))
V = kwargs['V']
E, E_res = kwargs['E'], kwargs['E_res']
endpoints = self.link_endpoint_tags(element, V, E)
if endpoints:
endpoint_types = self.get_object_types(endpoints)
link_type = '{}-{}'.format(*endpoint_types)
arrow_state = int(element.get('arrowState'))
if arrow_state == 3:
directed = 'bidirectional'
else:
if arrow_state == 1: endpoints.reverse()
directed = 'undirected' if not arrow_state else 'directed'
E[ID] = {
'start_node': endpoints[0],
'label': element.get('label', default=''),
'end_node': endpoints[1],
'properties': {
'VUE_ID': ID, 'directed': directed,
'type': 'Link: ' + link_type
}
}
else:
# Edge is referencing downstream XML element
# Retry in next iteration
E_res[ID] = element
return V, E, E_res
def get_object_types(self,iterable):
""" Returns object property types """
return [obj['properties']['type'] for obj in iterable]
def link_endpoint_tags(self, element, V, E):
"""
Gets 2 child tags corresponding to a link's endpoints
:param element: parent element of endpoint tags
:param V: current set of nodes
:param E:current set of links
:return:
"""
endpoints = []
for i in [1,2]:
tag = element.find('ID' + str(i))
tag_type = tag.get(self._xsi_type)
tag_set = V if tag_type == 'node' else E
graph_element_ID = int(tag.text)
try:
endpoint = tag_set[graph_element_ID]
if endpoint is None:
raise ValueError('Endpoint is none')
endpoints.append(endpoint)
except KeyError:
return False
return endpoints
handler = {
'node': handle_as_node,
'link': handle_as_link
}
class VUE4j:
    """Reads a VUE concept-map (.vue XML) file and mirrors its graph into
    a Neo4j database via py2neo's legacy Cypher transaction API."""
    def __init__(self,file=None):
        self.file = file
    @property
    def file(self):
        # Path of the .vue file backing this instance
        return self._file
    @file.setter
    def file(self,path):
        """
        Sets the file path and immediately parses the file, raising a
        ValueError if an invalid path is provided
        :param path: absolute file path of .vue file
        """
        # NOTE(review): the path is stored before validation, so an invalid
        # value still lands in self._file - confirm whether intended
        self._file = path
        if not path or path[-4:] != '.vue':
            raise ValueError('Class VUE requires a .vue file to read')
        else:
            self.root = self.get_root()
            self.V, self.E = parse_children(self.root)
    def nodes(self,key='label',verbose=False):
        """
        Returns specified data on graph nodes
        :param key: key to request from each node ('label' or 'properties')
        :param verbose: if True, return a formatted table string;
            otherwise return the raw node dictionary
        :return: Graph node data (OrderedDict by node ID)
        """
        if verbose:
            node_properties = [element[key] for element in self.V.values()]
            node_list = list(zip(self.V.keys(),node_properties))
            headers = ('ID',key.upper())
            self._nodes = tabulate(node_list,headers=headers)
        else:
            self._nodes = self.V
        return self._nodes
    def links(self, max_length=30, verbose=False):
        """
        Returns a list of links/relationships/edges in the graph
        :param max_length: endpoint label length to truncate at
        :param verbose: if True, return a formatted table string;
            otherwise return the raw link dictionary
        :return: Graph link data (OrderedDict by link ID)
        """
        if verbose:
            edge_list = []
            for id, edge in self.E.items():
                start, end = edge['start_node']['label'], edge['end_node']['label']
                # Append an ellipsis only when the label was truncated
                start = start[:max_length] + (start[max_length:] and '...')
                end = end[:max_length] + (end[max_length:] and '...')
                arrow_str = self.rel_arrow_str(edge)
                # NOTE(review): ``verbose`` is always True on this branch,
                # so the else tuple here is dead code
                record = (id,start,arrow_str,end) if verbose else (start, end)
                edge_list.append(record)
            self._links = tabulate(edge_list,headers=('Link ID','Node 1','Relationship','Node 2'))
        else:
            self._links = self.E
        return self._links
    def get_root(self):
        """
        Skips the file's leading non-map content (everything before the
        <LW-MAP tag) and parses the remainder as XML.
        :return: Root of XML element tree
        """
        with open(self.file) as f:
            while True:
                line = f.readline()
                if not line:
                    # NOTE(review): if no <LW-MAP line exists, ``data`` is
                    # never bound and the return raises NameError - confirm
                    break
                elif not line.startswith('<LW-MAP'):
                    continue
                else:
                    data = line + f.read()
        return etree.fromstring(data)
    def rel_arrow_str(self, link):
        """
        Constructs a string representation of an arrow/link,
        used in the verbose printing of an edge table
        :param link: Link to get representation of
        :return: string representation of link, e.g. ' <--[label]--> '
        """
        rel = link['label']
        # Unlabeled links collapse '[]' away, leaving a bare arrow
        arrow_tag = '[{}]'.format(rel).replace('[]','')
        directed = link['properties']['directed']
        left_arrow = ' <' if directed == 'bidirectional' else ''
        right_arrow = '> ' if directed != 'undirected' else ''
        return '--'.join([left_arrow,arrow_tag,right_arrow])
    def get_endpoints(self,link):
        """ Returns a generator of the (start, end) VUE_IDs of a link """
        return (link[key+'_node']['properties']['VUE_ID'] for key in ['start','end'])
    @property
    def neo4j_compatible_links(self):
        # Populated by the setter below; a filtered subset of self.E
        return self._compatible_links
    @neo4j_compatible_links.setter
    def neo4j_compatible_links(self,links):
        """
        Filters out VUE links that have other links as endpoints,
        as this feature is not currently supported by Neo4j
        :param links: dict of links (by ID) to filter
        """
        # A link's type string is 'Link: <start>-<end>'; slicing off the
        # first 6 chars leaves only the endpoint types, so any remaining
        # 'Link' marks a link-to-link connection
        self._compatible_links = {
            id: edge for (id,edge) in links.items()
            if 'Link' not in edge['properties']['type'][6:]
        }
        diff = set(links.keys()) - set(self._compatible_links.keys())
        if diff:
            warning = 'Warning: file \'%s\' contains link types incompatible with Neo4j'
            print(warning % self.file)
    def to_neo4j(self):
        """
        Merges a Neo4j database with graph data
        obtained from parse_children algorithm
        :return: Neo4j graph object, populated with nodes
        and relationships from self.V, self.E
        """
        G = Graph()
        self.neo4j_compatible_links = self.E
        # Node transaction
        node_tx = G.cypher.begin()
        for id, node in self.V.items():
            statement = MergeNode('Node','VUE_ID',id).set(node['label'],**node['properties'])
            node_tx.append(statement)
        node_tx.commit()
        # Link transaction
        link_tx = G.cypher.begin()
        for id, link in self.neo4j_compatible_links.items():
            props = link['properties']
            # Render properties inline as Cypher map entries
            str_props = ', '.join(['{}: "{}"'.format(k,str(v)) for k,v in props.items()])
            endpointIDs = self.get_endpoints(link)
            # Fall back to the direction string when the link is unlabeled
            label = link['label'] or props['directed']
            args = (*endpointIDs,label,str_props)
            statement = MergeRelationship(args)
            link_tx.append(statement)
        link_tx.commit()
        # Done
        return G
    def confirm_transaction(self,graph):
        """
        Tests whether the number of extracted data values in V, E
        match the sizes of Neo4j nodes & relationships. Useful when
        importing a VUE map into a new/empty Neo4j graph.
        :param graph: Neo4j graph to check against
        :return: True/False, confirming whether the
        graph transaction was successful
        """
        node_records = graph.cypher.execute('MATCH (n) return n.VUE_ID order by n.VUE_ID')
        relationship_records = graph.cypher.execute('START r=rel(*) RETURN r')
        v, n = len(self.V), len(node_records)
        e, r = len(self.neo4j_compatible_links), len(relationship_records)
        nodes_match = (v - n == 0)
        links_match = (e - r == 0)
        set_lengths = tuple(str(var) for var in [v,n,e,r])
        # NOTE(review): asserts are stripped under ``python -O``; these
        # checks would then silently pass
        assert nodes_match, 'Got unequal node sets: %s, %s' % set_lengths[:2]
        assert links_match, 'Got unequal edge sets: %s, %s' % set_lengths[2:]
        return nodes_match and links_match
| |
## @package translate
# Module caffe2.python.models.seq2seq.translate
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from abc import ABCMeta, abstractmethod
import argparse
from future.utils import viewitems
import logging
import numpy as np
from six import with_metaclass
import sys
from caffe2.python import core, rnn_cell, workspace
from caffe2.python.models.seq2seq.beam_search import BeamSearchForwardOnly
from caffe2.python.models.seq2seq.seq2seq_model_helper import Seq2SeqModelHelper
import caffe2.python.models.seq2seq.seq2seq_util as seq2seq_util
# Module-level logger; INFO and above are echoed to stderr
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler(sys.stderr))
def _weighted_sum(model, values, weight, output_name):
    """Emit a WeightedSum op that scales each blob in *values* by the same
    *weight* blob and sums them into *output_name*.

    WeightedSum expects its inputs as a flat
    ``[value0, weight0, value1, weight1, ...]`` list, so every value is
    interleaved with the shared weight blob.
    """
    interleaved = []
    for value in values:
        interleaved.append(value)
        interleaved.append(weight)
    return model.net.WeightedSum(interleaved, output_name)
class Seq2SeqModelCaffe2EnsembleDecoderBase(with_metaclass(ABCMeta, object)):
    """Shared machinery for ensemble decoders: word-reward construction and
    checkpoint loading. Subclasses specify where each member's model file
    lives and which db format it uses."""

    @abstractmethod
    def get_model_file(self, model):
        pass

    @abstractmethod
    def get_db_type(self):
        pass

    def build_word_rewards(self, vocab_size, word_reward, unk_reward):
        """Per-token additive rewards: *word_reward* everywhere, zero for
        the special PAD/GO/EOS tokens, plus *unk_reward* on top for UNK."""
        rewards = np.full([vocab_size], word_reward, dtype=np.float32)
        special_tokens = (
            seq2seq_util.PAD_ID,
            seq2seq_util.GO_ID,
            seq2seq_util.EOS_ID,
        )
        for token_id in special_tokens:
            rewards[token_id] = 0
        rewards[seq2seq_util.UNK_ID] = word_reward + unk_reward
        return rewards

    def load_models(self):
        """Load every ensemble member's checkpoint into its name scope."""
        db_reader = 'reader'
        for model, scope_name in zip(
            self.models,
            self.decoder_scope_names,
        ):
            model_file = self.get_model_file(model)
            # Only restore the params belonging to this member's scope
            scoped_params = [
                param
                for param in self.model.GetAllParams()
                if str(param).startswith(scope_name)
            ]
            created = workspace.RunOperatorOnce(core.CreateOperator(
                'CreateDB',
                [], [db_reader],
                db=model_file,
                db_type=self.get_db_type())
            )
            assert created, 'Failed to create db {}'.format(model_file)
            assert workspace.RunOperatorOnce(core.CreateOperator(
                'Load',
                [db_reader],
                scoped_params,
                load_all=1,
                add_prefix=scope_name + '/',
                strip_prefix='gpu_0/',
            ))
            logger.info('Model {} is loaded from a checkpoint {}'.format(
                scope_name, model_file))
class Seq2SeqModelCaffe2EnsembleDecoder(Seq2SeqModelCaffe2EnsembleDecoderBase):
    """Forward-only ensemble beam-search decoder.

    Builds one shared beam-search net in which every ensemble member
    contributes an encoder + one-step decoder; the members' per-step token
    log-probabilities (and attention weights) are averaged before the beam
    is advanced.
    """
    def get_model_file(self, model):
        # Checkpoint path is carried in each member's config dict
        return model['model_file']
    def get_db_type(self):
        # Checkpoints are serialized as Caffe2 minidb files
        return 'minidb'
    def scope(self, scope_name, blob_name):
        """Prefix blob_name with scope_name (slash-separated), if any."""
        return (
            scope_name + '/' + blob_name
            if scope_name is not None
            else blob_name
        )
    def _build_decoder(
        self,
        model,
        step_model,
        model_params,
        scope,
        previous_tokens,
        timestep,
        fake_seq_lengths,
    ):
        """Add one ensemble member's encoder and one-step decoder.

        The encoder lives in ``model`` (run once per sentence); the decoder
        step lives in ``step_model`` (run once per beam-search step).
        :param model: main net/model helper
        :param step_model: beam-search step net
        :param model_params: this member's architecture config dict
        :param scope: name scope isolating this member's blobs
        :param previous_tokens: tokens chosen at the previous beam step
        :param timestep: current decoding timestep blob
        :param fake_seq_lengths: dummy sequence-lengths blob for the decoder
        :return: (state_configs, output_log_probs, attention_weights)
        """
        attention_type = model_params['attention']
        assert attention_type in ['none', 'regular']
        use_attention = (attention_type != 'none')
        with core.NameScope(scope):
            encoder_embeddings = seq2seq_util.build_embeddings(
                model=model,
                vocab_size=self.source_vocab_size,
                embedding_size=model_params['encoder_embedding_size'],
                name='encoder_embeddings',
                freeze_embeddings=False,
            )
            (
                encoder_outputs,
                weighted_encoder_outputs,
                final_encoder_hidden_states,
                final_encoder_cell_states,
                encoder_units_per_layer,
            ) = seq2seq_util.build_embedding_encoder(
                model=model,
                encoder_params=model_params['encoder_type'],
                num_decoder_layers=len(model_params['decoder_layer_configs']),
                inputs=self.encoder_inputs,
                input_lengths=self.encoder_lengths,
                vocab_size=self.source_vocab_size,
                embeddings=encoder_embeddings,
                embedding_size=model_params['encoder_embedding_size'],
                use_attention=use_attention,
                num_gpus=0,
                forward_only=True,
                scope=scope,
            )
        with core.NameScope(scope):
            if use_attention:
                # [max_source_length, beam_size, encoder_output_dim]
                # Replicate encoder outputs across the beam dimension
                encoder_outputs = model.net.Tile(
                    encoder_outputs,
                    'encoder_outputs_tiled',
                    tiles=self.beam_size,
                    axis=1,
                )
            if weighted_encoder_outputs is not None:
                weighted_encoder_outputs = model.net.Tile(
                    weighted_encoder_outputs,
                    'weighted_encoder_outputs_tiled',
                    tiles=self.beam_size,
                    axis=1,
                )
        decoder_embeddings = seq2seq_util.build_embeddings(
            model=model,
            vocab_size=self.target_vocab_size,
            embedding_size=model_params['decoder_embedding_size'],
            name='decoder_embeddings',
            freeze_embeddings=False,
        )
        # Embed the tokens selected at the previous beam step
        embedded_tokens_t_prev = step_model.net.Gather(
            [decoder_embeddings, previous_tokens],
            'embedded_tokens_t_prev',
        )
        decoder_cells = []
        decoder_units_per_layer = []
        for i, layer_config in enumerate(model_params['decoder_layer_configs']):
            num_units = layer_config['num_units']
            decoder_units_per_layer.append(num_units)
            # First layer consumes the embedding; later layers consume the
            # previous layer's hidden state
            if i == 0:
                input_size = model_params['decoder_embedding_size']
            else:
                input_size = (
                    model_params['decoder_layer_configs'][i - 1]['num_units']
                )
            cell = rnn_cell.LSTMCell(
                forward_only=True,
                input_size=input_size,
                hidden_size=num_units,
                forget_bias=0.0,
                memory_optimization=False,
            )
            decoder_cells.append(cell)
        with core.NameScope(scope):
            # Tile the final encoder states across the beam as well
            if final_encoder_hidden_states is not None:
                for i in range(len(final_encoder_hidden_states)):
                    if final_encoder_hidden_states[i] is not None:
                        final_encoder_hidden_states[i] = model.net.Tile(
                            final_encoder_hidden_states[i],
                            'final_encoder_hidden_tiled_{}'.format(i),
                            tiles=self.beam_size,
                            axis=1,
                        )
            if final_encoder_cell_states is not None:
                for i in range(len(final_encoder_cell_states)):
                    if final_encoder_cell_states[i] is not None:
                        final_encoder_cell_states[i] = model.net.Tile(
                            final_encoder_cell_states[i],
                            'final_encoder_cell_tiled_{}'.format(i),
                            tiles=self.beam_size,
                            axis=1,
                        )
            initial_states = \
                seq2seq_util.build_initial_rnn_decoder_states(
                    model=model,
                    encoder_units_per_layer=encoder_units_per_layer,
                    decoder_units_per_layer=decoder_units_per_layer,
                    final_encoder_hidden_states=final_encoder_hidden_states,
                    final_encoder_cell_states=final_encoder_cell_states,
                    use_attention=use_attention,
                )
        attention_decoder = seq2seq_util.LSTMWithAttentionDecoder(
            encoder_outputs=encoder_outputs,
            encoder_output_dim=encoder_units_per_layer[-1],
            encoder_lengths=None,
            vocab_size=self.target_vocab_size,
            attention_type=attention_type,
            embedding_size=model_params['decoder_embedding_size'],
            decoder_num_units=decoder_units_per_layer[-1],
            decoder_cells=decoder_cells,
            weighted_encoder_outputs=weighted_encoder_outputs,
            name=scope,
        )
        # The beam search feeds last-step states back in via these inputs
        states_prev = step_model.net.AddExternalInputs(*[
            '{}/{}_prev'.format(scope, s)
            for s in attention_decoder.get_state_names()
        ])
        decoder_outputs, states = attention_decoder.apply(
            model=step_model,
            input_t=embedded_tokens_t_prev,
            seq_lengths=fake_seq_lengths,
            states=states_prev,
            timestep=timestep,
        )
        # Wire each recurrent state into the beam search's state recycling
        state_configs = [
            BeamSearchForwardOnly.StateConfig(
                initial_value=initial_state,
                state_prev_link=BeamSearchForwardOnly.LinkConfig(
                    blob=state_prev,
                    offset=0,
                    window=1,
                ),
                state_link=BeamSearchForwardOnly.LinkConfig(
                    blob=state,
                    offset=1,
                    window=1,
                ),
            )
            for initial_state, state_prev, state in zip(
                initial_states,
                states_prev,
                states,
            )
        ]
        with core.NameScope(scope):
            decoder_outputs_flattened, _ = step_model.net.Reshape(
                [decoder_outputs],
                [
                    'decoder_outputs_flattened',
                    'decoder_outputs_and_contexts_combination_old_shape',
                ],
                shape=[-1, attention_decoder.get_output_dim()],
            )
        output_logits = seq2seq_util.output_projection(
            model=step_model,
            decoder_outputs=decoder_outputs_flattened,
            decoder_output_size=attention_decoder.get_output_dim(),
            target_vocab_size=self.target_vocab_size,
            decoder_softmax_size=model_params['decoder_softmax_size'],
        )
        # [1, beam_size, target_vocab_size]
        output_probs = step_model.net.Softmax(
            output_logits,
            'output_probs',
        )
        output_log_probs = step_model.net.Log(
            output_probs,
            'output_log_probs',
        )
        if use_attention:
            attention_weights = attention_decoder.get_attention_weights()
        else:
            # No attention: fabricate an all-zero weight tensor with the
            # shape the beam search expects
            attention_weights = step_model.net.ConstantFill(
                [self.encoder_inputs],
                'zero_attention_weights_tmp_1',
                value=0.0,
            )
            attention_weights = step_model.net.Transpose(
                attention_weights,
                'zero_attention_weights_tmp_2',
            )
            attention_weights = step_model.net.Tile(
                attention_weights,
                'zero_attention_weights_tmp',
                tiles=self.beam_size,
                axis=0,
            )
        return (
            state_configs,
            output_log_probs,
            attention_weights,
        )
    def __init__(
        self,
        translate_params,
    ):
        """Construct the full beam-search net for an ensemble.

        ``translate_params`` must contain 'ensemble_models' (a non-empty
        list of member configs that all share one source/target vocab) and
        'decoding_params' (beam_size, word_reward, unk_reward).
        """
        self.models = translate_params['ensemble_models']
        decoding_params = translate_params['decoding_params']
        self.beam_size = decoding_params['beam_size']
        assert len(self.models) > 0
        source_vocab = self.models[0]['source_vocab']
        target_vocab = self.models[0]['target_vocab']
        # All ensemble members must agree on the vocabularies
        for model in self.models:
            assert model['source_vocab'] == source_vocab
            assert model['target_vocab'] == target_vocab
        self.source_vocab_size = len(source_vocab)
        self.target_vocab_size = len(target_vocab)
        self.decoder_scope_names = [
            'model{}'.format(i) for i in range(len(self.models))
        ]
        self.model = Seq2SeqModelHelper(init_params=True)
        self.encoder_inputs = self.model.net.AddExternalInput('encoder_inputs')
        self.encoder_lengths = self.model.net.AddExternalInput(
            'encoder_lengths'
        )
        self.max_output_seq_len = self.model.net.AddExternalInput(
            'max_output_seq_len'
        )
        # Effectively "infinite" lengths so the decoder RNN never truncates
        fake_seq_lengths = self.model.param_init_net.ConstantFill(
            [],
            'fake_seq_lengths',
            shape=[self.beam_size],
            value=100000,
            dtype=core.DataType.INT32,
        )
        beam_decoder = BeamSearchForwardOnly(
            beam_size=self.beam_size,
            model=self.model,
            go_token_id=seq2seq_util.GO_ID,
            eos_token_id=seq2seq_util.EOS_ID,
        )
        step_model = beam_decoder.get_step_model()
        state_configs = []
        output_log_probs = []
        attention_weights = []
        # Build each member's encoder/decoder and collect their outputs
        for model, scope_name in zip(
            self.models,
            self.decoder_scope_names,
        ):
            (
                state_configs_per_decoder,
                output_log_probs_per_decoder,
                attention_weights_per_decoder,
            ) = self._build_decoder(
                model=self.model,
                step_model=step_model,
                model_params=model['model_params'],
                scope=scope_name,
                previous_tokens=beam_decoder.get_previous_tokens(),
                timestep=beam_decoder.get_timestep(),
                fake_seq_lengths=fake_seq_lengths,
            )
            state_configs.extend(state_configs_per_decoder)
            output_log_probs.append(output_log_probs_per_decoder)
            if attention_weights_per_decoder is not None:
                attention_weights.append(attention_weights_per_decoder)
        assert len(attention_weights) > 0
        # Averaging weight = 1/N for the members that produced attention
        num_decoders_with_attention_blob = (
            self.model.param_init_net.ConstantFill(
                [],
                'num_decoders_with_attention_blob',
                value=1 / float(len(attention_weights)),
                shape=[1],
            )
        )
        # [beam_size, encoder_length, 1]
        attention_weights_average = _weighted_sum(
            model=step_model,
            values=attention_weights,
            weight=num_decoders_with_attention_blob,
            output_name='attention_weights_average',
        )
        num_decoders_blob = self.model.param_init_net.ConstantFill(
            [],
            'num_decoders_blob',
            value=1 / float(len(output_log_probs)),
            shape=[1],
        )
        # [beam_size, target_vocab_size]
        output_log_probs_average = _weighted_sum(
            model=step_model,
            values=output_log_probs,
            weight=num_decoders_blob,
            output_name='output_log_probs_average',
        )
        # Placeholder blob; real values are fed below via build_word_rewards
        word_rewards = self.model.param_init_net.ConstantFill(
            [],
            'word_rewards',
            shape=[self.target_vocab_size],
            value=0.0,
            dtype=core.DataType.FLOAT,
        )
        (
            self.output_token_beam_list,
            self.output_prev_index_beam_list,
            self.output_score_beam_list,
            self.output_attention_weights_beam_list,
        ) = beam_decoder.apply(
            inputs=self.encoder_inputs,
            length=self.max_output_seq_len,
            log_probs=output_log_probs_average,
            attentions=attention_weights_average,
            state_configs=state_configs,
            data_dependencies=[],
            word_rewards=word_rewards,
        )
        workspace.RunNetOnce(self.model.param_init_net)
        workspace.FeedBlob(
            'word_rewards',
            self.build_word_rewards(
                vocab_size=self.target_vocab_size,
                word_reward=translate_params['decoding_params']['word_reward'],
                unk_reward=translate_params['decoding_params']['unk_reward'],
            )
        )
        workspace.CreateNet(
            self.model.net,
            input_blobs=[
                str(self.encoder_inputs),
                str(self.encoder_lengths),
                str(self.max_output_seq_len),
            ],
        )
        logger.info('Params created: ')
        for param in self.model.params:
            logger.info(param)
    def decode(self, numberized_input, max_output_seq_len):
        """Translate one numberized sentence via beam search.

        :param numberized_input: list of source token ids
        :param max_output_seq_len: hard cap on output length
        :return: (output token ids, per-output-token attention weights,
            best hypothesis score)
        """
        # Source tokens are fed reversed (mirrors the training pipeline)
        workspace.FeedBlob(
            self.encoder_inputs,
            np.array([
                [token_id] for token_id in reversed(numberized_input)
            ]).astype(dtype=np.int32),
        )
        workspace.FeedBlob(
            self.encoder_lengths,
            np.array([len(numberized_input)]).astype(dtype=np.int32),
        )
        workspace.FeedBlob(
            self.max_output_seq_len,
            np.array([max_output_seq_len]).astype(dtype=np.int64),
        )
        workspace.RunNet(self.model.net)
        num_steps = max_output_seq_len
        score_beam_list = workspace.FetchBlob(self.output_score_beam_list)
        token_beam_list = (
            workspace.FetchBlob(self.output_token_beam_list)
        )
        prev_index_beam_list = (
            workspace.FetchBlob(self.output_prev_index_beam_list)
        )
        attention_weights_beam_list = (
            workspace.FetchBlob(self.output_attention_weights_beam_list)
        )
        # Find the best-scoring hypothesis that ended in EOS (or was
        # still alive at the final step)
        best_indices = (num_steps, 0)
        for i in range(num_steps + 1):
            for hyp_index in range(self.beam_size):
                if (
                    (
                        token_beam_list[i][hyp_index][0] ==
                        seq2seq_util.EOS_ID or
                        i == num_steps
                    ) and
                    (
                        score_beam_list[i][hyp_index][0] >
                        score_beam_list[best_indices[0]][best_indices[1]][0]
                    )
                ):
                    best_indices = (i, hyp_index)
        i, hyp_index = best_indices
        output = []
        attention_weights_per_token = []
        # NOTE(review): score sign is flipped here - confirm the beam
        # search's score convention
        best_score = -score_beam_list[i][hyp_index][0]
        # Walk the back-pointers to reconstruct the chosen hypothesis
        while i > 0:
            output.append(token_beam_list[i][hyp_index][0])
            attention_weights_per_token.append(
                attention_weights_beam_list[i][hyp_index]
            )
            hyp_index = prev_index_beam_list[i][hyp_index][0]
            i -= 1
        attention_weights_per_token = reversed(attention_weights_per_token)
        # encoder_inputs are reversed, see get_batch func
        attention_weights_per_token = [
            list(reversed(attention_weights))[:len(numberized_input)]
            for attention_weights in attention_weights_per_token
        ]
        output = list(reversed(output))
        return output, attention_weights_per_token, best_score
def run_seq2seq_beam_decoder(args, model_params, decoding_params):
    """Build a single-member 'ensemble' decoder from a checkpoint and
    translate stdin line by line, printing the detokenized output."""
    source_vocab = seq2seq_util.gen_vocab(
        args.source_corpus,
        args.unk_threshold,
    )
    logger.info('Source vocab size {}'.format(len(source_vocab)))
    target_vocab = seq2seq_util.gen_vocab(
        args.target_corpus,
        args.unk_threshold,
    )
    # id -> token lookup used when printing translations
    inversed_target_vocab = {
        token_id: token for (token, token_id) in viewitems(target_vocab)
    }
    logger.info('Target vocab size {}'.format(len(target_vocab)))
    ensemble_member = dict(
        source_vocab=source_vocab,
        target_vocab=target_vocab,
        model_params=model_params,
        model_file=args.checkpoint,
    )
    translate_params = dict(
        ensemble_models=[ensemble_member],
        decoding_params=decoding_params,
    )
    decoder = Seq2SeqModelCaffe2EnsembleDecoder(translate_params=translate_params)
    decoder.load_models()
    for line in sys.stdin:
        token_ids = seq2seq_util.get_numberized_sentence(line, source_vocab)
        # Allow the translation to run somewhat longer than the input
        translation, alignment, _ = decoder.decode(
            token_ids,
            2 * len(token_ids) + 5,
        )
        print(' '.join([inversed_target_vocab[tid] for tid in translation]))
def main():
    """Command-line entry point: parse arguments, assemble the layer
    configs, and run the interactive stdin->stdout translator."""
    parser = argparse.ArgumentParser(
        description='Caffe2: Seq2Seq Translation',
    )
    parser.add_argument('--source-corpus', type=str, default=None,
                        help='Path to source corpus in a text file format. Each '
                        'line in the file should contain a single sentence',
                        required=True)
    parser.add_argument('--target-corpus', type=str, default=None,
                        help='Path to target corpus in a text file format',
                        required=True)
    parser.add_argument('--unk-threshold', type=int, default=50,
                        help='Threshold frequency under which token becomes '
                        'labeled unknown token')
    parser.add_argument('--use-bidirectional-encoder', action='store_true',
                        help='Set flag to use bidirectional recurrent network '
                        'in encoder')
    parser.add_argument('--use-attention', action='store_true',
                        help='Set flag to use seq2seq with attention model')
    parser.add_argument('--encoder-cell-num-units', type=int, default=512,
                        help='Number of cell units per encoder layer')
    parser.add_argument('--encoder-num-layers', type=int, default=2,
                        help='Number encoder layers')
    parser.add_argument('--decoder-cell-num-units', type=int, default=512,
                        help='Number of cell units in the decoder layer')
    parser.add_argument('--decoder-num-layers', type=int, default=2,
                        help='Number decoder layers')
    parser.add_argument('--encoder-embedding-size', type=int, default=256,
                        help='Size of embedding in the encoder layer')
    parser.add_argument('--decoder-embedding-size', type=int, default=512,
                        help='Size of embedding in the decoder layer')
    parser.add_argument('--decoder-softmax-size', type=int, default=None,
                        help='Size of softmax layer in the decoder')
    parser.add_argument('--beam-size', type=int, default=6,
                        help='Size of beam for the decoder')
    parser.add_argument('--word-reward', type=float, default=0.0,
                        help='Reward per each word generated.')
    parser.add_argument('--unk-reward', type=float, default=0.0,
                        help='Reward per each UNK token generated. '
                        'Typically should be negative.')
    parser.add_argument('--checkpoint', type=str, default=None,
                        help='Path to checkpoint', required=True)
    args = parser.parse_args()
    # NOTE(review): list multiplication replicates ONE shared dict across
    # all layers, so the bidirectional halving below applies to every
    # encoder layer, not just the first - confirm this matches the
    # training-time config before changing it.
    encoder_layer_configs = [
        dict(
            num_units=args.encoder_cell_num_units,
        ),
    ] * args.encoder_num_layers
    if args.use_bidirectional_encoder:
        assert args.encoder_cell_num_units % 2 == 0
        # Bug fix: with ``from __future__ import division``, ``/= 2``
        # produced a float num_units; floor-divide keeps it an int
        # (the assert above guarantees it divides evenly).
        encoder_layer_configs[0]['num_units'] //= 2
    decoder_layer_configs = [
        dict(
            num_units=args.decoder_cell_num_units,
        ),
    ] * args.decoder_num_layers
    run_seq2seq_beam_decoder(
        args,
        model_params=dict(
            attention=('regular' if args.use_attention else 'none'),
            decoder_layer_configs=decoder_layer_configs,
            encoder_type=dict(
                encoder_layer_configs=encoder_layer_configs,
                use_bidirectional_encoder=args.use_bidirectional_encoder,
            ),
            encoder_embedding_size=args.encoder_embedding_size,
            decoder_embedding_size=args.decoder_embedding_size,
            decoder_softmax_size=args.decoder_softmax_size,
        ),
        decoding_params=dict(
            beam_size=args.beam_size,
            word_reward=args.word_reward,
            unk_reward=args.unk_reward,
        ),
    )
if __name__ == '__main__':
    main()
| |
from __future__ import print_function, absolute_import, division
from future.builtins import *
from future import standard_library
standard_library.install_aliases()
# Copyright 2017 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from past.builtins import basestring
import copy
import collections
import itertools
import numpy as np
import moldesign as mdt
from .. import units as u
from .. import utils, external, mathutils, widgets
from . import toplevel
class AtomGroup(object):
""" Mixin functions for objects that have a ``self.atoms`` attribute with a list of atoms
Attributes:
atoms (List[Atom]): a list of atoms
"""
draw2d = widgets.WidgetMethod('atomgroups.draw2d')
draw3d = widgets.WidgetMethod('atomgroups.draw3d')
draw = widgets.WidgetMethod('atomgroups.draw')
def __init__(self, *args, **kwargs):
""" This should never be called directly - it will be called by the `super` methods
of its subclasses """
super().__init__(*args, **kwargs)
self._atom_attrs = None
self.viz2d = None
self.viz3d = None
    @property
    def num_atoms(self):
        """ int: number of atoms in this object """
        return len(self.atoms)
    natoms = num_atoms  # short synonym
@property
def heavy_atoms(self):
""" AtomList: a list of all heavy atoms (i.e., non-hydrogen) in this object """
return AtomList([a for a in self.atoms if a.atnum != 1])
@property
def mass(self):
""" u.Scalar[mass]: total mass of this object
"""
return u.unitsum(a.mass for a in self.atoms)
@property
def momentum(self):
""" u.Vector[momentum]: total momentum of this object
"""
return self.momenta.sum(axis=0)
@property
def velocity(self):
""" u.Vector[velocity]: center of mass velocity of this object
"""
return self.momentum/self.mass
@property
def kinetic_energy(self):
r""" u.Scalar[energy]: Classical kinetic energy :math:`\sum_{\text{atoms}} \frac{p^2}{2m}`
"""
from ..helpers import kinetic_energy
return kinetic_energy(self.momenta, self.masses)
def get_atoms(self, *keywords, **queries):
"""Allows keyword-based atom queries. Returns atoms that match ALL queries.
Args:
*keywords (list): pre-set keywords (currently, just selects by residue type)
**queries (dict): attributes (or residue attributes) to match
Examples:
>>> mol.get_atoms('protein') # returns all atoms in proteins
>>> mol.get_atoms(name='CA') # returns all alpha carbons
>>> mol.get_atoms('dna', symbol='P') # returns all phosphorus in DNA
Returns:
AtomList: the atoms matching this query
"""
if not (queries or keywords):
return mdt.AtomList(self.atoms)
atoms = self.atoms
KEYS = 'protein dna rna water unknown ion'.split()
for key in keywords:
if key in KEYS:
atoms = mdt.AtomList(atom for atom in atoms
if atom.residue.type == key)
else:
raise ValueError("Invalid keyword '%s': valid values are %s" % (key, KEYS))
result = mdt.AtomList()
for atom in atoms:
for field, val in queries.items():
if getattr(atom, field, None) != val and getattr(atom.residue, field, None) != val:
break
else:
result.append(atom)
return result
def calc_distance_array(self, other=None):
""" Calculate an array of pairwise distance between all atoms in self and other
Args:
other (AtomContainer): object to calculate distances to (default: self)
Returns:
u.Array[length]: 2D array of pairwise distances between the two objects
Example:
>>> dists = self.calc_distance_array(other)
>>> dists[i, j] == self.atoms[i].distance(other.atoms[j])
"""
from scipy.spatial.distance import cdist
other = utils.if_not_none(other, self)
try:
other_positions = other.positions.defunits_value()
except AttributeError:
other_positions = np.array([other.position.defunits_value()])
distances = cdist(self.positions.defunits_value(), other_positions)
return distances * u.default.length
def calc_displacements(self):
""" Calculate an array of displacements between all atoms in this object
Returns:
u.Array[length]: array of pairwise displacements between atoms
Example:
>>> displacements = self.calc_displacements(other)
>>> displacements[i, j] == (self.atoms[i].position - self.atoms[j].position)
"""
# TODO: allow other, similar to calc_distance array
return utils.pairwise_displacements(self.positions)
def distance(self, other):
"""Returns closest distance between this and the other entity
Args:
other (AtomContainer): object to calculate distance to
Returns:
u.Scalar[length]: closest distance between self and other
Example:
>>> distance = self.distance(other)
>>> distance == self.calc_distance_array(other).min()
"""
distance_array = self.calc_distance_array(other)
return distance_array.min()
@property
def center_of_mass(self):
""" units.Vector[length]: The (x,y,z) coordinates of this object's center of mass """
if self.num_atoms == 0: # nicer exception than divide-by-zero
raise ValueError('"%s" has no atoms' % str(self))
total_mass = 0.0 * u.default.mass
com = np.zeros(3) * u.default.length * u.default.mass
for atom in self.atoms:
total_mass += atom.mass
com += atom.position * atom.mass
com = com / total_mass
return com
@center_of_mass.setter
def center_of_mass(self, value):
vec = value - self.com
self.translate(vec)
com = center_of_mass # synonym
def _getatom(self, a):
""" Given an atom's name, index, or object, return the atom object
"""
if a is None:
return None
elif isinstance(a, basestring) or isinstance(a, int):
return self[a]
else:
return a
def angle(self, a1, a2, a3):
""" Calculate the angle between three atoms.
Atoms can be passed as the atoms themselves or as the atom names
Args:
a1, a2, a3 (str OR int OR moldesign.Atom): atoms defining the angle
Returns:
units.Scalar[angle]
"""
# TODO: use single dispatch to also accept two bonds
return mdt.geom.angle(*list(map(self._getatom, (a1, a2, a3))))
def dihedral(self, a1, a2, a3=None, a4=None):
""" Calculate the dihedral angle between atoms a1, a2, a3, a4.
Atoms can be passed as the atoms themselves or as the atom names
Args:
a1, a2, a3, a4 (str OR int OR moldesign.Atom): atoms defining the dihedral
Returns:
units.Scalar[angle]
"""
return mdt.geom.dihedral(*list(map(self._getatom, (a1, a2, a3, a4))))
def copy_atoms(self):
    """ Copy a group of atoms along and relevant topological information.

    This specifically copies:
     - the atoms themselves (along with their positions, momenta, and bond graphs)
     - any residues they are are part of
     - any chains they are part of
     - any bonds between them

    It does NOT copy:
     - other atoms that are part of these atoms' residues
     - other residues that are part of these atoms chains
     - the molecule these atoms are part of

    Returns:
        AtomList: list of copied atoms
    """
    graph = {}
    memo = {'bondgraph':graph}
    # Each atom registers itself (presumably along with its residue/chain and
    # bonds) into ``memo`` keyed by the original object -- _subcopy is defined
    # elsewhere; confirm its contract before relying on details here.
    for atom in self.atoms:
        atom._subcopy(memo)
    tempatoms = [memo[atom] for atom in self.atoms]
    # Deep-copy atoms and bond graph in one call so shared references are
    # preserved and the copied graph points at the copied atoms.
    newatoms, newbonds = copy.deepcopy((tempatoms, graph))
    for atom in newatoms:
        atom.bond_graph = newbonds[atom]
    return AtomList(newatoms)
###########################################
# Routines to modify the geometry
def rotate(self, angle, axis, center=None):
    """Rotate this object in 3D space.

    Args:
        angle (u.Scalar[angle]): angle to rotate by
        axis (u.Vector[length]): axis to rotate about (len=3)
        center (u.Vector[length]): center of rotation (len=3) (default: origin)
    """
    pivot = utils.if_not_none(center, self.com)
    # strip units down to radians before handing off to the matrix builder
    if hasattr(angle, 'units'):
        angle = angle.value_in(u.radians)
    rotmat = external.transformations.rotation_matrix(angle, axis, point=pivot)
    self.transform(rotmat)
def translate(self, vector):
    """Translate this object in 3D space.

    Args:
        vector (u.Vector[length]): translation vector, len=3
    """
    # in-place add so any views of atom.position stay valid
    for item in self.atoms:
        item.position += vector
def transform(self, matrix):
    """ Transform this object's coordinates using the provided 4x4 matrix

    Args:
        matrix (numpy.ndarray): transformation matrix, shape=(4,4)
    """
    # TODO: deal with units ... hard because the matrix has diff units for diff columns
    assert matrix.shape == (4, 4)
    # Assigning to ``self.positions`` presumably writes back through the
    # _AtomArray descriptor onto each atom -- confirm on the concrete class.
    self.positions = mathutils.apply_4x4_transform(matrix, self.positions)
def atoms_within(self, radius, other=None, include_self=False):
    """ Return all atoms in an object within a given radius of this object.

    Args:
        radius (u.Scalar[length]): radius to search for atoms
        other (AtomContainer): object containing the atoms to search (default:self.molecule)
        include_self (bool): if True, include the atoms from this object (since, by definition,
            their distance from this object is 0)

    Returns:
        AtomList: list of the atoms within ``radius`` of this object
    """
    if other is None:
        other = self.atoms[0].molecule
    excluded = set() if include_self else set(self.atoms)
    # nearest distance from each of other's atoms to any atom in self
    mindists = self.calc_distance_array(other=other).min(axis=0)
    return AtomList(atm for dist, atm in zip(mindists, other.atoms)
                    if dist <= radius and atm not in excluded)
def residues_within(self, radius, other=None, include_self=False):
    """ Return all residues with atoms within a given radius of this object.

    Args:
        radius (u.Scalar[length]): radius to search for atoms
        other (AtomContainer): object containing the atoms to search (default:self.molecule)
        include_self (bool): if True, include the atoms from this object (since, by definition,
            their distance from this object is 0)

    Returns:
        List[moldesign.Residue]: unique residues (in encounter order) owning
            the atoms within ``radius`` of this object
    """
    nearby = self.atoms_within(radius, other=other, include_self=include_self)
    # OrderedDict keys de-duplicate while preserving first-seen order
    unique_residues = collections.OrderedDict((atm.residue, None) for atm in nearby)
    return list(unique_residues.keys())
class _AtomArray(object):
def __init__(self, attrname):
self.attrname = attrname
def __get__(self, instance, owner):
return u.array([getattr(atom, self.attrname) for atom in instance.atoms])
def __set__(self, instance, value):
assert len(value) == instance.num_atoms
for atom, atomval in zip(instance.atoms, value):
setattr(atom, self.attrname, atomval)
class AtomContainer(AtomGroup):
    """
    Mixin functions for NON-MOLECULE objects that have a list of atoms at``self.atoms``
    """
    # Array-style descriptors: reads gather the per-atom attribute into one
    # array, writes broadcast values back onto the individual atoms.
    positions = _AtomArray('position')
    masses = _AtomArray('mass')
    momenta = _AtomArray('momentum')
    velocities = _AtomArray('velocity')
    def __add__(self, other):
        # Concatenation produces a flat AtomList of both objects' atoms
        l = mdt.AtomList(self.atoms)
        l.extend(other.atoms)
        return l
    @property
    def bond_graph(self):
        """ Dict[moldesign.Atom: List[moldesign.Atom]]: bond graph for all atoms in this object
        """
        return {atom: atom.bond_graph for atom in self.atoms}
    @property
    def bonds(self):
        """ Iterable[moldesign.Bond]: iterator over bonds from this object's atoms
        """
        bg = self.bond_graph
        for atom, nbrs in bg.items():
            for nbr, order in nbrs.items():
                # yield internal bonds once (lower index first) plus every
                # bond to an atom outside this container
                if atom.index < nbr.index or nbr not in bg:
                    yield mdt.Bond(atom, nbr)
    def get_bond(self, a1, a2):
        # Thin constructor wrapper; does not verify a1/a2 are actually bonded
        return mdt.Bond(a1, a2)
    @property
    def internal_bonds(self):
        """ Iterable[moldesign.Bond]: iterator over bonds that connect two atoms in this object
        """
        bg = self.bond_graph
        for atom, nbrs in bg.items():
            for nbr, order in nbrs.items():
                # index ordering yields each internal bond exactly once
                if atom.index < nbr.index and nbr in bg:
                    yield mdt.Bond(atom, nbr)
    @property
    def external_bonds(self):
        """
        Iterable[moldesign.Bond]: iterator over bonds that bond these atoms to other atoms
        """
        bg = self.bond_graph
        for atom, nbrs in bg.items():
            for nbr, order in nbrs.items():
                if nbr not in bg:
                    yield mdt.Bond(atom, nbr)
    @property
    def bonded_atoms(self):
        """ List[moldesign.Atom]: list of external atoms this object is bonded to
        """
        bg = self.bond_graph
        atoms = []
        for atom, nbrs in bg.items():
            for nbr, order in nbrs.items():
                if nbr not in bg:
                    atoms.append(nbr)
        return atoms
    def bonds_to(self, other):
        """ Returns list of bonds between this object and another one

        Args:
            other (AtomContainer): other object

        Returns:
            List[moldesign.Bond]: bonds between this object and another
        """
        # NOTE(review): this scans ``internal_bonds`` (both endpoints inside
        # self), so it only finds bonds when other's atoms overlap self's; for
        # a disjoint ``other``, ``external_bonds`` would appear to be the
        # relevant iterator -- confirm intended semantics.
        bonds = []
        otheratoms = set(other.atoms)
        for bond in self.internal_bonds:
            if bond.a1 in otheratoms or bond.a2 in otheratoms:
                bonds.append(bond)
        return bonds
@toplevel
class AtomList(AtomContainer, list):  # order is important, list will override methods otherwise
    """ A list of atoms with various helpful methods for creating and manipulating atom selections

    Args:
        atomlist (List[AtomContainer]): list of objects that are either atoms or contain a list of
            atoms at ``atomlist.atoms``
    """
    def __init__(self, atomlist=()):
        # Flatten: items exposing ``.atoms`` contribute their atoms,
        # bare atoms are appended as-is.
        atoms = []
        for obj in atomlist:
            if hasattr(obj, 'atoms'):
                atoms.extend(obj.atoms)
            else:
                atoms.append(obj)
        super().__init__(atoms)
    def __getitem__(self, item):
        # Slices come back as AtomList so container methods remain available
        result = super().__getitem__(item)
        if isinstance(item, slice):
            return type(self)(result)
        else:
            return result
    def __getslice__(self, i, j):
        # NOTE(review): __getslice__ is a Python 2 hook; on Python 3 it is
        # never invoked (slicing goes through __getitem__) -- presumably kept
        # for py2 compatibility, confirm before removing.
        result = super().__getslice__(i, j)
        return type(self)(result)
    def __str__(self):
        return '[Atoms: %s]' % ', '.join(atom._shortstr() for atom in self)
    def __repr__(self):
        try:
            return '<AtomList: [%s]>' % ', '.join(atom._shortstr() for atom in self)
        except (KeyError, AttributeError):
            # repr must never raise -- fall back to a bare identity string
            return '<AtomList at %x (__repr__ failed)>' % id(self)
    # override list.copy with the topology-aware copy from AtomContainer
    copy = AtomContainer.copy_atoms
    def intersection(self, *otherlists):
        """ Return a list of atoms that appear in all lists (including this one).

        Args:
            *otherlists (Iterable): one or more lists of atoms

        Returns:
            moldesign.AtomList: intersection of this lists with all passed lists. Preserves order
                in this list
        """
        s = set(self).intersection(*otherlists)
        return type(self)(o for o in self if o in s)
    def union(self, *otherlists):
        """ Return a list of atoms that appear in any lists (including this one).

        Args:
            *otherlists (Iterable): one or more lists of atoms

        Returns:
            moldesign.AtomList: union of this list of atoms with all passed lists of atoms.
                Equivalent to concatenating all lists then removing duplicates
        """
        found = set()
        newlist = type(self)()
        for item in itertools.chain(self, *otherlists):
            if item not in found:
                found.add(item)
                newlist.append(item)
        return newlist
    def unique(self):
        """ Return only unique atoms from this list

        Returns:
            moldesign.AtomList: copy of this list without any duplicates. Preserves order.
        """
        return self.union()
    def __sub__(self, other):
        otherset = set(other)
        return type(self)(atom for atom in self if atom not in otherset)
    # alias for self so that this works with AtomContainer methods
    @property
    def atoms(self):
        return self
| |
#!/usr/bin/env python3
import os
import re
import json
import typing
import argparse
# region Global sets
# A set of category folder names in current sample viewer.
# Folders directly inside one of these are treated as sample folders.
categories = {
    'Maps',
    'Layers',
    'Features',
    'Display information',
    'Search',
    'Edit data',
    'Geometry',
    'Route and directions',
    'Analysis',
    'Cloud and portal',
    'Scenes',
    'Utility network',
    'Augmented reality'
}
# endregion
# region Static functions
def sub_special_char(string: str) -> str:
    """
    Remove special characters from a string.

    :param string: The input string.
    :return: A new string without special characters.
    """
    pattern = re.compile(r'[@_!#$%^&*<>?|/\\}{~:]')
    return pattern.sub('', string)
def parse_head(head_string: str) -> typing.Tuple[str, str]:
    """
    Parse the `Title` section of README file and get the title and description.

    :param head_string: A string containing title, description and images.
    :return: Stripped title and description strings.
    :raises Exception: if fewer than 3 non-empty lines are present (title,
        description and image are all expected).
    """
    # Fix: the original return annotation ``(str, str)`` is a plain tuple of
    # classes, not a valid type annotation; typing.Tuple is the correct form.
    parts = list(filter(bool, head_string.splitlines()))
    if len(parts) < 3:
        raise Exception('README description parse failure!')
    # lstrip('# ') removes any leading run of '#' and ' ' characters, i.e.
    # the markdown heading marker before the title text.
    title = parts[0].lstrip('# ').rstrip()
    description = parts[1].strip()
    return title, description
def parse_apis(apis_string: str) -> typing.List[str]:
    """
    Parse the `Relevant API` section and get a list of APIs.

    :param apis_string: A string containing all APIs.
    :return: A sorted list of stripped API names.
    """
    lines = [line for line in apis_string.splitlines() if line]
    if not lines:
        raise Exception('README Relevant API parse failure!')
    # strip markdown bullet markers ('*' or '-') and surrounding whitespace
    return sorted(line.lstrip('*- ').rstrip() for line in lines)
def parse_tags(tags_string: str) -> typing.List[str]:
    """
    Parse the `Tags` section and get a list of tags.

    :param tags_string: A string containing all tags, with comma as delimiter.
    :return: A sorted list of stripped tags.
    :raises Exception: if the section contains no non-empty tag.
    """
    tags = [tag.strip() for tag in tags_string.split(',')]
    # Fix: ``str.split(',')`` always returns at least one element, so the
    # original ``if not tags`` check could never fire; validate that there is
    # actual tag content instead.
    if not any(tags):
        raise Exception('README Tags parse failure!')
    return sorted(tags)
def get_folder_name_from_path(path: str, index: int = -1) -> str:
    """
    Get the folder name from a full path.

    :param path: A string of a full/absolute path to a folder.
    :param index: The index of path parts. Default to -1 to get the most
        trailing folder in the path; set to certain index to get other parts.
    :return: The folder name.
    """
    parts = os.path.normpath(path).split(os.path.sep)
    return parts[index]
# endregion
class MetadataCreator:
    def __init__(self, folder_path: str):
        """
        The standard format of metadata.json for iOS platform. Read more at:
        /common-samples/wiki/README.metadata.json

        :param folder_path: Path to a single sample's folder.
        """
        self.category = ''  # Populate from path.
        self.description = ''  # Populate from README.
        self.ignore = False  # Default to False.
        self.images = []  # Populate from paths.
        self.keywords = []  # Populate from README.
        self.redirect_from = []  # Default to empty list.
        self.relevant_apis = []  # Populate from README.
        self.snippets = []  # Populate from paths.
        self.title = ''  # Populate from README.
        self.folder_path = folder_path
        self.folder_name = get_folder_name_from_path(folder_path)
        self.readme_path = os.path.join(folder_path, 'README.md')
        self.json_path = os.path.join(folder_path, 'README.metadata.json')
    def get_source_code_paths(self) -> typing.List[str]:
        """
        Traverse the directory and get all filenames for source code.

        :return: A list of swift source code filenames.
        """
        results = []
        for file in os.listdir(self.folder_path):
            if os.path.splitext(file)[1] in ['.swift']:
                results.append(file)
        if not results:
            raise Exception('Unable to get swift source code paths.')
        return sorted(results)
    def get_images_paths(self) -> typing.List[str]:
        """
        Traverse the directory and get all filenames for images.

        :return: A list of image filenames.
        """
        results = []
        for file in os.listdir(self.folder_path):
            # lower() so extensions like '.PNG' are matched as well
            if os.path.splitext(file)[1].lower() in ['.png', '.gif']:
                results.append(file)
        if not results:
            raise Exception('Unable to get images paths.')
        return sorted(results)
    def populate_from_readme(self) -> None:
        """
        Read and parse the sections from README, and fill in the 'title',
        'description', 'relevant_apis' and 'keywords' fields in the dictionary
        for output json.
        """
        try:
            readme_file = open(self.readme_path, 'r')
            # read the readme content into a string
            readme_contents = readme_file.read()
        except Exception as err:
            print(f"Error reading README - {self.readme_path} - {err}.")
            raise err
        else:
            readme_file.close()
        # Use regex to split the README by exactly 2 pound marks, so that they
        # are separated into paragraphs.
        pattern = re.compile(r'^#{2}(?!#)\s(.*)', re.MULTILINE)
        readme_parts = re.split(pattern, readme_contents)
        try:
            # The capture group puts each section title into readme_parts;
            # the section body immediately follows its title.
            api_section_index = readme_parts.index('Relevant API') + 1
            tags_section_index = readme_parts.index('Tags') + 1
            self.title, self.description = parse_head(readme_parts[0])
            self.relevant_apis = parse_apis(readme_parts[api_section_index])
            keywords = parse_tags(readme_parts[tags_section_index])
            # De-duplicate API names in README's Tags section.
            self.keywords = [w for w in keywords if w not in self.relevant_apis]
            # "It combines the Tags and the Relevant APIs in the README."
            # See /runtime/common-samples/wiki/README.metadata.json#keywords
            self.keywords += self.relevant_apis
            # self.keywords.sort(key=str.casefold)
        except Exception as err:
            print(f'Error parsing README - {self.readme_path} - {err}.')
            raise err
    def populate_from_paths(self) -> None:
        """
        Populate category name, source code and image filenames from a sample's
        folder.
        """
        # -2 selects the parent folder, which is the category folder
        self.category = get_folder_name_from_path(self.folder_path, -2)
        try:
            self.images = self.get_images_paths()
            self.snippets = self.get_source_code_paths()
        except Exception as err:
            print(f"Error parsing paths - {self.folder_name} - {err}.")
            raise err
    def flush_to_json_string(self) -> str:
        """
        Write the metadata to a json string.

        :return: A pretty-printed JSON string with sorted keys.
        """
        data = dict()
        data["category"] = self.category
        data["description"] = self.description
        data["ignore"] = self.ignore
        data["images"] = self.images
        data["keywords"] = self.keywords
        data["redirect_from"] = self.redirect_from
        data["relevant_apis"] = self.relevant_apis
        data["snippets"] = self.snippets
        data["title"] = self.title
        return json.dumps(data, indent=4, sort_keys=True)
def compare_one_metadata(folder_path: str):
    """
    A handy helper function to create 1 sample's metadata by running the script
    without passing in arguments, and write to a separate json for comparison.

    The path may look like
    '~/arcgis-runtime-samples-ios/arcgis-ios-sdk-samples/Maps/Display a map'

    :param folder_path: Path to a single sample folder.
    :raises Exception: if the README cannot be parsed, the checked-in JSON
        cannot be read, or the generated metadata differs from it.
    """
    single_updater = MetadataCreator(folder_path)
    try:
        single_updater.populate_from_readme()
        single_updater.populate_from_paths()
    except Exception as err:
        print(f'Error populate failed for - {single_updater.folder_name}.')
        raise err
    json_path = os.path.join(folder_path, 'README.metadata.json')
    try:
        # Fix: 'with' guarantees the handle is closed even when json.load
        # raises; the original only closed it on the success path.
        with open(json_path, 'r') as json_file:
            json_data = json.load(json_file)
    except Exception as err:
        print(f'Error reading JSON - {folder_path} - {err}')
        raise err
    # The special rule not to compare the redirect_from.
    single_updater.redirect_from = json_data['redirect_from']
    # The special rule to be lenient on shortened description.
    # If the original json has a shortened/special char purged description,
    # then no need to raise an error.
    if json_data['description'] in sub_special_char(single_updater.description):
        single_updater.description = json_data['description']
    # The special rule to ignore the order of src filenames.
    # If the original json has all the filenames, then it is good.
    if sorted(json_data['snippets']) == single_updater.snippets:
        single_updater.snippets = json_data['snippets']
    new = single_updater.flush_to_json_string()
    original = json.dumps(json_data, indent=4, sort_keys=True)
    if new != original:
        raise Exception(f'Error inconsistent metadata - {folder_path}')
def all_samples(path: str):
    """
    Run the check on all samples.

    :param path: The path to 'arcgis-ios-sdk-samples' folder.
    :return: None. Throws if exception occurs.
    """
    failures = 0
    for root, dirs, files in os.walk(path):
        # Only folders whose parent is a known category are sample folders.
        if get_folder_name_from_path(root) not in categories:
            continue
        for dir_name in dirs:
            sample_path = os.path.join(root, dir_name)
            # Omit empty folders - they are omitted by Git.
            contents = [f for f in os.listdir(sample_path)
                        if not f.startswith('.DS_Store')]
            if not contents:
                continue
            try:
                compare_one_metadata(sample_path)
            except Exception as err:
                failures += 1
                print(f'{failures}. {err}')
    # Throw once if there are exceptions.
    if failures > 0:
        raise Exception('Error(s) occurred during checking all samples.')
def main():
    """Parse command-line arguments and run the metadata check.

    Exits by letting exceptions propagate to the module-level handler.
    """
    # Initialize parser.
    msg = 'Check metadata style. Run it against the /arcgis-ios-sdk-samples ' \
          'folder or a single sample folder. ' \
          'On success: Script will exit with zero. ' \
          'On failure: Title inconsistency will print to console and the ' \
          'script will exit with non-zero code.'
    parser = argparse.ArgumentParser(description=msg)
    parser.add_argument('-a', '--all', help='path to arcgis-ios-sdk-samples '
                                            'folder')
    parser.add_argument('-s', '--single', help='path to a single sample')
    args = parser.parse_args()
    # Fix: the original wrapped these calls in ``except Exception as err:
    # raise err`` blocks, which re-raise the same exception unchanged -- the
    # wrappers were redundant and are removed.
    if args.all:
        all_samples(args.all)
    elif args.single:
        compare_one_metadata(args.single)
    else:
        raise Exception('Invalid arguments, abort.')
if __name__ == '__main__':
    try:
        main()
    except Exception as error:
        # Print the failure and exit non-zero so CI marks the check as failed.
        # NOTE(review): ``exit`` is the site-module helper; ``sys.exit`` is the
        # conventional choice for scripts -- confirm before changing.
        print(f'{error}')
        exit(1)
| |
#!/usr/bin/env
"""
GOA_Winds_NARR_6hr.py
Compare NARR Winds with NCEP V2 (with Mooring Winds) for 6hr intervals. Uses 3hr NARR and 6hr NCEP
Using Anaconda packaged Python
"""
#System Stack
import datetime
#Science Stack
import numpy as np
from scipy import stats
# User Stack
import general_utilities.haversine as sphered
from utilities import ncutilities as ncutil
# Visual Stack
import matplotlib.pyplot as plt
from matplotlib.dates import MonthLocator, DateFormatter, DayLocator
__author__ = 'Shaun Bell'
__email__ = 'shaun.bell@noaa.gov'
# Fix: the original used leading-zero literals (``03``) in the date
# constructors -- Python 2 octal syntax and a SyntaxError on Python 3.
# Plain decimal ``3`` has the same value under both interpreters.
__created__ = datetime.datetime(2014, 3, 25)
__modified__ = datetime.datetime(2014, 3, 25)
__version__ = "0.1.0"
__status__ = "Development"
__keywords__ = 'NARR', 'NCEP V2', 'power law', 'user defined time comparison', 'Winds', 'Gulf of Alaska'
"""------------------------General Modules-------------------------------------------"""
def from_netcdf(infile):
    """ Uses ncreadfile_dic which returns a dictionary of all data from netcdf"""
    # open -> list variables -> read everything -> close
    handle = ncutil.ncopen(infile)
    variables = ncutil.get_vars(handle)  # gets all of them
    data = ncutil.ncreadfile_dic(handle, variables)
    ncutil.ncclose(handle)
    return (data, variables)
def from_netcdf_mf(infiles):
    """ Uses ncreadfile_dic which returns a dictionary of all data from netcdf"""
    # multi-file variant: open the whole set as one aggregated handle
    handle = ncutil.mf_ncopen(infiles)
    variables = ncutil.get_vars(handle)  # gets all of them
    data = ncutil.ncreadfile_dic(handle, variables)
    ncutil.ncclose(handle)
    return (data, variables)
def from_netcdf_1dsplice(infile, height_ind, lat_ind, lon_ind):
    """ Uses ncreadfile_dic_slice which returns a dictionary of data sliced at
    the given (height, lat, lon) indices from netcdf"""
    ###nc readin/out
    nchandle = ncutil.ncopen(infile)
    params = ncutil.get_vars(nchandle) #gets all of them
    # Python 2 print statements: this script targets py2
    print "Parameters available: "
    print params
    ncdata = ncutil.ncreadfile_dic_slice(nchandle, params, height_ind=height_ind, lat_ind=lat_ind, lon_ind=lon_ind)
    ncutil.ncclose(nchandle)
    return ncdata
def latlon_grid(infile):
    """Return the geographic coordinate arrays stored in a netcdf file."""
    handle = ncutil.ncopen(infile)
    grid = ncutil.get_geocoords(handle)
    ncutil.ncclose(handle)
    return grid
def date2pydate(file_time, file_time2=None, file_flag='EPIC'):
    """ Ingest EPIC date or NCEP Date and provide python serial date

    file_flag='EPIC': file_time is EPIC julian day, file_time2 is
    milliseconds-into-day; 'NARR'/'NCEP': file_time is hours since 1800-01-01.
    Returns serial days as a numpy array.
    """
    if file_flag == 'EPIC':
        ref_time_py = datetime.datetime.toordinal(datetime.datetime(1968, 5, 23))
        ref_time_epic = 2440000
        offset = ref_time_epic - ref_time_py
        try: #if input is an array
            python_time = [None] * len(file_time)
            for i, val in enumerate(file_time):
                pyday = file_time[i] - offset
                pyfrac = file_time2[i] / (1000. * 60. * 60.* 24.) #milliseconds in a day
                python_time[i] = (pyday + pyfrac)
        # NOTE(review): bare except catches everything (e.g. TypeError when
        # file_time is scalar); ``pytime`` below is assigned but never used.
        except:
            pytime = []
            pyday = file_time - offset
            pyfrac = file_time2 / (1000. * 60. * 60.* 24.) #milliseconds in a day
            python_time = (pyday + pyfrac)
    elif file_flag == 'NARR':
        """ Hours since 1800-1-1"""
        base_date=datetime.datetime.strptime('1800-01-01','%Y-%m-%d').toordinal()
        python_time = file_time / 24. + base_date
    elif file_flag == 'NCEP':
        """ Hours since 1800-1-1"""
        base_date=datetime.datetime.strptime('1800-01-01','%Y-%m-%d').toordinal()
        python_time = file_time / 24. + base_date
    else:
        # NOTE(review): ``sys`` is not imported at the top of this file --
        # this branch would raise NameError on sys.exit(); confirm.
        print "time flag not recognized"
        sys.exit()
    return np.array(python_time)
"""------------------------------- Stats/Math Modules --------------------------------------"""
def lin_fit(x, y):
    """Least-squares linear regression; thin wrapper around scipy.stats.linregress."""
    result = stats.linregress(x, y)
    slope, intercept, r_value, p_value, std_err = result
    return (slope, intercept, r_value, p_value, std_err)
def comp_corr(x, y):
    """
    Complex Correlations

    Parameters:
    -----------
    x: complex vector 1 -- tuple/list of (u-component, v-component) arrays
    y: complex vector 2 -- tuple/list of (u-component, v-component) arrays

    Outputs:
    --------
    complex correlation vector between x and y (orientation independent)
    complex correlation angle (ccw rotation of y with respect to x), degrees

    Reference:
    ----------
    Kundu, Pijush K., 1976: Ekman Veering Observed near the Ocean Bottom.
    J. Phys. Oceanogr., 6, 238-242 (equation 3.3)
    """
    zx = x[0] + 1j * x[1]
    zy = y[0] + 1j * y[1]
    # From equation 3.3
    numerator = np.inner(np.conjugate(zx), zy)
    denominator = (np.sqrt(np.inner(np.conjugate(zx), zx))
                   * np.sqrt(np.inner(np.conjugate(zy), zy)))
    corr = numerator / denominator
    corr_mag = np.sqrt(corr.real ** 2 + corr.imag ** 2)
    corr_angle = np.rad2deg(np.arctan2(corr.imag, corr.real))
    return (corr_mag, corr_angle)
def wind_power_law(comp_orig, height_obs=3., height_interp=10., correction=False):
    """Simple power-law wind adjustment (exponent 0.143).

    Defaults: 3m observations adjusted to 10m. When ``correction`` is False
    the input component is returned unchanged.
    """
    if not correction:
        return comp_orig
    return comp_orig * (height_interp / height_obs) ** 0.143
def hourly_2_ave(ltbound, utbound, time, data, time_base=6.):
    """Bin-average ``data`` into ``time_base``-hour windows between the bounds.

    ``time`` is in serial days. Returns {'mean', 'std', 'time'} where 'time'
    holds each bin's start time.
    """
    interval = time_base / 24.
    tarray = np.arange(ltbound, utbound, interval)
    dmean = np.zeros_like(tarray)
    dstd = np.zeros_like(tarray)
    for idx, start in enumerate(tarray):
        in_bin = (time >= start) & (time < start + interval)
        dmean[idx] = data[in_bin].mean()
        dstd[idx] = data[in_bin].std()
    return ({'mean': dmean, 'std': dstd, 'time': tarray})
def cart2wind(cart_angle):
    """ 0deg is North, rotate clockwise: convert a math-convention angle
    (0deg = East, counter-clockwise) to wind convention."""
    return (90. - cart_angle) % 360.
def rotate_coord(angle_rot, mag, dir):
    """ converts math coords to along/cross shelf.

    + onshore / along coast with land to right (right handed)
    - offshore / along coast with land to left

    Todo: convert met standard for winds (left handed coordinate system)
    """
    theta = dir - angle_rot
    along = mag * np.sin(np.deg2rad(theta))
    cross = mag * np.cos(np.deg2rad(theta))
    return (along, cross)
"""---------------------------- Plotting Modules --------------------------------------"""
def quiver_timeseries(time,ucomp,vcomp,magnitude,data_source,station_name):
    """Plot a stick (quiver) panel plus u/v component panel of 6hr wind data
    and save it to images/<station>_<source>_timeseries.png.

    NOTE(review): zeroing below mutates the caller's arrays in place.
    """
    # mask invalid samples: NaN or magnitude >= 100 are zeroed out
    t_ind = ~(~np.isnan(magnitude) & (magnitude < 100))
    ucomp[t_ind] = 0.
    vcomp[t_ind] = 0.
    magnitude[t_ind] = 0.
    fig1, (ax1, ax2) = plt.subplots(2,1)
    # Plot quiver
    ax1.set_ylim(-magnitude.max(), magnitude.max())
    fill1 = ax1.fill_between(time, magnitude, 0, color='k', alpha=0.1)
    # Fake 'box' to be able to insert a legend for 'Magnitude'
    p = ax1.add_patch(plt.Rectangle((1,1),1,1,fc='k',alpha=0.1))
    leg1 = ax1.legend([p], ["Wind magnitude [m/s]"],loc='lower right')
    leg1._drawFrame=False
    # 1D Quiver plot
    q = ax1.quiver(time,0,ucomp,vcomp,color='r',units='y',scale_units='y',
                   scale = 1,headlength=1,headaxislength=1,width=0.04,alpha=.95)
    qk = plt.quiverkey(q,0.2, 0.05, 5,r'$5 \frac{m}{s}$',labelpos='W',
                       fontproperties={'weight': 'bold'})
    # Plot u and v components
    ax1.axes.get_xaxis().set_visible(False)
    ax1.set_xlim(time.min(),time.max()+0.5)
    ax1.set_ylabel("Velocity (m/s)")
    ax2.plot(time, vcomp, 'b-')
    ax2.plot(time, ucomp, 'g-')
    ax2.set_xlim(time.min(),time.max()+0.5)
    ax2.set_xlabel("Date (UTC)")
    ax2.set_ylabel("Velocity (m/s)")
    ax2.xaxis.set_major_locator(MonthLocator())
    ax2.xaxis.set_major_formatter(DateFormatter('%b %Y'))
    ax2.xaxis.set_minor_locator(DayLocator())
    # hide the shared spine between the stacked panels
    ax1.spines['bottom'].set_visible(False)
    ax2.spines['top'].set_visible(False)
    ax2.xaxis.set_ticks_position('bottom')
    ax2.yaxis.set_ticks_position('both')
    #fig1.autofmt_xdate()
    # Set legend location - See: http://matplotlib.org/users/legend_guide.html#legend-location
    leg2 = plt.legend(['v','u'],loc='upper left')
    leg2._drawFrame=False
    DefaultSize = fig1.get_size_inches()
    fig1.set_size_inches( (DefaultSize[0]*2, DefaultSize[1]) )
    fig1.suptitle("6hr ave Wind data for: " + data_source, fontsize=12)
    # Save figure (without 'white' borders)
    plt.savefig('images/'+ station_name.lower() +'_' + data_source + '_timeseries.png', bbox_inches='tight', dpi = (100))
    plt.close(fig1)
"""---------------------------- Main Routine-------------------------------------------"""
"""------Ingest 1D Data--------"""
### NARR Data has the following boundary corners:
# Lambert Conformal
# 12.2N;133.5W, 54.5N; 152.9W, 57.3N; 49.4W ,14.3N;65.1W
year_long = '2001'
year_short = '01'
moor_sta_long = 'GLOBEC3'
moor_sta_short = 'gbm3'
NARR = '/Users/bell/Data_Local/Reanalysis_Files/NARR/3hourly/'
NCEP = '/Users/bell/Data_Local/Reanalysis_Files/NCEPV2/6hourly/'
infile_narr = [NARR + 'uwnd.10m.'+year_long+'.nc', NARR + 'vwnd.10m.'+year_long+'.nc']
infile_ncep = [NCEP + 'uwnd.10m.gauss.'+year_long+'.nc', NCEP + 'vwnd.10m.gauss.'+year_long+'.nc']
### Grab grid points for future slicing - assume grid is same in all model output
narrlat_lon = latlon_grid(infile_narr[0])
nceplat_lon = latlon_grid(infile_ncep[0])
multifile=False
if multifile:
MooringFile = '/Users/bell/Data_Local/FOCI/Mooring/' + year_long + '/' + moor_sta_long.lower() + \
'/' + year_short + moor_sta_short +'*_wpak.nc'
MooringMetData, Mooring_params = from_netcdf_mf(MooringFile)
else:
MooringFile = '/Users/bell/Data_Local/FOCI/Mooring/' + year_long + '/' + moor_sta_long.lower() + \
'/' + year_short + moor_sta_short +'a_wpak.nc'
MooringMetData, Mooring_params = from_netcdf(MooringFile)
MooringTime = date2pydate(MooringMetData['time'], MooringMetData['time2'], file_flag='EPIC')
sta_lat = MooringMetData['latitude'][0]
sta_long = MooringMetData['longitude'][0]
#-----> user set to force mooring location instead of using built in location (useful if you want
# to specify lat/lon for model comparison purposes
#sta_lat = 58.
#sta_long = 148.
#Find NCEP and NARR nearest point to mooring
narrpt = sphered.nearest_point([sta_lat,-1 * sta_long],narrlat_lon['lat'],narrlat_lon['lon'], '2d')
nceppt = sphered.nearest_point([sta_lat,-1 * sta_long],nceplat_lon['lat'],nceplat_lon['lon']-360., '1d') #grid shift too
#Read in NARR and NCEP data for location chosen
NARR_uwind = from_netcdf_1dsplice(infile_narr[0], None, narrpt[3], narrpt[4])
NARR_vwind = from_netcdf_1dsplice(infile_narr[1], None, narrpt[3], narrpt[4])
NARRTime = date2pydate(NARR_uwind['time'], file_flag='NARR')
NCEP_uwind = from_netcdf_1dsplice(infile_ncep[0], 0, nceppt[3], nceppt[4])
NCEP_vwind = from_netcdf_1dsplice(infile_ncep[1], 0, nceppt[3], nceppt[4])
NCEPTime = date2pydate(NCEP_uwind['time'], file_flag='NCEP')
### calculate 6hr averages for all datasets using NARR time base
time_bin = 6.
time_str = str(time_bin) + 'hr'
NARRDaily_uwnd = hourly_2_ave(NARRTime.min(),NARRTime.max(), NARRTime, NARR_uwind['uwnd'], time_base=time_bin)
NARRDaily_vwnd = hourly_2_ave(NARRTime.min(),NARRTime.max(), NARRTime, NARR_vwind['vwnd'], time_base=time_bin)
NCEPDaily_uwnd = hourly_2_ave(NARRTime.min(),NARRTime.max(), NCEPTime, NCEP_uwind['uwnd'], time_base=time_bin)
NCEPDaily_vwnd = hourly_2_ave(NARRTime.min(),NARRTime.max(), NCEPTime, NCEP_vwind['vwnd'], time_base=time_bin)
MooringDaily_uwnd = hourly_2_ave(NARRTime.min(),NARRTime.max(), MooringTime, \
wind_power_law(MooringMetData['WU_422'], correction=True), time_base=time_bin)
MooringDaily_vwnd = hourly_2_ave(NARRTime.min(),NARRTime.max(), MooringTime, \
wind_power_law(MooringMetData['WV_423'], correction=True), time_base=time_bin)
"""---------------------------- Data Manipulation Routines-----------------------------"""
NARR_wind_mag = np.sqrt(NARRDaily_vwnd['mean']**2. + NARRDaily_uwnd['mean']**2.)
NARR_wind_dir_math = np.rad2deg(np.arctan2(NARRDaily_vwnd['mean'] , NARRDaily_uwnd['mean']))
NCEP_wind_mag = np.sqrt(NCEPDaily_vwnd['mean']**2. + NCEPDaily_uwnd['mean']**2.)
NCEP_wind_dir_math = np.rad2deg(np.arctan2(NCEPDaily_vwnd['mean'] , NCEPDaily_uwnd['mean']))
Mooring_wind_mag = np.sqrt(MooringDaily_uwnd['mean']**2. + MooringDaily_vwnd['mean']**2.)
Mooring_wind_dir_math = np.rad2deg(np.arctan2(MooringDaily_vwnd['mean'] , MooringDaily_uwnd['mean']))
# mask when mooring wasn't available
t_ind = ~np.isnan(Mooring_wind_mag) & (Mooring_wind_mag < 100)
### Calculate +-flow and x-flow rotating along coast (~43 degrees bearing near ' + moor_sta_long.lower() + ' )
(NARRalong, NARRcross) = rotate_coord(137., NARR_wind_mag, NARR_wind_dir_math)
(NCEPalong, NCEPcross) = rotate_coord(137., NCEP_wind_mag, NCEP_wind_dir_math)
(MOORalong, MOORcross) = rotate_coord(137., Mooring_wind_mag, Mooring_wind_dir_math)
"""---------------------------- Plotting Routines--------------------------------------"""
### standard wind / time plots
# NARR
quiver_timeseries(NARRDaily_uwnd['time'],NARRDaily_uwnd['mean'],NARRDaily_vwnd['mean'],NARR_wind_mag,'NARR', moor_sta_long.lower() )
quiver_timeseries(NCEPDaily_uwnd['time'],NCEPDaily_uwnd['mean'],NCEPDaily_vwnd['mean'],NCEP_wind_mag,'NCEP', moor_sta_long.lower() )
quiver_timeseries(MooringDaily_uwnd['time'],MooringDaily_uwnd['mean'],MooringDaily_vwnd['mean'],Mooring_wind_mag,'' + moor_sta_long.lower() + '', moor_sta_long.lower() )
""" Most relevant plots below... along/across shore coorelations"""
### Along/Cross Shore comparisons Mooring vs NARR/NCEP
# for entire year (mark mooring specific times)
fig = plt.figure(6)
#text locations
right = 0.05
top = .95
(slope, intercept, r_value, p_value, std_err) = lin_fit(MOORalong[t_ind], NARRalong[t_ind])
print "Regression stats for Along Shore Mooring v NARR are: %s %s %s %s %s" % (slope, intercept, r_value, p_value, std_err)
(coor_mag, coor_angle) = comp_corr((MOORcross[t_ind],MOORalong[t_ind]),(NARRcross[t_ind],NARRalong[t_ind]))
print "NARR Complex correlation mag - %s and dir - %s" % (coor_mag, coor_angle)
ax = plt.subplot(221)
p1 = ax.plot(MOORalong[t_ind], NARRalong[t_ind])
plt.setp(p1,color='r', marker='+', linestyle='')
p2 = ax.plot(np.arange(-15,16,5),np.arange(-15,16,5))
plt.setp(p2,'color','k','linestyle','--')
p3 = ax.plot(np.arange(-15,16,5),(slope * np.arange(-15,16,5) + intercept) )
plt.setp(p3,'color','k','linestyle','-.')
ax.text(right, top, r"${r^2}$: %0.2f" % (r_value**2.),
horizontalalignment='left',
verticalalignment='top',
transform=ax.transAxes, size=10)
ax.set_xticks(np.arange(-15,16,5))
ax.set_yticks(np.arange(-15,16,5))
ax.set_xlim((-15,15))
ax.set_ylim((-15,15))
ax.set_xlabel(time_str + ' ' + moor_sta_long.lower() + ' Along-shore Flow (m/s)')
ax.set_ylabel(time_str + ' NARR Along-shore Flow (m/s)')
(slope, intercept, r_value, p_value, std_err) = lin_fit(MOORalong[t_ind], NCEPalong[t_ind])
print "Regression stats for Along Shore Mooring v NCEP are: %s %s %s %s %s" % (slope, intercept, r_value, p_value, std_err)
(coor_mag, coor_angle) = comp_corr((MOORcross[t_ind],MOORalong[t_ind]),(NCEPcross[t_ind],NCEPalong[t_ind]))
print "NCEP Complex correlation mag - %s and dir - %s" % (coor_mag, coor_angle)
ax = plt.subplot(223)
p1 = ax.plot(MOORalong[t_ind], NCEPalong[t_ind])
plt.setp(p1,color='r', marker='+', linestyle='')
p2 = ax.plot(np.arange(-15,16,5),np.arange(-15,16,5))
plt.setp(p2,'color','k','linestyle','--')
p3 = ax.plot(np.arange(-15,16,5),(slope * np.arange(-15,16,5) + intercept) )
plt.setp(p3,'color','k','linestyle','-.')
ax.text(right, top, r"${r^2}$: %0.2f" % (r_value**2.),
horizontalalignment='left',
verticalalignment='top',
transform=ax.transAxes, size=10)
ax.set_yticks(np.arange(-15,16,5))
ax.set_xticks(np.arange(-15,16,5))
ax.set_xlim((-15,15))
ax.set_ylim((-15,15))
ax.set_xlabel(time_str + ' ' + moor_sta_long.lower() + ' Along-shore Flow (m/s)')
ax.set_ylabel(time_str + ' NCEP Along-shore Flow (m/s)')
(slope, intercept, r_value, p_value, std_err) = lin_fit(MOORcross[t_ind], NARRcross[t_ind])
print "Regression stats for Across Shore Mooring v NARR are: %s %s %s %s %s" % (slope, intercept, r_value, p_value, std_err)
ax = plt.subplot(222)
p1 = ax.plot(MOORcross[t_ind], NARRcross[t_ind])
plt.setp(p1,color='r', marker='+', linestyle='')
p2 = ax.plot(np.arange(-15,16,5),np.arange(-15,16,5))
plt.setp(p2,'color','k','linestyle','--')
p3 = ax.plot(np.arange(-15,16,5),(slope * np.arange(-15,16,5) + intercept) )
plt.setp(p3,'color','k','linestyle','-.')
ax.text(right, top, r"${r^2}$: %0.2f" % (r_value**2.),
horizontalalignment='left',
verticalalignment='top',
transform=ax.transAxes, size=10)
ax.set_xticks(np.arange(-15,16,5))
ax.set_yticks(np.arange(-15,16,5))
ax.set_xlim((-15,15))
ax.set_ylim((-15,15))
ax.set_xlabel(time_str + ' ' + moor_sta_long.lower() + ' Across-shore Flow (m/s)')
ax.set_ylabel(time_str + ' NARR Across-shore Flow (m/s)')
(slope, intercept, r_value, p_value, std_err) = lin_fit(MOORcross[t_ind], NCEPcross[t_ind])
print "Regression stats for Across Shore Mooring v NCEP are: %s %s %s %s %s" % (slope, intercept, r_value, p_value, std_err)
ax = plt.subplot(224)
p1 = ax.plot(MOORcross[t_ind], NCEPcross[t_ind])
plt.setp(p1,color='r', marker='+', linestyle='')
p2 = ax.plot(np.arange(-15,16,5),np.arange(-15,16,5))
plt.setp(p2,'color','k','linestyle','--')
p3 = ax.plot(np.arange(-15,16,5),(slope * np.arange(-15,16,5) + intercept) )
plt.setp(p3,'color','k','linestyle','-.')
ax.text(right, top, r"${r^2}$: %0.2f" % (r_value**2.),
horizontalalignment='left',
verticalalignment='top',
transform=ax.transAxes, size=10)
ax.set_xticks(np.arange(-15,16,5))
ax.set_yticks(np.arange(-15,16,5))
ax.set_xlim((-15,15))
ax.set_ylim((-15,15))
ax.set_xlabel(time_str + ' ' + moor_sta_long.lower() + ' Across-shore Flow (m/s)')
ax.set_ylabel(time_str + ' NCEP Across-shore Flow (m/s)')
DefaultSize = fig.get_size_inches()
fig.set_size_inches( (DefaultSize[0]*1.5, DefaultSize[1]*1.5) )
plt.savefig('images/' + moor_sta_long.lower() + '_alongacross_comp.png', bbox_inches='tight', dpi = (100))
plt.close()
| |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from flexget.utils.template import get_template
from future.moves.urllib.parse import urlparse, parse_qsl
import os
import re
import logging
from collections import defaultdict
from datetime import datetime
import jsonschema
from jsonschema.compat import str_types, int_types
from flexget.event import fire_event
from flexget.utils import qualities, template
from flexget.utils.tools import parse_timedelta, parse_episode_identifier
# Registry of $ref paths -> schema (or a callable returning one); populated by register_schema().
schema_paths = {}
log = logging.getLogger('config_schema')
# TODO: Rethink how config key and schema registration work
def register_schema(path, schema):
    """
    Register `schema` to be available at `path` for $refs

    :param path: Path to make schema available
    :param schema: The schema, or function which returns the schema
    """
    schema_paths.update({path: schema})
# Validator that handles root structure of config.
# Built lazily by get_schema(); stays None until the first call.
_root_config_schema = None
def register_config_key(key, schema, required=False):
    """ Registers a valid root level key for the config.

    :param string key:
        Name of the root level key being registered.
    :param dict schema:
        Schema for the key.
    :param bool required:
        Specify whether this is a mandatory key.
    """
    root = _root_config_schema
    root['properties'][key] = schema
    if required:
        root.setdefault('required', []).append(key)
    # Also expose the key's schema for $ref resolution.
    register_schema('/schema/config/%s' % key, schema)
def get_schema():
    """Return the root config schema, building it on first use.

    Building fires the 'config.register' event so plugins can call
    register_config_key() to contribute their keys.
    """
    global _root_config_schema
    if _root_config_schema is None:
        _root_config_schema = {
            'type': 'object',
            'properties': {},
            'additionalProperties': False,
        }
        fire_event('config.register')
        # TODO: Is /schema/root this the best place for this?
        register_schema('/schema/config', _root_config_schema)
    return _root_config_schema
def one_or_more(schema, unique_items=False):
    """
    Helper function to construct a schema that validates items matching `schema` or an array
    containing items matching `schema`.
    """
    # Give the scalar branch a default title (mutates the caller's schema, as before).
    if 'title' not in schema:
        schema['title'] = 'single value'
    multiple = {
        'title': 'multiple values',
        'type': 'array',
        'items': schema,
        'minItems': 1,
        'uniqueItems': unique_items,
    }
    return {'oneOf': [multiple, schema]}
def resolve_ref(uri):
    """
    Finds and returns a schema pointed to by `uri` that has been registered in the register_schema function.
    """
    parsed = urlparse(uri)
    try:
        schema = schema_paths[parsed.path]
    except KeyError:
        raise jsonschema.RefResolutionError("%s could not be resolved" % uri)
    if callable(schema):
        # Schema factories receive the query string as keyword arguments.
        return schema(**dict(parse_qsl(parsed.query)))
    return schema
def process_config(config, schema=None, set_defaults=True):
    """
    Validates the config, and sets defaults within it if `set_defaults` is set.
    If schema is not given, uses the root config schema.
    :returns: A list with :class:`jsonschema.ValidationError`s if any
    """
    if schema is None:
        schema = get_schema()
    resolver = RefResolver.from_schema(schema)
    validator = SchemaValidator(schema, resolver=resolver, format_checker=format_checker)
    if set_defaults:
        # Temporarily swap in the default-filling 'properties' validator
        # (class-level attribute, hence the try/finally restore below).
        validator.VALIDATORS['properties'] = validate_properties_w_defaults
    try:
        errors = list(validator.iter_errors(config))
    finally:
        validator.VALIDATORS['properties'] = jsonschema.Draft4Validator.VALIDATORS['properties']
    # Customize the error messages
    for err in errors:
        set_error_message(err)
        err.json_pointer = '/' + '/'.join(str(part) for part in err.path)
    return errors
def parse_time(time_string):
    """Parse a time string from the config into a :class:`datetime.time` object."""
    # Accept 12-hour with AM/PM, then 24-hour, then 24-hour with seconds.
    for fmt in ('%I:%M %p', '%H:%M', '%H:%M:%S'):
        try:
            return datetime.strptime(time_string, fmt).time()
        except ValueError:
            pass
    raise ValueError('invalid time `%s`' % time_string)
def parse_interval(interval_string):
    """Takes an interval string from the config and turns it into a :class:`datetime.timedelta` object."""
    if re.match(r'^\d+ (second|minute|hour|day|week)s?$', interval_string):
        return parse_timedelta(interval_string)
    raise ValueError("should be in format 'x (seconds|minutes|hours|days|weeks)'")
def parse_percent(percent_input):
    """Takes a percent string from the config (e.g. '55%' or '55') and returns it as a float.

    The original docstring was a copy-paste of parse_size's ("turns it into
    int(bytes)") and did not match the behavior.

    :raises ValueError: if the input is not a valid percentage
    """
    percent_input = percent_input.rstrip('%')
    try:
        return float(percent_input)
    except ValueError:
        raise ValueError("should be in format '0-x%'")
def parse_size(size_input):
    """Takes a size string from the config and turns it into int(bytes)."""
    units = [None, 'K', 'M', 'G', 'T', 'P']
    try:
        # Plain byte count, no unit suffix.
        return int(size_input)
    except ValueError:
        pass
    # Normalize e.g. '1.5 GiB' -> '1.5 G', then split value/unit.
    normalized = size_input.upper().rstrip('IB')
    value = float(normalized[:-1])
    unit = normalized[-1:]
    if unit not in units:
        raise ValueError("should be in format '0-x (KiB, MiB, GiB, TiB, PiB)'")
    return int(1024 ** units.index(unit) * value)
# Public API end here, the rest should not be used outside this module
class RefResolver(jsonschema.RefResolver):
    """RefResolver that routes scheme-less $refs through :func:`resolve_ref` by default."""
    def __init__(self, *args, **kwargs):
        if 'handlers' not in kwargs:
            kwargs['handlers'] = {'': resolve_ref}
        super(RefResolver, self).__init__(*args, **kwargs)
# Shared FormatChecker: the stock 'email' check plus the custom formats registered below.
format_checker = jsonschema.FormatChecker(('email',))
@format_checker.checks('quality', raises=ValueError)
def is_quality(instance):
    """Format check: strings must parse as a quality; non-strings pass through."""
    if isinstance(instance, str_types):
        return qualities.get(instance)
    return True
@format_checker.checks('quality_requirements', raises=ValueError)
def is_quality_req(instance):
    """Format check: strings must parse as quality requirements; non-strings pass through."""
    if isinstance(instance, str_types):
        return qualities.Requirements(instance)
    return True
@format_checker.checks('time', raises=ValueError)
def is_time(time_string):
    """Format check: strings must parse via parse_time(); non-strings pass through."""
    if isinstance(time_string, str_types):
        return parse_time(time_string) is not None
    return True
@format_checker.checks('interval', raises=ValueError)
def is_interval(interval_string):
    """Format check: strings must parse via parse_interval(); non-strings pass through."""
    if isinstance(interval_string, str_types):
        return parse_interval(interval_string) is not None
    return True
@format_checker.checks('size', raises=ValueError)
def is_size(size_string):
    """Format check: strings/ints must parse via parse_size(); other types pass through."""
    if isinstance(size_string, (str_types, int_types)):
        return parse_size(size_string) is not None
    return True
@format_checker.checks('percent', raises=ValueError)
def is_percent(percent_string):
    """Format check: strings must parse via parse_percent(); non-strings pass through."""
    if isinstance(percent_string, str_types):
        return parse_percent(percent_string) is not None
    return True
@format_checker.checks('regex', raises=ValueError)
def is_regex(instance):
    """Format check: strings must compile as a regex; non-strings pass through."""
    if isinstance(instance, str_types):
        try:
            return re.compile(instance)
        except re.error as e:
            raise ValueError('Error parsing regex: %s' % e)
    return True
@format_checker.checks('file', raises=ValueError)
def is_file(instance):
    """Format check: strings must name an existing file (after ~ expansion)."""
    if isinstance(instance, str_types):
        expanded = os.path.expanduser(instance)
        if not os.path.isfile(expanded):
            raise ValueError('`%s` does not exist' % instance)
    return True
@format_checker.checks('path', raises=ValueError)
def is_path(instance):
    """Format check: strings must name an existing directory, ignoring any template part."""
    if isinstance(instance, str_types):
        # Only validate the part of the path before the first identifier to be replaced
        match = re.search(r'{[{%].*[}%]}', instance)
        if match:
            instance = os.path.dirname(instance[:match.start()])
        if not os.path.isdir(os.path.expanduser(instance)):
            raise ValueError('`%s` does not exist' % instance)
    return True
# TODO: jsonschema has a format checker for uri if rfc3987 is installed, perhaps we should use that
@format_checker.checks('url')
def is_url(instance):
    """Format check: strings must look like a ftp/http/https/file/udp URL.

    Fix: the pattern is now a raw string. The previous plain string contained
    `\\/`, `\\w`, `\\S`, etc., which are invalid string escape sequences and emit
    DeprecationWarning/SyntaxWarning on modern Python; the regex itself is unchanged.
    """
    if not isinstance(instance, str_types):
        return True
    regexp = ('(' + '|'.join(['ftp', 'http', 'https', 'file', 'udp']) +
              r'):\/\/(\w+:{0,1}\w*@)?(\S+)(:[0-9]+)?(\/|\/([\w#!:.?+=&%@!\-\/]))?')
    return re.match(regexp, instance)
@format_checker.checks('episode_identifier', raises=ValueError)
def is_episode_identifier(instance):
    """Format check: strings/ints must parse as an episode identifier; others pass through."""
    if isinstance(instance, (str_types, int)):
        return parse_episode_identifier(instance) is not None
    return True
@format_checker.checks('file_template', raises=ValueError)
def is_valid_template(instance):
    """Format check: strings must resolve to a known template; non-strings pass through."""
    if isinstance(instance, str_types):
        return get_template(instance) is not None
    return True
def set_error_message(error):
    """
    Create user facing error message from a :class:`jsonschema.ValidationError` `error`.

    Mutates `error.message` in place; a schema-supplied 'error' / 'error_<validator>'
    message always wins.
    """
    # First, replace default error messages with our custom ones
    if error.validator == 'type':
        if isinstance(error.validator_value, str):
            valid_types = [error.validator_value]
        else:
            valid_types = list(error.validator_value)
        # Replace some types with more pythony ones
        replace = {'object': 'dict', 'array': 'list'}
        valid_types = [replace.get(t, t) for t in valid_types]
        # Make valid_types into an english list, with commas and 'or'
        valid_types = ', '.join(valid_types[:-2] + ['']) + ' or '.join(valid_types[-2:])
        # BUG FIX: these were three fall-through statements, so the generic message
        # unconditionally overwrote the dict/list specific ones; now an if/elif/else.
        if isinstance(error.instance, dict):
            error.message = 'Got a dict, expected: %s' % valid_types
        elif isinstance(error.instance, list):
            error.message = 'Got a list, expected: %s' % valid_types
        else:
            error.message = 'Got `%s`, expected: %s' % (error.instance, valid_types)
    elif error.validator == 'format':
        if error.cause:
            error.message = str(error.cause)
    elif error.validator == 'enum':
        error.message = 'Must be one of the following: %s' % ', '.join(map(str, error.validator_value))
    elif error.validator == 'additionalProperties':
        if error.validator_value is False:
            extras = set(jsonschema._utils.find_additional_properties(error.instance, error.schema))
            if len(extras) == 1:
                error.message = 'The key `%s` is not valid here.' % extras.pop()
            else:
                error.message = 'The keys %s are not valid here.' % ', '.join('`%s`' % e for e in extras)
    else:
        # Remove u'' string representation from jsonschema error messages
        error.message = re.sub('u\'(.*?)\'', '`\\1`', error.message)
    # Then update with any custom error message supplied from the schema
    custom_error = error.schema.get('error_%s' % error.validator, error.schema.get('error'))
    if custom_error:
        error.message = template.render(custom_error, error.__dict__)
def select_child_errors(validator, errors):
    """
    Looks through subschema errors, if any subschema is determined to be the intended one,
    (based on 'type' keyword errors,) errors from its branch will be released instead of the parent error.
    """
    for error in errors:
        if not error.context:
            # Not an anyOf/oneOf aggregate error; pass through untouched.
            yield error
            continue
        # Split the suberrors up by which subschema they are from
        subschema_errors = defaultdict(list)
        for sube in error.context:
            subschema_errors[sube.schema_path[0]].append(sube)
        # Find the subschemas that did not have a 'type' error validating the instance at this path
        no_type_errors = dict(subschema_errors)
        valid_types = set()
        # NOTE(review): the loop variable below rebinds the name `errors`, shadowing
        # the parameter; harmless because the outer for-loop already holds its iterator,
        # but worth renaming if this is ever touched.
        for i, errors in subschema_errors.items():
            for e in errors:
                if e.validator == 'type' and not e.path:
                    # Remove from the no_type_errors dict
                    no_type_errors.pop(i, None)
                    # Add the valid types to the list of all valid types
                    if validator.is_type(e.validator_value, 'string'):
                        valid_types.add(e.validator_value)
                    else:
                        valid_types.update(e.validator_value)
        if not no_type_errors:
            # If all of the branches had a 'type' error, create our own virtual type error with all possible types
            for e in validator.descend(error.instance, {'type': valid_types}):
                yield e
        elif len(no_type_errors) == 1:
            # If one of the possible schemas did not have a 'type' error, assume that is the intended one and issue
            # all errors from that subschema
            for e in list(no_type_errors.values())[0]:
                # Re-root the suberror's paths onto the parent error before releasing it.
                e.schema_path.extendleft(reversed(error.schema_path))
                e.path.extendleft(reversed(error.path))
                yield e
        else:
            # Ambiguous: several subschemas remain plausible, keep the aggregate error.
            yield error
def validate_properties_w_defaults(validator, properties, instance, schema):
    """Fill schema-declared defaults into `instance`, then run the stock 'properties' validation."""
    if not validator.is_type(instance, 'object'):
        return
    for prop, subschema in properties.items():
        if 'default' in subschema:
            instance.setdefault(prop, subschema['default'])
    base_validate = jsonschema.Draft4Validator.VALIDATORS["properties"]
    for error in base_validate(validator, properties, instance, schema):
        yield error
def validate_anyOf(validator, anyOf, instance, schema):
    """Stock Draft4 'anyOf', with suberrors filtered through select_child_errors()."""
    raw_errors = jsonschema.Draft4Validator.VALIDATORS["anyOf"](validator, anyOf, instance, schema)
    for err in select_child_errors(validator, raw_errors):
        yield err
def validate_oneOf(validator, oneOf, instance, schema):
    """Stock Draft4 'oneOf', with suberrors filtered through select_child_errors()."""
    raw_errors = jsonschema.Draft4Validator.VALIDATORS["oneOf"](validator, oneOf, instance, schema)
    for err in select_child_errors(validator, raw_errors):
        yield err
def validate_deprecated(validator, message, instance, schema):
    """Not really a validator, just warns if deprecated section of config is being used."""
    # Yields nothing: producing no errors means the config is still accepted.
    log.warning(message)
# Draft4 keyword overrides: anyOf/oneOf gain smarter suberror selection, and
# 'deprecated' becomes a warn-only pseudo-keyword.
validators = {
    'anyOf': validate_anyOf,
    'oneOf': validate_oneOf,
    'deprecated': validate_deprecated
}
SchemaValidator = jsonschema.validators.extend(jsonschema.Draft4Validator, validators)
| |
#zns.py
#by <nick@kousu.ca> 2011.11.18
#
#unfinished code, but useful so I'm posting it
#Do whatever you want with it, but standard no
#warranty and you-must-credit-me terms apply.
#(and if you do anything interesting, I'd appreciate being told!)
"""
stuff to do with Z_n* = U(n), the group of elements coprime to n, mod n.
if you don't know what this is, see http://wikipedia.org/wiki/Multiplicative_group_of_integers_modulo_n
and a certain subset of those groups: those which are on a product of primes U(2*3*5*...*p)
This code is based on an observation about prime sieves:
If you have crossed out everything that is a multiple of the first k-1 primes,
then when crossing out the multiples of the next prime you can skip everything that,
e.g. when doing p=5 (k=3), you skip 10, 15, 20, 30, 40, ... because 10=2*5, 15=3*5, 20=2*10, 30=3*10, 40=2*20, ... so they've already been done
more generally, say you are crossing off p. in the basic prime sieve you go:
p, 2p, 3p, 4p, ..., pp, (p+1)p, (p+2)p, (p+3)p, .....
but you needn't do 2p, 3p, 4p....(p-1)p, because by the theorem about factor checking (you only have to do up to the sqrt)
all multiples of 2, 3, 4, ... p-1 have already been marked (if a number has factors at least one is less than the sqrt of it, so if any number less than p**2 has a factor its factor will be less than p, all of which have already been sieved out by design)
so the list is just
1p, pp, (p+1)p, (p+2)p, (p+3)p, ...
(I'm leaving in the 1p for a reason that only becomes clear as I explain what's going on)
but! there's more! some of the (p+i)s will have already been done as well. For example, a first refinement might be to skip every other one:
1p, pp, (p+2)p, (p+4)p, (p+6)p, ... since the 2s have already been marked off ((lemma: in this case, p>2 so p is an odd prime so p+1, p+3, p+5, ... are even (and can be erased)))
in fact, we should also be able to skip the 3s, 5s, 7s, ....
but what is the system here?
Let's look at instead of what we're crossing out, what we are crossing in, e.g. for p=5 again we only do:
5x1, 5x5, 5x7, 5x11, 5x13, 5x17, 5x19, 5x23, 5x25, 5x29, 5x31, 5x35, 5x37, 5x41, 5x43, 5x47, 5x49
your first guess might be "look! primes!" which is tantalizing, and it was mine too, but
some aren't, e.g. 5**3, 5**2*7,5*7**2 (there is another pattern here, which is that the only new ones are ones of primes larger than, but that doesn't help much in sieving since we don't know those primes 'yet' (or maybe we do, up to a point))
But take another look, there's an easy two-beat pattern going on: 1+4=5, 5+2=7. 7+4=11, 11+2=13, .... 31+4=35, 35+2=37
So here is what is actually going on: these numbers are members (or, technically, 'representatives') of
the unit group U(2*3) = {[1],[5]}, which is all numbers in Z_6 (that is, the integers mod 6) that are *coprime* (i.e. are not a multiple of) 2 or 3
In mod 6 (i.e. mod 2*3)
[1] = {...,-11,-5,1, 7,13,19,25,31,37,43,49,...} and
[5] = {..., -7,-1,5,11,17,23,29,35,41,47,51,...}
and these (well the positive side anyway) are exactly the multiples of 5 that will have been unmarked once we get to 5.
the reason for this is that there is a clock-tick problem (which should translate immediately for you into least-common-multiples, if it doesn't email me and I'll try to point you on the right path :D)
going on here. we tick off all the 2s, then all the 3s, then all the 5s, ... but at the product of all of these they meet up like planets aligning... and then throttle off again and swing around, until the next time they all meet up, which is exactly the next multiple up. Further, the ones they miss in the first cycle (which is what U(2*3*...*[k-1th prime]) is defined as) will be the ones they miss in the second cycle, but shifted by the lcm of the primes.
So that is where the line "lcm = pfac" comes from. And some other stuff. Does any of this make sense?
To try this out you will need a source of primes (this is my test code, not my prime sieve code),
which is this file: http://kousu.ca/software/100000primes.pickle
"""
import time
def units(N):
    """Compute U(N) directly: the residues in 0..N coprime to N.

    range(N+1) includes N itself, but gcd(N, N) == N so it is never kept for N > 1.
    """
    if N == 1:
        return [1]  # special case bug patch. don't use!
    members = []
    for candidate in range(N + 1):
        if coprime(N, candidate):
            members.append(candidate)
    return members
def coprime(a, b):
    """Return whether a and b are coprime.

    Two numbers are coprime when they share no divisor except 1
    (1 divides everything).
    """
    return gcd(a, b) == 1
def gcd(a, b):
    """Compute the greatest common divisor of two non-negative ints.

    Uses the modulo form of Euclid's algorithm. This fixes the bug flagged in
    the original subtraction-based version (`#BUG: gcd(1,0) = 1`): it returned
    0 whenever one argument was 0, but mathematically gcd(x, 0) == x.
    gcd(0, 0) still returns 0, as before.
    """
    while b:
        a, b = b, a % b
    return a
# Prime lookup table; see the module docstring for where to fetch the primes pickle.
import db
primes = db.primes
def pfac(k):
    """Compute the product of the first k primes (1 for k <= 0, the empty product)."""
    product = 1
    for i in range(k):
        product *= primes[i]  # primes[i] is the (i+1)-th prime
    return product
# The product of the first k primes is also their least common multiple.
lcm = pfac
def ephi(k):
    """Compute Euler's phi function on pfac(k).

    phi(p1*p2*...*pk) = (p1-1)(p2-1)...(pk-1) for distinct primes.
    If this code is right, ephi(k) == len(kunits(k)).
    """
    total = 1
    for i in range(k):
        total *= primes[i] - 1
    return total
kunits_memo = {}  # k -> kunits(k); only the most recent level is kept (see HACK at the bottom)
def kunits(k):
    "generate U(lcm(k))"
    "notation: X_ means 'previous value of X'"
    if k in kunits_memo: return kunits_memo[k]
    #this should probably be memoized, but meh
    if k == 0: #p=2 => U(2) = {[1]}
        return [1]
    p = primes[k-1]
    U_ = kunits(k-1)
    #print("p:", p, "U_:",U_)
    #take the previous one and multiply it out until we win?
    L = lcm(k)
    #we need to get to L
    #we have
    L_ = lcm(k-1)
    #so we need to multiply out
    #actually we need to multiply out exactly primes[k] times
    #okay, so now we need to filter intelligently
    #it will only
    #print("(%d) U_:" % p, U_, L_)
    # NOTE(review): the comprehension variable `k` below shadows the parameter `k`
    # (harmless in Python 3, where comprehensions have their own scope, but confusing).
    # `Uish` itself is only used by the commented-out assert further down.
    Uish = [(u_ + k*L_) for k in range(p) for u_ in U_] #reasoning: multiply U_ out (exactly p times!) to get all elements between 0 and 2*3*...*p_*p that are coprime to 2*3*...*p_
    #then erase those elements which are exactly multiples of p and no other
    #(which for some reason are generated by U_ as well?????_
    #whores = [(u_,k) for k in range(p) for u_ in U_ if (u_+k*L_) % p == 0]
    #print("(%d) whores:" % p, whores)
    #ps = [x for x in Uish if x%p==0]
    #there is very much structure here...
    #oh dear, a bad case: p=2 has lcm=2. in all other cases p!=lcm but here it does so it FUCKS UP SHIT GOOD
    #bad = [(u_*p % L_, u_*p // L_) for u_ in U_] #...where did this come from? why does this coincide?
    #print("(%d) bad:" % p, bad)
    #indexes = [1]
    #t0 = time.time()
    #for u_ in U_:
    #    Uish.remove(p*u_)
    #t_rem = time.time() - t0
    #print("took %0.9fs for getting rid of %d %ds" % ((t_rem), len(U_), p))
    #instead of doing that shit..
    D = weirdfunction(k-1) #XXX totally inefficient #definitely k-1
    U = []
    #pp(D) #D should be the same as whores and bad
    #take the list of things to cross, which are p*u for u in U_ (last U-nit group)
    #t0a = time.time()
    U = []
    #for base in range(0, p*L_, L_):
    #    Ux = U_[:] #clone
    #    for bad in D.get(base // L_, []):
    #        Ux.remove(bad) #<-- quadratic!!!!!
    #    U.extend(u+base for u in Ux)
    # Build U(lcm(k)): replicate U_ across p levels, dropping at each level the
    # multiples of p predicted by weirdfunction().
    U = [u+level*L_ for level in range(p) for u in sorted(set(U_)-set(D.get(level,[])))]
    #hmm, the set-tiness sometimes misorders things.. so we need to sorted()
    #t_cons = time.time() - t0a
    #print("Took %0.9fs to construct U" % (t_cons))
    #print("ratio:", t_rem/t_cons, t_rem > t_cons)
    #for level in range(p):
    #    print("\tU_:", U_)
    #    #print("\tD[%d]:" % level, D[level])
    #    S = sorted(list(set(U_)-set(D.get(level,[]))))
    #    print("\t", level, S)
    #    U += [u+level*L_ for u in S]
    #    print()
    #print("U:", U)
    #print("Uish:", Uish)
    #assert sorted(U) == sorted(Uish)
    #segment U(2*3*5*7) into sections mod 11
    #then these sections correspond to thingsies:
    #if thing is a mod 11, then it is a mod
    #U = []
    #print(indexes)
    #for j in range(len(indexes)):
    #    U += Uish[indexes[j]-1:indexes[j+1]]
    #U = Uish
    #print("-"*50)
    if k-1 in kunits_memo: del kunits_memo[k-1] #HACK: delete the previous memoization to save memory
    kunits_memo[k] = U
    return U
def weirdfunction(k):
    """Split the unit group in a weird way that seems to have applications to sorting out.

    Maps each u in U(lcm(k)) through primes[k]*u = q*lcm(k) + r and groups the
    remainders r by quotient q: {q: [r, ...]}.
    """
    if k == 0:
        # patch: in the base case lcm=1, which makes divmod behave badly --
        # it writes 2 = 2*1 + 0 when we want 2 = 1*1 + 1, i.e. (1,1) ~ (2,0).
        return {1: [1]}
    L = lcm(k)
    p_next = primes[k]
    D = {}
    for u in kunits(k):
        quotient, remainder = divmod(p_next * u, L)
        D.setdefault(quotient, []).append(remainder)
    return D
def pp(D):
    """Pretty-print a dict, one `key => value` line per entry."""
    for key, value in D.items():
        print(key, "=>", value)
def ppl(l, n=200):
    """Format a list, truncated.

    Short lists (len < n) are str()'d whole. Longer ones show the first 3/4
    of n elements, then one dot per 5% of elements skipped, then the last
    1/4 of n elements.
    """
    if len(l) < n:
        return str(l)
    head = ", ".join(str(x) for x in l[:n * 3 // 4])
    tail = ", ".join(str(x) for x in l[len(l) - (n * 1 // 4):])
    dots = "." * int(100 / 5 * (1 - n / len(l)))
    return "[%s, %s, %s]" % (head, dots, tail)
def ppfac(k):
    """'Pretty' p-fac of k: the multiplication string whose value is lcm(k), e.g. '2*3*5'."""
    if k == 0:
        return "1"
    return "*".join(str(p) for p in primes[:k])
if __name__ == '__main__':
    #for k in range(7):
    #    print(lcm(k), ephi(k))
    # Self-test: for each k, compare the incremental kunits(k) construction
    # against brute-force units(lcm(k)); the two lists should match exactly.
    # input() pauses between levels since the brute-force search gets slow.
    for k in range(11):
        l = lcm(k)
        input("[%d] Ready to compute U(%d)]" % (k, l))
        t0 = time.time()
        a = kunits(k)
        print("took %0.4fs to kunits" % (time.time() - t0))
        b = units(l)
        print(" "*2 + "Computed unit group by a) kunits(), b) brute force search:")
        print(" "*4, "U(%s) =" % ppfac(k), ppl(a))
        print(" "*4, "U(%s) =" % ppfac(k), ppl(b))
        print(" "*4, "Lengths:", len(a), len(b))
        print("Do the two match? %s" % ["**NO**", "yes"][a == b])
        print() #print a newline
| |
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
import base64
import json
import logging
from collections import defaultdict
from types import NoneType
from google.appengine.api import users
from google.appengine.ext import deferred, ndb
from babel.numbers import get_currency_name
from framework.consts import get_base_url, DAY
from framework.utils import now, azzert, try_or_defer
from mcfw.exceptions import HttpNotFoundException, HttpBadRequestException
from mcfw.properties import object_factory
from mcfw.rpc import returns, arguments
from plugins.rogerthat_api.api import messaging
from plugins.rogerthat_api.exceptions import BusinessException
from plugins.rogerthat_api.to import UserDetailsTO, MemberTO
from plugins.rogerthat_api.to.messaging import Message, AttachmentTO
from plugins.rogerthat_api.to.messaging.flow import FLOW_STEP_MAPPING, FormFlowStepTO
from plugins.rogerthat_api.to.messaging.forms import SignTO, SignFormTO, FormResultTO, FormTO, SignWidgetResultTO
from plugins.rogerthat_api.to.messaging.service_callback_results import FlowMemberResultCallbackResultTO, \
FlowCallbackResultTypeTO, TYPE_FLOW
from plugins.tff_backend.bizz import get_tf_token_api_key, intercom_helpers, get_mazraa_api_key
from plugins.tff_backend.bizz.agreements import get_bank_account_info
from plugins.tff_backend.bizz.authentication import RogerthatRoles
from plugins.tff_backend.bizz.email import send_emails_to_support
from plugins.tff_backend.bizz.gcs import upload_to_gcs
from plugins.tff_backend.bizz.global_stats import get_global_stats
from plugins.tff_backend.bizz.intercom_helpers import IntercomTags
from plugins.tff_backend.bizz.iyo.utils import get_username
from plugins.tff_backend.bizz.kyc import save_utility_bill
from plugins.tff_backend.bizz.kyc.onfido_bizz import get_applicant
from plugins.tff_backend.bizz.kyc.rogerthat_callbacks import kyc_part_1
from plugins.tff_backend.bizz.messages import send_message_and_email
from plugins.tff_backend.bizz.rogerthat import create_error_message, send_rogerthat_flow
from plugins.tff_backend.bizz.service import get_main_branding_hash, add_user_to_role
from plugins.tff_backend.bizz.todo import update_investor_progress
from plugins.tff_backend.bizz.todo.investor import InvestorSteps
from plugins.tff_backend.bizz.user import user_code, get_tff_profile
from plugins.tff_backend.consts.kyc import country_choices
from plugins.tff_backend.consts.payment import TOKEN_TFT, TOKEN_ITFT
from plugins.tff_backend.dal.investment_agreements import get_investment_agreement
from plugins.tff_backend.models.global_stats import GlobalStats
from plugins.tff_backend.models.investor import InvestmentAgreement, PaymentInfo
from plugins.tff_backend.models.user import KYCStatus, TffProfile
from plugins.tff_backend.plugin_consts import KEY_ALGORITHM, KEY_NAME, \
SUPPORTED_CRYPTO_CURRENCIES, CRYPTO_CURRENCY_NAMES, BUY_TOKENS_FLOW_V3, BUY_TOKENS_FLOW_V3_PAUSED, BUY_TOKENS_TAG, \
BUY_TOKENS_FLOW_V3_KYC_MENTION, FLOW_CONFIRM_INVESTMENT, FLOW_INVESTMENT_CONFIRMED, FLOW_SIGN_INVESTMENT, \
BUY_TOKENS_FLOW_V5, INVEST_FLOW_TAG, FLOW_HOSTER_REMINDER, SCHEDULED_QUEUE, FLOW_UTILITY_BILL_RECEIVED
from plugins.tff_backend.to.investor import InvestmentAgreementTO, CreateInvestmentAgreementTO
from plugins.tff_backend.utils import get_step_value, round_currency_amount, get_key_name_from_key_string, get_step
from plugins.tff_backend.utils.app import create_app_user_by_email, get_app_user_tuple, create_app_user
# Maps an InvestmentAgreement status to the investor-progress step to report
# (None for canceled agreements -- presumably no progress update is sent; confirm in callers).
INVESTMENT_TODO_MAPPING = {
    InvestmentAgreement.STATUS_CANCELED: None,
    InvestmentAgreement.STATUS_CREATED: InvestorSteps.FLOW_AMOUNT,
    InvestmentAgreement.STATUS_SIGNED: InvestorSteps.PAY,
    InvestmentAgreement.STATUS_PAID: InvestorSteps.ASSIGN_TOKENS,
}
@returns(FlowMemberResultCallbackResultTO)
@arguments(message_flow_run_id=unicode, member=unicode, steps=[object_factory("step_type", FLOW_STEP_MAPPING)],
           end_id=unicode, end_message_flow_id=unicode, parent_message_key=unicode, tag=unicode, result_key=unicode,
           flush_id=unicode, flush_message_flow_id=unicode, service_identity=unicode, user_details=UserDetailsTO,
           flow_params=unicode)
def invest_tft(message_flow_run_id, member, steps, end_id, end_message_flow_id, parent_message_key, tag, result_key,
               flush_id, flush_message_flow_id, service_identity, user_details, flow_params):
    """Rogerthat flow-member-result callback: delegate to invest() for the TFT token."""
    return invest(message_flow_run_id, member, steps, end_id, end_message_flow_id, parent_message_key, tag, result_key,
                  flush_id, flush_message_flow_id, service_identity, user_details, flow_params, TOKEN_TFT)
@returns(FlowMemberResultCallbackResultTO)
@arguments(message_flow_run_id=unicode, member=unicode, steps=[object_factory("step_type", FLOW_STEP_MAPPING)],
           end_id=unicode, end_message_flow_id=unicode, parent_message_key=unicode, tag=unicode, result_key=unicode,
           flush_id=unicode, flush_message_flow_id=unicode, service_identity=unicode, user_details=UserDetailsTO,
           flow_params=unicode)
def invest_itft(message_flow_run_id, member, steps, end_id, end_message_flow_id, parent_message_key, tag, result_key,
                flush_id, flush_message_flow_id, service_identity, user_details, flow_params):
    """Rogerthat flow-member-result callback: delegate to invest() for the iTFT token."""
    return invest(message_flow_run_id, member, steps, end_id, end_message_flow_id, parent_message_key, tag, result_key,
                  flush_id, flush_message_flow_id, service_identity, user_details, flow_params, TOKEN_ITFT)
@returns(FlowMemberResultCallbackResultTO)
@arguments(message_flow_run_id=unicode, member=unicode, steps=[object_factory("step_type", FLOW_STEP_MAPPING)],
           end_id=unicode, end_message_flow_id=unicode, parent_message_key=unicode, tag=unicode, result_key=unicode,
           flush_id=unicode, flush_message_flow_id=unicode, service_identity=unicode, user_details=UserDetailsTO,
           flow_params=unicode)
def invest(message_flow_run_id, member, steps, end_id, end_message_flow_id, parent_message_key, tag, result_key,
           flush_id, flush_message_flow_id, service_identity, user_details, flow_params, token):
    """Generic flow callback for a "buy tokens" flow.

    Parses the executed flow steps, stores an InvestmentAgreement in status
    CREATED and returns a FLOW_CONFIRM_INVESTMENT follow-up flow for the user
    to confirm. Any unexpected error is logged and turned into a generic
    error message flow.
    """
    if flush_id == 'flush_kyc' or flush_id == 'flush_corporation':
        # KYC flow started from within the invest flow
        return kyc_part_1(message_flow_run_id, member, steps, end_id, end_message_flow_id, parent_message_key, tag,
                          result_key, flush_id, flush_message_flow_id, service_identity, user_details, flow_params)
    try:
        email = user_details.email
        app_id = user_details.app_id
        app_user = create_app_user_by_email(email, app_id)
        logging.info('User %s wants to invest', email)
        version = get_key_name_from_key_string(steps[0].message_flow_id)
        currency = get_step_value(steps, 'message_get_currency').replace('_cur', '')
        if version.startswith(BUY_TOKENS_FLOW_V3) or version.startswith(BUY_TOKENS_FLOW_V5):
            # Newer flow versions ask for an amount of money (comma or dot
            # decimal separator); derive the token count from it.
            amount = float(get_step_value(steps, 'message_get_order_size_ITO').replace(',', '.'))
            token_count_float = get_token_count(currency, amount)
        else:
            # Older flow versions ask for a token count; derive the amount.
            token_count_float = float(get_step_value(steps, 'message_get_order_size_ITO'))
            amount = get_investment_amount(currency, token_count_float)
        username = get_username(app_user)
        agreement = _create_investment_agreement(amount, currency, token, token_count_float, username, version,
                                                 status=InvestmentAgreement.STATUS_CREATED)
        payment_info = []
        usd_within_uae_step = get_step(steps, 'message_usd_within_uae')
        if usd_within_uae_step and usd_within_uae_step.answer_id == 'button_yes':
            payment_info.append(PaymentInfo.UAE.value)
        agreement.payment_info.extend(payment_info)
        agreement.put()
        if version == BUY_TOKENS_FLOW_V3_PAUSED:
            # Paused flow variant: persist the agreement but do not continue.
            return None
        utility_bill_step = get_step(steps, 'message_utility_bill')
        if utility_bill_step:
            azzert(utility_bill_step.answer_id == FormTO.POSITIVE)
            url = utility_bill_step.get_value()
            deferred.defer(save_utility_bill, url, TffProfile.create_key(get_username(user_details)))
        tag = {
            '__rt__.tag': INVEST_FLOW_TAG,
            'investment_id': agreement.id
        }
        flow_params = {
            'token': agreement.token,
            'amount': agreement.amount,
            'currency': agreement.currency
        }
        result = FlowCallbackResultTypeTO(flow=FLOW_CONFIRM_INVESTMENT,
                                          tag=json.dumps(tag).decode('utf-8'),
                                          force_language=None,
                                          flow_params=json.dumps(flow_params))
        return FlowMemberResultCallbackResultTO(type=TYPE_FLOW, value=result)
    except Exception as e:
        logging.exception(e)
        return create_error_message()
def _create_investment_agreement(amount, currency, token, token_count_float, username, version, **kwargs):
    """Build (but do not save) an InvestmentAgreement for `username`.

    Name and address are taken from the user's first KYC applicant address.
    `token_count_float` is stored as a fixed-point long with 2 decimals.
    Extra model properties (e.g. status, sign_time) may be passed via kwargs.
    """
    tff_profile = get_tff_profile(username)
    applicant = get_applicant(tff_profile.kyc.applicant_id)
    # NOTE(review): the trailing space in the name looks unintentional — confirm.
    name = '%s %s ' % (applicant.first_name, applicant.last_name)
    address = '%s %s' % (applicant.addresses[0].street, applicant.addresses[0].building_number)
    address += '\n%s %s' % (applicant.addresses[0].postcode, applicant.addresses[0].town)
    # Python 2 filter() returns a list; this raises IndexError when the
    # applicant's country code is not present in country_choices.
    country = filter(lambda c: c['value'] == applicant.addresses[0].country, country_choices)[0]['label']
    address += '\n%s' % country
    precision = 2
    reference = user_code(username)
    agreement = InvestmentAgreement(creation_time=now(),
                                    username=username,
                                    token=token,
                                    amount=amount,
                                    token_count=long(token_count_float * pow(10, precision)),
                                    token_precision=precision,
                                    currency=currency,
                                    name=name,
                                    address=address,
                                    version=version,
                                    reference=reference,
                                    **kwargs)
    return agreement
@returns(InvestmentAgreement)
@arguments(agreement=CreateInvestmentAgreementTO)
def create_investment_agreement(agreement):
    # type: (CreateInvestmentAgreementTO) -> InvestmentAgreement
    """Manually create an agreement (admin API) with an uploaded document.

    The uploaded document is expected to be a data URI
    ("data:<content-type>;base64,<payload>") and is stored on GCS under the
    agreement's canonical filename.

    Raises:
        HttpBadRequestException: when the user is not KYC verified.
    """
    tff_profile = get_tff_profile(agreement.username)
    if tff_profile.kyc.status != KYCStatus.VERIFIED:
        raise HttpBadRequestException('cannot_invest_not_kyc_verified')
    token_count_float = get_token_count(agreement.currency, agreement.amount)
    agreement_model = _create_investment_agreement(agreement.amount, agreement.currency, agreement.token,
                                                   token_count_float, tff_profile.username, 'manually_created',
                                                   status=agreement.status, paid_time=agreement.paid_time,
                                                   sign_time=agreement.sign_time)
    # Split the data URI into its content-type prefix and base64 payload.
    prefix, doc_content_base64 = agreement.document.split(',')
    content_type = prefix.split(';')[0].replace('data:', '')
    doc_content = base64.b64decode(doc_content_base64)
    agreement_model.put()
    pdf_name = InvestmentAgreement.filename(agreement_model.id)
    upload_to_gcs(pdf_name, doc_content, content_type)
    return agreement_model
def get_currency_rate(currency):
    """Return the value of one token expressed in `currency`.

    USD is the base currency stored directly on the TFT global stats; any
    other currency must have an explicit entry in `GlobalStats.currencies`.

    Raises:
        BusinessException: when no conversion rate is configured for `currency`.
    """
    global_stats = GlobalStats.create_key(TOKEN_TFT).get()  # type: GlobalStats
    if currency == 'USD':
        return global_stats.value
    currency_stats = [c for c in global_stats.currencies if c.currency == currency]  # type: list[CurrencyValue]
    if not currency_stats:
        # Bug fix: the message and the currency were previously passed as two
        # positional Exception args, so the %s placeholder was never filled in.
        raise BusinessException('No stats are set for currency %s' % currency)
    return currency_stats[0].value
def get_investment_amount(currency, token_count):
    # type: (unicode, float) -> float
    """Price of `token_count` tokens in `currency`, rounded per-currency rules."""
    rate = get_currency_rate(currency)
    raw_amount = rate * token_count
    return round_currency_amount(currency, raw_amount)
def get_token_count(currency, amount):
    # type: (unicode, float) -> float
    """Number of tokens that `amount` of `currency` buys at the current rate."""
    rate = get_currency_rate(currency)
    return amount / rate
@returns()
@arguments(email=unicode, tag=unicode, result_key=unicode, context=unicode, service_identity=unicode,
           user_details=UserDetailsTO)
def start_invest(email, tag, result_key, context, service_identity, user_details):
    # type: (unicode, unicode, unicode, unicode, unicode, UserDetailsTO) -> None
    """Poke handler that used to start the invest flow; currently disabled.

    The early return deliberately keeps the code below dead until the flow
    is re-enabled.
    """
    logging.info('Ignoring start_invest poke tag because this flow is not used atm')
    return
    # Dead code below — kept on purpose for when the flow is re-enabled.
    flow = BUY_TOKENS_FLOW_V3_KYC_MENTION
    logging.info('Starting invest flow %s for user %s', flow, user_details.email)
    members = [MemberTO(member=user_details.email, app_id=user_details.app_id, alert_flags=0)]
    flow_params = json.dumps({'currencies': _get_conversion_rates()})
    messaging.start_local_flow(get_mazraa_api_key(), None, members, service_identity, tag=BUY_TOKENS_TAG,
                               context=context, flow=flow, flow_params=flow_params)
def _get_conversion_rates():
    """One entry per configured iTFT currency: display name, symbol and rate."""
    stats = get_global_stats(TOKEN_ITFT)
    return [{'name': _get_currency_name(c.currency),
             'symbol': c.currency,
             'value': c.value}
            for c in stats.currencies]
@arguments(message_flow_run_id=unicode, member=unicode, steps=[object_factory("step_type", FLOW_STEP_MAPPING)],
           end_id=unicode, end_message_flow_id=unicode, parent_message_key=unicode, tag=unicode, result_key=unicode,
           flush_id=unicode, flush_message_flow_id=unicode, service_identity=unicode, user_details=UserDetailsTO,
           flow_params=unicode)
def invest_complete(message_flow_run_id, member, steps, end_id, end_message_flow_id, parent_message_key, tag,
                    result_key, flush_id, flush_message_flow_id, service_identity, user_details, flow_params):
    """Flow callback fired when the confirm-investment flow finishes.

    When the user ended on a 'confirm' end point, kicks off the actual
    investment processing (PDF generation + SIGN message) for the agreement
    referenced in the callback tag.
    """
    email = user_details.email
    app_id = user_details.app_id
    if 'confirm' in end_id:
        agreement_key = InvestmentAgreement.create_key(json.loads(tag)['investment_id'])
        # Bug fix: a stale trailing `0` argument was passed although _invest()
        # only takes (agreement_key, email, app_id), which made the call raise
        # a TypeError when executed.
        try_or_defer(_invest, agreement_key, email, app_id)
def _get_currency_name(currency):
    """Human-readable name for a currency symbol.

    Crypto currencies use the hard-coded CRYPTO_CURRENCY_NAMES table;
    everything else is resolved through babel (en_GB locale).
    """
    is_crypto = currency in SUPPORTED_CRYPTO_CURRENCIES
    if is_crypto:
        return CRYPTO_CURRENCY_NAMES[currency]
    return get_currency_name(currency, locale='en_GB')
def _set_token_count(agreement, token_count_float=None, precision=2):
    # type: (InvestmentAgreement, float, int) -> None
    """Compute and store the fixed-point `token_count` for an agreement.

    For CREATED agreements the count is derived from the invested amount and
    the configured conversion rate. For SIGNED BTC agreements the count is
    overwritten with the explicitly supplied `token_count_float`, since the
    BTC exchange rate at payment time determines the granted tokens.

    Raises:
        HttpBadRequestException: when no conversion rate exists for the
            agreement's currency, or when `token_count_float` is missing for
            a signed BTC agreement.
    """
    stats = get_global_stats(agreement.token)
    logging.info('Setting token count for agreement %s', agreement.to_dict())
    if agreement.status == InvestmentAgreement.STATUS_CREATED:
        if agreement.currency == 'USD':
            agreement.token_count = long((agreement.amount / stats.value) * pow(10, precision))
        else:
            # Bug fix: the original indexed the filter result with [0] before
            # checking for emptiness, so a missing conversion raised IndexError
            # instead of the intended HttpBadRequestException.
            matching_stats = [s for s in stats.currencies if s.currency == agreement.currency]
            if not matching_stats:
                raise HttpBadRequestException('Could not find currency conversion for currency %s' % agreement.currency)
            agreement.token_count = long((agreement.amount / matching_stats[0].value) * pow(10, precision))
    # token_count can be overwritten when marking the investment as paid for BTC
    elif agreement.status == InvestmentAgreement.STATUS_SIGNED:
        if agreement.currency == 'BTC':
            if not token_count_float:
                raise HttpBadRequestException('token_count_float must be provided when setting token count for BTC')
            # The course of BTC changes how much tokens are granted
            if agreement.token_count:
                logging.debug('Overwriting token_count for investment agreement %s from %s to %s',
                              agreement.id, agreement.token_count, token_count_float)
            agreement.token_count = long(token_count_float * pow(10, precision))
    agreement.token_precision = precision
def _invest(agreement_key, email, app_id):
    # type: (ndb.Key, unicode, unicode) -> None
    """Generate the purchase agreement PDF and send the SIGN flow to the user.

    Also updates the investor-progress step matching the agreement's status.
    """
    # Local import to avoid a circular dependency with the agreements module.
    from plugins.tff_backend.bizz.agreements import create_token_agreement_pdf
    app_user = create_app_user_by_email(email, app_id)
    logging.debug('Creating Token agreement')
    agreement = get_investment_agreement(agreement_key.id())
    _set_token_count(agreement)
    agreement.put()
    currency_full = _get_currency_name(agreement.currency)
    pdf_name = InvestmentAgreement.filename(agreement_key.id())
    username = get_username(app_user)
    has_verified_utility_bill = get_tff_profile(username).kyc.utility_bill_verified
    pdf_contents = create_token_agreement_pdf(agreement.name, agreement.address, agreement.amount, currency_full,
                                              agreement.currency, agreement.token, agreement.payment_info,
                                              has_verified_utility_bill)
    pdf_url = upload_to_gcs(pdf_name, pdf_contents, 'application/pdf')
    logging.debug('Storing Investment Agreement in the datastore')
    pdf_size = len(pdf_contents)
    attachment_name = u'Purchase Agreement - Internal Token Offering %s' % agreement_key.id()
    deferred.defer(_send_ito_agreement_sign_message, agreement_key, app_user, pdf_url, attachment_name, pdf_size)
    deferred.defer(update_investor_progress, email, app_id, INVESTMENT_TODO_MAPPING[agreement.status])
def needs_utility_bill(agreement):
    """Whether a verified utility bill is still required for this agreement.

    Only relevant for EUR/GBP payments, or USD payments not routed via the
    UAE. Users without an uploaded bill are assumed to not need one.
    """
    currency = agreement.currency
    usd_outside_uae = currency == 'USD' and PaymentInfo.UAE not in agreement.payment_info
    if currency not in ('EUR', 'GBP') and not usd_outside_uae:
        return False
    tff_profile = get_tff_profile(agreement.username)
    # not uploaded -> must be someone without a passport -> doesn't need utility bill
    if not tff_profile.kyc.utility_bill_url:
        return False
    return not tff_profile.kyc.utility_bill_verified
def _send_utility_bill_received(app_user):
    """Start the 'utility bill received' flow for the given app user."""
    email, app_id = get_app_user_tuple(app_user)
    members = [MemberTO(member=email.email(), app_id=app_id, alert_flags=0)]
    messaging.start_local_flow(get_mazraa_api_key(), None, members, None, flow=FLOW_UTILITY_BILL_RECEIVED)
def _send_ito_agreement_sign_message(agreement_key, app_user, pdf_url, attachment_name, pdf_size):
    """Send the SIGN widget flow (with the agreement PDF attached) to the user.

    Also schedules sign reminders in case the user does not react: a long
    one after 1 hour and short ones after 3 and 10 days.
    """
    logging.debug('Sending SIGN widget to app user')
    form = SignFormTO(positive_button_ui_flags=Message.UI_FLAG_EXPECT_NEXT_WAIT_5,
                      widget=SignTO(algorithm=KEY_ALGORITHM,
                                    key_name=KEY_NAME,
                                    payload=base64.b64encode(pdf_url).decode('utf-8')))
    attachment = AttachmentTO(content_type=u'application/pdf',
                              download_url=pdf_url,
                              name=attachment_name,
                              size=pdf_size)
    # The tag is echoed back in the flow callback and routes the result to
    # investment_agreement_signed().
    tag = json.dumps({
        u'__rt__.tag': u'sign_investment_agreement',
        u'agreement_id': agreement_key.id()
    }).decode('utf-8')
    flow_params = json.dumps({
        'form': form.to_dict(),
        'attachments': [attachment.to_dict()]
    })
    email, app_id = get_app_user_tuple(app_user)
    members = [MemberTO(member=email.email(), app_id=app_id, alert_flags=0)]
    messaging.start_local_flow(get_mazraa_api_key(), None, members, None, tag=tag,
                               context=None, flow=FLOW_SIGN_INVESTMENT, flow_params=flow_params)
    deferred.defer(_send_sign_investment_reminder, agreement_key.id(), u'long', _countdown=3600, _queue=SCHEDULED_QUEUE)
    deferred.defer(_send_sign_investment_reminder, agreement_key.id(), u'short', _countdown=3 * DAY,
                   _queue=SCHEDULED_QUEUE)
    deferred.defer(_send_sign_investment_reminder, agreement_key.id(), u'short', _countdown=10 * DAY,
                   _queue=SCHEDULED_QUEUE)
def _send_ito_agreement_to_admin(agreement_key, admin_app_user):
    """Send a SIGN form to a payment admin to confirm the agreement was paid.

    The signed payload is the agreement id; the form result is routed by tag
    to investment_agreement_signed_by_admin().
    """
    logging.debug('Sending SIGN widget to payment admin %s', admin_app_user)
    agreement = agreement_key.get()  # type: InvestmentAgreement
    widget = SignTO()
    widget.algorithm = KEY_ALGORITHM
    widget.caption = u'Sign to mark this investment as paid.'
    widget.key_name = KEY_NAME
    widget.payload = base64.b64encode(str(agreement_key.id())).decode('utf-8')
    form = SignFormTO()
    form.negative_button = u'Abort'
    form.negative_button_ui_flags = 0
    form.positive_button = u'Accept'
    form.positive_button_ui_flags = Message.UI_FLAG_EXPECT_NEXT_WAIT_5
    form.type = SignTO.TYPE
    form.widget = widget
    member_user, app_id = get_app_user_tuple(admin_app_user)
    # NOTE(review): the admin-facing text below misses a closing ')' after
    # %(reference)s — cosmetic only, left unchanged here.
    message = u"""Enter your pin code to mark purchase agreement %(investment)s (reference %(reference)s as paid.
- from: %(user)s\n
- amount: %(amount)s %(currency)s
- %(token_count_float)s %(token_type)s tokens
""" % {'investment': agreement.id,
       'user': agreement.username,
       'amount': agreement.amount,
       'currency': agreement.currency,
       'token_count_float': agreement.token_count_float,
       'token_type': agreement.token,
       'reference': agreement.reference}
    messaging.send_form(api_key=get_mazraa_api_key(),
                        parent_message_key=None,
                        member=member_user.email(),
                        message=message,
                        form=form,
                        flags=0,
                        alert_flags=Message.ALERT_FLAG_VIBRATE,
                        branding=get_main_branding_hash(),
                        tag=json.dumps({u'__rt__.tag': u'sign_investment_agreement_admin',
                                        u'agreement_id': agreement_key.id()}).decode('utf-8'),
                        attachments=[],
                        app_id=app_id,
                        step_id=u'sign_investment_agreement_admin')
@returns(FlowMemberResultCallbackResultTO)
@arguments(message_flow_run_id=unicode, member=unicode, steps=[object_factory("step_type", FLOW_STEP_MAPPING)],
           end_id=unicode, end_message_flow_id=unicode, parent_message_key=unicode, tag=unicode, result_key=unicode,
           flush_id=unicode, flush_message_flow_id=unicode, service_identity=unicode, user_details=UserDetailsTO,
           flow_params=unicode)
def investment_agreement_signed(message_flow_run_id, member, steps, end_id, end_message_flow_id, parent_message_key,
                                tag, result_key, flush_id, flush_message_flow_id, service_identity, user_details,
                                flow_params):
    """Flow callback for the user-facing SIGN step of an investment agreement.

    On cancel: marks the agreement CANCELED. On sign: stores the signature,
    marks the agreement SIGNED, schedules role/intercom/progress updates plus
    either the utility-bill or the payment-instructions follow-up, and returns
    the FLOW_INVESTMENT_CONFIRMED flow. Errors yield a generic error message.
    """
    try:
        user_detail = user_details
        tag_dict = json.loads(tag)
        agreement = InvestmentAgreement.create_key(tag_dict['agreement_id']).get()  # type: InvestmentAgreement
        last_step = steps[-1]
        assert isinstance(last_step, FormFlowStepTO)
        if last_step.answer_id != FormTO.POSITIVE:
            logging.info('Investment agreement was canceled')
            agreement.status = InvestmentAgreement.STATUS_CANCELED
            agreement.cancel_time = now()
            agreement.put()
            return None
        logging.info('Received signature for Investment Agreement')
        sign_result = last_step.form_result.result.get_value()
        assert isinstance(sign_result, SignWidgetResultTO)
        iyo_username = get_username(user_detail)
        logging.debug('Storing signature in DB')
        agreement.populate(status=InvestmentAgreement.STATUS_SIGNED,
                           signature=sign_result.payload_signature,
                           sign_time=now())
        agreement.put_async()
        deferred.defer(add_user_to_role, user_detail, RogerthatRoles.INVESTOR)
        intercom_tags = get_intercom_tags_for_investment(agreement)
        if intercom_tags:
            for i_tag in intercom_tags:
                deferred.defer(intercom_helpers.tag_intercom_users, i_tag, [iyo_username])
        deferred.defer(update_investor_progress, user_detail.email, user_detail.app_id,
                       INVESTMENT_TODO_MAPPING[agreement.status])
        deferred.defer(_inform_support_of_new_investment, iyo_username, agreement.id, agreement.token_count_float)
        app_user = create_app_user(users.User(user_detail.email), user_detail.app_id)
        if needs_utility_bill(agreement):
            logging.debug('Sending "utility bill received" message')
            deferred.defer(_send_utility_bill_received, app_user)
        else:
            logging.debug('Sending confirmation message')
            deferred.defer(send_payment_instructions, app_user, agreement.id, '')
            deferred.defer(send_hoster_reminder, agreement.username, _countdown=1)
        result = FlowCallbackResultTypeTO(flow=FLOW_INVESTMENT_CONFIRMED,
                                          tag=None,
                                          force_language=None,
                                          flow_params=json.dumps({'reference': agreement.reference}))
        return FlowMemberResultCallbackResultTO(type=TYPE_FLOW, value=result)
    except Exception:
        # Bug fix / idiom: the bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; only genuine errors should be converted into the
        # generic error message.
        logging.exception('An unexpected error occurred')
        return create_error_message()
@returns(NoneType)
@arguments(status=int, form_result=FormResultTO, answer_id=unicode, member=unicode, message_key=unicode, tag=unicode,
           received_timestamp=int, acked_timestamp=int, parent_message_key=unicode, result_key=unicode,
           service_identity=unicode, user_details=UserDetailsTO)
def investment_agreement_signed_by_admin(status, form_result, answer_id, member, message_key, tag, received_timestamp,
                                         acked_timestamp, parent_message_key, result_key, service_identity,
                                         user_details):
    """Form callback for the admin SIGN message: mark the agreement as PAID.

    Runs inside an ndb transaction; the deferred tasks use _transactional=True
    so they are only enqueued when the transaction commits.
    """
    tag_dict = json.loads(tag)

    def trans():
        agreement = InvestmentAgreement.create_key(tag_dict['agreement_id']).get()  # type: InvestmentAgreement
        if answer_id != FormTO.POSITIVE:
            logging.info('Investment agreement sign aborted')
            return
        if agreement.status == InvestmentAgreement.STATUS_PAID:
            # Idempotency guard: the admin may acknowledge the form twice.
            logging.warn('Ignoring request to set InvestmentAgreement %s as paid because it is already paid',
                         agreement.id)
            return
        agreement.status = InvestmentAgreement.STATUS_PAID
        agreement.paid_time = now()
        agreement.put()
        profile = get_tff_profile(agreement.username)
        user_email, app_id, = get_app_user_tuple(profile.app_user)
        deferred.defer(update_investor_progress, user_email.email(), app_id, INVESTMENT_TODO_MAPPING[agreement.status],
                       _transactional=True)
        deferred.defer(_send_tokens_assigned_message, profile.app_user, _transactional=True)

    ndb.transaction(trans)
@returns(InvestmentAgreement)
@arguments(agreement_id=(int, long), agreement=InvestmentAgreementTO, admin_app_user=users.User)
def put_investment_agreement(agreement_id, agreement, admin_app_user):
    # type: (long, InvestmentAgreement, users.User) -> InvestmentAgreement
    """Admin update of an agreement; only the status can be changed.

    Allowed target statuses: SIGNED (sends a payment-confirmation SIGN form to
    the admin) or CANCELED.

    Raises:
        HttpNotFoundException: unknown agreement id.
        HttpBadRequestException: agreement already canceled, or invalid
            target status.
    """
    agreement_model = InvestmentAgreement.get_by_id(agreement_id)  # type: InvestmentAgreement
    if not agreement_model:
        raise HttpNotFoundException('investment_agreement_not_found')
    if agreement_model.status == InvestmentAgreement.STATUS_CANCELED:
        raise HttpBadRequestException('order_canceled')
    if agreement.status not in (InvestmentAgreement.STATUS_SIGNED, InvestmentAgreement.STATUS_CANCELED):
        raise HttpBadRequestException('invalid_status')
    # Only support updating the status for now
    agreement_model.status = agreement.status
    if agreement_model.status == InvestmentAgreement.STATUS_CANCELED:
        agreement_model.cancel_time = now()
    elif agreement_model.status == InvestmentAgreement.STATUS_SIGNED:
        # NOTE(review): setting paid_time (not sign_time) on a SIGNED
        # transition looks suspicious — confirm this is intentional.
        agreement_model.paid_time = now()
        if agreement.currency == 'BTC':
            _set_token_count(agreement_model, agreement.token_count_float)
        deferred.defer(_send_ito_agreement_to_admin, agreement_model.key, admin_app_user)
    agreement_model.put()
    return agreement_model
def _inform_support_of_new_investment(iyo_username, agreement_id, token_count):
    """Email the support team about a freshly signed purchase agreement."""
    subject = "New purchase agreement signed"
    template_params = {
        "iyo_username": iyo_username,
        "agreement_id": agreement_id,
        'base_url': get_base_url(),
        "token_count_float": token_count,
    }  # noQA
    body = """Hello,
We just received a new purchase agreement (%(agreement_id)s) from %(iyo_username)s for %(token_count_float)s tokens.
Please visit %(base_url)s/investment-agreements/%(agreement_id)s to find more details, and collect all the money!
""" % template_params
    send_emails_to_support(subject, body)
def get_total_token_count(username, agreements):
    """Aggregate `token_count_float` of the given agreements per token symbol.

    Returns a defaultdict mapping token -> summed float token count.
    """
    per_token = defaultdict(lambda: 0)
    for a in agreements:
        per_token[a.token] += a.token_count_float
    logging.debug('%s has the following tokens: %s', username, dict(per_token))
    return per_token
def get_total_investment_value(username):
    """Total USD value of the user's signed and paid agreements."""
    statuses = (InvestmentAgreement.STATUS_PAID, InvestmentAgreement.STATUS_SIGNED)
    total_token_count = get_total_token_count(username, InvestmentAgreement.list_by_status_and_user(username, statuses))
    tokens = total_token_count.keys()
    # Batch-fetch the global stats of every involved token type in one RPC.
    stats = dict(zip(tokens, ndb.get_multi([GlobalStats.create_key(token) for token in tokens])))
    total_usd = 0
    for token, token_count in total_token_count.iteritems():
        total_usd += token_count * stats[token].value
    logging.debug('The tokens of %s are worth $%s', username, total_usd)
    return total_usd
@returns()
@arguments(username=unicode)
def send_hoster_reminder(username):
    """Remind big investors (>= $600) to become a hoster. Temporarily disabled.

    NOTE(review): the dead code below references an undefined `app_user`; it
    must be fixed before re-enabling this function.
    """
    # Temporarily disabled
    return
    if get_total_investment_value(username) >= 600:
        send_rogerthat_flow(app_user, FLOW_HOSTER_REMINDER)
@returns()
@arguments(app_user=users.User, agreement_id=(int, long), message_prefix=unicode, reminder=bool)
def send_payment_instructions(app_user, agreement_id, message_prefix, reminder=False):
    """Message + email the user with transfer details for an agreement.

    The first (non-reminder) call schedules a reminder after 14 days; that
    reminder is skipped when the agreement is no longer in status SIGNED.
    """
    agreement = get_investment_agreement(agreement_id)
    if reminder and agreement.status != InvestmentAgreement.STATUS_SIGNED:
        return
    elif not reminder:
        deferred.defer(send_payment_instructions, app_user, agreement_id, message_prefix, True,
                       _countdown=14 * DAY, _queue=SCHEDULED_QUEUE)
    username = get_username(app_user)
    profile = get_tff_profile(username)
    params = {
        'currency': agreement.currency,
        'reference': agreement.reference,
        'message_prefix': message_prefix,
        'bank_account': get_bank_account_info(agreement.currency, agreement.payment_info,
                                              profile.kyc.utility_bill_verified),
    }
    if agreement.currency == 'BTC':
        # BTC amounts show 8 decimals; fiat amounts show 2.
        params['amount'] = '{:.8f}'.format(agreement.amount)
        params['notes'] = u'Please inform us by email at payments@threefoldtoken.com when you have made payment.'
    else:
        params['amount'] = '{:.2f}'.format(agreement.amount)
        params['notes'] = u'For the attention of ThreeFold FZC, a company incorporated under the laws of Sharjah, ' \
                          u'United Arab Emirates, with registered office at SAIF Zone, SAIF Desk Q1-07-038/B'
    subject = u'ThreeFold payment instructions'
    msg = u"""%(message_prefix)sHere are your payment instructions for the purchase of your ThreeFold Tokens.
Please use the following transfer details:
Amount: %(currency)s %(amount)s
%(bank_account)s
%(notes)s
Please use %(reference)s as reference.""" % params
    send_message_and_email(app_user, msg, subject, get_mazraa_api_key())
def _send_tokens_assigned_message(app_user):
    """Notify the user that his tokens were assigned to his wallet."""
    subject = u'ThreeFold tokens assigned'
    message = 'Dear ThreeFold Member, we have just assigned your tokens to your wallet. ' \
              'It may take up to an hour for them to appear in your wallet. ' \
              '\n\nWe would like to take this opportunity to remind you to have a paper backup of your wallet. ' \
              'You can make such a backup by writing down the 29 words you can use to restore the wallet. ' \
              '\nYou can find these 29 words by going to Settings -> Security -> threefold. ' \
              '\n\nThank you once again for getting on board!'
    send_message_and_email(app_user, message, subject, get_mazraa_api_key())
@arguments(agreement=InvestmentAgreement)
def get_intercom_tags_for_investment(agreement):
    """Intercom tags to apply for a signed or paid agreement; [] otherwise."""
    active_statuses = [InvestmentAgreement.STATUS_PAID, InvestmentAgreement.STATUS_SIGNED]
    if agreement.status not in active_statuses:
        return []
    token = agreement.token
    if token == TOKEN_ITFT:
        return [IntercomTags.ITFT_PURCHASER, IntercomTags.GREENITGLOBE_CONTRACT]
    if token == TOKEN_TFT:
        # todo: In the future (PTO), change ITO_INVESTOR to IntercomTags.TFT_PURCHASER
        return [IntercomTags.BETTERTOKEN_CONTRACT, IntercomTags.ITO_INVESTOR]
    logging.warn('Unknown token %s, not tagging intercom user %s', token, agreement.username)
    return []
@returns()
@arguments(agreement_id=(int, long), message_type=unicode)
def _send_sign_investment_reminder(agreement_id, message_type):
    """Remind the user to sign his purchase agreement.

    Only sent while the agreement is still in status CREATED; `message_type`
    ('long' or 'short') selects the first vs. follow-up reminder text.
    """
    agreement = get_investment_agreement(agreement_id)
    if agreement.status != InvestmentAgreement.STATUS_CREATED:
        return
    if message_type == u'long':
        message = 'Dear ThreeFold Member,\n\n' \
                  'Thank you for joining the ThreeFold Foundation! Your contract has been created and is ready to be signed and processed.\n' \
                  'You can find your created %s Purchase Agreement in your ThreeFold messages.' % agreement.token
    elif message_type == u'short':
        message = 'Dear ThreeFold Member,\n\n' \
                  'It appears that your created %s Purchase Agreement has not been signed yet.' % agreement.token
    else:
        return
    subject = u'Your Purchase Agreement is ready to be signed'
    app_user = get_tff_profile(agreement.username).app_user
    send_message_and_email(app_user, message, subject, get_mazraa_api_key())
# Called after the user his utility bill was approved
def send_signed_investments_messages(app_user):
    """(Re)send payment instructions for each of the user's signed agreements."""
    username = get_username(app_user)
    signed = InvestmentAgreement.list_by_status_and_user(username, InvestmentAgreement.STATUS_SIGNED)
    for agreement in signed:
        deferred.defer(send_payment_instructions, app_user, agreement.id, '')
| |
# -*- coding: utf-8 -*-
"""
drift - API key rules
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Implement api key rules for passing, redirecting or rejecting requests based
on product and version of the client.
"""
from __future__ import absolute_import
import logging
import re
from six.moves import http_client
from six.moves.urllib.parse import urlparse
from flask import request, g, jsonify, make_response, current_app
from driftconfig.relib import ConstraintError
log = logging.getLogger(__name__)
def drift_init_extension(app, **kwargs):
    """Install API key rule checking on the given Flask app.

    Registers a before_request hook that evaluates the configured api key
    rules (pass / reject / redirect) and an after_request hook that applies
    any response headers mandated by the matched rule.
    """
    @app.before_request
    def check_api_key_rules():
        conf = current_app.extensions['driftconfig'].get_config()
        rule = get_api_key_rule(request.headers, request.url, conf)
        if not rule:
            return
        # Update response header in the "after request" callback function further below.
        if 'response_header' in rule:
            g.update_response_header_api_rule = rule['response_header']
        if rule['status_code'] is not None:
            # A non-None status code means the request must be answered here.
            log.info("Blocking request because of api key rule: %s", rule)
            response = make_response(jsonify(rule['response_body']), rule['status_code'])
            return response

    @app.after_request
    def after_request_apply_api_rules(response):
        if hasattr(g, 'update_response_header_api_rule'):
            for k, v in g.update_response_header_api_rule.items():
                response.headers[k] = v
        return response
def get_api_key_rule(request_headers, request_url, conf):
    """Look up and match an api key rule for the request's key/client version.

    Returns None if no rule or action is in effect, else a dict with
    'status_code', 'response_body', 'response_header', 'rule', 'api_key' and
    'api_key_version' entries to use in the http response. If 'status_code'
    is None, the request should be processed further.
    """
    # Apply pass-through rules for legacy keys
    nginx_conf = conf.table_store.get_table('nginx').get(
        {'tier_name': conf.tier['tier_name']})
    if nginx_conf:
        for rule in nginx_conf.get('api_key_passthrough', []):
            key = request_headers.get(rule['key_name'])
            if key and re.match(rule['key_value'], key):
                log.info("Passing on request that contains legacy api key '%s'.", key)
                return

    # We don't require the key to exist in the request header as it is usually
    # enforced by the api router. Some endpoints are actually "keyless". But if the
    # key does exist in the request header, we apply the rules accordingly.
    key = request_headers.get('Drift-Api-Key')
    if not key:
        return

    # The key may carry the client version: "<key>:<version>".
    if ':' in key:
        key, version = key.rsplit(':', 1)
    else:
        version = None

    def retval(status_code=None, message=None, description=None, rule=None):
        # Assemble the response-description dict for a (possibly blocking)
        # rule match; defaults to a 403 Forbidden.
        status_code = status_code or http_client.FORBIDDEN
        message = message or 'Forbidden'
        description = description or message
        response_body = {
            "error": {
                "code": "user_error",
                "description": description
            },
            "message": message,
            "status_code": status_code
        }
        return {
            'status_code': status_code,
            'response_header': {'Content-Type': 'application/json'},
            'response_body': response_body,
            'rule': rule,
            'api_key': key,
            'api_key_version': version,
        }

    # First look up the API key in our config.
    try:
        api_key = conf.table_store.get_table('api-keys').get({'api_key_name': key})
    except ConstraintError:
        return retval(description="API Key format '{}' not recognized.".format(key))
    if not api_key:
        return retval(description="API Key '{}' not found.".format(key))

    # See if the API key is active or not.
    if not api_key['in_use']:
        return retval(description="API Key '{}' is disabled.".format(key))

    # Product keys must point to a product.
    if api_key['key_type'] == 'product' and 'product_name' not in api_key:
        return retval(
            description="API Key type is 'product', but has no reference to any product."
        )

    # If key is not associated with any product, there are no product rules to apply
    # so we are done here.
    if 'product_name' not in api_key:
        return

    # Match the API key to the product/tenant.
    product, tenant = conf.product, conf.tenant
    if not product or not tenant:
        return retval(description="No product or tenant in context.")

    if api_key['product_name'] != product['product_name']:
        return retval(
            description="API Key '{}' is for product '{}'"
            " but current tenant '{}' is on product '{}'.".format(
                key, api_key['product_name'], tenant['tenant_name'], product['product_name']))

    # Now we apply the actual rules for this product, in assignment order.
    rules = conf.table_store.get_table('api-key-rules').find(
        {'product_name': product['product_name'], 'is_active': True})
    rules = sorted(rules, key=lambda rule: rule['assignment_order'])

    for rule in rules:
        patterns = rule['version_patterns']
        is_match = False

        # If no pattern is specified, it effectively means "match always".
        if not patterns:
            is_match = True

        # If the caller provided its version, we use it to match certain rules.
        if version:
            for pattern in patterns:
                if version.startswith(pattern):
                    is_match = True
                    break

        # If the rule did not match the client version, continue to next one.
        if not is_match:
            continue

        ret = retval(rule=rule)
        ret['response_header'].update(rule.get('response_header', {}))

        if rule['rule_type'] == 'pass':
            ret['status_code'] = None  # Signal a pass on this request.
            return ret
        if rule['rule_type'] == 'reject':
            if 'reject' in rule:
                ret['response_body'] = rule['reject']['response_body']
                ret['status_code'] = rule['reject']['status_code']
            else:
                ret['response_body'] = {'message': 'Forbidden'}
                ret['status_code'] = http_client.FORBIDDEN
            return ret
        elif rule['rule_type'] == 'redirect':
            urlparts = urlparse(request_url)
            # Bug fix: urlparts.hostname may be None for malformed URLs, which
            # previously raised TypeError on the `in` test below.
            if not urlparts.hostname or '.' not in urlparts.hostname:
                return retval(
                    status_code=400,
                    message='Bad Request',
                    description="Can't redirect to new tenant when hostname is dotless.",
                    rule=rule,
                )
            current_tenant, domain = urlparts.hostname.split('.', 1)
            redirect = rule['redirect']
            current_hostname = urlparts.hostname
            if 'tenant_name' in redirect:
                new_hostname = redirect['tenant_name'] + '.' + domain
            elif 'host_name' in redirect:
                new_hostname = redirect['host_name']
            else:
                # Bug fix: a redirect rule with neither 'tenant_name' nor
                # 'host_name' previously crashed with an unbound
                # `new_hostname` NameError; skip the misconfigured rule.
                log.warning("Redirect rule %s has neither 'tenant_name' nor 'host_name'.", rule)
                continue

            # See if the host already matches the redirection.
            if current_hostname == new_hostname:
                continue

            url = request_url.replace(urlparts.hostname, new_hostname)
            ret['status_code'] = 307  # Temporary redirect
            ret['response_body'] = {'message': "Redirecting to '{}'.".format(new_hostname)}
            ret['response_header']['Location'] = url
            return ret
| |
import pkgutil
from collections import OrderedDict
from insights.core.dr import SkipComponent
__all__ = [n for (i, n, p) in pkgutil.iter_modules(__path__) if not p]
class ParseException(Exception):
    """
    Exception that should be thrown from parsers that encounter
    exceptions they recognize while parsing. When this exception
    is thrown, the exception message and data are logged and no
    parser output data is saved.
    """
    # Intentionally carries no extra behavior: the type itself is the signal.
    pass
class SkipException(SkipComponent):
    """
    Exception that should be thrown from parsers that are explicitly
    written to look for errors in input data. If the expected error
    is not found then the parser should throw this exception to
    signal to the infrastructure that the parser's output should not be
    retained.

    Subclasses :class:`insights.core.dr.SkipComponent`.
    """
    pass
def get_active_lines(lines, comment_char="#"):
    """
    Returns lines, or parts of lines, from content that are not commented out
    or completely empty. The resulting lines are all individually stripped.

    This is useful for parsing many config files such as ifcfg.

    Parameters:
        lines (list): List of strings to parse.
        comment_char (str): String indicating that all chars following
            are part of a comment and will be removed from the output.

    Returns:
        list: List of valid lines remaining in the input.

    Examples:
        >>> lines = [
        ... 'First line',
        ... '   ',
        ... '# Comment line',
        ... 'Inline comment # comment',
        ... '  Whitespace      ',
        ... 'Last line']
        >>> get_active_lines(lines)
        ['First line', 'Inline comment', 'Whitespace', 'Last line']
    """
    # Bug fix: on Python 3 `filter` returns a lazy iterator, breaking the
    # documented list return type (and the doctest above) and making the
    # result single-use; materialize it into a list.
    return list(filter(None, (line.split(comment_char, 1)[0].strip() for line in lines)))
def optlist_to_dict(optlist, opt_sep=',', kv_sep='=', strip_quotes=False):
    """Parse an option list into a dictionary.

    Takes a list of options separated by ``opt_sep`` and places them into
    a dictionary with the default value of ``True``.  If ``kv_sep`` option
    is specified then key/value options ``key=value`` are parsed.  Useful
    for parsing options such as mount options in the format
    ``rw,ro,rsize=32168,xyz``.

    Parameters:
        optlist (str): String of options to parse.
        opt_sep (str): Separator used to split options.
        kv_sep (str): If not `None` then `optlist` includes key=value pairs
            to be split, and this str is used to split them.
        strip_quotes (bool): If set, will remove matching ``'`` and ``"``
            characters from start and end of line.  No quotes are removed
            from inside the string and mismatched quotes are not removed.

    Returns:
        dict: Returns a dictionary of names present in the list.  If `kv_sep`
        is not `None` then the values will be the str on the right-hand side
        of `kv_sep`.  If `kv_sep` is `None` then each key will have a default
        value of `True`.

    Examples:
        >>> optlist = 'rw,ro,rsize=32168,xyz'
        >>> optlist_to_dict(optlist)
        {'rw': True, 'ro': True, 'rsize': '32168', 'xyz': True}
    """
    def make_kv(opt):
        if kv_sep is not None and kv_sep in opt:
            k, v = opt.split(kv_sep, 1)
            # len(v) > 1 guards two former bugs: an empty value ("key=")
            # raised IndexError on v[0], and a value that was a single
            # quote character was silently stripped to ''.
            if strip_quotes and len(v) > 1 and v[0] in ('"', "'") and v[-1] == v[0]:
                return k, v[1:-1]
            else:
                return k, v
        else:
            return opt, True
    return dict(make_kv(opt) for opt in optlist.split(opt_sep))
def split_kv_pairs(lines, comment_char="#", filter_string=None, split_on="=", use_partition=False, ordered=False):
    """Split lines of a list into key/value pairs.

    Filters and splits all lines of a list of strings into a dictionary.
    Only the first occurrence of ``split_on`` in a line is significant;
    any later occurrences remain part of the value.  :func:`get_active_lines`
    is applied first (unless ``comment_char`` is `None`) to drop comments
    and blank lines.

    Parameters:
        lines (list of str): List of the strings to be split.
        comment_char (str): Char that when present in the line indicates all
            following chars are part of a comment.  If this is present, all
            comments and all blank lines are removed from list before further
            processing.  The default comment char is the `#` character.
        filter_string (str): If the filter string is present, then only lines
            containing the filter will be processed, other lines will be ignored.
        split_on (str): Character to use when splitting a line.  Only the first
            occurrence of the char is used when splitting, so only one split is
            performed at the first occurrence of `split_on`.  The default string is `=`.
        use_partition (bool): If `True` the python `partition` function splits
            the line, so lines without `split_on` are still kept (with an
            empty value).  If `False` (the default) such lines are ignored.
        ordered (bool): If `True` the resulting dictionary preserves input
            order (an `OrderedDict` is returned); otherwise a plain `dict`
            is returned.  The default is `False`.

    Returns:
        dict: Dictionary of the key/value pairs; an ``OrderedDict`` when
        `ordered` is `True`.

    Examples:
        >>> lines = [
        ... '# Comment line',
        ... 'keyword1 = value1   # Inline comments',
        ... 'keyword2 = value2a=True, value2b=100M',
        ... 'keyword3     # Key with no separator']
        >>> split_kv_pairs(lines)
        {'keyword2': 'value2a=True, value2b=100M', 'keyword1': 'value1'}
        >>> split_kv_pairs(lines, use_partition=True)
        {'keyword3': '', 'keyword2': 'value2a=True, value2b=100M', 'keyword1': 'value1'}
    """
    active = lines if comment_char is None else get_active_lines(lines, comment_char=comment_char)
    if filter_string is not None:
        active = [entry for entry in active if filter_string in entry]
    result = OrderedDict() if ordered else {}
    for entry in active:
        if use_partition:
            left, _sep, right = entry.partition(split_on)
        elif split_on in entry:
            left, right = entry.split(split_on, 1)
        else:
            # Without partition, lines lacking the separator are dropped.
            continue
        result[left.strip()] = right.strip()
    return result
def unsplit_lines(lines, cont_char='\\', keep_cont_char=False):
    """Recombine lines having a continuation character at end.

    Generator that joins consecutive lines ending (after right-stripping)
    with ``cont_char`` into a single yielded line.  Joining continues over
    any number of continuation lines until a line without the trailing
    continuation character is reached.

    If the `keep_cont_char` option is set to True, the continuation character
    will be left on the end of the line.  Otherwise, by default, it is removed.

    Parameters:
        lines (list): List of strings to be evaluated.
        cont_char (char): Char to search for at end of line.  Default is ``\\``.
        keep_cont_char (bool): Whether to keep the continuation on the end of
            the line.  Defaults to False, which causes the continuation
            character to be removed.

    Yields:
        line (str): Yields unsplit lines

    Examples:
        >>> lines = ['Line one \\', ' line one part 2', 'Line two']
        >>> list(unsplit_lines(lines))
        ['Line one  line one part 2', 'Line two']
        >>> list(unsplit_lines(lines, keep_cont_char=True))
        ['Line one \\ line one part 2', 'Line two']
    """
    pending = []
    for raw in lines:
        stripped = raw.rstrip()
        if stripped.endswith(cont_char):
            # Accumulate; drop the continuation char unless asked to keep it.
            pending.append(stripped if keep_cont_char else stripped[:-1])
        else:
            yield ''.join(pending) + stripped
            pending = []
    # Input ended mid-continuation: emit what was gathered.
    if pending:
        yield ''.join(pending)
def calc_offset(lines, target, invert_search=False):
    """
    Search for the first line beginning with any string in ``target``.

    If `target` is `None` or an empty list (or its first element is `None`)
    then `0` is returned, so callers need not special-case an empty target.
    Each line is stripped of leading spaces prior to comparison with each
    target, however target is not stripped.  See `parse_fixed_table` in this
    module for sample usage.

    Arguments:
        lines (list): List of strings.
        target (list): List of strings to search for at the beginning of any
            line in lines.
        invert_search (boolean): If `True` the search instead continues until
            the first line NOT matching any target (blank lines are treated
            as matching).  Typically used with `reversed(lines)` to trim
            trailing lines off of a file.

    Returns:
        int: index into the `lines` indicating the location of `target`.  If
        `target` is `None` or an empty string `0` is returned as the offset.
        If `invert_search` is `True` the index returned will point to the line
        after the last target was found.

    Raises:
        ValueError: Exception is raised if `target` string is specified and it
            was not found in the input lines.

    Examples:
        >>> lines = [
        ... '#   ',
        ... 'Warning line',
        ... 'Error line',
        ... '    data 1 line',
        ... '    data 2 line']
        >>> calc_offset(lines, ['data'])
        3
        >>> calc_offset(lines, ['#', 'Warning', 'Error'], invert_search=True)
        3
    """
    if not target or target[0] is None:
        # No target given -- the interesting content starts immediately.
        return 0
    for idx, stripped in enumerate(ln.strip() for ln in lines):
        matched = any(stripped.startswith(t) for t in target)
        if invert_search:
            # Skip blank lines and matching lines; stop at the first other.
            if stripped != '' and not matched:
                return idx
        elif matched:
            return idx
    # Exhausted the input without a hit.
    raise ValueError("Line containing '{}' was not found in table".format(','.join(target)))
def parse_fixed_table(table_lines,
                      heading_ignore=None,
                      header_substitute=None,
                      trailing_ignore=None):
    """
    Function to parse table data containing column headings in the first row and
    data in fixed positions in each remaining row of table data.

    Table columns must not contain spaces within the column name.  Column headings
    are assumed to be left justified and the column data width is the width of the
    heading label plus all whitespace to the right of the label.  This function
    will handle blank columns.

    Arguments:
        table_lines (list): List of strings with the first line containing column
            headings separated by spaces, and the remaining lines containing
            table data in left justified format.
        heading_ignore (list): Optional list of strings to search for at
            beginning of line.  All lines before this line will be ignored.
            If specified then it must be present in the file or `ValueError` will
            be raised.
        header_substitute (list): Optional list of tuples containing
            `(old_string_value, new_string_value)` to be used to modify header
            values.  If whitespace is present in a column it must be replaced with
            non-whitespace characters in order for the table to be parsed correctly.
        trailing_ignore (list): Optional list of strings to look for at the end
            rows of the content.  Lines starting with these strings will be ignored,
            thereby truncating the rows of data.

    Returns:
        list: Returns a list of dict for each row of column data.  Dict keys
        are the column headings in the same case as input.

    Raises:
        ValueError: Raised if `heading_ignore` is specified and not found in `table_lines`.

    Sample input::

        Column1    Column2    Column3
        data1      data 2     data   3
        data4      data5      data6

    Examples:
        >>> table_data = parse_fixed_table(table_lines)
        >>> table_data
        [{'Column1': 'data1', 'Column2': 'data 2', 'Column3': 'data   3'},
         {'Column1': 'data4', 'Column2': 'data5', 'Column3': 'data6'}]
    """
    # Mutable-default-argument fix: the former `=[]` defaults were shared
    # across calls; normalize None here instead.
    heading_ignore = heading_ignore or []
    trailing_ignore = trailing_ignore or []
    first_line = calc_offset(table_lines, heading_ignore)
    try:
        last_line = len(table_lines) - calc_offset(reversed(table_lines),
                                                   trailing_ignore,
                                                   invert_search=True)
    except ValueError:
        last_line = len(table_lines)
    header = table_lines[first_line]
    if header_substitute:
        for old_val, new_val in header_substitute:
            header = header.replace(old_val, new_val)
    col_headers = header.strip().split()
    # Locate each heading starting after the previous one; a plain
    # header.index(c) would find the wrong column when a heading repeats
    # or is a substring of earlier header text.
    col_index = []
    search_start = 0
    for col in col_headers:
        pos = header.index(col, search_start)
        col_index.append(pos)
        search_start = pos + len(col)
    table_data = []
    for line in table_lines[first_line + 1:last_line]:
        col_data = dict(
            (col_headers[c], line[col_index[c]:col_index[c + 1]].strip())
            for c in range(len(col_index) - 1)
        )
        # Last column runs to the end of the line.
        col_data[col_headers[-1]] = line[col_index[-1]:].strip()
        table_data.append(col_data)
    return table_data
def parse_delimited_table(table_lines,
                          delim=None,
                          max_splits=-1,
                          strip=True,
                          header_delim='same as delimiter',
                          heading_ignore=None,
                          header_substitute=None,
                          trailing_ignore=None):
    """
    Parses table-like text.  Uses the first (non-ignored) row as the list of
    column names, which cannot contain the delimiter.  Fields cannot contain
    the delimiter but can be blank if a printable delimiter is used.

    Arguments:
        table_lines (list): List of strings with the first line containing
            column headings separated by spaces, and the remaining lines
            containing table data.
        delim (str): String used in the content to separate fields.  If left
            as None (the default), white space is used as the field separator.
        max_splits (int): Maximum number of fields to create by splitting the
            line.  After this number of fields has been found, the rest of the
            line is left un-split and may contain the delimiter.  Lines may
            contain less than this number of fields.
        strip (bool): If set to `True`, fields and headings will be stripped
            of leading and trailing space.  If set to `False`, fields and
            headings will be left as is.  The delimiter is always removed, so
            strip need not be set if `delim` is set to None (but will not
            change output in that case).
        header_delim (str): When set, uses a different delimiter to the
            content for splitting the header into keywords.  Set to `None`,
            this will split on white space.  When left at the special value
            of `'same as delimiter'`, the content delimiter will be used to
            split the header line as well.
        heading_ignore (list): Optional list of strings to search for at
            beginning of line.  All lines before this line will be ignored.
            If specified then it must be present in the file or `ValueError`
            will be raised.
        header_substitute (list): Optional list of tuples containing
            `(old_string_value, new_string_value)` to be used to modify
            header values.  If whitespace is present in a column it must be
            replaced with non-whitespace characters in order for the table to
            be parsed correctly.
        trailing_ignore (list): Optional list of strings to look for at the
            end rows of the content.  Lines starting with these strings will
            be ignored, thereby truncating the rows of data.

    Returns:
        list: Returns a list of dictionaries for each row of column data,
        keyed on the column headings in the same case as input.
    """
    if not table_lines:
        return []
    first_line = calc_offset(table_lines, heading_ignore)
    try:
        # Ignore everything before the heading in this search.
        trailing_offset = calc_offset(
            reversed(table_lines[first_line + 1:]), trailing_ignore,
            invert_search=True)
    except ValueError:
        # Ran out of content before finding anything wanted -- no data.
        return []
    last_line = len(table_lines) - trailing_offset
    if header_delim == 'same as delimiter':
        header_delim = delim
    header = table_lines[first_line]
    for old_val, new_val in (header_substitute or []):
        header = header.replace(old_val, new_val)
    headings = [col.strip() if strip else col for col in header.split(header_delim)]
    parsed_rows = []
    for raw in table_lines[first_line + 1:last_line]:
        raw = raw.strip()
        if not raw:
            # Blank data rows carry no fields.
            continue
        fields = raw.split(delim, max_splits)
        if strip:
            fields = [field.strip() for field in fields]
        parsed_rows.append(dict(zip(headings, fields)))
    return parsed_rows
def keyword_search(rows, **kwargs):
    """
    Takes a list of dictionaries and finds all the dictionaries where the
    keys and values match those found in the keyword arguments.

    Keys in the row data have ' ' and '-' replaced with '_', so they can
    match the keyword argument parsing.  For example, the keyword argument
    'fix_up_path' will match a key named 'fix-up path'.

    In addition, several suffixes can be added to the key name to do partial
    matching of values:

    * '__contains' will test whether the data value contains the given
      value.
    * '__startswith' tests if the data value starts with the given value
    * '__lower_value' compares the lower-case version of the data and given
      values.

    Arguments:
        rows (list): A list of dictionaries representing the data to be
            searched.
        **kwargs (dict): keyword-value pairs corresponding to the fields that
            need to be found and their required values in the data rows.

    Returns:
        (list): The list of rows that match the search keywords.  If no
        keyword arguments are given, no rows are returned.

    Examples:
        >>> rows = [
        ...     {'domain': 'oracle', 'type': 'soft', 'item': 'nofile', 'value': 1024},
        ...     {'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}]
        >>> keyword_search(rows, domain='root')
        [{'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}]
    """
    results = []
    if not kwargs:
        return results

    # Value comparison functions, selected by a '__suffix' on the key.
    matchers = {
        'default': lambda s, v: s == v,
        'contains': lambda s, v: v in s,
        'startswith': lambda s, v: s.startswith(v),
        'lower_value': lambda s, v: s.lower() == v.lower(),
    }

    # Resolve each keyword argument to (field, matcher, value) once up
    # front, instead of re-parsing the key for every row.  An unknown
    # suffix is treated as part of the field name, as before.
    searches = []
    for key, value in kwargs.items():
        matcher_fn = matchers['default']
        if '__' in key:
            field, matcher = key.split('__', 1)
            if matcher in matchers:
                matcher_fn = matchers[matcher]
                key = field
        searches.append((key, matcher_fn, value))

    for row in rows:
        # Normalise the row's keys once per row (was once per row per
        # keyword) so ' ' and '-' in data keys match '_' in kwargs.
        # .items() replaces Python-2-only .iteritems(), which raises
        # AttributeError on Python 3.
        norm = dict(
            (k.replace(' ', '_').replace('-', '_'), v)
            for k, v in row.items()
        )
        if all(field in norm and match(norm[field], value)
               for field, match, value in searches):
            results.append(row)
    return results
| |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
import operator
from oslo_utils import units
from django.template.defaultfilters import filesizeformat # noqa
from django.utils.text import normalize_newlines # noqa
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from django.views.decorators.debug import sensitive_variables # noqa
from horizon import exceptions
from horizon import forms
from horizon.utils import functions
from horizon.utils import memoized
from horizon.utils import validators
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.api import base
from openstack_dashboard.api import cinder
from openstack_dashboard.api import nova
from openstack_dashboard.usage import quotas
from openstack_dashboard.dashboards.project.images \
import utils as image_utils
from openstack_dashboard.dashboards.project.instances \
import utils as instance_utils
from ..course import Course
from ..course import CourseHelper
LOG = logging.getLogger(__name__)
class SelectProjectUserAction(workflows.Action):
    """Hidden action recording which project and user launch the instance.

    Choices are populated from the request's own credentials, so there is
    effectively nothing for the user to choose; the step exists for
    validation/verification purposes only (see ``Meta.permissions``).
    """
    project_id = forms.ChoiceField(label=_("Project"))
    user_id = forms.ChoiceField(label=_("User"))

    def __init__(self, request, *args, **kwargs):
        super(SelectProjectUserAction, self).__init__(request, *args, **kwargs)
        # Set our project choices from the tenants this user may access.
        projects = [(tenant.id, tenant.name)
                    for tenant in request.user.authorized_tenants]
        self.fields['project_id'].choices = projects
        # Set our user options -- only the requesting user is offered.
        users = [(request.user.id, request.user.username)]
        self.fields['user_id'].choices = users

    class Meta(object):
        name = _("Project & User")
        # Unusable permission so this is always hidden. However, we
        # keep this step in the workflow for validation/verification purposes.
        permissions = ("!",)
class SelectProjectUser(workflows.Step):
    """Workflow step wrapping ``SelectProjectUserAction``; contributes the
    launching project and user ids to the workflow context."""
    action_class = SelectProjectUserAction
    contributes = ("project_id", "user_id")
class SetInstanceDetailsAction(workflows.Action):
    """Workflow action collecting the core launch parameters.

    Gathers availability zone, flavor and the boot source (image, instance
    snapshot, volume or volume snapshot) together with the volume settings
    for the new instance, and validates that the chosen source and flavor
    are mutually consistent.
    """
    availability_zone = forms.ChoiceField(label=_("Availability Zone"),
                                          required=False)
    flavor = forms.ChoiceField(label=_("Flavor"),
                               help_text=_("Size of image to launch."))
    source_type = forms.ChoiceField(label=_("Instance Boot Source"),
                                    help_text=_("Choose Your Boot Source "
                                                "Type."))
    instance_snapshot_id = forms.ChoiceField(label=_("Instance Snapshot"),
                                             required=False)
    volume_id = forms.ChoiceField(label=_("Volume"), required=False)
    volume_snapshot_id = forms.ChoiceField(label=_("Volume Snapshot"),
                                           required=False)
    image_id = forms.ChoiceField(
        label=_("Image Name"),
        required=False,
        widget=forms.SelectWidget(
            data_attrs=('volume_size',),
            transform=lambda x: ("%s (%s)" % (x.name,
                                              filesizeformat(x.bytes)))))
    volume_size = forms.IntegerField(label=_("Device size (GB)"),
                                     initial=1,
                                     min_value=0,
                                     required=False,
                                     help_text=_("Volume size in gigabytes "
                                                 "(integer value)."))
    device_name = forms.CharField(label=_("Device Name"),
                                  required=False,
                                  initial="vda",
                                  help_text=_("Volume mount point (e.g. 'vda' "
                                              "mounts at '/dev/vda'). Leave "
                                              "this field blank to let the "
                                              "system choose a device name "
                                              "for you."))
    delete_on_terminate = forms.BooleanField(label=_("Delete on Terminate"),
                                             initial=False,
                                             required=False,
                                             help_text=_("Delete volume on "
                                                         "instance terminate"))

    class Meta(object):
        name = _("Details")
        help_text_template = ("prof/courses/"
                              "_launch_details_help.html")

    def __init__(self, request, context, *args, **kwargs):
        """Initialise field choices and stash the requested course.

        Also hides the device-name field when the hypervisor cannot set a
        mount point, and restricts the boot-source choices to image and
        instance snapshot.
        """
        self._init_images_cache()
        self.request = request
        self.context = context
        super(SetInstanceDetailsAction, self).__init__(
            request, context, *args, **kwargs)
        # NOTE(review): leftover debug output; Python-2-only print
        # statement -- should be LOG.debug() or removed.
        print "COURSE TO SESSION"
        # Remember the 'course' query parameter in the session so later
        # requests can read it back.
        tmp = self.request.GET.get('course', None)
        # NOTE(review): prefer "tmp is not None" over "!= None".
        if (tmp != None):
            request.session['course'] = tmp
        # Hide the device field if the hypervisor doesn't support it.
        if not nova.can_set_mount_point():
            self.fields['device_name'].widget = forms.widgets.HiddenInput()
        source_type_choices = [
            ('', _("Select source")),
            ("image_id", _("Boot from image")),
            ("instance_snapshot_id", _("Boot from snapshot")),
        ]
        self.fields['source_type'].choices = source_type_choices

    @memoized.memoized_method
    def _get_flavor(self, flavor_id):
        """Return the flavor object for ``flavor_id`` or None if unknown."""
        try:
            # We want to retrieve details for a given flavor,
            # however flavor_list uses a memoized decorator
            # so it is used instead of flavor_get to reduce the number
            # of API calls.
            flavors = instance_utils.flavor_list(self.request)
            flavor = [x for x in flavors if x.id == flavor_id][0]
        except IndexError:
            flavor = None
        return flavor

    @memoized.memoized_method
    def _get_image(self, image_id):
        """Return the image object for ``image_id`` or None if unknown."""
        try:
            # We want to retrieve details for a given image,
            # however get_available_images uses a cache of image list,
            # so it is used instead of image_get to reduce the number
            # of API calls.
            images = image_utils.get_available_images(
                self.request,
                self.context.get('project_id'),
                self._images_cache)
            image = [x for x in images if x.id == image_id][0]
        except IndexError:
            image = None
        return image

    def _check_flavor_for_image(self, cleaned_data):
        """Flag an error when the chosen flavor is too small for the image."""
        # Prevents trying to launch an image needing more resources.
        image_id = cleaned_data.get('image_id')
        image = self._get_image(image_id)
        flavor_id = cleaned_data.get('flavor')
        flavor = self._get_flavor(flavor_id)
        if not image or not flavor:
            return
        props_mapping = (("min_ram", "ram"), ("min_disk", "disk"))
        for iprop, fprop in props_mapping:
            if getattr(image, iprop) > 0 and \
                    getattr(image, iprop) > getattr(flavor, fprop):
                msg = (_("The flavor '%(flavor)s' is too small "
                         "for requested image.\n"
                         "Minimum requirements: "
                         "%(min_ram)s MB of RAM and "
                         "%(min_disk)s GB of Root Disk.") %
                       {'flavor': flavor.name,
                        'min_ram': image.min_ram,
                        'min_disk': image.min_disk})
                self._errors['image_id'] = self.error_class([msg])
                break  # Not necessary to continue the tests.

    def _check_volume_for_image(self, cleaned_data):
        """Flag an error when the requested volume is too small for the image."""
        image_id = cleaned_data.get('image_id')
        image = self._get_image(image_id)
        volume_size = cleaned_data.get('volume_size')
        if not image or not volume_size:
            return
        volume_size = int(volume_size)
        img_gigs = functions.bytes_to_gigabytes(image.size)
        smallest_size = max(img_gigs, image.min_disk)
        if volume_size < smallest_size:
            msg = (_("The Volume size is too small for the"
                     " '%(image_name)s' image and has to be"
                     " greater than or equal to "
                     "'%(smallest_size)d' GB.") %
                   {'image_name': image.name,
                    'smallest_size': smallest_size})
            self._errors['volume_size'] = self.error_class([msg])

    def _check_source_image(self, cleaned_data):
        """Validate a boot-from-image selection."""
        if not cleaned_data.get('image_id'):
            msg = _("You must select an image.")
            self._errors['image_id'] = self.error_class([msg])
        else:
            self._check_flavor_for_image(cleaned_data)

    def _check_source_volume_image(self, cleaned_data):
        """Validate a boot-from-image-to-volume selection."""
        volume_size = self.data.get('volume_size', None)
        if not volume_size:
            msg = _("You must set volume size")
            self._errors['volume_size'] = self.error_class([msg])
        if float(volume_size) <= 0:
            msg = _("Volume size must be greater than 0")
            self._errors['volume_size'] = self.error_class([msg])
        if not cleaned_data.get('image_id'):
            msg = _("You must select an image.")
            self._errors['image_id'] = self.error_class([msg])
            return
        else:
            self._check_flavor_for_image(cleaned_data)
            self._check_volume_for_image(cleaned_data)

    def _check_source_instance_snapshot(self, cleaned_data):
        """Validate a boot-from-instance-snapshot selection."""
        # using the array form of get blows up with KeyError
        # if instance_snapshot_id is nil
        if not cleaned_data.get('instance_snapshot_id'):
            msg = _("You must select a snapshot.")
            self._errors['instance_snapshot_id'] = self.error_class([msg])

    def _check_source_volume(self, cleaned_data):
        """Validate a boot-from-volume selection."""
        if not cleaned_data.get('volume_id'):
            msg = _("You must select a volume.")
            self._errors['volume_id'] = self.error_class([msg])

    def _check_source_volume_snapshot(self, cleaned_data):
        """Validate a boot-from-volume-snapshot selection."""
        if not cleaned_data.get('volume_snapshot_id'):
            msg = _("You must select a snapshot.")
            self._errors['volume_snapshot_id'] = self.error_class([msg])

    def _check_source(self, cleaned_data):
        """Dispatch to the validator matching the selected source type."""
        # Validate our instance source.
        source_type = self.data.get('source_type', None)
        source_check_methods = {
            'image_id': self._check_source_image,
            'volume_image_id': self._check_source_volume_image,
            'instance_snapshot_id': self._check_source_instance_snapshot,
            'volume_id': self._check_source_volume,
            'volume_snapshot_id': self._check_source_volume_snapshot
        }
        check_method = source_check_methods.get(source_type)
        if check_method:
            check_method(cleaned_data)

    def clean(self):
        """Standard Django form clean hook; adds source validation."""
        cleaned_data = super(SetInstanceDetailsAction, self).clean()
        self._check_source(cleaned_data)
        return cleaned_data

    def populate_flavor_choices(self, request, context):
        """Return the selectable flavors for the ``flavor`` field."""
        return instance_utils.flavor_field_data(request, False)

    def populate_availability_zone_choices(self, request, context):
        """Return the available zones, with a sensible default first entry."""
        try:
            zones = api.nova.availability_zone_list(request)
        except Exception:
            zones = []
            exceptions.handle(request,
                              _('Unable to retrieve availability zones.'))
        zone_list = [(zone.zoneName, zone.zoneName)
                     for zone in zones if zone.zoneState['available']]
        zone_list.sort()
        if not zone_list:
            zone_list.insert(0, ("", _("No availability zones found")))
        elif len(zone_list) > 1:
            zone_list.insert(0, ("", _("Any Availability Zone")))
        return zone_list

    def get_help_text(self, extra_context=None):
        """Augment the help template context with quota, flavor and image
        data (serialised as JSON for the client-side helpers)."""
        extra = {} if extra_context is None else dict(extra_context)
        try:
            extra['usages'] = api.nova.tenant_absolute_limits(self.request)
            extra['usages_json'] = json.dumps(extra['usages'])
            flavors = json.dumps([f._info for f in
                                  instance_utils.flavor_list(self.request)])
            extra['flavors'] = flavors
            images = image_utils.get_available_images(
                self.request, self.initial['project_id'], self._images_cache)
            if images is not None:
                attrs = [{'id': i.id,
                          'min_disk': getattr(i, 'min_disk', 0),
                          'min_ram': getattr(i, 'min_ram', 0),
                          'size': functions.bytes_to_gigabytes(i.size)}
                         for i in images]
                extra['images'] = json.dumps(attrs)
        except Exception:
            exceptions.handle(self.request,
                              _("Unable to retrieve quota information."))
        return super(SetInstanceDetailsAction, self).get_help_text(extra)

    def _init_images_cache(self):
        """Lazily create the shared image cache used by the populate/help
        methods to avoid repeated glance calls."""
        if not hasattr(self, '_images_cache'):
            self._images_cache = {}

    def _get_volume_display_name(self, volume):
        """Return a ``(choice_value, label)`` pair for a volume or snapshot;
        the value encodes the kind as '<id>:vol' or '<id>:snap'."""
        if hasattr(volume, "volume_id"):
            vol_type = "snap"
            visible_label = _("Snapshot")
        else:
            vol_type = "vol"
            visible_label = _("Volume")
        return (("%s:%s" % (volume.id, vol_type)),
                (_("%(name)s - %(size)s GB (%(label)s)") %
                 {'name': volume.name,
                  'size': volume.size,
                  'label': visible_label}))

    def populate_image_id_choices(self, request, context):
        """Return the selectable images, annotated with their minimum
        bootable volume size; may also seed ``context['volume_size']``."""
        choices = []
        images = image_utils.get_available_images(request,
                                                  context.get('project_id'),
                                                  self._images_cache)
        for image in images:
            image.bytes = image.virtual_size or image.size
            image.volume_size = max(
                image.min_disk, functions.bytes_to_gigabytes(image.bytes))
            choices.append((image.id, image))
            if context.get('image_id') == image.id and \
                    'volume_size' not in context:
                context['volume_size'] = image.volume_size
        if choices:
            choices.sort(key=lambda c: c[1].name)
            choices.insert(0, ("", _("Select Image")))
        else:
            choices.insert(0, ("", _("No images available")))
        return choices

    def populate_instance_snapshot_id_choices(self, request, context):
        """Return the selectable instance snapshots (images flagged with
        image_type == 'snapshot')."""
        images = image_utils.get_available_images(request,
                                                  context.get('project_id'),
                                                  self._images_cache)
        choices = [(image.id, image.name)
                   for image in images
                   if image.properties.get("image_type", '') == "snapshot"]
        if choices:
            choices.sort(key=operator.itemgetter(1))
            choices.insert(0, ("", _("Select Instance Snapshot")))
        else:
            choices.insert(0, ("", _("No snapshots available")))
        return choices

    def populate_volume_id_choices(self, request, context):
        """Return the bootable, available volumes (empty when the volume
        service is disabled or unreachable)."""
        volumes = []
        try:
            if base.is_service_enabled(request, 'volume'):
                available = api.cinder.VOLUME_STATE_AVAILABLE
                volumes = [self._get_volume_display_name(v)
                           for v in cinder.volume_list(self.request,
                           search_opts=dict(status=available, bootable=1))]
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve list of volumes.'))
        if volumes:
            volumes.insert(0, ("", _("Select Volume")))
        else:
            volumes.insert(0, ("", _("No volumes available")))
        return volumes

    def populate_volume_snapshot_id_choices(self, request, context):
        """Return the available volume snapshots (empty when the volume
        service is disabled or unreachable)."""
        snapshots = []
        try:
            if base.is_service_enabled(request, 'volume'):
                available = api.cinder.VOLUME_STATE_AVAILABLE
                snapshots = [self._get_volume_display_name(s)
                             for s in cinder.volume_snapshot_list(
                             self.request, search_opts=dict(status=available))]
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve list of volume '
                                'snapshots.'))
        if snapshots:
            snapshots.insert(0, ("", _("Select Volume Snapshot")))
        else:
            snapshots.insert(0, ("", _("No volume snapshots available")))
        return snapshots
class SetInstanceDetails(workflows.Step):
    """Workflow step wrapping ``SetInstanceDetailsAction``.

    Translates the action's form data into the generic ``source_type`` /
    ``source_id`` context keys (and back) so later steps can be agnostic
    about which boot source was chosen.
    """
    action_class = SetInstanceDetailsAction
    depends_on = ("project_id", "user_id")
    contributes = ("source_type", "source_id",
                   "availability_zone", "name", "count", "flavor",
                   "device_name",  # Can be None for an image.
                   "delete_on_terminate")

    def prepare_action_context(self, request, context):
        """Mirror a pre-selected source into its type-specific field key."""
        if 'source_type' in context and 'source_id' in context:
            context[context['source_type']] = context['source_id']
        return context

    def contribute(self, data, context):
        """Fold the action's cleaned data into the workflow context."""
        context = super(SetInstanceDetails, self).contribute(data, context)
        # Allow setting the source dynamically: copy source_id under the
        # type-specific key when that key has not been set yet.
        if "source_type" in context and "source_id" in context:
            type_key = context["source_type"]
            if type_key not in context:
                context[type_key] = context["source_id"]
        # Translate form input to context for source values.  Both image
        # source types read from the image_id form field.
        if "source_type" in data:
            chosen = data["source_type"]
            if chosen in ("image_id", "volume_image_id"):
                context["source_id"] = data.get("image_id")
            else:
                context["source_id"] = data.get(chosen)
        if "volume_size" in data:
            context["volume_size"] = data["volume_size"]
        return context
class SetNetworkAction(workflows.Action):
    """Workflow action selecting the networks (and, when supported, the
    port policy profile) for the new instance."""
    network = forms.MultipleChoiceField(label=_("Networks"),
                                        widget=forms.CheckboxSelectMultiple(),
                                        error_messages={
                                            'required': _(
                                                "At least one network must"
                                                " be specified.")},
                                        help_text=_("Launch instance with"
                                                    " these networks"))
    # Evaluated once at class-definition (import) time: the profile field
    # is rendered hidden when port profiles are not supported.
    if api.neutron.is_port_profiles_supported():
        widget = None
    else:
        widget = forms.HiddenInput()
    profile = forms.ChoiceField(label=_("Policy Profiles"),
                                required=False,
                                widget=widget,
                                help_text=_("Launch instance with "
                                            "this policy profile"))

    def __init__(self, request, *args, **kwargs):
        """Pre-select the only network when there is exactly one choice and
        populate the profile choices when port profiles are supported."""
        super(SetNetworkAction, self).__init__(request, *args, **kwargs)
        network_list = self.fields["network"].choices
        if len(network_list) == 1:
            self.fields['network'].initial = [network_list[0][0]]
        if api.neutron.is_port_profiles_supported():
            self.fields['profile'].choices = (
                self.get_policy_profile_choices(request))

    class Meta(object):
        name = _("Networking")
        permissions = ('openstack.services.network',)
        help_text = _("Select networks for your instance.")

    def populate_network_choices(self, request, context):
        """Return the selectable networks for the ``network`` field."""
        return instance_utils.network_field_data(request)

    def get_policy_profile_choices(self, request):
        """Return ``(id, name)`` choices for the policy profile field,
        prefixed with a blank 'Select a profile' entry."""
        profile_choices = [('', _("Select a profile"))]
        for profile in self._get_profiles(request, 'policy'):
            profile_choices.append((profile.id, profile.name))
        return profile_choices

    def _get_profiles(self, request, type_p):
        """Fetch neutron profiles of kind ``type_p``; empty list (with a
        user-visible error) when the call fails."""
        profiles = []
        try:
            profiles = api.neutron.profile_list(request, type_p)
        except Exception:
            msg = _('Network Profiles could not be retrieved.')
            exceptions.handle(request, msg)
        return profiles
class SetNetwork(workflows.Step):
    """Workflow step wrapping SetNetworkAction; contributes the chosen
    network ids (and profile id when port profiles are supported)."""
    action_class = SetNetworkAction
    # Disabling the template drag/drop only in the case port profiles
    # are used till the issue with the drag/drop affecting the
    # profile_id detection is fixed.
    # NOTE: evaluated once at class-definition (import) time.
    if api.neutron.is_port_profiles_supported():
        contributes = ("network_id", "profile_id",)
    else:
        template_name = "project/instances/_update_networks.html"
        contributes = ("network_id",)
    def contribute(self, data, context):
        # Read the selected networks straight from the raw POST data rather
        # than the cleaned form data.
        if data:
            networks = self.workflow.request.POST.getlist("network")
            # If no networks are explicitly specified, network list
            # contains an empty string, so remove it.
            networks = [n for n in networks if n != '']
            if networks:
                context['network_id'] = networks
            if api.neutron.is_port_profiles_supported():
                context['profile_id'] = data.get('profile', None)
        return context
class LaunchInstance(workflows.Workflow):
    """Workflow that starts all instances of a course via CourseHelper."""
    slug = "launch_instance"
    name = _("Instanzen starten")
    finalize_button_name = _("Starten")
    success_message = _('Instanzen gestartet.')
    failure_message = _('Beim Starten der Instanzen ist ein Fehler aufgetreten.')
    success_url = "horizon:project:instances:index"
    multipart = True
    default_steps = (SelectProjectUser,
                     SetInstanceDetails,
                     SetNetwork)

    @sensitive_variables('context')
    def handle(self, request, context):
        """Launch the course's instances from the finished workflow context.

        Returns True on success; on failure the exception is handled and
        False is returned.

        BUG FIX: removed leftover debug ``print`` statements that dumped the
        full (sensitive) workflow context to stdout; also dropped the unused
        ``custom_script`` local.
        """
        dev_mapping_1 = None
        dev_mapping_2 = None
        image_id = ''

        # Determine volume mapping options.
        # NOTE(review): dev_mapping_1/2, nics and avail_zone computed below
        # are currently not passed to CourseHelper.startInstances() -- confirm
        # whether they are still required.
        source_type = context.get('source_type', None)
        if source_type in ['image_id', 'instance_snapshot_id']:
            image_id = context['source_id']
        elif source_type in ['volume_id', 'volume_snapshot_id']:
            dev_mapping_1 = {context['device_name']:
                             '%s::%s' %
                             (context['source_id'],
                              int(bool(context['delete_on_terminate'])))}
        elif source_type == 'volume_image_id':
            device_name = context.get('device_name', '').strip() or None
            dev_mapping_2 = [
                {'device_name': device_name,  # None auto-selects device
                 'source_type': 'image',
                 'destination_type': 'volume',
                 'delete_on_termination':
                     int(bool(context['delete_on_terminate'])),
                 'uuid': context['source_id'],
                 'boot_index': '0',
                 'volume_size': context['volume_size']
                 }
            ]

        netids = context.get('network_id', None)
        if netids:
            nics = [{"net-id": netid, "v4-fixed-ip": ""}
                    for netid in netids]
        else:
            nics = None

        avail_zone = context.get('availability_zone', None)

        if api.neutron.is_port_profiles_supported():
            nics = self.set_network_port_profiles(request,
                                                  context['network_id'],
                                                  context['profile_id'])

        try:
            helper = CourseHelper(request.user)
            # Start all instances for a course.
            helper.startInstances(courseId=request.session['course'],
                                  imageId=image_id,
                                  flavorId=context['flavor'])
            return True
        except Exception:
            exceptions.handle(request)
            return False

    def set_network_port_profiles(self, request, net_ids, profile_id):
        """Create one port per network with the given policy profile.

        For plugins supporting port profiles; returns the list of
        ``{"port-id": ...}`` dicts to use as NICs.
        """
        nics = []
        for net_id in net_ids:
            # BUG FIX: initialize port so the `if port:` check below cannot
            # raise NameError when port_create fails on the first iteration.
            port = None
            try:
                port = api.neutron.port_create(
                    request,
                    net_id,
                    policy_profile_id=profile_id,
                )
            except Exception as e:
                msg = (_('Unable to create port for profile '
                         '"%(profile_id)s": %(reason)s'),
                       {'profile_id': profile_id,
                        'reason': e})
                # Roll back ports created so far before reporting the error.
                for nic in nics:
                    try:
                        port_id = nic['port-id']
                        api.neutron.port_delete(request, port_id)
                    except Exception:
                        msg = (msg +
                               _(' Also failed to delete port %s') % port_id)
                redirect = self.success_url
                exceptions.handle(request, msg, redirect=redirect)
            if port:
                nics.append({"port-id": port.id})
                LOG.debug("Created Port %(portid)s with "
                          "network %(netid)s "
                          "policy profile %(profile_id)s",
                          {'portid': port.id,
                           'netid': net_id,
                           'profile_id': profile_id})
        return nics
| |
#!/usr/bin/env python
import random, os.path
#import basic pygame modules
import pygame
from pygame.locals import *
#see if we can load more than standard BMP
if not pygame.image.get_extended():
    raise SystemExit("Sorry, extended image module required")
#game constants
MAX_SHOTS = 2 #most player bullets onscreen
ALIEN_ODDS = 22 #chances a new alien appears
BOMB_ODDS = 60 #chances a new bomb will drop
ALIEN_RELOAD = 12 #frames between new aliens
SCREENRECT = Rect(0, 0, 640, 480) #game window / play-field bounds
SCORE = 0 #global score, updated in main() and rendered by Score
main_dir = os.path.split(os.path.abspath(__file__))[0] #directory of this script
def load_image(file):
    "loads an image, prepares it for play"
    path = os.path.join(main_dir, 'data', file)
    try:
        surface = pygame.image.load(path)
    except pygame.error:
        raise SystemExit('Could not load image "%s" %s'%(path, pygame.get_error()))
    # convert() matches the display format for fast blitting
    return surface.convert()
def load_images(*files):
    "load a batch of images via load_image"
    return [load_image(name) for name in files]
class dummysound:
    """Silent stand-in used when the pygame mixer is unavailable."""
    def play(self):
        pass
def load_sound(file):
    "load a sound effect, falling back to a silent dummy on failure"
    if not pygame.mixer:
        return dummysound()
    path = os.path.join(main_dir, 'data', file)
    try:
        return pygame.mixer.Sound(path)
    except pygame.error:
        print ('Warning, unable to load, %s' % path)
        return dummysound()
# each type of game object gets an init and an
# update function. the update function is called
# once per frame, and it is when each object should
# change its current position and state. the Player
# object actually gets a "move" function instead of
# update, since it is passed extra information about
# the keyboard
class Player(pygame.sprite.Sprite):
    """The player's ship: moves horizontally along the bottom of the screen."""
    speed = 10       # horizontal pixels per frame
    bounce = 24      # pixels of travel per vertical "bob" step
    gun_offset = -11 # horizontal muzzle offset from the sprite center
    images = []      # filled in by main() before any Player is created
    def __init__(self):
        pygame.sprite.Sprite.__init__(self, self.containers)
        self.image = self.images[0]
        self.rect = self.image.get_rect(midbottom=SCREENRECT.midbottom)
        self.reloading = 0
        self.origtop = self.rect.top
        self.facing = -1  # -1 faces left, 1 faces right
    def move(self, direction):
        """Move one step; direction is -1 (left), 0 (idle) or 1 (right)."""
        if direction: self.facing = direction
        self.rect.move_ip(direction*self.speed, 0)
        self.rect = self.rect.clamp(SCREENRECT)
        # Pick the sprite image matching the travel direction.
        if direction < 0:
            self.image = self.images[0]
        elif direction > 0:
            self.image = self.images[1]
        # Bob up/down by one pixel every `bounce` pixels of horizontal travel.
        self.rect.top = self.origtop - (self.rect.left//self.bounce%2)
    def gunpos(self):
        """Return the (x, y) muzzle position for spawning a Shot."""
        pos = self.facing*self.gun_offset + self.rect.centerx
        return pos, self.rect.top
class Alien(pygame.sprite.Sprite):
    """Invader that strafes horizontally and drops a row at each screen edge."""
    speed = 13      # horizontal pixels per frame (sign gives direction)
    animcycle = 12  # frames each animation image is shown
    images = []     # filled in by main() before any Alien is created
    def __init__(self):
        pygame.sprite.Sprite.__init__(self, self.containers)
        self.image = self.images[0]
        self.rect = self.image.get_rect()
        # Random initial direction; magnitude doubles as per-frame velocity.
        self.facing = random.choice((-1,1)) * Alien.speed
        self.frame = 0
        if self.facing < 0:
            # Moving left: enter from the right edge of the screen.
            self.rect.right = SCREENRECT.right
    def update(self):
        """Advance one frame: move, bounce off screen edges, animate."""
        self.rect.move_ip(self.facing, 0)
        if not SCREENRECT.contains(self.rect):
            # Hit an edge: reverse direction and step down one row.
            self.facing = -self.facing;
            self.rect.top = self.rect.bottom + 1
            self.rect = self.rect.clamp(SCREENRECT)
        self.frame = self.frame + 1
        # Cycle through the three animation images.
        self.image = self.images[self.frame//self.animcycle%3]
class Explosion(pygame.sprite.Sprite):
    """Short-lived blast animation spawned at another sprite's position."""
    defaultlife = 12  # frames the explosion stays alive
    animcycle = 3     # frames per animation image
    images = []       # filled in by main()

    def __init__(self, actor):
        pygame.sprite.Sprite.__init__(self, self.containers)
        self.image = self.images[0]
        self.rect = self.image.get_rect(center=actor.rect.center)
        self.life = self.defaultlife

    def update(self):
        # Count down, alternate between the two frames, then disappear.
        self.life -= 1
        self.image = self.images[self.life//self.animcycle%2]
        if self.life <= 0:
            self.kill()
class Shot(pygame.sprite.Sprite):
    """Player bullet travelling straight up the screen."""
    speed = -11  # negative y-velocity: upward
    images = []  # filled in by main()

    def __init__(self, pos):
        pygame.sprite.Sprite.__init__(self, self.containers)
        self.image = self.images[0]
        self.rect = self.image.get_rect(midbottom=pos)

    def update(self):
        self.rect.move_ip(0, self.speed)
        # Cull the shot once it leaves the top of the screen.
        if self.rect.top <= 0:
            self.kill()
class Bomb(pygame.sprite.Sprite):
    """Projectile dropped by an alien; explodes near the screen bottom."""
    speed = 9    # downward pixels per frame
    images = []  # filled in by main()

    def __init__(self, alien):
        pygame.sprite.Sprite.__init__(self, self.containers)
        self.image = self.images[0]
        # Spawn just below the dropping alien.
        drop_point = alien.rect.move(0,5).midbottom
        self.rect = self.image.get_rect(midbottom=drop_point)

    def update(self):
        self.rect.move_ip(0, self.speed)
        if self.rect.bottom >= 470:
            # Detonate just above the bottom edge.
            Explosion(self)
            self.kill()
class Score(pygame.sprite.Sprite):
    """HUD sprite rendering the global SCORE in the lower-left corner."""
    def __init__(self):
        # No containers argument: main() adds this sprite explicitly.
        pygame.sprite.Sprite.__init__(self)
        self.font = pygame.font.Font(None, 20)
        self.font.set_italic(1)
        self.color = Color('white')
        self.lastscore = -1  # sentinel forces a render on the first update()
        self.update()
        self.rect = self.image.get_rect().move(10, 450)
    def update(self):
        """Re-render the text only when the global SCORE has changed."""
        if SCORE != self.lastscore:
            self.lastscore = SCORE
            msg = "Score: %d" % SCORE
            self.image = self.font.render(msg, 0, self.color)
def main(winstyle = 0):
    """Initialize pygame, build the game world and run the main loop.

    BUG FIX: removed the bogus ``global score`` declaration (lowercase,
    never used -- the real global is ``SCORE``), and renamed the sprite
    group ``all`` to ``all_sprites`` so it no longer shadows the builtin.
    """
    # Initialize pygame
    pygame.init()
    if pygame.mixer and not pygame.mixer.get_init():
        print ('Warning, no sound')
        pygame.mixer = None

    # SCORE is read and updated by the collision handling below.
    global SCORE

    # Set the display mode
    winstyle = 0  # |FULLSCREEN
    bestdepth = pygame.display.mode_ok(SCREENRECT.size, winstyle, 32)
    screen = pygame.display.set_mode(SCREENRECT.size, winstyle, bestdepth)

    # Load images, assign to sprite classes
    # (do this before the classes are used, after screen setup)
    img = load_image('player1.gif')
    Player.images = [img, pygame.transform.flip(img, 1, 0)]
    img = load_image('explosion1.gif')
    Explosion.images = [img, pygame.transform.flip(img, 1, 1)]
    Alien.images = load_images('alien1.gif', 'alien2.gif', 'alien3.gif')
    Bomb.images = [load_image('bomb.gif')]
    Shot.images = [load_image('shot.gif')]

    # Decorate the game window
    icon = pygame.transform.scale(Alien.images[0], (32, 32))
    pygame.display.set_icon(icon)
    pygame.display.set_caption('Pygame Aliens')
    pygame.mouse.set_visible(0)

    # Create the background, tile the bgd image
    bgdtile = load_image('background.gif')
    background = pygame.Surface(SCREENRECT.size)
    for x in range(0, SCREENRECT.width, bgdtile.get_width()):
        background.blit(bgdtile, (x, 0))
    screen.blit(background, (0,0))
    pygame.display.flip()

    # Load the sound effects
    boom_sound = load_sound('boom.wav')
    shoot_sound = load_sound('car_door.wav')
    if pygame.mixer:
        music = os.path.join(main_dir, 'data', 'house_lo.ogg')
        pygame.mixer.music.load(music)
        pygame.mixer.music.play(-1)

    # Initialize Game Groups
    aliens = pygame.sprite.Group()
    shots = pygame.sprite.Group()
    bombs = pygame.sprite.Group()
    all_sprites = pygame.sprite.RenderUpdates()
    lastalien = pygame.sprite.GroupSingle()

    # Assign default groups to each sprite class
    Player.containers = all_sprites
    Alien.containers = aliens, all_sprites, lastalien
    Shot.containers = shots, all_sprites
    Bomb.containers = bombs, all_sprites
    Explosion.containers = all_sprites
    Score.containers = all_sprites

    # Create some starting values
    alienreload = ALIEN_RELOAD
    kills = 0  # NOTE(review): never updated or displayed -- confirm intent
    clock = pygame.time.Clock()

    # Initialize our starting sprites
    player = Player()
    Alien()  # note, this 'lives' because it goes into a sprite group
    if pygame.font:
        all_sprites.add(Score())

    while player.alive():
        # Get input
        for event in pygame.event.get():
            if event.type == QUIT or \
               (event.type == KEYDOWN and event.key == K_ESCAPE):
                return
        keystate = pygame.key.get_pressed()

        # Clear/erase the last drawn sprites
        all_sprites.clear(screen, background)

        # Update all the sprites
        all_sprites.update()

        # Handle player input
        direction = keystate[K_RIGHT] - keystate[K_LEFT]
        player.move(direction)
        firing = keystate[K_SPACE]
        if not player.reloading and firing and len(shots) < MAX_SHOTS:
            Shot(player.gunpos())
            shoot_sound.play()
        player.reloading = firing

        # Create new alien
        if alienreload:
            alienreload = alienreload - 1
        elif not int(random.random() * ALIEN_ODDS):
            Alien()
            alienreload = ALIEN_RELOAD

        # Drop bombs from the most recently spawned alien
        if lastalien and not int(random.random() * BOMB_ODDS):
            Bomb(lastalien.sprite)

        # Detect collisions
        for alien in pygame.sprite.spritecollide(player, aliens, 1):
            boom_sound.play()
            Explosion(alien)
            Explosion(player)
            SCORE = SCORE + 1
            player.kill()
        for alien in pygame.sprite.groupcollide(shots, aliens, 1, 1).keys():
            boom_sound.play()
            Explosion(alien)
            SCORE = SCORE + 1
        for bomb in pygame.sprite.spritecollide(player, bombs, 1):
            boom_sound.play()
            Explosion(player)
            Explosion(bomb)
            player.kill()

        # Draw the scene
        dirty = all_sprites.draw(screen)
        pygame.display.update(dirty)

        # Cap the framerate
        clock.tick(40)

    if pygame.mixer:
        pygame.mixer.music.fadeout(1000)
    pygame.time.wait(1000)
    pygame.quit()


# Call the "main" function if running this script
if __name__ == '__main__':
    main()
| |
#!/usr/bin/env python
# coding: utf-8
# Author: Vladimir M. Zaytsev <zaytsev@usc.edu>
# Incremental Minimal FSA algorithm:
# [1] http://aclweb.org/anthology-new/J/J00/J00-1002.pdf
# [2] http://habrahabr.ru/post/190694/
import sys
import itertools
import collections
class State(object):
    """A single FSA state.

    Equality is structural: two states compare equal when they agree on
    finality and have identical outgoing transitions (same symbols in the
    same order leading to recursively equal states).
    """
    def __init__(self, key):
        self.key = key                            # unique state id (string)
        self.final = False                        # accepting-state flag
        self.inputs = collections.OrderedDict()   # symbol -> predecessor state
        self.outputs = collections.OrderedDict()  # symbol -> successor state
    def make_final(self):
        """Mark this state as accepting."""
        self.final = True
    def __eq__(self, other):
        if self.final != other.final:
            return False
        if len(self.outputs) == 0 and len(other.outputs) == 0:
            if self.final and other.final:
                return True
            else:
                # Two sink states that are both non-final indicate an
                # inconsistent automaton.
                # BUG FIX: the original called the stream object itself
                # (sys.stderr("ERROR")) which raises TypeError; write() is
                # what was intended.
                sys.stderr.write("ERROR")
                return False
        else:
            if len(self.outputs) != len(other.outputs):
                return False
            else:
                outputs_pairs = itertools.izip(self.outputs.iteritems(), other.outputs.iteritems())
                for (trans_1, state_1), (trans_2, state_2) in outputs_pairs:
                    if trans_1 != trans_2:
                        return False
                    if state_1 != state_2:
                        return False
                return True
    def __ne__(self, other):
        return not self.__eq__(other)
class MinimalIncrFSA(object):
    """Incrementally-built minimal acyclic FSA.

    Implements the register-based incremental construction of [1]: words
    are added one at a time and equivalent suffix states are shared via a
    registry (``self.reg``) of unique states.
    """
    def __init__(self):
        self.__state_counter = 1
        self.START = State("0")
        # Registry of unique (minimized) states, keyed by state key.
        self.reg = collections.OrderedDict({self.START.key: self.START})
        self.FINISH = None  # set by make_single_final_state()
    def new_state(self, key=None):
        """Create a fresh state; auto-number it unless a key is given."""
        if key is None:
            key = str(self.__state_counter)
            self.__state_counter += 1
        new_state = State(key)
        return new_state
    def prefix(self, word):
        """Return the chain of existing states matching the longest prefix
        of ``word`` (START included as the first element)."""
        state_list = [self.START]
        current_state = state_list[-1]
        for symbol in word:
            next_state = self.get_state(current_state, symbol)
            if next_state is None:
                return state_list
            state_list.append(next_state)
            current_state = next_state
        return state_list
    def add_suffix(self, word, state_list):
        """Append fresh states for the unmatched tail of ``word``; the last
        state is marked accepting."""
        current_state = state_list[-1]
        for i in xrange(len(state_list) - 1, len(word)):
            new_state = self.new_state()
            symbol = word[i]
            self.add_trans(current_state, new_state, symbol)
            current_state = new_state
            state_list.append(current_state)
        current_state.make_final()
    def add_word(self, word):
        """Add a word without minimization (trie-style)."""
        prefix_states = self.prefix(word)
        self.add_suffix(word, prefix_states)
    def register_get(self, state):
        """Return the registered state equivalent to ``state``, or None."""
        if state.key in self.reg:
            return self.reg[state.key]
        for reg_state in self.reg.values():
            if reg_state == state:
                return reg_state
        return None
    def get_state(self, from_state, trans_symbol):
        """Follow one outgoing transition; None when absent."""
        return from_state.outputs.get(trans_symbol)
    def add_trans(self, from_state, to_state, trans_symbol):
        """Create/overwrite the transition from_state -symbol-> to_state."""
        from_state.outputs[trans_symbol] = to_state
        to_state.inputs[trans_symbol] = from_state
    def rem_trans(self, from_state, to_state, trans_symbol):
        """Remove the transition on ``trans_symbol`` between the two states."""
        if trans_symbol in from_state.outputs:
            from_state.outputs.pop(trans_symbol, None)
        if trans_symbol in to_state.inputs:
            # BUG FIX: the original called the dict itself --
            # ``to_state.inputs(trans_symbol, None)`` -- raising TypeError;
            # pop() (mirroring the outputs branch above) was intended.
            to_state.inputs.pop(trans_symbol, None)
    def replace_or_register(self, word, state_list):
        """Walk ``state_list`` backwards, registering new unique states and
        redirecting transitions to already-registered equivalents."""
        state_idx = len(state_list) - 1
        word_idx = len(word) - 1
        while state_idx > 0:
            state = state_list[state_idx]
            reg_state = self.register_get(state)
            if reg_state is None:
                # State is new: record it in the registry.
                self.reg[state.key] = state
            elif state.key == reg_state.key:
                # Already the registered instance: nothing to replace.
                word_idx -= 1
                state_idx -= 1
                continue
            else:
                # Equivalent state exists: redirect the incoming transition.
                trans_symbol = word[word_idx]
                prev_state = state_list[state_idx - 1]
                state_list[state_idx] = reg_state
                self.add_trans(prev_state, reg_state, trans_symbol)
            word_idx -= 1
            state_idx -= 1
    def find_confluence_index(self, state_list):
        """Return the index of the first state in ``state_list`` reached by
        more than one incoming arc (a confluence), or -1 if none."""
        for i in xrange(1, len(state_list)):
            state = state_list[i]
            if len(state.inputs) > 1:
                arcs = 0
                for i_state in state.inputs.values():
                    for v in i_state.outputs.values():
                        if v.key == state.key:
                            arcs += 1
                if arcs > 1:
                    return i
        return -1
    def clone_state(self, state):
        """Return a fresh state copying only the finality of ``state``."""
        clone = self.new_state()
        clone.final = state.final
        return clone
    def make_single_final_state(self):
        """Collapse all accepting states into one FINISH state reached via
        epsilon ("*e*") transitions (breadth-first traversal from START)."""
        nodes = [self.START]
        visited_states = set()
        final_states = []
        while len(nodes) > 0:
            new_nodes = []
            for node in nodes:
                for next_state in node.outputs.itervalues():
                    if next_state.key in visited_states:
                        continue
                    else:
                        new_nodes.append(next_state)
                        visited_states.add(next_state.key)
                        if next_state.final:
                            final_states.append(next_state)
            nodes = new_nodes
        self.FINISH = self.new_state()
        for i in xrange(0, len(final_states)):
            self.add_trans(final_states[i], self.FINISH, "*e*")
            final_states[i].final = False
    def pprint(self):
        """Return a human-readable dump of all transitions; accepting target
        states are shown in parentheses."""
        printed = set()
        print_str = ""
        nodes = [self.START]
        while len(nodes) > 0:
            new_nodes = []
            for node in nodes:
                for trans_symbol, next_state in node.outputs.iteritems():
                    if next_state.final:
                        print_str += "%s -%s-> (%s)\n" % (node.key, trans_symbol, next_state.key)
                    else:
                        print_str += "%s -%s-> %s\n" % (node.key, trans_symbol, next_state.key)
                    if next_state.key not in printed:
                        new_nodes.append(next_state)
                        printed.add(next_state.key)
            nodes = new_nodes
        return print_str
    def to_carmel(self, fout):
        """Write the automaton to ``fout`` in carmel FST format; requires
        make_single_final_state() to have been called first."""
        printed = set()
        nodes = [self.START]
        fout.write("%s\n" % self.FINISH.key)
        while len(nodes) > 0:
            new_nodes = []
            for node in nodes:
                for trans_symbol, next_state in node.outputs.iteritems():
                    arc = "%s%s%s" % (node.key, next_state.key, trans_symbol)
                    if arc in printed:
                        continue
                    if trans_symbol == "*e*":
                        fout.write("(%s (%s *e*))\n" % (node.key, next_state.key))
                    else:
                        fout.write("(%s (%s \"%s\"))\n" % (node.key, next_state.key, trans_symbol))
                    new_nodes.append(next_state)
                    printed.add(arc)
            nodes = new_nodes
    def add_min_word(self, word):
        """Add ``word`` keeping the automaton minimal: clone any confluence
        states on the prefix path, append the suffix, then re-register."""
        state_list = self.prefix(word)
        confl_idx = self.find_confluence_index(state_list)
        if confl_idx > -1:
            # Clone the confluence tail so the suffix can be added without
            # affecting other words sharing those states.
            idx = confl_idx
            while idx < len(state_list):
                prev = state_list[idx - 1]
                cloned = self.clone_state(state_list[idx])
                self.add_trans(prev, cloned, word[idx - 1])
                state_list[idx] = cloned
                idx += 1
                confl_idx += 1
        self.add_suffix(word, state_list)
        self.replace_or_register(word, state_list)
# s0 = State(0, )
# s1 = State(1, )
# s2 = State(2, )
#
# s3 = State(3, )
# s4 = State(4, )
#
# s5 = State(5, )
# s6 = State(6, )
#
# s1.inputs[s0.key] = s0
# s2.inputs[s0.key] = s0
#
# s1.outputs["y"] = s3
# s1.outputs["x"] = s4
#
# s2.outputs["y"] = s5
# s2.outputs["x"] = s6
#
# s3.inputs["y"] = s1
# s4.inputs["x"] = s1
# s5.inputs["y"] = s2
# s6.inputs["x"] = s2
#
# s3.make_final()
# s4.make_final()
# s5.make_final()
# s6.make_final()
# print s1 == s2
# Build the automaton from whitespace-stripped words read on stdin, then
# collapse the accepting states and dump the result in carmel format.
fsa = MinimalIncrFSA()
for line in sys.stdin:
    token = line.replace(" ", "").replace("\n", "")
    fsa.add_word(token)
fsa.make_single_final_state()
fsa.to_carmel(sys.stdout)
| |
"""
pathtools.py provides functions handling IP hops, IXP detection and ASN information.
"""
import dbtools as db
import os
import copy
import logging
# load database from the local folder
cur_path = os.path.abspath(os.path.dirname(__file__))
# AS-to-AS relationships; filename suggests a CAIDA as-rel2 snapshot
# dated 2016-12-01 -- TODO confirm data provenance
as_rel = db.AsRelationDB(os.path.join(cur_path, "db/20161201.as-rel2.txt"))
# IP-to-ASN mapping plus descriptions for reserved address blocks
ip2asn = db.AsnDB(main=os.path.join(cur_path, "db/ipasn.dat"),
                  reserved=os.path.join(cur_path, "db/reserved_ip.txt"))
# IXP prefix and IXP membership databases
ixp_pref = db.IxpPrefixDB(os.path.join(cur_path, "db/ixp_prefixes.txt"))
ixp_member = db.IxpMemberDB(os.path.join(cur_path, "db/ixp_membership.txt"))
def get_ip_info(ip):
    """Query the ASN and IXP information for a given IP address from various data source

    Args:
        ip (string): ip address, e.g. '129.250.66.33'

    Returns:
        addr (db.Addr): Addr object, with addr_type attribute set
    """
    # 1) known IXP interconnection address?
    interco = ixp_member.lookup_interco(ip)
    if interco is not None:
        return interco
    # 2) inside a known IXP prefix?
    ixp = ixp_pref.lookup(ip)
    if ixp is not None:
        return db.Addr(addr=ip, addr_type=db.AddrType.IxpPref, ixp=ixp)
    # 3) fall back to the ip2asn database
    asn = ip2asn.lookup(ip)
    if type(asn) is int:  # an int result is a real ASN
        return db.Addr(addr=ip, addr_type=db.AddrType.Normal, asn=asn)
    # Otherwise asn is a description string (reserved blocks) or None (not found).
    return db.Addr(addr=ip, addr_type=db.AddrType.Others, desc=asn)
def bridge(path):
    """Given a sequence of IP hops, identify sub-sequences without ASN and
    remove only the non-IXP IPs in them when the ASes wrapping the
    sub-sequence have a known relationship.

    Args:
        path (list of dbtools.Addr): a path composed of IP hops; sub-sequences
            without ASN can be composed of IP hops of dbtools.AddrType.IxpPref
            or dbtools.AddrType.Others.

    Return:
        list of dbtools.Addr
    """
    drop = [False] * len(path)  # hops flagged True will be removed
    holes = find_holes([hop.asn for hop in path])  # None-ASN sub-sequences
    last = len(path) - 1
    for begin, stop in holes:
        # Holes touching either end of the path are kept intact.
        if begin <= 0 or stop >= last:
            continue
        # Only consider sub-sequences containing AddrType.Others hops.
        if db.AddrType.Others not in [hop.type for hop in path[begin:stop+1]]:
            continue
        left_asn = path[begin - 1].asn
        right_asn = path[stop + 1].asn
        related = (left_asn == right_asn
                   or as_rel.has_relation((left_asn, right_asn)) is not None)
        if related:
            # Drop only the hops of type AddrType.Others.
            for pos in range(begin, stop + 1):
                if path[pos].type == db.AddrType.Others:
                    drop[pos] = True
    return [path[pos] for pos in range(last + 1) if not drop[pos]]
def find_holes(x):
    """find the beginning and end of each continuous None run in the given iterator

    Args:
        x (iterator): the input sequence

    Returns:
        list of (int, int): inclusive begin/end index of each None run
    """
    runs = []
    begin = None  # start index of the currently-open run, or None
    idx = -1
    for idx, val in enumerate(x):
        if begin is None and val is None:
            begin = idx
        elif begin is not None and val is not None:
            runs.append((begin, idx - 1))
            begin = None
    # The sequence may end while a run is still open,
    # e.g. [None, 1, 1, None, 1, None, None, None, 1, None]
    if begin is not None:
        runs.append((begin, idx))
    return runs
def insert_ixp(path):
    """insert IXP hops according to the presence of IXP address and IXP membership of surrounding AS

    Note: mutates and returns the same list.

    Args:
        path (list of db.Addr): a list of hops

    Returns:
        list of db.Addr
    """
    path_len = len(path)
    # Collect (index, ixp) pairs first; actual insertion happens afterwards
    # so index arithmetic on the original path stays valid.
    ixp_insertion = []
    for idx, hop in enumerate(path):
        if (hop.type == db.AddrType.InterCo or hop.type == db.AddrType.IxpPref) and (0 < idx < path_len-1):
            # Normal - Interco/IxpPref - Normal
            if path[idx-1].type == db.AddrType.Normal and path[idx+1].type == db.AddrType.Normal:
                left_hop = path[idx-1]
                right_hop = path[idx+1]
                # Normal - Interco - Normal
                if hop.type == db.AddrType.InterCo:
                    # ASN: A - A - A -> A - A - A
                    if left_hop.get_asn() == hop.get_asn() == right_hop.get_asn():
                        pass
                    # ASN: A - A - B -> A - A - IXP - B
                    elif left_hop.get_asn() == hop.get_asn() != right_hop.get_asn():
                        ixp_insertion.append((idx+1, hop.ixp))
                    # ASN: A - B - B -> A - IXP - B - B
                    elif left_hop.get_asn() != hop.get_asn() == right_hop.get_asn():
                        ixp_insertion.append((idx, hop.ixp))
                    # ASN: A - B - C
                    elif left_hop.get_asn() != hop.get_asn() != right_hop.get_asn():
                        # check IXP membership
                        left_is_member = ixp_member.is_member(ixp=hop.ixp, asn=left_hop.asn)
                        right_is_member = ixp_member.is_member(ixp=hop.ixp, asn=right_hop.asn)
                        # IXP membership: A -m- B -m- C -> A - IXP - B - IXP - C
                        if left_is_member and right_is_member:
                            ixp_insertion.append((idx, hop.ixp))
                            ixp_insertion.append((idx+1, hop.ixp))
                        # IXP membership: A -m- B - C -> A - IXP - B - C
                        elif left_is_member:
                            ixp_insertion.append((idx, hop.ixp))
                        # IXP membership: A - B -m- C -> A - B - IXP - C
                        elif right_is_member:
                            ixp_insertion.append((idx + 1, hop.ixp))
                        else:
                            pass  # in this case no IXP hop will be seen in the path
                # Normal - IxpPref - Normal
                elif hop.type == db.AddrType.IxpPref:
                    left_is_member = ixp_member.is_member(ixp=hop.ixp, asn=left_hop.asn)
                    right_is_member = ixp_member.is_member(ixp=hop.ixp, asn=right_hop.asn)
                    # IXP membership: A -m- IxpPref -m- B -> A - IXP - IxpPref - IXP - B
                    if left_is_member and right_is_member:
                        ixp_insertion.append((idx, hop.ixp))
                        ixp_insertion.append((idx + 1, hop.ixp))
                    # IXP membership: A -m- IxpPref- B -> A - IXP - IxpPref - B
                    elif left_is_member:
                        ixp_insertion.append((idx, hop.ixp))
                    # IXP membership: A - IxpPref -m- B -> A - IxpPref- IXP - B
                    elif right_is_member:
                        ixp_insertion.append((idx + 1, hop.ixp))
                    else:
                        pass  # in this case no IXP hop shall be seen in the path
            # Interco/IxpPref - Interco/IxpPref
            elif path[idx+1].type == db.AddrType.InterCo or path[idx+1].type == db.AddrType.IxpPref:
                # belong to same IXP
                if path[idx].ixp == path[idx+1].ixp:
                    ixp_insertion.append((idx + 1, hop.ixp))
                else:
                    ixp_insertion.append((idx, hop.ixp))
                    ixp_insertion.append((idx+1, path[idx+1].ixp))
    # Apply insertions left-to-right; each insert shifts later indexes by one.
    shift = 0
    for ins in ixp_insertion:
        path.insert(ins[0]+shift, db.Addr(addr=None, addr_type=db.AddrType.Virtual, ixp=ins[1]))
        shift += 1
    return path
def remove_repeated_asn(path):
    """ collapse consecutive repeats of the same ASN in the given path

    Args:
        path (list of ASN): ASN can be int, or str for an IXP hop

    Returns:
        list of ASN
    """
    collapsed = []
    for hop in path:
        # Keep a hop only when it differs from the last one kept.
        if not collapsed or hop != collapsed[-1]:
            collapsed.append(hop)
    return collapsed
def as_path_change(paths):
    """ mark the idx at which AS path changes

    Args:
        paths (list of list of ASN): [[ASN,...],...]

    Returns:
        list of int, index of change is set to 1, otherwise 0
    """
    # The first entry can never be a change; every other entry is a change
    # when it differs from its predecessor.
    return [1 if idx > 0 and cur != paths[idx - 1] else 0
            for idx, cur in enumerate(paths)]
def as_path_change_cl(paths):
    """ mark the idx at which there is surely an AS path change not related to timeout, private address etc.

    Args:
        paths (list of list of ASN): [[ASN,...],...]

    Returns:
        list of int, index of change is set to 1, otherwise 0
    """
    change = [0] * len(paths)
    for idx in range(1, len(paths)):
        cur, prev = paths[idx], paths[idx - 1]
        if not cur or not prev:
            continue
        # Same destination AS (excludes reachability issues) but a different path.
        if cur[-1] != prev[-1] or cur == prev:
            continue
        diff_as = set(cur) ^ set(prev)
        # All differing hops must be valid (int) ASNs.
        if diff_as and all(type(i) is int for i in diff_as):
            change[idx] = 1
    return change
def as_path_change_cs(paths):
    """ mark the idx at which where AS path change happens

    AS path change is where the FIRST different AS hops are both valid public ASN hops,
    avoiding changes due to timeout, private address, reachability issues.

    Args:
        paths (list of list of ASN): [[ASN,...],...]

    Returns:
        list of int, index of change is set to 1, otherwise 0
    """
    change = [0] * len(paths)
    for idx in range(1, len(paths)):
        cur, prev = paths[idx], paths[idx - 1]
        if not cur or not prev:
            continue
        for a, b in zip(cur, prev):
            if a != b:
                # Only count the change when both first-differing hops are real ASNs.
                if type(a) is int and type(b) is int:
                    change[idx] = 1
                break
    return change
def is_ixp_asn_hop(x):
    """ check whether the return value of db.Addr.get_asn() is an IXP name

    A str that is neither 'Invalid IP' nor a reserved-block description must
    be an IXP name.

    Args:
        x (int, string, None)

    Returns:
        bool
    """
    if type(x) is not str:
        return False
    return not is_bad_hop(x)
def is_bad_hop(x):
    """ check the whether return value of db.Addr.get_asn() is an description string of reserved IP blocks or invalid IP address

    Args:
        x (int, string, None)

    Returns:
        bool
    """
    # NOTE(review): when ip2asn.reserved_des is None this returns True for
    # EVERY x, which in turn makes is_ixp_asn_hop() always False -- presumably
    # a guard against `x in None` raising; confirm this is intended.
    return x == 'Invalid IP address' or ip2asn.reserved_des is None or x in ip2asn.reserved_des
def as_path_change_ixp(paths):
    """ mark the idx at which there is surely an AS path change related to IXP.

    Args:
        paths (list of list of ASN): [[ASN,...],...]

    Returns:
        list of int, index of change is set to 1, otherwise 0
    """
    change = [0] * len(paths)
    for idx in range(1, len(paths)):
        cur, prev = paths[idx], paths[idx - 1]
        if not cur or not prev:
            continue
        # Same destination AS (excludes reachability issues) but different path.
        if cur[-1] != prev[-1] or cur == prev:
            continue
        diff_as = set(cur) ^ set(prev)
        # At least one differing hop must be an IXP name.
        if diff_as and any(is_ixp_asn_hop(i) for i in diff_as):
            change[idx] = 1
    return change
def as_path_change_ixp_cs(paths):
    """ mark the idx at which the path change is an IXP change

    IXP change is where the FIRST different AS hops involve at least one IXP;
    if the previous AS hop differs already, it is no longer an IXP change.

    Args:
        paths (list of list of ASN): [[ASN,...],...]

    Returns:
        list of int, index of change is set to 1, otherwise 0
    """
    change = [0] * len(paths)
    for idx in range(1, len(paths)):
        cur, prev = paths[idx], paths[idx - 1]
        if not cur or not prev:
            continue
        for a, b in zip(cur, prev):
            if a != b:
                pair = (a, b)
                # Both hops valid, at least one being an IXP (str) hop.
                if all(not is_bad_hop(i) for i in pair) and any(type(i) is str for i in pair):
                    change[idx] = 1
                break
    return change
def as_path_change_ixp_pu(paths):
    """ mark the idx at which the path change is a pure IXP change

    Pure IXP change is where the FIRST different AS hops involve IXP in both
    AS paths; if the previous AS hop differs already, it is no longer a pure
    IXP change.

    Args:
        paths (list of list of ASN): [[ASN,...],...]

    Returns:
        list of int, index of change is set to 1, otherwise 0
    """
    change = [0] * len(paths)
    for idx in range(1, len(paths)):
        cur, prev = paths[idx], paths[idx - 1]
        if not cur or not prev:
            continue
        for a, b in zip(cur, prev):
            if a != b:
                # Both first-differing hops must be IXP hops.
                if all(is_ixp_asn_hop(i) for i in (a, b)):
                    change[idx] = 1
                break
    return change
class IpForwardingPattern:
    """IpForwardingPattern describes the forwarding paths taken by all the
    paris-ids toward one destination.

    Attributes:
        pattern (list of path): indexed by paris id; each element is a path
            (list of hops); two paths are equal when they contain the same
            hops in the same order. None means "not yet observed".
    """
    def __init__(self, size, paris_id=None, paths=None):
        """Initialize with the number of distinct paris ids and, optionally,
        already-observed (paris_id, path) pairs.

        Args:
            size (int): number of different paris id, 16 for RIPE Atlas
            paris_id (list of int): sequence of paris id
            paths (list of path): path taken by the corresponding paris id
        """
        self.pattern = [None] * size
        if paris_id is None or paths is None:
            return
        # NOTE: a paris_id mapped to conflicting paths is not checked here.
        assert len(paris_id) == len(paths)
        for pid, taken in zip(paris_id, paths):
            self.pattern[pid] = taken

    def update(self, paris_id, path):
        """Integrate one (paris_id, path) observation into the pattern.

        Returns True when the observation is compatible with (and recorded
        into) the existing pattern, False otherwise.
        """
        assert paris_id < len(self.pattern)
        known = self.pattern[paris_id]
        # An unset slot can always absorb the new observation.
        if known is None:
            self.pattern[paris_id] = path
            return True
        return known == path

    def is_complete(self):
        """test if the pattern has a path set for every paris id"""
        return None not in self.pattern

    def is_match(self, paris_id, paths):
        """Test whether the observations are compatible with the pattern.

        Unlike update(), unset slots are NOT filled in.

        Args:
            paris_id (list of int)
            paths (list of path)

        Returns:
            boolean
        """
        for pid, taken in zip(paris_id, paths):
            known = self.pattern[pid]
            if known is not None and taken is not None and known != taken:
                return False
        return True

    def is_match_pattern(self, pattern):
        """Test whether another IpForwardingPattern is compatible with this one."""
        if len(pattern.pattern) != len(self.pattern):
            return False
        return self.is_match(range(len(pattern.pattern)), pattern.pattern)

    def __repr__(self):
        return "IpForwardingPattern(%r)" % dict(enumerate(self.pattern))

    def __str__(self):
        return "%s" % dict(enumerate(self.pattern))

    def __hash__(self):
        # Hash/equality piggyback on the repr of the pattern mapping.
        return hash(self.__repr__())

    def __eq__(self, other):
        return self.__repr__() == other.__repr__()
class PatternSegment:
    """A run of consecutive paths all governed by one IpForwardingPattern.

    Attributes:
        begin (int): index of the first path in the segment; only meaningful
            relative to a known sequence of paris ids and paths (same for end)
        end (int): index of the last path of the segment, inclusive
        pattern (IpForwardingPattern): the pattern this segment follows
    """
    def __init__(self, begin, end, pattern):
        self.begin = begin
        self.end = end
        self.pattern = pattern
    def get_len(self):
        """Number of paths covered by the segment (end is inclusive)."""
        return 1 + self.end - self.begin
    def __repr__(self):
        return "PatternSegment(begin=%r, end=%r, pattern=%r)" % (self.begin, self.end, self.pattern)
    def __str__(self):
        return "(%r, %r, pattern=%s)" % (self.begin, self.end, self.pattern)
    def __hash__(self):
        # identity of a segment is its printable form
        return hash(repr(self))
    def __eq__(self, other):
        return repr(self) == repr(other)
def ip_path_change_simple(paris_id, paths, size=16):
    """Cut a (paris_id, path) sequence where a paris id repeats with a new path.

    Walks the measurements in order and greedily grows a segment for as long
    as every observation stays compatible with the segment's
    IpForwardingPattern; the first incompatible observation opens a new one.

    Args:
        paris_id (list of int): Paris ID used when tracerouting
        paths (list of path): path is composed of ip hops
        size (int): number of different paris_ids

    Returns:
        list of PatternSegment
    """
    assert len(paris_id) == len(paths)
    segments = []
    current = PatternSegment(begin=0, end=0, pattern=IpForwardingPattern(size))
    for idx, (pid, path) in enumerate(zip(paris_id, paths)):
        if current.pattern.update(pid, path):
            # observation fits the running pattern: stretch the segment
            current.end = idx
        else:
            # incompatible observation: close this segment, open a fresh one
            # seeded with the offending observation
            segments.append(current)
            current = PatternSegment(begin=idx, end=idx, pattern=IpForwardingPattern(size))
            current.pattern.update(pid, path)
    # flush the trailing segment
    if current not in segments:
        segments.append(current)
    return segments
def ip_path_change_bck_ext(paris_id, paths, size=16):
    """ maximize longest path segment with backward extension
    after the ip_path_change_simple() extends segment in -> direction;
    this function further checks if the longer segment of the two neighbouring ones
    can be further extended in <- direction
    the intuition behind is that most time measurement flows on dominant patterns
    Args:
        paris_id (list of int): Paris ID used when tracerouting
        paths (list of path): path is composed of ip hops
        size (int): number of different paris_ids
    Returns:
        list of PatternSegment
    """
    seg = ip_path_change_simple(paris_id, paths, size) # simple segmentation
    for idx, s in enumerate(seg[:-1]):
        next_s = seg[idx + 1]
        # | cur seg |<- next seg | extend later
        # | cur seg ->| next seg | is already done with simple segmentation
        # next segment can only be backwardly extended if:
        # it's pattern is complete
        # it's pattern has been repeated twice so that we are sure that it is a stable pattern
        # it is longer than the previous pattern so that we maximizes the longest pattern
        if next_s.pattern.is_complete() and next_s.get_len() >= 2 * size and next_s.get_len() > s.get_len():
            # work on copies so the originals are swapped in only if the
            # extension actually moved the boundary
            next_s_cp = copy.deepcopy(next_s)
            cur_s_cp = copy.deepcopy(s)
            pos = cur_s_cp.end
            while True:
                # test if can be backwardly extended
                # NOTE(review): update() is called on next_s.pattern (the object
                # still stored in seg), not on the copy; paths absorbed here stay
                # recorded in next_s even when the walk later stops -- confirm
                # this mutation is intended.
                if next_s.pattern.update(paris_id[pos], paths[pos]):
                    # shrink the current segment by one path and rebuild its
                    # pattern from the remaining slice
                    cur_s_cp.end = pos - 1
                    cur_s_cp.pattern = IpForwardingPattern(size,
                                                          paris_id[cur_s_cp.begin:cur_s_cp.end+1],
                                                          paths[cur_s_cp.begin:cur_s_cp.end+1])
                    next_s_cp.begin = pos
                    pos -= 1
                else:
                    break
            # if extended, change the both segments
            if cur_s_cp != s:
                seg[idx] = cur_s_cp
                seg[idx+1] = next_s_cp
    return seg
def ip_path_change_split(paris_id, paths, size):
    """pattern change detection with finer granilarity
    for segments with short length, < 2 * size, chances are that there is a short deviation inside
    while backward extension might find the end of the short deviation but not necessary the beginning,
    thus the need for further finer split.
    the intuition is that if a short segment have a sub-segment at 2 in length that matches with same popular patterns
    we further split the short segment
    Args:
        paris_id (list of int): Paris ID used when tracerouting
        paths (list of path): path is composed of ip hops
        size (int): number of different paris_ids
    Returns:
        list of PatternSegment
    """
    # start from the backward-extended segmentation
    seg = ip_path_change_bck_ext(paris_id, paths, size)
    # find relatively popular IpForwarding pattern: any patter that ever lasts more than 2 paris id iteration
    # not different segment can have same pattern at different places in the path sequences
    long_pat = set([s.pattern for s in seg if s.get_len() > 2*size and s.pattern.is_complete()])
    # {idx:(position, length)}
    # idx: the idx of seg to be split
    # position and length of the longest sub-segment that matches popular patterns
    split = dict()
    # new segmentation after split
    split_seg = []
    # try to further split short segments by finding the longest sub-segment that matches with popular patterns
    for idx, s in enumerate(seg):
        # the segment should at least 3 in length and it's pattern has not been repeated
        # and it's pattern doesn't match with any of the popular ones
        if 2 < s.get_len() < 2 * size:
            # logging.debug("Split short seg %d th: %r" % (idx, s))
            any_match = False
            for lp in long_pat:
                if lp.is_match_pattern(s.pattern):
                    any_match = True
                    # logging.debug("\tShort seg match with popular pattern %r, thus skipped" % (lp))
            if not any_match:
                # (position, longest match length) candidates, one per position
                max_len_per_pos = []
                # iterate over all the idx from the beginning to one before last of the short segment
                # and store the longest match with popular patterns for each position
                for pos in range(s.begin, s.end):
                    # logging.debug("\tInspect pos %d" % pos)
                    l = 2 # starting from match length 2
                    while pos+l <= s.end+1: # iterate till the end of current segment
                        any_match = False # the number of matched long pattern
                        for lp in long_pat:
                            if lp.is_match(paris_id[pos:pos+l], paths[pos:pos+l]):
                                any_match = True
                                break
                        if any_match: # if pos:pos+l matches at least one long pattern, further extend the length
                            l += 1
                        else: # record last successful try
                            max_len_per_pos.append((pos, l-1))
                            break
                    # this is case when the end of sub-segment reaches the end of the short segment
                    if (pos, l-1) not in max_len_per_pos:
                        max_len_per_pos.append((pos, l-1))
                    # logging.debug("\t\tlongest sub seg %s" % str(max_len_per_pos[-1]))
                # keep the position with the longest popular-pattern match
                max_len_per_pos = sorted(max_len_per_pos, key=lambda e: e[1], reverse=True)
                longest_cut = max_len_per_pos[0]
                if longest_cut[1] > 1: # further split only if the length of the longest match > 1 in length
                    split[idx] = longest_cut
                    # logging.debug("\t cut at %s" % str(longest_cut))
    # split the segments
    for idx, s in enumerate(seg):
        if idx in split:
            cut_begin = split[idx][0]
            cut_end = cut_begin + split[idx][1] - 1
            # three possible cases: 1/ at match at beginning; 2/ the match in the middle; 3/ the match at the end
            if cut_begin == s.begin:
                split_seg.append(PatternSegment(begin=cut_begin,
                                                end=cut_end,
                                                pattern=IpForwardingPattern(size,
                                                                            paris_id[cut_begin:cut_end + 1],
                                                                            paths[cut_begin:cut_end + 1])))
                split_seg.append(PatternSegment(begin=cut_end + 1,
                                                end=s.end,
                                                pattern=IpForwardingPattern(size,
                                                                            paris_id[cut_end + 1:s.end + 1],
                                                                            paths[cut_end + 1:s.end + 1])))
            elif cut_begin > s.begin and cut_end < s.end:
                split_seg.append(PatternSegment(begin=s.begin,
                                                end=cut_begin - 1,
                                                pattern=IpForwardingPattern(size,
                                                                            paris_id[s.begin:cut_begin],
                                                                            paths[s.begin:cut_begin])))
                split_seg.append(PatternSegment(begin=cut_begin,
                                                end=cut_end,
                                                pattern=IpForwardingPattern(size,
                                                                            paris_id[cut_begin:cut_end + 1],
                                                                            paths[cut_begin:cut_end + 1])))
                split_seg.append(PatternSegment(begin=cut_end + 1,
                                                end=s.end,
                                                pattern=IpForwardingPattern(size,
                                                                            paris_id[cut_end + 1:s.end + 1],
                                                                            paths[cut_end + 1:s.end + 1])))
            elif cut_end == s.end:
                split_seg.append(PatternSegment(begin=s.begin,
                                                end=cut_begin - 1,
                                                pattern=IpForwardingPattern(size,
                                                                            paris_id[s.begin:cut_begin],
                                                                            paths[s.begin:cut_begin])))
                split_seg.append(PatternSegment(begin=cut_begin,
                                                end=cut_end,
                                                pattern=IpForwardingPattern(size,
                                                                            paris_id[cut_begin:cut_end + 1],
                                                                            paths[cut_begin:cut_end + 1])))
        else:
            split_seg.append(s)
    # after the above split, the new neighbouring segments could again math popular pattern, merge them
    # {idx: new segment}
    # idx: the first idx of the two neighbour segment in split_seg that meant to be merged
    # maps to the new merged segment
    merge = dict()
    for idx, s in enumerate(split_seg[:-1]):
        next_s = split_seg[idx+1]
        # if the two neighbour segments are short test if them can be merged
        if s.get_len() < 2 * size or next_s.get_len() < 2 * size:
            # if the neighbouring seg matches with each other then test if merged seg matches with popular pattern
            if s.pattern.is_match_pattern(next_s.pattern):
                merge_pat = IpForwardingPattern(size, paris_id[s.begin:next_s.end+1], paths[s.begin:next_s.end+1])
                any_match = False
                for lp in long_pat:
                    if lp.is_match_pattern(merge_pat):
                        any_match = True
                        break
                if any_match:
                    merge[idx] = PatternSegment(begin=s.begin, end=next_s.end, pattern=merge_pat)
    # in general consecutive merge, e.g. 1 merge 2 and 2 merge 3, is not possible
    # log it when happens
    # NOTE(review): when a consecutive merge IS detected, the function bails out
    # early and returns the un-merged split_seg -- confirm this fallback is intended.
    for i in merge:
        if i+1 in merge:
            logging.error("IP change split: consecutive merge possible: %r, %r" % (paris_id, paths))
            return split_seg
    # apply the merges: a merged segment replaces itself and its right neighbour
    # (note: the loop variable `seg` below shadows the earlier segment list)
    mg_seg = []
    for idx, seg in enumerate(split_seg):
        if idx in merge:
            mg_seg.append(merge[idx])
        elif idx not in merge and idx-1 not in merge:
            mg_seg.append(seg)
    return mg_seg
def ifp_change(seg, seq_len):
    """Flag the indexes where a new PatternSegment begins.

    The very first segment is not a change; every later segment marks a 1 at
    its begin index.

    Args:
        seg (list of PatternSegment): the out put of ifp change detection algos
        seq_len: the total length of the path sequence

    Returns:
        list of int, index of change is set to 1, otherwise 0
    """
    flags = [0] * seq_len
    # seg[1:] is empty for zero or one segment, so no special-casing needed
    for segment in seg[1:]:
        flags[segment.begin] = 1
    return flags
| |
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from nova import context
from nova import db
from nova import exception as ex
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import integrated_helpers as helper
from nova.tests.unit import policy_fixture
def rand_flavor(**kwargs):
    """Build a random flavor definition; keyword arguments override any field."""
    alnum = helper.generate_random_alphanumeric
    num = helper.generate_random_numeric
    flav = {
        'name': 'name-%s' % alnum(10),
        'id': alnum(10),
        # +1 keeps ram/vcpus strictly positive even when the random digits are 0
        'ram': int(num(2)) + 1,
        'disk': int(num(3)),
        'vcpus': int(num(1)) + 1,
    }
    flav.update(kwargs)
    return flav
class FlavorManageFullstack(test.TestCase):
    """Tests for flavors manage administrative command.
    Extension: os-flavors-manage
    os-flavors-manage adds a set of admin functions to the flavors
    resource for the creation and deletion of flavors.
    POST /v2/flavors:
    {
        'name': NAME, # string, required unique
        'id': ID, # string, required unique
        'ram': RAM, # in MB, required
        'vcpus': VCPUS, # int value, required
        'disk': DISK, # in GB, required
        'OS-FLV-EXT-DATA:ephemeral', # in GB, ephemeral disk size
        'is_public': IS_PUBLIC, # boolean
        'swap': SWAP, # in GB?
        'rxtx_factor': RXTX, # ???
    }
    Returns Flavor
    DELETE /v2/flavors/ID
    Functional Test Scope::
    This test starts the wsgi stack for the nova api services, uses an
    in memory database to ensure the path through the wsgi layer to
    the database.
    """
    def setUp(self):
        # Stand up the API service with real policy enforcement so the
        # admin/non-admin distinction in the permission tests is meaningful.
        super(FlavorManageFullstack, self).setUp()
        self.useFixture(policy_fixture.RealPolicyFixture())
        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture())
        # NOTE(sdague): because this test is primarily an admin API
        # test default self.api to the admin api.
        self.api = api_fixture.admin_api
        self.user_api = api_fixture.api
    def assertFlavorDbEqual(self, flav, flavdb):
        # Assert that the REST representation `flav` agrees with the db row
        # `flavdb` for every field present in the REST dict.
        # a mapping of the REST params to the db fields
        mapping = {
            'name': 'name',
            'disk': 'root_gb',
            'ram': 'memory_mb',
            'vcpus': 'vcpus',
            'id': 'flavorid',
            'swap': 'swap'
        }
        for k, v in six.iteritems(mapping):
            if k in flav:
                self.assertEqual(flav[k], flavdb[v],
                                 "%s != %s" % (flav, flavdb))
    def assertFlavorAPIEqual(self, flav, flavapi):
        # for all keys in the flavor, ensure they are correctly set in
        # flavapi response.
        for k, v in six.iteritems(flav):
            if k in flavapi:
                self.assertEqual(flav[k], flavapi[k],
                                 "%s != %s" % (flav, flavapi))
            else:
                self.fail("Missing key: %s in flavor: %s" % (k, flavapi))
    def assertFlavorInList(self, flav, flavlist):
        # Assert that a flavor with the same id (and name) is present in a
        # GET /flavors response body.
        for item in flavlist['flavors']:
            if flav['id'] == item['id']:
                self.assertEqual(flav['name'], item['name'])
                return
        self.fail("%s not found in %s" % (flav, flavlist))
    def assertFlavorNotInList(self, flav, flavlist):
        # Assert that no flavor with the same id appears in a GET /flavors
        # response body.
        for item in flavlist['flavors']:
            if flav['id'] == item['id']:
                self.fail("%s found in %s" % (flav, flavlist))
    def test_flavor_manage_func_negative(self):
        """Test flavor manage edge conditions.
        - Bogus body is a 400
        - Unknown flavor is a 404
        - Deleting unknown flavor is a 404
        """
        # Test for various API failure conditions
        # bad body is 400
        resp = self.api.api_post('flavors', '', check_response_status=False)
        self.assertEqual(400, resp.status)
        # get unknown flavor is 404
        resp = self.api.api_delete('flavors/foo', check_response_status=False)
        self.assertEqual(404, resp.status)
        # delete unknown flavor is 404
        resp = self.api.api_delete('flavors/foo', check_response_status=False)
        self.assertEqual(404, resp.status)
        ctx = context.get_admin_context()
        # bounds conditions - invalid vcpus
        flav = {'flavor': rand_flavor(vcpus=0)}
        resp = self.api.api_post('flavors', flav, check_response_status=False)
        self.assertEqual(400, resp.status, resp)
        # ... and ensure that we didn't leak it into the db
        self.assertRaises(ex.FlavorNotFound,
                          db.flavor_get_by_flavor_id,
                          ctx, flav['flavor']['id'])
        # bounds conditions - invalid ram
        flav = {'flavor': rand_flavor(ram=0)}
        resp = self.api.api_post('flavors', flav, check_response_status=False)
        self.assertEqual(400, resp.status)
        # ... and ensure that we didn't leak it into the db
        self.assertRaises(ex.FlavorNotFound,
                          db.flavor_get_by_flavor_id,
                          ctx, flav['flavor']['id'])
        # NOTE(sdague): if there are other bounds conditions that
        # should be checked, stack them up here.
    def test_flavor_manage_deleted(self):
        """Ensure the behavior around a deleted flavor is stable.
        - Fetching a deleted flavor works, and returns the flavor info.
        - Listings should not contain deleted flavors
        """
        # create a deleted flavor
        new_flav = {'flavor': rand_flavor()}
        self.api.api_post('flavors', new_flav)
        self.api.api_delete('flavors/%s' % new_flav['flavor']['id'])
        # It is valid to directly fetch details of a deleted flavor
        resp = self.api.api_get('flavors/%s' % new_flav['flavor']['id'])
        self.assertEqual(200, resp.status)
        self.assertFlavorAPIEqual(new_flav['flavor'], resp.body['flavor'])
        # deleted flavor should not show up in a list
        resp = self.api.api_get('flavors')
        self.assertFlavorNotInList(new_flav['flavor'], resp.body)
    def test_flavor_manage_func(self):
        """Basic flavor creation lifecycle testing.
        - Creating a flavor
        - Ensure it's in the database
        - Ensure it's in the listing
        - Delete it
        - Ensure it's hidden in the database
        """
        ctx = context.get_admin_context()
        flav1 = {
            'flavor': rand_flavor(),
        }
        # Create flavor and ensure it made it to the database
        self.api.api_post('flavors', flav1)
        flav1db = db.flavor_get_by_flavor_id(ctx, flav1['flavor']['id'])
        self.assertFlavorDbEqual(flav1['flavor'], flav1db)
        # Ensure new flavor is seen in the listing
        resp = self.api.api_get('flavors')
        self.assertFlavorInList(flav1['flavor'], resp.body)
        # Delete flavor and ensure it was removed from the database
        self.api.api_delete('flavors/%s' % flav1['flavor']['id'])
        self.assertRaises(ex.FlavorNotFound,
                          db.flavor_get_by_flavor_id,
                          ctx, flav1['flavor']['id'])
        # deleting it a second time is a 404, not an error
        resp = self.api.api_delete('flavors/%s' % flav1['flavor']['id'],
                                   check_response_status=False)
        self.assertEqual(404, resp.status)
    def test_flavor_manage_permissions(self):
        """Ensure that regular users can't create or delete flavors.
        """
        ctx = context.get_admin_context()
        flav1 = {'flavor': rand_flavor()}
        # Ensure user can't create flavor
        resp = self.user_api.api_post('flavors', flav1,
                                      check_response_status=False)
        self.assertEqual(403, resp.status)
        # ... and that it didn't leak through
        self.assertRaises(ex.FlavorNotFound,
                          db.flavor_get_by_flavor_id,
                          ctx, flav1['flavor']['id'])
        # Create the flavor as the admin user
        self.api.api_post('flavors', flav1)
        # Ensure user can't delete flavors from our cloud
        resp = self.user_api.api_delete('flavors/%s' % flav1['flavor']['id'],
                                        check_response_status=False)
        self.assertEqual(403, resp.status)
        # ... and ensure that we didn't actually delete the flavor,
        # this will throw an exception if we did.
        db.flavor_get_by_flavor_id(ctx, flav1['flavor']['id'])
| |
from __future__ import absolute_import, division, print_function
import logging; _L = logging.getLogger('openaddr.cache')
import os
import errno
import math
import mimetypes
import shutil
import re
import csv
import simplejson as json
from os import mkdir
from hashlib import md5
from os.path import join, basename, exists, abspath, splitext
from urllib.parse import urlparse
from subprocess import check_output
from tempfile import mkstemp
from hashlib import sha1
from shutil import move
from shapely.geometry import shape
from esridump import EsriDumper
from esridump.errors import EsriDownloadError
import requests
# HTTP timeout in seconds, used in various calls to requests.get() and requests.post()
_http_timeout = 180
from .conform import X_FIELDNAME, Y_FIELDNAME, GEOM_FIELDNAME, attrib_types
from . import util
def mkdirsp(path):
    ''' Create directory *path* and any missing parents, like ``mkdir -p``.

        An already-existing directory is not an error; any other failure
        (permission denied, path exists as a regular file) propagates.
    '''
    # exist_ok=True reproduces the old EEXIST-and-isdir dance: the error is
    # suppressed only when the final component is an existing directory.
    os.makedirs(path, exist_ok=True)
def traverse(item):
    "Iterates over nested iterables"
    # Only lists are descended into; every other value (including tuples
    # and strings) is yielded as a single leaf.
    if isinstance(item, list):
        for element in item:
            yield from traverse(element)
    else:
        yield item
def request(method, url, **kwargs):
    ''' Issue an HTTP(S) or FTP request, retrying HTTPS once without cert checks. '''
    if urlparse(url).scheme == 'ftp':
        # FTP sources only support plain downloads.
        if method != 'GET':
            raise NotImplementedError("Don't know how to {} with {}".format(method, url))
        return util.request_ftp_file(url)
    try:
        _L.debug("Requesting %s with args %s", url, kwargs.get('params') or kwargs.get('data'))
        return requests.request(method, url, timeout=_http_timeout, **kwargs)
    except requests.exceptions.SSLError:
        # Some sources present broken certificates; retry once unverified.
        _L.warning("Retrying %s without SSL verification", url)
        return requests.request(method, url, timeout=_http_timeout, verify=False, **kwargs)
class CacheResult:
    ''' Outcome of one cache run: where the data landed plus bookkeeping. '''
    # class-level defaults so the attributes exist even on a bare instance
    cache = None
    fingerprint = None
    version = None
    elapsed = None

    def __init__(self, cache, fingerprint, version, elapsed):
        self.cache, self.fingerprint = cache, fingerprint
        self.version, self.elapsed = version, elapsed

    @staticmethod
    def empty():
        ''' Return a CacheResult with every field unset. '''
        return CacheResult(None, None, None, None)

    def todict(self):
        ''' Serializable summary; elapsed is deliberately left out. '''
        return dict(cache=self.cache, fingerprint=self.fingerprint, version=self.version)
def compare_cache_details(filepath, resultdir, data):
    ''' Compare cache file with known source data, return cache and fingerprint.

        Checks if fresh data is already cached, returns a new file path if not.
    '''
    if not exists(filepath):
        raise Exception('cached file {} is missing'.format(filepath))
    # Hash the cached file line by line rather than loading it whole.
    digest = md5()
    with open(filepath, 'rb') as stream:
        for chunk in stream:
            digest.update(chunk)
    new_fingerprint = digest.hexdigest()
    # Unchanged data behind an http cache: reuse the previous location.
    cached_over_http = urlparse(data.get('cache', '')).scheme == 'http'
    if cached_over_http and data.get('fingerprint') == new_fingerprint:
        return data['cache'], data['fingerprint']
    # Fresh data: move it into the result directory and point at it.
    cache_name = basename(filepath)
    if not exists(resultdir):
        mkdir(resultdir)
    move(filepath, join(resultdir, cache_name))
    return 'file://' + join(abspath(resultdir), cache_name), new_fingerprint
class DownloadError(Exception):
    """Raised when a source download fails (connection error or 4xx response)."""
    pass
class DownloadTask(object):
    ''' Base class for downloading one source; subclasses implement download().
    '''
    def __init__(self, source_prefix, params=None, headers=None):
        '''
            source_prefix: source name like "us-ca-oakland", used for output filenames.
            params: Additional query parameters, used by EsriRestDownloadTask.
            headers: Additional HTTP headers.
        '''
        self.source_prefix = source_prefix
        self.headers = {
            'User-Agent': 'openaddresses-extract/1.0 (https://github.com/openaddresses/openaddresses)',
        }
        # None defaults (instead of the old mutable {} defaults) avoid the
        # shared-mutable-default pitfall; dict()/update() also accept any
        # mapping, not just string-keyed kwargs as dict(**d) required.
        self.headers.update(headers or {})
        self.query_params = dict(params or {})

    @classmethod
    def from_protocol_string(clz, protocol_string, source_prefix=None):
        ''' Map a source protocol string onto the task class that handles it.

            Raises KeyError for an unrecognized protocol.
        '''
        protocol = protocol_string.lower()
        if protocol in ('http', 'ftp'):
            # both plain protocols go through the generic URL downloader
            return URLDownloadTask(source_prefix)
        elif protocol == 'esri':
            return EsriRestDownloadTask(source_prefix)
        else:
            raise KeyError("I don't know how to extract for protocol {}".format(protocol_string))

    def download(self, source_urls, workdir, conform):
        ''' Fetch source_urls into workdir; must be overridden. '''
        raise NotImplementedError()
def guess_url_file_extension(url):
    ''' Get a filename extension for a URL using various hints.

        Tries, in order: the URL path's own extension, the Content-Type
        header (cross-checked against Content-Disposition), and finally the
        `file` utility run against a sample of the content.
    '''
    scheme, _, path, _, query, _ = urlparse(url)
    # register types that mimetypes does not know out of the box
    mimetypes.add_type('application/x-zip-compressed', '.zip', False)
    mimetypes.add_type('application/vnd.geo+json', '.json', False)
    _, likely_ext = os.path.splitext(path)
    # dynamic-page extensions say nothing about the payload's actual type
    bad_extensions = '', '.cgi', '.php', '.aspx', '.asp', '.do'
    if not query and likely_ext not in bad_extensions:
        #
        # Trust simple URLs without meaningless filename extensions.
        #
        _L.debug(u'URL says "{}" for {}'.format(likely_ext, url))
        path_ext = likely_ext
    else:
        #
        # Get a dictionary of headers and a few bytes of content from the URL.
        #
        if scheme in ('http', 'https'):
            response = request('GET', url, stream=True)
            # 99 bytes is enough for `file` to identify common formats below
            content_chunk = next(response.iter_content(99))
            headers = response.headers
            response.close()
        elif scheme in ('file', ''):
            headers = dict()
            with open(path) as file:
                content_chunk = file.read(99)
        else:
            raise ValueError('Unknown scheme "{}": {}'.format(scheme, url))
        path_ext = False
        # Guess path extension from Content-Type header
        if 'content-type' in headers:
            content_type = headers['content-type'].split(';')[0]
            _L.debug('Content-Type says "{}" for {}'.format(content_type, url))
            path_ext = mimetypes.guess_extension(content_type, False)
            #
            # Uh-oh, see if Content-Disposition disagrees with Content-Type.
            # Socrata recently started using Content-Disposition instead
            # of normal response headers so it's no longer easy to identify
            # file type.
            #
            if 'content-disposition' in headers:
                pattern = r'attachment; filename=("?)(?P<filename>[^;]+)\1'
                match = re.match(pattern, headers['content-disposition'], re.I)
                if match:
                    _, attachment_ext = splitext(match.group('filename'))
                    if path_ext == attachment_ext:
                        _L.debug('Content-Disposition agrees: "{}"'.format(match.group('filename')))
                    else:
                        # conflicting hints: fall through to content sniffing
                        _L.debug('Content-Disposition disagrees: "{}"'.format(match.group('filename')))
                        path_ext = False
        if not path_ext:
            #
            # Headers didn't clearly define a known extension.
            # Instead, shell out to `file` to peek at the content.
            #
            mime_type = get_content_mimetype(content_chunk)
            _L.debug('file says "{}" for {}'.format(mime_type, url))
            path_ext = mimetypes.guess_extension(mime_type, False)
    return path_ext
def get_content_mimetype(chunk):
    ''' Get a mime-type for a short length of file content.

        Writes the bytes to a temporary file and asks the `file` utility for
        its mime type. The temporary file is always removed, even when the
        write or the subprocess call fails (the original leaked it on error).
    '''
    handle, path = mkstemp()
    try:
        os.write(handle, chunk)
    finally:
        # close the descriptor whether or not the write succeeded
        os.close(handle)
    try:
        mime_type = check_output(('file', '--mime-type', '-b', path)).strip()
    finally:
        os.remove(path)
    return mime_type.decode('utf-8')
class URLDownloadTask(DownloadTask):
    # Streamed-download chunk size in bytes.
    CHUNK = 16 * 1024
    def get_file_path(self, url, dir_path):
        ''' Return a local file path in a directory for a URL.
        May need to fill in a filename extension based on HTTP Content-Type.
        '''
        scheme, host, path, _, _, _ = urlparse(url)
        path_base, _ = os.path.splitext(path)
        if self.source_prefix is None:
            # With no source prefix like "us-ca-oakland" use the name as given.
            name_base = os.path.basename(path_base)
        else:
            # With a source prefix, create a safe and unique filename with a hash.
            hash = sha1((host + path_base).encode('utf-8'))
            name_base = u'{}-{}'.format(self.source_prefix, hash.hexdigest()[:8])
        # May issue a network request to sniff the extension when the URL
        # itself is ambiguous.
        path_ext = guess_url_file_extension(url)
        _L.debug(u'Guessed {}{} for {}'.format(name_base, path_ext, url))
        return os.path.join(dir_path, name_base + path_ext)
    def download(self, source_urls, workdir, conform=None):
        ''' Download each source URL into workdir/http, returning local paths.

            Already-downloaded files are reused; 4xx responses raise
            DownloadError. `conform` is accepted for interface parity but
            unused here.
        '''
        output_files = []
        download_path = os.path.join(workdir, 'http')
        mkdirsp(download_path)
        for source_url in source_urls:
            file_path = self.get_file_path(source_url, download_path)
            # FIXME: For URLs with file:// scheme, simply copy the file
            # to the expected location so that os.path.exists() returns True.
            # Instead, implement a FileDownloadTask class?
            scheme, _, path, _, _, _ = urlparse(source_url)
            if scheme == 'file':
                shutil.copy(path, file_path)
            if os.path.exists(file_path):
                output_files.append(file_path)
                _L.debug("File exists %s", file_path)
                continue
            try:
                resp = request('GET', source_url, headers=self.headers, stream=True)
            except Exception as e:
                raise DownloadError("Could not connect to URL", e)
            if resp.status_code in range(400, 499):
                raise DownloadError('{} response from {}'.format(resp.status_code, source_url))
            # stream the body to disk in CHUNK-sized pieces
            size = 0
            with open(file_path, 'wb') as fp:
                for chunk in resp.iter_content(self.CHUNK):
                    size += len(chunk)
                    fp.write(chunk)
            output_files.append(file_path)
            _L.info("Downloaded %s bytes for file %s", size, file_path)
        return output_files
class EsriRestDownloadTask(DownloadTask):
    def get_file_path(self, url, dir_path):
        ''' Return a local file path in a directory for a URL.

            ESRI layers are always dumped to CSV, so the extension is fixed.
        '''
        _, host, path, _, _, _ = urlparse(url)
        hash, path_ext = sha1((host + path).encode('utf-8')), '.csv'
        # With no source prefix like "us-ca-oakland" use the host as a hint.
        name_base = '{}-{}'.format(self.source_prefix or host, hash.hexdigest()[:8])
        _L.debug('Downloading {} to {}{}'.format(url, name_base, path_ext))
        return os.path.join(dir_path, name_base + path_ext)
    @classmethod
    def fields_from_conform_function(cls, v):
        ''' Return the set of source field names referenced by one conform function dict.

            NOTE(review): a truthy 'function' other than join/format/chain falls
            through and returns None implicitly, which would break the set-union
            in field_names_to_request -- confirm all conform functions are covered.
        '''
        fxn = v.get('function')
        if fxn:
            if fxn in ('join', 'format'):
                return set(v['fields'])
            elif fxn == 'chain':
                fields = set()
                # chain-local variables are not real source fields; exclude them
                user_vars = set([v['variable']])
                for func in v['functions']:
                    if isinstance(func, dict) and 'function' in func:
                        fields |= cls.fields_from_conform_function(func) - user_vars
                return fields
        else:
            return set([v.get('field')])
    @classmethod
    def field_names_to_request(cls, conform):
        ''' Return list of fieldnames to request based on conform, or None.
        '''
        if not conform:
            return None
        fields = set()
        for k, v in conform.items():
            # only attribute-producing conform keys reference source fields
            if k in attrib_types:
                if isinstance(v, dict):
                    # It's a function of some sort?
                    if 'function' in v:
                        fields |= cls.fields_from_conform_function(v)
                elif isinstance(v, list):
                    # It's a list of field names
                    fields |= set(v)
                else:
                    fields.add(v)
        if fields:
            return list(filter(None, sorted(fields)))
        else:
            return None
    def download(self, source_urls, workdir, conform=None):
        ''' Dump each ESRI feature layer in source_urls to a CSV under workdir/esri.

            Each row carries the source attributes plus WKT geometry and a
            centroid (X/Y); features with unusable geometry are skipped.
            Returns the list of local CSV paths; existing files are reused.
        '''
        output_files = []
        download_path = os.path.join(workdir, 'esri')
        mkdirsp(download_path)
        query_fields = EsriRestDownloadTask.field_names_to_request(conform)
        for source_url in source_urls:
            size = 0
            file_path = self.get_file_path(source_url, download_path)
            if os.path.exists(file_path):
                output_files.append(file_path)
                _L.debug("File exists %s", file_path)
                continue
            downloader = EsriDumper(source_url, parent_logger=_L, timeout=300)
            metadata = downloader.get_metadata()
            if query_fields is None:
                # no conform hints: request every field the layer declares
                field_names = [f['name'] for f in metadata['fields']]
            else:
                field_names = query_fields[:]
            # the synthesized geometry columns always ride along
            if X_FIELDNAME not in field_names:
                field_names.append(X_FIELDNAME)
            if Y_FIELDNAME not in field_names:
                field_names.append(Y_FIELDNAME)
            if GEOM_FIELDNAME not in field_names:
                field_names.append(GEOM_FIELDNAME)
            # Get the count of rows in the layer
            try:
                row_count = downloader.get_feature_count()
                _L.info("Source has {} rows".format(row_count))
            except EsriDownloadError:
                _L.info("Source doesn't support count")
            with open(file_path, 'w', encoding='utf-8') as f:
                writer = csv.DictWriter(f, fieldnames=field_names)
                writer.writeheader()
                for feature in downloader:
                    try:
                        geom = feature.get('geometry') or {}
                        row = feature.get('properties') or {}
                        if not geom:
                            raise TypeError("No geometry parsed")
                        if any((isinstance(g, float) and math.isnan(g)) for g in traverse(geom)):
                            raise TypeError("Geometry has NaN coordinates")
                        shp = shape(feature['geometry'])
                        row[GEOM_FIELDNAME] = shp.wkt
                        try:
                            centroid = shp.centroid
                        except RuntimeError as e:
                            if 'Invalid number of points in LinearRing found' not in str(e):
                                raise
                            # degenerate ring: fall back to the bounding-box center
                            xmin, xmax, ymin, ymax = shp.bounds
                            row[X_FIELDNAME] = round(xmin/2 + xmax/2, 7)
                            row[Y_FIELDNAME] = round(ymin/2 + ymax/2, 7)
                        else:
                            if centroid.is_empty:
                                raise TypeError(json.dumps(feature['geometry']))
                            row[X_FIELDNAME] = round(centroid.x, 7)
                            row[Y_FIELDNAME] = round(centroid.y, 7)
                        writer.writerow({fn: row.get(fn) for fn in field_names})
                        size += 1
                    except TypeError:
                        # bad geometry: drop the feature, keep the dump going
                        _L.debug("Skipping a geometry", exc_info=True)
            _L.info("Downloaded %s ESRI features for file %s", size, file_path)
            output_files.append(file_path)
        return output_files
| |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class transformprofile(base_resource) :
	""" Configuration for URL Transformation profile resource. """
	# NOTE(review): generated-style NITRO wrapper. The _underscore attribute
	# names mirror the NITRO JSON payload handled by the payload formatter,
	# so they must not be renamed.
	def __init__(self) :
		self._name = ""
		self._type = ""
		self._onlytransformabsurlinbody = ""
		self._comment = ""
		self._regexforfindingurlinjavascript = ""
		self._regexforfindingurlincss = ""
		self._regexforfindingurlinxcomponent = ""
		self._regexforfindingurlinxml = ""
		self._additionalreqheaderslist = ""
		self._additionalrespheaderslist = ""
		# NOTE(review): read back via __dict__['___count'] in count() /
		# count_filtered(); presumably set directly by the payload
		# deserializer (Python name-mangling stores this assignment under
		# _transformprofile___count) — confirm before changing.
		self.___count = 0
	@property
	def name(self) :
		"""Name for the URL transformation profile. Must begin with a letter, number, or the underscore character (_), and must contain only letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at (@), equals (=), colon (:), and underscore characters. Cannot be changed after the URL transformation profile is added.
		The following requirement applies only to the NetScaler CLI:
		If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, ^A"my transform profile^A" or ^A'my transform profile^A').<br/>Minimum length = 1.
		"""
		try :
			return self._name
		except Exception as e:
			raise e
	@name.setter
	def name(self, name) :
		"""Name for the URL transformation profile. Must begin with a letter, number, or the underscore character (_), and must contain only letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at (@), equals (=), colon (:), and underscore characters. Cannot be changed after the URL transformation profile is added.
		The following requirement applies only to the NetScaler CLI:
		If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, ^A"my transform profile^A" or ^A'my transform profile^A').<br/>Minimum length = 1
		"""
		try :
			self._name = name
		except Exception as e:
			raise e
	@property
	def type(self) :
		"""Type of transformation. Always URL for URL Transformation profiles.<br/>Possible values = URL.
		"""
		try :
			return self._type
		except Exception as e:
			raise e
	@type.setter
	def type(self, type) :
		"""Type of transformation. Always URL for URL Transformation profiles.<br/>Possible values = URL
		"""
		try :
			self._type = type
		except Exception as e:
			raise e
	@property
	def onlytransformabsurlinbody(self) :
		"""In the HTTP body, transform only absolute URLs. Relative URLs are ignored.<br/>Possible values = ON, OFF.
		"""
		try :
			return self._onlytransformabsurlinbody
		except Exception as e:
			raise e
	@onlytransformabsurlinbody.setter
	def onlytransformabsurlinbody(self, onlytransformabsurlinbody) :
		"""In the HTTP body, transform only absolute URLs. Relative URLs are ignored.<br/>Possible values = ON, OFF
		"""
		try :
			self._onlytransformabsurlinbody = onlytransformabsurlinbody
		except Exception as e:
			raise e
	@property
	def comment(self) :
		"""Any comments to preserve information about this URL Transformation profile.
		"""
		try :
			return self._comment
		except Exception as e:
			raise e
	@comment.setter
	def comment(self, comment) :
		"""Any comments to preserve information about this URL Transformation profile.
		"""
		try :
			self._comment = comment
		except Exception as e:
			raise e
	# The properties below are read-only: they have no setters and are
	# populated from the NITRO API response.
	@property
	def regexforfindingurlinjavascript(self) :
		"""Patclass having regexes to find the URLs in JavaScript.
		"""
		try :
			return self._regexforfindingurlinjavascript
		except Exception as e:
			raise e
	@property
	def regexforfindingurlincss(self) :
		"""Patclass having regexes to find the URLs in CSS.
		"""
		try :
			return self._regexforfindingurlincss
		except Exception as e:
			raise e
	@property
	def regexforfindingurlinxcomponent(self) :
		"""Patclass having regexes to find the URLs in X-Component.
		"""
		try :
			return self._regexforfindingurlinxcomponent
		except Exception as e:
			raise e
	@property
	def regexforfindingurlinxml(self) :
		"""Patclass having regexes to find the URLs in XML.
		"""
		try :
			return self._regexforfindingurlinxml
		except Exception as e:
			raise e
	@property
	def additionalreqheaderslist(self) :
		"""Patclass having a list of additional request header names that should transformed.
		"""
		try :
			return self._additionalreqheaderslist
		except Exception as e:
			raise e
	@property
	def additionalrespheaderslist(self) :
		"""Patclass having a list of additional response header names that should transformed.
		"""
		try :
			return self._additionalrespheaderslist
		except Exception as e:
			raise e
	def _get_nitro_response(self, service, response) :
		""" converts nitro response into object and returns the object array in case of get request.
		"""
		try :
			result = service.payload_formatter.string_to_resource(transformprofile_response, response, self.__class__.__name__)
			if(result.errorcode != 0) :
				# Clear the cached session before deciding whether to raise.
				if (result.errorcode == 444) :
					service.clear_session(self)
				if result.severity :
					# Only severity "ERROR" is fatal; other severities pass.
					if (result.severity == "ERROR") :
						raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
				else :
					raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
			return result.transformprofile
		except Exception as e :
			raise e
	def _get_object_name(self) :
		""" Returns the value of object identifier argument
		"""
		try :
			if (self.name) :
				return str(self.name)
			return None
		except Exception as e :
			raise e
	@classmethod
	def add(cls, client, resource) :
		""" Use this API to add transformprofile.
		"""
		try :
			if type(resource) is not list :
				# Single resource: copy only the writable creation fields.
				addresource = transformprofile()
				addresource.name = resource.name
				addresource.type = resource.type
				return addresource.add_resource(client)
			else :
				# Bulk add: mirror the input list into fresh wrapper objects.
				if (resource and len(resource) > 0) :
					addresources = [ transformprofile() for _ in range(len(resource))]
					for i in range(len(resource)) :
						addresources[i].name = resource[i].name
						addresources[i].type = resource[i].type
				result = cls.add_bulk_request(client, addresources)
				return result
		except Exception as e :
			raise e
	@classmethod
	def delete(cls, client, resource) :
		""" Use this API to delete transformprofile.
		"""
		# Accepts a name string, a transformprofile object, or a list of either.
		try :
			if type(resource) is not list :
				deleteresource = transformprofile()
				if type(resource) != type(deleteresource):
					deleteresource.name = resource
				else :
					deleteresource.name = resource.name
				return deleteresource.delete_resource(client)
			else :
				if type(resource[0]) != cls :
					# List of plain names.
					if (resource and len(resource) > 0) :
						deleteresources = [ transformprofile() for _ in range(len(resource))]
						for i in range(len(resource)) :
							deleteresources[i].name = resource[i]
				else :
					# List of transformprofile objects.
					if (resource and len(resource) > 0) :
						deleteresources = [ transformprofile() for _ in range(len(resource))]
						for i in range(len(resource)) :
							deleteresources[i].name = resource[i].name
				result = cls.delete_bulk_request(client, deleteresources)
				return result
		except Exception as e :
			raise e
	@classmethod
	def update(cls, client, resource) :
		""" Use this API to update transformprofile.
		"""
		try :
			if type(resource) is not list :
				# Single update: copy all mutable fields onto a fresh wrapper.
				updateresource = transformprofile()
				updateresource.name = resource.name
				updateresource.type = resource.type
				updateresource.onlytransformabsurlinbody = resource.onlytransformabsurlinbody
				updateresource.comment = resource.comment
				return updateresource.update_resource(client)
			else :
				if (resource and len(resource) > 0) :
					updateresources = [ transformprofile() for _ in range(len(resource))]
					for i in range(len(resource)) :
						updateresources[i].name = resource[i].name
						updateresources[i].type = resource[i].type
						updateresources[i].onlytransformabsurlinbody = resource[i].onlytransformabsurlinbody
						updateresources[i].comment = resource[i].comment
				result = cls.update_bulk_request(client, updateresources)
				return result
		except Exception as e :
			raise e
	@classmethod
	def unset(cls, client, resource, args) :
		""" Use this API to unset the properties of transformprofile resource.
		Properties that need to be unset are specified in args array.
		"""
		# Accepts a name string, a transformprofile object, or a list of either.
		try :
			if type(resource) is not list :
				unsetresource = transformprofile()
				if type(resource) != type(unsetresource):
					unsetresource.name = resource
				else :
					unsetresource.name = resource.name
				return unsetresource.unset_resource(client, args)
			else :
				if type(resource[0]) != cls :
					# List of plain names.
					if (resource and len(resource) > 0) :
						unsetresources = [ transformprofile() for _ in range(len(resource))]
						for i in range(len(resource)) :
							unsetresources[i].name = resource[i]
				else :
					# List of transformprofile objects.
					if (resource and len(resource) > 0) :
						unsetresources = [ transformprofile() for _ in range(len(resource))]
						for i in range(len(resource)) :
							unsetresources[i].name = resource[i].name
				result = cls.unset_bulk_request(client, unsetresources, args)
				return result
		except Exception as e :
			raise e
	@classmethod
	def get(cls, client, name="", option_="") :
		""" Use this API to fetch all the transformprofile resources that are configured on netscaler.
		"""
		try :
			if not name :
				# No name: fetch every configured profile.
				obj = transformprofile()
				response = obj.get_resources(client, option_)
			else :
				if type(name) != cls :
					if type(name) is not list :
						# Single name: fetch one profile.
						obj = transformprofile()
						obj.name = name
						response = obj.get_resource(client, option_)
					else :
						# List of names: one request per name.
						if name and len(name) > 0 :
							response = [transformprofile() for _ in range(len(name))]
							obj = [transformprofile() for _ in range(len(name))]
							for i in range(len(name)) :
								obj[i] = transformprofile()
								obj[i].name = name[i]
								response[i] = obj[i].get_resource(client, option_)
			return response
		except Exception as e :
			raise e
	@classmethod
	def get_filtered(cls, client, filter_) :
		""" Use this API to fetch filtered set of transformprofile resources.
		filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
		"""
		try :
			obj = transformprofile()
			option_ = options()
			option_.filter = filter_
			response = obj.getfiltered(client, option_)
			return response
		except Exception as e :
			raise e
	@classmethod
	def count(cls, client) :
		""" Use this API to count the transformprofile resources configured on NetScaler.
		"""
		try :
			obj = transformprofile()
			option_ = options()
			option_.count = True
			response = obj.get_resources(client, option_)
			if response :
				# ___count is placed in __dict__ by the response deserializer.
				return response[0].__dict__['___count']
			return 0
		except Exception as e :
			raise e
	@classmethod
	def count_filtered(cls, client, filter_) :
		""" Use this API to count filtered the set of transformprofile resources.
		Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
		"""
		try :
			obj = transformprofile()
			option_ = options()
			option_.count = True
			option_.filter = filter_
			response = obj.getfiltered(client, option_)
			if response :
				# ___count is placed in __dict__ by the response deserializer.
				return response[0].__dict__['___count']
			return 0
		except Exception as e :
			raise e
	# Enumerations of the allowed values for the corresponding setters.
	class Onlytransformabsurlinbody:
		ON = "ON"
		OFF = "OFF"
	class Type:
		URL = "URL"
class transformprofile_response(base_response) :
	""" NITRO response wrapper for transformprofile: holds the payload list
	plus the standard response metadata fields filled in by the deserializer.
	"""
	def __init__(self, length=1) :
		self.errorcode = 0
		self.message = ""
		self.severity = ""
		self.sessionid = ""
		# Pre-allocate `length` payload slots. (The original assigned an
		# empty list first and immediately overwrote it — dead code removed.)
		self.transformprofile = [transformprofile() for _ in range(length)]
| |
#!/usr/bin/env python
# cmpcodesize/main.py - Command-line entry point for cmpcodesize -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
from __future__ import print_function
import argparse
import collections
import glob
import os
import sys
from cmpcodesize.compare import \
compare_function_sizes, compare_sizes_of_file, list_function_sizes,\
read_sizes
# Aliases for well-known build products; each value is a path relative to a
# build-dir (expanded against $SWIFT_OLD_BUILDDIR / $SWIFT_NEW_BUILDDIR in
# main()).
SHORTCUTS = {
    "O": "bin/Benchmark_O",
    "Ounchecked": "bin/Benchmark_Ounchecked",
    "Onone": "bin/Benchmark_Onone",
    "dylib": "lib/swift/macosx/x86_64/libswiftCore.dylib",
}
def main():
    """Command-line entry point: parse arguments, resolve the old/new file
    sets (cases 1-5 in the description below), and run the comparison."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="""
Compares code sizes of "new" files, taking "old" files as a reference.
Environment variables:
    SWIFT_NEW_BUILDDIR   The new build-dir
      E.g. .../swiftnew/build/Ninja-ReleaseAssert+stdlib-Release/swift-macosx-x86_64
    SWIFT_OLD_BUILDDIR   The old build-dir
      E.g. .../swiftold/build/Ninja-ReleaseAssert+stdlib-Release/swift-macosx-x86_64
How to specify files:
1) No files:
    Compares codesize of the Benchmark_* executables and the swiftCore dylib in
    the new and old build-dirs.
    Example:
        cmpcodesize
2) One or more paths relative to the build-dirs (can be a pattern):
    Compares the files in the new and old build-dirs.
    Aliases:
        O          => bin/Benchmark_O
        Ounchecked => bin/Benchmark_Ounchecked
        Onone      => bin/Benchmark_Onone
        dylib      => lib/swift/macosx/x86_64/libswiftCore.dylib
    Examples:
        cmpcodesize Onone
        cmpcodesize benchmark/PerfTestSuite/O/*.o
3) Two files:
    Compares these two files (the first is the old file).
    Example:
        cmpcodesize test.o newversion.o
4) Two lists of files, separated by '--':
    Compares a set of files.
    Example:
        cmpcodesize olddir/*.o -- newdir/*.o
5) One file (only available with the -l option):
    Lists function sizes for that file
    Example:
        cmpcodesize -l test.o""")
    # Optional arguments.
    parser.add_argument('-a', '--additional-sections',
                        help='Show sizes of additional sections.',
                        action='store_true',
                        dest='all_sections',
                        default=False)
    parser.add_argument('-c', '--category',
                        help='Show functions by category.',
                        action='store_true',
                        dest='list_categories',
                        default=False)
    parser.add_argument('-l', '--list',
                        help='List all functions (can be a very long list). ' +
                             'Cannot be used in conjunction with ' +
                             '--additional-sections or --category. ' +
                             'You must specify between one and two files ' +
                             'when using this option.',
                        action='store_true',
                        dest='list_functions',
                        default=False)
    parser.add_argument('-s', '--summarize',
                        help='Summarize the sizes of multiple files instead ' +
                             'of listing each file separately.',
                        action='store_true',
                        dest='sum_sizes',
                        default=False)
    # Positional arguments.
    # These can be specified in means beyond what argparse supports,
    # so we gather them in a list and parse them manually.
    parser.add_argument('files', nargs='*',
                        help='A list of old and new files.')
    # argparse can't handle an '--' argument, so we replace it with
    # a custom identifier.
    separator_token = '*-*-*'
    parsed_arguments = parser.parse_args(
        [separator_token if arg == '--' else arg for arg in sys.argv[1:]])
    if parsed_arguments.list_functions:
        # --list is mutually exclusive with both --additional-sections
        # and --category. argparse is only capable of expressing mutual
        # exclusivity among options, not among groups of options, so
        # we detect this case manually.
        assert (not parsed_arguments.all_sections and
                not parsed_arguments.list_categories), \
            'Incorrect usage: --list cannot be specified in conjunction ' + \
            'with --additional-sections or --category.'
        # A file must be specified when using --list.
        assert parsed_arguments.files, \
            'Incorrect usage: Must specify between one and two files when ' + \
            'using --list, but you specified no files.'
    if separator_token in parsed_arguments.files:
        # Case 4: two explicit file lists separated by '--'.
        separator_index = parsed_arguments.files.index(separator_token)
        old_files = parsed_arguments.files[:separator_index]
        new_files = parsed_arguments.files[separator_index + 1:]
    else:
        # Cases 1-3: resolve arguments (or shortcut aliases) against the
        # old/new build directories taken from the environment.
        old_file_args = parsed_arguments.files
        old_build_dir = os.environ.get("SWIFT_OLD_BUILDDIR")
        new_build_dir = os.environ.get("SWIFT_NEW_BUILDDIR")
        if not parsed_arguments.files:
            # Case 1: no files given — both build dirs must be set and the
            # default shortcut set is compared.
            assert old_build_dir and new_build_dir, \
                'Incorrect usage: You must specify either a list of ' + \
                'files, or have both $SWIFT_OLD_BUILDDIR and ' + \
                '$SWIFT_NEW_BUILDDIR environment variables set.\n' + \
                '$SWIFT_OLD_BUILDDIR = {0}\n$SWIFT_NEW_BUILDDIR = {1}'.format(
                    old_build_dir, new_build_dir)
            old_file_args = list(SHORTCUTS.keys())
        old_files = []
        new_files = []
        num_expanded = 0
        for file in old_file_args:
            # Expand aliases, then try to glob the pattern inside both
            # build dirs; paths starting with "./" are never expanded.
            if file in SHORTCUTS:
                file = SHORTCUTS[file]
            if not file.startswith("./") and old_build_dir and new_build_dir:
                old_expanded = glob.glob(os.path.join(old_build_dir, file))
                new_expanded = glob.glob(os.path.join(new_build_dir, file))
                if old_expanded and new_expanded:
                    old_files.extend(old_expanded)
                    new_files.extend(new_expanded)
                    num_expanded += 1
        # Either all arguments expand inside the build dirs or none do;
        # a mix would compare mismatched sets.
        if num_expanded != 0 and num_expanded != len(old_file_args):
            sys.exit("mix of expanded/not-expanded arguments")
        if num_expanded == 0:
            # Case 3 (or case 5): plain file paths, at most two (old, new).
            if len(old_file_args) > 2:
                sys.exit("too many arguments")
            old_files = old_file_args[0:1]
            new_files = old_file_args[1:2]
    for file in (old_files + new_files):
        if not os.path.isfile(file):
            sys.exit("file " + file + " not found")
    if parsed_arguments.list_functions:
        if not new_files:
            # Case 5: one file set — just list its function sizes.
            sizes = collections.defaultdict(int)
            for file in old_files:
                read_sizes(sizes, file, True, False)
            print(os.linesep.join(list_function_sizes(sizes.items())))
        else:
            compare_function_sizes(old_files, new_files)
    else:
        # Comparison output: header then one or more size tables.
        print("%-26s%16s  %14s  %14s  %s" %
              ("", "Section", "Old", "New", "Percent"))
        if parsed_arguments.sum_sizes:
            compare_sizes_of_file(old_files, new_files,
                                  parsed_arguments.all_sections,
                                  parsed_arguments.list_categories)
        else:
            # Pairwise comparison requires equally sized, sorted lists.
            if len(old_files) != len(new_files):
                sys.exit("number of new files must be the same of old files")
            old_files.sort()
            new_files.sort()
            for old_file, new_file in zip(old_files, new_files):
                compare_sizes_of_file([old_file], [new_file],
                                      parsed_arguments.all_sections,
                                      parsed_arguments.list_categories)
# Script entry point (also importable as a module without side effects).
if __name__ == '__main__':
    main()
| |
import sys
from evoplotter import utils
from evoplotter import plotter
from evoplotter import printer
from evoplotter import reporting
from evoplotter.dims import *
import numpy
import argparse
# Command-line interface. The original help strings were truncated
# ("Name of the file containing ") — completed to describe the actual use:
# positional args are result directories (see `folders = env.dirs` below),
# and -f names a file listing one result directory per line.
options = argparse.ArgumentParser(description="Simple parser for the smtgp results.", add_help=True)
options.add_argument("dirs", type=str, nargs="*", default=None,
                     help="Directories containing the smtgp result files.")
options.add_argument("-f", "--file", type=str, default=None,
                     help="Name of a file containing a list of result directories, one per line.")
##################################################
# MAIN
##################################################
# Checking if the number of arguments is correct.
if len(sys.argv) == 1:
    print("No results directory was specified!")
    exit()
env = options.parse_args()
# Result folders come either from the positional arguments or, with -f,
# from a file listing one folder per line.
if env.file is None:
    folders = env.dirs
else:
    folders = [L.strip() for L in utils.read_lines(env.file)]
# Load every experiment property file found under the given folders.
props = utils.load_properties_dirs(folders)
# for p in props[:3]:
#     print(str(p))
def p_correctness(p):
    """True for runs that finished correctly: both best-of-run result keys
    are present and no solver exception was recorded."""
    if "smtgp.exception.stacktrace" in p:
        return False
    return all(key in p for key in ("result.best.eval", "result.best.isOptimal"))
# Drop runs that did not terminate correctly (missing result keys or a
# recorded solver exception).
props = [p for p in props if p_correctness(p)]
def p_onlyWithHoles(p):
    """True when the run used holes of any kind (constant or variable)."""
    return any(key in p for key in ("smtgp.holesConsts", "smtgp.holesVars"))
def p_optBisecting(p):
    """True when the run used the bisecting optimization mode."""
    mode = p["smtgp.optimizationMode"]
    return mode == "bisecting"
def p_optSolver(p):
    """True when the run used the solver-based optimization mode."""
    mode = p["smtgp.optimizationMode"]
    return mode == "solver"
def p_cv_timed(p):
    """Baseline GP setup (constants + input-variable terminals, no holes)
    with a time budget ("maxTime" set) and population size 250."""
    holes_free = ("smtgp.holesConsts" not in p) and ("smtgp.holesVars" not in p)
    return (p["smtgp.useConstantProvider"] == "true"
            and p["smtgp.useInputVarsAsTerminals"] == "true"
            and holes_free
            and "maxTime" in p
            and p["populationSize"] == "250")
def p_cv(p):
    """Baseline GP setup (constants + input-variable terminals, no holes),
    no time limit, population size 250."""
    holes_free = ("smtgp.holesConsts" not in p) and ("smtgp.holesVars" not in p)
    return (p["smtgp.useConstantProvider"] == "true"
            and p["smtgp.useInputVarsAsTerminals"] == "true"
            and holes_free
            and "maxTime" not in p
            and p["populationSize"] == "250")
def p_cv5000(p):
    """Baseline GP setup (constants + input-variable terminals, no holes),
    no time limit, population size 5000."""
    holes_free = ("smtgp.holesConsts" not in p) and ("smtgp.holesVars" not in p)
    return (p["smtgp.useConstantProvider"] == "true"
            and p["smtgp.useInputVarsAsTerminals"] == "true"
            and holes_free
            and "maxTime" not in p
            and p["populationSize"] == "5000")
def p_cV(p):
    """Variant with variable holes only: constant provider on, input-variable
    terminals off, holesVars present, holesConsts absent."""
    return (p["smtgp.useConstantProvider"] == "true"
            and p["smtgp.useInputVarsAsTerminals"] == "false"
            and "smtgp.holesConsts" not in p
            and "smtgp.holesVars" in p)
def p_Cv(p):
    """Variant with constant holes only: constant provider off, input-variable
    terminals on, holesConsts present, holesVars absent."""
    return (p["smtgp.useConstantProvider"] == "false"
            and p["smtgp.useInputVarsAsTerminals"] == "true"
            and "smtgp.holesConsts" in p
            and "smtgp.holesVars" not in p)
def p_CV(p):
    """Variant with both hole kinds: constant provider and input-variable
    terminals off, both holesConsts and holesVars present."""
    return (p["smtgp.useConstantProvider"] == "false"
            and p["smtgp.useInputVarsAsTerminals"] == "false"
            and "smtgp.holesConsts" in p
            and "smtgp.holesVars" in p)
def p_fill(p):
    """True when hole filling was enabled (key present and set to "true")."""
    return p.get("smtgp.fillHoles") == "true"
def p_notFill(p):
    """True when hole filling was explicitly disabled (key set to "false")."""
    return p.get("smtgp.fillHoles") == "false"
def _uses_tests(p, csv_path):
    """Helper: True when the run was executed on the given benchmark file."""
    return p["smtgp.pathTests"] == csv_path
# Benchmark selectors — one per test-data file.
def p_bench_keijzer12(p):
    return _uses_tests(p, "data/int/keijzer12.csv")
def p_bench_koza1(p):
    return _uses_tests(p, "data/int/koza1.csv")
def p_bench_koza1_p(p):
    return _uses_tests(p, "data/int/koza1-p.csv")
def p_bench_koza1_d2(p):
    return _uses_tests(p, "data/int/koza1-2d.csv")
def p_bench_koza1_p_d2(p):
    return _uses_tests(p, "data/int/koza1-p-2d.csv")
def p_optimal(p):
    """True when the best-of-run solution was flagged as optimal."""
    return p["result.best.isOptimal"] == "1"
# Use LaTeX for all generated plot/table text.
plotter.set_latex(True)
# Benchmark dimension: one Config per data set. NOTE(review): the meaning of
# the third Config argument is defined by evoplotter's Config — presumably a
# per-benchmark constant; confirm against the evoplotter API.
dim_benchmark = Dim([Config(r"\texttt{Keijzer12}", p_bench_keijzer12, 49),
                     Config(r"\texttt{Koza1}", p_bench_koza1, 11),
                     Config(r"\texttt{Koza1-p}", p_bench_koza1_p, 11),
                     Config(r"\texttt{Koza1-2D}", p_bench_koza1_d2, 49),
                     Config(r"\texttt{Koza1-p-2D}", p_bench_koza1_p_d2, 49)])
# Baseline GP variant configs (plain, time-limited, and 5000 population).
conf_cv = Config(r"$GP$", p_cv)
conf_cv_timed = Config(r"$GP_T$", p_cv_timed)
conf_cv_5000 = Config(r"$GP_{5000}$", p_cv5000)
# NOTE(review): these captions end in an open math environment ("$EPS$-$L");
# they appear to be completed by the dim_usedHoles_ho captions ("c$", "v$",
# "{cv}$") when the dimensions are multiplied — confirm the rendering.
dim_fill = Dim([Config(r"$EPS$-$L", p_fill),
                Config(r"$EPS$-$B", p_notFill)])
dim_usedHoles_ho = Dim([Config( "c$", p_Cv),
                        Config( "v$", p_cV),
                        Config( "{cv}$", p_CV)])
# Optimization-mode dimension used for solver statistics.
dim_optMode = Dim([Config("optSolver", p_optSolver),
                   Config("optBisecting", p_optBisecting)])
def get_num_optimal(props):
    """Return how many runs produced an optimal best-of-run solution."""
    return sum(1 for p in props if p["result.best.isOptimal"] == "1")
def get_stats_fitness(props):
    """Return (mean, std) of best-of-run fitness; (-1.0, -1.0) when empty."""
    values = [float(p["result.best.eval"]) for p in props]
    if not values:
        return -1.0, -1.0
    return numpy.mean(values), numpy.std(values)
def get_stats_duration(props):
    """Return (mean, std) of total system time in seconds (the property is
    stored in milliseconds); (-1.0, -1.0) when the list is empty."""
    seconds = [float(p["result.totalTimeSystem"]) / 1000 for p in props]
    if not seconds:
        return -1.0, -1.0
    return numpy.mean(seconds), numpy.std(seconds)
def get_gens_of_best(props):
    """Generation numbers (as raw strings) of the optimal best-of-run
    solutions, in input order."""
    gens = []
    for p in props:
        if p["result.best.isOptimal"] == "1":
            gens.append(p["result.best.generation"])
    return gens
def get_sum(props, key):
    """Sum of the float-parsed values of *key* over all props (0 if empty)."""
    return sum(float(p[key]) for p in props)
def round_tuple_str(tup):
    """Format a 2-tuple of numbers as '(x.xx, y.yy)'."""
    return "(%0.2f, %0.2f)" % (tup[0], tup[1])
def print_stats_filtered(filtered, show_solver_stats = True):
    """Prints run statistics for a provided list of results."""
    print("number of results: " + str(len(filtered)))
    if len(filtered) != 0:
        num_opt = get_num_optimal(filtered)
        print("optimal gen nums: " + str(get_gens_of_best(filtered)))
        print("optimal: " + str(num_opt))
        print("success rate: " + str(round(float(num_opt) / float(len(filtered)), 3)))
        print("(avg, std) fitness of bestOfRun: " + round_tuple_str(get_stats_fitness(filtered)))
        print("(avg, std) totalTimeSystem [s]: " + round_tuple_str(get_stats_duration(filtered)))
        print("")
    if show_solver_stats:
        # Aggregate solver-evaluation counters across all runs.
        evalSolver = get_sum(filtered, "result.stats.evaluatedSolver")
        evalSolverUnknown = get_sum(filtered, "result.stats.evaluatedSolverUnknown")
        evalSolverTimeout = get_sum(filtered, "result.stats.evaluatedSolverTimeout")
        # Literal "None" is printed when no solver evaluations were recorded.
        percentUnsuccessful = None
        if evalSolver > 0:
            percentUnsuccessful = '%.4f' % (float(evalSolverTimeout + evalSolverUnknown) / float(evalSolver))
        print("evalSolverTotal: " + str(evalSolver))
        print("evalSolverUnknown: " + str(evalSolverUnknown))
        print("evalSolverTimeout: " + str(evalSolverTimeout))
        print("ratio unsuccessful: " + str(percentUnsuccessful))
def print_table(props, dim_rows, dim_cols):
    """Print color-mapped LaTeX tables (status, #optima, avg fitness, avg
    runtime, ratio of unsuccessful solver evals) and bundle them into a PDF
    report saved as eps_results.tex."""
    def fun0(filtered):
        # Cell value: number of runs matching this row/column combination.
        return str(len(filtered))
    textStatus = printer.latex_table(props, dim_rows, dim_cols, fun0, latexize_underscores=False)
    textStatus = printer.table_color_map(textStatus, 0.0, 50.0, 100.0)
    print(textStatus)
    print("\n\n")
    def fun1(filtered):
        # Cell value: number of runs that found an optimal solution.
        if len(filtered) == 0:
            return "-"
        num_opt = get_num_optimal(filtered)
        # return "{0}/{1}".format(str(num_opt), str(len(filtered)))
        return "{0}".format(str(num_opt))
    textNumOptimal = printer.latex_table(props, dim_rows, dim_cols, fun1, latexize_underscores=False)
    textNumOptimal = printer.table_color_map(textNumOptimal, 0.0, 50.0, 100.0)
    print(textNumOptimal)
    print("\n\n")
    def fun2(filtered):
        # Cell value: average best-of-run fitness, rounded to 2 decimals.
        if len(filtered) == 0:
            return "-"
        avgFit = round(get_stats_fitness(filtered)[0], 2)
        return "{0}".format(str(avgFit))
    textAvgFitness = printer.latex_table(props, dim_rows, dim_cols, fun2, latexize_underscores=False)
    textAvgFitness = printer.table_color_map(textAvgFitness, 0.0, 25.0, 50.0)
    print(textAvgFitness)
    print("\n\n")
    def fun3(filtered):
        # Cell value: average total system time in seconds.
        if len(filtered) == 0:
            return "-"
        avg_time = round(get_stats_duration(filtered)[0], 1)
        return "{0}".format(str(avg_time))
    textAvgRuntime = printer.latex_table(props, dim_rows, dim_cols, fun3, latexize_underscores=False)
    textAvgRuntime = printer.table_color_map(textAvgRuntime, 0.0, 1000, 10000)
    print(textAvgRuntime)
    print("\n\n")
    def fun4(filtered):
        # Cell value: fraction of solver evaluations ending unknown/timeout.
        if len(filtered) == 0:
            return "-"
        evalSolver = get_sum(filtered, "result.stats.evaluatedSolver")
        evalSolverUnknown = get_sum(filtered, "result.stats.evaluatedSolverUnknown")
        evalSolverTimeout = get_sum(filtered, "result.stats.evaluatedSolverTimeout")
        if evalSolver > 0:
            percentUnsuccessful = float(evalSolverTimeout + evalSolverUnknown) / float(evalSolver)
            return str(round(percentUnsuccessful,3))
        else:
            return "-"
    textRatioOfUnknowns = printer.latex_table(props, dim_rows, dim_cols, fun4, latexize_underscores=False)
    textRatioOfUnknowns = printer.table_color_map(textRatioOfUnknowns, 0.0, 0.3, 0.6)
    print(textRatioOfUnknowns)
    print("\n\n")
    # Assemble all tables into a single LaTeX report and compile it.
    report = reporting.ReportPDF()
    section1 = reporting.Section("Experiments", [])
    subsects = [("Status (correctly finished processes)", textStatus, reversed(reporting.color_scheme_red)),
                ("Number of optimal solutions (max=100)", textNumOptimal, reporting.color_scheme_green),
                ("Average fitness", textAvgFitness, reporting.color_scheme_green),
                ("Average runtime", textAvgRuntime, reporting.color_scheme_blue),
                ("Ratio of unknowns", textRatioOfUnknowns, reporting.color_scheme_yellow)]
    for title, table, cs in subsects:
        if isinstance(cs, reporting.ColorScheme3):
            cs = cs.toBlockLatex()
        sub = reporting.Subsection(title, [cs, reporting.BlockLatex(table + "\n")])
        section1.add(sub)
    report.add(section1)
    report.save_and_compile("eps_results.tex")
def print_stats_unsuccesful(filtered):
    """Build a text summary of unsuccessful solver evaluations for a list of
    results. Despite the name, the text is returned rather than printed."""
    total = get_sum(filtered, "result.stats.evaluatedSolver")
    unsat = get_sum(filtered, "result.stats.evaluatedSolverUnsat")
    unknown = get_sum(filtered, "result.stats.evaluatedSolverUnknown")
    timeout = get_sum(filtered, "result.stats.evaluatedSolverTimeout")
    # Literal "None" appears in the output if no evaluations were recorded.
    ratio = None
    if total > 0:
        ratio = '%.4f' % (float(timeout + unknown + unsat) / float(total))
    lines = [
        "numRuns: " + str(len(filtered)),
        "evalSolverTotal: " + str(total),
        "evalSolverUnsat: " + str(unsat),
        "evalSolverUnknown: " + str(unknown),
        "evalSolverTimeout: " + str(timeout),
        "% unsuccessful: " + str(ratio),
        "(avg, std) fitness of bestOfRun: " + ('(%.2f, %.2f)' % get_stats_fitness(filtered)),
        "(avg, std) totalTimeSystem [s]: " + ('(%.1f, %.1f)' % get_stats_duration(filtered)),
    ]
    return "\n".join(lines) + "\n"
def print_stats(props, dim):
    """Loops over all possible configs and filters results depending on the
    configs predicates. For each config the standard run stats are printed."""
    for config in dim:
        subset = config.filter_props(props)
        caption = config.get_caption()
        print("(*) VARIANT: " + caption)
        print_stats_filtered(subset)
        print("\n\n\n\n")
def print_opt_mode_stats(props):
    """Print aggregated unsuccessful-solver-evaluation statistics, split by
    optimization mode, restricted to runs that used holes."""
    with_holes = [p for p in props if p_onlyWithHoles(p)]
    listing = printer.text_listing(with_holes, dim_optMode, print_stats_unsuccesful)
    print(listing)
def search_differing_evals(props):
    """Searching for runs with differing result.best.eval and result.best.evalNormally."""
    for p in props:
        # NOTE: this local deliberately mirrors the property name but shadows
        # the built-in eval() inside the loop body.
        eval = int(p["result.best.eval"])
        evalNormally = int(p["result.best.evalNormally"])
        if eval != evalNormally:
            sol = p["result.best"]
            sol_fill = p["result.best.holesFilled"]
            print("File: " + p["thisFileName"])
            # Classify the hole variant from which hole keys are present.
            if "smtgp.holesConsts" in p and "smtgp.holesVars" in p:
                print("variant: CV")
            elif "smtgp.holesConsts" in p and "smtgp.holesVars" not in p:
                print("variant: Cv")
            elif "smtgp.holesConsts" not in p and "smtgp.holesVars" in p:
                print("variant: cV")
            else:
                print("variant: cv")
            if p["smtgp.fillHoles"] == "true":
                print("fillHoles: 1")
            else:
                print("fillHoles: 0")
            if p_optSolver(p):
                print("optSolver")
            else:
                print("optBisecting")
            # Column-align the two solution strings for readability.
            print("solution: ".ljust(26) + sol)
            print("solution filled holes: ".ljust(26) + sol_fill)
            print("eval: " + str(eval))
            print("evalNormally: " + str(evalNormally))
            print("\n\n")
def print_optimals(props):
    """Print, and save to figures/optimals.txt, the hole-filled form of every
    optimal solution, listed per variant configuration (timed GP plus the
    holes x fill variants)."""
    props = [p for p in props if p_optimal(p)]
    dim = Dim(conf_cv_timed)
    dim += dim_usedHoles_ho * dim_fill
    def print_optimal(p):
        # Only the hole-filled representation is reported. (The original also
        # read p["result.best"] into an unused local; that dead read was
        # removed.)
        optFilled = p["result.best.holesFilled"]
        return "Found optimal:\t" + optFilled + "\n"
    text = printer.text_listing(props, dim, print_optimal)
    print(text)
    utils.save_to_file("figures/optimals.txt", text)
def print_optimals_per_benchmark(props):
    """Print, and save to figures/optimals.txt, the hole-filled optimal
    solutions grouped by benchmark and variant configuration."""
    optimal_props = [p for p in props if p_optimal(p)]
    variants = Dim([conf_cv, conf_cv_timed, conf_cv_5000]) + dim_usedHoles_ho * dim_fill
    def fmt_optimal(p):
        # One hole-filled solution per line.
        return p["result.best.holesFilled"] + "\n"
    text = printer.text_listing(optimal_props, dim_benchmark * variants,
                                fmt_optimal, is_fun_single_prop=True)
    print(text)
    utils.save_to_file("figures/optimals.txt", text)
def draw_boxplots(props):
    """Draw fitness box plots comparing every variant on every benchmark."""
    variants = Dim([conf_cv, conf_cv_timed, conf_cv_5000]) + dim_fill * dim_usedHoles_ho
    plotter.compare_fitness_on_benchmarks(props, dim_benchmark, variants,
                                          use_latex=True)
def draw_fitness_progression(props, plot_individual_runs=True):
    """Plot a grid of fitness-progression curves for the baseline GP and the
    fill x holes variants, per benchmark."""
    variants = Dim(conf_cv) + dim_fill * dim_usedHoles_ho
    plotter.plot_value_progression_grid(props, dim_benchmark, variants,
                                        plot_individual_runs=plot_individual_runs)
# Restrict analysis to solver-mode runs (plus baseline GP — see trailing note).
props = [p for p in props if p_optSolver(p) or p_cv(p)] #p_cv added because by mistake they have set 'bisecting' flag.
# dim = dim_benchmark * dim_usedHoles_ho * dim_fill
# print_stats(props, dim)
# Printing a table with results.
dim_variants = Dim([conf_cv, conf_cv_timed, conf_cv_5000])
dim_variants += dim_fill * dim_usedHoles_ho
# print_table(props, dim_variants, dim_benchmark)
print_table(props, dim_benchmark, dim_variants)
# The remaining analyses are kept but disabled; enable as needed.
# print_opt_mode_stats(props)
# print_optimals(props)
# print_optimals_per_benchmark(props)
# search_differing_evals(props)
draw_boxplots(props)
# draw_fitness_progression(props, plot_individual_runs=True)
# draw_fitness_progression(props, plot_individual_runs=False)
| |
import numpy as np
import matplotlib.pyplot as plt
from plotstuff import colours
cols = colours()
# Global matplotlib styling: larger label/tick fonts and LaTeX text rendering.
plotpar = {'axes.labelsize': 16,
           'xtick.labelsize': 16,
           'ytick.labelsize': 16,
           'text.usetex': True}
plt.rcParams.update(plotpar)
def make_plot(data, b):
    """
    Scatter plots of recovered period vs injected periods, coloured by
    r-magnitude, teff and amplitude.

    data: tuple of (pers, periods, log_amps, teffs, rmags, amps, noises_ppm).
    b: suffix used in the output PDF file names.
    """
    pers, periods, log_amps, teffs, rmags, amps, noises_ppm = data
    xs = np.linspace(0, max(pers))
    # LaTeX labels use raw strings: "\m" etc. are invalid Python escape
    # sequences (W605) in plain strings; the byte values are unchanged.
    plt.clf()
    # 1:1 guide line — perfect recovery lies on the diagonal.
    plt.plot(xs, xs, ":", color="k")
    plt.scatter(pers, periods, c=teffs, vmin=3000, vmax=8000,
                edgecolor="face", cmap="BuPu", s=8)
    plt.colorbar(label=r"$\mathrm{T}_{\mathrm{eff}}~\mathrm{(K)}$")
    plt.ylabel(r"$\mathrm{Measured~Period~(Days)}$")
    plt.xlabel(r"$\mathrm{Injected~Period~(Days)}$")
    plt.xlim(0, max(pers))
    plt.ylim(0, max(periods))
    plt.savefig("pvp_T_{0}.pdf".format(b))
    plt.clf()
    plt.plot(xs, xs, ":", color="k")
    plt.scatter(pers, periods, c=rmags,
                edgecolor="face", cmap="GnBu_r", s=8)
    plt.colorbar(label=r"$\mathrm{r-band~magnitude}$")
    plt.ylabel(r"$\mathrm{Measured~Period~(Days)}$")
    plt.xlabel(r"$\mathrm{Injected~Period~(Days)}$")
    plt.xlim(0, max(pers))
    plt.ylim(0, max(periods))
    plt.savefig("pvp_r_{0}.pdf".format(b))
    plt.clf()
    plt.plot(xs, xs, ":", color="k")
    plt.scatter(pers, periods, c=log_amps,
                edgecolor="face", cmap="PuRd", s=8)
    plt.colorbar(label=r"$\log\mathrm{(Amplitude)~(ppt)}$")
    plt.ylabel(r"$\mathrm{Measured~Period~(Days)}$")
    plt.xlabel(r"$\mathrm{Injected~Period~(Days)}$")
    plt.xlim(0, max(pers))
    plt.ylim(0, max(periods))
    plt.savefig("pvp_a_{0}.pdf".format(b))
def recovered(data, f):
    """
    Take all data and return just the successfully recovered stuff.

    data: 2d array of pers, periods, log_amps, teffs, rmags, amps, noises_ppm
    f: relative tolerance, e.g. 0.1 — a star counts as recovered when its
       measured period lies within (1 +/- f) of the injected period.
    Returns a 2d array of just the successfully recovered stars.
    """
    pers, periods, log_amps, teffs, rmags, amps, noises_ppm = data
    lower = pers - f * pers
    upper = pers + f * pers
    ok = (lower < periods) & (periods < upper)
    columns = (pers, periods, log_amps, teffs, rmags, amps, noises_ppm)
    return np.vstack([col[ok] for col in columns])
def find_fraction(X, X_r, bins):
    """Percentage of injected values recovered, per histogram bin.

    When *bins* is None the binning is chosen automatically from X.
    Returns (percent_per_bin, bins). Empty bins yield nan/inf (division by
    zero), exactly as before."""
    if bins is None:
        true_hist, bins = np.histogram(X)
    else:
        true_hist, _ = np.histogram(X, bins)
    measured_hist, _ = np.histogram(X_r, bins)
    th = true_hist.astype(float)
    mh = measured_hist.astype(float)
    return mh / th * 100, bins  # percent
def percents(X, X_r, teffs, teffs_r):
    """Recovery percentages per bin for the G, K, M and F temperature ranges.

    NOTE(review): Gmin/Gmax, Kmin/Kmax, Mmin/Mmax, Fmin/Fmax are module-level
    temperature bounds not defined in this part of the file — confirm they
    are assigned elsewhere before this function is called.
    """
    # total --- all stars
    # The overall percentage is discarded; only the auto-chosen bins are
    # reused for the per-type histograms below.
    percent, bins = find_fraction(X, X_r, None)
    # now for different temperatures
    mf = lambda Xmin, Xmax, t: (Xmin < t) * (t < Xmax)
    Gm, Km, Mm, Fm = mf(Gmin, Gmax, teffs), mf(Kmin, Kmax, teffs), \
        mf(Mmin, Mmax, teffs), mf(Fmin, Fmax, teffs)
    Gmr, Kmr, Mmr, Fmr = mf(Gmin, Gmax, teffs_r), mf(Kmin, Kmax, teffs_r), \
        mf(Mmin, Mmax, teffs_r), mf(Fmin, Fmax, teffs_r)
    Gpercent, Gbins = find_fraction(X[Gm], X_r[Gmr], bins)
    Kpercent, Kbins = find_fraction(X[Km], X_r[Kmr], bins)
    Mpercent, Mbins = find_fraction(X[Mm], X_r[Mmr], bins)
    Fpercent, Fbins = find_fraction(X[Fm], X_r[Fmr], bins)
    return Gpercent, Kpercent, Mpercent, Fpercent, bins
def summary_plot(data, b, f):
    """
    Plot completeness (percentage of injected rotation periods recovered)
    vs injected period, split by G/K/M spectral type.

    data: 2d array of pers, periods, log_amps, teffs, rmags, amps,
        noises_ppm (see recovered()).
    b: tag appended to the output filename.
    f: fractional period-recovery tolerance passed to recovered().
    Saves "recovered_hist_<b>.pdf".
    """
    # load data
    pers, periods, log_amps, teffs, rmags, amps, noises_ppm = data
    # find the arrays of successful recoveries
    pers_r, periods_r, log_amps_r, teffs_r, rmags_r, amps_r, noises_ppm_r = \
        recovered(data, f)
    print("\n", len(pers), "injected", len(pers_r),
          "recovered, {0:.2f}%".format(float(len(pers_r)) /
                                       float(len(pers)) * 100), "\n")
    # Percentage recovery as a function of period for the different
    # spectral types (Fpercent computed but not plotted below).
    Gpercent, Kpercent, Mpercent, Fpercent, bins = percents(pers, pers_r,
                                                            teffs, teffs_r)
    # Weights applied per spectral type before plotting — presumably the
    # relative abundance of each type in the sample; TODO confirm origin.
    Gfrac, Kfrac, Mfrac = 0.2426, 0.5429, 0.8339
    # make the plot.  Raw strings: "\m" in a normal string is an invalid
    # escape sequence (DeprecationWarning, future SyntaxError).
    plt.clf()
    plt.step(bins[:-1], Gpercent * Gfrac, lw=2, color="CornflowerBlue",
             label=r"$\mathrm{G~dwarfs}$")
    plt.step(bins[:-1], Kpercent * Kfrac, lw=2, color="LimeGreen",
             label=r"$\mathrm{K~dwarfs}$")
    plt.step(bins[:-1], Mpercent * Mfrac, lw=2, color="DarkOrange",
             label=r"$\mathrm{M~dwarfs}$")
    plt.xlim(0, bins[-2])
    plt.xlabel(r"$\mathrm{Injected~Rotation~Period~(Days)}$")
    plt.ylabel(r"$\mathrm{Percentage~Successfully~Recovered}$")
    plt.ylim(0, 100)
    plt.legend(loc="best")
    plt.savefig("recovered_hist_{0}.pdf".format(b))
def trilegal(data, b):
    """
    Make histograms of the TRILEGAL simulation outputs: effective
    temperature, injected rotation period, and the fraction of periods
    recovered (compared against the Table_1_Periodic.txt catalogue).

    data: 2d array of pers, periods, log_amps, teffs, rmags, amps,
        noises_ppm (see recovered()).
    b: tag appended to output filenames.
    Saves three PDFs and prints the catalogue/recovery histograms.
    """
    # load data
    pers, periods, log_amps, teffs, rmags, amps, noises_ppm = data
    m = (teffs < 7000) * (2000 < teffs)
    plt.clf()
    plt.hist(teffs[m], color="w", histtype="stepfilled")
    plt.xlabel(r"$\mathrm{Effective~Temperature~(K)}$")
    plt.ylabel(r"$\mathrm{Number~out~of~20,0000}$")
    plt.savefig("trilegal_teff_hist{0}.pdf".format(b))
    plt.clf()
    # FIX: '*' binds tighter than '>', so the original
    # 'a * b * pers > 0' compared the masked period values to zero rather
    # than combining three boolean masks; parenthesize each comparison.
    m = (teffs < 7000) * (2000 < teffs) * (pers > 0)
    bins = plt.hist(pers[m], 10, color="w", histtype="stepfilled")[1]
    phist = np.histogram(pers[m], bins=bins)[0]
    plt.hist(pers[m], bins=bins, color="w", histtype="stepfilled")
    rec = recovered(data, .1)
    l = (rec[3] < 7000) * (2000 < rec[3]) * (rec[0] > 0)
    plt.hist(rec[0][l], bins=bins, color="w", edgecolor="CornFlowerBlue",
             histtype="stepfilled", lw=2)
    plt.xlabel(r"$\mathrm{Injected~Rotation~Period~(Days)}$")
    plt.ylabel(r"$\mathrm{Number~out~of~20,0000}$")
    plt.savefig("trilegal_period_hist{0}.pdf".format(b))
    # make fraction recovered histogram
    nbins = 10
    m = (teffs < 7000) * (2000 < teffs) * (pers > 0)
    rec = recovered(data, .1)
    l = (rec[3] < 7000) * (2000 < rec[3]) * (rec[0] > 0)
    inj, ibins = np.histogram(pers[m], nbins)
    reco, _ = np.histogram(rec[0][l], bins=ibins)
    frac = np.zeros(len(ibins))
    # FIX: force float division — histogram counts are integer arrays, so
    # 'reco/inj' floor-divides to zero under Python 2 numpy semantics.
    frac[:-1] = reco / inj.astype(float)
    # Fraction of Kepler targets observed; TODO confirm source of 0.328.
    kepler_frac = 0.328
    plt.clf()
    plt.step(ibins, frac * kepler_frac, color="CornFlowerBlue", lw=2)
    plt.xlabel(r"$\mathrm{Injected~Rotation~Period~(Days)}$")
    plt.ylabel(r"$\mathrm{Fraction~Recovered}$")
    # Load the periodic-star catalogue; renamed from 'data' so the
    # function argument is not shadowed.
    amy_data = np.genfromtxt("Table_1_Periodic.txt", skip_header=1,
                             delimiter=",").T
    m = (2000 < amy_data[1]) * (amy_data[1] < 7000)
    amy_p = amy_data[4][m]
    amy, _ = np.histogram(amy_p, bins=ibins)
    phist = np.array([float(i) for i in phist])
    print(amy, phist / float(max(phist)))
    plt.ylim(0, .5)
    plt.savefig("trilegal_fraction_amy_hist{0}.pdf".format(b))
if __name__ == "__main__":
    # Effective-temperature boundaries (K) for the F/G/K/M dwarf bins.
    # These are read as module-level globals by percents().
    Fmin, Fmax = 6000, 7000
    Gmin, Gmax = 5200, 6000
    Kmin, Kmax = 3700, 5200
    Mmin, Mmax = 2400, 3700
    # Filename parameters: observing baseline in years and a field tag
    # (presumably galactic latitude b=-10 — confirm against the data set).
    yr = 10
    b = -10
    # load data file
    d = np.genfromtxt("results/{0}yr_resultsl45b{1}.txt".format(yr, b)).T
    m = d[0] > 0  # remove negative injected periods
    data = np.vstack((d[0][m], d[1][m], d[2][m], d[3][m], d[4][m], d[5][m],
                      d[6][m]))
    # make the plots
    # summary_plot(data, b, .1)
    # make_plot(data, b)
    trilegal(data, b)
| |
import calendar
from django.core.urlresolvers import reverse
# Output image formats this endpoint can actually produce.
SUPPORTED_EXTENSIONS = ('jpg', 'tif', 'png', 'gif')
# Formats defined by the IIIF Image API but not implemented here; requesting
# one raises UnsupportedError rather than ClientError.
UNSUPPORTED_EXTENSIONS = ('jp2', 'pdf', 'webp')
# IIIF "quality" parameter values accepted by parse_quality().
SUPPORTED_QUALITY = ('default', 'color', 'gray',)
class IIIFImageApiException(Exception):
    """Base class for IIIF Image API exceptions raised by this module."""
    pass
class ClientError(IIIFImageApiException):
    """Invalid IIIF Image API operation requested by client (HTTP 400)."""
    pass
class UnsupportedError(IIIFImageApiException):
    """Valid but not-yet-implemented IIIF Image API operation requested."""
    pass
def parse_dimensions_string(dimension_string, permit_floats=False):
    """
    Parse an "x,y,width,height" dimensions string into numbers.

    :param dimension_string: comma-separated "x,y,w,h" string.
    :param permit_floats: parse values as floats (used for "pct:" regions)
        instead of ints.
    :returns: tuple (x, y, width, height).
    :raises ClientError: if the string cannot be split or parsed, contains
        negative values, or has a zero width or height.
    """
    # Split dimensions string into sections
    try:
        x, y, width, height = dimension_string.split(',')
    # FIX: use "except ... as ..." — the old "except ValueError, ex" form
    # is Python-2-only syntax and a SyntaxError on Python 3.
    except ValueError as ex:
        raise ClientError(
            "Cannot split dimensions string %s: %s" % (dimension_string, ex))
    # Parse dimensions string sections into numerical values
    number = float if permit_floats else int
    try:
        x = number(x)
        y = number(y)
        width = number(width)
        height = number(height)
    except ValueError as ex:
        raise ClientError(
            "Cannot parse numbers from dimensions string %s: %s"
            % (dimension_string, ex))
    # Sanity-checks for numerical values
    # Negatives not permitted
    if x < 0 or y < 0 or width < 0 or height < 0:
        raise ClientError(
            "Negative numbers illegal in dimensions string %s"
            % dimension_string)
    # Zero width or height not permitted
    if width <= 0 or height <= 0:
        raise ClientError(
            "Zero numbers illegal in dimensions string %s"
            % dimension_string)
    return (x, y, width, height)
def parse_width_height_string(wh_string):
    """
    Parse a "w,h" size string where either component may be empty.

    :param wh_string: string like "100,50", "100," or ",50".
    :returns: tuple (width, height) with ``None`` for an omitted component.
    :raises ClientError: if the string cannot be split or parsed, both
        components are omitted, or either value is zero or negative.
    """
    # Split dimensions string into sections
    try:
        width, height = wh_string.split(',')
    # FIX: "except ... as ..." — the old comma form is a SyntaxError on
    # Python 3 (works on Python 2.6+ as well).
    except ValueError as ex:
        raise ClientError(
            "Cannot split width-height string %s: %s" % (wh_string, ex))
    # Parse width-height string sections into numerical values
    try:
        width = int(width) if width else None
        height = int(height) if height else None
    except ValueError as ex:
        raise ClientError(
            "Cannot parse numbers from width-height string %s: %s"
            % (wh_string, ex))
    # At least one of width or height must be set
    if width is None and height is None:
        raise ClientError(
            "There must be a value in width-height string %s"
            % wh_string)
    # Zero or negative values not permitted
    if (width is not None and width <= 0) or \
            (height is not None and height <= 0):
        raise ClientError(
            "Zero or negative numbers illegal in width-height string %s"
            % wh_string)
    return (width, height)
def parse_region(region, image_width, image_height):
    """
    Parse the Region parameter to determine the rectangular portion of the
    full image to be returned, informed by the actual image dimensions.
    Returns (x, y, width, height):
    - x,y are pixel offsets into the image from the upper left
    - width, height are pixel dimensions for the cropped image.
    """
    if region == 'full':
        # No cropping at all.
        x = y = 0
        width, height = image_width, image_height
    elif region == 'square':
        # Largest centred square. Centring is not mandated by the spec but
        # is the recommended sensible default.
        side = min(image_width, image_height)
        x = int((image_width - side) / 2)
        y = int((image_height - side) / 2)
        width = height = side
    elif region.startswith('pct:'):
        # Percentages of the full image dimensions.
        x_pct, y_pct, width_pct, height_pct = \
            parse_dimensions_string(region[4:], permit_floats=True)
        x = int(x_pct / 100 * image_width)
        y = int(y_pct / 100 * image_height)
        width = int(width_pct / 100 * image_width)
        height = int(height_pct / 100 * image_height)
    else:
        # Explicit pixel "x,y,w,h" region.
        x, y, width, height = parse_dimensions_string(region)
    # Clamp the crop so it never extends beyond the image edges.
    width = min(width, image_width - x)
    height = min(height, image_height - y)
    return x, y, width, height
def parse_size(size, image_width, image_height):
    """
    Parse the Size parameter into the (width, height) of the result image.

    Accepts 'full'/'max', 'pct:<n>', best-fit '!w,h', and 'w,h' (where
    either component may be omitted to scale by aspect ratio).
    """
    aspect = float(image_width) / image_height
    if size in ('full', 'max'):
        width, height = image_width, image_height
    elif size.startswith('pct:'):
        # Same percentage applied to both dimensions.
        scale = float(size[4:]) / 100
        width = scale * image_width
        height = scale * image_height
    elif size.startswith('!'):
        # Best fit inside the requested box.
        width, height = parse_width_height_string(size[1:])
        # The spec assumes both components are given; fill any blank from
        # the image aspect ratio.
        if width is None and height is None:
            width, height = image_width, image_height  # no-op
        elif width is None:
            width = height * aspect
        elif height is None:
            height = width / aspect
        # Both components known: keep whichever constraint binds.
        if width / aspect <= height:
            height = int(width / aspect)  # width is the best fit
        else:
            width = int(height * aspect)  # height is the best fit
    else:
        width, height = parse_width_height_string(size)
        if width is None and height is None:
            width, height = image_width, image_height
        elif height is None:
            height = width / aspect  # "w," syntax
        elif width is None:
            width = height * aspect  # ",h" syntax
    return int(width), int(height)
def parse_rotation(rotation_str, image_width, image_height):
    """
    Parse the Rotation parameter into (is_mirrored, degrees).

    Only 0-degree rotation without mirroring is currently supported.

    :raises UnsupportedError: for mirroring ('!' prefix) or any rotation
        that is not a multiple of 360.
    :raises ClientError: if the rotation value is not a number.
    """
    if rotation_str.startswith('!'):
        is_mirrored = True
        rotation_str = rotation_str[1:]
        # FIX: the original formatted the not-yet-defined name 'rotation'
        # here, raising NameError instead of the intended UnsupportedError.
        raise UnsupportedError(
            "Image API rotation mirroring is not yet supported: %s"
            % rotation_str)
    else:
        is_mirrored = False
    try:
        # Normalise to the [0, 360) range.
        rotation = int(rotation_str) % 360
    # FIX: "except ... as ..." — the old comma form is Python-2-only.
    except ValueError as ex:
        raise ClientError(
            "Cannot parse number from rotation string %s: %s"
            % (rotation_str, ex))
    valid = (0,)
    if rotation not in valid:
        raise UnsupportedError(
            "Image API rotation parameters other than %r degrees"
            " are not yet supported: %s" % (valid, rotation))
    return is_mirrored, rotation
def parse_quality(quality):
    """
    Validate the Quality parameter; return it unchanged when supported.

    :raises UnsupportedError: for any value not in SUPPORTED_QUALITY.
    """
    if quality in SUPPORTED_QUALITY:
        return quality
    raise UnsupportedError(
        "Image API quality parameters other than %r"
        " are not yet supported: %s" % (SUPPORTED_QUALITY, quality))
def parse_format(output_format, image_format):
    """
    Validate the Format parameter; return it unchanged when supported.

    :raises UnsupportedError: for spec-defined but unimplemented formats.
    :raises ClientError: for formats the spec does not define at all.
    """
    # TODO Do we need to limit output format based on image's existing format?
    if output_format not in SUPPORTED_EXTENSIONS:
        if output_format in UNSUPPORTED_EXTENSIONS:
            raise UnsupportedError(
                "Image API format parameters %r are not yet supported: %s"
                % (UNSUPPORTED_EXTENSIONS, output_format))
        raise ClientError(
            "Invalid Image API format parameter not in %r: %s"
            % (SUPPORTED_EXTENSIONS + UNSUPPORTED_EXTENSIONS, output_format))
    return output_format
def make_canonical_path(
    image_identifier, image_width, image_height,
    region, size, rotation, quality, format_str
):
    """
    Return the canonical URL path for an image for the given region/size/
    rotation/quality/format API tranformation settings.
    See http://iiif.io/api/image/2.1/#canonical-uri-syntax

    ``region`` is an (x, y, width, height) tuple (or the string 'full'/
    'square'), ``size`` is 'full'/'max' or a (width, height) tuple, and
    ``rotation`` is an (is_mirrored, degrees) pair — presumably the parsed
    outputs of the parse_* helpers above; TODO confirm with callers.
    """
    original_aspect_ratio = float(image_width) / image_height
    if (
        region == 'full' or
        # Use 'full' if 'square' is requested for an already square image
        (region == 'square' and image_width == image_height) or
        # Use 'full' if region exactly matches image dimensions
        (region == (0, 0, image_width, image_height))
    ):
        canonical_region = 'full'
    else:
        # Use explicit x,y,width,height region settings
        canonical_region = ','.join(map(str, region))
    if size in ['full', 'max']:
        canonical_size = 'full'
    elif (region[2:] == size and (image_width, image_height) == size):
        # Use 'full' if result image dimensions are unchanged from original
        # and are also unchanged from the region operation's output
        # NOTE(review): this only canonicalises to 'full' when BOTH the
        # region dims and the whole-image dims equal `size`; a result that
        # merely matches the region output falls through to the branches
        # below — confirm that is intended.
        canonical_size = 'full'
    elif float(size[0]) / size[1] == original_aspect_ratio:
        # w, syntax for images scaled to maintain the aspect ratio
        canonical_size = '%d,' % size[0]
    else:
        # Full width,height size if aspect ratio is changed
        canonical_size = ','.join(map(str, size))
    canonical_rotation = ''
    if rotation[0]:
        # Image is mirrored
        canonical_rotation += '!'
    canonical_rotation += '%d' % rotation[1]
    canonical_quality = quality
    canonical_format = format_str
    return reverse(
        'iiif_image_api',
        args=[image_identifier, canonical_region, canonical_size,
              canonical_rotation, canonical_quality, canonical_format]
    )
def build_iiif_file_storage_path(url_path, ik_image, iiif_storage):
    """
    Return the file storage path for a given IIIF Image API URL path.

    NOTE: The returned file storage path includes the given ``Image``
    instance's ID to ensure the path is unique and identifiable, and its
    modified timestamp to act as a primitive cache-busting mechanism for
    when the image is changed but there are pre-existing image conversions.

    TODO: Ideally we should use a hash or timestamp of the Image's actual
    image data changes, not the whole instance which could change but
    have the same image.
    """
    path = url_path[1:]  # Strip the leading slash
    # Strip a redundant 'iiif/' prefix if present (re-added below).
    if path.startswith('iiif/'):
        path = path[len('iiif/'):]
    # The instance's modified time, as a UTC epoch timestamp, is the
    # cache-busting component.
    modified_ts = str(calendar.timegm(ik_image.date_modified.timetuple()))
    segments = path.split('/')
    # Insert the timestamp immediately after the image ID segment.
    path = '/'.join([segments[0], modified_ts] + segments[1:])
    # Replace '/' & ',' with '-' so separators survive name sanitising;
    # otherwise the characters get purged and produce ambiguous, clashing
    # names, e.g. /3/100,100,200,200/... => iiif3100100200200
    path = path.replace('/', '-').replace(',', '-')
    # Let the storage engine sanitise the name for its backend.
    path = iiif_storage.get_valid_name(path)
    # Prefix with 'iiif/' so image files don't get dumped into the root of
    # a storage location that might be shared with other uses.
    if iiif_storage.location != 'iiif':
        path = 'iiif/' + path
    return path
def is_remote_storage(iiif_storage, storage_path):
    """
    Return ``True`` if the given storage class uses remote (not local)
    storage. Remote backends raise ``NotImplementedError`` from ``path()``.
    See https://docs.djangoproject.com/en/1.10/ref/files/storage/#django.core.files.storage.Storage.path
    """
    # TODO Surely Django's storage mechanism has a better way to test this?
    try:
        iiif_storage.path(storage_path)
    except NotImplementedError:
        return True
    return False
| |
from braces.views import LoginRequiredMixin
from django.contrib import messages
from django.contrib.auth import authenticate, login
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth.forms import SetPasswordForm
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.utils.http import urlsafe_base64_decode
from django.utils.safestring import mark_safe
from django.views.decorators.debug import sensitive_post_parameters
from django.views.generic import FormView, UpdateView, DetailView, DeleteView
from ratelimit.exceptions import Ratelimited
from ratelimit.mixins import RatelimitMixin
from ratelimit.utils import is_ratelimited
from .forms import EmailUserCreationForm, PasswordResetForm
class RateLimitedFormView(FormView):
    """FormView base that applies per-IP rate limiting before dispatch."""

    ratelimit_key = 'ip'
    ratelimit_block = True
    ratelimit_rate = '1/h'
    ratelimit_group = None

    def dispatch(self, *args, **kwargs):
        # Check the limit without incrementing it; subclasses decide when
        # a request actually counts (e.g. only on success).
        limited = is_ratelimited(
            request=self.request,
            group=self.ratelimit_group,
            key=self.ratelimit_key,
            rate=self.ratelimit_rate,
            increment=False)
        if limited and self.ratelimit_block:
            raise Ratelimited()
        return super(RateLimitedFormView, self).dispatch(*args, **kwargs)
class RegistrationView(RateLimitedFormView):
    """Sign-up form: create the account, log the user in, then count the
    registration against the rate limit."""
    template_name = 'accounts/register.html'
    form_class = EmailUserCreationForm
    # Registrations are limited separately from other rate-limited forms.
    ratelimit_group = 'registration'

    def form_valid(self, form):
        user = form.save()
        messages.success(self.request,
                         mark_safe(
                             "Successfully registered, you are now logged in! <a href='%s'>View your profile</a>" %
                             reverse('user-detail', kwargs={'pk': user.id})))
        # Re-authenticate with the submitted credentials so the auth
        # backend is attached to the user object before login().
        user = authenticate(username=form.cleaned_data['username'],
                            password=form.cleaned_data['password1'])
        login(self.request, user)
        # Only successful registrations increment the limit; dispatch()
        # checked with increment=False.
        is_ratelimited(request=self.request, group=self.ratelimit_group, key=self.ratelimit_key,
                       rate=self.ratelimit_rate, increment=True)
        return super(RegistrationView, self).form_valid(form)

    def get_success_url(self):
        return reverse('new_count')
class PasswordChangeView(LoginRequiredMixin, FormView):
    """Allow a logged-in user to change their own password."""

    template_name = 'accounts/password_change.html'
    form_class = PasswordChangeForm

    def get_form_kwargs(self):
        # PasswordChangeForm requires the acting user as a kwarg.
        form_kwargs = super(PasswordChangeView, self).get_form_kwargs()
        form_kwargs['user'] = self.request.user
        return form_kwargs

    def form_valid(self, form):
        form.save()
        messages.success(self.request, "Password changed successfully")
        return HttpResponseRedirect(reverse('new_count'))
class UserDetailView(LoginRequiredMixin, DetailView):
    """Show a user's profile page; only the owner may view it."""

    model = User
    context_object_name = 'user_detail'
    template_name = 'accounts/user_detail.html'

    def get_object(self, queryset=None):
        # Guard clause: any other user's pk is forbidden.
        if self.request.user.id != int(self.kwargs['pk']):
            raise PermissionDenied
        return super(UserDetailView, self).get_object()

    def get_context_data(self, **kwargs):
        context = super(UserDetailView, self).get_context_data(**kwargs)
        # List the user's keyboards with the primary one first.
        context['keyboards'] = self.object.keyboard_set.all().order_by('-is_primary')
        return context
class UserDeleteView(LoginRequiredMixin, DeleteView):
    """Confirm-and-delete view for a user's own account."""

    model = User
    context_object_name = 'user_object'
    template_name = 'accounts/user_check_delete.html'

    def get_object(self, queryset=None):
        # Guard clause: only the account owner may delete it.
        if self.request.user.id != int(self.kwargs['pk']):
            raise PermissionDenied
        return super(UserDeleteView, self).get_object()

    def get_success_url(self):
        messages.success(self.request, "User account deleted")
        return reverse('new_count')
class UserUpdateView(LoginRequiredMixin, UpdateView):
    """Let a user edit their own name and email address."""

    model = User
    fields = ['first_name', 'last_name', 'email', ]
    template_name = 'accounts/user_update.html'

    def get_object(self, queryset=None):
        # Guard clause: only the account owner may edit it.
        if self.request.user.id != int(self.kwargs['pk']):
            raise PermissionDenied
        return super(UserUpdateView, self).get_object()

    def get_success_url(self):
        messages.success(self.request, "User details updated")
        return reverse('user-detail', kwargs={'pk': self.kwargs['pk']})
class PasswordResetView(RatelimitMixin, FormView):
    """Request a password-reset email (rate limited to 5/hour per IP)."""
    template_name = 'accounts/reset_form.html'
    form_class = PasswordResetForm
    # RatelimitMixin configuration.
    ratelimit_rate = '5/h'
    ratelimit_group = 'pwdreset'
    ratelimit_key = 'ip'
    ratelimit_block = True

    def form_valid(self, form):
        form.save(request=self.request)
        messages.success(self.request, 'Reset email sent')
        return super(PasswordResetView, self).form_valid(form)

    def form_invalid(self, form):
        """Don't expose form errors to the user (avoids leaking whether an
        email address has an account)."""
        return HttpResponseRedirect(self.get_success_url())

    def get_success_url(self):
        return reverse('new_count')
class PasswordResetConfirmView(FormView):
    """Validate a password-reset link (uidb64 + token) and, when valid,
    let the user set a new password."""
    template_name = 'accounts/reset_confirm.html'
    form_class = SetPasswordForm

    @method_decorator(sensitive_post_parameters())
    def dispatch(self, request, *args, **kwargs):
        # Keep the submitted passwords out of error reports/tracebacks.
        return super(PasswordResetConfirmView, self).dispatch(request, *args, **kwargs)

    @staticmethod
    def valid_user(uidb64):
        """Return the User for a base64-encoded pk, or None if invalid."""
        try:
            uid = urlsafe_base64_decode(uidb64)
            user = User.objects.get(pk=uid)
        except (TypeError, ValueError, OverflowError, User.DoesNotExist):
            return None
        return user

    @staticmethod
    def valid_token(user, token):
        """Return whether the reset token is valid for the given user."""
        if user is not None:
            return default_token_generator.check_token(user, token)
        else:
            return False

    def _valid_inputs(self, uidb64, token):
        # Side effect: caches the decoded user on the view so
        # get_form_kwargs() can hand it to SetPasswordForm.
        self.user_object = self.valid_user(uidb64)
        return self.valid_token(self.user_object, token)

    def get(self, request, *args, **kwargs):
        if self._valid_inputs(self.kwargs['uidb64'], self.kwargs['token']):
            form = self.get_form(self.get_form_class())
            return self.render_to_response(self.get_context_data(form=form, validlink=True))
        else:
            # Invalid/expired link: render the template's error state.
            return self.render_to_response(self.get_context_data(validlink=False))

    def post(self, request, *args, **kwargs):
        # Re-validate on POST so a link cannot be replayed after expiry.
        if self._valid_inputs(self.kwargs['uidb64'], self.kwargs['token']):
            return super(PasswordResetConfirmView, self).post(request, *args, **kwargs)
        else:
            return self.render_to_response(self.get_context_data(validlink=False))

    def get_form_kwargs(self):
        kwargs = super(PasswordResetConfirmView, self).get_form_kwargs()
        # SetPasswordForm needs the user whose password is being set.
        kwargs['user'] = self.user_object
        return kwargs

    def form_valid(self, form):
        form.save()
        messages.success(self.request, 'Password reset successfully')
        return HttpResponseRedirect(reverse('new_count'))
def rate_limited(request, exception):
    """Handler for Ratelimited exceptions: flash an error and redirect."""
    messages.error(request, 'You have been rate limited')
    return HttpResponseRedirect(reverse('new_count'))
| |
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.orm import exc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.handlers import dvr_rpc
from neutron.common import constants as q_const
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron.db import api as db_api
from neutron.extensions import portbindings
from neutron import manager
from neutron.openstack.common import log
from neutron.plugins.common import constants as service_constants
from neutron.plugins.ml2 import db
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import type_tunnel
from neutron.services.qos.agents import qos_rpc
from neutron.db import qos_rpc_base as qos_db_rpc
from neutron.db import qos_db
# REVISIT(kmestery): Allow the type and mechanism drivers to supply the
# mixins and eventually remove the direct dependencies on type_tunnel.
# Module-level logger used by the RPC classes below.
LOG = log.getLogger(__name__)
class RpcCallbacks(n_rpc.RpcCallback,
                   type_tunnel.TunnelRpcCallbackMixin,
                   qos_db_rpc.QoSServerRpcCallbackMixin):
    """Server-side RPC callbacks invoked by ML2 L2 agents.

    Handles device/port detail lookups, port status transitions, tunnel
    RPC (via the mixin) and QoS policy queries. Most handlers receive the
    calling agent's id/host in ``kwargs`` and operate through the core
    plugin.
    """

    RPC_API_VERSION = '1.3'
    # history
    # 1.0 Initial version (from openvswitch/linuxbridge)
    # 1.1 Support Security Group RPC
    # 1.2 Support get_devices_details_list
    # 1.3 get_device_details rpc signature upgrade to obtain 'host' and
    #     return value to include fixed_ips and device_owner for
    #     the device port

    def __init__(self, notifier, type_manager):
        # Wire the tunnel-RPC mixin to the notifier before normal
        # RpcCallback initialisation.
        self.setup_tunnel_callback_mixin(notifier, type_manager)
        super(RpcCallbacks, self).__init__()

    def get_device_details(self, rpc_context, **kwargs):
        """Agent requests device details.

        Returns a dict describing the bound port (segment info, trunk and
        QoS data); for unknown or unbound ports only {'device': ...} is
        returned so the agent treats the device as gone.
        """
        agent_id = kwargs.get('agent_id')
        device = kwargs.get('device')
        host = kwargs.get('host')
        LOG.debug("Device %(device)s details requested by agent "
                  "%(agent_id)s with host %(host)s",
                  {'device': device, 'agent_id': agent_id, 'host': host})
        plugin = manager.NeutronManager.get_plugin()
        port_id = plugin._device_to_port_id(device)
        port_context = plugin.get_bound_port_context(rpc_context,
                                                     port_id,
                                                     host)
        if not port_context:
            LOG.warning(_("Device %(device)s requested by agent "
                          "%(agent_id)s not found in database"),
                        {'device': device, 'agent_id': agent_id})
            return {'device': device}
        segment = port_context.bound_segment
        port = port_context.current
        if not segment:
            # Port exists but binding failed / is incomplete.
            LOG.warning(_("Device %(device)s requested by agent "
                          "%(agent_id)s on network %(network_id)s not "
                          "bound, vif_type: %(vif_type)s"),
                        {'device': device,
                         'agent_id': agent_id,
                         'network_id': port['network_id'],
                         'vif_type': port[portbindings.VIF_TYPE]})
            return {'device': device}
        # Trunk-port extension attribute may be absent on regular ports.
        if 'trunkport:type' in port:
            trunk_type = port['trunkport:type']
        else:
            trunk_type = ""
        remote_nets = db.get_vlan_mappings(rpc_context, port)
        # The agent asking for details means the port is being (re)wired:
        # move it to BUILD (or DOWN if administratively disabled).
        new_status = (q_const.PORT_STATUS_BUILD if port['admin_state_up']
                      else q_const.PORT_STATUS_DOWN)
        if port['status'] != new_status:
            plugin.update_port_status(rpc_context,
                                      port_id,
                                      new_status,
                                      host)
        #query qos policies, add to entry for qos-service
        session = db_api.get_session()
        policies = qos_db.get_policies_by_port(session, port_id)
        entry = {'device': device,
                 'network_id': port['network_id'],
                 'port_id': port_id,
                 'mac_address': port['mac_address'],
                 'admin_state_up': port['admin_state_up'],
                 'network_type': segment[api.NETWORK_TYPE],
                 'segmentation_id': segment[api.SEGMENTATION_ID],
                 'physical_network': segment[api.PHYSICAL_NETWORK],
                 'trunk_networks': remote_nets,
                 'trunk_type': trunk_type,
                 'fixed_ips': port['fixed_ips'],
                 'qos_policies': policies,
                 'device_owner': port['device_owner'],
                 'profile': port[portbindings.PROFILE]}
        LOG.debug(_("Returning: %s"), entry)
        return entry

    def get_devices_details_list(self, rpc_context, **kwargs):
        """Batch variant of get_device_details (RPC API >= 1.2)."""
        return [
            self.get_device_details(
                rpc_context,
                device=device,
                **kwargs
            )
            for device in kwargs.pop('devices', [])
        ]

    def get_port_detail(self, rpc_context, **kwargs):
        """Return the full port dict for a single port id."""
        port_id = kwargs.get('port_id')
        agent_id = kwargs.get('agent_id')
        LOG.debug("Port %(port)s details requested by agent "
                  "%(agent_id)s",
                  {'port': port_id, 'agent_id': agent_id})
        plugin = manager.NeutronManager.get_plugin()
        port_info = plugin.get_port(rpc_context, port_id)
        return port_info

    def get_ports(self, rpc_context, **kwargs):
        """Return ports, optionally filtered by binding host and/or MAC."""
        agent_id = kwargs.get('agent_id')
        host = kwargs.get('host')
        mac_address = kwargs.get('mac_address')
        LOG.debug("Ports requested by agent "
                  "%(agent_id)s",
                  {'agent_id': agent_id})
        filters = {}
        # NOTE(review): identity comparison ('is not None') would be the
        # idiomatic form here; '!= None' behaves the same for these values.
        if host != None:
            filters['binding:host_id'] = [host]
        if mac_address != None:
            filters['mac_address'] = [mac_address]
        plugin = manager.NeutronManager.get_plugin()
        ports_info = plugin.get_ports(rpc_context, filters=filters)
        return ports_info

    def get_networks(self, rpc_context, **kwargs):
        """Return the network dict(s) matching the given network id."""
        agent_id = kwargs.get('agent_id')
        network_id = kwargs.get('network_id')
        LOG.debug("Networks %(network_id)s requested by agent "
                  "%(agent_id)s",
                  {'network_id': network_id, 'agent_id': agent_id})
        filters = {}
        filters['id'] = [network_id]
        plugin = manager.NeutronManager.get_plugin()
        networks_info = plugin.get_networks(rpc_context, filters=filters)
        return networks_info

    def port_bound_to_host(self, rpc_context, **kwargs):
        """Return whether the given port is bound to the given host."""
        agent_id = kwargs.get('agent_id')
        host = kwargs.get('host')
        port_id = kwargs.get('port_id')
        LOG.debug("Port %(port_id)s bound to host %(host)s requested by agent "
                  "%(agent_id)s",
                  {'port_id':port_id, 'host': host, 'agent_id': agent_id})
        plugin = manager.NeutronManager.get_plugin()
        port_bound = plugin.port_bound_to_host(rpc_context, port_id, host)
        return port_bound

    def update_device_down(self, rpc_context, **kwargs):
        """Device no longer exists on agent."""
        # TODO(garyk) - live migration and port status
        agent_id = kwargs.get('agent_id')
        device = kwargs.get('device')
        host = kwargs.get('host')
        LOG.debug(_("Device %(device)s no longer exists at agent "
                    "%(agent_id)s"),
                  {'device': device, 'agent_id': agent_id})
        plugin = manager.NeutronManager.get_plugin()
        port_id = plugin._device_to_port_id(device)
        port_exists = True
        if (host and not plugin.port_bound_to_host(rpc_context,
                                                   port_id, host)):
            # Another host owns the binding (e.g. after live migration);
            # don't touch the port status.
            LOG.debug(_("Device %(device)s not bound to the"
                        " agent host %(host)s"),
                      {'device': device, 'host': host})
            return {'device': device,
                    'exists': port_exists}
        try:
            port_exists = bool(plugin.update_port_status(
                rpc_context, port_id, q_const.PORT_STATUS_DOWN, host))
        except exc.StaleDataError:
            # A concurrent delete_port already removed the row; report the
            # port as gone instead of failing the RPC.
            port_exists = False
            LOG.debug("delete_port and update_device_down are being executed "
                      "concurrently. Ignoring StaleDataError.")
        return {'device': device,
                'exists': port_exists}

    def update_device_up(self, rpc_context, **kwargs):
        """Device is up on agent."""
        agent_id = kwargs.get('agent_id')
        device = kwargs.get('device')
        host = kwargs.get('host')
        LOG.debug(_("Device %(device)s up at agent %(agent_id)s"),
                  {'device': device, 'agent_id': agent_id})
        plugin = manager.NeutronManager.get_plugin()
        port_id = plugin._device_to_port_id(device)
        if (host and not plugin.port_bound_to_host(rpc_context,
                                                   port_id, host)):
            # Binding belongs to a different host; ignore the update.
            LOG.debug(_("Device %(device)s not bound to the"
                        " agent host %(host)s"),
                      {'device': device, 'host': host})
            return
        port_id = plugin.update_port_status(rpc_context, port_id,
                                            q_const.PORT_STATUS_ACTIVE,
                                            host)
        # Keep DVR ARP tables in sync when the L3 plugin supports DVR.
        l3plugin = manager.NeutronManager.get_service_plugins().get(
            service_constants.L3_ROUTER_NAT)
        if (l3plugin and
            utils.is_extension_supported(l3plugin,
                                         q_const.L3_DISTRIBUTED_EXT_ALIAS)):
            try:
                l3plugin.dvr_vmarp_table_update(rpc_context, port_id, "add")
            except exceptions.PortNotFound:
                LOG.debug('Port %s not found during ARP update', port_id)

    def get_subnet_dhcp_by_network_id(self, rpc_context, **kwargs):
        """Return the subnets and DHCP ports of a network."""
        network_id = kwargs.get('network_id')
        plugin = manager.NeutronManager.get_plugin()
        subnets = plugin.get_subnets_by_network(rpc_context, network_id)
        filter = {'device_owner': ['network:dhcp'],
                  'network_id': [network_id]}
        dhcp_ports = plugin.get_ports(rpc_context, filter)
        return {'subnets': subnets,
                'dhcp_ports': dhcp_ports}

    def get_user_port_id_from_vm_port(self, vm_port):
        # VM ports apparently encode the user port id in their name as
        # "vm_port@<user_port_id>" — TODO confirm with the port-creation
        # code. Returns u'' when the name is not in that form.
        name = vm_port['name']
        n_list = name.split('@')
        if(len(n_list) > 1 and u'vm_port' == n_list[0] and len(n_list[1]) > 1):
            return n_list[1]
        else:
            return u''

    def get_cidr_and_gwip_by_subnet_id(self, context, subnet_id):
        """Return [cidr, gateway_ip] for a subnet, or '' if not found."""
        plugin = manager.NeutronManager.get_plugin()
        subnet = plugin.get_subnet(context, subnet_id)
        if not subnet:
            LOG.debug(_('HYBRID: Subnet not found, subnet_id: %s.'
                        ), subnet_id)
            return ''
        return [subnet['cidr'], subnet['gateway_ip']]

    def get_ip_addresses_from_fixed_ips(self, context, fixed_ips):
        """Return [[ip, cidr, gateway_ip], ...] for each fixed IP, or
        None when the list is empty."""
        if(len(fixed_ips) <= 0):
            return None
        ip_list = []
        for fixed_ip in fixed_ips:
            ip_address = fixed_ip.get('ip_address')
            subnet_id = fixed_ip.get('subnet_id')
            ip_cidr_gwip = self.get_cidr_and_gwip_by_subnet_id(context,
                                                              subnet_id)
            ip_list.append([ip_address, ip_cidr_gwip[0], ip_cidr_gwip[1]])
        return ip_list

    def get_user_address(self, rpc_context, **kwargs):
        """Resolve the user-facing port and addresses behind a VM port and
        rebind that user port to the requesting host."""
        mac = kwargs.get('mac_address')
        ip = kwargs.get('ip_address')
        host = kwargs.get('host')
        LOG.debug("HYBRID: Agent requests user address of VM on host: %s, "
                  "ip_address: %s, mac_address: %s", host, ip, mac)
        plugin = manager.NeutronManager.get_plugin()
        vm_port_id = plugin._device_to_port_id(mac)
        vm_port = plugin.get_port(rpc_context, vm_port_id)
        if not vm_port:
            LOG.debug(_('HYBRID: VM port not found, host: %s, ip_address: %s,'
                        'mac_address: %s.'), host, ip, mac)
            return {'user_port': {}}
        user_port_id = self.get_user_port_id_from_vm_port(vm_port)
        user_port = plugin.get_port(rpc_context, user_port_id)
        if not user_port:
            LOG.debug(_('HYBRID: User port not found, host: %s, ip_address: '
                        '%s, mac_address: %s.'), host, ip, mac)
            return {'user_port': {}}
        ip_addresses = self.get_ip_addresses_from_fixed_ips(
            rpc_context, user_port['fixed_ips'])
        # Rebind the user port to the host that asked for it.
        user_port['binding:host_id'] = host
        port = {'port': user_port}
        plugin.update_port(rpc_context, user_port_id, port)
        ret_msg = {'user_port': {
            'mac_address': user_port['mac_address'],
            'ip_addresses': ip_addresses,
            'port_id': user_port['id'],
            'vm_port_id': vm_port_id}}
        LOG.debug(_('HYBRID: Get user port, return msg %s.'), ret_msg)
        return ret_msg
class AgentNotifierApi(n_rpc.RpcProxy,
                       dvr_rpc.DVRAgentRpcApiMixin,
                       sg_rpc.SecurityGroupAgentRpcApiMixin,
                       type_tunnel.TunnelAgentRpcApiMixin,
                       qos_rpc.QoSAgentRpcApiMixin):
    """Agent side of the openvswitch rpc API.

    API version history:
        1.0 - Initial version.
        1.1 - Added get_active_networks_info, create_dhcp_port,
              update_dhcp_port, and removed get_dhcp_port methods.
    """

    BASE_RPC_API_VERSION = '1.1'

    def __init__(self, topic):
        super(AgentNotifierApi, self).__init__(
            topic=topic, default_version=self.BASE_RPC_API_VERSION)
        # Pre-compute the fan-out topics for events every agent must hear.
        self.topic_network_delete = topics.get_topic_name(topic,
                                                          topics.NETWORK,
                                                          topics.DELETE)
        self.topic_port_update = topics.get_topic_name(topic,
                                                       topics.PORT,
                                                       topics.UPDATE)

    def network_delete(self, context, network_id):
        # Broadcast: any agent may hold ports on the deleted network.
        self.fanout_cast(context,
                         self.make_msg('network_delete',
                                       network_id=network_id),
                         topic=self.topic_network_delete)

    def port_update(self, context, port, network_type=None, segmentation_id=None,
                    physical_network=None, host=None):
        """Notify agents of a port change.

        With no ``host`` the update is fanned out to all agents; with a
        host it is cast only to that host's per-host topic.
        """
        if not host:
            self.fanout_cast(context,
                             self.make_msg('port_update',
                                           port=port,
                                           network_type=network_type,
                                           segmentation_id=segmentation_id,
                                           physical_network=physical_network),
                             topic=self.topic_port_update)
        else:
            self.cast(context,
                      self.make_msg('port_update',
                                    port=port,
                                    network_type=network_type,
                                    segmentation_id=segmentation_id,
                                    physical_network=physical_network),
                      topic='%s.%s' % (self.topic_port_update, host))
| |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RNN cell wrapper v2 implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras import combinations
from tensorflow.python.keras import layers
from tensorflow.python.keras.layers import rnn_cell_wrapper_v2
from tensorflow.python.keras.layers.legacy_rnn import rnn_cell_impl
from tensorflow.python.keras.legacy_tf_layers import base as legacy_base_layer
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
class RNNCellWrapperTest(test.TestCase, parameterized.TestCase):
  """Tests for the v2 (Keras-style) RNN cell wrappers.

  Each test runs in both graph and eager mode via the combinations
  decorator above.
  """

  def testResidualWrapper(self):
    wrapper_type = rnn_cell_wrapper_v2.ResidualWrapper
    x = ops.convert_to_tensor_v2_with_dispatch(
        np.array([[1., 1., 1.]]), dtype="float32")
    m = ops.convert_to_tensor_v2_with_dispatch(
        np.array([[0.1, 0.1, 0.1]]), dtype="float32")
    base_cell = rnn_cell_impl.GRUCell(
        3, kernel_initializer=init_ops.constant_initializer(0.5),
        bias_initializer=init_ops.constant_initializer(0.5))
    g, m_new = base_cell(x, m)
    wrapper_object = wrapper_type(base_cell)
    # The wrapped cell must be tracked as a checkpoint dependency named
    # "cell" so that checkpoints of the wrapper restore the inner cell.
    (name, dep), = wrapper_object._checkpoint_dependencies
    wrapper_object.get_config()  # Should not throw an error
    self.assertIs(dep, base_cell)
    self.assertEqual("cell", name)
    g_res, m_new_res = wrapper_object(x, m)
    self.evaluate([variables_lib.global_variables_initializer()])
    res = self.evaluate([g, g_res, m_new, m_new_res])
    # Residual connections
    self.assertAllClose(res[1], res[0] + [1., 1., 1.])
    # States are left untouched
    self.assertAllClose(res[2], res[3])

  def testResidualWrapperWithSlice(self):
    wrapper_type = rnn_cell_wrapper_v2.ResidualWrapper
    x = ops.convert_to_tensor_v2_with_dispatch(
        np.array([[1., 1., 1., 1., 1.]]), dtype="float32")
    m = ops.convert_to_tensor_v2_with_dispatch(
        np.array([[0.1, 0.1, 0.1]]), dtype="float32")
    base_cell = rnn_cell_impl.GRUCell(
        3, kernel_initializer=init_ops.constant_initializer(0.5),
        bias_initializer=init_ops.constant_initializer(0.5))
    g, m_new = base_cell(x, m)

    def residual_with_slice_fn(inp, out):
      # Custom residual fn: keep only the first 3 input features so the
      # 5-wide input can be added to the 3-unit cell output.
      inp_sliced = array_ops.slice(inp, [0, 0], [-1, 3])
      return inp_sliced + out

    g_res, m_new_res = wrapper_type(
        base_cell, residual_with_slice_fn)(x, m)
    self.evaluate([variables_lib.global_variables_initializer()])
    res_g, res_g_res, res_m_new, res_m_new_res = self.evaluate(
        [g, g_res, m_new, m_new_res])
    # Residual connections
    self.assertAllClose(res_g_res, res_g + [1., 1., 1.])
    # States are left untouched
    self.assertAllClose(res_m_new, res_m_new_res)

  def testDeviceWrapper(self):
    wrapper_type = rnn_cell_wrapper_v2.DeviceWrapper
    x = array_ops.zeros([1, 3])
    m = array_ops.zeros([1, 3])
    cell = rnn_cell_impl.GRUCell(3)
    wrapped_cell = wrapper_type(cell, "/cpu:0")
    # Same checkpoint-dependency contract as the other wrappers.
    (name, dep), = wrapped_cell._checkpoint_dependencies
    wrapped_cell.get_config()  # Should not throw an error
    self.assertIs(dep, cell)
    self.assertEqual("cell", name)
    outputs, _ = wrapped_cell(x, m)
    # The wrapper must pin the cell's computation to the requested device.
    self.assertIn("cpu:0", outputs.device.lower())

  @parameterized.parameters(
      [[rnn_cell_impl.DropoutWrapper, rnn_cell_wrapper_v2.DropoutWrapper],
       [rnn_cell_impl.ResidualWrapper, rnn_cell_wrapper_v2.ResidualWrapper]])
  def testWrapperKerasStyle(self, wrapper, wrapper_v2):
    """Tests if wrapper cell is instantiated in keras style scope."""
    wrapped_cell_v2 = wrapper_v2(rnn_cell_impl.BasicRNNCell(1))
    # v2 wrappers never set the legacy _keras_style attribute at all.
    self.assertIsNone(getattr(wrapped_cell_v2, "_keras_style", None))
    wrapped_cell = wrapper(rnn_cell_impl.BasicRNNCell(1))
    self.assertFalse(wrapped_cell._keras_style)

  @parameterized.parameters(
      [rnn_cell_wrapper_v2.DropoutWrapper, rnn_cell_wrapper_v2.ResidualWrapper])
  def testWrapperWeights(self, wrapper):
    """Tests that wrapper weights contain wrapped cells weights."""
    base_cell = layers.SimpleRNNCell(1, name="basic_rnn_cell")
    rnn_cell = wrapper(base_cell)
    rnn_layer = layers.RNN(rnn_cell)
    inputs = ops.convert_to_tensor_v2_with_dispatch([[[1]]],
                                                    dtype=dtypes.float32)
    rnn_layer(inputs)
    wrapper_name = generic_utils.to_snake_case(wrapper.__name__)
    expected_weights = ["rnn/" + wrapper_name + "/" + var for var in
                        ("kernel:0", "recurrent_kernel:0", "bias:0")]
    self.assertLen(rnn_cell.weights, 3)
    self.assertCountEqual([v.name for v in rnn_cell.weights], expected_weights)
    self.assertCountEqual([v.name for v in rnn_cell.trainable_variables],
                          expected_weights)
    self.assertCountEqual([v.name for v in rnn_cell.non_trainable_variables],
                          [])
    self.assertCountEqual([v.name for v in rnn_cell.cell.weights],
                          expected_weights)

  @parameterized.parameters(
      [rnn_cell_wrapper_v2.DropoutWrapper, rnn_cell_wrapper_v2.ResidualWrapper])
  def testWrapperV2Caller(self, wrapper):
    """Tests that wrapper V2 is using the LayerRNNCell's caller."""
    with legacy_base_layer.keras_style_scope():
      base_cell = rnn_cell_impl.MultiRNNCell(
          [rnn_cell_impl.BasicRNNCell(1) for _ in range(2)])
      rnn_cell = wrapper(base_cell)
    inputs = ops.convert_to_tensor_v2_with_dispatch([[1]], dtype=dtypes.float32)
    state = ops.convert_to_tensor_v2_with_dispatch([[1]], dtype=dtypes.float32)
    _ = rnn_cell(inputs, [state, state])
    weights = base_cell._cells[0].weights
    self.assertLen(weights, expected_len=2)
    # Variables created through the v2 caller carry the wrapper's scope name.
    self.assertTrue(all("_wrapper" in v.name for v in weights))

  @parameterized.parameters(
      [rnn_cell_wrapper_v2.DropoutWrapper, rnn_cell_wrapper_v2.ResidualWrapper])
  def testWrapperV2Build(self, wrapper):
    # Building the wrapper must build the wrapped cell too.
    cell = rnn_cell_impl.LSTMCell(10)
    wrapper = wrapper(cell)
    wrapper.build((1,))
    self.assertTrue(cell.built)

  def testDeviceWrapperSerialization(self):
    wrapper_cls = rnn_cell_wrapper_v2.DeviceWrapper
    cell = layers.LSTMCell(10)
    wrapper = wrapper_cls(cell, "/cpu:0")
    config = wrapper.get_config()
    # get_config -> from_config must round-trip exactly.
    reconstructed_wrapper = wrapper_cls.from_config(config)
    self.assertDictEqual(config, reconstructed_wrapper.get_config())
    self.assertIsInstance(reconstructed_wrapper, wrapper_cls)

  def testResidualWrapperSerialization(self):
    wrapper_cls = rnn_cell_wrapper_v2.ResidualWrapper
    cell = layers.LSTMCell(10)
    wrapper = wrapper_cls(cell)
    config = wrapper.get_config()
    reconstructed_wrapper = wrapper_cls.from_config(config)
    self.assertDictEqual(config, reconstructed_wrapper.get_config())
    self.assertIsInstance(reconstructed_wrapper, wrapper_cls)
    # A lambda residual_fn must survive serialization as well.
    wrapper = wrapper_cls(cell, residual_fn=lambda i, o: i + i + o)
    config = wrapper.get_config()
    reconstructed_wrapper = wrapper_cls.from_config(config)
    # Assert the reconstructed function will perform the math correctly.
    self.assertEqual(reconstructed_wrapper._residual_fn(1, 2), 4)

    def residual_fn(inputs, outputs):
      return inputs * 3 + outputs

    wrapper = wrapper_cls(cell, residual_fn=residual_fn)
    config = wrapper.get_config()
    reconstructed_wrapper = wrapper_cls.from_config(config)
    # Assert the reconstructed function will perform the math correctly.
    self.assertEqual(reconstructed_wrapper._residual_fn(1, 2), 5)

  def testDropoutWrapperSerialization(self):
    wrapper_cls = rnn_cell_wrapper_v2.DropoutWrapper
    cell = layers.GRUCell(10)
    wrapper = wrapper_cls(cell)
    config = wrapper.get_config()
    reconstructed_wrapper = wrapper_cls.from_config(config)
    self.assertDictEqual(config, reconstructed_wrapper.get_config())
    self.assertIsInstance(reconstructed_wrapper, wrapper_cls)
    # dropout_state_filter_visitor given as a lambda must round-trip.
    wrapper = wrapper_cls(cell, dropout_state_filter_visitor=lambda s: True)
    config = wrapper.get_config()
    reconstructed_wrapper = wrapper_cls.from_config(config)
    self.assertTrue(reconstructed_wrapper._dropout_state_filter(None))

    def dropout_state_filter_visitor(unused_state):
      return False

    wrapper = wrapper_cls(
        cell, dropout_state_filter_visitor=dropout_state_filter_visitor)
    config = wrapper.get_config()
    reconstructed_wrapper = wrapper_cls.from_config(config)
    self.assertFalse(reconstructed_wrapper._dropout_state_filter(None))

  def testDropoutWrapperWithKerasLSTMCell(self):
    # DropoutWrapper explicitly rejects Keras LSTM cells (v1 and v2).
    wrapper_cls = rnn_cell_wrapper_v2.DropoutWrapper
    cell = layers.LSTMCell(10)
    with self.assertRaisesRegex(ValueError, "does not work with "):
      wrapper_cls(cell)
    cell = layers.LSTMCellV2(10)
    with self.assertRaisesRegex(ValueError, "does not work with "):
      wrapper_cls(cell)
# Standard TensorFlow test entry point: run all tests in this module.
if __name__ == "__main__":
  test.main()
| |
#Copyright [2013] Hewlett-Packard Development Company, L.P.
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""Model classes that form the core of snapshots functionality."""
from trove.common import cfg
from trove.common import exception
from trove.db.models import DatabaseModelBase
from trove.openstack.common import log as logging
from swiftclient.client import ClientException
from trove.taskmanager import api
from trove.common.remote import create_swift_client
from trove.common import utils
from trove.quota.quota import run_with_quotas
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# Name of the Swift container that stores backup objects (from config).
SWIFT_CONTAINER = CONF.backup_swift_container
class BackupState(object):
    """Enumeration of the lifecycle states a backup can be in."""

    # In-progress states, in the order a backup normally moves through them.
    NEW, BUILDING, SAVING = "NEW", "BUILDING", "SAVING"
    # Terminal states.
    COMPLETED, FAILED, DELETE_FAILED = "COMPLETED", "FAILED", "DELETE_FAILED"

    RUNNING_STATES = [NEW, BUILDING, SAVING]
    END_STATES = [COMPLETED, FAILED, DELETE_FAILED]
class Backup(object):
    """High-level operations on backup records.

    Wraps quota accounting, Swift credential checks and task-manager
    dispatch around the DBBackup database model.
    """

    @classmethod
    def create(cls, context, instance, name, description=None):
        """Create a db record for a Backup and dispatch the backup task.

        :param context: request context (tenant_id and auth token included)
        :param instance: id or href of the instance being backed up
        :param name: user-visible name for the backup
        :param description: optional free-form description
        :return: the newly created DBBackup record
        :raises exception.BackupCreationError: if the record is invalid
        """
        def _create_resources():
            # Parse the ID from the Ref.
            instance_id = utils.get_id_from_href(instance)

            # Verify that the instance exists and can perform actions.
            from trove.instance.models import Instance
            instance_model = Instance.load(context, instance_id)
            instance_model.validate_can_perform_action()

            # Fail fast if the token cannot reach Swift.
            cls.verify_swift_auth_token(context)

            try:
                db_info = DBBackup.create(name=name,
                                          description=description,
                                          tenant_id=context.tenant,
                                          state=BackupState.NEW,
                                          instance_id=instance_id,
                                          deleted=False)
            except exception.InvalidModelError as ex:
                LOG.exception("Unable to create Backup record:")
                raise exception.BackupCreationError(str(ex))
            api.API(context).create_backup(db_info.id, instance_id)
            return db_info

        # Creation consumes one unit of the tenant's 'backups' quota.
        return run_with_quotas(context.tenant,
                               {'backups': 1},
                               _create_resources)

    @classmethod
    def running(cls, instance_id, exclude=None):
        """Return the first running backup for instance_id, or None.

        :param instance_id: Id of the instance
        :param exclude: Backup ID to exclude from the query (any other running)
        """
        query = DBBackup.query()
        query = query.filter(DBBackup.instance_id == instance_id,
                             DBBackup.state.in_(BackupState.RUNNING_STATES))
        # filter out deleted backups, PEP8 does not like field == False!
        query = query.filter_by(deleted=False)
        if exclude:
            query = query.filter(DBBackup.id != exclude)
        return query.first()

    @classmethod
    def get_by_id(cls, context, backup_id, deleted=False):
        """Return the backup with the given id.

        :param backup_id: Id of the backup to return
        :param deleted: if True, also match soft-deleted backups
        :raises exception.NotFound: if no matching backup exists
        """
        try:
            db_info = DBBackup.find_by(context=context,
                                       id=backup_id,
                                       deleted=deleted)
            return db_info
        except exception.NotFound:
            # Re-raise with the uuid so callers get a useful message.
            raise exception.NotFound(uuid=backup_id)

    @classmethod
    def list(cls, context):
        """List all live (non-deleted) backups of the given tenant."""
        db_info = DBBackup.find_all(tenant_id=context.tenant,
                                    deleted=False)
        return db_info

    @classmethod
    def list_for_instance(cls, instance_id):
        """List all live backups associated with the given instance."""
        db_info = DBBackup.find_all(instance_id=instance_id,
                                    deleted=False)
        return db_info

    @classmethod
    def fail_for_instance(cls, instance_id):
        """Mark every running backup of the instance as FAILED."""
        query = DBBackup.query()
        query = query.filter(DBBackup.instance_id == instance_id,
                             DBBackup.state.in_(BackupState.RUNNING_STATES))
        query = query.filter_by(deleted=False)
        for backup in query.all():
            backup.state = BackupState.FAILED
            backup.save()

    @classmethod
    def delete(cls, context, backup_id):
        """Delete the given backup and release its quota unit.

        :param context: context containing the tenant id and token
        :param backup_id: Backup uuid
        :raises exception.UnprocessableEntity: if the backup is running
        """
        def _delete_resources():
            backup = cls.get_by_id(context, backup_id)
            if backup.is_running:
                # Fixed grammar of the user-facing message
                # (was: "cannot be delete").
                msg = ("Backup %s cannot be deleted because it is running." %
                       backup_id)
                raise exception.UnprocessableEntity(msg)
            cls.verify_swift_auth_token(context)
            api.API(context).delete_backup(backup_id)

        # Deletion returns one unit of the tenant's 'backups' quota.
        return run_with_quotas(context.tenant,
                               {'backups': -1},
                               _delete_resources)

    @classmethod
    def verify_swift_auth_token(cls, context):
        """Ensure the context's token is valid for Swift.

        :raises exception.SwiftAuthError: if authentication fails
        """
        try:
            client = create_swift_client(context)
            client.get_account()
        except ClientException:
            raise exception.SwiftAuthError(tenant_id=context.tenant)

    @classmethod
    def check_object_exist(cls, context, location):
        """Return True if the Swift object at ``location`` exists.

        :param location: URL whose last two segments are container/object
        :raises exception.SwiftAuthError: on any Swift error other than 404
        """
        try:
            parts = location.split('/')
            obj = parts[-1]
            container = parts[-2]
            client = create_swift_client(context)
            client.head_object(container, obj)
            return True
        except ClientException as e:
            if e.http_status == 404:
                return False
            else:
                raise exception.SwiftAuthError(tenant_id=context.tenant)
def persisted_models():
    """Map persisted model names to their database classes."""
    return dict(backups=DBBackup)
class DBBackup(DatabaseModelBase):
    """Database model for a row in the backups table."""

    _data_fields = ['id', 'name', 'description', 'location', 'backup_type',
                    'size', 'tenant_id', 'state', 'instance_id',
                    'checksum', 'backup_timestamp', 'deleted', 'created',
                    'updated', 'deleted_at']
    # Rows are soft-deleted rather than removed.
    preserve_on_delete = True

    @property
    def is_running(self):
        """True while the backup is still in progress."""
        return self.state in BackupState.RUNNING_STATES

    @property
    def is_done(self):
        """True once the backup has reached a terminal state."""
        return self.state in BackupState.END_STATES

    @property
    def filename(self):
        """Object name portion of the Swift location, or None if unset."""
        if not self.location:
            return None
        _head, sep, name = self.location.rpartition("/")
        if not sep:
            raise ValueError("Bad location for backup object.")
        return name
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common import template_format
from heat.engine import stack_resource
from heat.engine.resources import nova_utils
from heat.openstack.common import log as logging
logger = logging.getLogger(__name__)
lb_template = r'''
{
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Built in HAProxy server",
"Parameters" : {
"KeyName" : {
"Type" : "String"
}
},
"Resources": {
"latency_watcher": {
"Type": "AWS::CloudWatch::Alarm",
"Properties": {
"MetricName": "Latency",
"Namespace": "AWS/ELB",
"Statistic": "Average",
"Period": "60",
"EvaluationPeriods": "1",
"Threshold": "2",
"AlarmActions": [],
"ComparisonOperator": "GreaterThanThreshold"
}
},
"CfnLBUser" : {
"Type" : "AWS::IAM::User"
},
"CfnLBAccessKey" : {
"Type" : "AWS::IAM::AccessKey",
"Properties" : {
"UserName" : {"Ref": "CfnLBUser"}
}
},
"LB_instance": {
"Type": "AWS::EC2::Instance",
"Metadata": {
"AWS::CloudFormation::Init": {
"config": {
"packages": {
"yum": {
"cronie" : [],
"haproxy" : [],
"socat" : [],
"python-psutil" : []
}
},
"services": {
"systemd": {
"crond" : { "enabled" : "true", "ensureRunning" : "true" }
}
},
"files": {
"/etc/cfn/cfn-credentials" : {
"content" : { "Fn::Join" : ["", [
"AWSAccessKeyId=", { "Ref" : "CfnLBAccessKey" }, "\n",
"AWSSecretKey=", {"Fn::GetAtt": ["CfnLBAccessKey",
"SecretAccessKey"]}, "\n"
]]},
"mode" : "000400",
"owner" : "root",
"group" : "root"
},
"/etc/cfn/cfn-hup.conf" : {
"content" : { "Fn::Join" : ["", [
"[main]\n",
"stack=", { "Ref" : "AWS::StackId" }, "\n",
"credential-file=/etc/cfn/cfn-credentials\n",
"region=", { "Ref" : "AWS::Region" }, "\n",
"interval=60\n"
]]},
"mode" : "000400",
"owner" : "root",
"group" : "root"
},
"/etc/cfn/hooks.conf" : {
"content": { "Fn::Join" : ["", [
"[cfn-init]\n",
"triggers=post.update\n",
"path=Resources.LB_instance.Metadata\n",
"action=/opt/aws/bin/cfn-init -s ",
{ "Ref": "AWS::StackId" },
" -r LB_instance ",
" --region ", { "Ref": "AWS::Region" }, "\n",
"runas=root\n",
"\n",
"[reload]\n",
"triggers=post.update\n",
"path=Resources.LB_instance.Metadata\n",
"action=systemctl reload-or-restart haproxy.service\n",
"runas=root\n"
]]},
"mode" : "000400",
"owner" : "root",
"group" : "root"
},
"/etc/haproxy/haproxy.cfg": {
"content": "",
"mode": "000644",
"owner": "root",
"group": "root"
},
"/tmp/cfn-hup-crontab.txt" : {
"content" : { "Fn::Join" : ["", [
"MAIL=\"\"\n",
"\n",
"* * * * * /opt/aws/bin/cfn-hup -f\n",
"* * * * * /opt/aws/bin/cfn-push-stats ",
" --watch ", { "Ref" : "latency_watcher" }, " --haproxy\n"
]]},
"mode" : "000600",
"owner" : "root",
"group" : "root"
}
}
}
}
},
"Properties": {
"ImageId": "F17-x86_64-cfntools",
"InstanceType": "m1.small",
"KeyName": { "Ref": "KeyName" },
"UserData": { "Fn::Base64": { "Fn::Join": ["", [
"#!/bin/bash -v\n",
"# Helper function\n",
"function error_exit\n",
"{\n",
" /opt/aws/bin/cfn-signal -e 1 -r \"$1\" '",
{ "Ref" : "WaitHandle" }, "'\n",
" exit 1\n",
"}\n",
"/opt/aws/bin/cfn-init -s ",
{ "Ref": "AWS::StackId" },
" -r LB_instance ",
" --region ", { "Ref": "AWS::Region" }, "\n",
"# install cfn-hup crontab\n",
"crontab /tmp/cfn-hup-crontab.txt\n",
"# LB setup completed, signal success\n",
"/opt/aws/bin/cfn-signal -e 0 -r \"LB server setup complete\" '",
{ "Ref" : "WaitHandle" }, "'\n"
]]}}
}
},
"WaitHandle" : {
"Type" : "AWS::CloudFormation::WaitConditionHandle"
},
"WaitCondition" : {
"Type" : "AWS::CloudFormation::WaitCondition",
"DependsOn" : "LB_instance",
"Properties" : {
"Handle" : {"Ref" : "WaitHandle"},
"Timeout" : "600"
}
}
},
"Outputs": {
"PublicIp": {
"Value": { "Fn::GetAtt": [ "LB_instance", "PublicIp" ] },
"Description": "instance IP"
}
}
}
'''
#
# TODO(asalkeld) the above inline template _could_ be placed in an external
# file at the moment this is because we will probably need to implement a
# LoadBalancer based on keepalived as well (for for ssl support).
#
class LoadBalancer(stack_resource.StackResource):
    """AWS-compatible LoadBalancer backed by a nested HAProxy stack.

    Creates the nested stack defined by ``lb_template`` (an HAProxy
    instance) and regenerates the haproxy.cfg metadata whenever the set
    of balanced instances changes.
    """

    listeners_schema = {
        'InstancePort': {
            'Type': 'Number',
            'Required': True,
            'Description': _('TCP port on which the instance server is'
                             ' listening.')},
        'LoadBalancerPort': {
            'Type': 'Number',
            'Required': True,
            'Description': _('The external load balancer port number.')},
        'Protocol': {
            'Type': 'String',
            'Required': True,
            'AllowedValues': ['TCP', 'HTTP'],
            'Description': _('The load balancer transport protocol to use.')},
        'SSLCertificateId': {
            'Type': 'String',
            'Implemented': False,
            'Description': _('Not Implemented.')},
        'PolicyNames': {
            'Type': 'List',
            'Implemented': False,
            'Description': _('Not Implemented.')}
    }
    healthcheck_schema = {
        'HealthyThreshold': {
            'Type': 'Number',
            'Required': True,
            'Description': _('The number of consecutive health probe successes'
                             ' required before moving the instance to the'
                             ' healthy state.')},
        'Interval': {
            'Type': 'Number',
            'Required': True,
            'Description': _('The approximate interval, in seconds, between'
                             ' health checks of an individual instance.')},
        'Target': {
            'Type': 'String',
            'Required': True,
            'Description': _('The port being checked.')},
        'Timeout': {
            'Type': 'Number',
            'Required': True,
            'Description': _('Health probe timeout, in seconds.')},
        'UnhealthyThreshold': {
            'Type': 'Number',
            'Required': True,
            'Description': _('The number of consecutive health probe failures'
                             ' required before moving the instance to the'
                             ' unhealthy state')},
    }

    properties_schema = {
        'AvailabilityZones': {
            'Type': 'List',
            'Required': True,
            'Description': _('The Availability Zones in which to create the'
                             ' load balancer.')},
        'HealthCheck': {
            'Type': 'Map',
            'Schema': healthcheck_schema,
            'Description': _('An application health check for the'
                             ' instances.')},
        'Instances': {
            'Type': 'List',
            'UpdateAllowed': True,
            'Description': _('The list of instance IDs load balanced.')},
        'Listeners': {
            'Type': 'List', 'Required': True,
            'Schema': {'Type': 'Map', 'Schema': listeners_schema},
            'Description': _('One or more listeners for this load balancer.')},
        'AppCookieStickinessPolicy': {
            'Type': 'String',
            'Implemented': False,
            'Description': _('Not Implemented.')},
        'LBCookieStickinessPolicy': {
            'Type': 'String',
            'Implemented': False,
            'Description': _('Not Implemented.')},
        'SecurityGroups': {
            'Type': 'String',
            'Implemented': False,
            'Description': _('Not Implemented.')},
        'Subnets': {
            'Type': 'List',
            'Implemented': False,
            'Description': _('Not Implemented.')}
    }
    attributes_schema = {
        "CanonicalHostedZoneName": ("The name of the hosted zone that is "
                                    "associated with the LoadBalancer."),
        "CanonicalHostedZoneNameID": ("The ID of the hosted zone name that is "
                                      "associated with the LoadBalancer."),
        "DNSName": "The DNS name for the LoadBalancer.",
        "SourceSecurityGroup.GroupName": ("The security group that you can use"
                                          " as part of your inbound rules for "
                                          "your LoadBalancer's back-end "
                                          "instances."),
        "SourceSecurityGroup.OwnerAlias": "Owner of the source security group."
    }
    update_allowed_keys = ('Properties',)

    def _haproxy_config(self, templ, instances):
        """Build the haproxy.cfg text for the given instance ids."""
        # NOTE(review): 'templ' appears unused in this method — confirm
        # before removing it from the signature.
        # initial simplifications:
        # - only one Listener
        # - only http (no tcp or ssl)
        #
        # option httpchk HEAD /check.txt HTTP/1.0
        gl = '''
    global
        daemon
        maxconn 256
        stats socket /tmp/.haproxy-stats
    defaults
        mode http
        timeout connect 5000ms
        timeout client 50000ms
        timeout server 50000ms
'''
        listener = self.properties['Listeners'][0]
        lb_port = listener['LoadBalancerPort']
        inst_port = listener['InstancePort']
        spaces = '            '
        frontend = '''
    frontend http
        bind *:%s
''' % (lb_port)

        health_chk = self.properties['HealthCheck']
        if health_chk:
            # Per-server check options plus a global check timeout.
            check = 'check inter %ss fall %s rise %s' % (
                health_chk['Interval'],
                health_chk['UnhealthyThreshold'],
                health_chk['HealthyThreshold'])
            timeout_check = 'timeout check %ds' % int(health_chk['Timeout'])
        else:
            check = ''
            timeout_check = ''

        backend = '''
    default_backend servers
    backend servers
        balance roundrobin
        option http-server-close
        option forwardfor
        option httpchk
        %s
''' % timeout_check

        servers = []
        n = 1
        client = self.nova()
        for i in instances:
            # Fall back to 0.0.0.0 when the server has no address yet.
            ip = nova_utils.server_to_ipaddress(client, i) or '0.0.0.0'
            logger.debug('haproxy server:%s' % ip)
            servers.append('%sserver server%d %s:%s %s' % (spaces, n,
                                                           ip, inst_port,
                                                           check))
            n = n + 1
        return '%s%s%s%s\n' % (gl, frontend, backend, '\n'.join(servers))

    def handle_create(self):
        """Create the nested HAProxy stack with generated haproxy.cfg."""
        templ = template_format.parse(lb_template)

        if self.properties['Instances']:
            md = templ['Resources']['LB_instance']['Metadata']
            files = md['AWS::CloudFormation::Init']['config']['files']
            cfg = self._haproxy_config(templ, self.properties['Instances'])
            files['/etc/haproxy/haproxy.cfg']['content'] = cfg

        # If the owning stack defines KeyName, we use that key for the nested
        # template, otherwise use no key
        try:
            param = {'KeyName': self.stack.parameters['KeyName']}
        except KeyError:
            del templ['Resources']['LB_instance']['Properties']['KeyName']
            del templ['Parameters']['KeyName']
            param = {}

        return self.create_with_template(templ, param)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        '''
        re-generate the Metadata
        save it to the db.
        rely on the cfn-hup to reconfigure HAProxy
        '''
        if 'Instances' in prop_diff:
            templ = template_format.parse(lb_template)
            cfg = self._haproxy_config(templ, prop_diff['Instances'])

            md = self.nested()['LB_instance'].metadata
            files = md['AWS::CloudFormation::Init']['config']['files']
            files['/etc/haproxy/haproxy.cfg']['content'] = cfg

            self.nested()['LB_instance'].metadata = md

    def handle_delete(self):
        """Delete the nested stack."""
        return self.delete_nested()

    def validate(self):
        '''
        Validate any of the provided params
        '''
        res = super(LoadBalancer, self).validate()
        if res:
            return res

        health_chk = self.properties['HealthCheck']
        if health_chk:
            # The probe interval must exceed the probe timeout.
            if float(health_chk['Interval']) < float(health_chk['Timeout']):
                return {'Error':
                        'Interval must be larger than Timeout'}

    def FnGetRefId(self):
        """Resolve {"Ref": ...} to this resource's name."""
        return unicode(self.name)

    def _resolve_attribute(self, name):
        '''
        We don't really support any of these yet.
        '''
        if name == 'DNSName':
            return self.get_output('PublicIp')
        elif name in self.attributes_schema:
            # Not sure if we should return anything for the other attribs
            # since they aren't really supported in any meaningful way
            return ''
def resource_mapping():
    """Map the Heat resource type name to its implementing class."""
    return {'AWS::ElasticLoadBalancing::LoadBalancer': LoadBalancer}
| |
"""
Support for Apple TV.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.apple_tv/
"""
import asyncio
import logging
from homeassistant.components.apple_tv import (
ATTR_ATV, ATTR_POWER, DATA_APPLE_TV, DATA_ENTITIES)
from homeassistant.components.media_player import (
MEDIA_TYPE_MUSIC, MEDIA_TYPE_TVSHOW, MEDIA_TYPE_VIDEO, SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE, SUPPORT_PLAY, SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK, SUPPORT_STOP, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
MediaPlayerDevice)
from homeassistant.const import (
CONF_HOST, CONF_NAME, EVENT_HOMEASSISTANT_STOP, STATE_IDLE, STATE_OFF,
STATE_PAUSED, STATE_PLAYING, STATE_STANDBY)
from homeassistant.core import callback
import homeassistant.util.dt as dt_util
DEPENDENCIES = ['apple_tv']

_LOGGER = logging.getLogger(__name__)

# Bitmask of media player features this platform advertises.
SUPPORT_APPLE_TV = SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PLAY_MEDIA | \
    SUPPORT_PAUSE | SUPPORT_PLAY | SUPPORT_SEEK | \
    SUPPORT_STOP | SUPPORT_NEXT_TRACK | SUPPORT_PREVIOUS_TRACK
@asyncio.coroutine
def async_setup_platform(
        hass, config, async_add_entities, discovery_info=None):
    """Set up the Apple TV platform."""
    if not discovery_info:
        return

    # Keep a cache of created entities for the component's service handler.
    hass.data.setdefault(DATA_ENTITIES, [])

    host = discovery_info[CONF_HOST]
    device_conf = hass.data[DATA_APPLE_TV][host]
    entity = AppleTvDevice(
        device_conf[ATTR_ATV], discovery_info[CONF_NAME],
        device_conf[ATTR_POWER])

    @callback
    def on_hass_stop(event):
        """Stop push updates when hass stops."""
        device_conf[ATTR_ATV].push_updater.stop()

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, on_hass_stop)

    if entity not in hass.data[DATA_ENTITIES]:
        hass.data[DATA_ENTITIES].append(entity)
        async_add_entities([entity])
class AppleTvDevice(MediaPlayerDevice):
    """Representation of an Apple TV device."""

    def __init__(self, atv, name, power):
        """Initialize the Apple TV device."""
        self.atv = atv
        self._name = name
        # Latest playstatus object from the push updater; None when unknown.
        self._playing = None
        self._power = power
        # Register this entity for power-state and push-update callbacks
        # (playstatus_update / playstatus_error below).
        self._power.listeners.append(self)
        self.atv.push_updater.listener = self

    @asyncio.coroutine
    def async_added_to_hass(self):
        """Handle when an entity is about to be added to Home Assistant."""
        self._power.init()

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def unique_id(self):
        """Return a unique ID."""
        return self.atv.metadata.device_id

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def state(self):
        """Return the state of the device."""
        if not self._power.turned_on:
            return STATE_OFF

        if self._playing:
            from pyatv import const
            state = self._playing.play_state
            if state in (const.PLAY_STATE_IDLE, const.PLAY_STATE_NO_MEDIA,
                         const.PLAY_STATE_LOADING):
                return STATE_IDLE
            if state == const.PLAY_STATE_PLAYING:
                return STATE_PLAYING
            if state in (const.PLAY_STATE_PAUSED,
                         const.PLAY_STATE_FAST_FORWARD,
                         const.PLAY_STATE_FAST_BACKWARD):
                # Catch fast forward/backward here so "play" is default action
                return STATE_PAUSED
            return STATE_STANDBY  # Bad or unknown state?

    @callback
    def playstatus_update(self, updater, playing):
        """Print what is currently playing when it changes."""
        self._playing = playing
        self.async_schedule_update_ha_state()

    @callback
    def playstatus_error(self, updater, exception):
        """Inform about an error and restart push updates."""
        _LOGGER.warning('A %s error occurred: %s',
                        exception.__class__, exception)

        # This will wait 10 seconds before restarting push updates. If the
        # connection continues to fail, it will flood the log (every 10
        # seconds) until it succeeds. A better approach should probably be
        # implemented here later.
        updater.start(initial_delay=10)
        self._playing = None
        self.async_schedule_update_ha_state()

    @property
    def media_content_type(self):
        """Content type of current playing media."""
        if self._playing:
            from pyatv import const
            media_type = self._playing.media_type
            if media_type == const.MEDIA_TYPE_VIDEO:
                return MEDIA_TYPE_VIDEO
            if media_type == const.MEDIA_TYPE_MUSIC:
                return MEDIA_TYPE_MUSIC
            if media_type == const.MEDIA_TYPE_TV:
                return MEDIA_TYPE_TVSHOW

    @property
    def media_duration(self):
        """Duration of current playing media in seconds."""
        if self._playing:
            return self._playing.total_time

    @property
    def media_position(self):
        """Position of current playing media in seconds."""
        if self._playing:
            return self._playing.position

    @property
    def media_position_updated_at(self):
        """Last valid time of media position."""
        state = self.state
        if state in (STATE_PLAYING, STATE_PAUSED):
            return dt_util.utcnow()

    @asyncio.coroutine
    def async_play_media(self, media_type, media_id, **kwargs):
        """Send the play_media command to the media player."""
        # media_id is expected to be a URL playable over AirPlay.
        yield from self.atv.airplay.play_url(media_id)

    @property
    def media_image_hash(self):
        """Hash value for media image."""
        state = self.state
        if self._playing and state not in [STATE_OFF, STATE_IDLE]:
            return self._playing.hash

    @asyncio.coroutine
    def async_get_media_image(self):
        """Fetch media image of current playing image."""
        state = self.state
        if self._playing and state not in [STATE_OFF, STATE_IDLE]:
            return (yield from self.atv.metadata.artwork()), 'image/png'

        return None, None

    @property
    def media_title(self):
        """Title of current playing media."""
        if self._playing:
            if self.state == STATE_IDLE:
                return 'Nothing playing'
            title = self._playing.title
            return title if title else 'No title'

        return 'Establishing a connection to {0}...'.format(self._name)

    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        return SUPPORT_APPLE_TV

    @asyncio.coroutine
    def async_turn_on(self):
        """Turn the media player on."""
        self._power.set_power_on(True)

    @asyncio.coroutine
    def async_turn_off(self):
        """Turn the media player off."""
        self._playing = None
        self._power.set_power_on(False)

    def async_media_play_pause(self):
        """Pause media on media player.

        This method must be run in the event loop and returns a coroutine.
        """
        if self._playing:
            state = self.state
            if state == STATE_PAUSED:
                return self.atv.remote_control.play()
            if state == STATE_PLAYING:
                return self.atv.remote_control.pause()

    def async_media_play(self):
        """Play media.

        This method must be run in the event loop and returns a coroutine.
        """
        if self._playing:
            return self.atv.remote_control.play()

    def async_media_stop(self):
        """Stop the media player.

        This method must be run in the event loop and returns a coroutine.
        """
        if self._playing:
            return self.atv.remote_control.stop()

    def async_media_pause(self):
        """Pause the media player.

        This method must be run in the event loop and returns a coroutine.
        """
        if self._playing:
            return self.atv.remote_control.pause()

    def async_media_next_track(self):
        """Send next track command.

        This method must be run in the event loop and returns a coroutine.
        """
        if self._playing:
            return self.atv.remote_control.next()

    def async_media_previous_track(self):
        """Send previous track command.

        This method must be run in the event loop and returns a coroutine.
        """
        if self._playing:
            return self.atv.remote_control.previous()

    def async_media_seek(self, position):
        """Send seek command.

        This method must be run in the event loop and returns a coroutine.
        """
        if self._playing:
            return self.atv.remote_control.set_position(position)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import re
import shutil
import threading
import unittest
import numpy as np
from tensorflow.core.framework import summary_pb2
from tensorflow.python import keras
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary.writer import writer_cache
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
try:
import requests # pylint:disable=g-import-not-at-top
except ImportError:
requests = None
# Dimensions of the tiny synthetic classification problem shared by the
# tests below; kept small so every fit() call finishes quickly.
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
class KerasCallbacksTest(test.TestCase):
def test_ModelCheckpoint(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
filepath = os.path.join(temp_dir, 'checkpoint.h5')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
# case 1
monitor = 'val_loss'
save_best_only = False
mode = 'auto'
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 2
mode = 'min'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 3
mode = 'max'
monitor = 'val_acc'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 4
save_best_only = True
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case: metric not available.
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor='unknown',
save_best_only=True)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# File won't be written.
assert not os.path.exists(filepath)
# case 5
save_best_only = False
period = 2
mode = 'auto'
filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
period=period)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=4,
verbose=1)
assert os.path.exists(filepath.format(epoch=2))
assert os.path.exists(filepath.format(epoch=4))
os.remove(filepath.format(epoch=2))
os.remove(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=3))
# Invalid use: this will raise a warning but not an Exception.
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode='unknown')
  def test_EarlyStopping(self):
    """EarlyStopping runs cleanly for every mode/monitor combination."""
    with self.test_session():
      np.random.seed(123)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      model = keras.models.Sequential()
      model.add(
          keras.layers.Dense(
              NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
      model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
      model.compile(
          loss='categorical_crossentropy',
          optimizer='rmsprop',
          metrics=['accuracy'])
      # The last case uses an unknown mode and metric on purpose: the
      # callback is expected to warn and fall back, not raise.
      cases = [
          ('max', 'val_acc'),
          ('min', 'val_loss'),
          ('auto', 'val_acc'),
          ('auto', 'loss'),
          ('unknown', 'unknown')
      ]
      for mode, monitor in cases:
        patience = 0
        cbks = [
            keras.callbacks.EarlyStopping(
                patience=patience, monitor=monitor, mode=mode)
        ]
        model.fit(
            x_train,
            y_train,
            batch_size=BATCH_SIZE,
            validation_data=(x_test, y_test),
            callbacks=cbks,
            epochs=5,
            verbose=0)
  def test_EarlyStopping_reuse(self):
    """A single EarlyStopping instance can be reused across fit() calls."""
    with self.test_session():
      np.random.seed(1337)
      patience = 3
      data = np.random.random((100, 1))
      labels = np.where(data > 0.5, 1, 0)
      model = keras.models.Sequential((keras.layers.Dense(
          1, input_dim=1, activation='relu'), keras.layers.Dense(
              1, activation='sigmoid'),))
      model.compile(
          optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
      weights = model.get_weights()
      stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) >= patience
      # This should allow training to go for at least `patience` epochs
      model.set_weights(weights)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) >= patience
  def test_EarlyStopping_with_baseline(self):
    """With a baseline, training stops immediately unless patience allows."""
    with self.test_session():
      np.random.seed(1337)
      # Baseline the model is unlikely to beat on the first epoch.
      baseline = 0.5
      (data, labels), _ = testing_utils.get_test_data(
          train_samples=100,
          test_samples=50,
          input_shape=(1,),
          num_classes=NUM_CLASSES)
      model = keras.models.Sequential((keras.layers.Dense(
          1, input_dim=1, activation='relu'), keras.layers.Dense(
              1, activation='sigmoid'),))
      model.compile(
          optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
      stopper = keras.callbacks.EarlyStopping(monitor='acc',
                                              baseline=baseline)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) == 1
      patience = 3
      stopper = keras.callbacks.EarlyStopping(monitor='acc',
                                              patience=patience,
                                              baseline=baseline)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) >= patience
def test_RemoteMonitor(self):
if requests is None:
return
monitor = keras.callbacks.RemoteMonitor()
# This will raise a warning since the default address in unreachable:
monitor.on_epoch_end(0, logs={'loss': 0.})
def test_LearningRateScheduler(self):
with self.test_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.2) < keras.backend.epsilon()
cbks = [keras.callbacks.LearningRateScheduler(lambda x, lr: lr / 2)]
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon()
  def test_ReduceLROnPlateau(self):
    """ReduceLROnPlateau shrinks the LR when improvement is below min_delta."""
    with self.test_session():
      np.random.seed(1337)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)

      def make_model():
        # Fresh model with a known starting LR (0.1) for each scenario.
        np.random.seed(1337)
        model = keras.models.Sequential()
        model.add(
            keras.layers.Dense(
                NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
        model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
        model.compile(
            loss='categorical_crossentropy',
            optimizer=keras.optimizers.SGD(lr=0.1),
            metrics=['accuracy'])
        return model

      model = make_model()
      # This should reduce the LR after the first epoch (due to the large
      # min_delta: no realistic improvement can exceed 10).
      cbks = [
          keras.callbacks.ReduceLROnPlateau(
              monitor='val_loss',
              factor=0.1,
              min_delta=10,
              patience=1,
              cooldown=5)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=5,
          verbose=0)
      self.assertAllClose(
          float(keras.backend.get_value(model.optimizer.lr)),
          0.01,
          atol=1e-4)
      model = make_model()
      # With min_delta=0 and the loss steadily improving, the LR should
      # stay at its initial value of 0.1.
      cbks = [
          keras.callbacks.ReduceLROnPlateau(
              monitor='val_loss',
              factor=0.1,
              min_delta=0,
              patience=1,
              cooldown=5)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=5,
          verbose=2)
      self.assertAllClose(
          float(keras.backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4)
def test_ReduceLROnPlateau_patience(self):
class DummyOptimizer(object):
def __init__(self):
self.lr = keras.backend.variable(1.0)
class DummyModel(object):
def __init__(self):
self.optimizer = DummyOptimizer()
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', patience=2)
reduce_on_plateau.model = DummyModel()
losses = [0.0860, 0.1096, 0.1040]
lrs = []
for epoch in range(len(losses)):
reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
lrs.append(keras.backend.get_value(reduce_on_plateau.model.optimizer.lr))
# The learning rates should be 1.0 except the last one
for lr in lrs[:-1]:
self.assertEqual(lr, 1.0)
self.assertLess(lrs[-1], 1.0)
def test_ReduceLROnPlateau_backwards_compatibility(self):
with test.mock.patch.object(logging, 'warning') as mock_log:
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(epsilon=1e-13)
self.assertRegexpMatches(
str(mock_log.call_args), '`epsilon` argument is deprecated')
self.assertFalse(hasattr(reduce_on_plateau, 'epsilon'))
self.assertTrue(hasattr(reduce_on_plateau, 'min_delta'))
self.assertEqual(reduce_on_plateau.min_delta, 1e-13)
  def test_CSVLogger(self):
    """CSVLogger honors custom separators and append/reuse semantics."""
    with self.test_session():
      np.random.seed(1337)
      temp_dir = self.get_temp_dir()
      self.addCleanup(shutil.rmtree, temp_dir)
      filepath = os.path.join(temp_dir, 'log.tsv')
      sep = '\t'
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)

      def make_model():
        np.random.seed(1337)
        model = keras.models.Sequential()
        model.add(
            keras.layers.Dense(
                NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
        model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
        model.compile(
            loss='categorical_crossentropy',
            optimizer=keras.optimizers.SGD(lr=0.1),
            metrics=['accuracy'])
        return model

      # case 1, create new file with defined separator
      model = make_model()
      cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)
      assert os.path.exists(filepath)
      with open(filepath) as csvfile:
        # Sniff the delimiter back out of the written file.
        dialect = csv.Sniffer().sniff(csvfile.read())
      assert dialect.delimiter == sep
      del model
      del cbks
      # case 2, append data to existing file, skip header
      model = make_model()
      cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)
      # case 3, reuse of CSVLogger object
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)
      with open(filepath) as csvfile:
        output = ' '.join(csvfile.readlines())
        # The header row must appear exactly once despite three fits.
        assert len(re.findall('epoch', output)) == 1
      os.remove(filepath)
  def test_stop_training_csv(self):
    # Test that using the CSVLogger callback with the TerminateOnNaN callback
    # does not result in invalid CSVs.
    np.random.seed(1337)
    tmpdir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, tmpdir)
    with self.test_session():
      fp = os.path.join(tmpdir, 'test.csv')
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]
      model = keras.models.Sequential()
      for _ in range(5):
        model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu'))
      model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))
      model.compile(loss='mean_squared_error',
                    optimizer='rmsprop')

      def data_generator():
        # Yields real batches for three passes over the data, then
        # all-NaN batches to force TerminateOnNaN to fire.
        i = 0
        max_batch_index = len(x_train) // BATCH_SIZE
        tot = 0
        while 1:
          if tot > 3 * len(x_train):
            yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan,
                   np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan)
          else:
            yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
                   y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
          i += 1
          tot += 1
          i %= max_batch_index

      history = model.fit_generator(data_generator(),
                                    len(x_train) // BATCH_SIZE,
                                    validation_data=(x_test, y_test),
                                    callbacks=cbks,
                                    epochs=20)
      loss = history.history['loss']
      assert len(loss) > 1
      assert loss[-1] == np.inf or np.isnan(loss[-1])
      values = []
      with open(fp) as f:
        for x in csv.reader(f):
          # In windows, due to \r\n line ends we may end up reading empty lines
          # after each line. Skip empty lines.
          if x:
            values.append(x)
      assert 'nan' in values[-1], 'The last epoch was not logged.'
  def test_TerminateOnNaN(self):
    """TerminateOnNaN stops training on the first non-finite loss."""
    with self.test_session():
      np.random.seed(1337)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      cbks = [keras.callbacks.TerminateOnNaN()]
      model = keras.models.Sequential()
      # Huge constant weights make the MSE loss overflow immediately.
      initializer = keras.initializers.Constant(value=1e5)
      for _ in range(5):
        model.add(
            keras.layers.Dense(
                2,
                input_dim=INPUT_DIM,
                activation='relu',
                kernel_initializer=initializer))
      model.add(keras.layers.Dense(NUM_CLASSES))
      model.compile(loss='mean_squared_error', optimizer='rmsprop')
      history = model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=20)
      loss = history.history['loss']
      # Training must have been cut off after the very first epoch.
      assert len(loss) == 1
      assert loss[0] == np.inf
  def test_TensorBoard(self):
    """TensorBoard callback works across fit() and fit_generator() variants."""
    np.random.seed(1337)
    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir)
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = keras.utils.to_categorical(y_test)
    y_train = keras.utils.to_categorical(y_train)

    def data_generator(train):
      # Infinite batch generator over the train or test split.
      if train:
        max_batch_index = len(x_train) // BATCH_SIZE
      else:
        max_batch_index = len(x_test) // BATCH_SIZE
      i = 0
      while 1:
        if train:
          yield (x_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
                 y_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
        else:
          yield (x_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
                 y_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
        i += 1
        i %= max_batch_index

    # case: Sequential
    with self.test_session():
      model = keras.models.Sequential()
      model.add(
          keras.layers.Dense(
              NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
      # non_trainable_weights: moving_variance, moving_mean
      model.add(keras.layers.BatchNormalization())
      model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
      model.compile(
          loss='categorical_crossentropy',
          optimizer='sgd',
          metrics=['accuracy'])
      tsb = keras.callbacks.TensorBoard(
          log_dir=temp_dir, histogram_freq=1, write_images=True,
          write_grads=True, batch_size=5)
      cbks = [tsb]
      # fit with validation data
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=3,
          verbose=0)
      # fit with validation data and accuracy
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=2,
          verbose=0)
      # fit generator with validation data
      model.fit_generator(
          data_generator(True),
          len(x_train),
          epochs=2,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          verbose=0)
      # fit generator without validation data
      model.fit_generator(
          data_generator(True),
          len(x_train),
          epochs=2,
          callbacks=cbks,
          verbose=0)
      # fit generator with validation data and accuracy
      model.fit_generator(
          data_generator(True),
          len(x_train),
          epochs=2,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          verbose=0)
      # fit generator without validation data and accuracy
      model.fit_generator(
          data_generator(True), len(x_train), epochs=2, callbacks=cbks)
      assert os.path.exists(temp_dir)
  def test_TensorBoard_histogram_freq_must_have_validation_data(self):
    """histogram_freq > 0 without validation data must raise ValueError."""
    np.random.seed(1337)
    tmpdir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, tmpdir)
    with self.test_session():
      filepath = os.path.join(tmpdir, 'logs')
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)

      def data_generator(train):
        if train:
          max_batch_index = len(x_train) // BATCH_SIZE
        else:
          max_batch_index = len(x_test) // BATCH_SIZE
        i = 0
        while 1:
          if train:
            # simulate multi-input/output models
            yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
                   y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
          else:
            yield (x_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
                   y_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
          i += 1
          i %= max_batch_index

      inp = keras.Input((INPUT_DIM,))
      hidden = keras.layers.Dense(2, activation='relu')(inp)
      hidden = keras.layers.Dropout(0.1)(hidden)
      output = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
      model = keras.models.Model(inputs=inp, outputs=output)
      model.compile(loss='categorical_crossentropy',
                    optimizer='sgd',
                    metrics=['accuracy'])

      # we must generate new callbacks for each test, as they aren't stateless
      def callbacks_factory(histogram_freq):
        return [keras.callbacks.TensorBoard(
            log_dir=filepath,
            histogram_freq=histogram_freq,
            write_images=True, write_grads=True,
            batch_size=5)]

      # fit w/o validation data should raise ValueError if histogram_freq > 0
      cbs = callbacks_factory(histogram_freq=1)
      with self.assertRaises(ValueError):
        model.fit(
            x_train, y_train, batch_size=BATCH_SIZE, callbacks=cbs, epochs=3)
      for cb in cbs:
        cb.on_train_end()
      # fit generator without validation data should raise ValueError if
      # histogram_freq > 0
      cbs = callbacks_factory(histogram_freq=1)
      with self.assertRaises(ValueError):
        model.fit_generator(
            data_generator(True), len(x_train), epochs=2, callbacks=cbs)
      for cb in cbs:
        cb.on_train_end()
      # Make sure file writer cache is clear to avoid failures during cleanup.
      writer_cache.FileWriterCache.clear()
  def test_TensorBoard_multi_input_output(self):
    """TensorBoard handles models with multiple inputs and outputs."""
    np.random.seed(1337)
    tmpdir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, tmpdir)
    with self.test_session():
      filepath = os.path.join(tmpdir, 'logs')
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)

      def data_generator(train):
        if train:
          max_batch_index = len(x_train) // BATCH_SIZE
        else:
          max_batch_index = len(x_test) // BATCH_SIZE
        i = 0
        while 1:
          if train:
            # simulate multi-input/output models
            yield ([x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2,
                   [y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2)
          else:
            yield ([x_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2,
                   [y_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2)
          i += 1
          i %= max_batch_index

      inp1 = keras.Input((INPUT_DIM,))
      inp2 = keras.Input((INPUT_DIM,))
      inp = keras.layers.add([inp1, inp2])
      hidden = keras.layers.Dense(2, activation='relu')(inp)
      hidden = keras.layers.Dropout(0.1)(hidden)
      output1 = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
      output2 = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
      model = keras.models.Model([inp1, inp2], [output1, output2])
      model.compile(loss='categorical_crossentropy',
                    optimizer='sgd',
                    metrics=['accuracy'])

      # we must generate new callbacks for each test, as they aren't stateless
      def callbacks_factory(histogram_freq):
        return [keras.callbacks.TensorBoard(log_dir=filepath,
                                            histogram_freq=histogram_freq,
                                            write_images=True, write_grads=True,
                                            batch_size=5)]

      # fit without validation data
      model.fit([x_train] * 2, [y_train] * 2, batch_size=BATCH_SIZE,
                callbacks=callbacks_factory(histogram_freq=0), epochs=3)
      # fit with validation data and accuracy
      model.fit([x_train] * 2, [y_train] * 2, batch_size=BATCH_SIZE,
                validation_data=([x_test] * 2, [y_test] * 2),
                callbacks=callbacks_factory(histogram_freq=1), epochs=2)
      # fit generator without validation data
      model.fit_generator(data_generator(True), len(x_train), epochs=2,
                          callbacks=callbacks_factory(histogram_freq=0))
      # fit generator with validation data and accuracy
      model.fit_generator(data_generator(True), len(x_train), epochs=2,
                          validation_data=([x_test] * 2, [y_test] * 2),
                          callbacks=callbacks_factory(histogram_freq=1))
      assert os.path.isdir(filepath)
  def test_Tensorboard_histogram_summaries_in_test_function(self):
    """Histogram summaries are emitted at the expected global steps."""

    class FileWriterStub(object):
      # Stand-in for tf.summary.FileWriter that records the global steps
      # at which multi-value (histogram) summaries were added.

      def __init__(self, logdir, graph=None):
        self.logdir = logdir
        self.graph = graph
        self.steps_seen = []

      def add_summary(self, summary, global_step):
        summary_obj = summary_pb2.Summary()
        # ensure a valid Summary proto is being sent
        if isinstance(summary, bytes):
          summary_obj.ParseFromString(summary)
        else:
          assert isinstance(summary, summary_pb2.Summary)
          summary_obj = summary
        # keep track of steps seen for the merged_summary op,
        # which contains the histogram summaries
        if len(summary_obj.value) > 1:
          self.steps_seen.append(global_step)

      def flush(self):
        pass

      def close(self):
        pass

    np.random.seed(1337)
    tmpdir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, tmpdir)
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = keras.utils.to_categorical(y_test)
    y_train = keras.utils.to_categorical(y_train)
    with self.test_session():
      model = keras.models.Sequential()
      model.add(
          keras.layers.Dense(
              NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
      # non_trainable_weights: moving_variance, moving_mean
      model.add(keras.layers.BatchNormalization())
      model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
      model.compile(
          loss='categorical_crossentropy',
          optimizer='sgd',
          metrics=['accuracy'])
      tsb = keras.callbacks.TensorBoard(
          log_dir=tmpdir,
          histogram_freq=1,
          write_images=True,
          write_grads=True,
          batch_size=5)
      tsb._writer_class = FileWriterStub
      cbks = [tsb]
      # fit with validation data
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=3,
          verbose=0)
      # Two validation batches per epoch over three epochs -> half steps.
      self.assertAllEqual(tsb.writer.steps_seen, [0, 0.5, 1, 1.5, 2, 2.5])
  def test_Tensorboard_histogram_summaries_with_generator(self):
    """Histogram summaries work with a validation generator (with steps)."""
    np.random.seed(1337)
    tmpdir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, tmpdir)

    def generator():
      x = np.random.randn(10, 100).astype(np.float32)
      y = np.random.randn(10, 10).astype(np.float32)
      while True:
        yield x, y

    with self.test_session():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(10, input_dim=100, activation='relu'))
      model.add(keras.layers.Dense(10, activation='softmax'))
      model.compile(
          loss='categorical_crossentropy',
          optimizer='sgd',
          metrics=['accuracy'])
      tsb = keras.callbacks.TensorBoard(
          log_dir=tmpdir,
          histogram_freq=1,
          write_images=True,
          write_grads=True,
          batch_size=5)
      cbks = [tsb]
      # fit with validation generator
      model.fit_generator(
          generator(),
          steps_per_epoch=2,
          epochs=2,
          validation_data=generator(),
          validation_steps=2,
          callbacks=cbks,
          verbose=0)
      with self.assertRaises(ValueError):
        # fit with validation generator but no
        # validation_steps
        model.fit_generator(
            generator(),
            steps_per_epoch=2,
            epochs=2,
            validation_data=generator(),
            callbacks=cbks,
            verbose=0)
      self.assertTrue(os.path.exists(tmpdir))
  @unittest.skipIf(
      os.name == 'nt',
      'use_multiprocessing=True does not work on windows properly.')
  def test_LambdaCallback(self):
    """LambdaCallback's on_train_end hook fires when training completes."""
    with self.test_session():
      np.random.seed(1337)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      model = keras.models.Sequential()
      model.add(
          keras.layers.Dense(
              NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
      model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
      model.compile(
          loss='categorical_crossentropy',
          optimizer='sgd',
          metrics=['accuracy'])
      # Start an arbitrary process that should run during model
      # training and be terminated after training has completed.
      e = threading.Event()

      def target():
        e.wait()

      t = threading.Thread(target=target)
      t.start()
      cleanup_callback = keras.callbacks.LambdaCallback(
          on_train_end=lambda logs: e.set())
      cbks = [cleanup_callback]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=5,
          verbose=0)
      t.join()
      # The callback must have set the event, letting the thread exit.
      assert not t.is_alive()
  def test_TensorBoard_with_ReduceLROnPlateau(self):
    """TensorBoard and ReduceLROnPlateau can be combined in one fit()."""
    with self.test_session():
      temp_dir = self.get_temp_dir()
      self.addCleanup(shutil.rmtree, temp_dir)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      model = keras.models.Sequential()
      model.add(
          keras.layers.Dense(
              NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
      model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
      model.compile(
          loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])
      cbks = [
          keras.callbacks.ReduceLROnPlateau(
              monitor='val_loss', factor=0.5, patience=4, verbose=1),
          keras.callbacks.TensorBoard(log_dir=temp_dir)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=2,
          verbose=0)
      assert os.path.exists(temp_dir)
def test_Tensorboard_batch_logging(self):
class FileWriterStub(object):
def __init__(self, logdir, graph=None):
self.logdir = logdir
self.graph = graph
self.batches_logged = []
self.summary_values = []
self.summary_tags = []
def add_summary(self, summary, step):
self.summary_values.append(summary.value[0].simple_value)
self.summary_tags.append(summary.value[0].tag)
self.batches_logged.append(step)
def flush(self):
pass
def close(self):
pass
logdir = 'fake_dir'
# log every batch
tb_cbk = keras.callbacks.TensorBoard(logdir)
tb_cbk.writer = FileWriterStub(logdir)
for batch in range(5):
tb_cbk.on_batch_end(batch, {'acc': np.float32(batch)})
self.assertEqual(tb_cbk.writer.batches_logged, [0, 1, 2, 3, 4])
self.assertEqual(tb_cbk.writer.summary_values, [0., 1., 2., 3., 4.])
self.assertEqual(tb_cbk.writer.summary_tags, ['batch_acc'] * 5)
def test_Tensorboard_epoch_and_batch_logging(self):
class FileWriterStub(object):
def __init__(self, logdir, graph=None):
self.logdir = logdir
self.graph = graph
def add_summary(self, summary, step):
if 'batch_' in summary.value[0].tag:
self.batch_summary = (step, summary)
elif 'epoch_' in summary.value[0].tag:
self.epoch_summary = (step, summary)
def flush(self):
pass
def close(self):
pass
logdir = 'fake_dir'
tb_cbk = keras.callbacks.TensorBoard(logdir)
tb_cbk.writer = FileWriterStub(logdir)
tb_cbk.on_batch_end(0, {'acc': np.float32(5.0)})
tb_cbk.on_epoch_end(0, {'acc': np.float32(10.0)})
batch_step, batch_summary = tb_cbk.writer.batch_summary
self.assertEqual(batch_step, 0)
self.assertEqual(batch_summary.value[0].simple_value, 5.0)
epoch_step, epoch_summary = tb_cbk.writer.epoch_summary
self.assertEqual(epoch_step, 0)
self.assertEqual(epoch_summary.value[0].simple_value, 10.0)
  def test_RemoteMonitorWithJsonPayload(self):
    """RemoteMonitor with send_as_json=True posts without error (mocked)."""
    if requests is None:
      self.skipTest('`requests` required to run this test')
    with self.test_session():
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.np_utils.to_categorical(y_test)
      y_train = keras.utils.np_utils.to_categorical(y_train)
      model = keras.models.Sequential()
      model.add(
          keras.layers.Dense(
              NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
      model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
      model.compile(
          loss='categorical_crossentropy',
          optimizer='rmsprop',
          metrics=['accuracy'])
      cbks = [keras.callbacks.RemoteMonitor(send_as_json=True)]
      # Mock out the network call so no real HTTP request is made.
      with test.mock.patch.object(requests, 'post'):
        model.fit(
            x_train,
            y_train,
            batch_size=BATCH_SIZE,
            validation_data=(x_test, y_test),
            callbacks=cbks,
            epochs=1)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| |
import logging
import os
import select
import socket
import sys
import time
import traceback
from kivy.app import App
from kivy.clock import Clock
from kivy.core.window import Window
from kivy.lang import Builder
from kivy.properties import ObjectProperty, ListProperty
from kivy.uix.actionbar import ActionItem
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.label import Label
from kivy.uix.spinner import Spinner, SpinnerOption
from kivy.uix.tabbedpanel import TabbedPanel, TabbedPanelHeader
from kivy.uix.textinput import TextInput
import cap.cap_manager
from session import create_session
import term.term_keyboard
from term.terminal_gui import TerminalGUI
from uix.term_kivy_login import prompt_login as pl
from uix.term_kivy_password import prompt_password as pp
from uix.terminal_widget_kivy import TerminalWidgetKivy
Builder.load_file(os.path.join(os.path.dirname(__file__), 'term_kivy.kv'))
class RootWidget(FloatLayout):
    """Top-level widget tree for the terminal application.

    The ObjectProperties below are presumably populated from ids declared
    in term_kivy.kv (loaded at module import) -- confirm against the kv
    file.
    """

    # Tab panel holding one tab per terminal session.
    term_panel = ObjectProperty(None)
    # Host / port text inputs for opening a new connection.
    txt_host = ObjectProperty(None)
    txt_port = ObjectProperty(None)
    # Button that triggers the connection.
    btn_connect = ObjectProperty(None)
    # Spinner listing previously used connection strings.
    spnr_conn_history = ObjectProperty(None)
class ActionTextInput(TextInput, ActionItem):
    """Single-line TextInput that can live inside a Kivy ActionBar."""

    def __init__(self, *args, **kwargs):
        super(ActionTextInput, self).__init__(*args, **kwargs)
        # Force hint text and single-line mode, overriding anything the
        # caller passed in kwargs.
        self.hint_text='user@host'
        self.multiline=False
class ActionLabel(Label, ActionItem):
    """Label that can be placed inside a Kivy ActionBar."""

    def __init__(self, *args, **kwargs):
        super(ActionLabel, self).__init__(*args, **kwargs)
class TermTabbedPanel(TabbedPanel):
    """TabbedPanel hosting the terminal tabs."""

    def on_do_default_tab(self, instance, value):
        # No extra behaviour; defer to the stock TabbedPanel handler.
        super(TermTabbedPanel, self).on_do_default_tab(instance, value)
class TermBoxLayout(BoxLayout):
    """Layout wrapping a single terminal widget.

    The widget's session is started exactly once, 0.5 seconds after the
    first layout pass -- presumably so the widget has a real size before
    the session begins (TODO confirm).
    """

    def __init__(self, **kwargs):
        super(TermBoxLayout, self).__init__(**kwargs)
        # Assigned by TerminalKivyApp.add_term_widget after construction.
        self.term_widget = None
        # Guards the one-shot session start in do_layout.
        self.started = False
        # One-shot Clock trigger: fires start_session 0.5s after being poked.
        self.trigger_start_session = Clock.create_trigger(self.start_session, .5)

    def start_session(self, *largs):
        self.term_widget.session.start()

    def do_layout(self, *largs):
        super(TermBoxLayout, self).do_layout(*largs)
        if not self.started:
            # First layout pass: focus the terminal and schedule the
            # session start.
            self.started = True
            self.term_widget.focus = True
            self.trigger_start_session()
class TermTextInput(TerminalWidgetKivy):
    """Terminal display/input widget: forwards keystrokes to the session
    and keeps the pty size in sync with the widget size."""

    def __init__(self, session, **kwargs):
        super(TermTextInput, self).__init__(session, **kwargs)
        self.visible_rows = 1
        self.visible_cols = 1
        self.scroll_region = None
        # Set by keyboard_on_key_down; when True, the follow-up textinput
        # event for the same key press is swallowed.
        self.keyboard_handled = False

    def keyboard_on_textinput(self, window, text):
        # Skip text that was already consumed as a key-down event.
        if self.keyboard_handled:
            return True

        logging.getLogger('term_kivy').debug('key board send text {}'.format(text))
        self.session.send(text)
        return True

    def keyboard_on_key_down(self, keyboard, keycode, text, modifiers):
        logging.getLogger('term_kivy').debug('The key {} {}'.format(keycode, 'have been pressed'))
        logging.getLogger('term_kivy').debug(' - text is %r' % text)
        logging.getLogger('term_kivy').debug(' - modifiers are %r' % modifiers)

        # Give the terminal emulation first crack at the key; if it handled
        # the key, suppress the subsequent textinput event too.
        if self.session.terminal.process_key(keycode, text, modifiers):
            self.keyboard_handled = True
            return True

        # Otherwise translate the key into the byte sequence to send to the
        # remote side (see term.term_keyboard.translate_key).
        v, handled = term.term_keyboard.translate_key(self.session.terminal, keycode, text, modifiers)

        if len(v) > 0:
            self.session.send(v)

        logging.getLogger('term_kivy').debug(' - translated %r, %d' % (v, handled))

        # Return True to accept the key. Otherwise, it will be used by
        # the system.
        self.keyboard_handled = handled
        return handled

    def cal_visible_rows(self):
        """Recompute how many text rows fit in the current height."""
        lh = self.line_height
        dy = lh + self.line_spacing
        padding_left, padding_top, padding_right, padding_bottom = self.padding
        vh = self.height - padding_top - padding_bottom

        # +0.1 tolerates float rounding just below a whole row.
        self.visible_rows = int(float(vh) / float(dy) + 0.1)
        if self.visible_rows == 0:
            self.visible_rows = 1

    def cal_visible_cols(self):
        """Recompute how many character columns fit in the current width."""
        padding_left, padding_top, padding_right, padding_bottom = self.padding
        vw = self.width - padding_left - padding_right

        # Measure the rendered width of the 26-letter string 'A'..'Z', then
        # scale: cols = vw / (tw / 26) == vw / tw * 26.
        text = ''.join([chr(c) for c in range(ord('A'), ord('Z') + 1)])
        tw = self._get_text_width(text)

        self.visible_cols = int(float(vw) / float(tw) * 26)
        if self.visible_cols == 0:
            self.visible_cols = 1

    def on_size(self, instance, value):
        """Propagate a widget resize to the pty and redraw the terminal."""
        padding_left, padding_top, padding_right, padding_bottom = self.padding
        vh = self.height - padding_top - padding_bottom
        vw = self.width - padding_left - padding_right

        self.cal_visible_rows()
        self.cal_visible_cols()

        logging.getLogger('term_kivy').debug('on size: cols={} rows={} width={} height={} size={} pos={}'.format(self.visible_cols, self.visible_rows, vw, vh, self.size, self.pos))

        self.session.resize_pty(self.visible_cols, self.visible_rows, vw, vh)
        self.session.terminal.resize_terminal()
        self.session.terminal.refresh_display()
class TerminalKivyApp(App):
    """Kivy application hosting one terminal session per tab."""

    # Most-recently-used connection strings, newest first (feeds the
    # connection-history spinner).
    conn_history = ListProperty([])

    def __init__(self, cfg):
        App.__init__(self)
        self.cfg = cfg
        # Tab that _switch_to_tab should activate on the next trigger.
        self.current_tab = None

    def get_application_name(self):
        return 'Multi-Tab Terminal Emulator in Python & Kivy'

    def build(self):
        """Create the widget tree and wire up the UI event handlers."""
        self.root_widget = RootWidget()

        self.root_widget.term_panel.do_default_tab = False
        self.root_widget.term_panel.bind(current_tab=self.on_current_tab)
        self.root_widget.btn_connect.bind(on_press=self.on_connect)
        self.root_widget.spnr_conn_history.bind(text=self.on_conn_history)

        self.trigger_switch_to_tab = Clock.create_trigger(self._switch_to_tab)

        return self.root_widget

    def _switch_to_tab(self, *largs):
        # Deferred tab switch (runs via trigger_switch_to_tab); no-op when
        # no target tab has been requested.
        if not self.current_tab:
            return
        self.root_widget.term_panel.switch_to(self.current_tab)

    def switch_to_tab(self, current_tab):
        """Request a switch to current_tab on the next clock tick."""
        self.current_tab = current_tab
        self.trigger_switch_to_tab()

    def connect_to(self, conn_str, port):
        """Open an ssh session to conn_str:port, reusing a stopped tab if
        one exists, otherwise creating a new tab."""
        cfg = self.cfg.clone()
        cfg.set_conn_str(conn_str)
        cfg.port = port
        cfg.session_type = 'ssh'

        for current_tab in self.root_widget.term_panel.tab_list:
            if current_tab.session.stopped:
                current_tab.session.cfg = cfg
                current_tab.session.start()
                self.switch_to_tab(current_tab)
                return

        self.add_term_widget(cfg)

    def on_conn_history(self, instance, value):
        # NOTE(review): basestring exists only on Python 2 -- this raises
        # NameError under Python 3; confirm the supported interpreter.
        if not isinstance(value, basestring):
            return
        # The spinner value is expected to be "<conn_str>:<port>".
        parts = value.split(':')
        self.connect_to(parts[0], int(parts[1]))

    def on_connect(self, instance):
        self.connect_to(self.root_widget.txt_host.text, int(self.root_widget.txt_port.text))

    def create_terminal(self, cfg):
        return TerminalKivy(cfg)

    def start(self):
        self.run()

    def on_start(self):
        # Open the initial tab using the application-level config.
        self.add_term_widget(self.cfg.clone())

    def on_current_tab(self, instance, value):
        # Refocus the newly selected tab's terminal on the next frame.
        term_widget = self.root_widget.term_panel.current_tab.term_widget

        if term_widget:
            def update(ut):
                term_widget.focus = True
                Clock.unschedule(update)
            Clock.schedule_once(update)

    def add_term_widget(self, cfg):
        """Create a new tab containing a fresh terminal session."""
        layout = TermBoxLayout()

        ti = TabbedPanelHeader()
        ti.text = ' '.join([str(len(self.root_widget.term_panel.tab_list) + 1), 'Terminal'])
        ti.content = layout
        ti.size_hint = (1,1)

        self.root_widget.term_panel.add_widget(ti)

        session = create_session(cfg, self.create_terminal(cfg))

        term_widget = TermTextInput(session)
        term_widget.size_hint = (1, 1)
        term_widget.pos_hint = {'center_y':.5, 'center_x':.5}

        layout.add_widget(term_widget)
        layout.term_widget = term_widget

        # Cross-link tab, session and widget so each can reach the others.
        ti.term_widget = term_widget
        ti.session = session
        ti.session.term_widget = term_widget
        ti.session.terminal.term_widget = term_widget

        # A default tab is no longer needed once a real tab exists.
        Clock.unschedule(self.root_widget.term_panel._load_default_tab_content)
        self.switch_to_tab(ti)

        # Move this connection to the front of the MRU history.
        conn_str = cfg.get_conn_str()
        if conn_str in self.conn_history:
            self.conn_history.remove(conn_str)
        self.conn_history.insert(0, conn_str)

    def on_stop(self):
        # Stop every session on application shutdown.
        for current_tab in self.root_widget.term_panel.tab_list:
            current_tab.session.stop()

    def close_settings(self, *largs):
        App.close_settings(self, *largs)
class TerminalKivy(TerminalGUI):
    """Terminal backend that routes login/password prompts to Kivy dialogs."""

    def __init__(self, cfg):
        super(TerminalKivy, self).__init__(cfg)

    def prompt_login(self, t, username):
        # Delegates to uix.term_kivy_login.prompt_login.
        pl(self, t, username)

    def prompt_password(self, action):
        # Delegates to uix.term_kivy_password.prompt_password.
        pp(action)
| |
# Copyright 2014 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import string
import os
import logging
import re
import itertools
from collections import defaultdict
# bsd licensed - pip install jinja2
from jinja2 import Environment, FileSystemLoader
# fsutils, , misc filesystem utils, internal
import fsutils
# validate, , validate various things, internal
import validate
Template_Dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'templates')
logger = logging.getLogger('cmakegen')
Ignore_Subdirs = set(('build','yotta_modules', 'yotta_targets', 'CMake'))
jinja_environment = Environment(loader=FileSystemLoader(Template_Dir), trim_blocks=True, lstrip_blocks=True)
def replaceBackslashes(s):
    """Normalise Windows-style path separators to forward slashes.

    Registered below as a Jinja2 filter so generated CMake never contains
    backslashes (which CMake would treat as escapes).
    """
    return '/'.join(s.split('\\'))
jinja_environment.filters['replaceBackslashes'] = replaceBackslashes
class SourceFile(object):
    """A single source file discovered inside a component.

    lang is one of 'c', 'cpp', 'objc' or 'header' (assigned by
    CMakeGen.containsSourceFiles).
    """

    def __init__(self, fullpath, relpath, lang):
        super(SourceFile, self).__init__()
        # Path as discovered on disk.
        self.fullpath = fullpath
        # Path relative to the component root.
        self.relpath = relpath
        # Language tag used when generating CMake.
        self.lang = lang

    def __repr__(self):
        return self.fullpath
class CMakeGen(object):
    """Generates CMakeLists.txt build files for a component and all of its
    dependencies under an out-of-source build root.

    Hand-written CMakeLists inside a component take precedence over
    generated ones; otherwise standard subdirectories (source, test,
    resource, binary dirs) get auto-generated lists.
    """

    def __init__(self, directory, target):
        super(CMakeGen, self).__init__()
        self.buildroot = directory
        logger.info("generate for target: %s" % target)
        self.target = target

    def _writeFile(self, path, contents):
        # Ensure the parent directory exists, then write -- but only touch
        # the file if the contents actually changed (see writeIfDifferent).
        dirname = os.path.dirname(path)
        fsutils.mkDirP(dirname)
        self.writeIfDifferent(path, contents)

    def generateRecursive(self, component, all_components, builddir=None, modbuilddir=None, processed_components=None):
        ''' generate top-level CMakeLists for this component and its
            dependencies: the CMakeLists are all generated in self.buildroot,
            which MUST be out-of-source

            !!! NOTE: experimenting with a slightly different way of doing
            things here, this function is a generator that yields any errors
            produced, so the correct use is:

            for error in gen.generateRecursive(...):
                print(error)
        '''
        if builddir is None:
            builddir = self.buildroot
        if modbuilddir is None:
            # dependency modules are generated into a 'ym' subdirectory
            modbuilddir = os.path.join(builddir, 'ym')
        if processed_components is None:
            processed_components = dict()
        if not self.target:
            yield 'Target "%s" is not a valid build target' % self.target

        toplevel = not len(processed_components)

        logger.debug('generate build files: %s (target=%s)' % (component, self.target))
        # because of the way c-family language includes work we need to put the
        # public header directories of all components that this component
        # depends on (directly OR indirectly) into the search path, which means
        # we need to first enumerate all the direct and indirect dependencies
        recursive_deps = component.getDependenciesRecursive(
            available_components = all_components,
            target = self.target,
            available_only = True
        )
        dependencies = component.getDependencies(
            all_components,
            target = self.target,
            available_only = True
        )

        for name, dep in dependencies.items():
            if not dep:
                yield 'Required dependency "%s" of "%s" is not installed.' % (name, component)
        # ensure this component is assumed to have been installed before we
        # check for its dependencies, in case it has a circular dependency on
        # itself
        processed_components[component.getName()] = component
        new_dependencies = {name:c for name,c in dependencies.items() if c and not name in processed_components}
        self.generate(builddir, modbuilddir, component, new_dependencies, dependencies, recursive_deps, toplevel)

        logger.debug('recursive deps of %s:' % component)
        for d in recursive_deps.values():
            logger.debug('    %s' % d)

        processed_components.update(new_dependencies)
        for name, c in new_dependencies.items():
            for error in self.generateRecursive(
                c, all_components, os.path.join(modbuilddir, name), modbuilddir, processed_components
            ):
                yield error

    def checkStandardSourceDir(self, dirname, component):
        # Warn (but don't fail) about non-standard source directory names.
        err = validate.sourceDirValidationError(dirname, component.getName())
        if err:
            logger.warn(err)

    def _sanitizeTarget(self, targetname):
        """Return targetname upper-cased with non-alphanumerics mapped to '_'
        (safe for use in preprocessor definitions / CMake variables)."""
        return re.sub('[^a-zA-Z0-9]', '_', targetname).upper()

    def _sanitizeSymbol(self, sym):
        """Return sym with non-alphanumerics mapped to '_' (a valid C
        identifier fragment, case preserved)."""
        return re.sub('[^a-zA-Z0-9]', '_', sym)

    def _listSubDirectories(self, component):
        ''' return: {
                manual: [list of subdirectories with manual CMakeLists],
                auto: [list of pairs: (subdirectories name to autogenerate, a list of source files in that dir)],
                bin: {dictionary of subdirectory name to binary name},
                test: [list of directories that build tests],
                resource: [list of directories that contain resources]
            }
        '''
        manual_subdirs = []
        auto_subdirs = []
        bin_subdirs = {os.path.normpath(x): y for x, y in component.getBinaries().items()}
        test_subdirs = []
        resource_subdirs = []
        for f in os.listdir(component.path):
            if f in Ignore_Subdirs or f.startswith('.') or f.startswith('_'):
                continue
            if os.path.isfile(os.path.join(component.path, f, 'CMakeLists.txt')):
                self.checkStandardSourceDir(f, component)
                # if the subdirectory has a CMakeLists.txt in it, then use that
                manual_subdirs.append(f)
                # tests only supported in the `test` directory for now
                if f in ('test',):
                    test_subdirs.append(f)
            elif f in ('source', 'test') or os.path.normpath(f) in bin_subdirs:
                # otherwise, if the directory has source files, generate a
                # CMakeLists in the corresponding temporary directory, and add
                # that.
                # For now we only do this for the source and test directories -
                # in theory we could do others
                sources = self.containsSourceFiles(os.path.join(component.path, f), component)
                if sources:
                    auto_subdirs.append((f, sources))
                    # tests only supported in the `test` directory for now
                    if f in ('test',):
                        test_subdirs.append(f)
            elif f in ('resource',):
                # BUG FIX: this previously read `f in ('resource')` -- a
                # substring test against the *string* 'resource' (one-element
                # tuples need a trailing comma), which also matched names
                # like 'res'.
                resource_subdirs.append(os.path.join(component.path, f))
            elif f.lower() in ('source', 'src', 'test', 'resource'):
                # probably a mis-cased standard directory: warn about it
                self.checkStandardSourceDir(f, component)
        return {
            "manual": manual_subdirs,
            "auto": auto_subdirs,
            "bin": bin_subdirs,
            "test": test_subdirs,
            "resource": resource_subdirs
        }

    def generate(
        self, builddir, modbuilddir, component, active_dependencies, immediate_dependencies, all_dependencies, toplevel
    ):
        ''' active_dependencies is the dictionary of components that need to be
            built for this component, but will not already have been built for
            another component.
        '''
        include_own_dir = 'include_directories("%s")\n' % component.path

        # include directories of this component and every (direct or
        # indirect) dependency, since c-family includes are global
        include_root_dirs = ''
        include_sys_dirs = ''
        include_other_dirs = ''
        for name, c in itertools.chain(((component.getName(), component),), all_dependencies.items()):
            include_root_dirs += 'include_directories("%s")\n' % replaceBackslashes(c.path)
            dep_sys_include_dirs = c.getExtraSysIncludes()
            for d in dep_sys_include_dirs:
                include_sys_dirs += 'include_directories(SYSTEM "%s")\n' % replaceBackslashes(os.path.join(c.path, d))
            dep_extra_include_dirs = c.getExtraIncludes()
            for d in dep_extra_include_dirs:
                include_other_dirs += 'include_directories("%s")\n' % replaceBackslashes(os.path.join(c.path, d))

        # add_subdirectory for each dependency not yet built elsewhere
        add_depend_subdirs = ''
        for name, c in active_dependencies.items():
            depend_subdir = replaceBackslashes(os.path.join(modbuilddir, name))
            add_depend_subdirs += 'add_subdirectory("%s" "%s")\n' % (
                depend_subdir, depend_subdir
            )

        delegate_to_existing = None
        delegate_build_dir = None
        if os.path.isfile(os.path.join(component.path, 'CMakeLists.txt')):
            # the component has its own top-level CMakeLists: delegate to it
            # rather than generating subdirectory lists
            delegate_to_existing = component.path
            add_own_subdirs = []
            logger.debug("delegate to build dir: %s", builddir)
            delegate_build_dir = os.path.join(builddir, 'existing')
        else:
            subdirs = self._listSubDirectories(component)
            manual_subdirs = subdirs['manual']
            autogen_subdirs = subdirs['auto']
            binary_subdirs = subdirs['bin']
            test_subdirs = subdirs['test']
            resource_subdirs = subdirs['resource']

            add_own_subdirs = []
            for f in manual_subdirs:
                if os.path.isfile(os.path.join(component.path, f, 'CMakeLists.txt')):
                    add_own_subdirs.append(
                        (os.path.join(component.path, f), os.path.join(builddir, f))
                    )

            # names of all directories at this level with stuff in: used to figure
            # out what to link automatically
            all_subdirs = manual_subdirs + [x[0] for x in autogen_subdirs]
            for f, source_files in autogen_subdirs:
                if f in binary_subdirs:
                    exe_name = binary_subdirs[f]
                else:
                    exe_name = None
                if f in test_subdirs:
                    self.generateTestDirList(
                        builddir, f, source_files, component, immediate_dependencies
                    )
                else:
                    self.generateSubDirList(
                        builddir, f, source_files, component, all_subdirs,
                        immediate_dependencies, exe_name, resource_subdirs
                    )
                add_own_subdirs.append(
                    (os.path.join(builddir, f), os.path.join(builddir, f))
                )

            # if we're not building anything other than tests, then we need to
            # generate a dummy library so that this component can still be linked
            # against
            if len(add_own_subdirs) <= len(test_subdirs):
                add_own_subdirs.append(self.createDummyLib(
                    component, builddir, [x for x in immediate_dependencies]
                ))

        # expose the target (and everything it resolves to) both as
        # preprocessor definitions and as CMake variables
        target_definitions = '-DTARGET=' + self._sanitizeTarget(self.target.getName()) + ' '
        set_targets_like = 'set(TARGET_LIKE_' + self._sanitizeTarget(self.target.getName()) + ' TRUE)\n'
        for target in self.target.dependencyResolutionOrder():
            if '*' not in target:
                target_definitions += '-DTARGET_LIKE_' + self._sanitizeTarget(target) + ' '
                set_targets_like += 'set(TARGET_LIKE_' + self._sanitizeTarget(target) + ' TRUE)\n'

        template = jinja_environment.get_template('base_CMakeLists.txt')

        file_contents = template.render({
            "toplevel": toplevel,
            "target_name": self.target.getName(),
            "set_targets_like": set_targets_like,
            "toolchain_file": self.target.getToolchainFile(),
            "component_name": component.getName(),
            "include_own_dir": include_own_dir,
            "include_root_dirs": include_root_dirs,
            "include_sys_dirs": include_sys_dirs,
            "include_other_dirs": include_other_dirs,
            "add_depend_subdirs": add_depend_subdirs,
            "add_own_subdirs": add_own_subdirs,
            "yotta_target_definitions": target_definitions,
            "component_version": component.getVersion(),
            "delegate_to": delegate_to_existing,
            "delegate_build_dir": delegate_build_dir
        })
        self._writeFile(os.path.join(builddir, 'CMakeLists.txt'), file_contents)

    def createDummyLib(self, component, builddir, link_dependencies):
        """Generate a one-symbol static library so a tests-only component can
        still be linked against; returns the (srcdir, builddir) pair to add."""
        safe_name = self._sanitizeSymbol(component.getName())
        dummy_dirname = 'yotta_dummy_lib_%s' % safe_name
        dummy_cfile_name = 'dummy.c'
        logger.debug("create dummy lib: %s, %s, %s" % (safe_name, dummy_dirname, dummy_cfile_name))
        dummy_template = jinja_environment.get_template('dummy_CMakeLists.txt')
        dummy_cmakelists = dummy_template.render({
            "cfile_name": dummy_cfile_name,
            "libname": component.getName(),
            "link_dependencies": link_dependencies
        })
        self._writeFile(os.path.join(builddir, dummy_dirname, "CMakeLists.txt"), dummy_cmakelists)
        dummy_cfile = "void __yotta_dummy_lib_symbol_%s(){}\n" % safe_name
        self._writeFile(os.path.join(builddir, dummy_dirname, dummy_cfile_name), dummy_cfile)
        return (os.path.join(builddir, dummy_dirname), os.path.join(builddir, dummy_dirname))

    def writeIfDifferent(self, fname, contents):
        """Write contents to fname, leaving the file untouched (mtime
        preserved) when it already holds exactly these contents."""
        try:
            with open(fname, "r+") as f:
                current_contents = f.read()
                if current_contents != contents:
                    f.seek(0)
                    f.write(contents)
                    f.truncate()
        except IOError:
            # file doesn't exist yet (or isn't readable): create it
            with open(fname, "w") as f:
                f.write(contents)

    def generateTestDirList(self, builddir, dirname, source_files, component, immediate_dependencies):
        """Generate a CMakeLists for a test directory: one test executable per
        top-level source file plus one per subdirectory."""
        logger.debug('generate CMakeLists.txt for directory: %s' % os.path.join(component.path, dirname))

        link_dependencies = [x for x in immediate_dependencies]
        fname = os.path.join(builddir, dirname, 'CMakeLists.txt')

        # group the list of source files by subdirectory: generate one test for
        # each subdirectory, and one test for each file at the top level
        subdirs = defaultdict(list)
        toplevel_srcs = []
        for f in source_files:
            if f.lang in ('c', 'cpp', 'objc'):
                subrelpath = os.path.relpath(f.relpath, dirname)
                subdir = os.path.split(subrelpath)[0]
                if subdir:
                    subdirs[subdir].append(f)
                else:
                    toplevel_srcs.append(f)

        tests = []
        for f in toplevel_srcs:
            object_name = '%s-test-%s' % (
                component.getName(), os.path.basename(os.path.splitext(str(f))[0]).lower()
            )
            tests.append([[str(f)], object_name, [f.lang]])
        for subdirname, sources in subdirs.items():
            object_name = '%s-test-%s' % (
                component.getName(), fsutils.fullySplitPath(subdirname)[0].lower()
            )
            tests.append([[str(f) for f in sources], object_name, [f.lang for f in sources]])

        # link tests against the main executable
        link_dependencies.append(component.getName())

        # Find cmake files
        cmake_files = []
        for root, dires, files in os.walk(os.path.join(component.path, dirname)):
            for f in files:
                name, ext = os.path.splitext(f)
                if ext.lower() == '.cmake':
                    cmake_files.append(os.path.join(root, f))

        test_template = jinja_environment.get_template('test_CMakeLists.txt')

        file_contents = test_template.render({
            'source_directory': os.path.join(component.path, dirname),
            'tests': tests,
            'link_dependencies': link_dependencies,
            'cmake_files': cmake_files
        })

        self._writeFile(fname, file_contents)

    def generateSubDirList(self, builddir, dirname, source_files, component, all_subdirs, immediate_dependencies, executable_name, resource_subdirs):
        """Generate a CMakeLists for a (non-test) source directory, building
        either the component's library or a named executable."""
        logger.debug('generate CMakeLists.txt for directory: %s' % os.path.join(component.path, dirname))

        link_dependencies = [x for x in immediate_dependencies]
        fname = os.path.join(builddir, dirname, 'CMakeLists.txt')

        if dirname == 'source' or executable_name:
            if executable_name:
                object_name = executable_name
                executable = True
            else:
                object_name = component.getName()
                executable = False
            # if we're building the main library, or an executable for this
            # component, then we should link against all the other directories
            # containing cmakelists:
            link_dependencies += [x for x in all_subdirs if x not in ('source', 'test', dirname)]

            # Find resource files
            resource_files = []
            for f in resource_subdirs:
                for root, dires, files in os.walk(f):
                    if root.endswith(".xcassets"):
                        # treat an .xcassets bundle as a single resource;
                        # NOTE(review): this `break` also stops walking the
                        # rest of this resource subdir -- confirm intended
                        resource_files.append(root)
                        break
                    for f in files:
                        resource_files.append(os.path.join(root, f))

            # Find cmake files
            cmake_files = []
            for root, dires, files in os.walk(os.path.join(component.path, dirname)):
                for f in files:
                    name, ext = os.path.splitext(f)
                    if ext.lower() == '.cmake':
                        cmake_files.append(os.path.join(root, f))

            subdir_template = jinja_environment.get_template('subdir_CMakeLists.txt')

            file_contents = subdir_template.render({
                'source_directory': os.path.join(component.path, dirname),
                'executable': executable,
                'file_names': [str(f) for f in source_files],
                'object_name': object_name,
                'link_dependencies': link_dependencies,
                'languages': set(f.lang for f in source_files),
                'source_files': set((f.fullpath, f.lang) for f in source_files),
                'resource_files': resource_files,
                'cmake_files': cmake_files
            })
        else:
            raise Exception('auto CMakeLists for non-source/test directories is not supported')
        self._writeFile(fname, file_contents)

    def containsSourceFiles(self, directory, component):
        """Return SourceFile objects for every non-ignored C/C++/Obj-C source
        or header under directory (empty list if there are none)."""
        c_exts = set(('.c',))
        cpp_exts = set(('.cpp', '.cc', '.cxx'))
        objc_exts = set(('.m', '.mm'))
        header_exts = set(('.h',))

        sources = []
        for root, dires, files in os.walk(directory):
            for f in files:
                name, ext = os.path.splitext(f)
                ext = ext.lower()
                fullpath = os.path.join(root, f)
                relpath = os.path.relpath(fullpath, component.path)
                if component.ignores(relpath):
                    continue
                if ext in c_exts:
                    sources.append(SourceFile(fullpath, relpath, 'c'))
                elif ext in cpp_exts:
                    sources.append(SourceFile(fullpath, relpath, 'cpp'))
                elif ext in objc_exts:
                    sources.append(SourceFile(fullpath, relpath, 'objc'))
                elif ext in header_exts:
                    sources.append(SourceFile(fullpath, relpath, 'header'))
        return sources
| |
"""An abstract class for entities."""
import asyncio
import logging
import functools as ft
from timeit import default_timer as timer
from typing import Optional, List
from homeassistant.const import (
ATTR_ASSUMED_STATE, ATTR_FRIENDLY_NAME, ATTR_HIDDEN, ATTR_ICON,
ATTR_UNIT_OF_MEASUREMENT, DEVICE_DEFAULT_NAME, STATE_OFF, STATE_ON,
STATE_UNAVAILABLE, STATE_UNKNOWN, TEMP_CELSIUS, TEMP_FAHRENHEIT,
ATTR_ENTITY_PICTURE, ATTR_SUPPORTED_FEATURES, ATTR_DEVICE_CLASS)
from homeassistant.core import HomeAssistant
from homeassistant.config import DATA_CUSTOMIZE
from homeassistant.exceptions import NoEntitySpecifiedError
from homeassistant.util import ensure_unique_string, slugify
from homeassistant.util.async import (
run_coroutine_threadsafe, run_callback_threadsafe)
_LOGGER = logging.getLogger(__name__)
SLOW_UPDATE_WARNING = 10
def generate_entity_id(entity_id_format: str, name: Optional[str],
                       current_ids: Optional[List[str]]=None,
                       hass: Optional[HomeAssistant]=None) -> str:
    """Generate a unique entity ID based on given entity IDs or used IDs."""
    if current_ids is not None:
        # A candidate list was supplied: resolve synchronously right here.
        slug = slugify((name or DEVICE_DEFAULT_NAME).lower())
        return ensure_unique_string(
            entity_id_format.format(slug), current_ids)

    if hass is None:
        raise ValueError("Missing required parameter currentids or hass")

    # No ID list given: delegate to the async variant on the event loop,
    # which pulls the known entity ids from the state machine.
    return run_callback_threadsafe(
        hass.loop, async_generate_entity_id, entity_id_format, name,
        current_ids, hass
    ).result()
def async_generate_entity_id(entity_id_format: str, name: Optional[str],
                             current_ids: Optional[List[str]]=None,
                             hass: Optional[HomeAssistant]=None) -> str:
    """Generate a unique entity ID based on given entity IDs or used IDs."""
    if current_ids is None:
        if hass is None:
            raise ValueError("Missing required parameter currentids or hass")
        # Fall back to every entity id the state machine currently knows.
        current_ids = hass.states.async_entity_ids()

    slug = slugify((name or DEVICE_DEFAULT_NAME).lower())
    return ensure_unique_string(
        entity_id_format.format(slug), current_ids)
class Entity(object):
    """An abstract class for Home Assistant entities."""

    # pylint: disable=no-self-use

    # SAFE TO OVERWRITE
    # The properties and methods here are safe to overwrite when inheriting
    # this class. These may be used to customize the behavior of the entity.
    entity_id = None  # type: str

    # Owning hass instance. Will be set by EntityComponent
    hass = None  # type: Optional[HomeAssistant]

    # Whether we already logged the slow-update warning for this entity.
    _slow_reported = False

    # Handle of the pending slow-update warning timer; also serves as a
    # guard against multiple concurrent forced updates (non-None while an
    # update is in flight).
    _update_warn = None

    @property
    def should_poll(self) -> bool:
        """Return True if entity has to be polled for state.

        False if entity pushes its state to HA.
        """
        return True

    @property
    def unique_id(self) -> str:
        """Return an unique ID."""
        return "{}.{}".format(self.__class__, id(self))

    @property
    def name(self) -> Optional[str]:
        """Return the name of the entity."""
        return None

    @property
    def state(self) -> str:
        """Return the state of the entity."""
        return STATE_UNKNOWN

    @property
    def state_attributes(self):
        """Return the state attributes.

        Implemented by component base class.
        """
        return None

    @property
    def device_state_attributes(self):
        """Return device specific state attributes.

        Implemented by platform classes.
        """
        return None

    @property
    def device_class(self) -> str:
        """Return the class of this device, from component DEVICE_CLASSES."""
        return None

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return None

    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return None

    @property
    def entity_picture(self):
        """Return the entity picture to use in the frontend, if any."""
        return None

    @property
    def hidden(self) -> bool:
        """Return True if the entity should be hidden from UIs."""
        return False

    @property
    def available(self) -> bool:
        """Return True if entity is available."""
        return True

    @property
    def assumed_state(self) -> bool:
        """Return True if unable to access real state of the entity."""
        return False

    @property
    def force_update(self) -> bool:
        """Return True if state updates should be forced.

        If True, a state change will be triggered anytime the state property is
        updated, not just when the value changes.
        """
        return False

    @property
    def supported_features(self) -> int:
        """Flag supported features."""
        return None

    def update(self):
        """Retrieve latest state.

        When not implemented, will forward call to async version if available.
        """
        async_update = getattr(self, 'async_update', None)

        if async_update is None:
            return

        # Bridge from a worker thread into the event loop and block until
        # the coroutine finishes.
        run_coroutine_threadsafe(async_update(), self.hass.loop).result()

    # DO NOT OVERWRITE
    # These properties and methods are either managed by Home Assistant or they
    # are used to perform a very specific function. Overwriting these may
    # produce undesirable effects in the entity's operation.

    def update_ha_state(self, force_refresh=False):
        """Update Home Assistant with current state of entity.

        If force_refresh == True will update entity before setting state.

        Deprecated shim: logs a warning and delegates to
        schedule_update_ha_state.
        """
        _LOGGER.warning("'update_ha_state' is deprecated. "
                        "Use 'schedule_update_ha_state' instead.")
        self.schedule_update_ha_state(force_refresh)

    @asyncio.coroutine
    def async_update_ha_state(self, force_refresh=False):
        """Update Home Assistant with current state of entity.

        If force_refresh == True will update entity before setting state.

        This method must be run in the event loop.
        """
        if self.hass is None:
            raise RuntimeError("Attribute hass is None for {}".format(self))

        if self.entity_id is None:
            raise NoEntitySpecifiedError(
                "No entity id specified for entity {}".format(self.name))

        # update entity data
        if force_refresh:
            if self._update_warn:
                # An update is already running (see _update_warn above);
                # don't start a second concurrent refresh.
                _LOGGER.warning('Update for %s is already in progress',
                                self.entity_id)
                return

            # Arm a timer that logs if the update exceeds the threshold.
            self._update_warn = self.hass.loop.call_later(
                SLOW_UPDATE_WARNING, _LOGGER.warning,
                'Update of %s is taking over %s seconds.', self.entity_id,
                SLOW_UPDATE_WARNING
            )

            try:
                if hasattr(self, 'async_update'):
                    # pylint: disable=no-member
                    yield from self.async_update()
                else:
                    # Run the (potentially blocking) sync update in the
                    # executor so the loop isn't blocked.
                    yield from self.hass.loop.run_in_executor(
                        None, self.update)
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception('Update for %s fails', self.entity_id)
                return
            finally:
                # Always disarm the slow-update timer and release the guard.
                self._update_warn.cancel()
                self._update_warn = None

        start = timer()

        if not self.available:
            state = STATE_UNAVAILABLE
            attr = {}
        else:
            state = self.state

            if state is None:
                state = STATE_UNKNOWN
            else:
                state = str(state)

            attr = self.state_attributes or {}

            device_attr = self.device_state_attributes

            if device_attr is not None:
                attr.update(device_attr)

        self._attr_setter('unit_of_measurement', str, ATTR_UNIT_OF_MEASUREMENT,
                          attr)
        self._attr_setter('name', str, ATTR_FRIENDLY_NAME, attr)
        self._attr_setter('icon', str, ATTR_ICON, attr)
        self._attr_setter('entity_picture', str, ATTR_ENTITY_PICTURE, attr)
        self._attr_setter('hidden', bool, ATTR_HIDDEN, attr)
        self._attr_setter('assumed_state', bool, ATTR_ASSUMED_STATE, attr)
        self._attr_setter('supported_features', int, ATTR_SUPPORTED_FEATURES,
                          attr)
        self._attr_setter('device_class', str, ATTR_DEVICE_CLASS, attr)

        end = timer()

        if not self._slow_reported and end - start > 0.4:
            self._slow_reported = True
            _LOGGER.warning('Updating state for %s took %.3f seconds. '
                            'Please report platform to the developers at '
                            'https://goo.gl/Nvioub', self.entity_id,
                            end - start)

        # Overwrite properties that have been set in the config file.
        if DATA_CUSTOMIZE in self.hass.data:
            attr.update(self.hass.data[DATA_CUSTOMIZE].get(self.entity_id))

        # Remove hidden property if false so it won't show up.
        if not attr.get(ATTR_HIDDEN, True):
            attr.pop(ATTR_HIDDEN)

        # Convert temperature if we detect one
        try:
            unit_of_measure = attr.get(ATTR_UNIT_OF_MEASUREMENT)
            units = self.hass.config.units
            if (unit_of_measure in (TEMP_CELSIUS, TEMP_FAHRENHEIT) and
                    unit_of_measure != units.temperature_unit):
                # Preserve the precision of the original string state.
                prec = len(state) - state.index('.') - 1 if '.' in state else 0
                temp = units.temperature(float(state), unit_of_measure)
                state = str(round(temp) if prec == 0 else round(temp, prec))
                attr[ATTR_UNIT_OF_MEASUREMENT] = units.temperature_unit
        except ValueError:
            # Could not convert state to float
            pass

        self.hass.states.async_set(
            self.entity_id, state, attr, self.force_update)

    def schedule_update_ha_state(self, force_refresh=False):
        """Schedule a update ha state change task.

        That is only needed on executor to not block.
        """
        self.hass.add_job(self.async_update_ha_state(force_refresh))

    def remove(self) -> None:
        """Remove entity from HASS."""
        run_coroutine_threadsafe(
            self.async_remove(), self.hass.loop
        ).result()

    @asyncio.coroutine
    def async_remove(self) -> None:
        """Remove entity from async HASS.

        This method must be run in the event loop.
        """
        self.hass.states.async_remove(self.entity_id)

    def _attr_setter(self, name, typ, attr, attrs):
        """Helper method to populate attributes based on properties.

        Existing keys in attrs are never overwritten.
        """
        if attr in attrs:
            return

        value = getattr(self, name)

        # NOTE: falsy property values (None, False, 0, '') are skipped, so
        # e.g. supported_features == 0 is never published.
        if not value:
            return

        try:
            attrs[attr] = typ(value)
        except (TypeError, ValueError):
            pass

    def __eq__(self, other):
        """Return the comparison."""
        # NOTE(review): __eq__ is defined without __hash__, which makes
        # Entity instances unhashable on Python 3 -- confirm intended.
        return (isinstance(other, Entity) and
                other.unique_id == self.unique_id)

    def __repr__(self):
        """Return the representation."""
        return "<Entity {}: {}>".format(self.name, self.state)
class ToggleEntity(Entity):
    """An abstract class for entities that can be turned on and off."""

    # pylint: disable=no-self-use

    @property
    def state(self) -> str:
        """Return the state."""
        if self.is_on:
            return STATE_ON
        return STATE_OFF

    @property
    def is_on(self) -> bool:
        """Return True if entity is on."""
        raise NotImplementedError()

    def turn_on(self, **kwargs) -> None:
        """Turn the entity on."""
        raise NotImplementedError()

    def async_turn_on(self, **kwargs):
        """Turn the entity on.

        This method must be run in the event loop and returns a coroutine.
        """
        call = ft.partial(self.turn_on, **kwargs)
        return self.hass.loop.run_in_executor(None, call)

    def turn_off(self, **kwargs) -> None:
        """Turn the entity off."""
        raise NotImplementedError()

    def async_turn_off(self, **kwargs):
        """Turn the entity off.

        This method must be run in the event loop and returns a coroutine.
        """
        call = ft.partial(self.turn_off, **kwargs)
        return self.hass.loop.run_in_executor(None, call)

    def toggle(self) -> None:
        """Toggle the entity."""
        action = self.turn_off if self.is_on else self.turn_on
        action()

    def async_toggle(self):
        """Toggle the entity.

        This method must be run in the event loop and returns a coroutine.
        """
        if self.is_on:
            return self.async_turn_off()
        return self.async_turn_on()
| |
#!/usr/bin/env python
"""
Dummy server used for unit testing.
"""
from __future__ import print_function
import errno
import logging
import os
import random
import string
import sys
import threading
import socket
import warnings
from urllib3.exceptions import HTTPWarning
from tornado.platform.auto import set_close_exec
import tornado.httpserver
import tornado.ioloop
import tornado.web
log = logging.getLogger(__name__)
# All test certificates live next to this module in ./certs.
CERTS_PATH = os.path.join(os.path.dirname(__file__), 'certs')
# Default server certificate/key pair used by HTTPS test servers.
DEFAULT_CERTS = {
    'certfile': os.path.join(CERTS_PATH, 'server.crt'),
    'keyfile': os.path.join(CERTS_PATH, 'server.key'),
}
# Certificate without a subjectAltName extension (shares the default key).
NO_SAN_CERTS = {
    'certfile': os.path.join(CERTS_PATH, 'server.no_san.crt'),
    'keyfile': DEFAULT_CERTS['keyfile']
}
# Certificate whose subject is an IPv6 address literal.
IPV6_ADDR_CERTS = {
    'certfile': os.path.join(CERTS_PATH, 'server.ipv6addr.crt'),
    'keyfile': os.path.join(CERTS_PATH, 'server.ipv6addr.key'),
}
# Assorted CA bundles / paths used to exercise verification code paths.
DEFAULT_CA = os.path.join(CERTS_PATH, 'cacert.pem')
DEFAULT_CA_BAD = os.path.join(CERTS_PATH, 'client_bad.pem')
NO_SAN_CA = os.path.join(CERTS_PATH, 'cacert.no_san.pem')
DEFAULT_CA_DIR = os.path.join(CERTS_PATH, 'ca_path_test')
IPV6_ADDR_CA = os.path.join(CERTS_PATH, 'server.ipv6addr.crt')
def _has_ipv6(host):
    """Return True if the system can bind an IPv6 address.

    :param host: Hostname or address literal to attempt to bind.
    :returns: True when an AF_INET6 socket could be created and bound.
    """
    sock = None
    has_ipv6 = False
    if socket.has_ipv6:
        # has_ipv6 returns true if cPython was compiled with IPv6 support.
        # It does not tell us if the system has IPv6 support enabled. To
        # determine that we must bind to an IPv6 address.
        # https://github.com/shazow/urllib3/pull/611
        # https://bugs.python.org/issue658327
        try:
            sock = socket.socket(socket.AF_INET6)
            sock.bind((host, 0))
            has_ipv6 = True
        except socket.error:
            # Only swallow socket-level failures (creation/resolution/bind);
            # the previous bare ``except:`` also hid KeyboardInterrupt and
            # SystemExit.
            pass
    if sock:
        sock.close()
    return has_ipv6
# Some systems may have IPv6 support but DNS may not be configured
# properly. We can not count that localhost will resolve to ::1 on all
# systems. See https://github.com/shazow/urllib3/pull/611 and
# https://bugs.python.org/issue18792
HAS_IPV6_AND_DNS = _has_ipv6('localhost')
HAS_IPV6 = _has_ipv6('::1')
# Different types of servers we have:
class NoIPv6Warning(HTTPWarning):
    """Warning emitted when IPv6 is unavailable and IPv4 is used instead."""
class SocketServerThread(threading.Thread):
    """
    Daemon thread that binds an ephemeral port and hands the listening
    socket to a user-supplied handler.

    :param socket_handler: Callable which receives a socket argument for one
        request.
    :param ready_event: Event which gets set when the socket handler is
        ready to receive requests.
    """
    # Prefer IPv6 only when both the stack and DNS resolution support it.
    USE_IPV6 = HAS_IPV6_AND_DNS
    def __init__(self, socket_handler, host='localhost', port=8081,
                 ready_event=None):
        # NOTE(review): ``port`` is accepted but never stored or used; the
        # server always binds an ephemeral port (see _start_server) and
        # publishes it as ``self.port``.
        threading.Thread.__init__(self)
        self.daemon = True
        self.socket_handler = socket_handler
        self.host = host
        self.ready_event = ready_event
    def _start_server(self):
        # Pick the address family, warning when falling back to IPv4.
        if self.USE_IPV6:
            sock = socket.socket(socket.AF_INET6)
        else:
            warnings.warn("No IPv6 support. Falling back to IPv4.",
                          NoIPv6Warning)
            sock = socket.socket(socket.AF_INET)
        if sys.platform != 'win32':
            # Allow quick rebinding of the address between test runs.
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Bind an OS-chosen port and expose it for the test client.
        sock.bind((self.host, 0))
        self.port = sock.getsockname()[1]
        # Once listen() returns, the server socket is ready
        sock.listen(0)
        if self.ready_event:
            self.ready_event.set()
        # Delegate all accept/handle logic to the supplied handler.
        self.socket_handler(sock)
        sock.close()
    def run(self):
        # _start_server() returns None; the assignment is kept as-is.
        self.server = self._start_server()
# FIXME: there is a pull request patching bind_sockets in Tornado directly.
# If it gets merged and released we can drop this and use
# `tornado.netutil.bind_sockets` again.
# https://github.com/facebook/tornado/pull/977
def bind_sockets(port, address=None, family=socket.AF_UNSPEC, backlog=128,
                 flags=None):
    """Creates listening sockets bound to the given port and address.
    Returns a list of socket objects (multiple sockets are returned if
    the given address maps to multiple IP addresses, which is most common
    for mixed IPv4 and IPv6 use).
    Address may be either an IP address or hostname. If it's a hostname,
    the server will listen on all IP addresses associated with the
    name. Address may be an empty string or None to listen on all
    available interfaces. Family may be set to either `socket.AF_INET`
    or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise
    both will be used if available.
    The ``backlog`` argument has the same meaning as for
    `socket.listen() <socket.socket.listen>`.
    ``flags`` is a bitmask of AI_* flags to `~socket.getaddrinfo`, like
    ``socket.AI_PASSIVE | socket.AI_NUMERICHOST``.
    """
    sockets = []
    # Empty string means "all interfaces", same as None for getaddrinfo.
    if address == "":
        address = None
    if not HAS_IPV6 and family == socket.AF_UNSPEC:
        # Python can be compiled with --disable-ipv6, which causes
        # operations on AF_INET6 sockets to fail, but does not
        # automatically exclude those results from getaddrinfo
        # results.
        # http://bugs.python.org/issue16208
        family = socket.AF_INET
    if flags is None:
        flags = socket.AI_PASSIVE
    # Remembers the port chosen for the first family so subsequent
    # families (e.g. IPv6 after IPv4) bind the same port number.
    binded_port = None
    # set() de-duplicates addrinfo results that some platforms repeat.
    for res in set(socket.getaddrinfo(address, port, family,
                                      socket.SOCK_STREAM, 0, flags)):
        af, socktype, proto, canonname, sockaddr = res
        try:
            sock = socket.socket(af, socktype, proto)
        except socket.error as e:
            # Skip families the kernel doesn't support; re-raise the rest.
            if e.args[0] == errno.EAFNOSUPPORT:
                continue
            raise
        set_close_exec(sock.fileno())
        if os.name != 'nt':
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        if af == socket.AF_INET6:
            # On linux, ipv6 sockets accept ipv4 too by default,
            # but this makes it impossible to bind to both
            # 0.0.0.0 in ipv4 and :: in ipv6. On other systems,
            # separate sockets *must* be used to listen for both ipv4
            # and ipv6. For consistency, always disable ipv4 on our
            # ipv6 sockets and use a separate ipv4 socket when needed.
            #
            # Python 2.x on windows doesn't have IPPROTO_IPV6.
            if hasattr(socket, "IPPROTO_IPV6"):
                sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
        # automatic port allocation with port=None
        # should bind on the same port on IPv4 and IPv6
        host, requested_port = sockaddr[:2]
        if requested_port == 0 and binded_port is not None:
            sockaddr = tuple([host, binded_port] + list(sockaddr[2:]))
        sock.setblocking(0)
        sock.bind(sockaddr)
        binded_port = sock.getsockname()[1]
        sock.listen(backlog)
        sockets.append(sock)
    return sockets
def run_tornado_app(app, io_loop, certs, scheme, host):
    """Start *app* on an ephemeral port and return (server, port)."""
    # HTTPS servers get the certificate bundle passed as ssl_options.
    if scheme == 'https':
        server = tornado.httpserver.HTTPServer(
            app, ssl_options=certs, io_loop=io_loop)
    else:
        server = tornado.httpserver.HTTPServer(app, io_loop=io_loop)
    # Let the OS pick a free port, then attach the listening sockets.
    listeners = bind_sockets(None, address=host)
    chosen_port = listeners[0].getsockname()[1]
    server.add_sockets(listeners)
    return server, chosen_port
def run_loop_in_thread(io_loop):
    """Start *io_loop* on a background thread and return that thread."""
    loop_thread = threading.Thread(target=io_loop.start)
    loop_thread.start()
    return loop_thread
def get_unreachable_address():
    """Return a (host, port) pair that almost certainly refuses connections."""
    while True:
        # A random 60-character hostname is effectively guaranteed not to
        # resolve; the port number is arbitrary.
        candidate_host = ''.join(
            random.choice(string.ascii_lowercase) for _ in range(60))
        candidate = (candidate_host, 54321)
        # check if we are really "lucky" and hit an actual server
        try:
            probe = socket.create_connection(candidate)
        except socket.error:
            return candidate
        probe.close()
if __name__ == '__main__':
    # For debugging dummyserver itself - python -m dummyserver.server
    from .testcase import TestingApp
    host = '127.0.0.1'
    io_loop = tornado.ioloop.IOLoop()
    # Route every request path to the TestingApp handler.
    app = tornado.web.Application([(r".*", TestingApp)])
    # Plain HTTP (certs=None) on an OS-chosen ephemeral port.
    server, port = run_tornado_app(app, io_loop, None,
                                   'http', host)
    # The IOLoop runs on a background thread; main thread just reports.
    server_thread = run_loop_in_thread(io_loop)
    print("Listening on http://{host}:{port}".format(host=host, port=port))
| |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import *
import collections
from edb import errors
from edb.common import ordered
from edb.common import topological
from . import delta as sd
from . import expraliases as s_expraliases
from . import functions as s_func
from . import inheriting
from . import name as sn
from . import objects as so
from . import pointers as s_pointers
from . import constraints as s_constraints
from . import referencing
from . import types as s_types
if TYPE_CHECKING:
from . import schema as s_schema
class DepGraphEntryExtra(NamedTuple):
    """Extra payload carried by a dependency-graph entry."""
    # Names of objects this command is an implicit (inherited) effect of;
    # used during tree reconstruction to attach the branch to its parent op.
    implicit_ancestors: List[sn.Name]
# Graph keys are (tag, object-name) pairs, where tag is one of the
# operation tags assigned in _trace_op ('create', 'alter', 'rename', ...).
DepGraphKey = Tuple[str, str]
DepGraphEntry = topological.DepGraphEntry[
    DepGraphKey, Tuple[sd.Command, ...], DepGraphEntryExtra,
]
DepGraph = Dict[DepGraphKey, DepGraphEntry]
def linearize_delta(
    delta: sd.DeltaRoot,
    old_schema: Optional[s_schema.Schema],
    new_schema: s_schema.Schema,
) -> sd.DeltaRoot:
    """Reorder the *delta* tree in-place to satisfy command dependency order.
    Args:
        delta:
            Input delta command tree.
        old_schema:
            Schema used to resolve original object state.
        new_schema:
            Schema used to resolve final schema state.
    Returns:
        Input delta tree reordered according to topological ordering of
        commands.
    """
    # We take the scatter-sort-gather approach here, where the original
    # tree is broken up into linear branches, which are then sorted
    # and reassembled back into a tree.
    # A map of commands to root->command paths through the tree.
    # Nodes are duplicated so the interior nodes of the path are
    # distinct.
    opmap: Dict[sd.Command, List[sd.Command]] = {}
    strongrefs: Dict[sn.Name, sn.Name] = {}
    # Scatter: flatten the tree into per-command root->leaf branches.
    for op in _get_sorted_subcommands(delta):
        _break_down(opmap, strongrefs, [delta, op])
    depgraph: DepGraph = {}
    renames: Dict[sn.Name, sn.Name] = {}
    renames_r: Dict[sn.Name, sn.Name] = {}
    deletions: Set[sn.Name] = set()
    # First pass: collect renames (both directions) and deletions so that
    # dependency tracing can translate between old and new names.
    for op in opmap:
        if isinstance(op, sd.RenameObject):
            renames[op.classname] = op.new_name
            renames_r[op.new_name] = op.classname
        elif isinstance(op, sd.DeleteObject):
            deletions.add(op.classname)
    # Second pass: record each branch and its dependencies in depgraph.
    for op, opbranch in opmap.items():
        if isinstance(op, sd.AlterObject) and not op.get_subcommands():
            continue
        _trace_op(op, opbranch, depgraph, renames,
                  renames_r, strongrefs, old_schema, new_schema)
    # Drop placeholder entries that never had a command branch attached.
    depgraph = dict(filter(lambda i: i[1].item != (), depgraph.items()))
    # Restrict dependency sets to nodes that actually exist in the graph.
    everything = set(depgraph)
    for item in depgraph.values():
        item.deps &= everything
        item.weak_deps &= everything
    # Sort topologically, then gather the branches back into a tree.
    sortedlist = [i[1] for i in topological.sort_ex(depgraph)]
    reconstructed = reconstruct_tree(sortedlist, depgraph)
    delta.replace_all(reconstructed.get_subcommands())
    return delta
def reconstruct_tree(
    sortedlist: List[DepGraphEntry],
    depgraph: DepGraph,
) -> sd.DeltaRoot:
    """Reassemble topologically sorted command branches into a delta tree.

    Args:
        sortedlist:
            Dependency-graph entries in topological order; each entry's
            ``item`` is a root->leaf command branch.
        depgraph:
            The full dependency graph the entries came from.
    Returns:
        A new DeltaRoot containing the reassembled command tree.
    """
    result = sd.DeltaRoot()
    # Child to parent mapping.
    parents: Dict[sd.Command, sd.Command] = {}
    # A mapping of commands to their dependencies.
    dependencies: Dict[sd.Command, Set[sd.Command]] = (
        collections.defaultdict(set))
    # Current address of command within a tree in the form of
    # a tuple of indexes where each index represents relative
    # position within the tree rank.
    offsets: Dict[sd.Command, Tuple[int, ...]] = {}
    # Object commands indexed by command type and object name and
    # implicitness, where each entry represents the latest seen
    # command of the type for a particular object. Implicit commands
    # are included, but can only be attached to by other implicit
    # commands.
    opindex: Dict[
        Tuple[Type[sd.ObjectCommand[so.Object]], sn.Name, bool],
        sd.ObjectCommand[so.Object]
    ] = {}
    def ok_to_attach_to(
        op_to_attach: sd.Command,
        op_to_attach_to: sd.ObjectCommand[so.Object],
        only_if_confident: bool = False,
    ) -> bool:
        """Determine if a given command can be attached to another.
        Returns True, if *op_to_attach* can be attached to *op_to_attach_to*
        without violating the dependency order.
        """
        if only_if_confident and isinstance(op_to_attach, sd.ObjectCommand):
            # Avoid reattaching the subcommand if confidence is below 100%,
            # so that granular prompts can be generated.
            confidence = op_to_attach.get_annotation('confidence')
            if confidence is not None and confidence < 1.0:
                return False
        # Attaching is safe only if no dependency of op_to_attach is
        # positioned after the target in the tree (prefix comparison).
        tgt_offset = offsets[op_to_attach_to]
        tgt_offset_len = len(tgt_offset)
        deps = dependencies[op_to_attach]
        return all(offsets[dep][:tgt_offset_len] <= tgt_offset for dep in deps)
    def attach(
        opbranch: Tuple[sd.Command, ...],
        new_parent: sd.Command,
        slice_start: int = 1,
        as_implicit: bool = False,
    ) -> None:
        """Attach a portion of a given command branch to another parent.
        Args:
            opbranch:
                Command branch to attach to *new_parent*.
            new_parent:
                Command node to attach the specified portion of *opbranch* to.
            slice_start:
                Offset into *opbranch* that determines which commands
                get attached.
            as_implicit:
                If True, the command branch is considered to be implicit,
                i.e. it is not recorded in the command index.
        """
        parent = opbranch[slice_start]
        op = opbranch[-1]
        # Compute the new offset of the attached subtree.
        offset_within_parent = new_parent.get_nonattr_subcommand_count()
        if not isinstance(new_parent, sd.DeltaRoot):
            parent_offset = offsets[new_parent] + (offset_within_parent,)
        else:
            parent_offset = (offset_within_parent,)
        # Re-home the branch head under the new parent.
        new_parent.add(parent)
        old_parent = parents[parent]
        old_parent.discard(parent)
        parents[parent] = new_parent
        # Re-index and re-address every command along the attached branch.
        for i in range(slice_start, len(opbranch)):
            op = opbranch[i]
            if isinstance(op, sd.ObjectCommand):
                ancestor_key = (type(op), op.classname, as_implicit)
                opindex[ancestor_key] = op
            if op in offsets:
                op_offset = offsets[op][slice_start:]
            else:
                op_offset = (0,) * (i - slice_start)
            offsets[op] = parent_offset + op_offset
    def maybe_replace_preceding(
        op: sd.Command,
    ) -> bool:
        """Possibly merge and replace an earlier command with *op*.
        If *op* is a DELETE command, or an ALTER command that has no
        subcommands, and there is an earlier ALTER command operating
        on the same object as *op*, merge that command into *op* and
        replace it with *op*.
        Returns:
            True if merge and replace happened, False otherwise.
        """
        if not (
            isinstance(op, sd.DeleteObject)
            or (
                isinstance(op, sd.AlterObject)
                and op.get_nonattr_special_subcommand_count() == 0
            )
        ):
            return False
        alter_cmd_cls = sd.get_object_command_class(
            sd.AlterObject, op.get_schema_metaclass())
        if alter_cmd_cls is None:
            # ALTER isn't even defined for this object class, bail.
            return False
        alter_key = ((alter_cmd_cls), op.classname, False)
        alter_op = opindex.get(alter_key)
        if alter_op is None:
            # No preceding ALTER, bail.
            return False
        # Merging is only safe when it respects dependency order and both
        # commands sit at the same level (root vs. nested).
        if (
            not ok_to_attach_to(op, alter_op)
            or (
                isinstance(parents[op], sd.DeltaRoot)
                != isinstance(parents[alter_op], sd.DeltaRoot)
            )
        ):
            return False
        # Move the earlier ALTER's subcommands into *op*, preserving order.
        for alter_sub in reversed(alter_op.get_prerequisites()):
            op.prepend_prerequisite(alter_sub)
            parents[alter_sub] = op
        for alter_sub in reversed(
            alter_op.get_subcommands(include_prerequisites=False)
        ):
            op.prepend(alter_sub)
            parents[alter_sub] = op
        # Swap *op* into the position the ALTER used to occupy.
        attached_root = parents[alter_op]
        attached_root.replace(alter_op, op)
        opindex[alter_key] = op
        opindex[type(op), op.classname, False] = op
        offsets[op] = offsets[alter_op]
        parents[op] = attached_root
        return True
    def maybe_attach_to_preceding(
        opbranch: Tuple[sd.Command, ...],
        parent_candidates: List[sn.Name],
        allowed_op_types: List[Type[sd.ObjectCommand[so.Object]]],
        as_implicit: bool = False,
        slice_start: int = 1,
    ) -> bool:
        """Find a parent and attach a given portion of command branch to it.
        Args:
            opbranch:
                Command branch to consider.
            parent_candidates:
                A list of parent object names to consider when looking for
                a parent command.
            allowed_op_types:
                A list of command types to consider when looking for a
                parent command.
            as_implicit:
                If True, the command branch is considered to be implicit,
                i.e. it is not recorded in the command index.
            slice_start:
                Offset into *opbranch* that determines which commands
                get attached.
        """
        for candidate in parent_candidates:
            for op_type in allowed_op_types:
                parent_op = opindex.get((op_type, candidate, False))
                # implicit ops are allowed to attach to other implicit
                # ops. (Since we want them to chain properly in
                # inheritance order.)
                if parent_op is None and as_implicit:
                    parent_op = opindex.get((op_type, candidate, True))
                if (
                    parent_op is not None
                    and ok_to_attach_to(
                        op,
                        parent_op,
                        only_if_confident=not as_implicit,
                    )
                ):
                    attach(
                        opbranch,
                        parent_op,
                        as_implicit=as_implicit,
                        slice_start=slice_start,
                    )
                    return True
        return False
    # First, build parents and dependencies maps.
    for info in sortedlist:
        opbranch = info.item
        op = opbranch[-1]
        for j, pop in enumerate(opbranch[1:]):
            parents[pop] = opbranch[j]
        for dep in info.deps:
            dep_item = depgraph[dep]
            dep_stack = dep_item.item
            dep_op = dep_stack[-1]
            dependencies[op].add(dep_op)
    # Main pass: place every branch into the output tree.
    for info in sortedlist:
        opbranch = info.item
        op = opbranch[-1]
        # Elide empty ALTER statements from output.
        if isinstance(op, sd.AlterObject) and not op.get_subcommands():
            continue
        # If applicable, replace a preceding command with this op.
        if maybe_replace_preceding(op):
            continue
        if (
            isinstance(op, sd.ObjectCommand)
            and not isinstance(op, sd.CreateObject)
            and info.extra is not None
            and info.extra.implicit_ancestors
        ):
            # This command is deemed to be an implicit effect of another
            # command, such as when alteration is propagated through the
            # inheritance chain. If so, find a command that operates on
            # a parent object and attach this branch to it.
            allowed_ops = [type(op)]
            if isinstance(op, sd.DeleteObject):
                allowed_ops.append(op.get_other_command_class(sd.DeleteObject))
            if maybe_attach_to_preceding(
                opbranch,
                info.extra.implicit_ancestors,
                allowed_ops,
                as_implicit=True,
            ):
                continue
        # Walking the branch toward root, see if there's a matching
        # branch prefix we could attach to.
        for depth, ancestor_op in enumerate(reversed(opbranch[1:-1])):
            assert isinstance(ancestor_op, sd.ObjectCommand)
            allowed_ops = []
            create_cmd_t = ancestor_op.get_other_command_class(sd.CreateObject)
            if type(ancestor_op) != create_cmd_t:
                allowed_ops.append(create_cmd_t)
            allowed_ops.append(type(ancestor_op))
            if maybe_attach_to_preceding(
                opbranch,
                [ancestor_op.classname],
                allowed_ops,
                slice_start=len(opbranch) - (depth + 1),
            ):
                break
        else:
            # No branches to attach to, so attach to root.
            attach(opbranch, result)
    return result
def _command_key(cmd: sd.Command) -> Any:
    """Return a stable sort key used to order sibling subcommands."""
    if isinstance(cmd, sd.ObjectCommand):
        return (cmd.get_schema_metaclass().__name__, cmd.classname)
    if isinstance(cmd, sd.AlterObjectProperty):
        return ('.field', cmd.property)
    return ('_generic', type(cmd).__name__)
def _get_sorted_subcommands(cmd: sd.Command) -> List[sd.Command]:
    """Return *cmd*'s subcommands in deterministic (_command_key) order."""
    return sorted(cmd.get_subcommands(), key=_command_key)
def _break_down(
    opmap: Dict[sd.Command, List[sd.Command]],
    strongrefs: Dict[sn.Name, sn.Name],
    opbranch: List[sd.Command],
) -> None:
    """Recursively flatten a command subtree into root->leaf branches.

    Populates *opmap* with one entry per extractable command (mapping the
    leaf command to its full path from the delta root) and records strong
    references in *strongrefs*.
    """
    # Deep branches get their interior nodes copied into fresh ALTER
    # wrappers so each branch can be moved independently.
    if len(opbranch) > 2:
        new_opbranch = _extract_op(opbranch)
    else:
        new_opbranch = opbranch
    op = new_opbranch[-1]
    # Subcommand types that may themselves be broken out into branches.
    breakable_commands = (
        referencing.ReferencedObjectCommand,
        sd.RenameObject,
        inheriting.RebaseInheritingObject,
    )
    for sub_op in _get_sorted_subcommands(op):
        if (
            isinstance(sub_op, sd.AlterObjectProperty)
            and not isinstance(op, sd.DeleteObject)
        ):
            assert isinstance(op, sd.ObjectCommand)
            mcls = op.get_schema_metaclass()
            field = mcls.get_field(sub_op.property)
            # Break a possible reference cycle
            # (i.e. Type.rptr <-> Pointer.target)
            if (
                field.weak_ref
                or (
                    isinstance(op, sd.AlterObject)
                    and issubclass(field.type, so.Object)
                )
            ):
                _break_down(opmap, strongrefs, new_opbranch + [sub_op])
        elif (
            isinstance(sub_op, sd.AlterSpecialObjectField)
            and not isinstance(
                sub_op,
                (
                    referencing.AlterOwned,
                    s_pointers.SetPointerType,
                )
            )
        ):
            # Generic special-field alters stay with their parent command.
            pass
        elif (
            isinstance(sub_op, referencing.ReferencedObjectCommandBase)
            and sub_op.is_strong_ref
        ):
            assert isinstance(op, sd.ObjectCommand)
            # Remember that sub_op's object is strongly owned by op's.
            strongrefs[sub_op.classname] = op.classname
        elif isinstance(sub_op, breakable_commands):
            _break_down(opmap, strongrefs, new_opbranch + [sub_op])
    # For SET TYPE and friends, we need to make sure that an alter
    # (with children) makes it into the opmap so it is processed.
    if (
        isinstance(op, sd.AlterSpecialObjectField)
        and not isinstance(op, referencing.AlterOwned)
    ):
        opmap[new_opbranch[-2]] = new_opbranch[:-1]
    opmap[op] = new_opbranch
def _trace_op(
    op: sd.Command,
    opbranch: List[sd.Command],
    depgraph: DepGraph,
    renames: Dict[sn.Name, sn.Name],
    renames_r: Dict[sn.Name, sn.Name],
    strongrefs: Dict[sn.Name, sn.Name],
    old_schema: Optional[s_schema.Schema],
    new_schema: s_schema.Schema,
) -> None:
    """Record *op* and its dependency edges into *depgraph*.

    *opbranch* is the root->op path produced by _break_down.  Each op is
    classified with a tag ('create', 'alter', 'rename', 'rebase', 'delete',
    'setowned', 'dropowned' or 'field') and keyed by (tag, object name);
    edges are derived from schema references, renames and ownership.
    """
    def get_deps(key: DepGraphKey) -> DepGraphEntry:
        # Get-or-create the graph entry for *key*; freshly created
        # entries start with an empty item (filtered out later if it
        # never receives a command branch).
        try:
            item = depgraph[key]
        except KeyError:
            item = depgraph[key] = DepGraphEntry(
                item=(),
                deps=ordered.OrderedSet(),
                weak_deps=ordered.OrderedSet(),
            )
        return item
    def record_field_deps(
        op: sd.AlterObjectProperty,
        parent_op: sd.ObjectCommand[so.Object],
    ) -> str:
        # Field values that are objects induce dependencies on the
        # referenced object's create/alter (and rename, if applicable).
        if isinstance(op.new_value, (so.Object, so.ObjectShell)):
            obj = op.new_value
            nvn = obj.get_name(new_schema)
            if nvn is not None:
                deps.add(('create', str(nvn)))
                deps.add(('alter', str(nvn)))
                if nvn in renames_r:
                    deps.add(('rename', str(renames_r[nvn])))
            if isinstance(obj, so.ObjectShell):
                obj = obj.resolve(new_schema)
            # For SET TYPE, we want to finish any rebasing into the
            # target type before we change the type.
            if isinstance(obj, so.InheritingObject):
                for desc in obj.descendants(new_schema):
                    deps.add(('rebase', str(desc.get_name(new_schema))))
        graph_key = f'{parent_op.classname}%%{op.property}'
        deps.add(('create', str(parent_op.classname)))
        deps.add(('alter', str(parent_op.classname)))
        # When the field used to point at a different object, that old
        # object's deletion must come after this field change.
        if isinstance(op.old_value, (so.Object, so.ObjectShell)):
            assert old_schema is not None
            ovn = op.old_value.get_name(old_schema)
            nvn = op.new_value.get_name(new_schema)
            if ovn != nvn:
                ov_item = get_deps(('delete', str(ovn)))
                ov_item.deps.add((tag, graph_key))
        return graph_key
    def write_dep_matrix(
        dependent: str,
        dependent_tags: Tuple[str, ...],
        dependency: str,
        dependency_tags: Tuple[str, ...],
        *,
        as_weak: bool = False,
    ) -> None:
        # Cross-product dependency recording: every dependent tag depends
        # on every dependency tag (weakly, if requested).
        for dependent_tag in dependent_tags:
            item = get_deps((dependent_tag, dependent))
            for dependency_tag in dependency_tags:
                if as_weak:
                    item.weak_deps.add((dependency_tag, dependency))
                else:
                    item.deps.add((dependency_tag, dependency))
    def write_ref_deps(
        ref: so.Object,
        obj: so.Object,
        this_name_str: str,
    ) -> None:
        # Record that *ref* (a referrer of *obj*) must be ordered after
        # the current operation on *obj*.
        ref_name = ref.get_name(new_schema)
        if ref_name in renames_r:
            ref_name = renames_r[ref_name]
        ref_name_str = str(ref_name)
        if ((isinstance(ref, referencing.ReferencedObject)
                and ref.get_referrer(new_schema) == obj)
                or (isinstance(obj, referencing.ReferencedObject)
                    and obj.get_referrer(new_schema) == ref)):
            # Mostly ignore refs generated by refdict backref, but
            # make create/alter depend on renames of the backref.
            # This makes sure that a rename is done before the innards are
            # modified. DDL doesn't actually require this but some of the
            # internals for producing the DDL do (since otherwise we can
            # generate references to the renamed type in our delta before
            # it is renamed).
            if tag in ('create', 'alter'):
                deps.add(('rename', ref_name_str))
            return
        write_dep_matrix(
            dependent=ref_name_str,
            dependent_tags=('create', 'alter', 'rebase'),
            dependency=this_name_str,
            dependency_tags=('create', 'alter', 'rename'),
        )
        item = get_deps(('rename', ref_name_str))
        item.deps.add(('create', this_name_str))
        item.deps.add(('alter', this_name_str))
        item.deps.add(('rename', this_name_str))
        if isinstance(ref, s_pointers.Pointer):
            # The current item is a type referred to by
            # a link or property in another type. Set the referring
            # type and its descendants as weak dependents of the current
            # item to reduce the number of unnecessary ALTERs in the
            # final delta, especially ones that might result in SET TYPE
            # commands being generated.
            ref_src = ref.get_source(new_schema)
            if isinstance(ref_src, s_pointers.Pointer):
                ref_src_src = ref_src.get_source(new_schema)
                if ref_src_src is not None:
                    ref_src = ref_src_src
            if ref_src is not None:
                for desc in ref_src.descendants(new_schema) | {ref_src}:
                    desc_name = str(desc.get_name(new_schema))
                    write_dep_matrix(
                        dependent=desc_name,
                        dependent_tags=('create', 'alter'),
                        dependency=this_name_str,
                        dependency_tags=('create', 'alter', 'rename'),
                        as_weak=True,
                    )
    deps: ordered.OrderedSet[Tuple[str, str]] = ordered.OrderedSet()
    graph_key: str
    implicit_ancestors: List[sn.Name] = []
    # Classify the operation into a dependency-graph tag.
    if isinstance(op, sd.CreateObject):
        tag = 'create'
    elif isinstance(op, sd.AlterObject):
        tag = 'alter'
    elif isinstance(op, sd.RenameObject):
        tag = 'rename'
    elif isinstance(op, inheriting.RebaseInheritingObject):
        tag = 'rebase'
    elif isinstance(op, sd.DeleteObject):
        tag = 'delete'
    elif isinstance(op, referencing.AlterOwned):
        if op.get_attribute_value('owned'):
            tag = 'setowned'
        else:
            tag = 'dropowned'
    elif isinstance(op, (sd.AlterObjectProperty, sd.AlterSpecialObjectField)):
        tag = 'field'
    else:
        raise RuntimeError(
            f'unexpected delta command type at top level: {op!r}'
        )
    # DELETE / ALTER OWNED: dependencies are computed against the
    # *old* schema, since the object may not exist in the new one.
    if isinstance(op, (sd.DeleteObject, referencing.AlterOwned)):
        assert old_schema is not None
        try:
            obj = get_object(old_schema, op)
        except errors.InvalidReferenceError:
            if isinstance(op, sd.DeleteObject) and op.if_exists:
                # If this is conditional deletion and the object isn't there,
                # then don't bother with analysis, since this command wouldn't
                # get executed.
                return
            else:
                raise
        refs = _get_referrers(old_schema, obj, strongrefs)
        for ref in refs:
            ref_name_str = str(ref.get_name(old_schema))
            if (
                (
                    isinstance(obj, referencing.ReferencedObject)
                    and obj.get_referrer(old_schema) == ref
                )
            ):
                # If the referrer is enclosing the object
                # (i.e. the reference is a refdict reference),
                # we sort the enclosed operation first.
                ref_item = get_deps(('delete', ref_name_str))
                ref_item.deps.add((tag, str(op.classname)))
            elif (
                isinstance(ref, referencing.ReferencedInheritingObject)
                and (
                    op.classname
                    in {
                        b.get_name(old_schema)
                        for b in ref.get_implicit_ancestors(old_schema)
                    }
                )
                and (
                    not isinstance(ref, s_pointers.Pointer)
                    or not ref.get_from_alias(old_schema)
                )
            ):
                # If the ref is an implicit descendant (i.e. an inherited ref),
                # we also sort it _after_ the parent, because we'll pull
                # it as a child of the parent op at the time of tree
                # reassembly.
                ref_item = get_deps(('delete', ref_name_str))
                ref_item.deps.add((tag, str(op.classname)))
            elif (
                isinstance(ref, referencing.ReferencedObject)
                and ref.get_referrer(old_schema) == obj
            ):
                # Skip refdict.backref_attr to avoid dependency cycles.
                continue
            else:
                # Otherwise, things must be deleted _after_ their referrers
                # have been deleted or altered.
                deps.add(('delete', ref_name_str))
                # (except for aliases, which in the collection case
                # specifically need the old target deleted before the
                # new one is created)
                if not isinstance(ref, s_expraliases.Alias):
                    deps.add(('alter', ref_name_str))
                if type(ref) == type(obj):
                    deps.add(('rebase', ref_name_str))
                # The deletion of any implicit ancestors needs to come after
                # the deletion of any referrers also.
                if isinstance(obj, referencing.ReferencedInheritingObject):
                    for ancestor in obj.get_implicit_ancestors(old_schema):
                        ancestor_name = ancestor.get_name(old_schema)
                        anc_item = get_deps(('delete', str(ancestor_name)))
                        anc_item.deps.add(('delete', ref_name_str))
        if isinstance(obj, referencing.ReferencedObject):
            referrer = obj.get_referrer(old_schema)
            if referrer is not None:
                assert isinstance(referrer, so.QualifiedObject)
                referrer_name: sn.Name = referrer.get_name(old_schema)
                if referrer_name in renames_r:
                    referrer_name = renames_r[referrer_name]
                # A drop needs to come *before* drop owned on the referrer
                # which will do it itself.
                if tag == 'delete':
                    ref_item = get_deps(('dropowned', str(referrer_name)))
                    ref_item.deps.add(('delete', str(op.classname)))
                # For SET OWNED, we need any rebase of the enclosing
                # object to come *after*, because otherwise obj could
                # get dropped before the SET OWNED takes effect.
                # DROP, also.
                if tag in ('setowned', 'delete'):
                    ref_item = get_deps(('rebase', str(referrer_name)))
                    ref_item.deps.add((tag, str(op.classname)))
                else:
                    deps.add(('rebase', str(referrer_name)))
                if (
                    isinstance(obj, referencing.ReferencedInheritingObject)
                    and (
                        not isinstance(obj, s_pointers.Pointer)
                        or not obj.get_from_alias(old_schema)
                    )
                ):
                    for ancestor in obj.get_implicit_ancestors(old_schema):
                        ancestor_name = ancestor.get_name(old_schema)
                        implicit_ancestors.append(ancestor_name)
                        if isinstance(op, referencing.AlterOwned):
                            anc_item = get_deps(('delete', str(ancestor_name)))
                            anc_item.deps.add((tag, str(op.classname)))
                            if tag == 'setowned':
                                # SET OWNED must come before ancestor rebases too
                                anc_item = get_deps(('rebase', str(ancestor_name)))
                                anc_item.deps.add(('setowned', str(op.classname)))
        if tag == 'dropowned':
            deps.add(('alter', str(op.classname)))
        graph_key = str(op.classname)
    elif isinstance(op, sd.AlterObjectProperty):
        parent_op = opbranch[-2]
        assert isinstance(parent_op, sd.ObjectCommand)
        graph_key = record_field_deps(op, parent_op)
    elif isinstance(op, sd.AlterSpecialObjectField):
        parent_op = opbranch[-2]
        assert isinstance(parent_op, sd.ObjectCommand)
        field_op = op._get_attribute_set_cmd(op._field)
        assert field_op is not None
        graph_key = record_field_deps(field_op, parent_op)
    elif isinstance(op, sd.ObjectCommand):
        # If the object was renamed, use the new name, else use regular.
        name = renames.get(op.classname, op.classname)
        obj = get_object(new_schema, op, name)
        this_name_str = str(op.classname)
        if tag == 'rename':
            # On renames, we want to delete any references before we
            # do the rename. This is because for functions and
            # constraints we implicitly rename the object when
            # something it references is renamed, and this implicit
            # rename can interfere with a CREATE/DELETE pair. So we
            # make sure to put the DELETE before the RENAME of a
            # referenced object. (An improvement would be to elide a
            # CREATE/DELETE pair when it could be implicitly handled
            # by a rename).
            assert old_schema
            old_obj = get_object(old_schema, op, op.classname)
            for ref in _get_referrers(old_schema, old_obj, strongrefs):
                deps.add(('delete', str(ref.get_name(old_schema))))
        refs = _get_referrers(new_schema, obj, strongrefs)
        for ref in refs:
            write_ref_deps(ref, obj, this_name_str)
        if tag in ('create', 'alter'):
            # In a delete/create cycle, deletion must obviously
            # happen first.
            deps.add(('delete', str(op.classname)))
            # Renaming also
            deps.add(('rename', str(op.classname)))
            # Functions overload by signature; any dropped overload of
            # the same short name must go first.
            if isinstance(obj, s_func.Function) and old_schema is not None:
                old_funcs = old_schema.get_functions(
                    sn.shortname_from_fullname(op.classname),
                    default=(),
                )
                for old_func in old_funcs:
                    deps.add(('delete', str(old_func.get_name(old_schema))))
        if tag == 'alter':
            # Alteration must happen after creation, if any.
            deps.add(('create', this_name_str))
            deps.add(('rename', this_name_str))
            deps.add(('rebase', this_name_str))
        if isinstance(obj, referencing.ReferencedObject):
            referrer = obj.get_referrer(new_schema)
            if referrer is not None:
                assert isinstance(referrer, so.QualifiedObject)
                referrer_name = referrer.get_name(new_schema)
                if referrer_name in renames_r:
                    referrer_name = renames_r[referrer_name]
                ref_name_str = str(referrer_name)
                deps.add(('create', ref_name_str))
                if op.ast_ignore_ownership() or tag == 'rename':
                    ref_item = get_deps(('rebase', ref_name_str))
                    ref_item.deps.add((tag, this_name_str))
                else:
                    deps.add(('rebase', ref_name_str))
                # Addition and removal of constraints can cause
                # changes to the cardinality of expressions that refer
                # to them. Add the appropriate dependencies in.
                if (
                    isinstance(obj, s_constraints.Constraint)
                    and isinstance(referrer, s_pointers.Pointer)
                ):
                    refs = _get_referrers(new_schema, referrer, strongrefs)
                    for ref in refs:
                        write_ref_deps(ref, referrer, this_name_str)
            if (
                isinstance(obj, referencing.ReferencedInheritingObject)
                # Changes to owned objects can't necessarily be merged
                # in with parents, so we make sure not to.
                and not obj.get_owned(new_schema)
            ):
                implicit_ancestors = [
                    b.get_name(new_schema)
                    for b in obj.get_implicit_ancestors(new_schema)
                ]
                if not isinstance(op, sd.CreateObject):
                    assert old_schema is not None
                    name = renames_r.get(op.classname, op.classname)
                    old_obj = get_object(old_schema, op, name)
                    assert isinstance(
                        old_obj,
                        referencing.ReferencedInheritingObject,
                    )
                    implicit_ancestors += [
                        b.get_name(old_schema)
                        for b in old_obj.get_implicit_ancestors(old_schema)
                    ]
        graph_key = this_name_str
    else:
        raise AssertionError(f'unexpected op type: {op!r}')
    # Finally, commit the branch and its accumulated edges to the graph.
    item = get_deps((tag, graph_key))
    item.item = tuple(opbranch)
    item.deps |= deps
    item.extra = DepGraphEntryExtra(
        implicit_ancestors=[renames_r.get(a, a) for a in implicit_ancestors],
    )
def get_object(
    schema: s_schema.Schema,
    op: sd.ObjectCommand[so.Object],
    name: Optional[sn.Name] = None,
) -> so.Object:
    """Look up the schema object that *op* refers to.

    Falls back to ``op.classname`` when *name* is not given.  Collections
    and non-qualified objects are resolved through the global namespace;
    everything else by fully-qualified name.
    """
    if name is None:
        name = op.classname
    metaclass = op.get_schema_metaclass()
    if issubclass(metaclass, s_types.Collection):
        # Qualified collection names live in the regular namespace.
        if isinstance(name, sn.QualName):
            return schema.get(name)
        return schema.get_global(metaclass, name)
    if not issubclass(metaclass, so.QualifiedObject):
        found = schema.get_global(metaclass, name)
        assert isinstance(found, so.Object)
        return found
    return schema.get(name)
def _get_referrers(
    schema: s_schema.Schema,
    obj: so.Object,
    strongrefs: Dict[sn.Name, sn.Name],
) -> List[so.Object]:
    """Collect blocking referrers of *obj*, substituting strong-ref parents.

    Referrers listed in *strongrefs* are folded into their owning object.
    The result is deterministically ordered by (type name, object name).
    """
    collected: Set[so.Object] = set()
    for candidate in schema.get_referrers(obj):
        # Non-blocking references do not constrain ordering; skip them.
        if not candidate.is_blocking_ref(schema, obj):
            continue
        parent_name = strongrefs.get(candidate.get_name(schema))
        if parent_name is None:
            collected.add(candidate)
        else:
            owner: so.Object = schema.get(parent_name)
            collected.add(owner)
    return sorted(
        collected,
        key=lambda o: (type(o).__name__, o.get_name(schema)),
    )
def _extract_op(stack: Sequence[sd.Command]) -> List[sd.Command]:
    """Re-parent the innermost command of *stack* under a chain of Alters.

    Builds a fresh nesting of ``AlterObject`` commands mirroring every
    intermediate level of *stack*, detaches the leaf command from its old
    parent, and attaches it to the new chain.  Returns the new stack from
    the (unchanged) root down to the leaf.
    """
    parent_op = stack[0]
    new_stack = [parent_op]
    # Recreate each intermediate level as an Alter of the same class.
    for stack_op in stack[1:-1]:
        assert isinstance(stack_op, sd.ObjectCommand)
        alter_class = stack_op.get_other_command_class(sd.AlterObject)
        alter_delta = alter_class(
            classname=stack_op.classname,
            ddl_identity=stack_op.ddl_identity,
            aux_object_data=stack_op.aux_object_data,
            annotations=stack_op.annotations,
            orig_cmd_type=type(stack_op),
        )
        parent_op.add(alter_delta)
        parent_op = alter_delta
        new_stack.append(parent_op)
    # Move the leaf command from its original parent onto the new chain.
    stack[-2].discard(stack[-1])
    parent_op.add(stack[-1])
    new_stack.append(stack[-1])
    return new_stack
| |
# -*- coding: utf-8 -*-
""" Surrogate Analysis
"""
# Author: Avraam Marimpis <avraam.marimpis@gmail.com>
from typing import Optional, Tuple, Callable
import numpy as np
import numbers
def surrogate_analysis(
    ts1: "np.ndarray[np.float32]",
    ts2: "np.ndarray[np.float32]",
    num_surr: Optional[int] = 1000,
    estimator_func: Optional[
        Callable[["np.ndarray[np.float32]", "np.ndarray[np.float32]"], float]
    ] = None,
    ts1_no_surr: bool = False,
    rng: Optional[np.random.RandomState] = None,
) -> Tuple[float, "np.ndarray[np.int32]", "np.ndarray[np.float32]", float]:
    """Surrogate analysis of the coupling between two time series.

    Builds AAFT surrogates of the inputs, re-estimates the coupling on each
    surrogate pair and reports the fraction of surrogate values exceeding
    the observed one.

    Parameters
    ----------
    ts1, ts2 : 1d arrays of equal length.
    num_surr : int, number of surrogate realizations.
    estimator_func : callable or None
        Coupling estimator; defaults to the absolute Pearson correlation.
    ts1_no_surr : boolean
        If True, ts1 is kept fixed and only ts2 is surrogated.
    rng : object or None
        An object of type numpy.random.RandomState.

    Returns
    -------
    p_val : float
    corr_surr :
    surrogates :
    r_value : float
    """
    if rng is None:
        rng = np.random.RandomState(0)
    if estimator_func is None:
        def _abs_corr(x, y):
            return np.abs(np.corrcoef(x, y))[0, 1]
        estimator_func = _abs_corr
    r_value = estimator_func(ts1, ts2)
    # Normalize scalar estimates to a one-element list.
    if isinstance(r_value, numbers.Real):
        r_value = [r_value]
    n_samples = len(ts1)
    n_estimates = len(r_value)
    surrogates = np.zeros([2, num_surr, n_samples])
    if ts1_no_surr is True:
        # Keep ts1 as-is, replicated once per surrogate.
        surrogates[0, ...] = np.tile(ts1, [num_surr, 1])
    else:
        surrogates[0, ...] = aaft(ts1, num_surr, rng)
    surrogates[1, ...] = aaft(ts2, num_surr, rng)
    surr_vals = np.zeros((num_surr, n_estimates))
    for surr_id in range(num_surr):
        surr_vals[surr_id, :] = estimator_func(
            surrogates[0, surr_id, ...], surrogates[1, surr_id, ...]
        )
    surr_vals = np.array(surr_vals)
    p_vals = np.zeros(n_estimates)
    for est_id in range(n_estimates):
        exceed = np.where(surr_vals[:, est_id] > r_value[est_id])[0]
        if len(exceed) == 0:
            # No surrogate exceeded the observed value: report the
            # smallest resolvable p-value rather than zero.
            p_vals[est_id] = 1.0 / float(num_surr)
        else:
            p_vals[est_id] = len(exceed) / float(num_surr)
    p_vals = p_vals.squeeze()
    surr_vals = surr_vals.squeeze()
    return p_vals, surr_vals, surrogates, r_value
def aaft(
    ts: "np.ndarray[np.float32]",
    num_surr: Optional[int] = 1,
    rng: Optional[np.random.RandomState] = None,
) -> "np.ndarray[np.float32]":
    """Amplitude Adjusted Fourier Transform surrogates.

    Each surrogate preserves the amplitude distribution of *ts* while the
    temporal ordering is scrambled through a phase-randomized Gaussian
    intermediary.

    Parameters
    ----------
    ts : 1d input series.
    num_surr : number of surrogates to generate.
    rng : object or None
        An object of type numpy.random.RandomState.

    Returns
    -------
    (num_surr, len(ts)) array of surrogates.
    """
    if rng is None:
        rng = np.random.RandomState()
    n_samples = len(ts)
    surr = np.zeros((num_surr, n_samples))
    for surr_id in range(num_surr):
        # Sorted Gaussian draw used as a rank-matching reference.
        gaussian = np.sort(rng.randn(1, n_samples)).ravel()
        sorted_ts = np.sort(ts)
        # Rank of every sample of ts (argsort of argsort yields ranks).
        ts_ranks = np.argsort(np.argsort(ts))
        # Phase-randomize the Gaussian series reordered like ts.
        randomized = phase_rand(gaussian[ts_ranks], 1, rng).ravel()
        out_ranks = np.argsort(np.argsort(randomized))
        # Map the sorted original amplitudes onto the randomized ranks.
        surr[surr_id, :] = sorted_ts[out_ranks]
    return surr
def fdr(
    p_values: "np.ndarray[np.float32]",
    q: Optional[float] = 0.01,
    method: Optional[str] = "pdep",
) -> Tuple[bool, float]:
    """False Discovery Rate (Benjamini-Hochberg step-up procedure).

    Parameters
    ----------
    p_values : array of p-values to threshold.
    q : acceptable proportion of false discoveries.
    method : kept for API compatibility; currently unused by the
        implementation (only the positive-dependence thresholds are used).

    Returns
    -------
    h : boolean array, True where the corresponding hypothesis is rejected.
    crit_p : float, the critical p-value (0.0 when nothing is rejected).
    """
    sorted_p_values = np.sort(p_values)
    m = len(sorted_p_values)
    # Linear step-up thresholds i/m * q for i = 1..m.
    thresh = np.arange(1, m + 1) * (q / m)
    rej = sorted_p_values <= thresh
    max_id = np.where(rej)[0]
    if max_id.size == 0:
        # Nothing survives the threshold: no rejections.
        # (Fixed: previously returned a float array `p_values * 0`,
        # inconsistent with the boolean mask of the other branch.)
        crit_p = 0.0
        h = np.zeros_like(p_values, dtype=bool)
    else:
        max_id = np.max(max_id)
        crit_p = sorted_p_values[max_id]
        crit_p = crit_p.squeeze()
        h = p_values <= crit_p
        h = h.squeeze()
        # Fixed: `np.bool` alias was removed in NumPy 1.24; use builtin bool.
        h = h.astype(bool)
    return h, crit_p
def phase_rand(
    data, num_surr: Optional[int] = 1, rng: Optional[np.random.RandomState] = None
) -> "np.ndarray[np.float32]":
    """Phase-randomized surrogates.

    Each surrogate keeps the amplitude spectrum of *data* but replaces the
    Fourier phases with random, conjugate-antisymmetric phases so that the
    inverse FFT stays (numerically) real.

    Parameters
    ----------
    data : 1d array-like signal.
    num_surr : number of surrogates to generate.
    rng : object or None
        An object of type numpy.random.RandomState.

    Returns
    -------
    (num_surr, len(data)) array of surrogates.
    """
    if rng is None:
        rng = np.random.RandomState()
    n_samples = np.shape(data)[0]
    surrogates = np.zeros((num_surr, n_samples))
    half = np.int32(np.floor(n_samples / 2.0))
    y = np.fft.fft(data)
    m = np.abs(y)
    p = np.angle(y)
    for i in range(num_surr):
        if n_samples % 2 == 0:
            p1 = rng.randn(half - 1, 1) * 2.0 * np.pi
            a = p1.T.ravel()
            b = p[half]  # keep the original Nyquist-bin phase
            c = np.flipud(p1).T.ravel()
            # Antisymmetric phases: p[k] = -p[n-k] keeps the result real.
            p[list(range(1, n_samples))] = np.hstack((a, b, -c))
            a = m[list(range(0, half + 1))]
            b = np.flipud(m[list(range(1, half))])
            m = np.hstack((a, b))
        else:
            p1 = rng.randn(half, 1) * 2.0 * np.pi
            a = p1.T.ravel()
            b = np.flipud(p1).T.ravel()
            # BUG FIX: the original computed `a - b` — a mistranslation of
            # MATLAB's `[a -b]` concatenation — which broadcast (half, 1)
            # against (half,) and raised on every odd-length input.  The
            # intended antisymmetric phase vector is hstack((a, -b)).
            p[list(range(1, n_samples))] = np.hstack((a, -b))
        surrogates[i, :] = np.real(np.fft.ifft(np.exp(1j * p) * m))
    return surrogates
| |
"""
This file contains a minimal set of tests for compliance with the extension
array interface test suite, and should contain no other tests.
The test suite for the full functionality of the array is located in
`pandas/tests/arrays/`.
The tests in this file are inherited from the BaseExtensionTests, and only
minimal tweaks should be applied to get the tests passing (by overwriting a
parent method).
Additional tests should either be added to one of the BaseExtensionTests
classes (if they are relevant for the extension interface for all dtypes), or
be added to the array-specific tests in `pandas/tests/arrays/`.
"""
import numpy as np
import pytest
from pandas.core.dtypes.common import is_extension_array_dtype
import pandas as pd
from pandas.core.arrays import integer_array
from pandas.core.arrays.integer import (
Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype, UInt8Dtype, UInt16Dtype,
UInt32Dtype, UInt64Dtype)
from pandas.tests.extension import base
def make_data():
    """Return 100 values: 1..8, NA, 10..97, NA, 99, 100."""
    head = list(range(1, 9))
    middle = list(range(10, 98))
    return head + [np.nan] + middle + [np.nan] + [99, 100]
@pytest.fixture(params=[Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype,
                        UInt8Dtype, UInt16Dtype, UInt32Dtype, UInt64Dtype])
def dtype(request):
    """Parametrized fixture yielding an instance of every integer EA dtype."""
    dtype_class = request.param
    return dtype_class()
@pytest.fixture
def data(dtype):
    """100-element integer array containing two missing values."""
    values = make_data()
    return integer_array(values, dtype=dtype)
@pytest.fixture
def data_for_twos(dtype):
    """Integer array of one hundred 2s."""
    twos = np.full(100, 2.0)
    return integer_array(twos, dtype=dtype)
@pytest.fixture
def data_missing(dtype):
    """Length-2 array: [NA, valid]."""
    values = [np.nan, 1]
    return integer_array(values, dtype=dtype)
@pytest.fixture
def data_for_sorting(dtype):
    """Three distinct values in scrambled order (sorted: 0, 1, 2)."""
    scrambled = [1, 2, 0]
    return integer_array(scrambled, dtype=dtype)
@pytest.fixture
def data_missing_for_sorting(dtype):
    """Three values with a missing entry in the middle."""
    values = [1, np.nan, 0]
    return integer_array(values, dtype=dtype)
@pytest.fixture
def na_cmp():
    """Comparator that treats two NaNs as equal (the NA sentinel is np.nan)."""
    def both_nan(left, right):
        return np.isnan(left) and np.isnan(right)
    return both_nan
@pytest.fixture
def na_value():
    """The NA sentinel used by integer arrays."""
    return np.nan
@pytest.fixture
def data_for_grouping(dtype):
    """Grouping pattern B B NA NA A A B C with A=0, B=1, C=2."""
    na = np.nan
    pattern = [1, 1, na, na, 0, 0, 1, 2]
    return integer_array(pattern, dtype=dtype)
class TestDtype(base.BaseDtypeTests):
    """Shared dtype tests; the unboxing test does not apply here."""

    @pytest.mark.skip(reason="using multiple dtypes")
    def test_is_dtype_unboxes_dtype(self):
        """Skipped: the dtype fixture is parametrized over many dtypes."""
class TestArithmeticOps(base.BaseArithmeticOpsTests):
    """Arithmetic-op tests adapted for IntegerArray's dtype behavior.

    IntegerArray ops do not raise, so the expected-exception hooks are
    overridden to pass ``exc=None`` and the dtype of results is adjusted
    to account for known upcasting quirks (see linked pandas issues).
    """
    def check_opname(self, s, op_name, other, exc=None):
        # overwriting to indicate ops don't raise an error
        super().check_opname(s, op_name, other, exc=None)
    def _check_op(self, s, op, other, op_name, exc=NotImplementedError):
        if exc is None:
            if s.dtype.is_unsigned_integer and (op_name == '__rsub__'):
                # TODO see https://github.com/pandas-dev/pandas/issues/22023
                pytest.skip("unsigned subtraction gives negative values")
            if (hasattr(other, 'dtype')
                    and not is_extension_array_dtype(other.dtype)
                    and pd.api.types.is_integer_dtype(other.dtype)):
                # other is np.int64 and would therefore always result in
                # upcasting, so keeping other as same numpy_dtype
                other = other.astype(s.dtype.numpy_dtype)
            result = op(s, other)
            expected = s.combine(other, op)
            if op_name in ('__rtruediv__', '__truediv__', '__div__'):
                # True division always yields floats.
                expected = expected.astype(float)
                if op_name == '__rtruediv__':
                    # TODO reverse operators result in object dtype
                    result = result.astype(float)
            elif op_name.startswith('__r'):
                # TODO reverse operators result in object dtype
                # see https://github.com/pandas-dev/pandas/issues/22024
                expected = expected.astype(s.dtype)
                result = result.astype(s.dtype)
            else:
                # combine method result in 'biggest' (int64) dtype
                expected = expected.astype(s.dtype)
                pass
            if (op_name == '__rpow__') and isinstance(other, pd.Series):
                # TODO pow on Int arrays gives different result with NA
                # see https://github.com/pandas-dev/pandas/issues/22022
                result = result.fillna(1)
            self.assert_series_equal(result, expected)
        else:
            with pytest.raises(exc):
                op(s, other)
    def _check_divmod_op(self, s, op, other, exc=None):
        # divmod never raises for IntegerArray either.
        super()._check_divmod_op(s, op, other, None)
    @pytest.mark.skip(reason="intNA does not error on ops")
    def test_error(self, data, all_arithmetic_operators):
        # other specific errors tested in the integer array specific tests
        pass
class TestComparisonOps(base.BaseComparisonOpsTests):
    """Comparison ops never raise for IntegerArray, so exc is forced to None."""

    def check_opname(self, s, op_name, other, exc=None):
        # Drop any expected exception: comparisons succeed on this array.
        super().check_opname(s, op_name, other, exc=None)

    def _compare_other(self, s, data, op_name, other):
        self.check_opname(s, op_name, other)
class TestInterface(base.BaseInterfaceTests):
    """Run the shared extension-interface tests unchanged."""
    pass
class TestConstructors(base.BaseConstructorsTests):
    """Run the shared constructor tests unchanged."""
    pass
class TestReshaping(base.BaseReshapingTests):
    """Run the shared reshaping tests unchanged."""
    pass

    # for test_concat_mixed_dtypes test
    # concat of an Integer and Int coerces to object dtype
    # TODO(jreback) once integrated this would
class TestGetitem(base.BaseGetitemTests):
    """Run the shared __getitem__ tests unchanged."""
    pass
class TestSetitem(base.BaseSetitemTests):
    """Run the shared __setitem__ tests unchanged."""
    pass
class TestMissing(base.BaseMissingTests):
    """Run the shared missing-value tests unchanged."""
    pass
class TestMethods(base.BaseMethodsTests):
    """Shared method tests with value_counts adapted for the EA dtype."""

    @pytest.mark.parametrize('dropna', [True, False])
    def test_value_counts(self, all_data, dropna):
        """value_counts on the EA matches value_counts on dense values."""
        all_data = all_data[:10]
        # Reference data: dense values (optionally with NAs removed).
        if dropna:
            other = np.array(all_data[~all_data.isna()])
        else:
            other = all_data
        result = pd.Series(all_data).value_counts(dropna=dropna).sort_index()
        expected = pd.Series(other).value_counts(dropna=dropna).sort_index()
        # The reference index must be cast back to the extension dtype.
        expected.index = expected.index.astype(all_data.dtype)
        self.assert_series_equal(result, expected)
class TestCasting(base.BaseCastingTests):
    """Run the shared casting tests unchanged."""
    pass
class TestGroupby(base.BaseGroupbyTests):
    """Run the shared groupby tests unchanged."""
    pass
class TestNumericReduce(base.BaseNumericReduceTests):
    """Run the shared numeric reduction tests unchanged."""
    pass
class TestBooleanReduce(base.BaseBooleanReduceTests):
    """Run the shared boolean reduction tests unchanged."""
    pass
class TestPrinting(base.BasePrintingTests):
    """Run the shared repr/printing tests unchanged."""
    pass
class TestParsing(base.BaseParsingTests):
    """Run the shared parsing tests unchanged."""
    pass
| |
#! /usr/bin/env python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test for google.protobuf.internal.well_known_types."""
__author__ = 'jieluo@google.com (Jie Luo)'
from datetime import datetime
try:
import unittest2 as unittest #PY26
except ImportError:
import unittest
from google.protobuf import any_pb2
from google.protobuf import duration_pb2
from google.protobuf import field_mask_pb2
from google.protobuf import struct_pb2
from google.protobuf import timestamp_pb2
from google.protobuf import unittest_pb2
from google.protobuf.internal import any_test_pb2
from google.protobuf.internal import test_util
from google.protobuf.internal import well_known_types
from google.protobuf import descriptor
from google.protobuf import text_format
class TimeUtilTestBase(unittest.TestCase):
  """Shared helpers asserting JSON round-trips for Timestamp and Duration."""

  def CheckTimestampConversion(self, message, text):
    """Asserts *message* serializes to *text* and *text* parses back equal."""
    self.assertEqual(text, message.ToJsonString())
    parsed_message = timestamp_pb2.Timestamp()
    parsed_message.FromJsonString(text)
    self.assertEqual(message, parsed_message)

  def CheckDurationConversion(self, message, text):
    """Asserts *message* serializes to *text* and *text* parses back equal."""
    self.assertEqual(text, message.ToJsonString())
    parsed_message = duration_pb2.Duration()
    parsed_message.FromJsonString(text)
    self.assertEqual(message, parsed_message)
class TimeUtilTest(TimeUtilTestBase):
  """Tests for Timestamp/Duration JSON, integer and datetime conversions."""

  def testTimestampSerializeAndParse(self):
    """Round-trips timestamps through JSON, including min/max/negative."""
    message = timestamp_pb2.Timestamp()
    # Generated output should contain 3, 6, or 9 fractional digits.
    message.seconds = 0
    message.nanos = 0
    self.CheckTimestampConversion(message, '1970-01-01T00:00:00Z')
    message.nanos = 10000000
    self.CheckTimestampConversion(message, '1970-01-01T00:00:00.010Z')
    message.nanos = 10000
    self.CheckTimestampConversion(message, '1970-01-01T00:00:00.000010Z')
    message.nanos = 10
    self.CheckTimestampConversion(message, '1970-01-01T00:00:00.000000010Z')
    # Test min timestamps.
    message.seconds = -62135596800
    message.nanos = 0
    self.CheckTimestampConversion(message, '0001-01-01T00:00:00Z')
    # Test max timestamps.
    message.seconds = 253402300799
    message.nanos = 999999999
    self.CheckTimestampConversion(message, '9999-12-31T23:59:59.999999999Z')
    # Test negative timestamps.
    message.seconds = -1
    self.CheckTimestampConversion(message, '1969-12-31T23:59:59.999999999Z')
    # Parsing accepts any number of fractional digits as long as they fit
    # into nano precision.
    message.FromJsonString('1970-01-01T00:00:00.1Z')
    self.assertEqual(0, message.seconds)
    self.assertEqual(100000000, message.nanos)
    # Parsing accepts offsets.
    message.FromJsonString('1970-01-01T00:00:00-08:00')
    self.assertEqual(8 * 3600, message.seconds)
    self.assertEqual(0, message.nanos)

  def testDurationSerializeAndParse(self):
    """Round-trips durations through JSON, including min/max values."""
    message = duration_pb2.Duration()
    # Generated output should contain 3, 6, or 9 fractional digits.
    message.seconds = 0
    message.nanos = 0
    self.CheckDurationConversion(message, '0s')
    message.nanos = 10000000
    self.CheckDurationConversion(message, '0.010s')
    message.nanos = 10000
    self.CheckDurationConversion(message, '0.000010s')
    message.nanos = 10
    self.CheckDurationConversion(message, '0.000000010s')
    # Test min and max
    message.seconds = 315576000000
    message.nanos = 999999999
    self.CheckDurationConversion(message, '315576000000.999999999s')
    message.seconds = -315576000000
    message.nanos = -999999999
    self.CheckDurationConversion(message, '-315576000000.999999999s')
    # Parsing accepts any number of fractional digits as long as they fit
    # into nano precision.
    message.FromJsonString('0.1s')
    self.assertEqual(100000000, message.nanos)
    message.FromJsonString('0.0000001s')
    self.assertEqual(100, message.nanos)

  def testTimestampIntegerConversion(self):
    """Converts timestamps to/from nanos, micros, millis and seconds."""
    message = timestamp_pb2.Timestamp()
    message.FromNanoseconds(1)
    self.assertEqual('1970-01-01T00:00:00.000000001Z',
                     message.ToJsonString())
    self.assertEqual(1, message.ToNanoseconds())
    message.FromNanoseconds(-1)
    self.assertEqual('1969-12-31T23:59:59.999999999Z',
                     message.ToJsonString())
    self.assertEqual(-1, message.ToNanoseconds())
    message.FromMicroseconds(1)
    self.assertEqual('1970-01-01T00:00:00.000001Z',
                     message.ToJsonString())
    self.assertEqual(1, message.ToMicroseconds())
    message.FromMicroseconds(-1)
    self.assertEqual('1969-12-31T23:59:59.999999Z',
                     message.ToJsonString())
    self.assertEqual(-1, message.ToMicroseconds())
    message.FromMilliseconds(1)
    self.assertEqual('1970-01-01T00:00:00.001Z',
                     message.ToJsonString())
    self.assertEqual(1, message.ToMilliseconds())
    message.FromMilliseconds(-1)
    self.assertEqual('1969-12-31T23:59:59.999Z',
                     message.ToJsonString())
    self.assertEqual(-1, message.ToMilliseconds())
    message.FromSeconds(1)
    self.assertEqual('1970-01-01T00:00:01Z',
                     message.ToJsonString())
    self.assertEqual(1, message.ToSeconds())
    message.FromSeconds(-1)
    self.assertEqual('1969-12-31T23:59:59Z',
                     message.ToJsonString())
    self.assertEqual(-1, message.ToSeconds())
    message.FromNanoseconds(1999)
    self.assertEqual(1, message.ToMicroseconds())
    # For negative values, Timestamp will be rounded down.
    # For example, "1969-12-31T23:59:59.5Z" (i.e., -0.5s) rounded to seconds
    # will be "1969-12-31T23:59:59Z" (i.e., -1s) rather than
    # "1970-01-01T00:00:00Z" (i.e., 0s).
    message.FromNanoseconds(-1999)
    self.assertEqual(-2, message.ToMicroseconds())

  def testDurationIntegerConversion(self):
    """Converts durations to/from nanos, micros, millis and seconds."""
    message = duration_pb2.Duration()
    message.FromNanoseconds(1)
    self.assertEqual('0.000000001s',
                     message.ToJsonString())
    self.assertEqual(1, message.ToNanoseconds())
    message.FromNanoseconds(-1)
    self.assertEqual('-0.000000001s',
                     message.ToJsonString())
    self.assertEqual(-1, message.ToNanoseconds())
    message.FromMicroseconds(1)
    self.assertEqual('0.000001s',
                     message.ToJsonString())
    self.assertEqual(1, message.ToMicroseconds())
    message.FromMicroseconds(-1)
    self.assertEqual('-0.000001s',
                     message.ToJsonString())
    self.assertEqual(-1, message.ToMicroseconds())
    message.FromMilliseconds(1)
    self.assertEqual('0.001s',
                     message.ToJsonString())
    self.assertEqual(1, message.ToMilliseconds())
    message.FromMilliseconds(-1)
    self.assertEqual('-0.001s',
                     message.ToJsonString())
    self.assertEqual(-1, message.ToMilliseconds())
    message.FromSeconds(1)
    self.assertEqual('1s', message.ToJsonString())
    self.assertEqual(1, message.ToSeconds())
    message.FromSeconds(-1)
    self.assertEqual('-1s',
                     message.ToJsonString())
    self.assertEqual(-1, message.ToSeconds())
    # Test truncation behavior.
    message.FromNanoseconds(1999)
    self.assertEqual(1, message.ToMicroseconds())
    # For negative values, Duration will be rounded towards 0.
    message.FromNanoseconds(-1999)
    self.assertEqual(-1, message.ToMicroseconds())

  # NOTE(review): method name has a typo ("Converison"); kept as-is to
  # avoid changing the test's discovered name.
  def testDatetimeConverison(self):
    """Converts between Timestamp and datetime."""
    message = timestamp_pb2.Timestamp()
    dt = datetime(1970, 1, 1)
    message.FromDatetime(dt)
    self.assertEqual(dt, message.ToDatetime())
    message.FromMilliseconds(1999)
    self.assertEqual(datetime(1970, 1, 1, 0, 0, 1, 999000),
                     message.ToDatetime())

  def testTimedeltaConversion(self):
    """Converts between Duration and timedelta, both signs."""
    message = duration_pb2.Duration()
    message.FromNanoseconds(1999999999)
    td = message.ToTimedelta()
    self.assertEqual(1, td.seconds)
    self.assertEqual(999999, td.microseconds)
    message.FromNanoseconds(-1999999999)
    td = message.ToTimedelta()
    self.assertEqual(-1, td.days)
    self.assertEqual(86398, td.seconds)
    self.assertEqual(1, td.microseconds)
    message.FromMicroseconds(-1)
    td = message.ToTimedelta()
    self.assertEqual(-1, td.days)
    self.assertEqual(86399, td.seconds)
    self.assertEqual(999999, td.microseconds)
    converted_message = duration_pb2.Duration()
    converted_message.FromTimedelta(td)
    self.assertEqual(message, converted_message)

  def testInvalidTimestamp(self):
    """Rejects out-of-range, malformed and over-precise timestamps."""
    message = timestamp_pb2.Timestamp()
    self.assertRaisesRegex(
        ValueError,
        'time data \'10000-01-01T00:00:00\' does not match'
        ' format \'%Y-%m-%dT%H:%M:%S\'',
        message.FromJsonString, '10000-01-01T00:00:00.00Z')
    self.assertRaisesRegex(
        well_known_types.ParseError,
        'nanos 0123456789012 more than 9 fractional digits.',
        message.FromJsonString,
        '1970-01-01T00:00:00.0123456789012Z')
    self.assertRaisesRegex(
        well_known_types.ParseError,
        (r'Invalid timezone offset value: \+08.'),
        message.FromJsonString,
        '1972-01-01T01:00:00.01+08',)
    self.assertRaisesRegex(
        ValueError,
        'year is out of range',
        message.FromJsonString,
        '0000-01-01T00:00:00Z')
    message.seconds = 253402300800
    self.assertRaisesRegex(
        OverflowError,
        'date value out of range',
        message.ToJsonString)

  def testInvalidDuration(self):
    """Rejects malformed and out-of-range durations."""
    message = duration_pb2.Duration()
    self.assertRaisesRegex(
        well_known_types.ParseError,
        'Duration must end with letter "s": 1.',
        message.FromJsonString, '1')
    self.assertRaisesRegex(
        well_known_types.ParseError,
        'Couldn\'t parse duration: 1...2s.',
        message.FromJsonString, '1...2s')
    text = '-315576000001.000000000s'
    self.assertRaisesRegex(
        well_known_types.Error,
        r'Duration is not valid\: Seconds -315576000001 must be in range'
        r' \[-315576000000\, 315576000000\].',
        message.FromJsonString, text)
    text = '315576000001.000000000s'
    self.assertRaisesRegex(
        well_known_types.Error,
        r'Duration is not valid\: Seconds 315576000001 must be in range'
        r' \[-315576000000\, 315576000000\].',
        message.FromJsonString, text)
    message.seconds = -315576000001
    message.nanos = 0
    self.assertRaisesRegex(
        well_known_types.Error,
        r'Duration is not valid\: Seconds -315576000001 must be in range'
        r' \[-315576000000\, 315576000000\].',
        message.ToJsonString)
class FieldMaskTest(unittest.TestCase):
  """Tests for FieldMask JSON formatting, set operations and merging."""

  def testStringFormat(self):
    """JSON round-trip of paths, including snake_case <-> camelCase."""
    mask = field_mask_pb2.FieldMask()
    self.assertEqual('', mask.ToJsonString())
    mask.paths.append('foo')
    self.assertEqual('foo', mask.ToJsonString())
    mask.paths.append('bar')
    self.assertEqual('foo,bar', mask.ToJsonString())
    mask.FromJsonString('')
    self.assertEqual('', mask.ToJsonString())
    mask.FromJsonString('foo')
    self.assertEqual(['foo'], mask.paths)
    mask.FromJsonString('foo,bar')
    self.assertEqual(['foo', 'bar'], mask.paths)
    # Test camel case
    mask.Clear()
    mask.paths.append('foo_bar')
    self.assertEqual('fooBar', mask.ToJsonString())
    mask.paths.append('bar_quz')
    self.assertEqual('fooBar,barQuz', mask.ToJsonString())
    mask.FromJsonString('')
    self.assertEqual('', mask.ToJsonString())
    mask.FromJsonString('fooBar')
    self.assertEqual(['foo_bar'], mask.paths)
    mask.FromJsonString('fooBar,barQuz')
    self.assertEqual(['foo_bar', 'bar_quz'], mask.paths)

  def testDescriptorToFieldMask(self):
    """Builds a mask from a descriptor and validates paths against it."""
    mask = field_mask_pb2.FieldMask()
    msg_descriptor = unittest_pb2.TestAllTypes.DESCRIPTOR
    mask.AllFieldsFromDescriptor(msg_descriptor)
    self.assertEqual(75, len(mask.paths))
    self.assertTrue(mask.IsValidForDescriptor(msg_descriptor))
    for field in msg_descriptor.fields:
      self.assertTrue(field.name in mask.paths)
    mask.paths.append('optional_nested_message.bb')
    self.assertTrue(mask.IsValidForDescriptor(msg_descriptor))
    # Sub-paths through a repeated field are not valid.
    mask.paths.append('repeated_nested_message.bb')
    self.assertFalse(mask.IsValidForDescriptor(msg_descriptor))

  def testCanonicalFrom(self):
    """Canonical form: sorted, de-duplicated, covered sub-paths removed."""
    mask = field_mask_pb2.FieldMask()
    out_mask = field_mask_pb2.FieldMask()
    # Paths will be sorted.
    mask.FromJsonString('baz.quz,bar,foo')
    out_mask.CanonicalFormFromMask(mask)
    self.assertEqual('bar,baz.quz,foo', out_mask.ToJsonString())
    # Duplicated paths will be removed.
    mask.FromJsonString('foo,bar,foo')
    out_mask.CanonicalFormFromMask(mask)
    self.assertEqual('bar,foo', out_mask.ToJsonString())
    # Sub-paths of other paths will be removed.
    mask.FromJsonString('foo.b1,bar.b1,foo.b2,bar')
    out_mask.CanonicalFormFromMask(mask)
    self.assertEqual('bar,foo.b1,foo.b2', out_mask.ToJsonString())
    # Test more deeply nested cases.
    mask.FromJsonString(
        'foo.bar.baz1,foo.bar.baz2.quz,foo.bar.baz2')
    out_mask.CanonicalFormFromMask(mask)
    self.assertEqual('foo.bar.baz1,foo.bar.baz2',
                     out_mask.ToJsonString())
    mask.FromJsonString(
        'foo.bar.baz1,foo.bar.baz2,foo.bar.baz2.quz')
    out_mask.CanonicalFormFromMask(mask)
    self.assertEqual('foo.bar.baz1,foo.bar.baz2',
                     out_mask.ToJsonString())
    mask.FromJsonString(
        'foo.bar.baz1,foo.bar.baz2,foo.bar.baz2.quz,foo.bar')
    out_mask.CanonicalFormFromMask(mask)
    self.assertEqual('foo.bar', out_mask.ToJsonString())
    mask.FromJsonString(
        'foo.bar.baz1,foo.bar.baz2,foo.bar.baz2.quz,foo')
    out_mask.CanonicalFormFromMask(mask)
    self.assertEqual('foo', out_mask.ToJsonString())

  def testUnion(self):
    """Union of masks merges, de-duplicates and collapses covered paths."""
    mask1 = field_mask_pb2.FieldMask()
    mask2 = field_mask_pb2.FieldMask()
    out_mask = field_mask_pb2.FieldMask()
    mask1.FromJsonString('foo,baz')
    mask2.FromJsonString('bar,quz')
    out_mask.Union(mask1, mask2)
    self.assertEqual('bar,baz,foo,quz', out_mask.ToJsonString())
    # Overlap with duplicated paths.
    mask1.FromJsonString('foo,baz.bb')
    mask2.FromJsonString('baz.bb,quz')
    out_mask.Union(mask1, mask2)
    self.assertEqual('baz.bb,foo,quz', out_mask.ToJsonString())
    # Overlap with paths covering some other paths.
    mask1.FromJsonString('foo.bar.baz,quz')
    mask2.FromJsonString('foo.bar,bar')
    out_mask.Union(mask1, mask2)
    self.assertEqual('bar,foo.bar,quz', out_mask.ToJsonString())

  def testIntersect(self):
    """Intersection keeps only paths (or sub-paths) present in both masks."""
    mask1 = field_mask_pb2.FieldMask()
    mask2 = field_mask_pb2.FieldMask()
    out_mask = field_mask_pb2.FieldMask()
    # Test cases without overlapping.
    mask1.FromJsonString('foo,baz')
    mask2.FromJsonString('bar,quz')
    out_mask.Intersect(mask1, mask2)
    self.assertEqual('', out_mask.ToJsonString())
    # Overlap with duplicated paths.
    mask1.FromJsonString('foo,baz.bb')
    mask2.FromJsonString('baz.bb,quz')
    out_mask.Intersect(mask1, mask2)
    self.assertEqual('baz.bb', out_mask.ToJsonString())
    # Overlap with paths covering some other paths.
    mask1.FromJsonString('foo.bar.baz,quz')
    mask2.FromJsonString('foo.bar,bar')
    out_mask.Intersect(mask1, mask2)
    self.assertEqual('foo.bar.baz', out_mask.ToJsonString())
    mask1.FromJsonString('foo.bar,bar')
    mask2.FromJsonString('foo.bar.baz,quz')
    out_mask.Intersect(mask1, mask2)
    self.assertEqual('foo.bar.baz', out_mask.ToJsonString())

  def testMergeMessage(self):
    """MergeMessage copies only masked fields; checks merge/replace modes."""
    # Test merge one field.
    src = unittest_pb2.TestAllTypes()
    test_util.SetAllFields(src)
    for field in src.DESCRIPTOR.fields:
      if field.containing_oneof:
        continue
      field_name = field.name
      dst = unittest_pb2.TestAllTypes()
      # Only set one path to mask.
      mask = field_mask_pb2.FieldMask()
      mask.paths.append(field_name)
      mask.MergeMessage(src, dst)
      # The expected result message.
      msg = unittest_pb2.TestAllTypes()
      if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
        repeated_src = getattr(src, field_name)
        repeated_msg = getattr(msg, field_name)
        if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
          for item in repeated_src:
            repeated_msg.add().CopyFrom(item)
        else:
          repeated_msg.extend(repeated_src)
      elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
        getattr(msg, field_name).CopyFrom(getattr(src, field_name))
      else:
        setattr(msg, field_name, getattr(src, field_name))
      # Only field specified in mask is merged.
      self.assertEqual(msg, dst)
    # Test merge nested fields.
    nested_src = unittest_pb2.NestedTestAllTypes()
    nested_dst = unittest_pb2.NestedTestAllTypes()
    nested_src.child.payload.optional_int32 = 1234
    nested_src.child.child.payload.optional_int32 = 5678
    mask = field_mask_pb2.FieldMask()
    mask.FromJsonString('child.payload')
    mask.MergeMessage(nested_src, nested_dst)
    self.assertEqual(1234, nested_dst.child.payload.optional_int32)
    self.assertEqual(0, nested_dst.child.child.payload.optional_int32)
    mask.FromJsonString('child.child.payload')
    mask.MergeMessage(nested_src, nested_dst)
    self.assertEqual(1234, nested_dst.child.payload.optional_int32)
    self.assertEqual(5678, nested_dst.child.child.payload.optional_int32)
    nested_dst.Clear()
    mask.FromJsonString('child.child.payload')
    mask.MergeMessage(nested_src, nested_dst)
    self.assertEqual(0, nested_dst.child.payload.optional_int32)
    self.assertEqual(5678, nested_dst.child.child.payload.optional_int32)
    nested_dst.Clear()
    mask.FromJsonString('child')
    mask.MergeMessage(nested_src, nested_dst)
    self.assertEqual(1234, nested_dst.child.payload.optional_int32)
    self.assertEqual(5678, nested_dst.child.child.payload.optional_int32)
    # Test MergeOptions.
    nested_dst.Clear()
    nested_dst.child.payload.optional_int64 = 4321
    # Message fields will be merged by default.
    mask.FromJsonString('child.payload')
    mask.MergeMessage(nested_src, nested_dst)
    self.assertEqual(1234, nested_dst.child.payload.optional_int32)
    self.assertEqual(4321, nested_dst.child.payload.optional_int64)
    # Change the behavior to replace message fields.
    mask.FromJsonString('child.payload')
    mask.MergeMessage(nested_src, nested_dst, True, False)
    self.assertEqual(1234, nested_dst.child.payload.optional_int32)
    self.assertEqual(0, nested_dst.child.payload.optional_int64)
    # By default, fields missing in source are not cleared in destination.
    nested_dst.payload.optional_int32 = 1234
    self.assertTrue(nested_dst.HasField('payload'))
    mask.FromJsonString('payload')
    mask.MergeMessage(nested_src, nested_dst)
    self.assertTrue(nested_dst.HasField('payload'))
    # But they are cleared when replacing message fields.
    nested_dst.Clear()
    nested_dst.payload.optional_int32 = 1234
    mask.FromJsonString('payload')
    mask.MergeMessage(nested_src, nested_dst, True, False)
    self.assertFalse(nested_dst.HasField('payload'))
    nested_src.payload.repeated_int32.append(1234)
    nested_dst.payload.repeated_int32.append(5678)
    # Repeated fields will be appended by default.
    mask.FromJsonString('payload.repeatedInt32')
    mask.MergeMessage(nested_src, nested_dst)
    self.assertEqual(2, len(nested_dst.payload.repeated_int32))
    self.assertEqual(5678, nested_dst.payload.repeated_int32[0])
    self.assertEqual(1234, nested_dst.payload.repeated_int32[1])
    # Change the behavior to replace repeated fields.
    mask.FromJsonString('payload.repeatedInt32')
    mask.MergeMessage(nested_src, nested_dst, False, True)
    self.assertEqual(1, len(nested_dst.payload.repeated_int32))
    self.assertEqual(1234, nested_dst.payload.repeated_int32[0])

  def testSnakeCaseToCamelCase(self):
    """Validates snake->camel conversion and its error cases."""
    self.assertEqual('fooBar',
                     well_known_types._SnakeCaseToCamelCase('foo_bar'))
    self.assertEqual('FooBar',
                     well_known_types._SnakeCaseToCamelCase('_foo_bar'))
    self.assertEqual('foo3Bar',
                     well_known_types._SnakeCaseToCamelCase('foo3_bar'))
    # No uppercase letter is allowed.
    self.assertRaisesRegex(
        well_known_types.Error,
        'Fail to print FieldMask to Json string: Path name Foo must '
        'not contain uppercase letters.',
        well_known_types._SnakeCaseToCamelCase,
        'Foo')
    # Any character after a "_" must be a lowercase letter.
    #   1. "_" cannot be followed by another "_".
    #   2. "_" cannot be followed by a digit.
    #   3. "_" cannot appear as the last character.
    self.assertRaisesRegex(
        well_known_types.Error,
        'Fail to print FieldMask to Json string: The character after a '
        '"_" must be a lowercase letter in path name foo__bar.',
        well_known_types._SnakeCaseToCamelCase,
        'foo__bar')
    self.assertRaisesRegex(
        well_known_types.Error,
        'Fail to print FieldMask to Json string: The character after a '
        '"_" must be a lowercase letter in path name foo_3bar.',
        well_known_types._SnakeCaseToCamelCase,
        'foo_3bar')
    self.assertRaisesRegex(
        well_known_types.Error,
        'Fail to print FieldMask to Json string: Trailing "_" in path '
        'name foo_bar_.',
        well_known_types._SnakeCaseToCamelCase,
        'foo_bar_')

  def testCamelCaseToSnakeCase(self):
    """Validates camel->snake conversion and its error case."""
    self.assertEqual('foo_bar',
                     well_known_types._CamelCaseToSnakeCase('fooBar'))
    self.assertEqual('_foo_bar',
                     well_known_types._CamelCaseToSnakeCase('FooBar'))
    self.assertEqual('foo3_bar',
                     well_known_types._CamelCaseToSnakeCase('foo3Bar'))
    self.assertRaisesRegex(
        well_known_types.ParseError,
        'Fail to parse FieldMask: Path name foo_bar must not contain "_"s.',
        well_known_types._CamelCaseToSnakeCase,
        'foo_bar')
class StructTest(unittest.TestCase):
  """Tests the dict/list wrapper behavior of google.protobuf.Struct."""

  def testStruct(self):
    struct = struct_pb2.Struct()
    struct_class = struct.__class__
    # Scalars assign directly; nested structs/lists via get_or_create_*.
    struct['key1'] = 5
    struct['key2'] = 'abc'
    struct['key3'] = True
    struct.get_or_create_struct('key4')['subkey'] = 11.0
    struct_list = struct.get_or_create_list('key5')
    struct_list.extend([6, 'seven', True, False, None])
    struct_list.add_struct()['subkey2'] = 9
    self.assertTrue(isinstance(struct, well_known_types.Struct))
    self.assertEqual(5, struct['key1'])
    self.assertEqual('abc', struct['key2'])
    self.assertIs(True, struct['key3'])
    self.assertEqual(11, struct['key4']['subkey'])
    inner_struct = struct_class()
    inner_struct['subkey2'] = 9
    self.assertEqual([6, 'seven', True, False, None, inner_struct],
                     list(struct['key5'].items()))
    # Binary serialization round-trip preserves all values and types.
    serialized = struct.SerializeToString()
    struct2 = struct_pb2.Struct()
    struct2.ParseFromString(serialized)
    self.assertEqual(struct, struct2)
    self.assertTrue(isinstance(struct2, well_known_types.Struct))
    self.assertEqual(5, struct2['key1'])
    self.assertEqual('abc', struct2['key2'])
    self.assertIs(True, struct2['key3'])
    self.assertEqual(11, struct2['key4']['subkey'])
    self.assertEqual([6, 'seven', True, False, None, inner_struct],
                     list(struct2['key5'].items()))
    # ListValue supports indexing, item assignment, and nested lists.
    struct_list = struct2['key5']
    self.assertEqual(6, struct_list[0])
    self.assertEqual('seven', struct_list[1])
    self.assertEqual(True, struct_list[2])
    self.assertEqual(False, struct_list[3])
    self.assertEqual(None, struct_list[4])
    self.assertEqual(inner_struct, struct_list[5])
    struct_list[1] = 7
    self.assertEqual(7, struct_list[1])
    struct_list.add_list().extend([1, 'two', True, False, None])
    self.assertEqual([1, 'two', True, False, None],
                     list(struct_list[6].items()))
    # Text-format round-trip also preserves the struct.
    text_serialized = str(struct)
    struct3 = struct_pb2.Struct()
    text_format.Merge(text_serialized, struct3)
    self.assertEqual(struct, struct3)
    # get_or_create_struct replaces an existing non-struct value.
    struct.get_or_create_struct('key3')['replace'] = 12
    self.assertEqual(12, struct['key3']['replace'])
class AnyTest(unittest.TestCase):
  """Tests Pack/Unpack/Is/TypeName behavior of google.protobuf.Any."""

  def testAnyMessage(self):
    # Creates and sets message.
    msg = any_test_pb2.TestAny()
    msg_descriptor = msg.DESCRIPTOR
    all_types = unittest_pb2.TestAllTypes()
    all_descriptor = all_types.DESCRIPTOR
    # Non-ASCII content verifies the packed bytes are serialization-exact.
    all_types.repeated_string.append('\u00fc\ua71f')
    # Packs to Any.
    msg.value.Pack(all_types)
    self.assertEqual(msg.value.type_url,
                     'type.googleapis.com/%s' % all_descriptor.full_name)
    self.assertEqual(msg.value.value,
                     all_types.SerializeToString())
    # Tests Is() method.
    self.assertTrue(msg.value.Is(all_descriptor))
    self.assertFalse(msg.value.Is(msg_descriptor))
    # Unpacks Any.
    unpacked_message = unittest_pb2.TestAllTypes()
    self.assertTrue(msg.value.Unpack(unpacked_message))
    self.assertEqual(all_types, unpacked_message)
    # Unpacks to different type: must report failure, not raise.
    self.assertFalse(msg.value.Unpack(msg))
    # Only Any messages have Pack method.
    try:
      msg.Pack(all_types)
    except AttributeError:
      pass
    else:
      raise AttributeError('%s should not have Pack method.' %
                           msg_descriptor.full_name)

  def testMessageName(self):
    # Creates and sets message.
    submessage = any_test_pb2.TestAny()
    submessage.int_value = 12345
    msg = any_pb2.Any()
    msg.Pack(submessage)
    # TypeName() strips the URL prefix from type_url.
    self.assertEqual(msg.TypeName(), 'google.protobuf.internal.TestAny')

  def testPackWithCustomTypeUrl(self):
    submessage = any_test_pb2.TestAny()
    submessage.int_value = 12345
    msg = any_pb2.Any()
    # Pack with a custom type URL prefix.
    msg.Pack(submessage, 'type.myservice.com')
    self.assertEqual(msg.type_url,
                     'type.myservice.com/%s' % submessage.DESCRIPTOR.full_name)
    # Pack with a custom type URL prefix ending with '/'.
    msg.Pack(submessage, 'type.myservice.com/')
    self.assertEqual(msg.type_url,
                     'type.myservice.com/%s' % submessage.DESCRIPTOR.full_name)
    # Pack with an empty type URL prefix.
    msg.Pack(submessage, '')
    self.assertEqual(msg.type_url,
                     '/%s' % submessage.DESCRIPTOR.full_name)
    # Test unpacking the type.
    unpacked_message = any_test_pb2.TestAny()
    self.assertTrue(msg.Unpack(unpacked_message))
    self.assertEqual(submessage, unpacked_message)
if __name__ == '__main__':
  # Run all test cases in this module.
  unittest.main()
| |
# Copyright (c) 2018 Open Source Foundries Limited.
#
# SPDX-License-Identifier: Apache-2.0
'''Common code used by commands which execute runners.
'''
import argparse
from os import getcwd, path
from subprocess import CalledProcessError
import textwrap
from west import cmake
from west import log
from west import util
from west.build import DEFAULT_BUILD_DIR, is_zephyr_build
from west.commands import CommandContextError
from runners import get_runner_cls, ZephyrBinaryRunner
from zephyr_ext_common import cached_runner_config
# Context-sensitive help indentation.
# Don't change this, or output from argparse won't match up.
INDENT = ' ' * 2
def add_parser_common(parser_adder, command):
    '''Register a parser for a runner-based command (flash, debug, ...).

    :param parser_adder: the subparsers object to add the parser to
    :param command: the west command object (provides name, help,
                    description)
    :return: the newly created argparse parser
    '''
    parser = parser_adder.add_parser(
        command.name,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        help=command.help,
        description=command.description)

    # Remember to update scripts/west-completion.bash if you add or remove
    # flags
    parser.add_argument('-H', '--context', action='store_true',
                        help='''Rebuild application and print context-sensitive
                        help; this may be combined with --runner to restrict
                        output to a given runner.''')

    group = parser.add_argument_group(title='General Options')

    group.add_argument('-d', '--build-dir',
                       help='''Build directory to obtain runner information
                       from. If not given, this command tries to use build/
                       and then the current working directory, in that
                       order.''')
    group.add_argument('-c', '--cmake-cache',
                       help='''Path to CMake cache file containing runner
                       configuration (this is generated by the Zephyr
                       build system when compiling binaries);
                       default: {}.

                       If this is a relative path, it is assumed relative to
                       the build directory. An absolute path can also be
                       given instead.'''.format(cmake.DEFAULT_CACHE))
    group.add_argument('-r', '--runner',
                       help='''If given, overrides any cached {}
                       runner.'''.format(command.name))
    group.add_argument('--skip-rebuild', action='store_true',
                       help='''If given, do not rebuild the application
                       before running {} commands.'''.format(command.name))

    group = parser.add_argument_group(
        title='Configuration overrides',
        description=textwrap.dedent('''\
        These values usually come from the Zephyr build system itself
        as stored in the CMake cache; providing these options
        overrides those settings.'''))

    # Important:
    #
    # 1. The destination variables of these options must match
    #    the RunnerConfig slots.
    # 2. The default values for all of these must be None.
    #
    # This is how we detect if the user provided them or not when
    # overriding values from the cached configuration.

    # Bug fix: compare the command's *name* to "flash"; comparing the
    # command object itself to a string was always False, so the help
    # text always said "debug".
    command_verb = "flash" if command.name == "flash" else "debug"

    group.add_argument('--board-dir',
                       help='Zephyr board directory')
    group.add_argument('--elf-file',
                       help='Path to elf file to {0}'.format(command_verb))
    group.add_argument('--hex-file',
                       help='Path to hex file to {0}'.format(command_verb))
    group.add_argument('--bin-file',
                       help='Path to binary file to {0}'.format(command_verb))
    group.add_argument('--gdb',
                       help='Path to GDB, if applicable')
    group.add_argument('--openocd',
                       help='Path to OpenOCD, if applicable')
    group.add_argument(
        '--openocd-search',
        help='Path to add to OpenOCD search path, if applicable')

    return parser
def desc_common(command_name):
    '''Return the help epilog text shared by all runner-based commands.'''
    template = textwrap.dedent('''\
    Any options not recognized by this command are passed to the
    back-end {command} runner (run "west {command} --context"
    for help on available runner-specific options).
    If you need to pass an option to a runner which has the
    same name as one recognized by this command, you can
    end argument parsing with a '--', like so:
    west {command} --{command}-arg=value -- --runner-arg=value2
    ''')
    return template.format(command=command_name)
def _override_config_from_namespace(cfg, namespace):
'''Override a RunnerConfig's contents with command-line values.'''
for var in cfg.__slots__:
if var in namespace:
val = getattr(namespace, var)
if val is not None:
setattr(cfg, var, val)
def _build_dir(args, die_if_none=True):
    '''Resolve the build directory: --build-dir, ./build/, or the cwd.

    Dies (or returns None if die_if_none is False) when no candidate is
    a Zephyr build directory.'''
    if args.build_dir:
        return args.build_dir
    cwd = getcwd()
    default = path.join(cwd, DEFAULT_BUILD_DIR)
    # Prefer the conventional build/ subdirectory over the cwd itself.
    for candidate in (default, cwd):
        if is_zephyr_build(candidate):
            return candidate
    if die_if_none:
        log.die('--build-dir was not given, and neither {} '
                'nor {} are zephyr build directories.'.
                format(default, cwd))
    return None
def do_run_common(command, args, runner_args, cached_runner_var):
    '''Shared worker for runner-based commands (flash, debug, ...).

    Rebuilds the application (unless --skip-rebuild), resolves the
    runner from the CMake cache with a possible --runner override, then
    creates and runs it with merged cached + command-line arguments.'''
    if args.context:
        # -H/--context: print help and stop instead of running.
        _dump_context(command, args, runner_args, cached_runner_var)
        return
    command_name = command.name
    build_dir = _build_dir(args)
    if not args.skip_rebuild:
        try:
            cmake.run_build(build_dir)
        except CalledProcessError:
            if args.build_dir:
                log.die('cannot run {}, build in {} failed'.format(
                    command_name, args.build_dir))
            else:
                log.die('cannot run {}; no --build-dir given and build in '
                        'current directory {} failed'.format(command_name,
                                                             build_dir))
    # Runner creation, phase 1.
    #
    # Get the default runner name from the cache, allowing a command
    # line override. Get the ZephyrBinaryRunner class by name, and
    # make sure it supports the command.
    cache_file = path.join(build_dir, args.cmake_cache or cmake.DEFAULT_CACHE)
    cache = cmake.CMakeCache(cache_file)
    board = cache['CACHED_BOARD']
    available = cache.get_list('ZEPHYR_RUNNERS')
    if not available:
        log.wrn('No cached runners are available in', cache_file)
    runner = args.runner or cache.get(cached_runner_var)
    if runner is None:
        raise CommandContextError(textwrap.dedent("""
        No {} runner available for {}. Please either specify one
        manually, or check your board's documentation for
        alternative instructions.""".format(command_name, board)))
    log.inf('Using runner:', runner)
    if runner not in available:
        # Not fatal: the user may know what they are doing.
        log.wrn('Runner {} is not configured for use with {}, '
                'this may not work'.format(runner, board))
    runner_cls = get_runner_cls(runner)
    if command_name not in runner_cls.capabilities().commands:
        log.die('Runner {} does not support command {}'.format(
            runner, command_name))
    # Runner creation, phase 2.
    #
    # At this point, the common options above are already parsed in
    # 'args', and unrecognized arguments are in 'runner_args'.
    #
    # - Pull the RunnerConfig out of the cache
    # - Override cached values with applicable command-line options
    cfg = cached_runner_config(build_dir, cache)
    _override_config_from_namespace(cfg, args)
    # Runner creation, phase 3.
    #
    # - Pull out cached runner arguments, and append command-line
    #   values (which should override the cache)
    # - Construct a runner-specific argument parser to handle cached
    #   values plus overrides given in runner_args
    # - Parse arguments and create runner instance from final
    #   RunnerConfig and parsed arguments.
    cached_runner_args = cache.get_list(
        'ZEPHYR_RUNNER_ARGS_{}'.format(cmake.make_c_identifier(runner)))
    assert isinstance(runner_args, list), runner_args
    # If the user passed -- to force the parent argument parser to stop
    # parsing, it will show up here, and needs to be filtered out.
    runner_args = [arg for arg in runner_args if arg != '--']
    final_runner_args = cached_runner_args + runner_args
    parser = argparse.ArgumentParser(prog=runner)
    runner_cls.add_parser(parser)
    parsed_args, unknown = parser.parse_known_args(args=final_runner_args)
    if unknown:
        raise CommandContextError('Runner', runner,
                                  'received unknown arguments', unknown)
    runner = runner_cls.create(cfg, parsed_args)
    runner.run(command_name)
#
# Context-specific help
#
def _dump_context(command, args, runner_args, cached_runner_var):
    '''Print context-sensitive help for -H/--context, rebuilding first.'''
    build_dir = _build_dir(args, die_if_none=False)
    # Try to figure out the CMake cache file based on the build
    # directory or an explicit argument.
    if build_dir is not None:
        cache_file = path.abspath(
            path.join(build_dir, args.cmake_cache or cmake.DEFAULT_CACHE))
    elif args.cmake_cache:
        cache_file = path.abspath(args.cmake_cache)
    else:
        cache_file = None
    # Load the cache itself, if possible.
    if cache_file is None:
        log.wrn('No build directory (--build-dir) or CMake cache '
                '(--cache-file) given or found; output will be limited')
        cache = None
    else:
        try:
            cache = cmake.CMakeCache(cache_file)
        except Exception:
            log.die('Cannot load cache {}.'.format(cache_file))
    # If we have a build directory, try to ensure build artifacts are
    # up to date. If that doesn't work, still try to print information
    # on a best-effort basis.
    if build_dir and not args.skip_rebuild:
        try:
            cmake.run_build(build_dir)
        except CalledProcessError:
            msg = 'Failed re-building application; cannot load context. '
            if args.build_dir:
                msg += 'Is {} the right --build-dir?'.format(args.build_dir)
            else:
                msg += textwrap.dedent('''\
                Use --build-dir (-d) to specify a build directory; the one
                used was {}.'''.format(build_dir))
            log.die('\n'.join(textwrap.wrap(msg, initial_indent='',
                                            subsequent_indent=INDENT,
                                            break_on_hyphens=False)))
    if cache is None:
        _dump_no_context_info(command, args)
        if not args.runner:
            return
    if args.runner:
        # Just information on one runner was requested.
        _dump_one_runner_info(cache, args, build_dir, INDENT)
        return
    # From here on, cache is not None (both cache-None paths above
    # return) and no single runner was requested.
    board = cache['CACHED_BOARD']
    all_cls = {cls.name(): cls for cls in ZephyrBinaryRunner.get_runners() if
               command.name in cls.capabilities().commands}
    available = [r for r in cache.get_list('ZEPHYR_RUNNERS') if r in all_cls]
    available_cls = {r: all_cls[r] for r in available if r in all_cls}
    default_runner = cache.get(cached_runner_var)
    cfg = cached_runner_config(build_dir, cache)
    log.inf('All Zephyr runners which support {}:'.format(command.name),
            colorize=True)
    for line in util.wrap(', '.join(all_cls.keys()), INDENT):
        log.inf(line)
    log.inf('(Not all may work with this build, see available runners below.)',
            colorize=True)
    # Bug fix: a second 'if cache is None' check used to live here; it was
    # unreachable (see above) and called the nonexistent log.warn(), so it
    # has been removed.
    log.inf('Build directory:', colorize=True)
    log.inf(INDENT + build_dir)
    log.inf('Board:', colorize=True)
    log.inf(INDENT + board)
    log.inf('CMake cache:', colorize=True)
    log.inf(INDENT + cache_file)
    if not available:
        # Bail with a message if no runners are available.
        msg = ('No runners available for {}. '
               'Consult the documentation for instructions on how to run '
               'binaries on this target.').format(board)
        for line in util.wrap(msg, ''):
            log.inf(line, colorize=True)
        return
    log.inf('Available {} runners:'.format(command.name), colorize=True)
    log.inf(INDENT + ', '.join(available))
    log.inf('Additional options for available', command.name, 'runners:',
            colorize=True)
    for runner in available:
        _dump_runner_opt_help(runner, all_cls[runner])
    log.inf('Default {} runner:'.format(command.name), colorize=True)
    log.inf(INDENT + default_runner)
    _dump_runner_config(cfg, '', INDENT)
    log.inf('Runner-specific information:', colorize=True)
    for runner in available:
        log.inf('{}{}:'.format(INDENT, runner), colorize=True)
        _dump_runner_cached_opts(cache, runner, INDENT * 2, INDENT * 3)
        _dump_runner_caps(available_cls[runner], INDENT * 2)
    if len(available) > 1:
        log.inf('(Add -r RUNNER to just print information about one runner.)',
                colorize=True)
def _dump_no_context_info(command, args):
    '''Print the runner info that is available without a CMake cache.'''
    supported = {cls.name(): cls for cls in ZephyrBinaryRunner.get_runners()
                 if command.name in cls.capabilities().commands}
    log.inf('All Zephyr runners which support {}:'.format(command.name),
            colorize=True)
    for line in util.wrap(', '.join(supported), INDENT):
        log.inf(line)
    if not args.runner:
        log.inf('Add -r RUNNER to print more information about any runner.',
                colorize=True)
def _dump_one_runner_info(cache, args, build_dir, indent):
    '''Print context for the single runner named by args.runner.'''
    runner = args.runner
    cls = get_runner_cls(runner)
    if cache is None:
        # Without a cache only static runner information is available.
        _dump_runner_opt_help(runner, cls)
        _dump_runner_caps(cls, '')
        return
    available = runner in cache.get_list('ZEPHYR_RUNNERS')
    cfg = cached_runner_config(build_dir, cache)
    for label, value in (('Build directory:', build_dir),
                         ('Board:', cache['CACHED_BOARD']),
                         ('CMake cache:', cache.cache_file)):
        log.inf(label, colorize=True)
        log.inf(INDENT + value)
    log.inf(runner, 'is available:', 'yes' if available else 'no',
            colorize=True)
    _dump_runner_opt_help(runner, cls)
    _dump_runner_config(cfg, '', indent)
    if available:
        _dump_runner_cached_opts(cache, runner, '', indent)
    _dump_runner_caps(cls, '')
    if not available:
        log.wrn('Runner', runner, 'is not configured in this build.')
def _dump_runner_caps(cls, base_indent):
    '''Print a runner class's capabilities at the given indentation.'''
    caps_indent = base_indent + INDENT
    log.inf(base_indent + 'Capabilities:', colorize=True)
    log.inf('{}{}'.format(caps_indent, cls.capabilities()))
def _dump_runner_opt_help(runner, cls):
    '''Print the runner-specific argparse options for one runner class.'''
    # Construct and print the usage text
    dummy_parser = argparse.ArgumentParser(prog='', add_help=False)
    cls.add_parser(dummy_parser)
    formatter = dummy_parser._get_formatter()
    for group in dummy_parser._action_groups:
        # Break the abstraction to filter out the 'flash', 'debug', etc.
        # TODO: come up with something cleaner (may require changes
        # in the runner core).
        actions = group._group_actions
        if len(actions) == 1 and actions[0].dest == 'command':
            # This is the lone positional argument. Skip it.
            continue
        # 'REMOVE ME' is a placeholder section title; the first line of
        # the formatted help (which would contain it) is dropped below.
        formatter.start_section('REMOVE ME')
        formatter.add_text(group.description)
        formatter.add_arguments(actions)
        formatter.end_section()
    # Get the runner help, with the "REMOVE ME" string gone
    runner_help = '\n'.join(formatter.format_help().splitlines()[1:])
    log.inf('{} options:'.format(runner), colorize=True)
    log.inf(runner_help)
def _dump_runner_config(cfg, initial_indent, subsequent_indent):
    '''Print each RunnerConfig slot as a --var=value line.'''
    log.inf('{}Cached common runner configuration:'.format(initial_indent),
            colorize=True)
    for slot in cfg.__slots__:
        value = getattr(cfg, slot)
        log.inf('{}--{}={}'.format(subsequent_indent, slot, value))
def _dump_runner_cached_opts(cache, runner, initial_indent, subsequent_indent):
    '''Print the cached runner-specific arguments, if any were stored.'''
    cached_args = _get_runner_args(cache, runner)
    if not cached_args:
        return
    log.inf('{}Cached runner-specific options:'.format(initial_indent),
            colorize=True)
    for cached_arg in cached_args:
        log.inf('{}{}'.format(subsequent_indent, cached_arg))
def _get_runner_args(cache, runner):
    '''Look up the cached argument list for the given runner.'''
    ident = cmake.make_c_identifier(runner)
    return cache.get_list('ZEPHYR_RUNNER_ARGS_{}'.format(ident))
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2014 Cloudwatt
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Rudra Rugge
from vnc_api.vnc_api import *
from config_db import *
from agent import Agent
from module_logger import ServiceMonitorModuleLogger,MessageID
from sandesh.port_tuple import ttypes as sandesh
class PortTupleAgent(Agent):
    def __init__(self, svc_mon, vnc_lib, object_db, config_section, logger):
        """Initialize the agent and register port-tuple log message types."""
        super(PortTupleAgent, self).__init__(svc_mon, vnc_lib,
            object_db, config_section)
        self.sc_ipam_obj = None
        self.logger = logger
        # Register log functions to be used for port tuple logs.
        log_funcs = {
                MessageID.ERROR : sandesh.PortTupleErrorLog,
                MessageID.INFO : sandesh.PortTupleInfoLog,
                MessageID.DEBUG : sandesh.PortTupleDebugLog,
            }
        self.logger.add_messages(**log_funcs)
        # Cache the flat service-chain IPAM used for shared instance IPs.
        self._get_service_chain_ipam()
def handle_service_type(self):
return 'port-tuple'
def _get_service_chain_ipam(self):
fq_name = ['default-domain', 'default-project', 'service-chain-flat-ipam']
self.sc_ipam_obj = self._vnc_lib.network_ipam_read(fq_name=fq_name)
    def _allocate_iip_for_family(self, iip_family, si, port, vmi):
        """Ensure a shared instance IP of iip_family ('v4'/'v6') exists for
        this service instance and interface type, and link it to vmi."""
        create_iip = True
        update_vmi = False
        # One shared IIP per (service instance, interface type, family);
        # the name encodes all three for lookup.
        iip_name = si.uuid + '-' + port['type'] + '-' + iip_family
        for iip_id in si.instance_ips:
            iip = InstanceIpSM.get(iip_id)
            if iip and iip.name == iip_name:
                # Already allocated; only link to the VMI if not linked.
                create_iip = False
                iip_id = iip.uuid
                if iip.uuid not in vmi.instance_ips:
                    update_vmi = True
                break
        if create_iip:
            iip_obj = InstanceIp(name=iip_name, instance_ip_family=iip_family)
            iip_obj.set_service_instance_ip(True)
            iip_obj.set_instance_ip_secondary(True)
            iip_obj.set_instance_ip_mode('active-active')
            # Allocate from the shared flat service-chain IPAM.
            iip_obj.add_network_ipam(self.sc_ipam_obj)
            try:
                self._vnc_lib.instance_ip_create(iip_obj)
            except RefsExistError:
                # Created concurrently elsewhere; converge via update.
                self._vnc_lib.instance_ip_update(iip_obj)
            except Exception as e:
                # Best-effort: allocation is retried on the next sync pass.
                return
            iip_id = iip_obj.uuid
            tag = ServiceInterfaceTag(interface_type=port['type'])
            self._vnc_lib.ref_update('service-instance', si.uuid,
                'instance-ip', iip_id, None, 'ADD', tag)
            InstanceIpSM.locate(iip_id)
            si.update()
        if create_iip or update_vmi:
            self._vnc_lib.ref_update('instance-ip', iip_id,
                'virtual-machine-interface', vmi.uuid, None, 'ADD')
            # Relax the ref so the VMI can be deleted independently.
            self._vnc_lib.ref_relax_for_delete(iip_id, vmi.uuid)
            vmi.update()
def _allocate_shared_iip(self, si, port, vmi):
self._allocate_iip_for_family('v4', si, port, vmi)
self._allocate_iip_for_family('v6', si, port, vmi)
    def _allocate_health_check_iip_for_family(self, si, health_id, iip_family, port, vmi):
        """Create a health-check instance IP of one family and link it to vmi."""
        iip_name = si.uuid + '-' + port['type'] + '-' + \
            iip_family + '-health-check-' + health_id
        iip_obj = InstanceIp(name=iip_name, instance_ip_family=iip_family)
        # Health-check IPs come from the VMI's own virtual network,
        # not from the shared service-chain IPAM.
        vn_obj = self._vnc_lib.virtual_network_read(id=vmi.virtual_network)
        iip_obj.add_virtual_network(vn_obj)
        iip_obj.set_service_health_check_ip(True)
        try:
            self._vnc_lib.instance_ip_create(iip_obj)
            # Relax the VN ref so the network can be deleted independently.
            self._vnc_lib.ref_relax_for_delete(iip_obj.uuid, vn_obj.uuid)
        except RefsExistError:
            self._vnc_lib.instance_ip_update(iip_obj)
        except Exception as e:
            # Best-effort: retried on the next sync pass.
            return
        InstanceIpSM.locate(iip_obj.uuid)
        self._vnc_lib.ref_update('instance-ip', iip_obj.uuid,
            'virtual-machine-interface', vmi.uuid, None, 'ADD')
        vmi.update()
def _allocate_health_check_iip(self, si, health_id, port, vmi):
self._allocate_health_check_iip_for_family(si, health_id, 'v4', port, vmi)
self._allocate_health_check_iip_for_family(si, health_id, 'v6', port, vmi)
def _delete_health_check_iip(self, iip, vmi):
for vmi_id in iip.virtual_machine_interfaces:
self._vnc_lib.ref_update('instance-ip', iip.uuid,
'virtual-machine-interface', vmi.uuid, None, 'DELETE')
try:
self._vnc_lib.instance_ip_delete(id=iip.uuid)
InstanceIpSM.delete(iip.uuid)
except NoIdError:
return
    def update_health_check_iip(self, si, port, vmi):
        """Allocate or free the health-check IIP for this VMI depending on
        whether an end-to-end health check applies to its interface type."""
        allocate_hc_iip = False
        for health_id, if_type in si.service_health_checks.items():
            health = ServiceHealthCheckSM.get(health_id)
            if not health:
                continue
            if if_type['interface_type'] != vmi.if_type:
                continue
            # Only end-to-end health checks need a dedicated IP.
            if health.params.get('health_check_type', None) != 'end-to-end':
                continue
            allocate_hc_iip = True
            break
        # Find any existing health-check IIP belonging to this service
        # instance (IIP names embed the SI UUID).
        hc_iip = None
        for iip_id in list(vmi.instance_ips):
            iip = InstanceIpSM.get(iip_id)
            if not iip or not iip.service_health_check_ip or si.uuid not in iip.name:
                continue
            hc_iip = iip
            break
        if allocate_hc_iip:
            # health_id is the matching check found by the first loop.
            self._allocate_health_check_iip(si, health_id, port, vmi)
        elif hc_iip:
            # No end-to-end check applies any more; free the stale IIP.
            self._delete_health_check_iip(hc_iip, vmi)
    def set_port_service_health_check(self, si, port, vmi):
        """Sync service-health-check refs on the VMI with the port config,
        then refresh the health-check instance IP."""
        # handle add
        for health_id in port['service-health-checks']:
            if health_id in vmi.service_health_checks:
                continue
            self._vnc_lib.ref_update('virtual-machine-interface', vmi.uuid,
                'service-health-check', health_id, None, 'ADD')
            # Relax the ref so the VMI can be deleted independently.
            self._vnc_lib.ref_relax_for_delete(vmi.uuid, health_id)
            vmi.update()
        # handle deletes
        for health_id in list(vmi.service_health_checks):
            if health_id in port['service-health-checks']:
                continue
            health = ServiceHealthCheckSM.get(health_id)
            # Keep the ref if another port tuple on this VMI still uses
            # this health check through its service instance.
            pt_present = False
            if health and health.service_instances:
                for pt_id in vmi.port_tuples:
                    si_uuid = (PortTupleSM.get(pt_id)).parent_uuid
                    si_temp = ServiceInstanceSM.get(si_uuid)
                    if not si_temp:
                        continue
                    if health.uuid in si_temp.service_health_checks:
                        pt_present = True
                        break
            if pt_present == False:
                self._vnc_lib.ref_update('virtual-machine-interface', vmi.uuid,
                    'service-health-check', health_id, None, 'DELETE')
                vmi.service_health_checks.remove(health_id)
        # update health check ip
        self.update_health_check_iip(si, port, vmi)
def set_port_static_routes(self, port, vmi):
# handle add
for irt_id in port['interface-route-tables']:
if irt_id in vmi.interface_route_tables:
continue
self._vnc_lib.ref_update('virtual-machine-interface', vmi.uuid,
'interface-route-table', irt_id, None, 'ADD')
vmi.update()
self._vnc_lib.ref_relax_for_delete(vmi.uuid, irt_id)
# handle deletes
for irt_id in list(vmi.interface_route_tables):
if irt_id in port['interface-route-tables']:
continue
self._vnc_lib.ref_update('virtual-machine-interface', vmi.uuid,
'interface-route-table', irt_id, None, 'DELETE')
vmi.update()
    def update_secondary_iip(self, vmi):
        """Keep secondary service IIPs' tracking IP and mode in sync with
        the VMI's first allowed-address-pair."""
        for iip_id in list(vmi.instance_ips):
            iip = InstanceIpSM.get(iip_id)
            if not iip:
                continue
            # Only secondary service-instance IPs track AAPs.
            if not iip.instance_ip_secondary or not iip.service_instance_ip:
                continue
            update = False
            if vmi.aaps and len(vmi.aaps):
                # Track the first AAP's IP; mode comes from the AAP
                # (defaulting to active-standby).
                if iip.secondary_tracking_ip != vmi.aaps[0]['ip']:
                    tracking_ip = vmi.aaps[0]['ip']
                    ip_mode = vmi.aaps[0].get('address_mode', 'active-standby')
                    update = True
            else:
                # No AAPs left: clear tracking, revert to active-active.
                if iip.secondary_tracking_ip:
                    tracking_ip = None
                    ip_mode = 'active-active'
                    update = True
            if not update:
                continue
            try:
                iip_obj = self._vnc_lib.instance_ip_read(id=iip.uuid)
                iip_obj.set_secondary_ip_tracking_ip(tracking_ip)
                iip_obj.set_instance_ip_mode(ip_mode)
                self._vnc_lib.instance_ip_update(iip_obj)
                iip.update()
            except NoIdError:
                self.logger.error("Instance IP %s update failed" % (iip.name))
                continue
    def set_port_allowed_address_pairs(self, port, vmi, vmi_obj):
        """Sync the VMI's allowed-address-pairs with the port config and
        refresh dependent secondary instance IPs."""
        if not port['allowed-address-pairs'] or \
           not port['allowed-address-pairs'].get('allowed_address_pair', None):
            # Config has no AAPs: clear any existing ones on the VMI.
            if vmi.aaps and len(vmi.aaps):
                vmi_obj.set_virtual_machine_interface_allowed_address_pairs(AllowedAddressPairs())
                self._vnc_lib.virtual_machine_interface_update(vmi_obj)
                vmi.update()
                self.update_secondary_iip(vmi)
            return
        aaps = port['allowed-address-pairs'].get('allowed_address_pair', None)
        # Update only if the length or any (ip, mac) pair differs.
        update_aap = False
        if len(aaps) != len(vmi.aaps or []):
            update_aap = True
        else:
            for idx in range(0, len(vmi.aaps)):
                if vmi.aaps[idx]['ip'] != aaps[idx]['ip'] or \
                   vmi.aaps[idx]['mac'] != aaps[idx]['mac']:
                    update_aap = True
                    break
        if update_aap:
            vmi_obj.set_virtual_machine_interface_allowed_address_pairs(
                port['allowed-address-pairs'])
            self._vnc_lib.virtual_machine_interface_update(vmi_obj)
            vmi.update()
            self.update_secondary_iip(vmi)
def delete_shared_iip(self, iip):
if not iip.service_instance_ip or not iip.instance_ip_secondary:
return
if iip.service_instance:
return
for vmi_id in list(iip.virtual_machine_interfaces):
self._vnc_lib.ref_update('instance-ip', iip.uuid,
'virtual-machine-interface', vmi_id, None, 'DELETE')
try:
self._vnc_lib.instance_ip_delete(id=iip.uuid)
InstanceIpSM.delete(iip.uuid)
except NoIdError:
self.logger.error("Instance IP %s delete failed" % (iip.name))
return
    def delete_old_vmi_links(self, vmi):
        """Remove instance-ip, interface-route-table and health-check refs
        on the VMI that no longer belong to any of its port tuples."""
        # Drop service IIPs whose owning service instance is no longer
        # among this VMI's port tuples (IIP names embed the SI UUID).
        for iip_id in list(vmi.instance_ips):
            iip = InstanceIpSM.get(iip_id)
            if iip and (iip.service_instance or iip.service_health_check_ip):
                pt_present = False
                for pt_id in vmi.port_tuples:
                    si_uuid = (PortTupleSM.get(pt_id)).parent_uuid
                    if si_uuid in iip.name:
                        pt_present = True
                        break
                if pt_present == False:
                    self._vnc_lib.ref_update('instance-ip', iip_id,
                        'virtual-machine-interface', vmi.uuid, None, 'DELETE')
                    vmi.instance_ips.remove(iip_id)
        # Drop interface route tables not used by any attached port
        # tuple's service instance.
        for irt_id in list(vmi.interface_route_tables):
            irt = InterfaceRouteTableSM.get(irt_id)
            if irt and irt.service_instances:
                pt_present = False
                for pt_id in vmi.port_tuples:
                    si_uuid = (PortTupleSM.get(pt_id)).parent_uuid
                    if si_uuid in irt.service_instances:
                        pt_present = True
                if pt_present == False:
                    self._vnc_lib.ref_update('virtual-machine-interface', vmi.uuid,
                        'interface-route-table', irt.uuid, None, 'DELETE')
                    vmi.interface_route_tables.remove(irt_id)
        # Drop health checks not referenced by any attached port tuple's
        # service instance; only applies to service VMs.
        for health_id in list(vmi.service_health_checks):
            health = ServiceHealthCheckSM.get(health_id)
            if health and health.service_instances and vmi.service_vm == True:
                pt_present = False
                for pt_id in vmi.port_tuples:
                    si_uuid = (PortTupleSM.get(pt_id)).parent_uuid
                    si = ServiceInstanceSM.get(si_uuid)
                    if not si:
                        continue
                    if health.uuid in si.service_health_checks:
                        pt_present = True
                        break
                if pt_present == False:
                    self._vnc_lib.ref_update('virtual-machine-interface', vmi.uuid,
                        'service-health-check', health.uuid, None, 'DELETE')
                    vmi.service_health_checks.remove(health_id)
def set_port_service_chain_ip(self, si, port, vmi):
self._allocate_shared_iip(si, port, vmi)
def get_port_config(self, st, si):
st_if_list = st.params.get('interface_type', [])
si_if_list = si.params.get('interface_list', [])
port_config = {}
for index in range(0, len(st_if_list)):
try:
si_if = si_if_list[index]
st_if = st_if_list[index]
except IndexError:
continue
port = {}
port['type'] = st_if.get('service_interface_type')
port['shared-ip'] = st_if.get('shared_ip')
port['static-route-enable'] = st_if.get('static_route_enable')
port['allowed-address-pairs'] = si_if.get('allowed_address_pairs')
port['interface-route-tables'] = []
for irt_id, if_type in si.interface_route_tables.items():
irt = InterfaceRouteTableSM.get(irt_id)
if irt and if_type['interface_type'] == port['type']:
port['interface-route-tables'].append(irt.uuid)
port['service-health-checks'] = []
for health_id, if_type in si.service_health_checks.items():
health = ServiceHealthCheckSM.get(health_id)
if health and if_type['interface_type'] == port['type']:
port['service-health-checks'].append(health.uuid)
port_config[st_if.get('service_interface_type')] = port
return port_config
def update_vmi_port_tuples(self, vmi):
if vmi:
self.delete_old_vmi_links(vmi)
for pt_id in vmi.port_tuples:
self.update_port_tuple(pt_id)
    def update_port_tuple(self, pt_id):
        """Reconcile all VMIs of one port tuple with its service instance's
        interface configuration (shared IPs, AAPs, health checks, routes)."""
        pt = PortTupleSM.get(pt_id)
        if not pt:
            self.logger.debug("No valid port tuple provided to update")
            return
        si = ServiceInstanceSM.get(pt.parent_key)
        if not si:
            self.logger.debug("Service Instance %s not found" % pt.parent_key)
            return
        st = ServiceTemplateSM.get(si.service_template)
        port_config = self.get_port_config(st, si)
        if not port_config:
            self.logger.debug( \
                "Failed to construct port config for Port Tuple %s" % pt.uuid)
            return
        for vmi_id in list(pt.virtual_machine_interfaces):
            vmi = VirtualMachineInterfaceSM.get(vmi_id)
            if not vmi:
                self.logger.debug( \
                    "VMI %s not found for Port Tuple %s" % (vmi_id, pt.uuid))
                continue
            if not vmi.params:
                self.logger.debug( \
                    "VMI %s has invalid params for Port Tuple %s" % \
                    (vmi_id, pt.uuid))
                continue
            # NOTE(review): this raises KeyError if the VMI's interface
            # type is absent from port_config, so the 'if not port' guard
            # below never fires for that case — confirm intended behavior.
            port = port_config[vmi.params.get('service_interface_type')]
            if not port:
                continue
            # Stub object carrying just the identity needed for updates.
            vmi_obj = VirtualMachineInterface(fq_name=vmi.fq_name,
                name=vmi.name, parent_type='project')
            vmi_obj.uuid = vmi.uuid
            self.set_port_service_chain_ip(si, port, vmi)
            self.set_port_allowed_address_pairs(port, vmi, vmi_obj)
            self.set_port_service_health_check(si, port, vmi)
            self.set_port_static_routes(port, vmi)
def update_port_tuples(self):
for si in ServiceInstanceSM.values():
for pt_id in si.port_tuples:
self.update_port_tuple(pt_id=pt_id)
for iip in InstanceIpSM.values():
self.delete_shared_iip(iip)
for vmi in VirtualMachineInterfaceSM.values():
self.delete_old_vmi_links(vmi)
| |
# Copyright 2014, 2018 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
import unittest
import pypowervm.adapter as adpt
import pypowervm.const as c
import pypowervm.tests.test_utils.test_wrapper_abc as twrap
import pypowervm.wrappers.base_partition as bp
import pypowervm.wrappers.storage as pvm_stor
import pypowervm.wrappers.virtual_io_server as vios
class TestVIOSWrapper(twrap.TestWrapper):
    """Tests for the VIOS wrapper, driven by the fake_vios_ssp_npiv feed."""
    # Fixture file parsed by twrap.TestWrapper into self.dwrap.
    file = 'fake_vios_ssp_npiv.txt'
    wrapper_class_to_test = vios.VIOS
    def test_update_timeout(self):
        """update() defaults to 3600s, honors overrides and session timeout.

        NOTE: each assert_called_with checks the *most recent* call, so the
        statement order here is significant.
        """
        self.adpt.update_by_path.return_value = self.dwrap.entry
        self.assertEqual(self.dwrap.entry, self.dwrap.update().entry)
        self.adpt.update_by_path.assert_called_with(self.dwrap, None, mock.ANY,
                                                    timeout=3600)
        self.assertEqual(self.dwrap.entry, self.dwrap.update(timeout=42).entry)
        self.adpt.update_by_path.assert_called_with(self.dwrap, None, mock.ANY,
                                                    timeout=42)
        # If the session is configured for longer...
        self.adpt.session.timeout = 10000
        self.assertEqual(self.dwrap.entry, self.dwrap.update().entry)
        # ...default to the longer value.
        self.adpt.update_by_path.assert_called_with(self.dwrap, None, mock.ANY,
                                                    timeout=10000)
        # But explicit timeout can still be set.
        self.assertEqual(self.dwrap.entry, self.dwrap.update(timeout=42).entry)
        self.adpt.update_by_path.assert_called_with(self.dwrap, None, mock.ANY,
                                                    timeout=42)
    def test_get_ip_addresses(self):
        """The wrapper exposes the VIOS IP addresses as a tuple."""
        expected_ips = ('9.1.2.4', '10.10.10.5')
        self.assertEqual(expected_ips, self.dwrap.ip_addresses)
    def test_mover_service_partition(self):
        """is_mover_service_partition is both readable and settable."""
        self.assertTrue(self.dwrap.is_mover_service_partition)
        self.dwrap.is_mover_service_partition = False
        self.assertFalse(self.dwrap.is_mover_service_partition)
    def test_rmc_ip(self):
        """The RMC IP address comes straight from the fixture."""
        self.assertEqual('9.1.2.5', self.dwrap.rmc_ip)
    def test_license_accept(self):
        """The fixture VIOS has accepted its license."""
        self.assertTrue(self.dwrap.is_license_accepted)
    def test_vnic_capabilities(self):
        """vNIC and vNIC-failover capability flags are both set."""
        self.assertTrue(self.dwrap.vnic_capable)
        self.assertTrue(self.dwrap.vnic_failover_capable)
    def test_hdisk_reserve_policy_found(self):
        # Most are NoReserve; look for the only one that's SinglePath to make
        # sure we're actually searching rather than picking first/last/random
        found_policy = self.dwrap.hdisk_reserve_policy(
            '6005076300838041300000000000002B')
        self.assertEqual('SinglePath', found_policy)
    def test_hdisk_reserve_policy_notfound(self):
        # A UDID that is not in the feed must yield no policy (None), not an
        # arbitrary match.
        found_policy = self.dwrap.hdisk_reserve_policy('Bogus')
        self.assertIsNone(found_policy)
    def test_hdisk_from_uuid_found(self):
        """A known hdisk UDID resolves to its device name."""
        found_name = self.dwrap.hdisk_from_uuid(
            '01M0lCTTIxNDUyNEM2MDA1MDc2MzAwODM4MDQxMzAwMDAwMDAwMDAwMDhCNQ==')
        self.assertEqual('hdisk7', found_name)
    def test_hdisk_from_uuid_notfound(self):
        """An unknown UDID resolves to None."""
        found_name = self.dwrap.hdisk_from_uuid('Bogus')
        self.assertIsNone(found_name)
    def test_seas(self):
        """The feed contains one SEA with PVID 1 and one additional trunk."""
        self.assertEqual(1, len(self.dwrap.seas))
        sea = self.dwrap.seas[0]
        self.assertEqual(1, sea.pvid)
        self.assertEqual(1, len(sea.addl_adpts))
    def test_trunks(self):
        """Three trunk adapters with the expected PVIDs, in feed order."""
        self.assertEqual(3, len(self.dwrap.trunk_adapters))
        self.assertEqual(1, self.dwrap.trunk_adapters[0].pvid)
        self.assertEqual(4094, self.dwrap.trunk_adapters[1].pvid)
        self.assertEqual(4093, self.dwrap.trunk_adapters[2].pvid)
    def test_derive_orphan_trunk_adapters(self):
        """Exactly one trunk adapter is an orphan (not behind a SEA)."""
        orphans = self.dwrap.derive_orphan_trunk_adapters()
        self.assertEqual(1, len(orphans))
        self.assertEqual(4093, orphans[0].pvid)
    def test_wwpns(self):
        """Tests the helper methods to get WWPNs more easily."""
        phys_paths = self.dwrap.get_pfc_wwpns()
        self.assertIsNotNone(phys_paths)
        self.assertEqual(2, len(phys_paths))
        virt_paths = self.dwrap.get_vfc_wwpns()
        self.assertIsNotNone(virt_paths)
        self.assertEqual(2, len(virt_paths))
        # Each virtual path is a WWPN pair.
        for virt_path in virt_paths:
            self.assertEqual(2, len(virt_path))
        # Only one physical port is active in the fixture.
        self.assertEqual(1, len(self.dwrap.get_active_pfc_wwpns()))
        self.assertEqual('10000090FA1B6302',
                         self.dwrap.get_active_pfc_wwpns()[0])
    def test_pfc_ports(self):
        """Tests that the physical FC ports can be gathered."""
        ports = self.dwrap.pfc_ports
        self.assertIsNotNone(ports)
        self.assertEqual(2, len(ports))
        # Validate attributes on one.
        self.assertEqual('U78AB.001.WZSJBM3-P1-C2-T2', ports[0].loc_code)
        self.assertEqual('fcs1', ports[0].name)
        self.assertEqual('1aU78AB.001.WZSJBM3-P1-C2-T2', ports[0].udid)
        self.assertEqual('10000090FA1B6303', ports[0].wwpn)
        self.assertEqual(0, ports[0].npiv_available_ports)
        self.assertEqual(0, ports[0].npiv_total_ports)
    def test_phys_vols(self):
        """Tests that the physical volumes can be gathered."""
        phys_vols = self.dwrap.phys_vols
        self.assertIsNotNone(phys_vols)
        self.assertEqual(11, len(phys_vols))
        # Validate attributes on one.
        self.assertEqual(phys_vols[0].description, 'SAS Disk Drive')
        self.assertEqual(phys_vols[0].udid,
                         '01M0lCTU1CRjI2MDBSQzUwMDAwMzk0NzgzQTUyQjg=')
        self.assertEqual(phys_vols[0].capacity, 572325)
        self.assertEqual(phys_vols[0].name, 'hdisk0')
        self.assertEqual(phys_vols[0].state, 'active')
        # Encryption details (private attributes) from the fixture.
        self.assertEqual(phys_vols[0]._encryption_state, 'Unlocked')
        self.assertIsNone(phys_vols[0]._encryption_key)
        agent = phys_vols[0]._encryption_agent
        self.assertIsInstance(agent, pvm_stor._LUKSEncryptor)
        self.assertEqual(agent.key_size, 512)
        self.assertEqual(agent.cipher, 'aes-cbc-essiv:sha256')
        self.assertEqual(agent.hash_spec, 'sha512')
class TestViosMappings(twrap.TestWrapper):
    """Tests for VSCSI/VFC mapping build and clone, on fake_vios_mappings."""
    file = 'fake_vios_mappings.txt'
    wrapper_class_to_test = vios.VIOS
    mock_adapter_fx_args = {}
    def setUp(self):
        super(TestViosMappings, self).setUp()
        # All client LPAR hrefs built during the tests resolve to 'a_link'.
        self.adpt.build_href.return_value = "a_link"
    def test_bld_scsi_mapping_vopt(self):
        """Validation that the element is correct."""
        vopt = pvm_stor.VOptMedia.bld_ref(self.adpt, 'media_name')
        vmap = vios.VSCSIMapping.bld(self.adpt, 'host_uuid',
                                     'client_lpar_uuid', vopt)
        self.assertIsNotNone(vmap)
        self.assertIsNotNone(vmap.element)
        self.assertEqual(vmap.client_adapter.side, 'Client')
        # No slot given, so the next-available-slot flag is on.
        self.assertTrue(vmap.client_adapter._get_val_bool(
            'UseNextAvailableSlotID'))
        self.assertEqual(vmap.server_adapter.side, 'Server')
        # Validate the exact XML of the server adapter: ensure proper ordering.
        self.assertEqual(
            '<uom:ServerAdapter xmlns:uom="http://www.ibm.com/xmlns/systems/po'
            'wer/firmware/uom/mc/2012_10/" schemaVersion="V1_0"><uom:Metadata>'
            '<uom:Atom/></uom:Metadata><uom:AdapterType>Server</uom:AdapterTyp'
            'e><uom:UseNextAvailableSlotID>true</uom:UseNextAvailableSlotID></'
            'uom:ServerAdapter>'.encode('utf-8'),
            vmap.server_adapter.toxmlstring())
        # If the slot number is None then REST will assign the first available.
        self.assertIsNone(vmap.client_adapter.lpar_slot_num)
        self.assertIsNone(vmap.target_dev)
        self.assertEqual('media_name', vmap.backing_storage.media_name)
        self.assertEqual('a_link', vmap.client_lpar_href)
        self.assertIsInstance(vmap.backing_storage, pvm_stor.VOptMedia)
        # Test the cloning
        vopt2 = pvm_stor.VOptMedia.bld_ref(self.adpt, 'media_name2')
        vmap2 = vios.VSCSIMapping.bld_from_existing(vmap, vopt2)
        self.assertIsNotNone(vmap2)
        self.assertIsNotNone(vmap2.element)
        self.assertEqual(vmap2.client_adapter.side, 'Client')
        self.assertEqual(vmap2.server_adapter.side, 'Server')
        self.assertIsNone(vmap2.client_adapter.lpar_slot_num)
        self.assertIsNone(vmap2.target_dev)
        self.assertEqual('media_name2', vmap2.backing_storage.media_name)
        self.assertEqual('a_link', vmap2.client_lpar_href)
        self.assertIsInstance(vmap2.backing_storage, pvm_stor.VOptMedia)
        # Clone to a different device type
        vdisk = pvm_stor.VDisk.bld_ref(self.adpt, 'disk_name')
        vmap3 = vios.VSCSIMapping.bld_from_existing(
            vmap, vdisk, lpar_slot_num=6, lua='vdisk_lua')
        self.assertIsNotNone(vmap3)
        self.assertIsNotNone(vmap3.element)
        # Validate the exact XML of the client adapter: ensure proper ordering.
        self.assertEqual(
            '<uom:ClientAdapter xmlns:uom="http://www.ibm.com/xmlns/systems/po'
            'wer/firmware/uom/mc/2012_10/" schemaVersion="V1_0"><uom:Metadata>'
            '<uom:Atom/></uom:Metadata><uom:AdapterType>Client</uom:AdapterTyp'
            'e><uom:UseNextAvailableSlotID>false</uom:UseNextAvailableSlotID><'
            'uom:VirtualSlotNumber>6</uom:VirtualSlotNumber></uom:ClientAdapte'
            'r>'.encode('utf-8'), vmap3.client_adapter.toxmlstring())
        self.assertEqual('Client', vmap3.client_adapter.side)
        # Specifying 'lua' builds the appropriate type of target dev...
        self.assertIsInstance(vmap3.target_dev, pvm_stor.VDiskTargetDev)
        # ...with the correct LUA
        self.assertEqual('vdisk_lua', vmap3.target_dev.lua)
        self.assertEqual(6, vmap3.client_adapter.lpar_slot_num)
        # Assert this is set to False when specifying the slot number
        # and building from an existing mapping
        self.assertFalse(vmap3.client_adapter._get_val_bool(
            'UseNextAvailableSlotID'))
        self.assertEqual('Server', vmap3.server_adapter.side)
        self.assertEqual('disk_name', vmap3.backing_storage.name)
        self.assertEqual('LogicalVolume', vmap3.backing_storage.vdtype)
        self.assertEqual('a_link', vmap3.client_lpar_href)
        self.assertIsInstance(vmap3.backing_storage, pvm_stor.VDisk)
    def test_bld_scsi_mapping_vdisk(self):
        """Validation that the element is correct."""
        vdisk = pvm_stor.VDisk.bld_ref(self.adpt, 'disk_name')
        vmap = vios.VSCSIMapping.bld(self.adpt, 'host_uuid',
                                     'client_lpar_uuid', vdisk,
                                     lpar_slot_num=5, lua='vdisk_lua')
        self.assertIsNotNone(vmap)
        self.assertIsNotNone(vmap.element)
        self.assertEqual('Client', vmap.client_adapter.side)
        self.assertIsInstance(vmap.target_dev, pvm_stor.VDiskTargetDev)
        self.assertEqual('vdisk_lua', vmap.target_dev.lua)
        self.assertEqual(5, vmap.client_adapter.lpar_slot_num)
        # Assert that we set this to False when specifying the slot number
        self.assertFalse(vmap.client_adapter._get_val_bool(
            'UseNextAvailableSlotID'))
        self.assertEqual('Server', vmap.server_adapter.side)
        self.assertEqual('disk_name', vmap.backing_storage.name)
        self.assertEqual('LogicalVolume', vmap.backing_storage.vdtype)
        self.assertEqual('a_link', vmap.client_lpar_href)
        self.assertIsInstance(vmap.backing_storage, pvm_stor.VDisk)
        # Test cloning
        vdisk2 = pvm_stor.VDisk.bld_ref(self.adpt, 'disk_name2')
        vmap2 = vios.VSCSIMapping.bld_from_existing(vmap, vdisk2,
                                                    lpar_slot_num=6)
        self.assertIsNotNone(vmap2)
        self.assertIsNotNone(vmap2.element)
        self.assertEqual('Client', vmap2.client_adapter.side)
        # Cloning without specifying 'lua' doesn't clone the target dev
        self.assertIsNone(vmap2.target_dev)
        self.assertEqual(6, vmap2.client_adapter.lpar_slot_num)
        self.assertFalse(vmap2.client_adapter._get_val_bool(
            'UseNextAvailableSlotID'))
        self.assertEqual('Server', vmap2.server_adapter.side)
        self.assertEqual('disk_name2', vmap2.backing_storage.name)
        self.assertEqual('LogicalVolume', vmap2.backing_storage.vdtype)
        self.assertEqual('a_link', vmap2.client_lpar_href)
        self.assertIsInstance(vmap2.backing_storage, pvm_stor.VDisk)
    def test_bld_scsi_mapping_lu(self):
        """Validation that the element is correct."""
        lu = pvm_stor.LU.bld_ref(self.adpt, 'disk_name', 'udid')
        vmap = vios.VSCSIMapping.bld(self.adpt, 'host_uuid',
                                     'client_lpar_uuid', lu,
                                     lpar_slot_num=5)
        self.assertIsNotNone(vmap)
        self.assertIsNotNone(vmap.element)
        self.assertEqual('Client', vmap.client_adapter.side)
        # No LUA specified, so no target device is built.
        self.assertIsNone(vmap.target_dev)
        self.assertEqual(5, vmap.client_adapter.lpar_slot_num)
        self.assertEqual('Server', vmap.server_adapter.side)
        self.assertEqual('disk_name', vmap.backing_storage.name)
        self.assertEqual('udid', vmap.backing_storage.udid)
        self.assertEqual('a_link', vmap.client_lpar_href)
        self.assertIsInstance(vmap.backing_storage, pvm_stor.LU)
        # Test cloning
        lu2 = pvm_stor.LU.bld_ref(self.adpt, 'disk_name2', 'udid2')
        vmap2 = vios.VSCSIMapping.bld_from_existing(vmap, lu2, lua='lu_lua')
        self.assertIsNotNone(vmap2)
        self.assertIsNotNone(vmap2.element)
        self.assertEqual('Client', vmap2.client_adapter.side)
        self.assertEqual(5, vmap2.client_adapter.lpar_slot_num)
        self.assertIsInstance(vmap2.target_dev, pvm_stor.LUTargetDev)
        self.assertEqual('lu_lua', vmap2.target_dev.lua)
        self.assertEqual('Server', vmap2.server_adapter.side)
        self.assertEqual('disk_name2', vmap2.backing_storage.name)
        self.assertEqual('udid2', vmap2.backing_storage.udid)
        self.assertEqual('a_link', vmap2.client_lpar_href)
        self.assertIsInstance(vmap2.backing_storage, pvm_stor.LU)
    def test_bld_scsi_mapping_pv(self):
        """Validation that the element is correct."""
        pv = pvm_stor.PV.bld(self.adpt, 'disk_name', 'udid')
        vmap = vios.VSCSIMapping.bld(self.adpt, 'host_uuid',
                                     'client_lpar_uuid', pv,
                                     lpar_slot_num=5, target_name='fake_name')
        self.assertIsNotNone(vmap)
        self.assertIsNotNone(vmap.element)
        self.assertEqual('Client', vmap.client_adapter.side)
        self.assertEqual(5, vmap.client_adapter.lpar_slot_num)
        self.assertEqual('Server', vmap.server_adapter.side)
        self.assertEqual('disk_name', vmap.backing_storage.name)
        self.assertEqual('a_link', vmap.client_lpar_href)
        # target_name flows into the built target device.
        self.assertEqual('fake_name', vmap.target_dev.name)
        self.assertIsInstance(vmap.backing_storage, pvm_stor.PV)
        # Test cloning
        pv2 = pvm_stor.PV.bld(self.adpt, 'disk_name2', 'udid2')
        vmap2 = vios.VSCSIMapping.bld_from_existing(
            vmap, pv2, lpar_slot_num=6, lua='pv_lua')
        self.assertIsNotNone(vmap2)
        self.assertIsNotNone(vmap2.element)
        self.assertEqual('Client', vmap2.client_adapter.side)
        self.assertEqual(6, vmap2.client_adapter.lpar_slot_num)
        self.assertIsInstance(vmap2.target_dev, pvm_stor.PVTargetDev)
        self.assertEqual('pv_lua', vmap2.target_dev.lua)
        self.assertEqual('Server', vmap2.server_adapter.side)
        self.assertEqual('disk_name2', vmap2.backing_storage.name)
        self.assertEqual('a_link', vmap2.client_lpar_href)
        # The clone does not inherit the original's target name.
        self.assertIsNone(vmap2.target_dev.name)
        self.assertIsInstance(vmap2.backing_storage, pvm_stor.PV)
        # Test empty target_dev_type
        pv3 = pvm_stor.PV.bld(self.adpt, 'disk_name3', 'udid3')
        vmap3 = vios.VSCSIMapping.bld_from_existing(
            vmap, pv3, lpar_slot_num=6)
        self.assertIsNone(vmap3.target_dev)
    def test_clone_scsi_mapping_no_storage(self):
        """Clone a VSCSI mapping with no storage element."""
        pv = pvm_stor.PV.bld(self.adpt, 'disk_name', 'udid')
        vmap = vios.VSCSIMapping.bld(self.adpt, 'host_uuid',
                                     'client_lpar_uuid', pv,
                                     lpar_slot_num=5)
        vmap2 = vios.VSCSIMapping.bld_from_existing(vmap, None)
        self.assertIsNotNone(vmap2)
        self.assertIsNotNone(vmap2.element)
        self.assertEqual('Client', vmap2.client_adapter.side)
        self.assertEqual('Server', vmap2.server_adapter.side)
        self.assertEqual('a_link', vmap2.client_lpar_href)
        self.assertEqual(5, vmap2.client_adapter.lpar_slot_num)
        self.assertIsNone(vmap.target_dev)
        self.assertIsNone(vmap2.backing_storage)
        # Illegal to specify target dev properties without backing storage.
        self.assertRaises(ValueError, vios.VSCSIMapping.bld_from_existing,
                          vmap, None, lua='bogus')
    def test_get_scsi_mappings(self):
        """SCSI mappings parse from the feed and round-trip via the setter."""
        mappings = self.dwrap.scsi_mappings
        # Ensure that at least one adapter has a client LPAR & storage
        found_client_uri = False
        static_map = None
        for mapping in mappings:
            if mapping.client_lpar_href and mapping.backing_storage:
                found_client_uri = True
                static_map = mapping
        self.assertTrue(found_client_uri)
        # We'll use the previous mapping as a baseline for further validation
        self.assertIsNotNone(static_map.client_adapter)
        self.assertIsNotNone(static_map.backing_storage)
        self.assertIsNotNone(static_map.server_adapter)
        # Deeper check on each of these.
        ca = static_map.client_adapter
        self.assertEqual(5, ca.lpar_id)
        self.assertEqual(1, ca.vios_id)
        self.assertTrue(ca.is_varied_on)
        self.assertIsNotNone(ca.lpar_slot_num)
        self.assertIsNotNone(ca.vios_slot_num)
        self.assertIsNotNone(ca.loc_code)
        self.assertEqual(ca.side, 'Client')
        sa = static_map.server_adapter
        self.assertEqual(10, sa.lpar_id)
        self.assertEqual(1, sa.vios_id)
        self.assertIsNotNone(sa.name)
        self.assertIsNotNone(sa.backing_dev_name)
        self.assertIsNotNone(sa.udid)
        self.assertEqual(sa.side, 'Server')
        self.assertTrue(sa.is_varied_on)
        self.assertIsNotNone(sa.lpar_slot_num)
        self.assertIsNotNone(sa.vios_slot_num)
        self.assertIsNotNone(sa.loc_code)
        # Try copying the map and adding it in
        new_map = copy.deepcopy(static_map)
        orig_size = len(mappings)
        mappings.append(new_map)
        self.assertEqual(len(mappings), orig_size + 1)
        self.assertEqual(len(self.dwrap.scsi_mappings), orig_size + 1)
        mappings.remove(new_map)
        self.dwrap.scsi_mappings = mappings
        self.assertEqual(len(self.dwrap.scsi_mappings), orig_size)
    def test_vfc_mappings(self):
        """VFC mappings parse from the feed and round-trip via the setter."""
        mappings = self.dwrap.vfc_mappings
        # Ensure that at least one adapter has a client LPAR
        found_client_uri = False
        static_map = None
        for mapping in mappings:
            if mapping.client_lpar_href:
                found_client_uri = True
                static_map = mapping
        self.assertTrue(found_client_uri)
        # We'll use the previous mapping as a baseline for further validation
        self.assertIsNotNone(static_map.client_adapter)
        self.assertIsNotNone(static_map.backing_port)
        self.assertIsNotNone(static_map.server_adapter)
        # Deeper check on each of these.
        ca = static_map.client_adapter
        self.assertIsNotNone(ca.wwpns)
        self.assertIsNotNone(ca.lpar_id)
        self.assertIsNotNone(ca.vios_id)
        self.assertTrue(ca.is_varied_on)
        self.assertIsNotNone(ca.lpar_slot_num)
        self.assertIsNotNone(ca.vios_slot_num)
        self.assertIsNotNone(ca.loc_code)
        self.assertEqual(ca.side, 'Client')
        bport = static_map.backing_port
        self.assertIsNotNone(bport.loc_code)
        self.assertIsNotNone(bport.name)
        self.assertIsNotNone(bport.udid)
        self.assertIsNotNone(bport.wwpn)
        self.assertIsNotNone(bport.npiv_available_ports)
        self.assertIsNotNone(bport.npiv_total_ports)
        sa = static_map.server_adapter
        self.assertIsNotNone(sa.name)
        self.assertIsNotNone(sa.map_port)
        self.assertIsNotNone(sa.udid)
        self.assertEqual(sa.side, 'Server')
        self.assertTrue(sa.is_varied_on)
        self.assertIsNotNone(sa.lpar_slot_num)
        self.assertIsNotNone(sa.vios_slot_num)
        self.assertIsNotNone(sa.loc_code)
        # Try copying the map and adding it in
        new_map = copy.deepcopy(static_map)
        orig_size = len(mappings)
        mappings.append(new_map)
        self.assertEqual(len(mappings), orig_size + 1)
        self.assertEqual(len(self.dwrap.vfc_mappings), orig_size + 1)
        mappings.remove(new_map)
        self.dwrap.vfc_mappings = mappings
        self.assertEqual(len(self.dwrap.vfc_mappings), orig_size)
    def test_bld_vfc_mapping(self):
        """VFCMapping.bld creates port, server and client adapter pieces."""
        mapping = vios.VFCMapping.bld(self.adpt, 'host_uuid',
                                      'client_lpar_uuid', 'fcs0', ['aa', 'bb'])
        self.assertIsNotNone(mapping)
        # Validate the FC Backing port
        self.assertIsNotNone(mapping.backing_port)
        # Validate the Server Adapter
        self.assertIsNotNone(mapping.server_adapter)
        # Validate the Client Adapter
        self.assertIsNotNone(mapping.client_adapter)
        # WWPNs are upper-cased by the wrapper.
        self.assertEqual(['AA', 'BB'], mapping.client_adapter.wwpns)
    def test_bld_vfc_mapping_with_slot(self):
        """VFCMapping.bld honors an explicit client slot number."""
        mapping = vios.VFCMapping.bld(self.adpt, 'host_uuid',
                                      'client_lpar_uuid', 'fcs0',
                                      client_wwpns=['aa', 'bb'],
                                      lpar_slot_num=3)
        self.assertIsNotNone(mapping)
        # Validate the FC Backing port
        self.assertIsNotNone(mapping.backing_port)
        # Validate the Server Adapter
        self.assertIsNotNone(mapping.server_adapter)
        # Validate the Client Adapter
        self.assertIsNotNone(mapping.client_adapter)
        self.assertEqual(['AA', 'BB'], mapping.client_adapter.wwpns)
        # verify the slot number
        self.assertEqual(3, mapping.client_adapter.lpar_slot_num)
        # Assert that we set this to False when specifying the slot number
        self.assertFalse(mapping.client_adapter._get_val_bool(
            'UseNextAvailableSlotID'))
    def test_bld_scsi_mapping_from_existing(self):
        """bld_from_existing keeps/drops the right pieces of a mapping."""
        def map_has_pieces(smap, lpar_href=True, client_adapter=True,
                           server_adapter=True, storage=True,
                           target_device=True):
            # Assert presence/absence of each sub-element of the mapping.
            def has_piece(piece, has_it):
                if has_it:
                    self.assertIsNotNone(piece)
                else:
                    self.assertIsNone(piece)
            has_piece(smap.client_lpar_href, lpar_href)
            has_piece(smap.client_adapter, client_adapter)
            has_piece(smap.server_adapter, server_adapter)
            has_piece(smap.backing_storage, storage)
            has_piece(smap.element.find('TargetDevice'), target_device)
        stg = pvm_stor.VDisk.bld_ref(self.adpt, 'disk_name')
        smaps = self.dwrap.scsi_mappings
        # 0 has only ServerAdapter
        sm = smaps[0]
        map_has_pieces(sm, lpar_href=False, client_adapter=False,
                       storage=False, target_device=False)
        smclone = vios.VSCSIMapping.bld_from_existing(sm, stg)
        map_has_pieces(smclone, lpar_href=False, client_adapter=False,
                       target_device=False)
        self.assertEqual(stg, smclone.backing_storage)
        # 1 has ServerAdapter, Storage, and TargetDevice
        sm = smaps[1]
        map_has_pieces(sm, lpar_href=False, client_adapter=False)
        self.assertNotEqual(stg, sm.backing_storage)
        smclone = vios.VSCSIMapping.bld_from_existing(sm, stg)
        # Target device *disappears*
        map_has_pieces(smclone, lpar_href=False, client_adapter=False,
                       target_device=False)
        self.assertEqual(stg, smclone.backing_storage)
        # 3 has AssociatedLogicalPartition, ClientAdapter, ServerAdapter.
        sm = smaps[3]
        map_has_pieces(sm, storage=False, target_device=False)
        smclone = vios.VSCSIMapping.bld_from_existing(sm, stg)
        map_has_pieces(smclone, target_device=False)
        self.assertEqual(stg, smclone.backing_storage)
        # 12 has everything
        sm = smaps[12]
        map_has_pieces(sm)
        self.assertNotEqual(stg, sm.backing_storage)
        smclone = vios.VSCSIMapping.bld_from_existing(sm, stg)
        # Target device *disappears*
        map_has_pieces(smclone, target_device=False)
        self.assertEqual(stg, smclone.backing_storage)
        # Everything else cloned okay
        self.assertEqual(sm.client_lpar_href, smclone.client_lpar_href)
        self.assertEqual(sm.client_adapter, smclone.client_adapter)
        self.assertEqual(sm.server_adapter, smclone.server_adapter)
class TestCrtRelatedHref(unittest.TestCase):
    """Verify construction of related-element hrefs."""
    @mock.patch('pypowervm.adapter.Session')
    def test_crt_related_href(self, mock_sess):
        """Hrefs are well formed both with and without a host UUID."""
        mock_sess.dest = 'root'
        adp = adpt.Adapter(mock_sess)
        # With a host UUID, the href is rooted at the ManagedSystem.
        with_host = ('root/rest/api/uom/ManagedSystem/host/'
                     'LogicalPartition/lpar')
        self.assertEqual(
            with_host,
            vios.VStorageMapping.crt_related_href(adp, 'host', 'lpar'))
        # With host_uuid=None, the href points directly at the LPAR.
        self.assertEqual(
            'root/rest/api/uom/LogicalPartition/lpar',
            vios.VStorageMapping.crt_related_href(adp, None, 'lpar'))
class TestVSCSIBus(twrap.TestWrapper):
    """Tests for the VSCSIBus wrapper, driven by the vscsibus feed."""
    file = 'vscsibus_feed.txt'
    wrapper_class_to_test = vios.VSCSIBus
    def test_props(self):
        """Properties of the first bus match the fixture feed."""
        self.assertEqual(4, len(self.entries))
        bus = self.dwrap
        self.assertEqual('1f25efc1-a42b-3384-85e7-f37158f46615', bus.uuid)
        self.assertEqual(
            'http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-3'
            '42c-9f43-ddfeb9f8edd3/LogicalPartition/3DFF2EF5-6F99-4C29-B655-EE'
            '57DF1B64C6', bus.client_lpar_href)
        self.assertEqual(5, bus.client_adapter.lpar_id)
        self.assertEqual(2, bus.client_adapter.lpar_slot_num)
        self.assertEqual(5, bus.server_adapter.lpar_id)
        self.assertEqual(2, bus.server_adapter.lpar_slot_num)
        # The fixture bus carries a PV mapping and a VOpt mapping, in order.
        map1, map2 = bus.mappings
        self.assertIsInstance(map1.backing_storage, pvm_stor.PV)
        self.assertEqual('hdisk10', map1.backing_storage.name)
        self.assertIsInstance(map1.target_dev, pvm_stor.PVTargetDev)
        self.assertEqual('0x8100000000000000', map1.target_dev.lua)
        self.assertIsInstance(map2.backing_storage, pvm_stor.VOptMedia)
        self.assertEqual('cfg_My_OS_Image_V_3dff2ef5_000000.iso',
                         map2.backing_storage.name)
        self.assertIsInstance(map2.target_dev, pvm_stor.VOptTargetDev)
        self.assertEqual('0x8200000000000000', map2.target_dev.lua)
    def test_bld(self):
        """VSCSIBus.bld with default and explicit client slot numbers."""
        self.adpt.build_href.return_value = 'href'
        # Default slot number (use next available)
        bus = vios.VSCSIBus.bld(self.adpt, 'client_lpar_uuid')
        self.adpt.build_href.assert_called_once_with(
            'LogicalPartition', 'client_lpar_uuid', xag=[])
        self.assertEqual('href', bus.client_lpar_href)
        self.assertTrue(bus.client_adapter._get_val_bool(
            pvm_stor._VADPT_NEXT_SLOT))
        self.assertIsNotNone(bus.server_adapter)
        self.assertEqual([], bus.mappings)
        # Specify slot number
        bus = vios.VSCSIBus.bld(self.adpt, 'client_lpar_uuid',
                                lpar_slot_num=42)
        # Explicit slot turns off the next-available-slot flag.
        self.assertFalse(bus.client_adapter._get_val_bool(
            pvm_stor._VADPT_NEXT_SLOT))
        self.assertEqual(42, bus.client_adapter.lpar_slot_num)
    def test_bld_from_existing(self):
        """Cloning a bus keeps adapters/href but empties the mappings."""
        bus = vios.VSCSIBus.bld_from_existing(self.dwrap)
        self.assertEqual(
            'http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-3'
            '42c-9f43-ddfeb9f8edd3/LogicalPartition/3DFF2EF5-6F99-4C29-B655-EE'
            '57DF1B64C6', bus.client_lpar_href)
        self.assertEqual(5, bus.client_adapter.lpar_id)
        self.assertEqual(2, bus.client_adapter.lpar_slot_num)
        self.assertEqual(5, bus.server_adapter.lpar_id)
        self.assertEqual(2, bus.server_adapter.lpar_slot_num)
        self.assertEqual([], bus.mappings)
    def test_mappings(self):
        """STDev build (with/without LUA) and bus mapping list mutation."""
        # No LUA
        lu1 = pvm_stor.LU.bld_ref(self.adpt, 'lu1', 'lu_udid1')
        std1 = vios.STDev.bld(self.adpt, lu1)
        self.assertIsInstance(std1.backing_storage, pvm_stor.LU)
        self.assertEqual('lu1', std1.backing_storage.name)
        self.assertIsNone(std1.target_dev)
        # With LUA
        vdisk1 = pvm_stor.VDisk.bld_ref(self.adpt, 'vdisk1')
        std2 = vios.STDev.bld(self.adpt, vdisk1, lua='vdisk1_lua')
        self.assertIsInstance(std2.backing_storage, pvm_stor.VDisk)
        self.assertEqual('vdisk1', std2.backing_storage.name)
        self.assertEqual('LogicalVolume', std2.backing_storage.vdtype)
        self.assertIsInstance(std2.target_dev, pvm_stor.VDiskTargetDev)
        self.assertEqual('vdisk1_lua', std2.target_dev.lua)
        # Add 'em to a bus
        bus = self.dwrap
        self.assertEqual(2, len(bus.mappings))
        bus.mappings.extend((std1, std2))
        self.assertEqual(4, len(bus.mappings))
        self.assertEqual('lu1', bus.mappings[2].backing_storage.name)
        self.assertEqual('vdisk1', bus.mappings[3].backing_storage.name)
        # Replace bus mappings
        bus.mappings = [std2, std1]
        self.assertEqual(2, len(bus.mappings))
        self.assertEqual('vdisk1', bus.mappings[0].backing_storage.name)
        self.assertEqual('lu1', bus.mappings[1].backing_storage.name)
class TestPartitionIOConfiguration(twrap.TestWrapper):
    """Checks on the VIOS partition's I/O configuration."""
    file = 'fake_vios_ssp_npiv.txt'
    wrapper_class_to_test = vios.VIOS
    def setUp(self):
        super(TestPartitionIOConfiguration, self).setUp()
        self.io_config = self.dwrap.io_config
    def test_max_slots(self):
        """The fixture reports 80 maximum virtual slots."""
        cfg = self.io_config
        self.assertEqual(80, cfg.max_virtual_slots)
    def test_io_slots(self):
        """Three I/O slots are present; detailed slot tests live elsewhere."""
        slots = self.io_config.io_slots
        self.assertIsNotNone(slots)
        self.assertEqual(3, len(slots))
class TestIOSlots(twrap.TestWrapper):
    """Checks on the first I/O slot of the VIOS's I/O configuration."""
    file = 'fake_vios_ssp_npiv.txt'
    wrapper_class_to_test = vios.VIOS
    def setUp(self):
        super(TestIOSlots, self).setUp()
        self.io_slot = self.dwrap.io_config.io_slots[0]
    def test_attrs(self):
        """Scalar slot attributes match the fixture values."""
        slot = self.io_slot
        self.assertEqual('PCI-E SAS Controller', slot.description)
        self.assertEqual('U78AB.001.WZSJBM3', slot.phys_loc)
        self.assertEqual(825, slot.pc_adpt_id)
        self.assertEqual(260, slot.pci_class)
        self.assertEqual(825, slot.pci_dev_id)
        self.assertEqual(826, slot.pci_subsys_dev_id)
        self.assertEqual(4116, slot.pci_mfg_id)
        self.assertEqual(1, slot.pci_rev_id)
        self.assertEqual(4116, slot.pci_vendor_id)
        self.assertEqual(4116, slot.pci_subsys_vendor_id)
        self.assertEqual(553713674, slot.drc_index)
        self.assertEqual('U78AB.001.WZSJBM3-P1-T9', slot.drc_name)
        self.assertEqual(False, slot.bus_grp_required)
        self.assertEqual(False, slot.required)
    def test_io_slots_setter(self):
        """Removing a slot via the setter is reflected by the wrapper."""
        prior_len = len(self.dwrap.io_config.io_slots)
        remaining = self.dwrap.io_config.io_slots[:]
        removed = remaining.pop(1)
        self.dwrap.io_config.io_slots = remaining
        self.assertEqual(prior_len - 1, len(self.dwrap.io_config.io_slots))
        self.assertNotIn(removed, self.dwrap.io_config.io_slots)
    @mock.patch('warnings.warn')
    def test_io_adpt(self, mock_warn):
        """io_adapter works; the legacy .adapter alias works but warns."""
        self.assertEqual('553713674', self.io_slot.io_adapter.id)
        # The deprecated alias must emit a DeprecationWarning.
        self.assertEqual('553713674', self.io_slot.adapter.id)
        mock_warn.assert_called_with(mock.ANY, DeprecationWarning)
    def test_bld(self):
        """IOSlot.bld sets bus_grp_required/drc_index; 'required' stays off."""
        built = bp.IOSlot.bld(self.adpt, True, 12345678)
        self.assertEqual(False, built.required)
        self.assertEqual(True, built.bus_grp_required)
        self.assertEqual(12345678, built.drc_index)
class TestGenericIOAdapter(twrap.TestWrapper):
    """Tests for the generic (non-FC) I/O adapter wrapper."""
    file = 'fake_vios_ssp_npiv.txt'
    wrapper_class_to_test = vios.VIOS
    def setUp(self):
        super(TestGenericIOAdapter, self).setUp()
        self.io_adpt = self.dwrap.io_config.io_slots[0].io_adapter
    def test_attrs(self):
        """Adapter attributes match the fixture; it is not a PhysFCAdapter."""
        self.assertEqual('553713674', self.io_adpt.id)
        self.assertEqual('PCI-E SAS Controller', self.io_adpt.description)
        self.assertEqual('U78AB.001.WZSJBM3-P1-T9',
                         self.io_adpt.dev_name)
        self.assertEqual('U78AB.001.WZSJBM3-P1-T9',
                         self.io_adpt.drc_name)
        self.assertEqual('T9', self.io_adpt.phys_loc_code)
        # assertNotIsInstance is the idiomatic unittest check and produces a
        # clearer failure message than assertFalse(isinstance(...)).
        self.assertNotIsInstance(self.io_adpt, bp.PhysFCAdapter)
class TestPhysFCAdapter(twrap.TestWrapper):
    """Tests for the physical Fibre Channel adapter wrapper."""
    file = 'fake_vios_ssp_npiv.txt'
    wrapper_class_to_test = vios.VIOS
    def setUp(self):
        super(TestPhysFCAdapter, self).setUp()
        self.io_adpt = self.dwrap.io_config.io_slots[2].io_adapter
    def test_attrs(self):
        """Adapter attributes match the fixture; type is PhysFCAdapter."""
        adapter = self.io_adpt
        expected_desc = ('8 Gigabit PCI Express Dual Port Fibre Channel '
                         'Adapter')
        self.assertEqual('553714177', adapter.id)
        self.assertEqual(expected_desc, adapter.description)
        self.assertEqual('U78AB.001.WZSJBM3-P1-C2', adapter.dev_name)
        self.assertEqual('U78AB.001.WZSJBM3-P1-C2', adapter.drc_name)
        self.assertEqual('C2', adapter.phys_loc_code)
        self.assertIsInstance(adapter, bp.PhysFCAdapter)
    def test_fc_ports(self):
        """Both FC ports on the adapter are exposed."""
        self.assertEqual(2, len(self.io_adpt.fc_ports))
class TestPhysFCPort(twrap.TestWrapper):
    """Tests for the physical FC port wrappers."""
    file = 'fake_vios_ssp_npiv.txt'
    wrapper_class_to_test = vios.VIOS
    def setUp(self):
        super(TestPhysFCPort, self).setUp()
        fc_ports = self.dwrap.io_config.io_slots[2].io_adapter.fc_ports
        self.io_port1 = fc_ports[0]
        self.io_port2 = fc_ports[1]
    def test_attrs(self):
        """Each port's attributes match the fixture values."""
        p1, p2 = self.io_port1, self.io_port2
        self.assertEqual('U78AB.001.WZSJBM3-P1-C2-T2', p1.loc_code)
        self.assertEqual('fcs1', p1.name)
        self.assertEqual('1aU78AB.001.WZSJBM3-P1-C2-T2', p1.udid)
        self.assertEqual('10000090FA1B6303', p1.wwpn)
        self.assertEqual(0, p1.npiv_available_ports)
        self.assertEqual(0, p1.npiv_total_ports)
        self.assertEqual('U78AB.001.WZSJBM3-P1-C2-T1', p2.loc_code)
        self.assertEqual('fcs0', p2.name)
        self.assertEqual('1aU78AB.001.WZSJBM3-P1-C2-T1', p2.udid)
        self.assertEqual('10000090FA1B6302', p2.wwpn)
        self.assertEqual(64, p2.npiv_available_ports)
        self.assertEqual(64, p2.npiv_total_ports)
class TestIOAdapterChoices(twrap.TestWrapper):
    """Tests for the free I/O adapters available for link aggregation."""
    file = 'fake_vios_ssp_npiv.txt'
    wrapper_class_to_test = vios.VIOS
    def setUp(self):
        super(TestIOAdapterChoices, self).setUp()
        self.io_adpts = self.dwrap.io_adpts_for_link_agg
    def test_adapter_choices(self):
        """Three adapters are offered; spot-check the first one."""
        self.assertEqual(len(self.io_adpts), 3)
        first = self.io_adpts[0]
        self.assertEqual(first.id, '1')
        self.assertEqual(
            first.description,
            '4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004)')
        self.assertEqual(first.dev_name, 'ent3')
        # NOTE: 'physicalEthernetAdpter' (sic) matches the REST data exactly.
        self.assertEqual(first.dev_type, 'physicalEthernetAdpter')
        self.assertEqual(first.drc_name, 'U78AB.001.WZSJBM3-P1-C7-T4')
        self.assertEqual(first.phys_loc_code, 'U78AB.001.WZSJBM3-P1-C7-T4')
        self.assertEqual(first.udid, '13U78AB.001.WZSJBM3-P1-C7-T4')
class TestFeed3(twrap.TestWrapper):
    """Tests that specifically need fake_vios_feed3.txt"""
    file = 'fake_vios_feed3.txt'
    wrapper_class_to_test = vios.VIOS
    def test_vivify_io_adpts_for_link_agg(self):
        """Vivifying FreeIOAdaptersForLinkAggregation adds the Network xag."""
        # The first VIOS doesn't have FreeIOAdapters...
        vwrp = self.dwrap
        self.assertIsNone(vwrp._find(vios._VIO_FREE_IO_ADPTS_FOR_LNAGG))
        # Vivify it - should be empty
        self.assertEqual([], vwrp.io_adpts_for_link_agg)
        # Now it's in there
        elem = vwrp._find(vios._VIO_FREE_IO_ADPTS_FOR_LNAGG)
        self.assertIsNotNone(elem)
        # Got the right xag
        self.assertEqual(c.XAG.VIO_NET, elem.attrib['group'])
    @mock.patch('warnings.warn')
    def test_xags(self, mock_warn):
        """Test deprecated extended attribute groups on the VIOS class.
        This can be removed once VIOS.xags is removed.

        NOTE: the assert_called_with/reset_mock pairs below are
        order-sensitive: each assertion checks the warning from the
        immediately preceding accessor.
        """
        expected = dict(NETWORK=c.XAG.VIO_NET, STORAGE=c.XAG.VIO_STOR,
                        SCSI_MAPPING=c.XAG.VIO_SMAP, FC_MAPPING=c.XAG.VIO_FMAP)
        for key, val in expected.items():
            # Test class accessor, ensure '.name' works.
            self.assertEqual(val, getattr(vios.VIOS.xags, key).name)
            mock_warn.assert_called_with(mock.ANY, DeprecationWarning)
            mock_warn.reset_mock()
            # Test instance accessor.
            self.assertEqual(val, getattr(self.dwrap.xags, key))
            mock_warn.assert_called_with(mock.ANY, DeprecationWarning)
            mock_warn.reset_mock()
        # And in case getattr(foo, 'bar') actually differs from foo.bar...
        self.assertEqual(c.XAG.VIO_NET, vios.VIOS.xags.NETWORK)
        mock_warn.assert_called_with(mock.ANY, DeprecationWarning)
        mock_warn.reset_mock()
        # Make sure the equality comparison works the other way
        self.assertEqual(self.dwrap.xags.NETWORK, c.XAG.VIO_NET)
        mock_warn.assert_called_with(mock.ANY, DeprecationWarning)
        # Test sorting
        self.assertTrue(c.XAG.VIO_NET < self.dwrap.xags.SCSI_MAPPING)
        self.assertTrue(self.dwrap.xags.NETWORK < c.XAG.VIO_SMAP)
# Allow running this test module directly (e.g. `python test_vios.py`).
if __name__ == "__main__":
    unittest.main()
| |
from parso.python import tree
from parso.python.token import (DEDENT, INDENT, ENDMARKER, NEWLINE, NUMBER,
STRING, tok_name, NAME)
from parso.parser import BaseParser
from parso.pgen2.parse import token_to_ilabel
class Parser(BaseParser):
    """
    This class is used to parse a Python file, it then divides them into a
    class structure of different scopes.

    :param pgen_grammar: The grammar object of pgen2. Loaded by load_grammar.
    """

    # Grammar symbol name -> specialized tree node class. Symbols not listed
    # here fall back to ``default_node`` in convert_node().
    node_map = {
        'expr_stmt': tree.ExprStmt,
        'classdef': tree.Class,
        'funcdef': tree.Function,
        'file_input': tree.Module,
        'import_name': tree.ImportName,
        'import_from': tree.ImportFrom,
        'break_stmt': tree.KeywordStatement,
        'continue_stmt': tree.KeywordStatement,
        'return_stmt': tree.ReturnStmt,
        'raise_stmt': tree.KeywordStatement,
        'yield_expr': tree.YieldExpr,
        'del_stmt': tree.KeywordStatement,
        'pass_stmt': tree.KeywordStatement,
        'global_stmt': tree.GlobalStmt,
        'nonlocal_stmt': tree.KeywordStatement,
        'print_stmt': tree.KeywordStatement,
        'assert_stmt': tree.AssertStmt,
        'if_stmt': tree.IfStmt,
        'with_stmt': tree.WithStmt,
        'for_stmt': tree.ForStmt,
        'while_stmt': tree.WhileStmt,
        'try_stmt': tree.TryStmt,
        'comp_for': tree.CompFor,
        # Not sure if this is the best idea, but IMO it's the easiest way to
        # avoid extreme amounts of work around the subtle difference of 2/3
        # grammar in list comprehensions.
        'list_for': tree.CompFor,
        # Same here. This just exists in Python 2.6.
        'gen_for': tree.CompFor,
        'decorator': tree.Decorator,
        'lambdef': tree.Lambda,
        'old_lambdef': tree.Lambda,
        'lambdef_nocond': tree.Lambda,
    }
    default_node = tree.PythonNode

    def __init__(self, pgen_grammar, error_recovery=True, start_symbol='file_input'):
        super(Parser, self).__init__(pgen_grammar, start_symbol, error_recovery=error_recovery)
        self.syntax_errors = []
        # Indent-counter values whose matching DEDENT must be dropped; filled
        # in error_recovery() when an INDENT token is discarded.
        self._omit_dedent_list = []
        self._indent_counter = 0
        # TODO do print absolute import detection here.
        # try:
        #     del python_grammar_no_print_statement.keywords["print"]
        # except KeyError:
        #     pass  # Doesn't exist in the Python 3 grammar.
        # if self.options["print_function"]:
        #     python_grammar = pygram.python_grammar_no_print_statement
        # else:

    def parse(self, tokens):
        """Parse the token stream, returning the root tree node.

        With error recovery enabled, only 'file_input' parsing is supported
        and the tokens are filtered through _recovery_tokenize() first.
        """
        if self._error_recovery:
            if self._start_symbol != 'file_input':
                raise NotImplementedError
            tokens = self._recovery_tokenize(tokens)
        node = super(Parser, self).parse(tokens)
        if self._start_symbol == 'file_input' != node.type:
            # If there's only one statement, we get back a non-module. That's
            # not what we want, we want a module, so we add it here:
            node = self.convert_node(
                self._pgen_grammar,
                self._pgen_grammar.symbol2number['file_input'],
                [node]
            )
        return node

    def convert_node(self, pgen_grammar, type, children):
        """
        Convert raw node information to a PythonBaseNode instance.

        This is passed to the parser driver which calls it whenever a reduction of a
        grammar rule produces a new complete node, so that the tree is built
        strictly bottom-up.
        """
        # TODO REMOVE symbol, we don't want type here.
        symbol = pgen_grammar.number2symbol[type]
        try:
            return self.node_map[symbol](children)
        except KeyError:
            if symbol == 'suite':
                # We don't want the INDENT/DEDENT in our parser tree. Those
                # leaves are just cancer. They are virtual leaves and not real
                # ones and therefore have pseudo start/end positions and no
                # prefixes. Just ignore them.
                children = [children[0]] + children[2:-1]
            elif symbol == 'list_if':
                # Make transitioning from 2 to 3 easier.
                symbol = 'comp_if'
            elif symbol == 'listmaker':
                # Same as list_if above.
                symbol = 'testlist_comp'
            return self.default_node(symbol, children)

    def convert_leaf(self, pgen_grammar, type, value, prefix, start_pos):
        """Build the appropriate leaf node for a single token."""
        # print('leaf', repr(value), token.tok_name[type])
        if type == NAME:
            # Keywords and plain names share the NAME token type; the grammar's
            # keyword set disambiguates them.
            if value in pgen_grammar.keywords:
                return tree.Keyword(value, start_pos, prefix)
            else:
                return tree.Name(value, start_pos, prefix)
        elif type == STRING:
            return tree.String(value, start_pos, prefix)
        elif type == NUMBER:
            return tree.Number(value, start_pos, prefix)
        elif type == NEWLINE:
            return tree.Newline(value, start_pos, prefix)
        elif type == ENDMARKER:
            return tree.EndMarker(value, start_pos, prefix)
        else:
            return tree.Operator(value, start_pos, prefix)

    def error_recovery(self, pgen_grammar, stack, arcs, typ, value, start_pos, prefix,
                       add_token_callback):
        """Handle a token the parser cannot shift.

        First tries to recover a file that merely lacks a trailing NEWLINE by
        simulating one; otherwise discards stack entries into a
        PythonErrorNode/PythonErrorLeaf and continues.
        """
        def get_symbol_and_nodes(stack):
            # Yield (grammar symbol name, children) for every stack entry.
            for dfa, state, (type_, nodes) in stack:
                symbol = pgen_grammar.number2symbol[type_]
                yield symbol, nodes

        tos_nodes = stack.get_tos_nodes()
        if tos_nodes:
            last_leaf = tos_nodes[-1].get_last_leaf()
        else:
            last_leaf = None
        # NOTE(review): if last_leaf is None and typ == DEDENT, the
        # `last_leaf.value` access below would raise — presumably a DEDENT
        # can't occur with an empty stack top; confirm.
        if self._start_symbol == 'file_input' and \
                (typ == ENDMARKER or typ == DEDENT and '\n' not in last_leaf.value):
            def reduce_stack(states, newstate):
                # reduce
                state = newstate
                while states[state] == [(0, state)]:
                    self.pgen_parser._pop()
                    dfa, state, (type_, nodes) = stack[-1]
                    states, first = dfa
            # In Python statements need to end with a newline. But since it's
            # possible (and valid in Python) that there's no newline at the
            # end of a file, we have to recover even if the user doesn't want
            # error recovery.
            #print('x', pprint.pprint(stack))
            ilabel = token_to_ilabel(pgen_grammar, NEWLINE, value)
            dfa, state, (type_, nodes) = stack[-1]
            symbol = pgen_grammar.number2symbol[type_]
            states, first = dfa
            arcs = states[state]
            # Look for a state with this label
            for i, newstate in arcs:
                if ilabel == i:
                    if symbol == 'simple_stmt':
                        # This is basically shifting
                        stack[-1] = (dfa, newstate, (type_, nodes))
                        reduce_stack(states, newstate)
                        add_token_callback(typ, value, start_pos, prefix)
                        return
                    # Check if we're at the right point
                    #for symbol, nodes in get_symbol_and_nodes(stack):
                    #    self.pgen_parser._pop()
                    #break
                    break
            #symbol = pgen_grammar.number2symbol[type_]
        if not self._error_recovery:
            return super(Parser, self).error_recovery(
                pgen_grammar, stack, arcs, typ, value, start_pos, prefix,
                add_token_callback)

        def current_suite(stack):
            # For now just discard everything that is not a suite or
            # file_input, if we detect an error.
            for index, (symbol, nodes) in reversed(list(enumerate(get_symbol_and_nodes(stack)))):
                # `suite` can sometimes be only simple_stmt, not stmt.
                if symbol == 'file_input':
                    break
                elif symbol == 'suite' and len(nodes) > 1:
                    # suites without an indent in them get discarded.
                    break
            return index, symbol, nodes

        index, symbol, nodes = current_suite(stack)
        # print('err', token.tok_name[typ], repr(value), start_pos, len(stack), index)
        if self._stack_removal(pgen_grammar, stack, arcs, index + 1, value, start_pos):
            add_token_callback(typ, value, start_pos, prefix)
        else:
            if typ == INDENT:
                # For every deleted INDENT we have to delete a DEDENT as well.
                # Otherwise the parser will get into trouble and DEDENT too early.
                self._omit_dedent_list.append(self._indent_counter)
            error_leaf = tree.PythonErrorLeaf(tok_name[typ].lower(), value, start_pos, prefix)
            stack[-1][2][1].append(error_leaf)
        if symbol == 'suite':
            dfa, state, node = stack[-1]
            states, first = dfa
            arcs = states[state]
            intended_label = pgen_grammar.symbol2label['stmt']
            # Introduce a proper state transition. We're basically allowing
            # there to be no valid statements inside a suite.
            if [x[0] for x in arcs] == [intended_label]:
                new_state = arcs[0][1]
                stack[-1] = dfa, new_state, node

    def _stack_removal(self, pgen_grammar, stack, arcs, start_index, value, start_pos):
        """Collapse stack entries from start_index onward into one error node.

        Returns True if any nodes were actually collected (i.e. the stack was
        truncated), False if there was nothing to remove.
        """
        failed_stack = False
        found = False
        all_nodes = []
        for dfa, state, (type_, nodes) in stack[start_index:]:
            if nodes:
                found = True
            if found:
                failed_stack = True
                all_nodes += nodes
        if failed_stack:
            stack[start_index - 1][2][1].append(tree.PythonErrorNode(all_nodes))
        stack[start_index:] = []
        return failed_stack

    def _recovery_tokenize(self, tokens):
        """Filter the token stream, dropping DEDENTs queued for omission."""
        for typ, value, start_pos, prefix in tokens:
            # print(tok_name[typ], repr(value), start_pos, repr(prefix))
            if typ == DEDENT:
                # We need to count indents, because if we just omit any DEDENT,
                # we might omit them in the wrong place.
                o = self._omit_dedent_list
                if o and o[-1] == self._indent_counter:
                    o.pop()
                    continue
                self._indent_counter -= 1
            elif typ == INDENT:
                self._indent_counter += 1
            yield typ, value, start_pos, prefix
| |
# Copyright 2013
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from django import template
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from horizon import tabs
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.routers.extensions.routerrules\
import rulemanager
from openstack_dashboard.dashboards.project.routers.extensions.routerrules\
import tables as rrtbl
class RouterRulesTab(tabs.TableTab):
    """Table tab listing a router's routing rules."""

    table_classes = (rrtbl.RouterRulesTable,)
    name = _("Router Rules")
    slug = "routerrules"
    template_name = "horizon/common/_detail_table.html"

    def allowed(self, request):
        """Show the tab only when the router exposes ``router_rules``."""
        router = self.tab_group.kwargs['router']
        try:
            getattr(router, 'router_rules')
        except Exception:
            return False
        return True

    def get_routerrules_data(self):
        """Return the router's rules wrapped as RuleObject table rows."""
        try:
            rules = getattr(self.tab_group.kwargs['router'], 'router_rules')
        except Exception:
            rules = []
        return [rulemanager.RuleObject(rule) for rule in rules]

    def post(self, request, *args, **kwargs):
        """Handle the table's "reset rules" action and refresh the router."""
        action = request.POST['action']
        if action == 'routerrules__resetrules':
            kwargs['reset_rules'] = True
            rulemanager.remove_rules(request, [], **kwargs)
            refreshed = api.neutron.router_get(request, kwargs['router_id'])
            self.tab_group.kwargs['router'] = refreshed
class RulesGridTab(tabs.Tab):
    """Tab rendering the router-rules connectivity matrix ("grid") view."""
    name = _("Router Rules Grid")
    slug = "rulesgrid"
    template_name = ("project/routers/extensions/routerrules/grid.html")

    def allowed(self, request):
        # Tab is shown only if the router object exposes 'router_rules'.
        try:
            getattr(self.tab_group.kwargs['router'], 'router_rules')
            return True
        except Exception:
            return False

    def render(self):
        # NOTE(review): ``context_instance`` was removed in Django 1.10 —
        # presumably this targets an older Django release; verify.
        context = template.RequestContext(self.request)
        return render_to_string(self.get_template_name(self.request),
                                self.data, context_instance=context)

    def get_context_data(self, request, **kwargs):
        """Build template context; includes the matrix only if rules are supported."""
        data = {'router': {'id':
                           self.tab_group.kwargs['router_id']}}
        self.request = request
        rules, supported = self.get_routerrules_data(checksupport=True)
        if supported:
            data["rulesmatrix"] = self.get_routerrulesgrid_data(rules)
        return data

    def get_routerrulesgrid_data(self, rules):
        """Return a source x target connectivity matrix over the router's subnets.

        Rows/columns are the subnets attached to the router's ports, plus the
        synthetic 'external' and 'any' pseudo-subnets.
        """
        ports = self.tab_group.kwargs['ports']
        networks = api.neutron.network_list_for_tenant(
            self.request, self.request.user.tenant_id)
        netnamemap = {}
        subnetmap = {}
        # Index network/subnet names and CIDRs by id for the loop below.
        for n in networks:
            netnamemap[n['id']] = n.name_or_id
            for s in n.subnets:
                subnetmap[s.id] = {'name': s.name,
                                   'cidr': s.cidr}
        matrix = []
        subnets = []
        for port in ports:
            for ip in port['fixed_ips']:
                # Skip IPs on subnets outside this tenant's networks.
                if ip['subnet_id'] not in subnetmap:
                    continue
                sub = {'ip': ip['ip_address'],
                       'subnetid': ip['subnet_id'],
                       'subnetname': subnetmap[ip['subnet_id']]['name'],
                       'networkid': port['network_id'],
                       'networkname': netnamemap[port['network_id']],
                       'cidr': subnetmap[ip['subnet_id']]['cidr']}
                subnets.append(sub)
        # Synthetic rows for traffic to/from outside the router.
        subnets.append({'ip': '0.0.0.0',
                        'subnetid': 'external',
                        'subnetname': '',
                        'networkname': 'external',
                        'networkid': 'external',
                        'cidr': '0.0.0.0/0'})
        subnets.append({'ip': '0.0.0.0',
                        'subnetid': 'any',
                        'subnetname': '',
                        'networkname': 'any',
                        'networkid': 'any',
                        'cidr': '0.0.0.0/0'})
        for source in subnets:
            row = {'source': dict(source),
                   'targets': []}
            for target in subnets:
                # dict() copies guard against the in-place update of `target`.
                target.update(self._get_subnet_connectivity(
                    source, target, rules))
                row['targets'].append(dict(target))
            matrix.append(row)
        return matrix

    def _get_subnet_connectivity(self, src_sub, dst_sub, rules):
        """Classify reachability from src_sub to dst_sub under `rules`.

        Returns a dict with 'reachable' ('full'/'partial'/'none'/''), the
        inverse rule that would flip the result, and, when the most specific
        matching rule exactly covers this pair, the rule to delete.
        """
        v4_any_words = ['external', 'any']
        connectivity = {'reachable': '',
                        'inverse_rule': {},
                        'rule_to_delete': False}
        src = src_sub['cidr']
        dst = dst_sub['cidr']
        # differentiate between external and any
        src_rulename = src_sub['subnetid'] if src == '0.0.0.0/0' else src
        dst_rulename = dst_sub['subnetid'] if dst == '0.0.0.0/0' else dst
        if str(src) == str(dst):
            # Same subnet (or same pseudo-subnet): always fully reachable.
            connectivity['reachable'] = 'full'
            return connectivity
        matchingrules = []
        for rule in rules:
            rd = rule['destination']
            if rule['destination'] in v4_any_words:
                rd = {'cidr': '0.0.0.0/0'}
            rs = rule['source']
            if rule['source'] in v4_any_words:
                rs = {'cidr': '0.0.0.0/0'}
            rs = netaddr.IPNetwork(rs['cidr'])
            src = netaddr.IPNetwork(src)
            rd = netaddr.IPNetwork(rd['cidr'])
            dst = netaddr.IPNetwork(dst)
            # check if cidrs are affected by rule first
            # NOTE(review): >=/<= means ranges that touch at exactly one
            # address are treated as non-overlapping — confirm intended.
            if (int(dst.network) >= int(rd.broadcast) or
                    int(dst.broadcast) <= int(rd.network) or
                    int(src.network) >= int(rs.broadcast) or
                    int(src.broadcast) <= int(rs.network)):
                continue
            # skip matching rules for 'any' and 'external' networks
            if (str(dst) == '0.0.0.0/0' and str(rd) != '0.0.0.0/0'):
                continue
            if (str(src) == '0.0.0.0/0' and str(rs) != '0.0.0.0/0'):
                continue
            # external network rules only affect external traffic
            if (rule['source'] == 'external' and
                    src_rulename not in v4_any_words):
                continue
            if (rule['destination'] == 'external' and
                    dst_rulename not in v4_any_words):
                continue
            match = {'bitsinsrc': rs.prefixlen,
                     'bitsindst': rd.prefixlen,
                     'rule': rule}
            matchingrules.append(match)
        if not matchingrules:
            connectivity['reachable'] = 'none'
            connectivity['inverse_rule'] = {'source': src_rulename,
                                            'destination': dst_rulename,
                                            'action': 'permit'}
            return connectivity
        # Longest-prefix match wins: most specific source, then destination.
        sortedrules = sorted(matchingrules,
                             key=lambda k: (k['bitsinsrc'], k['bitsindst']),
                             reverse=True)
        match = sortedrules[0]
        if (match['bitsinsrc'] > src.prefixlen or
                match['bitsindst'] > dst.prefixlen):
            # Rule is narrower than the subnet pair: only part is affected.
            connectivity['reachable'] = 'partial'
            connectivity['conflicting_rule'] = match['rule']
            return connectivity
        if (match['rule']['source'] == src_rulename and
                match['rule']['destination'] == dst_rulename):
            connectivity['rule_to_delete'] = match['rule']
        if match['rule']['action'] == 'permit':
            connectivity['reachable'] = 'full'
            inverseaction = 'deny'
        else:
            connectivity['reachable'] = 'none'
            inverseaction = 'permit'
        connectivity['inverse_rule'] = {'source': src_rulename,
                                        'destination': dst_rulename,
                                        'action': inverseaction}
        return connectivity

    def get_routerrules_data(self, checksupport=False):
        """Fetch the router's rules; optionally report whether rules are supported."""
        try:
            routerrules = getattr(self.tab_group.kwargs['router'],
                                  'router_rules')
            supported = True
        except Exception:
            routerrules = []
            supported = False
        if checksupport:
            return routerrules, supported
        return routerrules
| |
import contextlib
import itertools
from sqlalchemy import bindparam
from sqlalchemy import exc as sa_exc
from sqlalchemy import func
from sqlalchemy import testing
from sqlalchemy.ext import baked
from sqlalchemy.orm import aliased
from sqlalchemy.orm import backref
from sqlalchemy.orm import defaultload
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy.orm import lazyload
from sqlalchemy.orm import Load
from sqlalchemy.orm import mapper
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.orm import subqueryload
from sqlalchemy.orm.query import Query
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import eq_
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_not_
from sqlalchemy.testing import mock
from sqlalchemy.testing.assertsql import CompiledSQL
from test.orm import _fixtures
class BakedTest(_fixtures.FixtureTest):
    """Base fixture for baked-query tests: one mapper/insert pass per class."""
    run_setup_mappers = "once"
    run_inserts = "once"
    run_deletes = None

    def setup(self):
        # Fresh bakery per test so cached baked queries don't leak across tests.
        self.bakery = baked.bakery()
class StateChangeTest(BakedTest):
    """Tests covering how a BakedQuery accumulates steps and its cache key."""

    @classmethod
    def setup_mappers(cls):
        mapper(cls.classes.User, cls.tables.users)

    def _assert_cache_key(self, key, elements):
        # The cache key is the tuple of the code objects of every step.
        eq_(key, tuple(fn.__code__ for fn in elements))

    def test_initial_key(self):
        User = self.classes.User
        session = Session()

        def base():
            return session.query(User)

        bq = self.bakery(base)
        self._assert_cache_key(bq._cache_key, [base])
        eq_(bq.steps, [base])

    def test_inplace_add(self):
        User = self.classes.User
        session = Session()

        def base():
            return session.query(User)

        def crit(q):
            return q.filter(User.name == bindparam("name"))

        bq = self.bakery(base)
        self._assert_cache_key(bq._cache_key, [base])
        eq_(bq.steps, [base])
        # add_criteria mutates in place and returns the same object.
        result = bq.add_criteria(crit)
        is_(result, bq)
        self._assert_cache_key(bq._cache_key, [base, crit])
        eq_(bq.steps, [base, crit])

    def test_inplace_add_operator(self):
        User = self.classes.User
        session = Session()

        def base():
            return session.query(User)

        def crit(q):
            return q.filter(User.name == bindparam("name"))

        bq = self.bakery(base)
        self._assert_cache_key(bq._cache_key, [base])
        # += is the operator form of add_criteria.
        bq += crit
        self._assert_cache_key(bq._cache_key, [base, crit])

    def test_chained_add(self):
        User = self.classes.User
        session = Session()

        def base():
            return session.query(User)

        def crit(q):
            return q.filter(User.name == bindparam("name"))

        bq = self.bakery(base)
        # with_criteria returns a new BakedQuery; the original is untouched.
        forked = bq.with_criteria(crit)
        is_not_(forked, bq)
        self._assert_cache_key(bq._cache_key, [base])
        self._assert_cache_key(forked._cache_key, [base, crit])

    def test_chained_add_operator(self):
        User = self.classes.User
        session = Session()

        def base():
            return session.query(User)

        def crit(q):
            return q.filter(User.name == bindparam("name"))

        bq = self.bakery(base)
        # + is the operator form of with_criteria.
        forked = bq + crit
        is_not_(forked, bq)
        self._assert_cache_key(bq._cache_key, [base])
        self._assert_cache_key(forked._cache_key, [base, crit])
class LikeQueryTest(BakedTest):
    """Tests that BakedQuery result methods behave like plain Query's."""

    @classmethod
    def setup_mappers(cls):
        User = cls.classes.User
        mapper(User, cls.tables.users)

    def test_first_no_result(self):
        User = self.classes.User
        bq = self.bakery(lambda s: s.query(User))
        bq += lambda q: q.filter(User.name == "asdf")
        eq_(bq(Session()).first(), None)

    def test_first_multiple_result(self):
        User = self.classes.User
        bq = self.bakery(lambda s: s.query(User.id))
        bq += lambda q: q.filter(User.name.like("%ed%")).order_by(User.id)
        eq_(bq(Session()).first(), (8,))

    def test_one_or_none_no_result(self):
        User = self.classes.User
        bq = self.bakery(lambda s: s.query(User))
        bq += lambda q: q.filter(User.name == "asdf")
        eq_(bq(Session()).one_or_none(), None)

    def test_one_or_none_result(self):
        User = self.classes.User
        bq = self.bakery(lambda s: s.query(User))
        bq += lambda q: q.filter(User.name == "ed")
        u1 = bq(Session()).one_or_none()
        eq_(u1.name, "ed")

    def test_one_or_none_multiple_result(self):
        User = self.classes.User
        bq = self.bakery(lambda s: s.query(User))
        bq += lambda q: q.filter(User.name.like("%ed%"))
        assert_raises_message(
            orm_exc.MultipleResultsFound,
            "Multiple rows were found for one_or_none()",
            bq(Session()).one_or_none,
        )

    def test_one_no_result(self):
        User = self.classes.User
        bq = self.bakery(lambda s: s.query(User))
        bq += lambda q: q.filter(User.name == "asdf")
        assert_raises_message(
            orm_exc.NoResultFound,
            "No row was found for one()",
            bq(Session()).one,
        )

    def test_one_result(self):
        User = self.classes.User
        bq = self.bakery(lambda s: s.query(User))
        bq += lambda q: q.filter(User.name == "ed")
        u1 = bq(Session()).one()
        eq_(u1.name, "ed")

    def test_one_multiple_result(self):
        User = self.classes.User
        bq = self.bakery(lambda s: s.query(User))
        bq += lambda q: q.filter(User.name.like("%ed%"))
        assert_raises_message(
            orm_exc.MultipleResultsFound,
            "Multiple rows were found for one()",
            bq(Session()).one,
        )

    def test_get(self):
        User = self.classes.User
        bq = self.bakery(lambda s: s.query(User))
        sess = Session()

        # First get() emits SQL...
        def go():
            u1 = bq(sess).get(7)
            eq_(u1.name, "jack")

        self.assert_sql_count(testing.db, go, 1)
        u1 = sess.query(User).get(7)  # noqa

        # ...a second get() of the same pk hits the identity map, zero SQL.
        def go():
            u2 = bq(sess).get(7)
            eq_(u2.name, "jack")

        self.assert_sql_count(testing.db, go, 0)

        def go():
            u2 = bq(sess).get(8)
            eq_(u2.name, "ed")

        self.assert_sql_count(testing.db, go, 1)

    def test_scalar(self):
        User = self.classes.User
        bq = self.bakery(lambda s: s.query(User.id))
        sess = Session()
        bq += lambda q: q.filter(User.id == 7)
        eq_(bq(sess).scalar(), 7)

    def test_count(self):
        User = self.classes.User
        bq = self.bakery(lambda s: s.query(User))
        sess = Session()
        eq_(bq(sess).count(), 4)
        bq += lambda q: q.filter(User.id.in_([8, 9]))
        eq_(bq(sess).count(), 2)
        # original query still works
        eq_(
            set([(u.id, u.name) for u in bq(sess).all()]),
            set([(8, "ed"), (9, "fred")]),
        )

    def test_count_with_bindparams(self):
        User = self.classes.User
        bq = self.bakery(lambda s: s.query(User))
        sess = Session()
        eq_(bq(sess).count(), 4)
        bq += lambda q: q.filter(User.name == bindparam("uname"))
        # calling with *args
        eq_(bq(sess).params(uname="fred").count(), 1)
        # with multiple params, the **kwargs will be used
        bq += lambda q: q.filter(User.id == bindparam("anid"))
        eq_(bq(sess).params(uname="fred", anid=9).count(), 1)
        eq_(
            # wrong id, so 0 results:
            bq(sess).params(uname="fred", anid=8).count(),
            0,
        )

    def test_get_pk_w_null(self):
        """test the re-implementation of logic to do get with IS NULL."""

        class AddressUser(object):
            pass

        # Composite-pk mapping over an outer join; address_id may be NULL.
        mapper(
            AddressUser,
            self.tables.users.outerjoin(self.tables.addresses),
            properties={
                "id": self.tables.users.c.id,
                "address_id": self.tables.addresses.c.id,
            },
        )
        bq = self.bakery(lambda s: s.query(AddressUser))
        sess = Session()

        def go():
            u1 = bq(sess).get((10, None))
            eq_(u1.name, "chuck")

        self.assert_sql_count(testing.db, go, 1)
        u1 = sess.query(AddressUser).get((10, None))  # noqa

        def go():
            u2 = bq(sess).get((10, None))
            eq_(u2.name, "chuck")

        self.assert_sql_count(testing.db, go, 0)

    def test_get_includes_getclause(self):
        # test issue #3597
        User = self.classes.User
        bq = self.bakery(lambda s: s.query(User))
        for i in range(5):
            sess = Session()
            u1 = bq(sess).get(7)
            eq_(u1.name, "jack")
            sess.close()
        eq_(len(bq._bakery), 2)
        # simulate race where mapper._get_clause
        # may be generated more than once
        from sqlalchemy import inspect

        del inspect(User).__dict__["_get_clause"]
        for i in range(5):
            sess = Session()
            u1 = bq(sess).get(7)
            eq_(u1.name, "jack")
            sess.close()
        eq_(len(bq._bakery), 4)
class ResultPostCriteriaTest(BakedTest):
    """Tests for with_post_criteria(): options applied after cache retrieval."""

    @classmethod
    def setup_mappers(cls):
        User = cls.classes.User
        Address = cls.classes.Address
        Order = cls.classes.Order
        mapper(
            User,
            cls.tables.users,
            properties={
                "addresses": relationship(
                    Address, order_by=cls.tables.addresses.c.id
                ),
                "orders": relationship(Order, order_by=cls.tables.orders.c.id),
            },
        )
        mapper(Address, cls.tables.addresses)
        mapper(Order, cls.tables.orders)

    @contextlib.contextmanager
    def _fixture(self):
        # The event listener asserts the execution option set via
        # with_post_criteria actually reached the connection.
        from sqlalchemy import event

        User = self.classes.User
        with testing.db.connect() as conn:

            @event.listens_for(conn, "before_execute")
            def before_execute(conn, clauseelement, multiparams, params):
                assert "yes" in conn._execution_options

            bq = self.bakery(lambda s: s.query(User.id).order_by(User.id))
            sess = Session(conn)
            yield sess, bq

    def test_first(self):
        with self._fixture() as (sess, bq):
            result = bq(sess).with_post_criteria(
                lambda q: q.execution_options(yes=True)
            )
            eq_(result.first(), (7,))

    def test_iter(self):
        with self._fixture() as (sess, bq):
            result = bq(sess).with_post_criteria(
                lambda q: q.execution_options(yes=True)
            )
            eq_(list(result)[0], (7,))

    def test_spoiled(self):
        # spoil() bypasses the cache; post criteria must still apply.
        with self._fixture() as (sess, bq):
            result = bq.spoil()(sess).with_post_criteria(
                lambda q: q.execution_options(yes=True)
            )
            eq_(list(result)[0], (7,))

    def test_get(self):
        User = self.classes.User
        with self._fixture() as (sess, bq):
            bq = self.bakery(lambda s: s.query(User))
            result = bq(sess).with_post_criteria(
                lambda q: q.execution_options(yes=True)
            )
            eq_(result.get(7), User(id=7))
class ResultTest(BakedTest):
    """End-to-end result tests: caching, spoiling, conditionals, eager loads."""
    __backend__ = True

    @classmethod
    def setup_mappers(cls):
        User = cls.classes.User
        Address = cls.classes.Address
        Order = cls.classes.Order
        mapper(
            User,
            cls.tables.users,
            properties={
                "addresses": relationship(
                    Address, order_by=cls.tables.addresses.c.id
                ),
                "orders": relationship(Order, order_by=cls.tables.orders.c.id),
            },
        )
        mapper(Address, cls.tables.addresses)
        mapper(Order, cls.tables.orders)

    def test_cachekeys_on_constructor(self):
        # Extra args to bakery() distinguish otherwise-identical fn cache keys.
        User = self.classes.User
        queue = [7, 8]

        def fn(s):
            return s.query(User.id).filter_by(id=queue.pop(0))

        bq1 = self.bakery(fn, 7)
        bq2 = self.bakery(fn, 8)
        for i in range(3):
            session = Session(autocommit=True)
            eq_(bq1(session).all(), [(7,)])
            eq_(bq2(session).all(), [(8,)])

    def test_no_steps(self):
        User = self.classes.User
        bq = self.bakery(
            lambda s: s.query(User.id, User.name).order_by(User.id)
        )
        for i in range(3):
            session = Session(autocommit=True)
            eq_(
                bq(session).all(),
                [(7, "jack"), (8, "ed"), (9, "fred"), (10, "chuck")],
            )

    def test_different_limits(self):
        User = self.classes.User
        bq = self.bakery(
            lambda s: s.query(User.id, User.name).order_by(User.id)
        )
        # limit/offset as bind params: one cached statement, many values.
        bq += lambda q: q.limit(bindparam("limit")).offset(bindparam("offset"))
        session = Session(autocommit=True)
        for i in range(4):
            for limit, offset, exp in [
                (2, 1, [(8, "ed"), (9, "fred")]),
                (3, 0, [(7, "jack"), (8, "ed"), (9, "fred")]),
                (1, 2, [(9, "fred")]),
            ]:
                eq_(bq(session).params(limit=limit, offset=offset).all(), exp)

    def test_disable_on_session(self):
        # enable_baked_queries=False: every step fn runs on every call.
        User = self.classes.User
        canary = mock.Mock()

        def fn1(s):
            canary.fn1()
            return s.query(User.id, User.name).order_by(User.id)

        def fn2(q):
            canary.fn2()
            return q.filter(User.id == bindparam("id"))

        def fn3(q):
            canary.fn3()
            return q

        for x in range(3):
            bq = self.bakery(fn1)
            bq += fn2
            sess = Session(autocommit=True, enable_baked_queries=False)
            eq_(bq.add_criteria(fn3)(sess).params(id=7).all(), [(7, "jack")])
        eq_(
            canary.mock_calls,
            [
                mock.call.fn1(),
                mock.call.fn2(),
                mock.call.fn3(),
                mock.call.fn1(),
                mock.call.fn2(),
                mock.call.fn3(),
                mock.call.fn1(),
                mock.call.fn2(),
                mock.call.fn3(),
            ],
        )

    def test_spoiled_full_w_params(self):
        # spoil(full=True): all steps re-run each invocation.
        User = self.classes.User
        canary = mock.Mock()

        def fn1(s):
            canary.fn1()
            return s.query(User.id, User.name).order_by(User.id)

        def fn2(q):
            canary.fn2()
            return q.filter(User.id == bindparam("id"))

        def fn3(q):
            canary.fn3()
            return q

        for x in range(3):
            bq = self.bakery(fn1)
            bq += fn2
            sess = Session(autocommit=True)
            eq_(
                bq.spoil(full=True).add_criteria(fn3)(sess).params(id=7).all(),
                [(7, "jack")],
            )
        eq_(
            canary.mock_calls,
            [
                mock.call.fn1(),
                mock.call.fn2(),
                mock.call.fn3(),
                mock.call.fn1(),
                mock.call.fn2(),
                mock.call.fn3(),
                mock.call.fn1(),
                mock.call.fn2(),
                mock.call.fn3(),
            ],
        )

    def test_spoiled_half_w_params(self):
        # plain spoil(): cached steps fn1/fn2 run once; fn3 runs every time.
        User = self.classes.User
        canary = mock.Mock()

        def fn1(s):
            canary.fn1()
            return s.query(User.id, User.name).order_by(User.id)

        def fn2(q):
            canary.fn2()
            return q.filter(User.id == bindparam("id"))

        def fn3(q):
            canary.fn3()
            return q

        bq = self.bakery(fn1)
        bq += fn2
        for x in range(3):
            bq = self.bakery(fn1)
            bq += fn2
            sess = Session(autocommit=True)
            eq_(
                bq.spoil().add_criteria(fn3)(sess).params(id=7).all(),
                [(7, "jack")],
            )
        eq_(
            canary.mock_calls,
            [
                mock.call.fn1(),
                mock.call.fn2(),
                mock.call.fn3(),
                mock.call.fn3(),
                mock.call.fn3(),
            ],
        )

    def test_w_new_entities(self):
        """Test that the query can have its entities modified in
        an arbitrary callable, and that this new entity list is preserved
        when the query is invoked.

        """
        User = self.classes.User
        bq = self.bakery(lambda s: s.query(User.id, User.name))
        bq += lambda q: q.from_self().with_entities(func.count(User.id))
        for i in range(3):
            session = Session(autocommit=True)
            eq_(bq(session).all(), [(4,)])

    def test_conditional_step(self):
        """Test a large series of conditionals and assert that
        results remain correct between all of them within a series
        of loops.

        """
        User = self.classes.User
        base_bq = self.bakery(lambda s: s.query(User.id, User.name))
        base_bq += lambda q: q.order_by(User.id)
        for i in range(4):
            for cond1, cond2, cond3, cond4 in itertools.product(
                *[(False, True) for j in range(4)]
            ):
                bq = base_bq._clone()
                if cond1:
                    bq += lambda q: q.filter(User.name != "jack")
                    if cond2:
                        bq += lambda q: q.join(User.addresses)
                    else:
                        bq += lambda q: q.outerjoin(User.addresses)
                elif cond3:
                    bq += lambda q: q.filter(User.name.like("%ed%"))
                else:
                    bq += lambda q: q.filter(User.name == "jack")
                if cond4:
                    bq += lambda q: q.from_self().with_entities(
                        func.count(User.id)
                    )
                sess = Session(autocommit=True)
                result = bq(sess).all()
                if cond4:
                    if cond1:
                        if cond2:
                            eq_(result, [(4,)])
                        else:
                            eq_(result, [(5,)])
                    elif cond3:
                        eq_(result, [(2,)])
                    else:
                        eq_(result, [(1,)])
                else:
                    if cond1:
                        if cond2:
                            eq_(
                                result,
                                [(8, "ed"), (8, "ed"), (8, "ed"), (9, "fred")],
                            )
                        else:
                            eq_(
                                result,
                                [
                                    (8, "ed"),
                                    (8, "ed"),
                                    (8, "ed"),
                                    (9, "fred"),
                                    (10, "chuck"),
                                ],
                            )
                    elif cond3:
                        eq_(result, [(8, "ed"), (9, "fred")])
                    else:
                        eq_(result, [(7, "jack")])
                sess.close()

    def test_conditional_step_oneline(self):
        User = self.classes.User
        base_bq = self.bakery(lambda s: s.query(User.id, User.name))
        base_bq += lambda q: q.order_by(User.id)
        for i in range(4):
            for cond1 in (False, True):
                bq = base_bq._clone()
                # we were using (filename, firstlineno) as cache key,
                # which fails for this kind of thing!
                bq += (
                    (lambda q: q.filter(User.name != "jack"))
                    if cond1
                    else (lambda q: q.filter(User.name == "jack"))
                )  # noqa
                sess = Session(autocommit=True)
                result = bq(sess).all()
                if cond1:
                    eq_(result, [(8, u"ed"), (9, u"fred"), (10, u"chuck")])
                else:
                    eq_(result, [(7, "jack")])
                sess.close()

    def test_to_query_query(self):
        # Baked subquery embedded in another baked query via to_query(q).
        User = self.classes.User
        Address = self.classes.Address
        sub_bq = self.bakery(lambda s: s.query(User.name))
        sub_bq += (
            lambda q: q.filter(User.id == Address.user_id)
            .filter(User.name == "ed")
            .correlate(Address)
        )
        main_bq = self.bakery(lambda s: s.query(Address.id))
        main_bq += lambda q: q.filter(sub_bq.to_query(q).exists())
        main_bq += lambda q: q.order_by(Address.id)
        sess = Session()
        result = main_bq(sess).all()
        eq_(result, [(2,), (3,), (4,)])

    def test_to_query_session(self):
        # to_query() also accepts a Session at the outermost level.
        User = self.classes.User
        Address = self.classes.Address
        sub_bq = self.bakery(lambda s: s.query(User.name))
        sub_bq += lambda q: q.filter(User.id == Address.user_id).correlate(
            Address
        )
        main_bq = self.bakery(
            lambda s: s.query(Address.id, sub_bq.to_query(s).scalar_subquery())
        )
        main_bq += lambda q: q.filter(
            sub_bq.to_query(q).scalar_subquery() == "ed"
        )
        main_bq += lambda q: q.order_by(Address.id)
        sess = Session()
        result = main_bq(sess).all()
        eq_(result, [(2, "ed"), (3, "ed"), (4, "ed")])

    def test_to_query_args(self):
        User = self.classes.User
        sub_bq = self.bakery(lambda s: s.query(User.name))
        q = Query([], None)
        assert_raises_message(
            sa_exc.ArgumentError,
            "Given Query needs to be associated with a Session",
            sub_bq.to_query,
            q,
        )
        assert_raises_message(
            TypeError,
            "Query or Session object expected, got .*'int'.*",
            sub_bq.to_query,
            5,
        )

    def test_subquery_eagerloading(self):
        User = self.classes.User
        Address = self.classes.Address
        Order = self.classes.Order
        # Override the default bakery for one with a smaller size. This used to
        # trigger a bug when unbaking subqueries.
        self.bakery = baked.bakery(size=3)
        base_bq = self.bakery(lambda s: s.query(User))
        base_bq += lambda q: q.options(
            subqueryload(User.addresses), subqueryload(User.orders)
        )
        base_bq += lambda q: q.order_by(User.id)
        assert_result = [
            User(
                id=7,
                addresses=[Address(id=1, email_address="jack@bean.com")],
                orders=[Order(id=1), Order(id=3), Order(id=5)],
            ),
            User(
                id=8,
                addresses=[
                    Address(id=2, email_address="ed@wood.com"),
                    Address(id=3, email_address="ed@bettyboop.com"),
                    Address(id=4, email_address="ed@lala.com"),
                ],
            ),
            User(
                id=9,
                addresses=[Address(id=5)],
                orders=[Order(id=2), Order(id=4)],
            ),
            User(id=10, addresses=[]),
        ]
        for i in range(4):
            for cond1, cond2 in itertools.product(
                *[(False, True) for j in range(2)]
            ):
                bq = base_bq._clone()
                sess = Session()
                if cond1:
                    bq += lambda q: q.filter(User.name == "jack")
                else:
                    bq += lambda q: q.filter(User.name.like("%ed%"))
                if cond2:
                    ct = func.count(Address.id).label("count")
                    subq = (
                        sess.query(ct, Address.user_id)
                        .group_by(Address.user_id)
                        .having(ct > 2)
                        .subquery()
                    )
                    bq += lambda q: q.join(subq)
                # SQL counts: 1 = main only (no rows -> no subquery loads);
                # 3 = main + two subqueryload statements.
                if cond2:
                    if cond1:

                        def go():
                            result = bq(sess).all()
                            eq_([], result)

                        self.assert_sql_count(testing.db, go, 1)
                    else:

                        def go():
                            result = bq(sess).all()
                            eq_(assert_result[1:2], result)

                        self.assert_sql_count(testing.db, go, 3)
                else:
                    if cond1:

                        def go():
                            result = bq(sess).all()
                            eq_(assert_result[0:1], result)

                        self.assert_sql_count(testing.db, go, 3)
                    else:

                        def go():
                            result = bq(sess).all()
                            eq_(assert_result[1:3], result)

                        self.assert_sql_count(testing.db, go, 3)
                sess.close()

    def test_subqueryload_post_context(self):
        User = self.classes.User
        Address = self.classes.Address
        assert_result = [
            User(
                id=7, addresses=[Address(id=1, email_address="jack@bean.com")]
            )
        ]
        self.bakery = baked.bakery(size=3)
        bq = self.bakery(lambda s: s.query(User))
        bq += lambda q: q.options(subqueryload(User.addresses))
        bq += lambda q: q.order_by(User.id)
        bq += lambda q: q.filter(User.name == bindparam("name"))
        sess = Session()

        def set_params(q):
            return q.params(name="jack")

        # test that the changes we make using with_post_criteria()
        # are also applied to the subqueryload query.
        def go():
            result = bq(sess).with_post_criteria(set_params).all()
            eq_(assert_result, result)

        self.assert_sql_count(testing.db, go, 2)
class LazyLoaderTest(testing.AssertsCompiledSQL, BakedTest):
    """Tests for the baked lazy-load strategy ("baked_select"): when lazy
    loads are added to the per-strategy LRU cache (the "bakery"), when
    unsafe loader options cancel caching, and the resulting SQL counts."""
    run_setup_mappers = "each"
    def _o2m_fixture(self, lazy="select", **kw):
        # One-to-many fixture: User.addresses with a configurable lazy strategy.
        User = self.classes.User
        Address = self.classes.Address
        mapper(
            User,
            self.tables.users,
            properties={
                "addresses": relationship(
                    Address,
                    order_by=self.tables.addresses.c.id,
                    lazy=lazy,
                    **kw
                )
            },
        )
        mapper(Address, self.tables.addresses)
        return User, Address
    def _o2m_twolevel_fixture(self, lazy="select", **kw):
        # Two-level one-to-many fixture: User.addresses -> Address.dingalings.
        User = self.classes.User
        Address = self.classes.Address
        Dingaling = self.classes.Dingaling
        mapper(
            User,
            self.tables.users,
            properties={
                "addresses": relationship(
                    Address,
                    order_by=self.tables.addresses.c.id,
                    lazy=lazy,
                    **kw
                )
            },
        )
        mapper(
            Address,
            self.tables.addresses,
            properties={"dingalings": relationship(Dingaling, lazy=lazy)},
        )
        mapper(Dingaling, self.tables.dingalings)
        return User, Address, Dingaling
    def _m2o_fixture(self):
        # Many-to-one fixture: Address.user.
        User = self.classes.User
        Address = self.classes.Address
        mapper(User, self.tables.users)
        mapper(
            Address,
            self.tables.addresses,
            properties={"user": relationship(User)},
        )
        return User, Address
    def test_unsafe_unbound_option_cancels_bake(self):
        """An unbound option using of_type() against an ad-hoc aliased
        subclass is not cache-safe and must keep the lazy load un-baked."""
        User, Address, Dingaling = self._o2m_twolevel_fixture(lazy="joined")
        class SubDingaling(Dingaling):
            pass
        mapper(SubDingaling, None, inherits=Dingaling)
        lru = Address.dingalings.property._lazy_strategy._bakery(
            lambda q: None
        )._bakery
        l1 = len(lru)
        for i in range(5):
            sess = Session()
            u1 = (
                sess.query(User)
                .options(
                    defaultload(User.addresses).lazyload(
                        Address.dingalings.of_type(aliased(SubDingaling))
                    )
                )
                .first()
            )
            for ad in u1.addresses:
                ad.dingalings
        l2 = len(lru)
        eq_(l1, 0)
        # only one cache entry after five runs: the unsafe lazy load itself
        # was not added to the LRU.
        eq_(l2, 1)
    def test_unsafe_bound_option_cancels_bake(self):
        """Same scenario with a bound Load() option; caching is still
        canceled for the lazy load."""
        User, Address, Dingaling = self._o2m_twolevel_fixture(lazy="joined")
        class SubDingaling(Dingaling):
            pass
        mapper(SubDingaling, None, inherits=Dingaling)
        lru = Address.dingalings.property._lazy_strategy._bakery(
            lambda q: None
        )._bakery
        l1 = len(lru)
        for i in range(5):
            sess = Session()
            u1 = (
                sess.query(User)
                .options(
                    Load(User)
                    .defaultload(User.addresses)
                    .lazyload(
                        Address.dingalings.of_type(aliased(SubDingaling))
                    )
                )
                .first()
            )
            for ad in u1.addresses:
                ad.dingalings
        l2 = len(lru)
        eq_(l1, 0)
        eq_(l2, 1)
    def test_safe_unbound_option_allows_bake(self):
        """A cache-safe unbound option leaves the lazy load bakeable."""
        User, Address, Dingaling = self._o2m_twolevel_fixture(lazy="joined")
        lru = Address.dingalings.property._lazy_strategy._bakery(
            lambda q: None
        )._bakery
        l1 = len(lru)
        for i in range(5):
            sess = Session()
            u1 = (
                sess.query(User)
                .options(
                    defaultload(User.addresses).lazyload(Address.dingalings)
                )
                .first()
            )
            for ad in u1.addresses:
                ad.dingalings
        l2 = len(lru)
        eq_(l1, 0)
        # two cache entries were produced across the five runs.
        eq_(l2, 2)
    def test_safe_bound_option_allows_bake(self):
        """A cache-safe bound Load() option leaves the lazy load bakeable."""
        User, Address, Dingaling = self._o2m_twolevel_fixture(lazy="joined")
        lru = Address.dingalings.property._lazy_strategy._bakery(
            lambda q: None
        )._bakery
        l1 = len(lru)
        for i in range(5):
            sess = Session()
            u1 = (
                sess.query(User)
                .options(
                    Load(User)
                    .defaultload(User.addresses)
                    .lazyload(Address.dingalings)
                )
                .first()
            )
            for ad in u1.addresses:
                ad.dingalings
        l2 = len(lru)
        eq_(l1, 0)
        eq_(l2, 2)
    def test_baked_lazy_loading_relationship_flag_true(self):
        self._test_baked_lazy_loading_relationship_flag(True)
    def test_baked_lazy_loading_relationship_flag_false(self):
        self._test_baked_lazy_loading_relationship_flag(False)
    def _test_baked_lazy_loading_relationship_flag(self, flag):
        # With bake_queries=True the second (post-expire) lazy load reuses
        # the cached query, so Query._compile_context runs once for the
        # Address entity; with False it runs on both loads.
        User, Address = self._o2m_fixture(bake_queries=flag)
        sess = Session()
        u1 = sess.query(User).first()
        from sqlalchemy.orm import Query
        canary = mock.Mock()
        # I would think Mock can do this but apparently
        # it cannot (wrap / autospec don't work together)
        real_compile_context = Query._compile_context
        def _my_compile_context(*arg, **kw):
            if arg[0].column_descriptions[0]["entity"] is Address:
                canary()
            return real_compile_context(*arg, **kw)
        with mock.patch.object(Query, "_compile_context", _my_compile_context):
            u1.addresses
            sess.expire(u1)
            u1.addresses
        if flag:
            eq_(canary.call_count, 1)
        else:
            eq_(canary.call_count, 2)
    def test_baked_lazy_loading_option_o2m(self):
        User, Address = self._o2m_fixture()
        self._test_baked_lazy_loading(set_option=True)
    def test_baked_lazy_loading_mapped_o2m(self):
        User, Address = self._o2m_fixture(lazy="baked_select")
        self._test_baked_lazy_loading(set_option=False)
    def _test_baked_lazy_loading(self, set_option):
        # Run a matrix of filter/join combinations repeatedly and assert the
        # expected SQL statement counts when lazy loading is baked.
        User, Address = self.classes.User, self.classes.Address
        base_bq = self.bakery(lambda s: s.query(User))
        if set_option:
            base_bq += lambda q: q.options(lazyload(User.addresses))
        base_bq += lambda q: q.order_by(User.id)
        assert_result = self.static.user_address_result
        for i in range(4):
            for cond1, cond2 in itertools.product(
                *[(False, True) for j in range(2)]
            ):
                bq = base_bq._clone()
                sess = Session()
                if cond1:
                    bq += lambda q: q.filter(User.name == "jack")
                else:
                    bq += lambda q: q.filter(User.name.like("%ed%"))
                if cond2:
                    ct = func.count(Address.id).label("count")
                    subq = (
                        sess.query(ct, Address.user_id)
                        .group_by(Address.user_id)
                        .having(ct > 2)
                        .subquery()
                    )
                    bq += lambda q: q.join(subq)
                if cond2:
                    if cond1:
                        def go():
                            result = bq(sess).all()
                            eq_([], result)
                        self.assert_sql_count(testing.db, go, 1)
                    else:
                        def go():
                            result = bq(sess).all()
                            eq_(assert_result[1:2], result)
                        self.assert_sql_count(testing.db, go, 2)
                else:
                    if cond1:
                        def go():
                            result = bq(sess).all()
                            eq_(assert_result[0:1], result)
                        self.assert_sql_count(testing.db, go, 2)
                    else:
                        def go():
                            result = bq(sess).all()
                            eq_(assert_result[1:3], result)
                        self.assert_sql_count(testing.db, go, 3)
                sess.close()
    def test_baked_lazy_loading_m2o(self):
        # Same style of matrix test for the many-to-one Address.user load.
        User, Address = self._m2o_fixture()
        base_bq = self.bakery(lambda s: s.query(Address))
        base_bq += lambda q: q.options(lazyload(Address.user))
        base_bq += lambda q: q.order_by(Address.id)
        assert_result = self.static.address_user_result
        for i in range(4):
            for cond1 in (False, True):
                bq = base_bq._clone()
                sess = Session()
                if cond1:
                    bq += lambda q: q.filter(
                        Address.email_address == "jack@bean.com"
                    )
                else:
                    bq += lambda q: q.filter(
                        Address.email_address.like("ed@%")
                    )
                if cond1:
                    def go():
                        result = bq(sess).all()
                        eq_(assert_result[0:1], result)
                    self.assert_sql_count(testing.db, go, 2)
                else:
                    def go():
                        result = bq(sess).all()
                        eq_(assert_result[1:4], result)
                    self.assert_sql_count(testing.db, go, 2)
                sess.close()
    def test_useget_cancels_eager(self):
        """test that a one to many lazyload cancels the unnecessary
        eager many-to-one join on the other side."""
        User = self.classes.User
        Address = self.classes.Address
        mapper(User, self.tables.users)
        mapper(
            Address,
            self.tables.addresses,
            properties={
                "user": relationship(
                    User,
                    lazy="joined",
                    backref=backref("addresses", lazy="baked_select"),
                )
            },
        )
        sess = Session()
        u1 = sess.query(User).filter(User.id == 8).one()
        def go():
            eq_(u1.addresses[0].user, u1)
        # the emitted statement selects from addresses only -- no JOIN
        # back to users.
        self.assert_sql_execution(
            testing.db,
            go,
            CompiledSQL(
                "SELECT addresses.id AS addresses_id, addresses.user_id AS "
                "addresses_user_id, addresses.email_address AS "
                "addresses_email_address FROM addresses WHERE :param_1 = "
                "addresses.user_id",
                {"param_1": 8},
            ),
        )
    def test_useget_cancels_eager_propagated_present(self):
        """test that a one to many lazyload cancels the unnecessary
        eager many-to-one join on the other side, even when a propagated
        option is present."""
        User = self.classes.User
        Address = self.classes.Address
        mapper(User, self.tables.users)
        mapper(
            Address,
            self.tables.addresses,
            properties={
                "user": relationship(
                    User,
                    lazy="joined",
                    backref=backref("addresses", lazy="baked_select"),
                )
            },
        )
        from sqlalchemy.orm.interfaces import MapperOption
        class MyBogusOption(MapperOption):
            propagate_to_loaders = True
        sess = Session()
        u1 = (
            sess.query(User)
            .options(MyBogusOption())
            .filter(User.id == 8)
            .one()
        )
        def go():
            eq_(u1.addresses[0].user, u1)
        self.assert_sql_execution(
            testing.db,
            go,
            CompiledSQL(
                "SELECT addresses.id AS addresses_id, addresses.user_id AS "
                "addresses_user_id, addresses.email_address AS "
                "addresses_email_address FROM addresses WHERE :param_1 = "
                "addresses.user_id",
                {"param_1": 8},
            ),
        )
    def test_simple_lazy_clause_no_race_on_generate(self):
        # Accessing the memoized attribute twice (deleting it in between)
        # must produce equivalent parameter dictionaries.
        User, Address = self._o2m_fixture()
        expr1, paramdict1 = (
            User.addresses.property._lazy_strategy._simple_lazy_clause
        )
        # delete the attr, as though a concurrent thread is also generating it
        del User.addresses.property._lazy_strategy._simple_lazy_clause
        expr2, paramdict2 = (
            User.addresses.property._lazy_strategy._simple_lazy_clause
        )
        eq_(paramdict1, paramdict2)
    # additional tests:
    # 1. m2m w lazyload
    # 2. o2m lazyload where m2o backrefs have an eager load, test
    # that eager load is canceled out
    # 3. uselist = False, uselist=False assertion
# assert that the integration style illustrated in the dogpile.cache
# example works w/ baked
class CustomIntegrationTest(testing.AssertsCompiledSQL, BakedTest):
    """Integration tests combining a custom caching Query subclass
    (modeled on the dogpile.cache example) with baked queries."""
    run_setup_mappers = "each"
    def _o2m_fixture(self, lazy="select", **kw):
        # One-to-many fixture: User.addresses with a configurable lazy strategy.
        User = self.classes.User
        Address = self.classes.Address
        mapper(
            User,
            self.tables.users,
            properties={
                "addresses": relationship(
                    Address,
                    order_by=self.tables.addresses.c.id,
                    lazy=lazy,
                    **kw
                )
            },
        )
        mapper(Address, self.tables.addresses)
        return User, Address
    def _query_fixture(self):
        # Build a Session whose query class caches result rows under a
        # caller-assigned key (set via set_cache_key()).
        from sqlalchemy.orm.query import Query, _generative
        class CachingQuery(Query):
            cache = {}
            @_generative()
            def set_cache_key(self, key):
                self._cache_key = key
            def __iter__(self):
                super_ = super(CachingQuery, self)
                if hasattr(self, "_cache_key"):
                    return self.get_value(
                        createfunc=lambda: list(super_.__iter__())
                    )
                else:
                    return super_.__iter__()
            def _execute_and_instances(self, context):
                super_ = super(CachingQuery, self)
                if context.query is not self and hasattr(self, "_cache_key"):
                    return self.get_value(
                        createfunc=lambda: list(
                            super_._execute_and_instances(context)
                        )
                    )
                else:
                    return super_._execute_and_instances(context)
            def get_value(self, createfunc):
                # Return cached rows for our key, creating them on a miss.
                if self._cache_key in self.cache:
                    return iter(self.cache[self._cache_key])
                else:
                    self.cache[self._cache_key] = retval = createfunc()
                    return iter(retval)
        return Session(query_cls=CachingQuery)
    def _option_fixture(self):
        # A propagated option that assigns the cache key "user7_addresses"
        # to the lazy-load query for the addresses relationship.
        from sqlalchemy.orm.interfaces import MapperOption
        class RelationshipCache(MapperOption):
            propagate_to_loaders = True
            def process_query_conditionally(self, query):
                if query._current_path:
                    query._cache_key = "user7_addresses"
            def _generate_cache_key(self, path):
                return None
        return RelationshipCache()
    def test_non_baked(self):
        User, Address = self._o2m_fixture()
        sess = self._query_fixture()
        q = sess._query_cls
        eq_(q.cache, {})
        q = sess.query(User).filter(User.id == 7).set_cache_key("user7")
        eq_(q.all(), [User(id=7, addresses=[Address(id=1)])])
        eq_(q.cache, {"user7": [User(id=7, addresses=[Address(id=1)])]})
        eq_(q.all(), [User(id=7, addresses=[Address(id=1)])])
    def test_use_w_baked(self):
        User, Address = self._o2m_fixture()
        sess = self._query_fixture()
        q = sess._query_cls
        eq_(q.cache, {})
        base_bq = self.bakery(lambda s: s.query(User))
        base_bq += lambda q: q.filter(User.id == 7)
        base_bq += lambda q: q.set_cache_key("user7")
        eq_(base_bq(sess).all(), [User(id=7, addresses=[Address(id=1)])])
        eq_(q.cache, {"user7": [User(id=7, addresses=[Address(id=1)])]})
        eq_(base_bq(sess).all(), [User(id=7, addresses=[Address(id=1)])])
    def test_plain_w_baked_lazyload(self):
        User, Address = self._o2m_fixture()
        opt = self._option_fixture()
        sess = self._query_fixture()
        q = sess._query_cls
        eq_(q.cache, {})
        q = sess.query(User).filter(User.id == 7).options(opt)
        u = q.first()
        eq_(u.addresses, [Address(id=1)])
        eq_(q.cache, {"user7_addresses": [Address(id=1)]})
        sess.close()
        # ensure caching logic works after query has been baked
        q.cache.clear()
        u = q.first()
        eq_(u.addresses, [Address(id=1)])
        eq_(q.cache, {"user7_addresses": [Address(id=1)]})
| |
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module for handling messages and concurrency for run-webkit-tests
and test-webkitpy. This module follows the design for multiprocessing.Pool
and concurrent.futures.ProcessPoolExecutor, with the following differences:
* Tasks are executed in stateful subprocesses via objects that implement the
Worker interface - this allows the workers to share state across tasks.
* The pool provides an asynchronous event-handling interface so the caller
may receive events as tasks are processed.
If you don't need these features, use multiprocessing.Pool or concurrent.futures
instead.
"""
import cPickle
import logging
import multiprocessing
import Queue
import sys
import traceback
from webkitpy.common.host import Host
from webkitpy.common.system import stack_utils
_log = logging.getLogger(__name__)
def get(caller, worker_factory, num_workers, host=None):
    """Factory for a message pool.

    Returns an object that exposes a run() method which takes a list of
    test shards and runs them in parallel.
    """
    pool = _MessagePool(caller, worker_factory, num_workers, host)
    return pool
class _MessagePool(object):
    """Manages a set of _Worker processes (or a single inline worker) and
    routes _Message objects between them and the caller."""
    def __init__(self, caller, worker_factory, num_workers, host=None):
        self._caller = caller
        self._worker_factory = worker_factory
        self._num_workers = num_workers
        self._workers = []
        self._workers_stopped = set()
        self._host = host
        self._name = 'manager'
        # With exactly one worker, everything runs in this process using
        # plain thread-safe queues instead of multiprocessing queues.
        self._running_inline = (self._num_workers == 1)
        if self._running_inline:
            self._messages_to_worker = Queue.Queue()
            self._messages_to_manager = Queue.Queue()
        else:
            self._messages_to_worker = multiprocessing.Queue()
            self._messages_to_manager = multiprocessing.Queue()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, exc_traceback):
        self._close()
        return False
    def run(self, shards):
        """Posts a list of messages to the pool and waits for them to complete."""
        for message in shards:
            self._messages_to_worker.put(_Message(self._name, message[0], message[1:], from_user=True, logs=()))
        # One 'stop' sentinel per worker so each worker exits its loop.
        for _ in xrange(self._num_workers):
            self._messages_to_worker.put(_Message(self._name, 'stop', message_args=(), from_user=False, logs=()))
        self.wait()
    def _start_workers(self):
        assert not self._workers
        self._workers_stopped = set()
        host = None
        # Only hand the host to workers when it can cross a process
        # boundary (or when we're not forking at all).
        if self._running_inline or self._can_pickle(self._host):
            host = self._host
        for worker_number in xrange(self._num_workers):
            worker = _Worker(host, self._messages_to_manager, self._messages_to_worker, self._worker_factory,
                worker_number, self._running_inline, self if self._running_inline else None, self._worker_log_level())
            self._workers.append(worker)
            worker.start()
    def _worker_log_level(self):
        # Use the lowest (most verbose) level among configured root handlers.
        log_level = logging.NOTSET
        for handler in logging.root.handlers:
            if handler.level != logging.NOTSET:
                if log_level == logging.NOTSET:
                    log_level = handler.level
                else:
                    log_level = min(log_level, handler.level)
        return log_level
    def wait(self):
        # Start the workers and pump messages until they all report 'done'.
        try:
            self._start_workers()
            if self._running_inline:
                self._workers[0].run()
                self._loop(block=False)
            else:
                self._loop(block=True)
        finally:
            self._close()
    def _close(self):
        for worker in self._workers:
            if worker.is_alive():
                worker.terminate()
                worker.join()
        self._workers = []
        if not self._running_inline:
            # FIXME: This is a hack to get multiprocessing to not log tracebacks during shutdown :(.
            multiprocessing.util._exiting = True
            if self._messages_to_worker:
                self._messages_to_worker.close()
                self._messages_to_worker = None
            if self._messages_to_manager:
                self._messages_to_manager.close()
                self._messages_to_manager = None
    def _log_messages(self, messages):
        # Replay log records captured in a worker through the root logger.
        for message in messages:
            logging.root.handle(message)
    def _handle_done(self, source):
        self._workers_stopped.add(source)
    @staticmethod
    def _handle_worker_exception(source, exception_type, exception_value, _):
        # Re-raise a worker-side exception in the manager process.
        if exception_type == KeyboardInterrupt:
            raise exception_type(exception_value)
        raise WorkerException(str(exception_value))
    def _can_pickle(self, host):
        try:
            cPickle.dumps(host)
            return True
        except TypeError:
            return False
    def _loop(self, block):
        # Dispatch messages coming back from workers; exits via Queue.Empty
        # once all workers have stopped and the queue drains.
        try:
            while True:
                if len(self._workers_stopped) == len(self._workers):
                    block = False
                message = self._messages_to_manager.get(block)
                self._log_messages(message.logs)
                if message.from_user:
                    self._caller.handle(message.name, message.src, *message.args)
                    continue
                method = getattr(self, '_handle_' + message.name)
                assert method, 'bad message %s' % repr(message)
                method(message.src, *message.args)
        except Queue.Empty:
            pass
class WorkerException(BaseException):
    """Raised when we receive an unexpected/unknown exception from a worker."""
    # NOTE: subclasses BaseException (not Exception), so generic
    # "except Exception" handlers in callers will not swallow it.
class _Message(object):
def __init__(self, src, message_name, message_args, from_user, logs):
self.src = src
self.name = message_name
self.args = message_args
self.from_user = from_user
self.logs = logs
def __repr__(self):
return '_Message(src=%s, name=%s, args=%s, from_user=%s, logs=%s)' % (
self.src, self.name, self.args, self.from_user, self.logs)
class _Worker(multiprocessing.Process):
def __init__(self, host, messages_to_manager, messages_to_worker,
worker_factory, worker_number, running_inline, manager, log_level):
super(_Worker, self).__init__()
self.host = host
self.worker_number = worker_number
self.name = 'worker/%d' % worker_number
self.log_messages = []
self.log_level = log_level
self._running = False
self._running_inline = running_inline
self._manager = manager
self._messages_to_manager = messages_to_manager
self._messages_to_worker = messages_to_worker
self._worker = worker_factory(self)
self._logger = None
self._log_handler = None
def terminate(self):
if self._worker:
if hasattr(self._worker, 'stop'):
self._worker.stop()
self._worker = None
if self.is_alive():
super(_Worker, self).terminate()
def _close(self):
if self._log_handler and self._logger:
self._logger.removeHandler(self._log_handler)
self._log_handler = None
self._logger = None
def start(self):
if not self._running_inline:
super(_Worker, self).start()
def run(self):
if not self.host:
self.host = Host()
if not self._running_inline:
self._set_up_logging()
worker = self._worker
_log.debug('%s starting', self.name)
self._running = True
try:
if hasattr(worker, 'start'):
worker.start()
while self._running:
message = self._messages_to_worker.get()
if message.from_user:
worker.handle(message.name, message.src, *message.args)
self._yield_to_manager()
else:
assert message.name == 'stop', 'bad message %s' % repr(message)
break
_log.debug('%s exiting', self.name)
except Queue.Empty:
assert False, '%s: ran out of messages in worker queue.' % self.name
except KeyboardInterrupt:
self._raise(sys.exc_info())
except Exception:
self._raise(sys.exc_info())
finally:
try:
if hasattr(worker, 'stop'):
worker.stop()
finally:
self._post(name='done', args=(), from_user=False)
self._close()
def stop_running(self):
self._running = False
def post(self, name, *args):
self._post(name, args, from_user=True)
self._yield_to_manager()
def _yield_to_manager(self):
if self._running_inline:
self._manager._loop(block=False)
def _post(self, name, args, from_user):
log_messages = self.log_messages
self.log_messages = []
self._messages_to_manager.put(_Message(self.name, name, args, from_user, log_messages))
def _raise(self, exc_info):
exception_type, exception_value, exception_traceback = exc_info
if self._running_inline:
raise exception_type, exception_value, exception_traceback
if exception_type == KeyboardInterrupt:
_log.debug('%s: interrupted, exiting', self.name)
stack_utils.log_traceback(_log.debug, exception_traceback)
else:
_log.error("%s: %s('%s') raised:", self.name, exception_value.__class__.__name__, str(exception_value))
stack_utils.log_traceback(_log.error, exception_traceback)
# Since tracebacks aren't picklable, send the extracted stack instead.
stack = traceback.extract_tb(exception_traceback)
self._post(name='worker_exception', args=(exception_type, exception_value, stack), from_user=False)
def _set_up_logging(self):
self._logger = logging.getLogger()
# The unix multiprocessing implementation clones any log handlers into the child process,
# so we remove them to avoid duplicate logging.
for h in self._logger.handlers:
self._logger.removeHandler(h)
self._log_handler = _WorkerLogHandler(self)
self._logger.addHandler(self._log_handler)
self._logger.setLevel(self.log_level)
class _WorkerLogHandler(logging.Handler):
def __init__(self, worker):
logging.Handler.__init__(self)
self._worker = worker
self.setLevel(worker.log_level)
def emit(self, record):
self._worker.log_messages.append(record)
| |
"""The tests for the Async Media player helper functions."""
import unittest
import asyncio
import homeassistant.components.media_player as mp
from homeassistant.const import (
STATE_PLAYING, STATE_PAUSED, STATE_ON, STATE_OFF, STATE_IDLE)
from homeassistant.util.async_ import run_coroutine_threadsafe
from tests.common import get_test_home_assistant
class AsyncMediaPlayer(mp.MediaPlayerDevice):
    """Media player stub whose operations are all implemented as coroutines."""

    def __init__(self, hass):
        """Start powered off with the volume at zero."""
        self.hass = hass
        self._state = STATE_OFF
        self._volume = 0

    @property
    def state(self):
        """Current state of the player."""
        return self._state

    @property
    def volume_level(self):
        """Current volume of the media player (0..1)."""
        return self._volume

    @asyncio.coroutine
    def async_set_volume_level(self, volume):
        """Coroutine that stores the requested volume (0..1)."""
        self._volume = volume

    @asyncio.coroutine
    def async_media_play(self):
        """Coroutine that switches the player to playing."""
        self._state = STATE_PLAYING

    @asyncio.coroutine
    def async_media_pause(self):
        """Coroutine that switches the player to paused."""
        self._state = STATE_PAUSED

    @asyncio.coroutine
    def async_turn_on(self):
        """Coroutine that powers the player on."""
        self._state = STATE_ON

    @asyncio.coroutine
    def async_turn_off(self):
        """Coroutine that powers the player off."""
        self._state = STATE_OFF
class SyncMediaPlayer(mp.MediaPlayerDevice):
    """Media player stub implementing the synchronous service methods."""

    def __init__(self, hass):
        """Start powered off with the volume at zero."""
        self.hass = hass
        self._state = STATE_OFF
        self._volume = 0

    @property
    def state(self):
        """Current state of the player."""
        return self._state

    @property
    def volume_level(self):
        """Current volume of the media player (0..1)."""
        return self._volume

    def set_volume_level(self, volume):
        """Store the requested volume (0..1)."""
        self._volume = volume

    def volume_up(self):
        """Raise the volume by one 0.2 step, clamped at 1."""
        current = self.volume_level
        if current < 1:
            self.set_volume_level(min(1, current + 0.2))

    def volume_down(self):
        """Lower the volume by one 0.2 step, clamped at 0."""
        current = self.volume_level
        if current > 0:
            self.set_volume_level(max(0, current - 0.2))

    def media_play_pause(self):
        """Toggle between playing and paused."""
        if self._state != STATE_PLAYING:
            self._state = STATE_PLAYING
        else:
            self._state = STATE_PAUSED

    def toggle(self):
        """Flip the power state of the media player."""
        self._state = STATE_ON if self._state in [STATE_OFF, STATE_IDLE] \
            else STATE_OFF

    @asyncio.coroutine
    def async_media_play_pause(self):
        """Create a coroutine to wrap the future returned by ABC.
        This allows the run_coroutine_threadsafe helper to be used.
        """
        yield from super().async_media_play_pause()

    @asyncio.coroutine
    def async_toggle(self):
        """Create a coroutine to wrap the future returned by ABC.
        This allows the run_coroutine_threadsafe helper to be used.
        """
        yield from super().async_toggle()
class TestAsyncMediaPlayer(unittest.TestCase):
    """Exercise the async helper methods of MediaPlayerDevice."""

    def setUp(self):  # pylint: disable=invalid-name
        """Create a test hass instance and an async player."""
        self.hass = get_test_home_assistant()
        self.player = AsyncMediaPlayer(self.hass)

    def tearDown(self):
        """Shut down the test instance."""
        self.hass.stop()

    def _run(self, coro):
        """Execute a coroutine on the hass loop and wait for the result."""
        return run_coroutine_threadsafe(coro, self.hass.loop).result()

    def test_volume_up(self):
        """Test the volume_up helper function."""
        assert self.player.volume_level == 0
        self._run(self.player.async_set_volume_level(0.5))
        assert self.player.volume_level == 0.5
        self._run(self.player.async_volume_up())
        assert self.player.volume_level == 0.6

    def test_volume_down(self):
        """Test the volume_down helper function."""
        assert self.player.volume_level == 0
        self._run(self.player.async_set_volume_level(0.5))
        assert self.player.volume_level == 0.5
        self._run(self.player.async_volume_down())
        assert self.player.volume_level == 0.4

    def test_media_play_pause(self):
        """Test the media_play_pause helper function."""
        assert self.player.state == STATE_OFF
        self._run(self.player.async_media_play_pause())
        assert self.player.state == STATE_PLAYING
        self._run(self.player.async_media_play_pause())
        assert self.player.state == STATE_PAUSED

    def test_toggle(self):
        """Test the toggle helper function."""
        assert self.player.state == STATE_OFF
        self._run(self.player.async_toggle())
        assert self.player.state == STATE_ON
        self._run(self.player.async_toggle())
        assert self.player.state == STATE_OFF
class TestSyncMediaPlayer(unittest.TestCase):
    """Exercise the sync overrides via the async helper wrappers."""

    def setUp(self):  # pylint: disable=invalid-name
        """Create a test hass instance and a sync player."""
        self.hass = get_test_home_assistant()
        self.player = SyncMediaPlayer(self.hass)

    def tearDown(self):
        """Shut down the test instance."""
        self.hass.stop()

    def _run(self, coro):
        """Execute a coroutine on the hass loop and wait for the result."""
        return run_coroutine_threadsafe(coro, self.hass.loop).result()

    def test_volume_up(self):
        """Test the volume_up helper function."""
        assert self.player.volume_level == 0
        self.player.set_volume_level(0.5)
        assert self.player.volume_level == 0.5
        self._run(self.player.async_volume_up())
        assert self.player.volume_level == 0.7

    def test_volume_down(self):
        """Test the volume_down helper function."""
        assert self.player.volume_level == 0
        self.player.set_volume_level(0.5)
        assert self.player.volume_level == 0.5
        self._run(self.player.async_volume_down())
        assert self.player.volume_level == 0.3

    def test_media_play_pause(self):
        """Test the media_play_pause helper function."""
        assert self.player.state == STATE_OFF
        self._run(self.player.async_media_play_pause())
        assert self.player.state == STATE_PLAYING
        self._run(self.player.async_media_play_pause())
        assert self.player.state == STATE_PAUSED

    def test_toggle(self):
        """Test the toggle helper function."""
        assert self.player.state == STATE_OFF
        self._run(self.player.async_toggle())
        assert self.player.state == STATE_ON
        self._run(self.player.async_toggle())
        assert self.player.state == STATE_OFF
| |
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from django.db import models
from django.core.validators import RegexValidator
from django_markdown.models import MarkdownField
from django.conf import settings
from easy_thumbnails.fields import ThumbnailerImageField
@python_2_unicode_compatible
class News(models.Model):
    """A news item shown on the site, bucketed into one of three sections."""
    CHOICE = [('C', 'Campus News'),
              ('N', 'In the News'),
              ('S', 'Spotlight'),
              ]
    title = models.CharField(max_length=300)
    # NOTE(review): news_pic uploads into 'article_pics/' -- looks copied
    # from Articles; confirm the directory is intentional.
    news_pic = models.ImageField(upload_to='article_pics/%Y-%m-%d/',null=True,blank=True)
    thumbnail = ThumbnailerImageField(upload_to='news_thumbnail/%Y-%m-%d/', blank=True)
    details = MarkdownField()
    category = models.CharField(max_length=1, choices=CHOICE)
    timestamp = models.DateTimeField(auto_now_add=True)
    pinned = models.BooleanField(default=False)
    def __str__(self):
        return self.title
@python_2_unicode_compatible
class Announcements(models.Model):
    """A short announcement; may carry markdown details and/or an external link."""
    title = models.CharField(max_length=300)
    details = MarkdownField(null=True, blank=True)
    link = models.URLField(null=True, blank=True)
    timestamp = models.DateTimeField(auto_now_add=True)
    pinned = models.BooleanField(default=False)
    def __str__(self):
        return self.title
@python_2_unicode_compatible
class Club(models.Model):
    """A student club, tied one-to-one to a login account."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL)
    name = models.CharField(max_length=300)
    convenor = models.CharField(max_length=100)
    # member head-count
    strength = models.IntegerField()
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class CoreMember(models.Model):
    """A core council member, identified by a two-letter designation code."""
    DESIGNATION = [('PR','President'),
                   ('VP','Vice President'),
                   ('GS','General Secretary'),
                   ('JU','Joint Secretary UG'),
                   ('JP','Joint Secretary PG'),
                   ('GR','PG/PhD Girls\' Representative'),
                   ('TU','Technical Secretary UG'),
                   ('TP','Technical Secretary PG'),
                   ('CU','Cultural Secretary UG'),
                   ('CP','Cultural Secretary PG'),
                   ('EC','Engineer Convenor'),
                   ('IC','Incident Convenor'),
                   ('IT','Incident Treasurer'),
                   ('ET','Engineer Treasurer'),
                   ('EJ','Engineer Joint Convenor'),
                   ('IJ','Incident Joint Convenor'),
                   ]
    name = models.CharField(max_length=50)
    prof_pic = models.ImageField(upload_to='member_pic_thumbnail/%Y-%m-%d/', blank=True)
    designation = models.CharField(max_length=2, choices=DESIGNATION)
    email = models.EmailField()
    # Accepts international-format numbers, e.g. '+919999999999'.
    phone_regex = RegexValidator(regex=r'^\+?1?\d{9,15}$', message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.")
    phone_number = models.CharField(max_length=15, validators=[phone_regex], blank=True)
    def __str__(self):
        return u'%s %s' % (self.name,self.get_designation_display())
@python_2_unicode_compatible
class Member(models.Model):
    """A regular council member with branch, year and contact details."""
    BRANCH_LIST = [('CH', 'Chemical Engineering'),
                   ('CO', 'Computer Engineering'),
                   ('CV', 'Civil Engineering'),
                   ('EC', 'Electronics and Communications Engineering'),
                   # fixed typo: was 'Elelctrical'
                   ('EE', 'Electrical and Electronics Engineering'),
                   ('IT', 'Information Technology'),
                   ('ME', 'Mechanical Engineering'),
                   ('MN', 'Mining Engineering'),
                   ('MT', 'Materials and Metallurgical Engineering'),
                   ]
    YEAR_LIST = [(1,'First Year'),
                 (2,'Second Year'),
                 (3,'Third Year'),
                 (4,'Final Year'),
                 (5,'PG'),
                 (6, 'PhD'),
                 ]
    name = models.CharField(max_length=50)
    branch = models.CharField(max_length=2, choices=BRANCH_LIST)
    prof_pic = models.ImageField(upload_to='member_pic_thumbnail/%Y-%m-%d/', blank=True)
    year = models.IntegerField(choices=YEAR_LIST)
    email = models.EmailField()
    # Accepts international-format numbers, e.g. '+919999999999'.
    phone_regex = RegexValidator(regex=r'^\+?1?\d{9,15}$', message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.")
    phone_number = models.CharField(max_length=15, validators=[phone_regex], blank=True)
    def __str__(self):
        # NOTE(review): shows the raw branch code; use get_branch_display()
        # if the full branch name is wanted here.
        return u'%s %s %s' % (self.name,self.branch,self.get_year_display())
@python_2_unicode_compatible
class Events(models.Model):
    """A club-organized event with a start/end time and a contact number."""
    title = models.CharField(max_length=200)
    organizer = models.ForeignKey(Club)
    details = MarkdownField()
    start = models.DateTimeField()
    end = models.DateTimeField()
    # Accepts international-format numbers, e.g. '+919999999999'.
    phone_regex = RegexValidator(regex=r'^\+?1?\d{9,15}$', message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.")
    contact = models.CharField(max_length=15, validators=[phone_regex], blank=True)
    def __str__(self):
        return self.title
@python_2_unicode_compatible
class Author(models.Model):
    """An article author, with an optional profile picture and a markdown bio."""

    name = models.CharField(max_length=50)
    # Optional picture; uploads are grouped into per-day directories.
    prof_pic = models.ImageField(upload_to='author_profile_pics/%Y-%m-%d/', null=True, blank=True)
    blurb = MarkdownField()

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Articles(models.Model):
    """A published article written by an Author, with markdown content."""

    title = models.CharField(max_length=200)
    # on_delete made explicit (CASCADE was the implicit default before
    # Django 2.0 made the argument mandatory) -- behavior is unchanged.
    author = models.ForeignKey(Author, on_delete=models.CASCADE)
    # Optional cover image; uploads are grouped into per-day directories.
    article_pic = models.ImageField(upload_to='article_pics/%Y-%m-%d/', null=True, blank=True)
    content = MarkdownField()
    # Set once, when the row is first created.
    published = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return self.title
@python_2_unicode_compatible
class Complaint(models.Model):
    """A filed complaint, categorised by the campus body it concerns."""

    # (stored code, display label) pairs for the complaint category.
    choice = [
        ('H', 'HCC'),
        ('S', 'Security'),
        ('L', 'LAN'),
        ('P', 'Sports'),
        ('E', 'Eateries'),
        ('I', 'Independent Bodies'),
        ('A', 'Academics'),
        ('M', 'Miscellaneous'),
    ]
    complaint = models.CharField(max_length=300)
    details = models.CharField(max_length=2000)
    # Set once, when the row is first created.
    date = models.DateTimeField(auto_now_add=True)
    ctype = models.CharField(max_length=1, choices=choice)

    def __str__(self):
        return self.complaint
@python_2_unicode_compatible
class Minute(models.Model):
    """A minutes-of-meeting document, referenced by URL."""

    link = models.URLField()
    date_of_meeting = models.DateField()
    title = models.CharField(max_length=100, default="Minutes of the Meeting")
    description = models.CharField(max_length=200, null=True, blank=True)

    def __str__(self):
        # e.g. "Minutes of the Meeting-2017-08-15"
        return '%s-%s' % (self.title, self.date_of_meeting)
@python_2_unicode_compatible
class ResearchGrant(models.Model):
    """A research-grant record, referenced by URL."""

    link = models.URLField()
    date_of_grant = models.DateField()
    title = models.CharField(max_length=100)
    description = models.CharField(max_length=200, null=True, blank=True)

    def __str__(self):
        return self.title
@python_2_unicode_compatible
class MoU(models.Model):
    """A signed memorandum of understanding, referenced by URL."""

    link = models.URLField()
    date_of_signing = models.DateField()
    title = models.CharField(max_length=100)
    description = models.CharField(max_length=200, null=True, blank=True)

    def __str__(self):
        return self.title
@python_2_unicode_compatible
class ResourceCategory(models.Model):
    """A named bucket used to group Resource entries."""

    name = models.CharField(max_length=100)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Resource(models.Model):
    """A categorised link to an external resource."""

    link = models.URLField()
    title = models.CharField(max_length=100)
    # on_delete made explicit (CASCADE was the implicit default before
    # Django 2.0 made the argument mandatory) -- behavior is unchanged.
    category = models.ForeignKey(ResourceCategory, on_delete=models.CASCADE)
    # Set once, when the row is first created.
    timestamp = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return self.title
@python_2_unicode_compatible
class SenateReport(models.Model):
    """A senate report document, referenced by URL."""

    link = models.URLField()
    date_of_report = models.DateField()
    title = models.CharField(max_length=100)
    description = models.CharField(max_length=200, null=True, blank=True)

    def __str__(self):
        # e.g. "Report Title-2018-03-01"
        return '%s-%s' % (self.title, self.date_of_report)
@python_2_unicode_compatible
class Letter(models.Model):
    """An official letter addressed to a campus authority, referenced by URL."""

    # (stored code, display label) pairs for the addressee.
    LIST = (
        ('DR', 'Director'),
        ('SW', 'Dean SW'),
        ('FW', 'Dean FW'),
        ('PD', 'Dean P&D'),
        ('HS', 'Hostel Office'),
        ('MC', 'Miscellaneous'),
    )
    addressee = models.CharField(max_length=2, choices=LIST)
    subject = models.CharField(max_length=100)
    link = models.URLField()
    date_of_letter = models.DateField()

    def __str__(self):
        # Uses the stored two-letter code, not its display label.
        return '%s-%s' % (self.addressee, self.subject)
@python_2_unicode_compatible
class MessageFromPresident(models.Model):
    """A dated message from the president, with picture and markdown body."""

    name = models.CharField(max_length=50)
    pic = models.ImageField(upload_to='presi_image/')
    year = models.DateField()
    message = MarkdownField()

    def __str__(self):
        return self.name
# --- end of models module; the following section is plotly-generated code ---
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Textfont(_BaseTraceHierarchyType):
    """Text font settings for the ``scattergeo.textfont`` attribute."""

    # -- plotly object-hierarchy metadata -------------------------------
    _parent_path_str = "scattergeo"
    _path_str = "scattergeo.textfont"
    _valid_props = {"color", "colorsrc", "family", "familysrc", "size", "sizesrc"}

    # -- color ----------------------------------------------------------
    @property
    def color(self):
        """
        Text color.

        The 'color' property is a color and may be specified as:
          - a hex string (e.g. '#ff0000')
          - an rgb/rgba, hsl/hsla or hsv/hsva string
          - a named CSS color
          - a list or array of any of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    # -- colorsrc -------------------------------------------------------
    @property
    def colorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `color`.

        The 'colorsrc' property must be specified as a string or as a
        plotly.grid_objs.Column object.

        Returns
        -------
        str
        """
        return self["colorsrc"]

    @colorsrc.setter
    def colorsrc(self, val):
        self["colorsrc"] = val

    # -- family ---------------------------------------------------------
    @property
    def family(self):
        """
        HTML font family - the typeface applied by the web browser.
        Multiple comma-separated families express a preference order
        for fonts that may not be installed on the viewer's system.
        Chart Studio Cloud servers support only a fixed set of fonts:
        "Arial", "Balto", "Courier New", "Droid Sans", "Droid Serif",
        "Droid Sans Mono", "Gravitas One", "Old Standard TT",
        "Open Sans", "Overpass", "PT Sans Narrow", "Raleway",
        "Times New Roman".

        The 'family' property is a string and must be specified as:
          - a non-empty string
          - a tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["family"]

    @family.setter
    def family(self, val):
        self["family"] = val

    # -- familysrc ------------------------------------------------------
    @property
    def familysrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `family`.

        The 'familysrc' property must be specified as a string or as a
        plotly.grid_objs.Column object.

        Returns
        -------
        str
        """
        return self["familysrc"]

    @familysrc.setter
    def familysrc(self, val):
        self["familysrc"] = val

    # -- size -----------------------------------------------------------
    @property
    def size(self):
        """
        Text font size.

        The 'size' property is a number and may be specified as:
          - an int or float in the interval [1, inf]
          - a tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        int|float|numpy.ndarray
        """
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val

    # -- sizesrc --------------------------------------------------------
    @property
    def sizesrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `size`.

        The 'sizesrc' property must be specified as a string or as a
        plotly.grid_objs.Column object.

        Returns
        -------
        str
        """
        return self["sizesrc"]

    @sizesrc.setter
    def sizesrc(self, val):
        self["sizesrc"] = val

    # -- property descriptions used in validation error messages --------
    @property
    def _prop_descriptions(self):
        return """\
        color

        colorsrc
            Sets the source reference on Chart Studio Cloud for
            `color`.
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        familysrc
            Sets the source reference on Chart Studio Cloud for
            `family`.
        size

        sizesrc
            Sets the source reference on Chart Studio Cloud for
            `size`.
        """

    def __init__(
        self,
        arg=None,
        color=None,
        colorsrc=None,
        family=None,
        familysrc=None,
        size=None,
        sizesrc=None,
        **kwargs
    ):
        """
        Construct a new Textfont object

        Sets the text font.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.scattergeo.Textfont`
        color
            Text color.
        colorsrc
            Sets the source reference on Chart Studio Cloud for
            `color`.
        family
            HTML font family; see the `family` property for details.
        familysrc
            Sets the source reference on Chart Studio Cloud for
            `family`.
        size
            Text font size.
        sizesrc
            Sets the source reference on Chart Studio Cloud for
            `size`.

        Returns
        -------
        Textfont
        """
        super(Textfont, self).__init__("textfont")

        # Internal fast-path used when this object is built by a parent.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalise `arg` into a plain, shallow-copied dict.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scattergeo.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattergeo.Textfont`"""
            )

        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate properties: an explicit keyword argument wins over the
        # corresponding entry in `arg`; None values are simply skipped.
        explicit = {
            "color": color,
            "colorsrc": colorsrc,
            "family": family,
            "familysrc": familysrc,
            "size": size,
            "sizesrc": sizesrc,
        }
        for prop_name, kw_value in explicit.items():
            from_arg = arg.pop(prop_name, None)
            value = kw_value if kw_value is not None else from_arg
            if value is not None:
                self[prop_name] = value

        # Anything left over (unknown keys) goes through kwarg processing.
        self._process_kwargs(**dict(arg, **kwargs))

        # Construction finished; subsequent assignments validate normally.
        self._skip_invalid = False
# --- trailing extraction artifact (dataset-site footer text) removed ---